| code | repo_path | parsed_code | quality_prob | learning_prob |
|---|---|---|---|---|
import tensorflow as tf
slim = tf.contrib.slim
from tensorflow.contrib.framework.python.ops import add_arg_scope
from utils.slim_utils import _build_variable_getter, _add_variable_to_collections, convert_data_format
from tensorflow.python.ops import nn_ops
@add_arg_scope
def convolution(inputs,
                num_outputs,
                kernel_size=[3, 3],
                spectral_normalization=False,
                stride=1,
                padding='SAME',
                data_format=None,
                rate=1,
                activation_fn=tf.nn.relu,
                normalizer_fn=None,
                normalizer_params=None,
                weights_initializer=tf.contrib.layers.xavier_initializer(),
                weights_regularizer=None,
                biases_initializer=tf.zeros_initializer(),
                biases_regularizer=None,
                reuse=None,
                variables_collections=None,
                outputs_collections=None,
                trainable=True,
                scope=None):
    """Adds a 2-D convolution followed by an optional batch_norm layer.

    Note: although the argument documentation below (inherited from
    slim.convolution) describes N-D behavior, this wrapper explicitly
    rejects anything but rank-4 input, i.e. only 2-D convolution is
    supported here.

    `convolution` creates a variable called `weights`, representing the
    convolutional kernel, that is convolved (actually cross-correlated) with the
    `inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
    provided (such as `batch_norm`), it is then applied. Otherwise, if
    `normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
    variable would be created and added the activations. Finally, if
    `activation_fn` is not `None`, it is applied to the activations as well.
    Performs atrous convolution with input stride/dilation rate equal to `rate`
    if a value > 1 for any dimension of `rate` is specified. In this case
    `stride` values != 1 are not supported.

    Args:
      inputs: A Tensor of rank 4 of shape
        `[batch_size] + input_spatial_shape + [in_channels]` if data_format does
        not start with "NC" (default), or
        `[batch_size, in_channels] + input_spatial_shape` if data_format starts
        with "NC".
      num_outputs: Integer, the number of output filters.
      kernel_size: A sequence of 2 positive integers specifying the spatial
        dimensions of the filters. Can be a single integer to specify the same
        value for all spatial dimensions.
      spectral_normalization: If True, the kernel used by the convolution is
        divided by a power-iteration estimate of its largest singular value
        (see `spectral_normed_weight`).
      stride: A sequence of 2 positive integers specifying the stride at which
        to compute output. Can be a single integer to specify the same value
        for all spatial dimensions. Specifying any `stride` value != 1 is
        incompatible with specifying any `rate` value != 1.
      padding: One of `"VALID"` or `"SAME"`.
      data_format: A string or None. Specifies whether the channel dimension of
        the `input` and output is the last dimension (default, or if `data_format`
        does not start with "NC"), or the second dimension (if `data_format`
        starts with "NC"). The valid values are "NHWC" (default) and "NCHW".
      rate: A sequence of 2 positive integers specifying the dilation rate to
        use for atrous convolution. Can be a single integer to specify the same
        value for all spatial dimensions. Specifying any `rate` value != 1 is
        incompatible with specifying any `stride` value != 1.
      activation_fn: Activation function. The default value is a ReLU function.
        Explicitly set it to None to skip it and maintain a linear activation.
      normalizer_fn: Normalization function to use instead of `biases`. If
        `normalizer_fn` is provided then `biases_initializer` and
        `biases_regularizer` are ignored and `biases` are not created nor added.
        default set to None for no normalizer function
      normalizer_params: Normalization function parameters.
      weights_initializer: An initializer for the weights.
      weights_regularizer: Optional regularizer for the weights.
      biases_initializer: An initializer for the biases. If None skip biases.
      biases_regularizer: Optional regularizer for the biases.
      reuse: Whether or not the layer and its variables should be reused. To be
        able to reuse the layer scope must be given.
      variables_collections: Optional list of collections for all the variables or
        a dictionary containing a different list of collection per variable.
      outputs_collections: Collection to add the outputs.
      trainable: If `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
      scope: Optional scope for `variable_scope`.

    Returns:
      A tensor representing the output of the operation.

    Raises:
      ValueError: If `data_format` is invalid.
      ValueError: Both 'rate' and `stride` are not uniformly 1.
    """
    if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
        raise ValueError('Invalid data_format: %r' % (data_format,))
    # Map tf.layers variable names ('kernel'/'bias') to slim-style names
    # ('weights'/'biases') so variable reuse and collections follow slim
    # conventions.
    layer_variable_getter = _build_variable_getter(
        {'bias': 'biases', 'kernel': 'weights'})
    with tf.variable_scope(
            scope, 'Conv', [inputs], reuse=reuse,
            custom_getter=layer_variable_getter) as sc:
        inputs = tf.convert_to_tensor(inputs)
        input_rank = inputs.get_shape().ndims
        # Only 2-D convolution (rank-4 input) is supported by the layer below.
        if input_rank != 4:
            raise ValueError('Invalid input rank: %i' % (input_rank,))
        df = ('channels_first' if data_format and data_format.startswith('NC')
              else 'channels_last')
        layer = SpectralNormalizedConvolution2D(
            filters=num_outputs,
            kernel_size=kernel_size,
            spectral_normalization=spectral_normalization,
            strides=stride,
            padding=padding,
            data_format=df,
            dilation_rate=rate,
            activation=None,
            # Bias is only created when no normalizer replaces it and an
            # initializer was supplied (slim convention).
            use_bias=not normalizer_fn and biases_initializer,
            kernel_initializer=weights_initializer,
            bias_initializer=biases_initializer,
            kernel_regularizer=weights_regularizer,
            bias_regularizer=biases_regularizer,
            activity_regularizer=None,
            trainable=trainable,
            name=sc.name,
            dtype=inputs.dtype.base_dtype,
            _scope=sc,
            _reuse=reuse)
        outputs = layer.apply(inputs)
        # Add variables to collections.
        _add_variable_to_collections(layer.kernel, variables_collections, 'weights')
        if layer.use_bias:
            _add_variable_to_collections(layer.bias, variables_collections, 'biases')
        if normalizer_fn is not None:
            normalizer_params = normalizer_params or {}
            outputs = normalizer_fn(outputs, **normalizer_params)
        if activation_fn is not None:
            outputs = activation_fn(outputs)
        return slim.utils.collect_named_outputs(outputs_collections, sc.name, outputs)
NO_OPS = 'NO_OPS'


def spectral_normed_weight(W, u=None, num_iters=1, update_collection=None, with_sigma=False):
    """Spectrally normalize `W` using power iteration.

    The weight is flattened to a matrix `[-1, W.shape[-1]]`, its largest
    singular value `sigma` is estimated with `num_iters` rounds of power
    iteration, and `W / sigma` (reshaped back to `W`'s shape) is returned.

    Args:
      W: Weight tensor of any static shape.
      u: Optional `[1, W.shape[-1]]` variable holding the running right
        singular-vector estimate. Created here (name "u", non-trainable)
        when None.
      num_iters: Number of power-iteration steps; usually 1 is enough.
      update_collection: Controls how `u` is refreshed. If None, `u` is
        assigned on every execution of the returned tensor (via a control
        dependency). If a collection name, the assign op is added to that
        collection instead. If `NO_OPS`, no update op is created at all —
        useful for a second discriminator call whose update op was already
        collected on the first call.
      with_sigma: If True, also return the sigma estimate.

    Returns:
      The normalized weight `W_bar` (same shape as `W`), or the tuple
      `(W_bar, sigma)` when `with_sigma` is True.
    """
    W_shape = W.shape.as_list()
    W_reshaped = tf.reshape(W, [-1, W_shape[-1]])
    if u is None:
        u = tf.get_variable("u", [1, W_shape[-1]],
                            initializer=tf.truncated_normal_initializer(),
                            trainable=False)

    def power_iteration(i, u_i, v_i):
        # One alternating step: v <- normalize(u W^T), u <- normalize(v W).
        v_ip1 = tf.nn.l2_normalize(tf.matmul(u_i, tf.transpose(W_reshaped)))
        u_ip1 = tf.nn.l2_normalize(tf.matmul(v_ip1, W_reshaped))
        return i + 1, u_ip1, v_ip1

    _, u_final, v_final = tf.while_loop(
        cond=lambda i, _1, _2: i < num_iters,
        body=power_iteration,
        loop_vars=(tf.constant(0, dtype=tf.int32),
                   u, tf.zeros(dtype=tf.float32,
                               shape=[1, W_reshaped.shape.as_list()[0]]))
    )
    # sigma approximates the largest singular value of W_reshaped. It was
    # previously computed identically in both branches below; do it once.
    sigma = tf.matmul(tf.matmul(v_final, W_reshaped), tf.transpose(u_final))[0, 0]
    W_bar = W_reshaped / sigma
    if update_collection is None:
        # u is refreshed every time W_bar is evaluated. This may be
        # undesirable; consider passing an update collection instead.
        with tf.control_dependencies([u.assign(u_final)]):
            W_bar = tf.reshape(W_bar, W_shape)
    else:
        W_bar = tf.reshape(W_bar, W_shape)
        # NO_OPS means: register no update op at all (e.g. the second call
        # of the discriminator, whose update was collected on the first).
        if update_collection != NO_OPS:
            tf.add_to_collection(update_collection, u.assign(u_final))
    if with_sigma:
        return W_bar, sigma
    return W_bar
class SpectralNormalizedConvolution2D(tf.layers.Conv2D):
    """`tf.layers.Conv2D` with an optional spectrally-normalized kernel.

    Identical to the parent layer except that, when
    `spectral_normalization` is True, `build()` replaces the kernel
    variable by `spectral_normed_weight(kernel)`, i.e. the kernel divided
    by a power-iteration estimate of its largest singular value.
    """

    def __init__(self, filters,
                 kernel_size,
                 spectral_normalization=False,
                 strides=(1, 1),
                 padding='valid',
                 data_format='channels_last',
                 dilation_rate=(1, 1),
                 activation=None,
                 use_bias=True,
                 kernel_initializer=None,
                 bias_initializer=tf.zeros_initializer(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 **kwargs):
        # All standard arguments are forwarded unchanged to Conv2D; only
        # `spectral_normalization` is consumed here.
        super(SpectralNormalizedConvolution2D, self).__init__(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            trainable=trainable,
            name=name, **kwargs)
        # Whether build() wraps the kernel with spectral normalization.
        self.spectral_normalization = spectral_normalization

    def build(self, input_shape):
        """Creates kernel/bias variables and the convolution op.

        Mirrors `tf.layers.Conv2D.build`, with the spectral-normalization
        hook inserted right after the kernel variable is created.
        """
        input_shape = tf.TensorShape(input_shape)
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis].value
        kernel_shape = self.kernel_size + (input_dim, self.filters)
        self.kernel = self.add_variable(name='kernel',
                                        shape=kernel_shape,
                                        initializer=self.kernel_initializer,
                                        regularizer=self.kernel_regularizer,
                                        constraint=self.kernel_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        if self.spectral_normalization:
            # TODO pass update_collection?
            # Under variable reuse (e.g. a second discriminator call) skip
            # registering a new u-update op; otherwise update u in place.
            vs = tf.get_variable_scope()
            upd_coll = None if not vs.reuse else NO_OPS
            # NOTE(review): after this, self.kernel is a Tensor (not the
            # Variable), so downstream code that expects a Variable should
            # be checked.
            self.kernel = spectral_normed_weight(self.kernel, update_collection=upd_coll)
        if self.use_bias:
            self.bias = self.add_variable(name='bias',
                                          shape=(self.filters,),
                                          initializer=self.bias_initializer,
                                          regularizer=self.bias_regularizer,
                                          constraint=self.bias_constraint,
                                          trainable=True,
                                          dtype=self.dtype)
        else:
            self.bias = None
        self.input_spec = tf.layers.InputSpec(ndim=self.rank + 2,
                                              axes={channel_axis: input_dim})
        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel.get_shape(),
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self.padding.upper(),
            data_format=convert_data_format(self.data_format,
                                            self.rank + 2))
        self.built = True
class SpectralNormalizedConvolution2DTranspose(tf.layers.Conv2DTranspose):
    """`tf.layers.Conv2DTranspose` with an optional spectrally-normalized kernel.

    Identical to the parent layer except that, when
    `spectral_normalization` is True, `build()` replaces the kernel
    variable by `spectral_normed_weight(kernel)`.
    """

    def __init__(self, filters,
                 kernel_size,
                 spectral_normalization=False,
                 strides=(1, 1),
                 padding='valid',
                 data_format='channels_last',
                 dilation_rate=(1, 1),
                 activation=None,
                 use_bias=True,
                 kernel_initializer=None,
                 bias_initializer=tf.zeros_initializer(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 **kwargs):
        # NOTE(review): `dilation_rate` is forwarded to Conv2DTranspose,
        # which may not accept it in all TF 1.x versions — confirm against
        # the pinned TF release.
        super(SpectralNormalizedConvolution2DTranspose, self).__init__(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            trainable=trainable,
            name=name, **kwargs)
        # Whether build() wraps the kernel with spectral normalization.
        self.spectral_normalization = spectral_normalization

    def build(self, input_shape):
        """Creates kernel/bias variables, with the spectral-norm hook.

        Mirrors `tf.layers.Conv2DTranspose.build`; note the transposed
        kernel layout `kernel_size + (filters, input_dim)`.
        """
        if len(input_shape) != 4:
            raise ValueError('Inputs should have rank 4. Received input shape: ' +
                             str(input_shape))
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        # NOTE(review): unlike the Conv2D variant above, the channel
        # dimension is used without `.value` — presumably input_shape is a
        # plain sequence here; confirm callers always pass one.
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        self.input_spec = tf.layers.InputSpec(ndim=4, axes={channel_axis: input_dim})
        kernel_shape = self.kernel_size + (self.filters, input_dim)
        self.kernel = self.add_variable(name='kernel',
                                        shape=kernel_shape,
                                        initializer=self.kernel_initializer,
                                        regularizer=self.kernel_regularizer,
                                        constraint=self.kernel_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        if self.spectral_normalization:
            # TODO pass update_collection?
            # Under variable reuse, skip registering a new u-update op.
            vs = tf.get_variable_scope()
            upd_coll = None if not vs.reuse else NO_OPS
            self.kernel = spectral_normed_weight(self.kernel, update_collection=upd_coll)
        if self.use_bias:
            self.bias = self.add_variable(name='bias',
                                          shape=(self.filters,),
                                          initializer=self.bias_initializer,
                                          regularizer=self.bias_regularizer,
                                          constraint=self.bias_constraint,
                                          trainable=True,
                                          dtype=self.dtype)
        else:
            self.bias = None
        self.built = True
@add_arg_scope
def convolution2d_transpose(
        inputs,
        num_outputs,
        kernel_size,
        stride=1,
        spectral_normalization=False,
        padding='SAME',
        data_format='NHWC',
        activation_fn=tf.nn.relu,
        normalizer_fn=None,
        normalizer_params=None,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        weights_regularizer=None,
        biases_initializer=tf.zeros_initializer(),
        biases_regularizer=None,
        reuse=None,
        variables_collections=None,
        outputs_collections=None,
        trainable=True,
        scope=None):
    """Adds a convolution2d_transpose with an optional batch normalization layer.

    The function creates a variable called `weights`, representing the
    kernel, that is convolved with the input. If `normalizer_fn` is `None`, a
    second variable called 'biases' is added to the result of the operation.

    Args:
      inputs: A 4-D `Tensor` of type `float` and shape
        `[batch, height, width, in_channels]` for `NHWC` data format or
        `[batch, in_channels, height, width]` for `NCHW` data format.
      num_outputs: Integer, the number of output filters.
      kernel_size: A list of length 2 holding the [kernel_height, kernel_width] of
        of the filters. Can be an int if both values are the same.
      stride: A list of length 2: [stride_height, stride_width].
        Can be an int if both strides are the same. Note that presently
        both strides must have the same value.
      spectral_normalization: If True, the transposed-convolution kernel is
        divided by a power-iteration estimate of its largest singular value
        (see `spectral_normed_weight`).
      padding: One of 'VALID' or 'SAME'.
      data_format: A string. `NHWC` (default) and `NCHW` are supported.
      activation_fn: Activation function. The default value is a ReLU function.
        Explicitly set it to None to skip it and maintain a linear activation.
      normalizer_fn: Normalization function to use instead of `biases`. If
        `normalizer_fn` is provided then `biases_initializer` and
        `biases_regularizer` are ignored and `biases` are not created nor added.
        default set to None for no normalizer function
      normalizer_params: Normalization function parameters.
      weights_initializer: An initializer for the weights.
      weights_regularizer: Optional regularizer for the weights.
      biases_initializer: An initializer for the biases. If None skip biases.
      biases_regularizer: Optional regularizer for the biases.
      reuse: Whether or not the layer and its variables should be reused. To be
        able to reuse the layer scope must be given.
      variables_collections: Optional list of collections for all the variables or
        a dictionary containing a different list of collection per variable.
      outputs_collections: Collection to add the outputs.
      trainable: Whether or not the variables should be trainable or not.
      scope: Optional scope for variable_scope.

    Returns:
      A tensor representing the output of the operation.

    Raises:
      ValueError: If 'kernel_size' is not a list of length 2.
      ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
      ValueError: If `C` dimension of `inputs` is None.
    """
    # Map tf.layers variable names ('kernel'/'bias') to slim-style names
    # ('weights'/'biases') so variable reuse and collections follow slim
    # conventions.
    layer_variable_getter = _build_variable_getter(
        {'bias': 'biases', 'kernel': 'weights'})
    with tf.variable_scope(
            scope, 'Conv2d_transpose', [inputs], reuse=reuse,
            custom_getter=layer_variable_getter) as sc:
        if data_format not in ('NCHW', 'NHWC'):
            raise ValueError('data_format has to be either NCHW or NHWC.')
        inputs = tf.convert_to_tensor(inputs)
        df = ('channels_first' if data_format and data_format.startswith('NC')
              else 'channels_last')
        layer = SpectralNormalizedConvolution2DTranspose(
            filters=num_outputs,
            kernel_size=kernel_size,
            strides=stride,
            padding=padding,
            # BUG FIX: this was hard-coded to False, silently ignoring the
            # `spectral_normalization` argument. Forward it instead.
            spectral_normalization=spectral_normalization,
            data_format=df,
            activation=None,
            # Bias is only created when no normalizer replaces it and an
            # initializer was supplied (slim convention).
            use_bias=not normalizer_fn and biases_initializer,
            kernel_initializer=weights_initializer,
            bias_initializer=biases_initializer,
            kernel_regularizer=weights_regularizer,
            bias_regularizer=biases_regularizer,
            activity_regularizer=None,
            trainable=trainable,
            name=sc.name,
            dtype=inputs.dtype.base_dtype,
            _scope=sc,
            _reuse=reuse)
        outputs = layer.apply(inputs)
        # Add variables to collections.
        _add_variable_to_collections(layer.kernel, variables_collections, 'weights')
        if layer.bias is not None:
            _add_variable_to_collections(layer.bias, variables_collections, 'biases')
        if normalizer_fn is not None:
            normalizer_params = normalizer_params or {}
            outputs = normalizer_fn(outputs, **normalizer_params)
        if activation_fn is not None:
            outputs = activation_fn(outputs)
        return slim.utils.collect_named_outputs(outputs_collections, sc.name, outputs)
slim = tf.contrib.slim
from tensorflow.contrib.framework.python.ops import add_arg_scope
from utils.slim_utils import _build_variable_getter, _add_variable_to_collections, convert_data_format
from tensorflow.python.ops import nn_ops
@add_arg_scope
def convolution(inputs,
num_outputs,
kernel_size=[3, 3],
spectral_normalization=False,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=tf.nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=tf.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds an N-D convolution followed by an optional batch_norm layer.
It is required that 1 <= N <= 3.
`convolution` creates a variable called `weights`, representing the
convolutional kernel, that is convolved (actually cross-correlated) with the
`inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
provided (such as `batch_norm`), it is then applied. Otherwise, if
`normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
variable would be created and added the activations. Finally, if
`activation_fn` is not `None`, it is applied to the activations as well.
Performs atrous convolution with input stride/dilation rate equal to `rate`
if a value > 1 for any dimension of `rate` is specified. In this case
`stride` values != 1 are not supported.
Args:
inputs: A Tensor of rank N+2 of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
num_outputs: Integer, the number of output filters.
kernel_size: A sequence of N positive integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
stride: A sequence of N positive integers specifying the stride at which to
compute output. Can be a single integer to specify the same value for all
spatial dimensions. Specifying any `stride` value != 1 is incompatible
with specifying any `rate` value != 1.
padding: One of `"VALID"` or `"SAME"`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
rate: A sequence of N positive integers specifying the dilation rate to use
for atrous convolution. Can be a single integer to specify the same
value for all spatial dimensions. Specifying any `rate` value != 1 is
incompatible with specifying any `stride` value != 1.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
default set to None for no normalizer function
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If `data_format` is invalid.
ValueError: Both 'rate' and `stride` are not uniformly 1.
"""
if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
raise ValueError('Invalid data_format: %r' % (data_format,))
layer_variable_getter = _build_variable_getter(
{'bias': 'biases', 'kernel': 'weights'})
with tf.variable_scope(
scope, 'Conv', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = tf.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if input_rank != 4:
raise ValueError('Invalid input rank: %i' % (input_rank,))
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = SpectralNormalizedConvolution2D(
filters=num_outputs,
kernel_size=kernel_size,
spectral_normalization=spectral_normalization,
strides=stride,
padding=padding,
data_format=df,
dilation_rate=rate,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.use_bias:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return slim.utils.collect_named_outputs(outputs_collections, sc.name, outputs)
NO_OPS = 'NO_OPS'
def spectral_normed_weight(W, u=None, num_iters=1, update_collection=None, with_sigma=False):
# Usually num_iters = 1 will be enough
W_shape = W.shape.as_list()
W_reshaped = tf.reshape(W, [-1, W_shape[-1]])
if u is None:
u = tf.get_variable("u", [1, W_shape[-1]], initializer=tf.truncated_normal_initializer(), trainable=False)
def power_iteration(i, u_i, v_i):
v_ip1 = tf.nn.l2_normalize(tf.matmul(u_i, tf.transpose(W_reshaped)))
u_ip1 = tf.nn.l2_normalize(tf.matmul(v_ip1, W_reshaped))
return i + 1, u_ip1, v_ip1
_, u_final, v_final = tf.while_loop(
cond=lambda i, _1, _2: i < num_iters,
body=power_iteration,
loop_vars=(tf.constant(0, dtype=tf.int32),
u, tf.zeros(dtype=tf.float32, shape=[1, W_reshaped.shape.as_list()[0]]))
)
if update_collection is None:
# warnings.warn('Setting update_collection to None will make u being updated every W execution. This maybe undesirable'
# '. Please consider using a update collection instead.')
sigma = tf.matmul(tf.matmul(v_final, W_reshaped), tf.transpose(u_final))[0, 0]
# sigma = tf.reduce_sum(tf.matmul(u_final, tf.transpose(W_reshaped)) * v_final)
W_bar = W_reshaped / sigma
with tf.control_dependencies([u.assign(u_final)]):
W_bar = tf.reshape(W_bar, W_shape)
else:
sigma = tf.matmul(tf.matmul(v_final, W_reshaped), tf.transpose(u_final))[0, 0]
# sigma = tf.reduce_sum(tf.matmul(u_final, tf.transpose(W_reshaped)) * v_final)
W_bar = W_reshaped / sigma
W_bar = tf.reshape(W_bar, W_shape)
# Put NO_OPS to not update any collection. This is useful for the second call of discriminator if the update_op
# has already been collected on the first call.
if update_collection != NO_OPS:
tf.add_to_collection(update_collection, u.assign(u_final))
if with_sigma:
return W_bar, sigma
else:
return W_bar
class SpectralNormalizedConvolution2D(tf.layers.Conv2D):
def __init__(self, filters,
kernel_size,
spectral_normalization=False,
strides=(1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(SpectralNormalizedConvolution2D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name, **kwargs)
self.spectral_normalization = spectral_normalization
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis].value
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.kernel = self.add_variable(name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.spectral_normalization:
# TODO pass update_collection?
vs = tf.get_variable_scope()
upd_coll = None if not vs.reuse else NO_OPS
# print("update collection = ", upd_coll)
self.kernel = spectral_normed_weight(self.kernel, update_collection=upd_coll)
if self.use_bias:
self.bias = self.add_variable(name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.input_spec = tf.layers.InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
self._convolution_op = nn_ops.Convolution(
input_shape,
filter_shape=self.kernel.get_shape(),
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=self.padding.upper(),
data_format=convert_data_format(self.data_format,
self.rank + 2))
self.built = True
class SpectralNormalizedConvolution2DTranspose(tf.layers.Conv2DTranspose):
def __init__(self, filters,
kernel_size,
spectral_normalization=False,
strides=(1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(SpectralNormalizedConvolution2DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name, **kwargs)
self.spectral_normalization = spectral_normalization
def build(self, input_shape):
if len(input_shape) != 4:
raise ValueError('Inputs should have rank 4. Received input shape: ' +
str(input_shape))
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
self.input_spec = tf.layers.InputSpec(ndim=4, axes={channel_axis: input_dim})
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.kernel = self.add_variable(name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.spectral_normalization:
# TODO pass update_collection?
vs = tf.get_variable_scope()
upd_coll = None if not vs.reuse else NO_OPS
self.kernel = spectral_normed_weight(self.kernel, update_collection=upd_coll)
if self.use_bias:
self.bias = self.add_variable(name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
@add_arg_scope
def convolution2d_transpose(
inputs,
num_outputs,
kernel_size,
stride=1,
spectral_normalization=False,
padding='SAME',
data_format='NHWC',
activation_fn=tf.nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=tf.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a convolution2d_transpose with an optional batch normalization layer.
The function creates a variable called `weights`, representing the
kernel, that is convolved with the input. If `normalizer_fn` is `None`, a
second variable called 'biases' is added to the result of the operation.
Args:
inputs: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]` for `NHWC` data format or
`[batch, in_channels, height, width]` for `NCHW` data format.
num_outputs: Integer, the number of output filters.
kernel_size: A list of length 2 holding the [kernel_height, kernel_width] of
of the filters. Can be an int if both values are the same.
stride: A list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: One of 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
default set to None for no normalizer function
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: Whether or not the variables should be trainable or not.
scope: Optional scope for variable_scope.
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If 'kernel_size' is not a list of length 2.
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If `C` dimension of `inputs` is None.
"""
layer_variable_getter = _build_variable_getter(
{'bias': 'biases', 'kernel': 'weights'})
with tf.variable_scope(
scope, 'Conv2d_transpose', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
if data_format not in ('NCHW', 'NHWC'):
raise ValueError('data_format has to be either NCHW or NHWC.')
inputs = tf.convert_to_tensor(inputs)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = SpectralNormalizedConvolution2DTranspose(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
spectral_normalization=False,
data_format=df,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return slim.utils.collect_named_outputs(outputs_collections, sc.name, outputs) | 0.959001 | 0.472866 |
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
# Installed Packages
from knox.models import AuthToken
from knox import settings
# Django Packages
from django.contrib.auth import get_user_model
url = reverse("rest-social-email-auth:user-login")
# response = api_client.post(url, data)
def test_authenticate_account_with_username(api_client, user_factory):
"""
Ensuring the user is in the system and does the spot yaamean I was listening to reggae writing affi man
:return:
"""
user_account = user_factory()
data = {"username": user_account.username, "password": "password"}
response = api_client.post(url, data)
assert response.data["user"]["username"] == data["username"]
assert user_account.check_password(data["password"])
def test_authenticate_account_with_email(api_client, user_factory):
"""
Ensuring the user is in the system and does the activatian with email yaa mean is a plan
:return:
"""
user_account = user_factory()
data = {"username": user_account.email, "password": "password"}
response = api_client.post(url, data)
assert response.data["user"]["email"] == data["username"]
assert user_account.check_password(data["password"])
def test_authenticate_account_with_phone_number(api_client, user_factory):
"""
Ensuring the user is in the system and does the activatian with email yaa mean is a plan
:return:
"""
user_account = user_factory()
data = {"username": user_account.phone_number, "password": "password"}
response = api_client.post(url, data)
print(response.data)
assert response.data["user"]["phone_number"] == data["username"]
assert user_account.check_password(data["password"])
class AccountTokenLoginTest(APITestCase):
def token_verification(self, auth_token):
token = auth_token.split("Token ")[1]
return token[: settings.CONSTANTS.TOKEN_KEY_LENGTH]
def setUp(self):
"""
Originally creating a user from scratch to add up to users at the same time
:return:
"""
self.test_user = get_user_model().objects.create_user(
username="testuser",
email="<EMAIL>",
password="<PASSWORD>",
phone_number="+254715943570",
)
self.test_user.save()
self.create_url = url
def test_authenticate_account_with_token_recognition(self):
"""
Ensuring the user in the system has token Authenticatian naa mean
"""
self.assertEqual(AuthToken.objects.count(), 0)
# account = Account.objects.latest('id')
data = {
"username": "<EMAIL>",
"password": "<PASSWORD>",
}
response = self.client.post(self.create_url, data, format="json")
self.client.credentials(HTTP_AUTHORIZATION=response["Authorization"])
self.assertEqual(AuthToken.objects.count(), 1)
self.assertEqual(
self.token_verification(response["Authorization"]),
AuthToken.objects.latest("user_id").token_key,
)
self.assertEqual(1, 1)
self.assertTrue(all(e.token_key for e in AuthToken.objects.all())) | tests/views/test_user_login_view.py | from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
# Installed Packages
from knox.models import AuthToken
from knox import settings
# Django Packages
from django.contrib.auth import get_user_model
url = reverse("rest-social-email-auth:user-login")
# response = api_client.post(url, data)
def test_authenticate_account_with_username(api_client, user_factory):
"""
Ensuring the user is in the system and does the spot yaamean I was listening to reggae writing affi man
:return:
"""
user_account = user_factory()
data = {"username": user_account.username, "password": "password"}
response = api_client.post(url, data)
assert response.data["user"]["username"] == data["username"]
assert user_account.check_password(data["password"])
def test_authenticate_account_with_email(api_client, user_factory):
"""
Ensuring the user is in the system and does the activatian with email yaa mean is a plan
:return:
"""
user_account = user_factory()
data = {"username": user_account.email, "password": "password"}
response = api_client.post(url, data)
assert response.data["user"]["email"] == data["username"]
assert user_account.check_password(data["password"])
def test_authenticate_account_with_phone_number(api_client, user_factory):
"""
Ensuring the user is in the system and does the activatian with email yaa mean is a plan
:return:
"""
user_account = user_factory()
data = {"username": user_account.phone_number, "password": "password"}
response = api_client.post(url, data)
print(response.data)
assert response.data["user"]["phone_number"] == data["username"]
assert user_account.check_password(data["password"])
class AccountTokenLoginTest(APITestCase):
def token_verification(self, auth_token):
token = auth_token.split("Token ")[1]
return token[: settings.CONSTANTS.TOKEN_KEY_LENGTH]
def setUp(self):
"""
Originally creating a user from scratch to add up to users at the same time
:return:
"""
self.test_user = get_user_model().objects.create_user(
username="testuser",
email="<EMAIL>",
password="<PASSWORD>",
phone_number="+254715943570",
)
self.test_user.save()
self.create_url = url
def test_authenticate_account_with_token_recognition(self):
"""
Ensuring the user in the system has token Authenticatian naa mean
"""
self.assertEqual(AuthToken.objects.count(), 0)
# account = Account.objects.latest('id')
data = {
"username": "<EMAIL>",
"password": "<PASSWORD>",
}
response = self.client.post(self.create_url, data, format="json")
self.client.credentials(HTTP_AUTHORIZATION=response["Authorization"])
self.assertEqual(AuthToken.objects.count(), 1)
self.assertEqual(
self.token_verification(response["Authorization"]),
AuthToken.objects.latest("user_id").token_key,
)
self.assertEqual(1, 1)
self.assertTrue(all(e.token_key for e in AuthToken.objects.all())) | 0.599837 | 0.438485 |
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_ips_global
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_ips_global.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_ips_global_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'ips_global': {
'anomaly_mode': 'periodical',
'database': 'regular',
'deep_app_insp_db_limit': '5',
'deep_app_insp_timeout': '6',
'engine_count': '7',
'exclude_signatures': 'none',
'fail_open': 'enable',
'intelligent_mode': 'enable',
'session_limit_mode': 'accurate',
'skype_client_public_ipaddr': 'test_value_12',
'socket_size': '13',
'sync_session_ttl': 'enable',
'traffic_submit': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_ips_global.fortios_ips(input_data, fos_instance)
expected_data = {
'anomaly-mode': 'periodical',
'database': 'regular',
'deep-app-insp-db-limit': '5',
'deep-app-insp-timeout': '6',
'engine-count': '7',
'exclude-signatures': 'none',
'fail-open': 'enable',
'intelligent-mode': 'enable',
'session-limit-mode': 'accurate',
'skype-client-public-ipaddr': 'test_value_12',
'socket-size': '13',
'sync-session-ttl': 'enable',
'traffic-submit': 'enable'
}
set_method_mock.assert_called_with('ips', 'global', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_ips_global_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'ips_global': {
'anomaly_mode': 'periodical',
'database': 'regular',
'deep_app_insp_db_limit': '5',
'deep_app_insp_timeout': '6',
'engine_count': '7',
'exclude_signatures': 'none',
'fail_open': 'enable',
'intelligent_mode': 'enable',
'session_limit_mode': 'accurate',
'skype_client_public_ipaddr': 'test_value_12',
'socket_size': '13',
'sync_session_ttl': 'enable',
'traffic_submit': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_ips_global.fortios_ips(input_data, fos_instance)
expected_data = {
'anomaly-mode': 'periodical',
'database': 'regular',
'deep-app-insp-db-limit': '5',
'deep-app-insp-timeout': '6',
'engine-count': '7',
'exclude-signatures': 'none',
'fail-open': 'enable',
'intelligent-mode': 'enable',
'session-limit-mode': 'accurate',
'skype-client-public-ipaddr': 'test_value_12',
'socket-size': '13',
'sync-session-ttl': 'enable',
'traffic-submit': 'enable'
}
set_method_mock.assert_called_with('ips', 'global', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_ips_global_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'ips_global': {
'anomaly_mode': 'periodical',
'database': 'regular',
'deep_app_insp_db_limit': '5',
'deep_app_insp_timeout': '6',
'engine_count': '7',
'exclude_signatures': 'none',
'fail_open': 'enable',
'intelligent_mode': 'enable',
'session_limit_mode': 'accurate',
'skype_client_public_ipaddr': 'test_value_12',
'socket_size': '13',
'sync_session_ttl': 'enable',
'traffic_submit': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_ips_global.fortios_ips(input_data, fos_instance)
expected_data = {
'anomaly-mode': 'periodical',
'database': 'regular',
'deep-app-insp-db-limit': '5',
'deep-app-insp-timeout': '6',
'engine-count': '7',
'exclude-signatures': 'none',
'fail-open': 'enable',
'intelligent-mode': 'enable',
'session-limit-mode': 'accurate',
'skype-client-public-ipaddr': 'test_value_12',
'socket-size': '13',
'sync-session-ttl': 'enable',
'traffic-submit': 'enable'
}
set_method_mock.assert_called_with('ips', 'global', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_ips_global_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'ips_global': {
'random_attribute_not_valid': 'tag',
'anomaly_mode': 'periodical',
'database': 'regular',
'deep_app_insp_db_limit': '5',
'deep_app_insp_timeout': '6',
'engine_count': '7',
'exclude_signatures': 'none',
'fail_open': 'enable',
'intelligent_mode': 'enable',
'session_limit_mode': 'accurate',
'skype_client_public_ipaddr': 'test_value_12',
'socket_size': '13',
'sync_session_ttl': 'enable',
'traffic_submit': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_ips_global.fortios_ips(input_data, fos_instance)
expected_data = {
'anomaly-mode': 'periodical',
'database': 'regular',
'deep-app-insp-db-limit': '5',
'deep-app-insp-timeout': '6',
'engine-count': '7',
'exclude-signatures': 'none',
'fail-open': 'enable',
'intelligent-mode': 'enable',
'session-limit-mode': 'accurate',
'skype-client-public-ipaddr': 'test_value_12',
'socket-size': '13',
'sync-session-ttl': 'enable',
'traffic-submit': 'enable'
}
set_method_mock.assert_called_with('ips', 'global', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200 | v6.0.6/ips/test_fortios_ips_global.py |
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_ips_global
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_ips_global.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_ips_global_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'ips_global': {
'anomaly_mode': 'periodical',
'database': 'regular',
'deep_app_insp_db_limit': '5',
'deep_app_insp_timeout': '6',
'engine_count': '7',
'exclude_signatures': 'none',
'fail_open': 'enable',
'intelligent_mode': 'enable',
'session_limit_mode': 'accurate',
'skype_client_public_ipaddr': 'test_value_12',
'socket_size': '13',
'sync_session_ttl': 'enable',
'traffic_submit': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_ips_global.fortios_ips(input_data, fos_instance)
expected_data = {
'anomaly-mode': 'periodical',
'database': 'regular',
'deep-app-insp-db-limit': '5',
'deep-app-insp-timeout': '6',
'engine-count': '7',
'exclude-signatures': 'none',
'fail-open': 'enable',
'intelligent-mode': 'enable',
'session-limit-mode': 'accurate',
'skype-client-public-ipaddr': 'test_value_12',
'socket-size': '13',
'sync-session-ttl': 'enable',
'traffic-submit': 'enable'
}
set_method_mock.assert_called_with('ips', 'global', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_ips_global_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'ips_global': {
'anomaly_mode': 'periodical',
'database': 'regular',
'deep_app_insp_db_limit': '5',
'deep_app_insp_timeout': '6',
'engine_count': '7',
'exclude_signatures': 'none',
'fail_open': 'enable',
'intelligent_mode': 'enable',
'session_limit_mode': 'accurate',
'skype_client_public_ipaddr': 'test_value_12',
'socket_size': '13',
'sync_session_ttl': 'enable',
'traffic_submit': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_ips_global.fortios_ips(input_data, fos_instance)
expected_data = {
'anomaly-mode': 'periodical',
'database': 'regular',
'deep-app-insp-db-limit': '5',
'deep-app-insp-timeout': '6',
'engine-count': '7',
'exclude-signatures': 'none',
'fail-open': 'enable',
'intelligent-mode': 'enable',
'session-limit-mode': 'accurate',
'skype-client-public-ipaddr': 'test_value_12',
'socket-size': '13',
'sync-session-ttl': 'enable',
'traffic-submit': 'enable'
}
set_method_mock.assert_called_with('ips', 'global', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_ips_global_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'ips_global': {
'anomaly_mode': 'periodical',
'database': 'regular',
'deep_app_insp_db_limit': '5',
'deep_app_insp_timeout': '6',
'engine_count': '7',
'exclude_signatures': 'none',
'fail_open': 'enable',
'intelligent_mode': 'enable',
'session_limit_mode': 'accurate',
'skype_client_public_ipaddr': 'test_value_12',
'socket_size': '13',
'sync_session_ttl': 'enable',
'traffic_submit': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_ips_global.fortios_ips(input_data, fos_instance)
expected_data = {
'anomaly-mode': 'periodical',
'database': 'regular',
'deep-app-insp-db-limit': '5',
'deep-app-insp-timeout': '6',
'engine-count': '7',
'exclude-signatures': 'none',
'fail-open': 'enable',
'intelligent-mode': 'enable',
'session-limit-mode': 'accurate',
'skype-client-public-ipaddr': 'test_value_12',
'socket-size': '13',
'sync-session-ttl': 'enable',
'traffic-submit': 'enable'
}
set_method_mock.assert_called_with('ips', 'global', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_ips_global_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'ips_global': {
'random_attribute_not_valid': 'tag',
'anomaly_mode': 'periodical',
'database': 'regular',
'deep_app_insp_db_limit': '5',
'deep_app_insp_timeout': '6',
'engine_count': '7',
'exclude_signatures': 'none',
'fail_open': 'enable',
'intelligent_mode': 'enable',
'session_limit_mode': 'accurate',
'skype_client_public_ipaddr': 'test_value_12',
'socket_size': '13',
'sync_session_ttl': 'enable',
'traffic_submit': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_ips_global.fortios_ips(input_data, fos_instance)
expected_data = {
'anomaly-mode': 'periodical',
'database': 'regular',
'deep-app-insp-db-limit': '5',
'deep-app-insp-timeout': '6',
'engine-count': '7',
'exclude-signatures': 'none',
'fail-open': 'enable',
'intelligent-mode': 'enable',
'session-limit-mode': 'accurate',
'skype-client-public-ipaddr': 'test_value_12',
'socket-size': '13',
'sync-session-ttl': 'enable',
'traffic-submit': 'enable'
}
set_method_mock.assert_called_with('ips', 'global', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200 | 0.699152 | 0.200597 |
from .models import account
from django.contrib.auth import authenticate,login,logout
from django.views.decorators.csrf import csrf_exempt
from rest_framework.response import Response
from rest_framework.decorators import api_view
import json
from .serializers import accountSerializer
from django.contrib.sessions.models import Session
from django.db.models import Q
@api_view(['POST'])
#@csrf_exempt
def Login(request):
"""
List all code snippets, or create a new snippet.
"""
body=request.data
username=body.get('username',None)
password=body.get('password',None)
user=authenticate(request,username=username,password=password)
if user is not None:
login(request,user)
return Response(accountSerializer(user, many=False).data)
return Response({"user":"failed to login"})
@api_view(['GET'])
#@csrf_exempt
def checkLogin(request):
userCookies=request.query_params.get('sid',None)
if userCookies is None:
return Response({"status":"no user with this session"})
s=Session.objects.get(pk=userCookies)
data=s.get_decoded()['_auth_user_id']
user=account.objects.filter(id=data)[0]
serializer=accountSerializer(user, many=False)
return Response(serializer.data)
@api_view(['POST'])
#@csrf_exempt
def Logout(request):
"""
List all code snippets, or create a new snippet.
"""
logout(request)
return Response({"status":"user logged out"})
@api_view(['POST'])
#@csrf_exempt
def Register(request):
"""
List all code snippets, or create a new snippet.
"""
address=request.data.get('address')
role=request.data.get('role')
password=request.data.get('password')
displayName=request.data.get('displayName')
additionalData=request.data.get('additionalData')
myuser=account.objects.create_user(address=address,role=int(role),password=password,displayName=displayName,additionalData=additionalData)
login(request,myuser)
return Response(accountSerializer(myuser, many=False).data)
@api_view(['GET'])
def getUsersByRole(request):
role=request.query_params.get('role')
users=account.objects.filter(role=int(role))
serializer=accountSerializer(users,many=True)
return Response(serializer.data)
@api_view(['POST'])
@csrf_exempt
def setAdditionalData(request):
address=request.data.get('address',None)
newData=request.data.get('data',None)
account.objects.filter(address=address).update(additionalData=newData)
user=account.objects.filter(address=address)
return Response(accountSerializer(user, many=True).data)
@api_view(['GET'])
@csrf_exempt
def getUsersByAddress(request):
currentAddress=request.query_params.get('address')
if currentAddress is None:
return Response({"status":"need at least one addres"})
addresses=currentAddress.split(',')
queries=[Q(address=i) for i in addresses]
query=queries.pop()
for item in queries:
query |=item
users=account.objects.filter(query)
return Response(accountSerializer(users,many=True).data) | backendApi/accounts/views.py | from .models import account
from django.contrib.auth import authenticate,login,logout
from django.views.decorators.csrf import csrf_exempt
from rest_framework.response import Response
from rest_framework.decorators import api_view
import json
from .serializers import accountSerializer
from django.contrib.sessions.models import Session
from django.db.models import Q
@api_view(['POST'])
#@csrf_exempt
def Login(request):
"""
List all code snippets, or create a new snippet.
"""
body=request.data
username=body.get('username',None)
password=body.get('password',None)
user=authenticate(request,username=username,password=password)
if user is not None:
login(request,user)
return Response(accountSerializer(user, many=False).data)
return Response({"user":"failed to login"})
@api_view(['GET'])
#@csrf_exempt
def checkLogin(request):
userCookies=request.query_params.get('sid',None)
if userCookies is None:
return Response({"status":"no user with this session"})
s=Session.objects.get(pk=userCookies)
data=s.get_decoded()['_auth_user_id']
user=account.objects.filter(id=data)[0]
serializer=accountSerializer(user, many=False)
return Response(serializer.data)
@api_view(['POST'])
#@csrf_exempt
def Logout(request):
"""
List all code snippets, or create a new snippet.
"""
logout(request)
return Response({"status":"user logged out"})
@api_view(['POST'])
#@csrf_exempt
def Register(request):
"""
List all code snippets, or create a new snippet.
"""
address=request.data.get('address')
role=request.data.get('role')
password=request.data.get('password')
displayName=request.data.get('displayName')
additionalData=request.data.get('additionalData')
myuser=account.objects.create_user(address=address,role=int(role),password=password,displayName=displayName,additionalData=additionalData)
login(request,myuser)
return Response(accountSerializer(myuser, many=False).data)
@api_view(['GET'])
def getUsersByRole(request):
role=request.query_params.get('role')
users=account.objects.filter(role=int(role))
serializer=accountSerializer(users,many=True)
return Response(serializer.data)
@api_view(['POST'])
@csrf_exempt
def setAdditionalData(request):
address=request.data.get('address',None)
newData=request.data.get('data',None)
account.objects.filter(address=address).update(additionalData=newData)
user=account.objects.filter(address=address)
return Response(accountSerializer(user, many=True).data)
@api_view(['GET'])
@csrf_exempt
def getUsersByAddress(request):
currentAddress=request.query_params.get('address')
if currentAddress is None:
return Response({"status":"need at least one addres"})
addresses=currentAddress.split(',')
queries=[Q(address=i) for i in addresses]
query=queries.pop()
for item in queries:
query |=item
users=account.objects.filter(query)
return Response(accountSerializer(users,many=True).data) | 0.375936 | 0.066873 |
import sys
class handleParameter():
def __init__(self):
self.args = []
for arg in sys.argv[1:]:
if sys.platform == 'win32' and sys.stdin.encoding == 'cp936':
arg = arg.decode('gbk')
self.args.append(arg)
def getAction(self):
if self.args.__len__() >= 2:
return self.args[1]
def _getCommand(self):
if self.args.__len__() >= 1:
return self.args[0]
def _getOperations(self):
operations = []
i = 1
_len = self.args.__len__()
if _len >= 2:
while i < _len:
if self.args[i].strip().find('--'):
operations.append(self.args[i])
else:
break
i = i+1
if len(operations):
return operations
else:
return None
def _getKeyValues(self):
keyValues = dict()
argslen = len(self.args)
if argslen >= 2:
current = 1
while current < argslen:
if self.args[current].strip().startswith('--'):
start = current + 1
if '=' in self.args[current].strip():
a = self.args[current].strip().split('=', 2)[0]
b = self.args[current].strip().split('=', 2)[1]
self.args[current] = a
self.args.insert(current + 1, b)
argslen = len(self.args)
key = self.args[current].strip()
values = list()
length = len(self.args)
while (start < length and
not self.args[start].strip().startswith('--')):
values.append(self.args[start].strip())
start = start + 1
keyValues[key] = values
current = start
else:
current = current+1
keys = list(keyValues.keys())
result = dict()
for key in keys:
value = keyValues.get(key)
key = key.replace('--', '')
result[key] = value
return result
def getUserDefinedOutPutFormat(self, keyValues):
keys = list(keyValues.keys())
for key in keys:
if key == 'output':
return keyValues.get(key)
return None
def getTempKeyAndSecret(self):
keyValues = dict()
len = self.args.__len__()
keystr = "--SecretId"
secretstr = "--SecretKey"
_key = None
_secret = None
if len >= 3:
for index in range(2, len):
currentValue = self.args[index]
if currentValue.find('--') >= 0:
index = index+1
values = list()
while index < len and self.args[index].find('--') < 0:
values.append(self.args[index])
index = index + 1
keyValues[currentValue] = values
if keystr in keyValues and keyValues[keystr].__len__() > 0:
_key = keyValues[keystr][0]
if secretstr in keyValues and keyValues[secretstr].__len__() > 0:
_secret = keyValues[secretstr][0]
return _key, _secret
def getAllExtensionCommands(self):
cmds = list()
cmds = ['help', '-h', '--help', ]
return cmds
def _getOpenApiKeyValues(self, map):
keys = list(map.keys())
newMap = dict()
for key in keys:
value = map.get(key)
key = key.replace('--', '')
newMap[key] = value
return newMap
def getExtensionKeyValues(self, map):
pass | qcloudcli/handleParameter.py | import sys
class handleParameter():
def __init__(self):
self.args = []
for arg in sys.argv[1:]:
if sys.platform == 'win32' and sys.stdin.encoding == 'cp936':
arg = arg.decode('gbk')
self.args.append(arg)
def getAction(self):
if self.args.__len__() >= 2:
return self.args[1]
def _getCommand(self):
if self.args.__len__() >= 1:
return self.args[0]
def _getOperations(self):
operations = []
i = 1
_len = self.args.__len__()
if _len >= 2:
while i < _len:
if self.args[i].strip().find('--'):
operations.append(self.args[i])
else:
break
i = i+1
if len(operations):
return operations
else:
return None
def _getKeyValues(self):
keyValues = dict()
argslen = len(self.args)
if argslen >= 2:
current = 1
while current < argslen:
if self.args[current].strip().startswith('--'):
start = current + 1
if '=' in self.args[current].strip():
a = self.args[current].strip().split('=', 2)[0]
b = self.args[current].strip().split('=', 2)[1]
self.args[current] = a
self.args.insert(current + 1, b)
argslen = len(self.args)
key = self.args[current].strip()
values = list()
length = len(self.args)
while (start < length and
not self.args[start].strip().startswith('--')):
values.append(self.args[start].strip())
start = start + 1
keyValues[key] = values
current = start
else:
current = current+1
keys = list(keyValues.keys())
result = dict()
for key in keys:
value = keyValues.get(key)
key = key.replace('--', '')
result[key] = value
return result
def getUserDefinedOutPutFormat(self, keyValues):
keys = list(keyValues.keys())
for key in keys:
if key == 'output':
return keyValues.get(key)
return None
def getTempKeyAndSecret(self):
keyValues = dict()
len = self.args.__len__()
keystr = "--SecretId"
secretstr = "--SecretKey"
_key = None
_secret = None
if len >= 3:
for index in range(2, len):
currentValue = self.args[index]
if currentValue.find('--') >= 0:
index = index+1
values = list()
while index < len and self.args[index].find('--') < 0:
values.append(self.args[index])
index = index + 1
keyValues[currentValue] = values
if keystr in keyValues and keyValues[keystr].__len__() > 0:
_key = keyValues[keystr][0]
if secretstr in keyValues and keyValues[secretstr].__len__() > 0:
_secret = keyValues[secretstr][0]
return _key, _secret
def getAllExtensionCommands(self):
cmds = list()
cmds = ['help', '-h', '--help', ]
return cmds
def _getOpenApiKeyValues(self, map):
keys = list(map.keys())
newMap = dict()
for key in keys:
value = map.get(key)
key = key.replace('--', '')
newMap[key] = value
return newMap
def getExtensionKeyValues(self, map):
pass | 0.14436 | 0.083143 |
from django.test import TestCase
from .models import Post, Profile, Neighbourhood
from datetime import datetime
from django.contrib.auth.models import User
class ProfileTest(TestCase):
''' test class for Profile model'''
def setUp(self):
''' method called before each test case'''
self.user = User.objects.create_user(username='vik')
def tearDown(self):
''' method to clear all setup instances after each test run '''
self.user.delete()
def test_profile_creation(self):
''' method to test profile instance is created only once for each user '''
self.assertIsInstance(self.user.profile, Profile)
self.user.save()
self.assertIsInstance(self.user.profile, Profile)
class TestPost(TestCase):
''' test class for image model '''
def setUp(self):
''' method called before each test case'''
self.test_user = User(username='vik', password='<PASSWORD>')
self.test_user.save()
self.test_profile = self.test_user.profile
self.test_profile.save()
self.test_post = Post(image='images/vik.jpg', title='vinstagram',description='Instagram Clone', profile=self.test_profile, live_link='https://vinsta.herokuapp.com/', created_on=datetime.now())
def test_instance(self):
''' test method to ensure post instance creation '''
self.assertTrue(isinstance(self.test_post, Post))
def test_save_and_delete(self):
''' test method to save and delete post instance to db '''
self.test_post.save_post()
self.assertEqual(len(Post.objects.all()), 1)
self.test_post.delete_post()
self.assertEqual(len(Post.objects.all()), 0)
def test_search_project(self):
''' test method to search projects by title '''
self.test_post.save_post()
res = Post.search_project('Vinstagram')
self.assertIsNotNone(res)
def tearDown(self):
''' method to clear all setup instances after each test run '''
self.test_user.delete()
Post.objects.all().delete()
class TestNeighbourhood(TestCase):
''' test class for Neighbourhood model '''
def setUp(self):
''' method called before all tests '''
self.test_user = User(username='vik', password='<PASSWORD>')
self.test_user.save()
self.test_profile = self.test_user.profile
self.test_profile.save()
self.test_post = Post(image='images/vik.jpg', title='Vinstagram',description='Instagram Clone', profile=self.test_profile, live_link='https://vinsta.herokuapp.com/', created_on=datetime.now())
self.test_post.save()
self.test_rate = Neighbourhood(interface=5, experience=6, content=5, user=self.test_profile, post=self.test_post)
def tearDown(self):
''' method called after every test '''
self.test_user.delete()
Post.objects.all().delete()
Neighbourhood.objects.all().delete()
def test_instance(self):
''' method to test instance creation '''
self.assertIsInstance(self.test_rate, Neighbourhood)
def test_save_and_delete_hood(self):
''' test method to save and delete hoods'''
self.test_rate.save_hood()
self.assertEqual(len(Neighbourhood.objects.all()), 1)
self.test_rate.delete_hood()
self.assertEqual(len(Neighbourhood.objects.all()), 0) | hoodapp/tests.py | from django.test import TestCase
from .models import Post, Profile, Neighbourhood
from datetime import datetime
from django.contrib.auth.models import User
class ProfileTest(TestCase):
''' test class for Profile model'''
def setUp(self):
''' method called before each test case'''
self.user = User.objects.create_user(username='vik')
def tearDown(self):
''' method to clear all setup instances after each test run '''
self.user.delete()
def test_profile_creation(self):
''' method to test profile instance is created only once for each user '''
self.assertIsInstance(self.user.profile, Profile)
self.user.save()
self.assertIsInstance(self.user.profile, Profile)
class TestPost(TestCase):
''' test class for image model '''
def setUp(self):
''' method called before each test case'''
self.test_user = User(username='vik', password='<PASSWORD>')
self.test_user.save()
self.test_profile = self.test_user.profile
self.test_profile.save()
self.test_post = Post(image='images/vik.jpg', title='vinstagram',description='Instagram Clone', profile=self.test_profile, live_link='https://vinsta.herokuapp.com/', created_on=datetime.now())
def test_instance(self):
''' test method to ensure post instance creation '''
self.assertTrue(isinstance(self.test_post, Post))
def test_save_and_delete(self):
''' test method to save and delete post instance to db '''
self.test_post.save_post()
self.assertEqual(len(Post.objects.all()), 1)
self.test_post.delete_post()
self.assertEqual(len(Post.objects.all()), 0)
def test_search_project(self):
''' test method to search projects by title '''
self.test_post.save_post()
res = Post.search_project('Vinstagram')
self.assertIsNotNone(res)
def tearDown(self):
''' method to clear all setup instances after each test run '''
self.test_user.delete()
Post.objects.all().delete()
class TestNeighbourhood(TestCase):
''' test class for Neighbourhood model '''
def setUp(self):
''' method called before all tests '''
self.test_user = User(username='vik', password='<PASSWORD>')
self.test_user.save()
self.test_profile = self.test_user.profile
self.test_profile.save()
self.test_post = Post(image='images/vik.jpg', title='Vinstagram',description='Instagram Clone', profile=self.test_profile, live_link='https://vinsta.herokuapp.com/', created_on=datetime.now())
self.test_post.save()
self.test_rate = Neighbourhood(interface=5, experience=6, content=5, user=self.test_profile, post=self.test_post)
def tearDown(self):
''' method called after every test '''
self.test_user.delete()
Post.objects.all().delete()
Neighbourhood.objects.all().delete()
def test_instance(self):
''' method to test instance creation '''
self.assertIsInstance(self.test_rate, Neighbourhood)
def test_save_and_delete_hood(self):
''' test method to save and delete hoods'''
self.test_rate.save_hood()
self.assertEqual(len(Neighbourhood.objects.all()), 1)
self.test_rate.delete_hood()
self.assertEqual(len(Neighbourhood.objects.all()), 0) | 0.486575 | 0.349699 |
import sys
import io
import subprocess
import threading
import time
import uuid
import os.path
import requests
import json
from random import randint
from UniqueConfiguration import UniqueConfiguration
from CommonConfiguration import CommonConfiguration
from printer import console_out
class BrokerActions:
def __init__(self, deployer):
self._action_status = dict()
self._deployer = deployer
self.actor = "BROKER_ACTIONS"
def wait_for_msg_trigger(self, configurations, common_conf, trigger_at):
# iterate over configurations
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
console_out(self.actor, f"Checking message total on node {unique_conf.node_number}")
broker_ip = self.get_broker_ip(unique_conf.technology, unique_conf.node_number, common_conf.run_tag, common_conf.key_pair, True)
msg_total = 0
while(msg_total < trigger_at):
msg_total = self.get_cluster_message_total(broker_ip, common_conf.username, common_conf.password)
console_out(self.actor, f"Trigger at {trigger_at}. Currently {msg_total} messages on node {unique_conf.node_number}")
time.sleep(10)
console_out(self.actor, f"Reached msg trigger on node {unique_conf.node_number}")
def deploy_scripts_to_all_brokers(self, configurations, common_conf):
r_threads = list()
for config_tag in configurations:
console_out(self.actor, f"Deploy scripts for configuration {config_tag}")
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
if unique_conf.deployment == "eks" or unique_conf.deployment == "gke":
return # we don't do this fir EKS deployments
# iterate over nodes of this configuration
for n in range(unique_conf.cluster_size):
node = int(unique_conf.node_number) + n
restart = threading.Thread(target=self.deploy_scripts, args=(unique_conf.technology, str(node), common_conf))
r_threads.append(restart)
for rt in r_threads:
rt.start()
for rt in r_threads:
rt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
for n in range(unique_conf.cluster_size):
node = int(unique_conf.node_number) + n
status_id = f"{unique_conf.technology}{node}"
if self._action_status[status_id] != "success":
console_out(self.actor, f"Broker script deployment failed for node {unique_conf.technology}{node}")
if not common_conf.no_deploy:
self._deployer.teardown_all(configurations, common_conf, False)
def restart_all_brokers(self, configurations, common_conf):
r_threads = list()
for config_tag in configurations:
console_out(self.actor, f"BROKER RESTART FOR configuration {config_tag}")
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
if unique_conf.deployment == "eks" or unique_conf.deployment == "gke":
console_out(self.actor, "EKS/GKE deployments do not currently support broker restarts")
return # we don't do this fir EKS deployments
# iterate over nodes of this configuration
for n in range(unique_conf.cluster_size):
node = int(unique_conf.node_number) + n
restart = threading.Thread(target=self.restart_broker, args=(unique_conf.technology, str(node), common_conf))
r_threads.append(restart)
for rt in r_threads:
rt.start()
for rt in r_threads:
rt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
for n in range(unique_conf.cluster_size):
node = int(unique_conf.node_number) + n
status_id = f"{unique_conf.technology}{node}"
if self._action_status[status_id] != "success":
console_out(self.actor, f"Broker restart failed for node {unique_conf.technology}{node}")
if not common_conf.no_deploy:
self._deployer.teardown_all(configurations, common_conf, False)
def restart_one_broker(self, configurations, common_conf):
r_threads = list()
for config_tag in configurations:
console_out(self.actor, f"BROKER RESTART FOR configuration {config_tag}")
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
restart = threading.Thread(target=self.restart_broker, args=(unique_conf.technology, str(unique_conf.node_number), common_conf.key_pair))
r_threads.append(restart)
for rt in r_threads:
rt.start()
for rt in r_threads:
rt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
status_id = f"{unique_conf.technology}{unique_conf.node_number}"
if self._action_status[status_id] != "success":
console_out(self.actor, f"Broker restart failed for node {unique_conf.technology}{unique_conf.node_number}")
if not common_conf.no_deploy:
self._deployer.teardown_all(configurations, common_conf, False)
def stop_one_broker(self, configurations, common_conf):
r_threads = list()
for config_tag in configurations:
console_out(self.actor, f"BROKER SHUTDOWN FOR configuration {config_tag}")
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
if unique_conf.deployment == "eks" or unique_conf.deployment == "gke":
console_out(self.actor, "EKS/GKE deployments do not currently support brokers being stopped")
return # we don't do this fir EKS deployments
restart = threading.Thread(target=self.stop_broker, args=(unique_conf.technology, str(unique_conf.node_number), common_conf))
r_threads.append(restart)
for rt in r_threads:
rt.start()
for rt in r_threads:
rt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
status_id = f"{unique_conf.technology}{unique_conf.node_number}"
if self._action_status[status_id] != "success":
console_out(self.actor, f"Broker shutdown failed for node {unique_conf.technology}{unique_conf.node_number}")
if not common_conf.no_deploy:
self._deployer.teardown_all(configurations, common_conf, False)
def restart_broker(self, technology, node, run_tag, key_pair):
status_id = technology + node
exit_code = subprocess.call(["bash", "restart-broker.sh",
key_pair,
node,
run_tag,
technology])
if exit_code != 0:
console_out(self.actor, f"Restart of broker on node {node} failed with exit code {exit_code}")
self._action_status[status_id] = "failed"
else:
self._action_status[status_id] = "success"
def stop_broker(self, technology, node, run_tag, key_pair):
status_id = technology + node
exit_code = subprocess.call(["bash", "stop-broker.sh",
key_pair,
node,
run_tag,
technology])
if exit_code != 0:
console_out(self.actor, f"Shutdown of broker on node {node} failed with exit code {exit_code}")
self._action_status[status_id] = "failed"
else:
self._action_status[status_id] = "success"
def ensure_no_traffic_control_on_all_brokers(self, configurations, common_conf):
r_threads = list()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
if unique_conf.deployment == "eks" or unique_conf.deployment == "gke":
console_out(self.actor, "EKS/GKE deployments do not currently support traffic control")
return # we don't do this for EKS deployments
# iterate over nodes of this configuration
for n in range(unique_conf.cluster_size):
node = int(unique_conf.node_number) + n
restart = threading.Thread(target=self.ensure_no_traffic_control, args=(str(node), unique_conf, common_conf))
r_threads.append(restart)
for rt in r_threads:
rt.start()
for rt in r_threads:
rt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
for n in range(unique_conf.cluster_size):
node = int(unique_conf.node_number) + n
status_id = f"{unique_conf.technology}{node}"
if self._action_status[status_id] != "success":
console_out(self.actor, f"Traffic control removal from node {unique_conf.technology}{node} failed")
if not common_conf.no_deploy:
self._deployer.teardown_all(configurations, common_conf, False)
def ensure_no_traffic_control(self, node, unique_conf, common_conf):
status_id = f"{unique_conf.technology}{node}"
console_out(self.actor, f"Removing traffic control from node {unique_conf.technology}{node}")
subprocess.call(["bash", "remove-traffic-control.sh",
common_conf.key_pair,
node,
common_conf.run_tag,
unique_conf.technology])
self._action_status[status_id] = "success"
def traffic_control_for_one_broker(self, configurations, common_conf, tc_apply_to_clients, tc_delay_ms, tc_delay_jitter_ms, tc_delay_dist, tc_bandwidth_mbit, tc_loss_mode, tc_loss_arg1, tc_loss_arg2, tc_loss_arg3, tc_loss_arg4):
r_threads = list()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
tc_thread = threading.Thread(target=self.apply_traffic_control, args=(unique_conf.node_number, unique_conf, common_conf, tc_apply_to_clients, tc_delay_ms, tc_delay_jitter_ms, tc_delay_dist, tc_bandwidth_mbit, tc_loss_mode, tc_loss_arg1, tc_loss_arg2, tc_loss_arg3, tc_loss_arg4))
r_threads.append(tc_thread)
for rt in r_threads:
rt.start()
for rt in r_threads:
rt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
status_id = f"{unique_conf.technology}{unique_conf.node_number}"
if self._action_status[status_id] != "success":
console_out(self.actor, f"Traffic control failed for node {unique_conf.technology}{unique_conf.node_number}")
if not common_conf.no_deploy:
self._deployer.teardown_all(configurations, common_conf, False)
def traffic_control_for_all_brokers(self, configurations, common_conf, tc_apply_to_clients, tc_delay_ms, tc_delay_jitter_ms, tc_delay_dist, tc_bandwidth_mbit, tc_loss_mode, tc_loss_arg1, tc_loss_arg2, tc_loss_arg3, tc_loss_arg4):
r_threads = list()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
# iterate over nodes of this configuration
for n in range(unique_conf.cluster_size):
node = int(unique_conf.node_number) + n
tc_thread = threading.Thread(target=self.apply_traffic_control, args=(str(node), unique_conf, common_conf, tc_apply_to_clients, tc_delay_ms, tc_delay_jitter_ms, tc_delay_dist, tc_bandwidth_mbit, tc_loss_mode, tc_loss_arg1, tc_loss_arg2, tc_loss_arg3, tc_loss_arg4))
r_threads.append(tc_thread)
for rt in r_threads:
rt.start()
for rt in r_threads:
rt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
status_id = f"{unique_conf.technology}{unique_conf.node_number}"
if self._action_status[status_id] != "success":
console_out(self.actor, f"Traffic control failed for node {unique_conf.technology}{unique_conf.node_number}")
if not common_conf.no_deploy:
self._deployer.teardown_all(configurations, common_conf, False)
def apply_traffic_control(self, node, unique_conf, common_conf, tc_apply_to_clients, tc_delay_ms, tc_delay_jitter_ms, tc_delay_dist, tc_bandwidth_mbit, tc_loss_mode, tc_loss_arg1, tc_loss_arg2, tc_loss_arg3, tc_loss_arg4):
status_id = unique_conf.technology + node
target_ips = ""
for n in range(unique_conf.cluster_size):
nd = str(int(unique_conf.node_number) + n)
if nd != node:
target_ips += self.get_broker_ip(unique_conf.technology, nd, common_conf.run_tag, common_conf.key_pair, False) + ","
console_out(self.actor, f"Applying traffic control to node {unique_conf.technology}{node}")
target_ips = target_ips[:-1]
exit_code = subprocess.call(["bash", "apply-traffic-control.sh",
common_conf.key_pair,
unique_conf.node_number,
node,
common_conf.run_tag,
unique_conf.technology,
target_ips,
tc_apply_to_clients,
str(tc_delay_ms),
str(tc_delay_jitter_ms),
str(tc_delay_dist),
str(tc_bandwidth_mbit),
tc_loss_mode,
tc_loss_arg1,
tc_loss_arg2,
tc_loss_arg3,
tc_loss_arg4])
if exit_code != 0:
console_out(self.actor, f"Application of traffic control on node {node} failed with exit code {exit_code}")
self._action_status[status_id] = "failed"
else:
self._action_status[status_id] = "success"
def get_broker_ip(self, technology, node, run_tag, key_pair, public_ip):
broker_ip = ""
attempts = 0
while broker_ip == "" and attempts < 3:
attempts += 1
if public_ip:
script = "get_broker_ip.sh"
else:
script = "get_broker_private_ip.sh"
process = subprocess.Popen(["bash", script,
key_pair,
node,
run_tag,
technology], stdout=subprocess.PIPE)
for line in io.TextIOWrapper(process.stdout, encoding="utf-8"):
if not line:
break
if line.startswith("BROKER_IP="):
broker_ip = line.rstrip().replace("BROKER_IP=","")
break
if broker_ip == "":
time.sleep(5)
return broker_ip
def get_cluster_message_total(self, broker_ip, username, password):
res = requests.get(f"http://{broker_ip}:15672/api/overview",
auth=(username,password))
overview_json = res.json()
queue_totals = overview_json["queue_totals"]
if "messages" in queue_totals:
return queue_totals["messages"]
else:
return 0 | orchestration/v1/run/BrokerActions.py | import sys
import io
import subprocess
import threading
import time
import uuid
import os.path
import requests
import json
from random import randint
from UniqueConfiguration import UniqueConfiguration
from CommonConfiguration import CommonConfiguration
from printer import console_out
class BrokerActions:
def __init__(self, deployer):
self._action_status = dict()
self._deployer = deployer
self.actor = "BROKER_ACTIONS"
def wait_for_msg_trigger(self, configurations, common_conf, trigger_at):
# iterate over configurations
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
console_out(self.actor, f"Checking message total on node {unique_conf.node_number}")
broker_ip = self.get_broker_ip(unique_conf.technology, unique_conf.node_number, common_conf.run_tag, common_conf.key_pair, True)
msg_total = 0
while(msg_total < trigger_at):
msg_total = self.get_cluster_message_total(broker_ip, common_conf.username, common_conf.password)
console_out(self.actor, f"Trigger at {trigger_at}. Currently {msg_total} messages on node {unique_conf.node_number}")
time.sleep(10)
console_out(self.actor, f"Reached msg trigger on node {unique_conf.node_number}")
def deploy_scripts_to_all_brokers(self, configurations, common_conf):
r_threads = list()
for config_tag in configurations:
console_out(self.actor, f"Deploy scripts for configuration {config_tag}")
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
if unique_conf.deployment == "eks" or unique_conf.deployment == "gke":
return # we don't do this fir EKS deployments
# iterate over nodes of this configuration
for n in range(unique_conf.cluster_size):
node = int(unique_conf.node_number) + n
restart = threading.Thread(target=self.deploy_scripts, args=(unique_conf.technology, str(node), common_conf))
r_threads.append(restart)
for rt in r_threads:
rt.start()
for rt in r_threads:
rt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
for n in range(unique_conf.cluster_size):
node = int(unique_conf.node_number) + n
status_id = f"{unique_conf.technology}{node}"
if self._action_status[status_id] != "success":
console_out(self.actor, f"Broker script deployment failed for node {unique_conf.technology}{node}")
if not common_conf.no_deploy:
self._deployer.teardown_all(configurations, common_conf, False)
def restart_all_brokers(self, configurations, common_conf):
r_threads = list()
for config_tag in configurations:
console_out(self.actor, f"BROKER RESTART FOR configuration {config_tag}")
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
if unique_conf.deployment == "eks" or unique_conf.deployment == "gke":
console_out(self.actor, "EKS/GKE deployments do not currently support broker restarts")
return # we don't do this fir EKS deployments
# iterate over nodes of this configuration
for n in range(unique_conf.cluster_size):
node = int(unique_conf.node_number) + n
restart = threading.Thread(target=self.restart_broker, args=(unique_conf.technology, str(node), common_conf))
r_threads.append(restart)
for rt in r_threads:
rt.start()
for rt in r_threads:
rt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
for n in range(unique_conf.cluster_size):
node = int(unique_conf.node_number) + n
status_id = f"{unique_conf.technology}{node}"
if self._action_status[status_id] != "success":
console_out(self.actor, f"Broker restart failed for node {unique_conf.technology}{node}")
if not common_conf.no_deploy:
self._deployer.teardown_all(configurations, common_conf, False)
def restart_one_broker(self, configurations, common_conf):
r_threads = list()
for config_tag in configurations:
console_out(self.actor, f"BROKER RESTART FOR configuration {config_tag}")
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
restart = threading.Thread(target=self.restart_broker, args=(unique_conf.technology, str(unique_conf.node_number), common_conf.key_pair))
r_threads.append(restart)
for rt in r_threads:
rt.start()
for rt in r_threads:
rt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
status_id = f"{unique_conf.technology}{unique_conf.node_number}"
if self._action_status[status_id] != "success":
console_out(self.actor, f"Broker restart failed for node {unique_conf.technology}{unique_conf.node_number}")
if not common_conf.no_deploy:
self._deployer.teardown_all(configurations, common_conf, False)
def stop_one_broker(self, configurations, common_conf):
r_threads = list()
for config_tag in configurations:
console_out(self.actor, f"BROKER SHUTDOWN FOR configuration {config_tag}")
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
if unique_conf.deployment == "eks" or unique_conf.deployment == "gke":
console_out(self.actor, "EKS/GKE deployments do not currently support brokers being stopped")
return # we don't do this fir EKS deployments
restart = threading.Thread(target=self.stop_broker, args=(unique_conf.technology, str(unique_conf.node_number), common_conf))
r_threads.append(restart)
for rt in r_threads:
rt.start()
for rt in r_threads:
rt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
status_id = f"{unique_conf.technology}{unique_conf.node_number}"
if self._action_status[status_id] != "success":
console_out(self.actor, f"Broker shutdown failed for node {unique_conf.technology}{unique_conf.node_number}")
if not common_conf.no_deploy:
self._deployer.teardown_all(configurations, common_conf, False)
def restart_broker(self, technology, node, run_tag, key_pair):
status_id = technology + node
exit_code = subprocess.call(["bash", "restart-broker.sh",
key_pair,
node,
run_tag,
technology])
if exit_code != 0:
console_out(self.actor, f"Restart of broker on node {node} failed with exit code {exit_code}")
self._action_status[status_id] = "failed"
else:
self._action_status[status_id] = "success"
def stop_broker(self, technology, node, run_tag, key_pair):
status_id = technology + node
exit_code = subprocess.call(["bash", "stop-broker.sh",
key_pair,
node,
run_tag,
technology])
if exit_code != 0:
console_out(self.actor, f"Shutdown of broker on node {node} failed with exit code {exit_code}")
self._action_status[status_id] = "failed"
else:
self._action_status[status_id] = "success"
def ensure_no_traffic_control_on_all_brokers(self, configurations, common_conf):
r_threads = list()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
if unique_conf.deployment == "eks" or unique_conf.deployment == "gke":
console_out(self.actor, "EKS/GKE deployments do not currently support traffic control")
return # we don't do this for EKS deployments
# iterate over nodes of this configuration
for n in range(unique_conf.cluster_size):
node = int(unique_conf.node_number) + n
restart = threading.Thread(target=self.ensure_no_traffic_control, args=(str(node), unique_conf, common_conf))
r_threads.append(restart)
for rt in r_threads:
rt.start()
for rt in r_threads:
rt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
for n in range(unique_conf.cluster_size):
node = int(unique_conf.node_number) + n
status_id = f"{unique_conf.technology}{node}"
if self._action_status[status_id] != "success":
console_out(self.actor, f"Traffic control removal from node {unique_conf.technology}{node} failed")
if not common_conf.no_deploy:
self._deployer.teardown_all(configurations, common_conf, False)
def ensure_no_traffic_control(self, node, unique_conf, common_conf):
status_id = f"{unique_conf.technology}{node}"
console_out(self.actor, f"Removing traffic control from node {unique_conf.technology}{node}")
subprocess.call(["bash", "remove-traffic-control.sh",
common_conf.key_pair,
node,
common_conf.run_tag,
unique_conf.technology])
self._action_status[status_id] = "success"
def traffic_control_for_one_broker(self, configurations, common_conf, tc_apply_to_clients, tc_delay_ms, tc_delay_jitter_ms, tc_delay_dist, tc_bandwidth_mbit, tc_loss_mode, tc_loss_arg1, tc_loss_arg2, tc_loss_arg3, tc_loss_arg4):
r_threads = list()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
tc_thread = threading.Thread(target=self.apply_traffic_control, args=(unique_conf.node_number, unique_conf, common_conf, tc_apply_to_clients, tc_delay_ms, tc_delay_jitter_ms, tc_delay_dist, tc_bandwidth_mbit, tc_loss_mode, tc_loss_arg1, tc_loss_arg2, tc_loss_arg3, tc_loss_arg4))
r_threads.append(tc_thread)
for rt in r_threads:
rt.start()
for rt in r_threads:
rt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
status_id = f"{unique_conf.technology}{unique_conf.node_number}"
if self._action_status[status_id] != "success":
console_out(self.actor, f"Traffic control failed for node {unique_conf.technology}{unique_conf.node_number}")
if not common_conf.no_deploy:
self._deployer.teardown_all(configurations, common_conf, False)
def traffic_control_for_all_brokers(self, configurations, common_conf, tc_apply_to_clients, tc_delay_ms, tc_delay_jitter_ms, tc_delay_dist, tc_bandwidth_mbit, tc_loss_mode, tc_loss_arg1, tc_loss_arg2, tc_loss_arg3, tc_loss_arg4):
r_threads = list()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
# iterate over configurations
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
# iterate over nodes of this configuration
for n in range(unique_conf.cluster_size):
node = int(unique_conf.node_number) + n
tc_thread = threading.Thread(target=self.apply_traffic_control, args=(str(node), unique_conf, common_conf, tc_apply_to_clients, tc_delay_ms, tc_delay_jitter_ms, tc_delay_dist, tc_bandwidth_mbit, tc_loss_mode, tc_loss_arg1, tc_loss_arg2, tc_loss_arg3, tc_loss_arg4))
r_threads.append(tc_thread)
for rt in r_threads:
rt.start()
for rt in r_threads:
rt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
status_id = f"{unique_conf.technology}{unique_conf.node_number}"
if self._action_status[status_id] != "success":
console_out(self.actor, f"Traffic control failed for node {unique_conf.technology}{unique_conf.node_number}")
if not common_conf.no_deploy:
self._deployer.teardown_all(configurations, common_conf, False)
def apply_traffic_control(self, node, unique_conf, common_conf, tc_apply_to_clients, tc_delay_ms, tc_delay_jitter_ms, tc_delay_dist, tc_bandwidth_mbit, tc_loss_mode, tc_loss_arg1, tc_loss_arg2, tc_loss_arg3, tc_loss_arg4):
status_id = unique_conf.technology + node
target_ips = ""
for n in range(unique_conf.cluster_size):
nd = str(int(unique_conf.node_number) + n)
if nd != node:
target_ips += self.get_broker_ip(unique_conf.technology, nd, common_conf.run_tag, common_conf.key_pair, False) + ","
console_out(self.actor, f"Applying traffic control to node {unique_conf.technology}{node}")
target_ips = target_ips[:-1]
exit_code = subprocess.call(["bash", "apply-traffic-control.sh",
common_conf.key_pair,
unique_conf.node_number,
node,
common_conf.run_tag,
unique_conf.technology,
target_ips,
tc_apply_to_clients,
str(tc_delay_ms),
str(tc_delay_jitter_ms),
str(tc_delay_dist),
str(tc_bandwidth_mbit),
tc_loss_mode,
tc_loss_arg1,
tc_loss_arg2,
tc_loss_arg3,
tc_loss_arg4])
if exit_code != 0:
console_out(self.actor, f"Application of traffic control on node {node} failed with exit code {exit_code}")
self._action_status[status_id] = "failed"
else:
self._action_status[status_id] = "success"
def get_broker_ip(self, technology, node, run_tag, key_pair, public_ip):
broker_ip = ""
attempts = 0
while broker_ip == "" and attempts < 3:
attempts += 1
if public_ip:
script = "get_broker_ip.sh"
else:
script = "get_broker_private_ip.sh"
process = subprocess.Popen(["bash", script,
key_pair,
node,
run_tag,
technology], stdout=subprocess.PIPE)
for line in io.TextIOWrapper(process.stdout, encoding="utf-8"):
if not line:
break
if line.startswith("BROKER_IP="):
broker_ip = line.rstrip().replace("BROKER_IP=","")
break
if broker_ip == "":
time.sleep(5)
return broker_ip
def get_cluster_message_total(self, broker_ip, username, password):
res = requests.get(f"http://{broker_ip}:15672/api/overview",
auth=(username,password))
overview_json = res.json()
queue_totals = overview_json["queue_totals"]
if "messages" in queue_totals:
return queue_totals["messages"]
else:
return 0 | 0.109396 | 0.065935 |
import sqlalchemy
from databases import Database
from fastapi import Depends, FastAPI
from pytest import fixture
from fastapi_pagination import (
LimitOffsetPage,
LimitOffsetPaginationParams,
Page,
PaginationParams,
)
from fastapi_pagination.ext.databases import paginate
from ..base import (
BasePaginationTestCase,
SafeTestClient,
UserOut,
limit_offset_params,
page_params,
)
from ..utils import faker
@fixture(scope="session")
def metadata(database_url):
return sqlalchemy.MetaData()
@fixture(scope="session")
def db(database_url):
return Database(database_url)
@fixture(scope="session")
def User(metadata):
return sqlalchemy.Table(
"users",
metadata,
sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True, autoincrement=True),
sqlalchemy.Column("name", sqlalchemy.String, nullable=False),
)
@fixture(scope="session")
def app(db, metadata, User):
    """Session-scoped FastAPI app exposing four paginated /users routes.

    Two routes use page-number pagination and two use limit/offset; each pair
    has an "implicit" variant (params injected as a route dependency) and an
    "explicit" variant (params taken as a handler argument).
    """
    app = FastAPI()
    @app.on_event("startup")
    async def on_startup() -> None:
        # Recreate the schema from scratch and seed 100 fake users so every
        # test session starts from a known row count.
        engine = sqlalchemy.create_engine(str(db.url))
        metadata.drop_all(engine)
        metadata.create_all(engine)
        await db.connect()
        for _ in range(100):
            await db.execute(User.insert(), {"name": faker.name()})
    @app.on_event("shutdown")
    async def on_shutdown() -> None:
        await db.disconnect()
    # Page-number pagination, params supplied implicitly via a dependency.
    @app.get("/implicit", response_model=Page[UserOut], dependencies=[Depends(page_params)])
    async def route():
        return await paginate(db, User.select())
    # Page-number pagination, params passed explicitly to paginate().
    @app.get("/explicit", response_model=Page[UserOut])
    async def route(params: PaginationParams = Depends()):
        return await paginate(db, User.select(), params)
    @app.get(
        "/implicit-limit-offset",
        response_model=LimitOffsetPage[UserOut],
        dependencies=[Depends(limit_offset_params)],
    )
    async def route():
        return await paginate(db, User.select())
    @app.get("/explicit-limit-offset", response_model=LimitOffsetPage[UserOut])
    async def route(params: LimitOffsetPaginationParams = Depends()):
        return await paginate(db, User.select(), params)
    return app
class TestDatabases(BasePaginationTestCase):
    """Pagination test suite run against the `databases` integration."""
    @fixture(scope="session")
    async def client(self, app):
        # SafeTestClient drives the app's startup/shutdown events, so the
        # schema is created and seeded before any request is made.
        with SafeTestClient(app) as c:
            yield c
    @fixture(scope="session")
    async def entities(self, db, User):
        # Materialize every seeded row as a plain dict for comparison with
        # the paginated responses.
        return [{**user} async for user in db.iterate(User.select())]
from databases import Database
from fastapi import Depends, FastAPI
from pytest import fixture
from fastapi_pagination import (
LimitOffsetPage,
LimitOffsetPaginationParams,
Page,
PaginationParams,
)
from fastapi_pagination.ext.databases import paginate
from ..base import (
BasePaginationTestCase,
SafeTestClient,
UserOut,
limit_offset_params,
page_params,
)
from ..utils import faker
@fixture(scope="session")
def metadata(database_url):
return sqlalchemy.MetaData()
@fixture(scope="session")
def db(database_url):
return Database(database_url)
@fixture(scope="session")
def User(metadata):
return sqlalchemy.Table(
"users",
metadata,
sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True, autoincrement=True),
sqlalchemy.Column("name", sqlalchemy.String, nullable=False),
)
@fixture(scope="session")
def app(db, metadata, User):
app = FastAPI()
@app.on_event("startup")
async def on_startup() -> None:
engine = sqlalchemy.create_engine(str(db.url))
metadata.drop_all(engine)
metadata.create_all(engine)
await db.connect()
for _ in range(100):
await db.execute(User.insert(), {"name": faker.name()})
@app.on_event("shutdown")
async def on_shutdown() -> None:
await db.disconnect()
@app.get("/implicit", response_model=Page[UserOut], dependencies=[Depends(page_params)])
async def route():
return await paginate(db, User.select())
@app.get("/explicit", response_model=Page[UserOut])
async def route(params: PaginationParams = Depends()):
return await paginate(db, User.select(), params)
@app.get(
"/implicit-limit-offset",
response_model=LimitOffsetPage[UserOut],
dependencies=[Depends(limit_offset_params)],
)
async def route():
return await paginate(db, User.select())
@app.get("/explicit-limit-offset", response_model=LimitOffsetPage[UserOut])
async def route(params: LimitOffsetPaginationParams = Depends()):
return await paginate(db, User.select(), params)
return app
class TestDatabases(BasePaginationTestCase):
@fixture(scope="session")
async def client(self, app):
with SafeTestClient(app) as c:
yield c
@fixture(scope="session")
async def entities(self, db, User):
return [{**user} async for user in db.iterate(User.select())] | 0.451568 | 0.110711 |
from django.shortcuts import render
import django_filters
from rest_framework import filters
import json
import logging
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from django.http import HttpResponse
from django.http import HttpRequest
from iec_lookup.services.iec_lookup_service import IECLookupService
from rest_framework import status
from iec_lookup import utils
from iec_lookup.serializers import IECDetailsSerializer
from iec_lookup.custom_exceptions import CustomApiException
@csrf_exempt
def validate_importer_exporter_code(request):
    """Validate an IEC lookup request and return the matching IEC details.

    The JSON request body must contain a 10 digit "code" and the company
    name; every provided field must be non-empty.

    Returns:
        JsonResponse with the serialized IEC details on success, or a JSON
        error payload whose HTTP status matches the failure.
    """
    try:
        if not request.body:
            raise CustomApiException(utils.REQUEST_BODY_NOT_PROVIDED, status.HTTP_400_BAD_REQUEST)
        if 'application/json' not in request.META.get('CONTENT_TYPE'):
            raise CustomApiException(utils.REQUEST_NON_JSON_FORMAT, status.HTTP_400_BAD_REQUEST)
        json_body = json.loads(request.body)
        # items() replaces the Python-2-only iteritems(); valid on 2 and 3.
        for key, value in json_body.items():
            if value is None or value == "":
                raise CustomApiException(utils.MISSING_FIELD_VALUE + key, status.HTTP_400_BAD_REQUEST)
            if key == "code" and len(value) != 10:
                raise CustomApiException(utils.INVALID_IEC_CODE, status.HTTP_400_BAD_REQUEST)
        iec_lookup_service = IECLookupService()
        lookup_validate_iec_response = iec_lookup_service.lookup_validate_iec(json_body)
        serializer = IECDetailsSerializer(lookup_validate_iec_response)
        # Status belongs on the response instance; the original assigned
        # HttpResponse.status_code, mutating the class attribute process-wide.
        return JsonResponse(serializer.data, safe=False, status=status.HTTP_200_OK)
    except CustomApiException as err:
        return JsonResponse({'status_code': err.status_code, 'message': err.detail},
                            status=err.status_code)
    except Exception as e:
        return JsonResponse({'status_code': status.HTTP_500_INTERNAL_SERVER_ERROR, 'message': str(e)},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@csrf_exempt
def retrieve_importer_exporter_code(request):
    """Return IEC details for the 10 digit code in the ``code`` query param.

    Returns:
        JsonResponse with the serialized IEC details on success, or a JSON
        error payload whose HTTP status matches the failure.
    """
    try:
        code = request.GET.get('code', '')
        # Reject missing/blank codes and anything not exactly 10 characters.
        if not code or len(code) != 10:
            raise CustomApiException(utils.INVALID_IEC_CODE, status.HTTP_400_BAD_REQUEST)
        iec_lookup_service = IECLookupService()
        iec_data_response = iec_lookup_service.retrieve_iec_data_with_code(code)
        serializer = IECDetailsSerializer(iec_data_response)
        # Status belongs on the response instance; the original assigned
        # HttpResponse.status_code, mutating the class attribute process-wide.
        return JsonResponse(serializer.data, safe=False, status=status.HTTP_200_OK)
    except CustomApiException as err:
        return JsonResponse({'status_code': err.status_code, 'message': err.detail},
                            status=err.status_code)
    except Exception as e:
        return JsonResponse({'status_code': status.HTTP_500_INTERNAL_SERVER_ERROR, 'message': str(e)},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
import django_filters
from rest_framework import filters
import json
import logging
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from django.http import HttpResponse
from django.http import HttpRequest
from iec_lookup.services.iec_lookup_service import IECLookupService
from rest_framework import status
from iec_lookup import utils
from iec_lookup.serializers import IECDetailsSerializer
from iec_lookup.custom_exceptions import CustomApiException
@csrf_exempt
def validate_importer_exporter_code(request):
"""
request body contains 10 digit code and name of iec
returns: IEC details of company with all embedded fields
"""
try:
if not request.body:
raise CustomApiException(utils.REQUEST_BODY_NOT_PROVIDED, status.HTTP_400_BAD_REQUEST )
if not 'application/json' in request.META.get('CONTENT_TYPE'):
raise CustomApiException(utils.REQUEST_NON_JSON_FORMAT, status.HTTP_400_BAD_REQUEST )
json_body = json.loads(request.body)
for key, value in json_body.iteritems():
if value is None or value == "":
raise CustomApiException(utils.MISSING_FIELD_VALUE+ key, status.HTTP_400_BAD_REQUEST)
if key == "code" and len(value) != 10:
raise CustomApiException(utils.INVALID_IEC_CODE, status.HTTP_400_BAD_REQUEST)
iec_lookup_service = IECLookupService()
lookup_validate_iec_response = iec_lookup_service.lookup_validate_iec(json_body)
serializer = IECDetailsSerializer(lookup_validate_iec_response)
HttpResponse.status_code = status.HTTP_200_OK
return JsonResponse(serializer.data, safe=False)
except CustomApiException as err:
HttpResponse.status_code = err.status_code
return JsonResponse({'status_code': err.status_code, 'message': err.detail})
except Exception, e:
HttpResponse.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
return JsonResponse({'status_code': status.HTTP_500_INTERNAL_SERVER_ERROR, 'message': str(e)})
@csrf_exempt
def retrieve_importer_exporter_code(request):
"""
request comtains 10 digit code of IEC
returns: IEC details of company with all embedded fields
"""
try:
code = request.GET.get('code', '')
if code == "" or code == None or len(code) != 10:
raise CustomApiException(utils.INVALID_IEC_CODE, status.HTTP_400_BAD_REQUEST)
iec_lookup_service = IECLookupService()
iec_data_response = iec_lookup_service.retrieve_iec_data_with_code(code)
serializer = IECDetailsSerializer(iec_data_response)
HttpResponse.status_code = status.HTTP_200_OK
return JsonResponse(serializer.data, safe=False)
except CustomApiException as err:
HttpResponse.status_code = err.status_code
return JsonResponse({'status_code': err.status_code, 'message': err.detail})
except Exception, e:
HttpResponse.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
return JsonResponse({'status_code': status.HTTP_500_INTERNAL_SERVER_ERROR, 'message': str(e)}) | 0.316898 | 0.059265 |
import os.path
import subprocess
from textwrap import dedent
import pytest
from pex.common import safe_open
from pex.compatibility import PY2
from pex.testing import run_pex_command
from pex.typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any
@pytest.mark.skipif(PY2, reason="Example code used to drive test is Python 3 only.")
def test_stderr_not_torn_down(tmpdir):
    # type: (Any) -> None
    # Regression test: an absl log record routed through a QueueListener that
    # is stopped via atexit must still reach stderr when the PEX exits, i.e.
    # stderr must not be torn down before the atexit hooks flush the queue.
    exe = os.path.join(str(tmpdir), "exe")
    with safe_open(exe, "w") as fp:
        # The embedded program re-parents absl's handler onto a logging queue;
        # "HELP ME" only appears on stderr if the stream is still alive when
        # the listener drains at interpreter shutdown.
        fp.write(
            dedent(
                """\
                import sys
                import logging
                import atexit
                import logging.handlers
                import queue
                import sys
                import faulthandler
                import absl.app as absl_app
                import absl.logging as absl_logging
                from absl.flags import FLAGS
                def run():
                    print("hello")
                    absl_logging.error("HELP ME")
                def init_sys_logging():
                    root_logger = logging.getLogger()
                    FLAGS.alsologtostderr = True
                    # No limit on queue size.
                    log_queue = queue.Queue(-1)
                    queue_forwarder = logging.handlers.QueueHandler(log_queue)
                    root_logger.addHandler(queue_forwarder)
                    queue_handlers = []
                    # If absl logging is enabled; re-parent it to the queue.
                    absl_handler = absl_logging.get_absl_handler()
                    if absl_handler in root_logger.handlers:
                        root_logger.handlers.remove(absl_handler)
                        queue_handlers.append(absl_handler)
                    queue_log_listener = logging.handlers.QueueListener(
                        log_queue, *queue_handlers, respect_handler_level=True
                    )
                    queue_log_listener.start()
                    atexit.register(queue_log_listener.stop)
                    FLAGS.mark_as_parsed()
                if __name__ == "__main__":
                    absl_logging.set_verbosity(0)
                    absl_logging.use_absl_handler()
                    absl_logging.get_absl_handler().use_absl_log_file()
                    faulthandler.enable()
                    init_sys_logging()
                    def run_wrapper(fn) -> int:
                        absl_app._run_main(lambda args: fn(), sys.argv)
                        return 0
                    sys.exit(run_wrapper(run))
                """
            )
        )
    pex = os.path.join(str(tmpdir), "pex")
    # Build a PEX bundling absl-py and the driver script above.
    run_pex_command(args=["absl-py==0.10.0", "--exe", exe, "-o", pex]).assert_success()
    process = subprocess.Popen(args=[pex], stderr=subprocess.PIPE)
    _, stderr = process.communicate()
    error = stderr.decode("utf-8")
    assert 0 == process.returncode
    # The atexit-flushed absl record must have made it out on stderr.
    assert "HELP ME" in error
import subprocess
from textwrap import dedent
import pytest
from pex.common import safe_open
from pex.compatibility import PY2
from pex.testing import run_pex_command
from pex.typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any
@pytest.mark.skipif(PY2, reason="Example code used to drive test is Python 3 only.")
def test_stderr_not_torn_down(tmpdir):
# type: (Any) -> None
exe = os.path.join(str(tmpdir), "exe")
with safe_open(exe, "w") as fp:
fp.write(
dedent(
"""\
import sys
import logging
import atexit
import logging.handlers
import queue
import sys
import faulthandler
import absl.app as absl_app
import absl.logging as absl_logging
from absl.flags import FLAGS
def run():
print("hello")
absl_logging.error("HELP ME")
def init_sys_logging():
root_logger = logging.getLogger()
FLAGS.alsologtostderr = True
# No limit on queue size.
log_queue = queue.Queue(-1)
queue_forwarder = logging.handlers.QueueHandler(log_queue)
root_logger.addHandler(queue_forwarder)
queue_handlers = []
# If absl logging is enabled; re-parent it to the queue.
absl_handler = absl_logging.get_absl_handler()
if absl_handler in root_logger.handlers:
root_logger.handlers.remove(absl_handler)
queue_handlers.append(absl_handler)
queue_log_listener = logging.handlers.QueueListener(
log_queue, *queue_handlers, respect_handler_level=True
)
queue_log_listener.start()
atexit.register(queue_log_listener.stop)
FLAGS.mark_as_parsed()
if __name__ == "__main__":
absl_logging.set_verbosity(0)
absl_logging.use_absl_handler()
absl_logging.get_absl_handler().use_absl_log_file()
faulthandler.enable()
init_sys_logging()
def run_wrapper(fn) -> int:
absl_app._run_main(lambda args: fn(), sys.argv)
return 0
sys.exit(run_wrapper(run))
"""
)
)
pex = os.path.join(str(tmpdir), "pex")
run_pex_command(args=["absl-py==0.10.0", "--exe", exe, "-o", pex]).assert_success()
process = subprocess.Popen(args=[pex], stderr=subprocess.PIPE)
_, stderr = process.communicate()
error = stderr.decode("utf-8")
assert 0 == process.returncode
assert "HELP ME" in error | 0.225929 | 0.092852 |
import numpy as np
import math
state_dim = 2
n_actions = 27
class environment():
    """Toy face-framing RL environment.

    The observation is the pair (std of |x_norm - 0.5|, std of |y_norm - 0.5|)
    over the detected face positions, i.e. how spread the faces are around the
    frame center. The 27 discrete actions are the (-1/0/+1)^3 combinations of
    slide / roll / pitch stepper-motor moves, keyed by ids 0..26.
    """

    def __init__(self):
        self.states_dim = 2
        self.n_actions = 27
        # All (dx, dRoll, dPitch) direction triples in {-1, 0, 1}^3,
        # keyed by action id 0..26.
        self.possible_actions_orig = [(i, j, k) for i in range(-1, 2) for j in range(-1, 2) for k in range(-1, 2)]
        self.mapping_actions = [i for i in range(27)]
        self.possible_actions = dict(zip(self.mapping_actions, self.possible_actions_orig))
        self.state = None
        self.X_faces = None
        self.Y_faces = None
        self.X_faces_norm = None
        self.Y_faces_norm = None
        self.n_faces = 0
        self.frame_width = 400
        self.frame_height = 400
        self.pixel_to_mm = 0.2645833333  # 0.2645833333 mm = 1 pixel
        self.delta_thetaX = 0.9*1600  # X Stepper Motor Rotation (CW), 0.9 is the stepper motor step angle
        self.delta_thetaRo = 0.9*5  # Y Stepper Motor Rotation (CW)
        self.delta_thetaPhi = 0.9*7  # Z Stepper Motor Rotation (CW)
        self.shaft_diameter = 5  # Stepper Motor Shaft Diameter
        self.delta_x = (self.delta_thetaX/360) * math.pi * self.shaft_diameter  # sliding DOF ()
        self.steps = 0

    def _refresh_state(self):
        """Recompute normalized coordinates and the spread state from the
        current face positions."""
        self.X_faces_norm = self.X_faces / self.frame_width
        self.Y_faces_norm = self.Y_faces / self.frame_height
        self.state = (np.std(abs(self.X_faces_norm - 0.5)),
                      np.std(abs(self.Y_faces_norm - 0.5)))

    def reset(self):
        """Start an episode with 1-10 faces placed randomly inside a 100 px
        margin; returns (state, X_faces, Y_faces)."""
        n = np.random.randint(1, 11)
        self.n_faces = n
        self.X_faces = np.random.randint(100, self.frame_width-100, size=n).astype("float64")
        self.Y_faces = np.random.randint(100, self.frame_height-100, size=n).astype("float64")
        self._refresh_state()
        self.steps = 0
        return np.array(self.state), self.X_faces, self.Y_faces

    def custom_reset(self, n, faces_X, faces_Y):
        """Start an episode from caller-supplied face coordinates (arrays are
        copied via astype); returns (state, X_faces, Y_faces)."""
        self.n_faces = n
        self.X_faces = faces_X.astype("float64")
        self.Y_faces = faces_Y.astype("float64")
        self._refresh_state()
        self.steps = 0
        return np.array(self.state), self.X_faces, self.Y_faces

    def step(self, action):
        """Apply an (dx, dRoll, dPitch) action triple and advance one step.

        Returns (state, reward, done, X_faces, Y_faces). The episode ends when
        any face leaves the 2% frame margin (penalized) or both spreads drop
        below 0.075 (rewarded).
        """
        new_x = self.X_faces + (- self.delta_x*action[0]*self.pixel_to_mm - (math.sin(self.delta_thetaRo*math.pi/180)*self.frame_width/2*action[1]))
        new_y = self.Y_faces + (+(math.sin(self.delta_thetaPhi*math.pi/180)) * self.frame_height/2 * action[2])
        new_x_norm = new_x/self.frame_width
        new_y_norm = new_y/self.frame_height
        # BUG FIX: the spread must be measured on the *moved* positions; the
        # original recomputed it from the stale pre-step normalized coords
        # (and never updated them), freezing the observation at its reset value.
        new_std_x = np.std(abs(new_x_norm - 0.5))
        new_std_y = np.std(abs(new_y_norm - 0.5))
        out_of_frame = False
        done = False
        if any(x < 0.02 for x in new_x_norm) or any(x > 0.98 for x in new_x_norm) or any(y < 0.02 for y in new_y_norm) or any(y > 0.98 for y in new_y_norm):
            out_of_frame = True
            done = True
        else:
            if new_std_x < 0.075 and new_std_y < 0.075:
                done = True
        self.steps += 1
        if not done:
            # BUG FIX: the first shaping term now uses the x-spread delta;
            # the original used new_std_y in both terms.
            reward = 1 + 10*(self.state[0]-new_std_x) + 10*(self.state[1]-new_std_y)
        else:
            if not out_of_frame:
                reward = +100 - self.steps/10
            else:
                reward = -100 - self.steps/10
        self.state = (new_std_x, new_std_y)
        self.X_faces = new_x
        self.Y_faces = new_y
        # Keep the cached normalized coordinates in sync for the next step.
        self.X_faces_norm = new_x_norm
        self.Y_faces_norm = new_y_norm
        return np.array(self.state), reward, done, self.X_faces, self.Y_faces

    def action_space_Sample(self):
        """Return a random action id.

        BUG FIX: the original called np.random.choice on the actions *dict*,
        which raises (choice requires a 1-D array or int); sample the ids.
        """
        return np.random.choice(self.mapping_actions)
import math
state_dim = 2
n_actions = 27
class environment():
def __init__(self):
self.states_dim = 2
self.n_actions = 27
self.possible_actions_orig = [(i, j, k) for i in range(-1,2) for j in range(-1,2) for k in range(-1,2)]
self.mapping_actions = [i for i in range(27)]
self.possible_actions = dict(zip(self.mapping_actions, self.possible_actions_orig))
self.state = None
self.X_faces = None
self.Y_faces = None
self.X_faces_norm = None
self.Y_faces_norm = None
self.n_faces = 0
self.frame_width = 400
self.frame_height = 400
self.pixel_to_mm = 0.2645833333 # 0.2645833333 mm = 1 pixel
self.delta_thetaX = 0.9*1600 # X Stepper Motor Rotation (CW), 0.9 is the stepper motor step angle
self.delta_thetaRo = 0.9*5 # Y Stepper Motor Rotation (CW)
self.delta_thetaPhi = 0.9*7 # Z Stepper Motor Rotation (CW)
self.shaft_diameter = 5 # Stepper Motor Shaft Diameter
self.delta_x = (self.delta_thetaX/360) * math.pi * self.shaft_diameter # sliding DOF ()
self.steps = 0
def reset(self):
n = np.random.randint(1, 11)
self.n_faces = n
self.X_faces = np.random.randint(100, self.frame_width-100, size = n).astype("float64")
self.Y_faces = np.random.randint(100, self.frame_height-100, size = n).astype("float64")
self.X_faces_norm = self.X_faces/self.frame_width
self.Y_faces_norm = self.Y_faces/self.frame_height
self.state = (np.std(abs(self.X_faces_norm - 0.5)), np.std(abs(self.Y_faces_norm - 0.5)))
self.steps = 0
return np.array(self.state), self.X_faces, self.Y_faces
def custom_reset(self, n, faces_X, faces_Y):
self.n_faces = n
self.X_faces = faces_X.astype("float64")
self.Y_faces = faces_Y.astype("float64")
self.X_faces_norm = self.X_faces/self.frame_width
self.Y_faces_norm = self.Y_faces/self.frame_height
self.state = (np.std(abs(self.X_faces_norm - 0.5)), np.std(abs(self.Y_faces_norm - 0.5)))
self.steps = 0
return np.array(self.state), self.X_faces, self.Y_faces
def step(self, action):
new_x = self.X_faces + (- self.delta_x*action[0]*self.pixel_to_mm - (math.sin(self.delta_thetaRo*math.pi/180)*self.frame_width/2*action[1]))
new_y = self.Y_faces + (+(math.sin(self.delta_thetaPhi*math.pi/180)) * self.frame_height/2 * action[2])
new_x_norm = new_x/self.frame_width
new_y_norm = new_y/self.frame_height
new_std_x = np.std(abs(self.X_faces_norm - 0.5))
new_std_y = np.std(abs(self.Y_faces_norm - 0.5))
out_of_frame = False
done = False
if any(x<0.02 for x in new_x_norm) or any(x>0.98 for x in new_x_norm) or any(y<0.02 for y in new_y_norm) or any(y>0.98 for y in new_y_norm):
out_of_frame = True
done = True
else:
if new_std_x < 0.075 and new_std_y < 0.075:
done = True
self.steps += 1
if not done:
reward = 1 + 10*(self.state[0]-new_std_y) + 10*(self.state[1]-new_std_y)
else:
if not out_of_frame:
reward = +100 - self.steps/10
else:
reward = -100 - self.steps/10
self.state = (new_std_x, new_std_y)
self.X_faces = new_x
self.Y_faces = new_y
return np.array(self.state), reward, done, self.X_faces, self.Y_faces
def action_space_Sample(self):
return np.random.choice(self.possible_actions) | 0.387459 | 0.297132 |
#!/usr/bin/env python
import requests
import json
import sys
import os
# Set the environment variable GPT_API_KEY to your API key
GPT_API_KEY = os.environ['GPT_API_KEY']
def get_gpt_guess(question):
    """Ask the Codex completions API for a direct answer to *question*."""
    payload = {
        "prompt": "Q: {}\nA:".format(question),
        "max_tokens": 150,
        "temperature": 0.7,
        "stop": "Q:",
    }
    auth_headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer {}'.format(GPT_API_KEY),
    }
    response = requests.post(
        'https://api.openai.com/v1/engines/davinci-codex/completions',
        headers=auth_headers,
        data=json.dumps(payload),
    )
    return response.json()['choices'][0]['text']
"""
Read the GPT_API_KEY from its environment variable.
Read the question from the first argument.
Use the https://api.openai.com/v1/engines/davinci-codex/completions API to make an initial guess.
URL-encode the question and use python's URL libraries to search Google for the question.
In the case of an error, print out the request URL.
Retrieve the contents of the first result url.
Use https://github.com/buriy/python-readability to extract the main article text
Provide the article text to the OpenAI API, followed by the following prompt:
Q: According to the article above, $question
A:
to get OpenAI to answer the question by summarizing the article.
Print out that answer.
"""
#!/usr/bin/env python
import requests
import json
import sys
import os
import urllib.parse
import readability
from bs4 import BeautifulSoup
# Set the environment variable GPT_API_KEY to your API key
GPT_API_KEY = os.environ['GPT_API_KEY']
def get_gpt_answer(question):
    """Ask the Codex completions API to answer from a preceding article."""
    payload = {
        "prompt": "Q: According to the article above, {}\nA:".format(question),
        "max_tokens": 150,
        "temperature": 0.7,
        "stop": "Q:",
    }
    auth_headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer {}'.format(GPT_API_KEY),
    }
    response = requests.post(
        'https://api.openai.com/v1/engines/davinci-codex/completions',
        headers=auth_headers,
        data=json.dumps(payload),
    )
    return response.json()['choices'][0]['text']
def get_google_answer(question):
    """Google the question and return the readable text of the results page,
    or None (after printing diagnostics) on a non-200 response."""
    encoded_query = urllib.parse.quote_plus(question)
    url = "https://www.google.com/search?q={}".format(encoded_query)
    response = requests.get(url)
    if response.status_code == 200:
        return readability.Document(response.text).summary()
    print("Error: {}".format(response.status_code))
    print("URL: {}".format(url))
    return None
if __name__ == '__main__':
    # The question to answer is taken from the first CLI argument.
    question = sys.argv[1]
    # First pass: let the model answer from its own knowledge, no context.
    print("Initial guess:")
    print("Q: {}".format(question))
    print("A: {}".format(get_gpt_guess(question)))
    print("")
    print("Google search:")
    answer = get_google_answer(question)
    if answer is not None:
        print("Q: {}".format(question))
        print("A: {}".format(answer))
        print("")
        print("OpenAI answer:")
        print("Q: According to the article above, {}".format(question))
        # NOTE(review): get_gpt_answer is passed the *article text* (answer),
        # not the question, so the article is substituted into the
        # "According to the article above, {}" slot and no article context is
        # actually provided — this contradicts the spec comment above; confirm
        # intended behavior before relying on this path.
        print("A: {}".format(get_gpt_answer(answer)))
    else:
        print("No Google search results found")
import requests
import json
import sys
import os
# Set the environment variable GPT_API_KEY to your API key
GPT_API_KEY = os.environ['GPT_API_KEY']
def get_gpt_guess(question):
prompt = "Q: {}\nA:".format(question)
data = json.dumps({
"prompt": prompt,
"max_tokens": 150,
"temperature": 0.7,
"stop": "Q:"
})
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer {}'.format(GPT_API_KEY)
}
response = requests.post('https://api.openai.com/v1/engines/davinci-codex/completions', headers=headers, data=data)
return response.json()['choices'][0]['text']
"""
Read the GPT_API_KEY from its environment variable.
Read the question from the first argument.
Use the https://api.openai.com/v1/engines/davinci-codex/completions API to make an initial guess.
URL-encode the question and use python's URL libraries to search Google for the question.
In the case of an error, print out the request URL.
Retrieve the contents of the first result url.
Use https://github.com/buriy/python-readability to extract the main article text
Provide the article text to the OpenAI API, followed by the following prompt:
Q: According to the article above, $question
A:
to get OpenAI to answer the question by summarizing the article.
Print out that answer.
"""
#!/usr/bin/env python
import requests
import json
import sys
import os
import urllib.parse
import readability
from bs4 import BeautifulSoup
# Set the environment variable GPT_API_KEY to your API key
GPT_API_KEY = os.environ['GPT_API_KEY']
def get_gpt_answer(question):
prompt = "Q: According to the article above, {}\nA:".format(question)
data = json.dumps({
"prompt": prompt,
"max_tokens": 150,
"temperature": 0.7,
"stop": "Q:"
})
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer {}'.format(GPT_API_KEY)
}
response = requests.post('https://api.openai.com/v1/engines/davinci-codex/completions', headers=headers, data=data)
return response.json()['choices'][0]['text']
def get_google_answer(question):
url = "https://www.google.com/search?q={}".format(urllib.parse.quote_plus(question))
response = requests.get(url)
if response.status_code != 200:
print("Error: {}".format(response.status_code))
print("URL: {}".format(url))
return None
doc = readability.Document(response.text)
return doc.summary()
if __name__ == '__main__':
question = sys.argv[1]
print("Initial guess:")
print("Q: {}".format(question))
print("A: {}".format(get_gpt_guess(question)))
print("")
print("Google search:")
answer = get_google_answer(question)
if answer is not None:
print("Q: {}".format(question))
print("A: {}".format(answer))
print("")
print("OpenAI answer:")
print("Q: According to the article above, {}".format(question))
print("A: {}".format(get_gpt_answer(answer)))
else:
print("No Google search results found") | 0.211173 | 0.216342 |
import threading
from peewee import *
from playhouse.kv import JSONKeyStore
from playhouse.kv import KeyStore
from playhouse.kv import PickledKeyStore
from playhouse.tests.base import PeeweeTestCase
from playhouse.tests.base import skip_if
class TestKeyStore(PeeweeTestCase):
    """Behavioral tests for the playhouse key/value stores (plain, ordered,
    pickled, and JSON variants)."""
    def setUp(self):
        super(TestKeyStore, self).setUp()
        # Four store flavors under test; only kv/json_kv need clearing here
        # (the others are exercised from a fresh state per test).
        self.kv = KeyStore(CharField())
        self.ordered_kv = KeyStore(CharField(), ordered=True)
        self.pickled_kv = PickledKeyStore(ordered=True)
        self.json_kv = JSONKeyStore(ordered=True)
        self.kv.clear()
        self.json_kv.clear()
    def test_json(self):
        # JSON store round-trips strings, nested structures, and None.
        self.json_kv['foo'] = 'bar'
        self.json_kv['baze'] = {'baze': [1, 2, 3]}
        self.json_kv['nugget'] = None
        self.assertEqual(self.json_kv['foo'], 'bar')
        self.assertEqual(self.json_kv['baze'], {'baze': [1, 2, 3]})
        self.assertIsNone(self.json_kv['nugget'])
        self.assertRaises(KeyError, lambda: self.json_kv['missing'])
        # Expression-based lookup: 'bar' was never stored, so only the two
        # matching rows come back.
        results = self.json_kv[self.json_kv.key << ['baze', 'bar', 'nugget']]
        self.assertEqual(results, [
            {'baze': [1, 2, 3]},
            None,
        ])
    def test_storage(self):
        # CharField-backed store coerces values to strings on write.
        self.kv['a'] = 'A'
        self.kv['b'] = 1
        self.assertEqual(self.kv['a'], 'A')
        self.assertEqual(self.kv['b'], '1')
        self.assertRaises(KeyError, self.kv.__getitem__, 'c')
        del(self.kv['a'])
        self.assertRaises(KeyError, self.kv.__getitem__, 'a')
        self.kv['a'] = 'A'
        self.kv['c'] = 'C'
        # Expression reads, bulk writes, and bulk deletes via key << (...).
        self.assertEqual(self.kv[self.kv.key << ('a', 'c')], ['A', 'C'])
        self.kv[self.kv.key << ('a', 'c')] = 'X'
        self.assertEqual(self.kv['a'], 'X')
        self.assertEqual(self.kv['b'], '1')
        self.assertEqual(self.kv['c'], 'X')
        key = self.kv.key
        results = self.kv[key << ('a', 'b')]
        self.assertEqual(results, ['X', '1'])
        del(self.kv[self.kv.key << ('a', 'c')])
        self.assertRaises(KeyError, self.kv.__getitem__, 'a')
        self.assertRaises(KeyError, self.kv.__getitem__, 'c')
        self.assertEqual(self.kv['b'], '1')
        # Pickled store preserves the original Python types (no str coercion).
        self.pickled_kv['a'] = 'A'
        self.pickled_kv['b'] = 1.1
        self.assertEqual(self.pickled_kv['a'], 'A')
        self.assertEqual(self.pickled_kv['b'], 1.1)
    def test_container_properties(self):
        # len() and membership behave like a dict.
        self.kv['x'] = 'X'
        self.kv['y'] = 'Y'
        self.assertEqual(len(self.kv), 2)
        self.assertTrue('x' in self.kv)
        self.assertFalse('a' in self.kv)
    def test_dict_methods(self):
        # Ordered stores return keys/values/items sorted by key regardless of
        # insertion order.
        for kv in (self.ordered_kv, self.pickled_kv):
            kv['a'] = 'A'
            kv['c'] = 'C'
            kv['b'] = 'B'
            self.assertEqual(list(kv.keys()), ['a', 'b', 'c'])
            self.assertEqual(list(kv.values()), ['A', 'B', 'C'])
            self.assertEqual(list(kv.items()), [
                ('a', 'A'),
                ('b', 'B'),
                ('c', 'C'),
            ])
    def test_iteration(self):
        # Iterating the store yields (key, value) pairs in key order.
        for kv in (self.ordered_kv, self.pickled_kv):
            kv['a'] = 'A'
            kv['c'] = 'C'
            kv['b'] = 'B'
            items = list(kv)
            self.assertEqual(items, [
                ('a', 'A'),
                ('b', 'B'),
                ('c', 'C'),
            ])
    def test_shared_mem(self):
        # Separate KeyStore instances (even on another thread) see the same
        # underlying storage.
        self.kv['a'] = 'xxx'
        self.assertEqual(self.ordered_kv['a'], 'xxx')
        def set_k():
            kv_t = KeyStore(CharField())
            kv_t['b'] = 'yyy'
        t = threading.Thread(target=set_k)
        t.start()
        t.join()
        self.assertEqual(self.kv['b'], 'yyy')
    def test_get(self):
        # get() supports defaults and expression keys (missing -> empty).
        self.kv['a'] = 'A'
        self.kv['b'] = 'B'
        self.assertEqual(self.kv.get('a'), 'A')
        self.assertEqual(self.kv.get('x'), None)
        self.assertEqual(self.kv.get('x', 'y'), 'y')
        self.assertEqual(
            list(self.kv.get(self.kv.key << ('a', 'b'))),
            ['A', 'B'])
        self.assertEqual(
            list(self.kv.get(self.kv.key << ('x', 'y'))),
            [])
    def test_pop(self):
        # pop() removes and returns; supports defaults and expression keys.
        self.ordered_kv['a'] = 'A'
        self.ordered_kv['b'] = 'B'
        self.ordered_kv['c'] = 'C'
        self.assertEqual(self.ordered_kv.pop('a'), 'A')
        self.assertEqual(list(self.ordered_kv.keys()), ['b', 'c'])
        self.assertRaises(KeyError, self.ordered_kv.pop, 'x')
        self.assertEqual(self.ordered_kv.pop('x', 'y'), 'y')
        self.assertEqual(
            list(self.ordered_kv.pop(self.ordered_kv.key << ['b', 'c'])),
            ['B', 'C'])
        self.assertEqual(list(self.ordered_kv.keys()), [])
try:
import psycopg2
except ImportError:
psycopg2 = None
@skip_if(lambda: psycopg2 is None)
class TestPostgresqlKeyStore(PeeweeTestCase):
    """KeyStore tests against Postgres; skipped when psycopg2 is absent."""
    def setUp(self):
        self.db = PostgresqlDatabase('peewee_test')
        self.kv = KeyStore(CharField(), ordered=True, database=self.db)
        self.kv.clear()
    def tearDown(self):
        self.db.close()
    def test_non_native_upsert(self):
        # Re-assigning an existing key must overwrite, not duplicate, even
        # without native upsert support.
        self.kv['a'] = 'A'
        self.kv['b'] = 'B'
        self.assertEqual(self.kv['a'], 'A')
        self.kv['a'] = 'C'
        self.assertEqual(self.kv['a'], 'C')
from peewee import *
from playhouse.kv import JSONKeyStore
from playhouse.kv import KeyStore
from playhouse.kv import PickledKeyStore
from playhouse.tests.base import PeeweeTestCase
from playhouse.tests.base import skip_if
class TestKeyStore(PeeweeTestCase):
def setUp(self):
super(TestKeyStore, self).setUp()
self.kv = KeyStore(CharField())
self.ordered_kv = KeyStore(CharField(), ordered=True)
self.pickled_kv = PickledKeyStore(ordered=True)
self.json_kv = JSONKeyStore(ordered=True)
self.kv.clear()
self.json_kv.clear()
def test_json(self):
self.json_kv['foo'] = 'bar'
self.json_kv['baze'] = {'baze': [1, 2, 3]}
self.json_kv['nugget'] = None
self.assertEqual(self.json_kv['foo'], 'bar')
self.assertEqual(self.json_kv['baze'], {'baze': [1, 2, 3]})
self.assertIsNone(self.json_kv['nugget'])
self.assertRaises(KeyError, lambda: self.json_kv['missing'])
results = self.json_kv[self.json_kv.key << ['baze', 'bar', 'nugget']]
self.assertEqual(results, [
{'baze': [1, 2, 3]},
None,
])
def test_storage(self):
self.kv['a'] = 'A'
self.kv['b'] = 1
self.assertEqual(self.kv['a'], 'A')
self.assertEqual(self.kv['b'], '1')
self.assertRaises(KeyError, self.kv.__getitem__, 'c')
del(self.kv['a'])
self.assertRaises(KeyError, self.kv.__getitem__, 'a')
self.kv['a'] = 'A'
self.kv['c'] = 'C'
self.assertEqual(self.kv[self.kv.key << ('a', 'c')], ['A', 'C'])
self.kv[self.kv.key << ('a', 'c')] = 'X'
self.assertEqual(self.kv['a'], 'X')
self.assertEqual(self.kv['b'], '1')
self.assertEqual(self.kv['c'], 'X')
key = self.kv.key
results = self.kv[key << ('a', 'b')]
self.assertEqual(results, ['X', '1'])
del(self.kv[self.kv.key << ('a', 'c')])
self.assertRaises(KeyError, self.kv.__getitem__, 'a')
self.assertRaises(KeyError, self.kv.__getitem__, 'c')
self.assertEqual(self.kv['b'], '1')
self.pickled_kv['a'] = 'A'
self.pickled_kv['b'] = 1.1
self.assertEqual(self.pickled_kv['a'], 'A')
self.assertEqual(self.pickled_kv['b'], 1.1)
def test_container_properties(self):
self.kv['x'] = 'X'
self.kv['y'] = 'Y'
self.assertEqual(len(self.kv), 2)
self.assertTrue('x' in self.kv)
self.assertFalse('a' in self.kv)
def test_dict_methods(self):
for kv in (self.ordered_kv, self.pickled_kv):
kv['a'] = 'A'
kv['c'] = 'C'
kv['b'] = 'B'
self.assertEqual(list(kv.keys()), ['a', 'b', 'c'])
self.assertEqual(list(kv.values()), ['A', 'B', 'C'])
self.assertEqual(list(kv.items()), [
('a', 'A'),
('b', 'B'),
('c', 'C'),
])
def test_iteration(self):
for kv in (self.ordered_kv, self.pickled_kv):
kv['a'] = 'A'
kv['c'] = 'C'
kv['b'] = 'B'
items = list(kv)
self.assertEqual(items, [
('a', 'A'),
('b', 'B'),
('c', 'C'),
])
def test_shared_mem(self):
self.kv['a'] = 'xxx'
self.assertEqual(self.ordered_kv['a'], 'xxx')
def set_k():
kv_t = KeyStore(CharField())
kv_t['b'] = 'yyy'
t = threading.Thread(target=set_k)
t.start()
t.join()
self.assertEqual(self.kv['b'], 'yyy')
def test_get(self):
self.kv['a'] = 'A'
self.kv['b'] = 'B'
self.assertEqual(self.kv.get('a'), 'A')
self.assertEqual(self.kv.get('x'), None)
self.assertEqual(self.kv.get('x', 'y'), 'y')
self.assertEqual(
list(self.kv.get(self.kv.key << ('a', 'b'))),
['A', 'B'])
self.assertEqual(
list(self.kv.get(self.kv.key << ('x', 'y'))),
[])
def test_pop(self):
self.ordered_kv['a'] = 'A'
self.ordered_kv['b'] = 'B'
self.ordered_kv['c'] = 'C'
self.assertEqual(self.ordered_kv.pop('a'), 'A')
self.assertEqual(list(self.ordered_kv.keys()), ['b', 'c'])
self.assertRaises(KeyError, self.ordered_kv.pop, 'x')
self.assertEqual(self.ordered_kv.pop('x', 'y'), 'y')
self.assertEqual(
list(self.ordered_kv.pop(self.ordered_kv.key << ['b', 'c'])),
['B', 'C'])
self.assertEqual(list(self.ordered_kv.keys()), [])
try:
import psycopg2
except ImportError:
psycopg2 = None
@skip_if(lambda: psycopg2 is None)
class TestPostgresqlKeyStore(PeeweeTestCase):
def setUp(self):
self.db = PostgresqlDatabase('peewee_test')
self.kv = KeyStore(CharField(), ordered=True, database=self.db)
self.kv.clear()
def tearDown(self):
self.db.close()
def test_non_native_upsert(self):
self.kv['a'] = 'A'
self.kv['b'] = 'B'
self.assertEqual(self.kv['a'], 'A')
self.kv['a'] = 'C'
self.assertEqual(self.kv['a'], 'C') | 0.497803 | 0.458288 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import keras_export
import abc
import contextlib
import functools
import six
import tensorflow as tf
import numpy as np
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.distribute import values as ds_values
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import revived_types
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.tf_export import tf_export
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Optimizer
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from typeguard import typechecked
from typing import Union, Callable, List, Dict
# TODO: Remove once https://github.com/tensorflow/tensorflow/issues/44613 is resolved
# Compare (major, minor) numerically. The previous string comparison
# (tf.__version__[:3] > "2.5") misorders two-digit minors, e.g. "2.10"[:3]
# is "2.1", which sorts *before* "2.5" and picks the wrong import path.
if tuple(int(part) for part in tf.__version__.split(".")[:2]) > (2, 5):
    from keras.engine import keras_tensor
else:
    from tensorflow.python.keras.engine import keras_tensor
# Scalar-like numeric inputs accepted throughout this module (Python and
# NumPy scalar types).
Number = Union[
    float,
    int,
    np.float16,
    np.float32,
    np.float64,
    np.int8,
    np.int16,
    np.int32,
    np.int64,
    np.uint8,
    np.uint16,
    np.uint32,
    np.uint64,
]
# Specs that Keras `get()`-style resolvers accept: an instance, a config
# dict, a registered string name, a callable, or None.
Initializer = Union[None, dict, str, Callable,
                    tf.keras.initializers.Initializer]
Regularizer = Union[None, dict, str, Callable,
                    tf.keras.regularizers.Regularizer]
Constraint = Union[None, dict, str, Callable, tf.keras.constraints.Constraint]
Activation = Union[None, str, Callable]
# NOTE(review): this alias shadows the `Optimizer` class imported from
# tensorflow.keras.optimizers earlier in the file — presumably intentional
# (annotation alias), but confirm no later code expects the class itself.
Optimizer = Union[tf.keras.optimizers.Optimizer, str]
# Anything convertible to a tf.Tensor, including nested lists of numbers
# and symbolic Keras tensors.
TensorLike = Union[
    List[Union[Number, list]],
    tuple,
    Number,
    np.ndarray,
    tf.Tensor,
    tf.SparseTensor,
    tf.Variable,
    keras_tensor.KerasTensor,
]
FloatTensorLike = Union[tf.Tensor, float, np.float16, np.float32, np.float64]
AcceptableDTypes = Union[tf.DType, np.dtype, type, int, str, None]
def _solve(a, b, c):
    """Solve the element-wise quadratic minimization used by the L1 prox step.

    Minimizes f(w) = 1/2 * a * w^2 + b * w + c * |w|, whose closed-form
    minimizer is:

        w* = -(b - sign(b) * c) / a   if |b| > c
        w* = 0                        otherwise

    REQUIRES: Dimensionality of a and b must be same.

    Args:
        a: A Tensor (quadratic coefficient).
        b: A Tensor (linear coefficient), same shape as `a`.
        c: A Tensor with one element (L1 strength).

    Returns:
        A Tensor holding the element-wise minimizer w*.
    """
    unconstrained = (c * tf.sign(b) - b) / a
    is_active = tf.cast(tf.abs(b) > c, dtype=b.dtype)
    return is_active * unconstrained
@tf.keras.utils.register_keras_serializable(package="Addons")
class Yogi(tf.keras.optimizers.Optimizer):
    """Optimizer that implements the Yogi algorithm in Keras.

    Yogi replaces Adam's multiplicative second-moment update with an
    additive one, v_t = v + (1 - beta2) * sign(g^2 - v) * g^2, which keeps
    the effective learning rate from changing too quickly.

    See Algorithm 2 of
    https://papers.nips.cc/paper/8186-adaptive-methods-for-nonconvex-optimization.pdf.
    """

    @typechecked
    def __init__(
        self,
        learning_rate: Union[FloatTensorLike, Callable] = 0.01,
        beta1: FloatTensorLike = 0.9,
        beta2: FloatTensorLike = 0.999,
        epsilon: FloatTensorLike = 1e-3,
        l1_regularization_strength: FloatTensorLike = 0.0,
        l2_regularization_strength: FloatTensorLike = 0.0,
        initial_accumulator_value: FloatTensorLike = 1e-6,
        activation: str = "sign",
        name: str = "Yogi",
        **kwargs,
    ):
        """Construct a new Yogi optimizer.

        Args:
            learning_rate: A Tensor or a floating point value.
                The learning rate.
            beta1: A float value or a constant float tensor.
                The exponential decay rate for the 1st moment estimates.
            beta2: A float value or a constant float tensor.
                The exponential decay rate for the 2nd moment estimates.
            epsilon: A constant trading off adaptivity and noise.
            l1_regularization_strength: A float value, must be greater than or
                equal to zero.
            l2_regularization_strength: A float value, must be greater than or
                equal to zero.
            initial_accumulator_value: The starting value for accumulators.
                Only positive values are allowed.
            activation: Use hard sign or soft tanh to determine sign.
            name: Optional name for the operations created when applying
                gradients. Defaults to "Yogi".
            **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`,
                `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue`
                is clip gradients by value, `decay` is included for backward
                compatibility to allow time inverse decay of learning rate. `lr`
                is included for backward compatibility, recommended to use
                `learning_rate` instead.
        """
        super().__init__(name, **kwargs)
        # `lr` is the deprecated alias for `learning_rate`.
        self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
        self._set_hyper("decay", self._initial_decay)
        self._set_hyper("beta_1", beta1)
        self._set_hyper("beta_2", beta2)
        self._set_hyper("epsilon", epsilon)
        self._set_hyper("l1_regularization_strength",
                        l1_regularization_strength)
        self._set_hyper("l2_regularization_strength",
                        l2_regularization_strength)
        # Plain-Python copies used for graph-construction-time branching.
        self._beta1 = beta1
        self._activation = activation
        self._initial_accumulator_value = initial_accumulator_value
        self._l1_regularization_strength = l1_regularization_strength
        self._l2_regularization_strength = l2_regularization_strength

    def _create_slots(self, var_list):
        """See `tf.train.Optimizer._create_slots()`."""
        # `v` starts at a small positive constant so the first per-coordinate
        # learning rates stay finite; `m` is only needed with momentum.
        for var in var_list:
            init = tf.constant_initializer(self._initial_accumulator_value)
            self.add_slot(var, "v", init)
            if self._beta1 > 0.0:
                self.add_slot(var, "m")

    def _sign_of_change(self, grad2, v_vals):
        """Return the (possibly softened) sign of `grad2 - v_vals`.

        This term makes Yogi's second-moment update additive rather than
        multiplicative.
        """
        if self._activation == "sign":
            return tf.sign(grad2 - v_vals)
        if self._activation == "tanh":
            # Steep tanh: a smooth approximation of the hard sign.
            return tf.tanh(10 * (grad2 - v_vals))
        raise NotImplementedError(
            "Activation function can be sign or tanh")

    def _resource_apply_dense(self, grad, var):
        """See `tf.train.Optimizer._apply_dense()`."""
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        beta1_t = self._get_hyper("beta_1", var_dtype)
        beta2_t = self._get_hyper("beta_2", var_dtype)
        epsilon_t = self._get_hyper("epsilon", var_dtype)
        l1_t = self._get_hyper("l1_regularization_strength", var_dtype)
        l2_t = self._get_hyper("l2_regularization_strength", var_dtype)
        local_step = tf.cast(self.iterations + 1, var_dtype)
        beta1_power = tf.pow(beta1_t, local_step)
        beta2_power = tf.pow(beta2_t, local_step)
        # Bias-corrected step size.
        lr = lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power)

        if self._beta1 == 0.0:
            m_t = None
            direction = grad
        else:
            # m_t = beta1 * m + (1 - beta1) * g_t
            m = self.get_slot(var, "m")
            m_t = m.assign(
                m * beta1_t + grad * (1 - beta1_t), use_locking=self._use_locking
            )
            direction = m_t

        # v_t = v + sign(g_t^2 - v) * (g_t^2)
        v = self.get_slot(var, "v")
        grad2 = grad * grad
        sign = self._sign_of_change(grad2, v)
        v_t = v.assign_add(
            (1 - beta2_t) * sign * grad2, use_locking=self._use_locking
        )
        v_sqrt = tf.sqrt(v_t)
        # Yogi effective LR
        per_coord_lr = lr / (v_sqrt + epsilon_t)
        # Variable update
        # Step 1: Gradient descent
        new_var = var - per_coord_lr * direction
        # Step 2: Prox operator (L1 soft-threshold; else plain L2 shrinkage)
        if self._l1_regularization_strength > 0:
            new_var = _solve(1 + l2_t * per_coord_lr, -
                             new_var, l1_t * per_coord_lr)
        elif self._l2_regularization_strength > 0:
            new_var = new_var / (1 + l2_t * per_coord_lr)
        # Step 3: Update
        var_update = var.assign(new_var, use_locking=self._use_locking)

        update_vs = [var_update]
        if m_t is not None:
            update_vs.append(m_t)
        update_vs.append(v_t)
        # Create an op that groups all the above operations
        return tf.group(*update_vs)

    def _resource_apply_sparse(self, grad, var, indices):
        """Applies sparse gradients to a variable.

        Args:
            grad: A tensor for the `values` of `tf.IndexedSlices`.
            var: A `tf.Variable` object.
            indices: A tensor for the `indices` of `tf.IndexedSlices`.

        Returns:
            An op which updates `var` with `grad` and `indices`.
        """
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        beta1_t = self._get_hyper("beta_1", var_dtype)
        beta2_t = self._get_hyper("beta_2", var_dtype)
        epsilon_t = self._get_hyper("epsilon", var_dtype)
        l1_t = self._get_hyper("l1_regularization_strength", var_dtype)
        l2_t = self._get_hyper("l2_regularization_strength", var_dtype)
        local_step = tf.cast(self.iterations + 1, var_dtype)
        beta1_power = tf.pow(beta1_t, local_step)
        beta2_power = tf.pow(beta2_t, local_step)
        # Bias-corrected step size.
        lr = lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power)

        if self._beta1 == 0.0:
            m_t = None
            direction = grad
        else:
            # m_t = beta1 * m + (1 - beta1) * g_t, applied as a dense decay
            # followed by a scatter into only the touched rows.
            m = self.get_slot(var, "m")
            m_scaled_g_values = grad * (1 - beta1_t)
            m_t = m.assign(m * beta1_t, use_locking=self._use_locking)
            with tf.control_dependencies([m_t]):
                m_slice = tf.gather(m, indices) + m_scaled_g_values
                m_t = self._resource_scatter_update(m, indices, m_slice)
            direction = m_slice

        # v_t = v + sign(g_t^2 - v) * (g_t^2), on the touched rows only.
        v = self.get_slot(var, "v")
        grad2 = grad * grad
        v_slice = tf.gather(v, indices)
        sign = self._sign_of_change(grad2, v_slice)
        v_scaled_g_values = v_slice + (1 - beta2_t) * sign * grad2
        v_t = self._resource_scatter_update(v, indices, v_scaled_g_values)
        v_sqrt = tf.sqrt(v_scaled_g_values)
        # Yogi effective LR
        per_coord_lr = lr / (v_sqrt + epsilon_t)
        # Variable update
        # Step 1: Gradient descent
        var_slice = tf.gather(var, indices)
        new_var = var_slice - per_coord_lr * direction
        # Step 2: Prox operator
        if self._l1_regularization_strength > 0:
            new_var = _solve(1 + l2_t * per_coord_lr, -
                             new_var, l1_t * per_coord_lr)
        elif self._l2_regularization_strength > 0:
            new_var = new_var / (1 + l2_t * per_coord_lr)
        # Step 3: Update
        var_update = self._resource_scatter_update(var, indices, new_var)

        update_vs = [var_update]
        if m_t is not None:
            update_vs.append(m_t)
        update_vs.append(v_t)
        # Create an op that groups all the above operations
        return tf.group(*update_vs)

    def get_config(self):
        """Return a serializable config mirroring `__init__`'s arguments."""
        config = super().get_config()
        config.update(
            {
                "learning_rate": self._serialize_hyperparameter("learning_rate"),
                "decay": self._serialize_hyperparameter("decay"),
                "beta1": self._serialize_hyperparameter("beta_1"),
                "beta2": self._serialize_hyperparameter("beta_2"),
                "epsilon": self._serialize_hyperparameter("epsilon"),
                "l1_regularization_strength": self._serialize_hyperparameter(
                    "l1_regularization_strength"
                ),
                "l2_regularization_strength": self._serialize_hyperparameter(
                    "l2_regularization_strength"
                ),
                "activation": self._activation,
                "initial_accumulator_value": self._initial_accumulator_value,
            }
        )
        return config
@tf.keras.utils.register_keras_serializable(package="Addons")
class RectifiedAdam2(tf.keras.optimizers.Optimizer):
    """Variant of the Adam optimizer whose adaptive learning rate is rectified
    so as to have a consistent variance.

    It implements the Rectified Adam (a.k.a. RAdam) proposed by
    Liyuan Liu et al. in [On The Variance Of The Adaptive Learning Rate
    And Beyond](https://arxiv.org/pdf/1908.03265v1.pdf).

    Example of usage:

    ```python
    opt = tfa.optimizers.RectifiedAdam(lr=1e-3)
    ```

    Note: `amsgrad` is not described in the original paper. Use it with
    caution.

    RAdam is not a placement of the heuristic warmup, the settings should be
    kept if warmup has already been employed and tuned in the baseline method.
    You can enable warmup by setting `total_steps` and `warmup_proportion`:

    ```python
    opt = tfa.optimizers.RectifiedAdam(
        lr=1e-3,
        total_steps=10000,
        warmup_proportion=0.1,
        min_lr=1e-5,
    )
    ```

    In the above example, the learning rate will increase linearly
    from 0 to `lr` in 1000 steps, then decrease linearly from `lr` to `min_lr`
    in 9000 steps.

    Lookahead, proposed by Michael Zhang et al. in the paper
    [Lookahead Optimizer: k steps forward, 1 step back]
    (https://arxiv.org/abs/1907.08610v1), can be integrated with RAdam,
    which is announced by Less Wright and the new combined optimizer can also
    be called "Ranger". The mechanism can be enabled by using the lookahead
    wrapper. For example:

    ```python
    radam = tfa.optimizers.RectifiedAdam()
    ranger = tfa.optimizers.Lookahead(radam, sync_period=6, slow_step_size=0.5)
    ```
    """

    @typechecked
    def __init__(
        self,
        learning_rate: Union[FloatTensorLike, Callable, Dict] = 0.001,
        beta_1: FloatTensorLike = 0.9,
        beta_2: FloatTensorLike = 0.999,
        epsilon: FloatTensorLike = 1e-7,
        weight_decay: Union[FloatTensorLike, Callable, Dict] = 0.0,
        amsgrad: bool = False,
        sma_threshold: FloatTensorLike = 5.0,
        total_steps: int = 0,
        warmup_proportion: FloatTensorLike = 0.1,
        min_lr: FloatTensorLike = 0.0,
        name: str = "RectifiedAdam",
        **kwargs,
    ):
        r"""Construct a new RAdam optimizer.

        Args:
            learning_rate: A `Tensor` or a floating point value, or a schedule
                that is a `tf.keras.optimizers.schedules.LearningRateSchedule`.
                The learning rate.
            beta_1: A float value or a constant float tensor.
                The exponential decay rate for the 1st moment estimates.
            beta_2: A float value or a constant float tensor.
                The exponential decay rate for the 2nd moment estimates.
            epsilon: A small constant for numerical stability.
            weight_decay: A `Tensor` or a floating point value, or a schedule
                that is a `tf.keras.optimizers.schedules.LearningRateSchedule`.
                Weight decay for each parameter.
            amsgrad: boolean. Whether to apply AMSGrad variant of this
                algorithm from the paper "On the Convergence of Adam and
                beyond".
            sma_threshold: A float value.
                The threshold for simple mean average.
            total_steps: An integer value. Total number of training steps.
                Enable warmup by setting a positive value.
            warmup_proportion: A floating point value.
                The proportion of increasing steps.
            min_lr: A floating point value. Minimum learning rate after warmup.
            name: Optional name for the operations created when applying
                gradients. Defaults to "RectifiedAdam".
            **kwargs: keyword arguments. Allowed to be {`clipnorm`,
                `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients
                by norm; `clipvalue` is clip gradients by value, `decay` is
                included for backward compatibility to allow time inverse
                decay of learning rate. `lr` is included for backward
                compatibility, recommended to use `learning_rate` instead.
        """
        super().__init__(name, **kwargs)
        # Dict-valued hyperparameters are serialized learning-rate schedules.
        if isinstance(learning_rate, Dict):
            learning_rate = tf.keras.optimizers.schedules.deserialize(
                learning_rate)
        if isinstance(weight_decay, Dict):
            weight_decay = tf.keras.optimizers.schedules.deserialize(
                weight_decay)
        self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
        self._set_hyper("beta_1", beta_1)
        self._set_hyper("beta_2", beta_2)
        self._set_hyper("decay", self._initial_decay)
        self._set_hyper("weight_decay", weight_decay)
        self._set_hyper("sma_threshold", sma_threshold)
        self._set_hyper("total_steps", float(total_steps))
        self._set_hyper("warmup_proportion", warmup_proportion)
        self._set_hyper("min_lr", min_lr)
        self.epsilon = epsilon or tf.keras.backend.epsilon()
        self.amsgrad = amsgrad
        self._has_weight_decay = weight_decay != 0.0
        self._initial_total_steps = total_steps

    def _create_slots(self, var_list):
        """Create m/v slots for every variable, plus vhat when amsgrad."""
        for var in var_list:
            self.add_slot(var, "m")
        for var in var_list:
            self.add_slot(var, "v")
        if self.amsgrad:
            for var in var_list:
                self.add_slot(var, "vhat")

    def set_weights(self, weights):
        """Restore weights, dropping trailing `vhat` slots saved by an
        amsgrad run when this instance was built without amsgrad."""
        params = self.weights
        # Layout: [iterations] + m-per-var + v-per-var (+ vhat-per-var).
        num_vars = int((len(params) - 1) / 2)
        if len(weights) == 3 * num_vars + 1:
            weights = weights[: len(params)]
        super().set_weights(weights)

    def _decayed_wd(self, var_dtype):
        """Return the (possibly scheduled) weight decay for this step."""
        wd_t = self._get_hyper("weight_decay", var_dtype)
        if isinstance(wd_t, tf.keras.optimizers.schedules.LearningRateSchedule):
            wd_t = tf.cast(wd_t(self.iterations), var_dtype)
        return wd_t

    def _prepare_local(self, var_device, var_dtype, apply_state):
        """Precompute per-(device, dtype) coefficients shared by all updates."""
        super()._prepare_local(var_device, var_dtype, apply_state)
        lr_t = self._decayed_lr(var_dtype)
        wd_t = self._decayed_wd(var_dtype)
        beta_1_t = self._get_hyper("beta_1", var_dtype)
        beta_2_t = self._get_hyper("beta_2", var_dtype)
        local_step = tf.cast(self.iterations + 1, var_dtype)
        beta_1_power = tf.pow(beta_1_t, local_step)
        beta_2_power = tf.pow(beta_2_t, local_step)
        one_minus_beta_1_t = 1.0 - beta_1_t
        recip_one_minus_beta_1_power = 1.0 / (1.0 - beta_1_power)
        one_minus_beta_2_t = 1.0 - beta_2_t
        recip_one_minus_beta_2_power = 1.0 / (1.0 - beta_2_power)
        # Approximated simple-moving-average length and its limit (RAdam
        # paper, section 3), plus the variance rectification term r_t.
        sma_inf = 2.0 / one_minus_beta_2_t - 1.0
        sma_t = sma_inf - 2.0 * local_step * beta_2_power * recip_one_minus_beta_2_power
        r_t = tf.sqrt(
            (sma_t - 4.0)
            / (sma_inf - 4.0)
            * (sma_t - 2.0)
            / (sma_inf - 2.0)
            * sma_inf
            / sma_t
        )
        sma_threshold = self._get_hyper("sma_threshold", var_dtype)
        sma_t_ge_sma_threshold = sma_t >= sma_threshold
        if self._initial_total_steps > 0:
            # Linear warmup to lr_t, then linear decay toward min_lr.
            total_steps = self._get_hyper("total_steps", var_dtype)
            warmup_steps = total_steps * \
                self._get_hyper("warmup_proportion", var_dtype)
            min_lr = self._get_hyper("min_lr", var_dtype)
            decay_steps = tf.maximum(total_steps - warmup_steps, 1)
            decay_rate = (min_lr - lr_t) / decay_steps
            lr_t = tf.where(
                local_step <= warmup_steps,
                lr_t * (local_step / warmup_steps),
                lr_t + decay_rate *
                tf.minimum(local_step - warmup_steps, decay_steps),
            )
        apply_state[(var_device, var_dtype)].update(
            dict(
                lr_t=lr_t,
                wd_t=wd_t,
                beta_1_t=beta_1_t,
                beta_2_t=beta_2_t,
                epsilon_t=tf.convert_to_tensor(self.epsilon, var_dtype),
                local_step=local_step,
                beta_1_power=beta_1_power,
                beta_2_power=beta_2_power,
                sma_inf=sma_inf,
                sma_t=sma_t,
                one_minus_beta_1_t=one_minus_beta_1_t,
                recip_one_minus_beta_1_power=recip_one_minus_beta_1_power,
                one_minus_beta_2_t=one_minus_beta_2_t,
                recip_one_minus_beta_2_power=recip_one_minus_beta_2_power,
                r_t=r_t,
                sma_t_ge_sma_threshold=sma_t_ge_sma_threshold,
            )
        )

    def _resource_apply_dense(self, grad, var, apply_state=None):
        """Apply a dense gradient update to `var`."""
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coef = (apply_state or {}).get(
            (var_device, var_dtype)
        ) or self._fallback_apply_state(var_device, var_dtype)
        m = self.get_slot(var, "m")
        v = self.get_slot(var, "v")
        m_t = m.assign(
            coef["beta_1_t"] * m + coef["one_minus_beta_1_t"] * grad,
            use_locking=self._use_locking,
        )
        m_corr_t = m_t * coef["recip_one_minus_beta_1_power"]
        v_t = v.assign(
            coef["beta_2_t"] * v +
            coef["one_minus_beta_2_t"] * tf.square(grad),
            use_locking=self._use_locking,
        )
        if self.amsgrad:
            vhat = self.get_slot(var, "vhat")
            vhat_t = vhat.assign(tf.maximum(vhat, v_t),
                                 use_locking=self._use_locking)
            v_corr_t = tf.sqrt(vhat_t * coef["recip_one_minus_beta_2_power"])
        else:
            vhat_t = None
            v_corr_t = tf.sqrt(v_t * coef["recip_one_minus_beta_2_power"])
        # Rectified update when the SMA is long enough; plain (bias-corrected)
        # momentum step otherwise.
        var_t = tf.where(
            coef["sma_t_ge_sma_threshold"],
            coef["r_t"] * m_corr_t / (v_corr_t + coef["epsilon_t"]),
            m_corr_t,
        )
        if self._has_weight_decay:
            var_t += coef["wd_t"] * var
        var_update = var.assign_sub(
            coef["lr_t"] * var_t, use_locking=self._use_locking)
        updates = [var_update, m_t, v_t]
        if self.amsgrad:
            updates.append(vhat_t)
        return tf.group(*updates)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        """Apply a sparse (IndexedSlices) gradient update to `var`."""
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coef = (apply_state or {}).get(
            (var_device, var_dtype)
        ) or self._fallback_apply_state(var_device, var_dtype)
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * coef["one_minus_beta_1_t"]
        m_t = m.assign(m * coef["beta_1_t"], use_locking=self._use_locking)
        with tf.control_dependencies([m_t]):
            m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
        m_corr_t = m_t * coef["recip_one_minus_beta_1_power"]
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad * grad) * coef["one_minus_beta_2_t"]
        v_t = v.assign(v * coef["beta_2_t"], use_locking=self._use_locking)
        with tf.control_dependencies([v_t]):
            v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
        if self.amsgrad:
            vhat = self.get_slot(var, "vhat")
            vhat_t = vhat.assign(tf.maximum(vhat, v_t),
                                 use_locking=self._use_locking)
            v_corr_t = tf.sqrt(vhat_t * coef["recip_one_minus_beta_2_power"])
        else:
            vhat_t = None
            v_corr_t = tf.sqrt(v_t * coef["recip_one_minus_beta_2_power"])
        var_t = tf.where(
            coef["sma_t_ge_sma_threshold"],
            coef["r_t"] * m_corr_t / (v_corr_t + coef["epsilon_t"]),
            m_corr_t,
        )
        if self._has_weight_decay:
            var_t += coef["wd_t"] * var
        with tf.control_dependencies([var_t]):
            var_update = self._resource_scatter_add(
                var, indices, tf.gather(-coef["lr_t"] * var_t, indices)
            )
        updates = [var_update, m_t, v_t]
        if self.amsgrad:
            updates.append(vhat_t)
        return tf.group(*updates)

    def get_config(self):
        """Return a serializable config mirroring `__init__`'s arguments."""
        config = super().get_config()
        config.update(
            {
                "learning_rate": self._serialize_hyperparameter("learning_rate"),
                "beta_1": self._serialize_hyperparameter("beta_1"),
                "beta_2": self._serialize_hyperparameter("beta_2"),
                "decay": self._serialize_hyperparameter("decay"),
                "weight_decay": self._serialize_hyperparameter("weight_decay"),
                "sma_threshold": self._serialize_hyperparameter("sma_threshold"),
                "epsilon": self.epsilon,
                "amsgrad": self.amsgrad,
                "total_steps": int(self._serialize_hyperparameter("total_steps")),
                "warmup_proportion": self._serialize_hyperparameter(
                    "warmup_proportion"
                ),
                "min_lr": self._serialize_hyperparameter("min_lr"),
            }
        )
        # BUG FIX: the config was previously built but never returned, which
        # silently broke optimizer serialization (get_config() gave None).
        return config
@tf_export(v1=["train.AdaBeliefOptimizer"])
class AdaBeliefOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Adam algorithm.
References:
Adam - A Method for Stochastic Optimization:
[Kingma et al., 2015](https://arxiv.org/abs/1412.6980)
([pdf](https://arxiv.org/pdf/1412.6980.pdf))
"""
    def __init__(self,
                 learning_rate=0.001,
                 beta1=0.9,
                 beta2=0.999,
                 epsilon=1e-8,
                 use_locking=False,
                 name="AdaBelief", amsgrad=False):
        r"""Construct a new AdaBelief optimizer.

        Initialization:

        $$m_0 := 0 \text{(Initialize initial 1st moment vector)}$$
        $$v_0 := 0 \text{(Initialize initial 2nd moment vector)}$$
        $$t := 0 \text{(Initialize timestep)}$$

        The update rule for `variable` with gradient `g` (as implemented in
        `_apply_dense` / `_resource_apply_dense` below):

        $$t := t + 1$$
        $$\text{lr}_t := \mathrm{learning\_rate} *
            \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
        $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
        $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * (g - m_t)^2 + \epsilon$$
        $$\text{variable} := \text{variable} -
            \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$

        Unlike Adam, the second moment tracks the squared deviation of the
        gradient from its running mean ("belief"), and epsilon is also folded
        into the accumulator itself. When `amsgrad` is True, the denominator
        uses the running maximum of $v_t$ instead of $v_t$.

        The sparse implementation of this algorithm (used when the gradient is
        an IndexedSlices object, typically because of `tf.gather` or an
        embedding lookup in the forward pass) does apply momentum to variable
        slices even if they were not used in the forward pass (meaning they
        have a gradient equal to zero). Momentum decay (beta1) is also applied
        to the entire momentum accumulator. This means that the sparse
        behavior is equivalent to the dense behavior (in contrast to some
        momentum implementations which ignore momentum unless a variable slice
        was actually used).

        Args:
          learning_rate: A Tensor or a floating point value. The learning rate.
          beta1: A float value or a constant float tensor. The exponential decay
            rate for the 1st moment estimates.
          beta2: A float value or a constant float tensor. The exponential decay
            rate for the 2nd moment estimates.
          epsilon: A small constant for numerical stability, added both to the
            second-moment accumulator and to the update denominator.
          use_locking: If True use locks for update operations.
          name: Optional name for the operations created when applying
            gradients. Defaults to "AdaBelief".
          amsgrad: boolean. Whether to keep a running maximum of the second
            moment ("vhat" slot) and use it in the denominator.

        @compatibility(eager)
        When eager execution is enabled, `learning_rate`, `beta1`, `beta2`, and
        `epsilon` can each be a callable that takes no arguments and returns the
        actual value to use. This can be useful for changing these values across
        different invocations of optimizer functions.
        @end_compatibility
        """
        super(AdaBeliefOptimizer, self).__init__(use_locking, name)
        self._lr = learning_rate
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon
        self.amsgrad = amsgrad
        # Tensor versions of the constructor arguments, created in _prepare().
        self._lr_t = None
        self._beta1_t = None
        self._beta2_t = None
        self._epsilon_t = None
def _get_beta_accumulators(self):
with ops.init_scope():
if context.executing_eagerly():
graph = None
else:
graph = ops.get_default_graph()
return (self._get_non_slot_variable("beta1_power", graph=graph),
self._get_non_slot_variable("beta2_power", graph=graph))
def _create_slots(self, var_list):
# Create the beta1 and beta2 accumulators on the same device as the first
# variable. Sort the var_list to make sure this device is consistent across
# workers (these need to go on the same PS, otherwise some updates are
# silently ignored).
first_var = min(var_list, key=lambda x: x.name)
self._create_non_slot_variable(
initial_value=self._beta1, name="beta1_power", colocate_with=first_var)
self._create_non_slot_variable(
initial_value=self._beta2, name="beta2_power", colocate_with=first_var)
# Create slots for the first and second moments.
for v in var_list:
self._zeros_slot(v, "m", self._name)
self._zeros_slot(v, "v", self._name)
self._zeros_slot(v, "vhat", self._name)
def _prepare(self):
lr = self._lr
beta1 = self._beta1
beta2 = self._beta2
epsilon = self._epsilon
self._lr_t = ops.convert_to_tensor(lr, name="learning_rate")
self._beta1_t = ops.convert_to_tensor(beta1, name="beta1")
self._beta2_t = ops.convert_to_tensor(beta2, name="beta2")
self._epsilon_t = ops.convert_to_tensor(epsilon, name="epsilon")
    def _apply_dense(self, grad, var):
        """Apply a dense gradient to `var` (TF1 ref-variable path).

        NOTE(review): despite the Adam-style class docstring, the second
        moment here accumulates the squared prediction error (grad - m_t)^2
        plus epsilon (AdaBelief-style), not grad^2 — see the v_t assign below.
        """
        graph = None if context.executing_eagerly() else ops.get_default_graph()
        beta1_power = math_ops.cast(self._get_non_slot_variable(
            "beta1_power", graph=graph), var.dtype.base_dtype)
        beta2_power = math_ops.cast(self._get_non_slot_variable(
            "beta2_power", graph=graph), var.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        # Bias-corrected step size.
        step_size = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(
            m, beta1_t * m + m_scaled_g_values, use_locking=self._use_locking)
        # v_t = beta2 * v + (1 - beta2) * (g_t - m_t)^2 + epsilon
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad - m_t) * (grad - m_t) * (1 - beta2_t)
        v_t = state_ops.assign(
            v, beta2_t * v + v_scaled_g_values + epsilon_t, use_locking=self._use_locking)
        # amsgrad: keep the running maximum of v_t for the denominator.
        vhat = self.get_slot(var, "vhat")
        if self.amsgrad:
            vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
            v_sqrt = math_ops.sqrt(vhat_t)
        else:
            # No-op assign: keeps the grouped op list below uniform.
            vhat_t = state_ops.assign(vhat, vhat)
            v_sqrt = math_ops.sqrt(v_t)
        # Compute the bounds
        step_size = step_size / (v_sqrt + epsilon_t)
        bounded_lr = m_t * step_size
        var_update = state_ops.assign_sub(
            var, bounded_lr, use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t])
def _resource_apply_dense(self, grad, var):
    """Dense update for one *resource* variable.

    Mirrors `_apply_dense` exactly (AdaBelief-style second moment,
    optional amsgrad max-tracking); the only difference is that the
    hyperparameter casts key off ``grad.dtype`` instead of ``var.dtype``.
    """
    graph = None if context.executing_eagerly() else ops.get_default_graph()
    beta1_power = math_ops.cast(self._get_non_slot_variable(
        "beta1_power", graph=graph), grad.dtype.base_dtype)
    beta2_power = math_ops.cast(self._get_non_slot_variable(
        "beta2_power", graph=graph), grad.dtype.base_dtype)
    lr_t = math_ops.cast(self._lr_t, grad.dtype.base_dtype)
    beta1_t = math_ops.cast(self._beta1_t, grad.dtype.base_dtype)
    beta2_t = math_ops.cast(self._beta2_t, grad.dtype.base_dtype)
    epsilon_t = math_ops.cast(self._epsilon_t, grad.dtype.base_dtype)
    # Bias-corrected step size.
    step_size = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, "m")
    m_scaled_g_values = grad * (1 - beta1_t)
    m_t = state_ops.assign(
        m, beta1_t * m + m_scaled_g_values, use_locking=self._use_locking)
    # v_t = beta2 * v + (1 - beta2) * (g_t - m_t)^2 + epsilon
    v = self.get_slot(var, "v")
    v_scaled_g_values = (grad - m_t) * (grad - m_t) * (1 - beta2_t)
    v_t = state_ops.assign(
        v, beta2_t * v + v_scaled_g_values + epsilon_t, use_locking=self._use_locking)
    # amsgrad: running max of v_t.
    vhat = self.get_slot(var, "vhat")
    if self.amsgrad:
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)
    else:
        vhat_t = state_ops.assign(vhat, vhat)
        v_sqrt = math_ops.sqrt(v_t)
    # var -= step_size * m_t / (sqrt(v) + epsilon)
    step_size = step_size / (v_sqrt + epsilon_t)
    bounded_lr = m_t * step_size
    var_update = state_ops.assign_sub(
        var, bounded_lr, use_locking=self._use_locking)
    return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t])
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
    """Sparse update shared by the ref- and resource-variable paths.

    Args:
      grad: gradient values for the rows selected by `indices`.
      var: variable to update.
      indices: rows of `var` (and its slots) touched by this gradient.
      scatter_add: callable ``(x, i, v)`` performing a scatter-add on x
        and returning the updated value.
    """
    beta1_power, beta2_power = self._get_beta_accumulators()
    beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
    beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
    epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
    # Bias-corrected learning rate.
    lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
    # m_t = beta1 * m + (1 - beta1) * g_t : decay everywhere, add on rows.
    m = self.get_slot(var, "m")
    m_scaled_g_values = grad * (1 - beta1_t)
    m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
    with ops.control_dependencies([m_t]):
        m_t = scatter_add(m, indices, m_scaled_g_values)
    # v_t accumulates (g_t - m_t)^2 on the touched rows (AdaBelief form).
    # NOTE(review): (grad - m_t) mixes the row-sliced `grad` with the full
    # dense value returned by scatter_add — confirm the shapes line up for
    # this sparse path before relying on it.
    v = self.get_slot(var, "v")
    v_scaled_g_values = (grad - m_t) * (grad - m_t) * (1 - beta2_t)
    v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
    with ops.control_dependencies([v_t]):
        v_t = scatter_add(v, indices, v_scaled_g_values + epsilon_t)
    # amsgrad: running max of v_t.
    vhat = self.get_slot(var, "vhat")
    if self.amsgrad:
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)
    else:
        vhat_t = state_ops.assign(vhat, vhat)
        v_sqrt = math_ops.sqrt(v_t)
    var_update = state_ops.assign_sub(
        var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
    return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t])
def _apply_sparse(self, grad, var):
    """Sparse (IndexedSlices) update entry point for ref variables."""
    def _scatter_add(x, i, v):
        # Mirror the locking behaviour of the dense path.
        return state_ops.scatter_add(x, i, v, use_locking=self._use_locking)

    return self._apply_sparse_shared(
        grad.values, var, grad.indices, _scatter_add)
def _resource_scatter_add(self, x, i, v):
    """Scatter-add `v` into rows `i` of resource variable `x`; return its value."""
    add_op = resource_variable_ops.resource_scatter_add(x.handle, i, v)
    # Read only after the scatter has been applied.
    with ops.control_dependencies([add_op]):
        return x.value()
def _resource_apply_sparse(self, grad, var, indices):
    """Sparse update for resource variables; delegates to the shared path."""
    return self._apply_sparse_shared(grad, var, indices,
                                     self._resource_scatter_add)
def _finish(self, update_ops, name_scope):
    """Advance the beta1^t / beta2^t accumulators after all var updates."""
    # Update the power accumulators only once every variable update is done.
    with ops.control_dependencies(update_ops):
        beta1_power, beta2_power = self._get_beta_accumulators()
        # Colocate with the accumulator to avoid cross-device traffic.
        with ops.colocate_with(beta1_power):
            update_beta1 = beta1_power.assign(
                beta1_power * self._beta1_t, use_locking=self._use_locking)
            update_beta2 = beta2_power.assign(
                beta2_power * self._beta2_t, use_locking=self._use_locking)
    return control_flow_ops.group(
        *update_ops + [update_beta1, update_beta2], name=name_scope)
class AdaHessian(optimizer_v2.OptimizerV2):
_HAS_AGGREGATE_GRAD = True
def __init__(self,
             learning_rate=0.1,
             beta_1=0.9,
             beta_2=0.999,
             epsilon=1e-4,
             weight_decay=0.,
             hessian_power=1.0,
             name='AdaHessian',
             average_size_1d=None,
             average_size_2d=None,
             average_size_3d=-1,
             average_size_4d=-1,
             **kwargs):
    """Construct a new AdaHessian optimizer.

    Args:
      learning_rate: A `Tensor`, floating point value, or a schedule that is a
        `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable that
        takes no arguments and returns the actual value to use, The learning
        rate. Defaults to 0.1.
      beta_1: A float value or a constant float tensor, or a callable that takes
        no arguments and returns the actual value to use. The exponential decay
        rate for the 1st moment estimates. Defaults to 0.9.
      beta_2: A float value or a constant float tensor, or a callable that takes
        no arguments and returns the actual value to use, The exponential decay
        rate for the 2nd moment estimates. Defaults to 0.999.
      epsilon: A small constant for numerical stability. This epsilon is
        "epsilon hat" in the Kingma and Ba paper (in the formula just before
        Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to
        1e-4.
      weight_decay: We are using AdamW's weight decay scheme. Defaults to 0.
      name: Optional name for the operations created when applying gradients.
        Defaults to "AdaHessian".
      hessian_power: Hessian power to control the optimizer more similar to first/second
        order method (default: 1). You can also try 0.5. For some tasks we found this
        to result in better performance.
      **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
        `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
        gradients by value, `decay` is included for backward compatibility to
        allow time inverse decay of learning rate. `lr` is included for backward
        compatibility, recommended to use `learning_rate` instead.
      average_size_{1,2,3,4}d: spatial averaging of the Hessian diagonal for
        tensors of that rank:
          None: use no spatial averaging
          -1: use suggested spatial averaging (recommended for conv kernels)
          >= 1: use customized size
    """
    super(AdaHessian, self).__init__(name, **kwargs)
    # Legacy `lr` kwarg wins over `learning_rate` for backward compatibility.
    self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
    self._set_hyper('decay', self._initial_decay)
    self._set_hyper('beta_1', beta_1)
    self._set_hyper('beta_2', beta_2)
    # `epsilon=0` falls back to the Keras backend default epsilon.
    self.epsilon = epsilon or backend_config.epsilon()
    self.weight_decay = weight_decay
    self.hessian_power = hessian_power
    # Per-rank spatial-averaging block sizes (see docstring above).
    self.average_size_1d = average_size_1d
    self.average_size_2d = average_size_2d
    self.average_size_3d = average_size_3d
    self.average_size_4d = average_size_4d
def _create_slots(self, var_list):
    """Create the 'm' (momentum) and 'v' (2nd-moment) slot per variable.

    All 'm' slots are created before any 'v' slot so the slot-variable
    ordering stays compatible with V1 checkpoints.
    """
    for slot_name in ('m', 'v'):
        for var in var_list:
            self.add_slot(var, slot_name)
def _prepare_local(self, var_device, var_dtype, apply_state):
    """Precompute per-(device, dtype) constants used by the update step."""
    super(AdaHessian, self)._prepare_local(
        var_device, var_dtype, apply_state)
    local_step = math_ops.cast(self.iterations + 1, var_dtype)
    beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
    beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
    beta_1_power = math_ops.pow(beta_1_t, local_step)
    beta_2_power = math_ops.pow(beta_2_t, local_step)
    # Bias-corrected learning rate.
    # NOTE(review): `_resource_apply_dense` reads 'lr_t' and applies its own
    # bias correction, so this pre-corrected 'lr' entry appears unused there
    # — confirm before removing.
    lr = (
        apply_state[(var_device, var_dtype)]['lr_t'] *
        (math_ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)))
    apply_state[(var_device, var_dtype)].update(
        dict(
            lr=lr,
            epsilon=ops.convert_to_tensor_v2(self.epsilon, var_dtype),
            beta_1_t=beta_1_t,
            beta_1_power=beta_1_power,
            one_minus_beta_1_t=1 - beta_1_t,
            beta_2_t=beta_2_t,
            beta_2_power=beta_2_power,
            one_minus_beta_2_t=1 - beta_2_t))
def set_weights(self, weights):
    """Restore optimizer weights, tolerating Keras V1 checkpoints."""
    params = self.weights
    # If the weights are generated by Keras V1 optimizer, it includes vhats
    # even without amsgrad, i.e, V1 optimizer has 3x + 1 variables, while V2
    # optimizer has 2x + 1 variables. Filter vhats out for compatibility.
    num_vars = int((len(params) - 1) / 2)
    if len(weights) == 3 * num_vars + 1:
        weights = weights[:len(params)]
    super(AdaHessian, self).set_weights(weights)
def get_gradients_hessian(self, loss, params):
    """Returns gradients and a Hessian-diagonal estimate of `loss` w.r.t. `params`.

    Uses Hutchinson's method: draw a Rademacher probe vector z (+/-1
    entries), differentiate sum(z * g) to get Hz, and take |Hz * z| as a
    per-parameter curvature estimate.

    Arguments:
      loss: Loss tensor.
      params: List of variables.
    Returns:
      Tuple (gradients, Hessian-diagonal estimates), lists aligned with
      `params`.
    Raises:
      ValueError: In case any gradient cannot be computed (e.g. if gradient
        function not implemented).
    """
    params = nest.flatten(params)
    with backend.get_graph().as_default(), backend.name_scope(self._name +
                                                              "/gradients"):
        grads = gradients.gradients(loss, params)
        for grad, param in zip(grads, params):
            if grad is None:
                raise ValueError("Variable {} has `None` for gradient. "
                                 "Please make sure that all of your ops have a "
                                 "gradient defined (i.e. are differentiable). "
                                 "Common ops without gradient: "
                                 "K.argmax, K.round, K.eval.".format(param))
        # WARNING: for now we do not support gradient clip
        # grads = self._clip_gradients(grads)
        # Rademacher probe: sample U(0,1), threshold at 0.5 to get +/-1.
        v = [np.random.uniform(0, 1, size=p.shape) for p in params]
        for vi in v:
            vi[vi < 0.5] = -1
            vi[vi >= 0.5] = 1
        # NOTE(review): probe is hard-coded float32 — assumes float32 params;
        # confirm for mixed-precision models.
        v = [tf.convert_to_tensor(vi, dtype=tf.dtypes.float32) for vi in v]
        # vprod = z . g, so d(vprod)/dw = Hz.
        vprod = tf.reduce_sum([tf.reduce_sum(vi * grad)
                               for vi, grad in zip(v, grads)])
        Hv = gradients.gradients(vprod, params)
        # |Hz * z| approximates |diag(H)| in expectation.
        Hd = [tf.abs(Hvi * vi) for Hvi, vi in zip(Hv, v)]
    return grads, Hd
def _filter_grads_hessian(self, grads_hessian_and_vars):
    """Drop entries whose gradient is None.

    Args:
      grads_hessian_and_vars: iterable of (grad, hessian, var) triples.

    Returns:
      Tuple of the (grad, hessian, var) triples whose grad is not None.

    Raises:
      ValueError: if the input is non-empty but every gradient is None.
    """
    grads_hessian_and_vars = tuple(grads_hessian_and_vars)
    if not grads_hessian_and_vars:
        return grads_hessian_and_vars
    filtered = []
    vars_with_empty_grads = []
    for grad, hessian, var in grads_hessian_and_vars:
        if grad is None:
            vars_with_empty_grads.append(var)
        else:
            filtered.append((grad, hessian, var))
    filtered = tuple(filtered)
    if not filtered:
        # BUG FIX: this path previously referenced the undefined name
        # `grads_and_vars` and unpacked 2-tuples from a list of 3-tuples,
        # so it raised NameError instead of the intended ValueError.
        raise ValueError("No gradients provided for any variable: %s." %
                         ([v.name for _, _, v in grads_hessian_and_vars],))
    if vars_with_empty_grads:
        logging.warning(
            ("Gradients do not exist for variables %s when minimizing the loss."),
            ([v.name for v in vars_with_empty_grads]))
    return filtered
def apply_gradients_hessian(self,
                            grads_hessian_and_vars,
                            name=None,
                            experimental_aggregate_gradients=True):
    """Apply (grad, hessian, var) triples, mirroring `apply_gradients`.

    Args:
      grads_hessian_and_vars: list of (gradient, hessian, variable) triples.
      name: optional name for the returned operation.
      experimental_aggregate_gradients: whether to all-reduce grads/Hessians
        across replicas before applying.

    Returns:
      The op applying the updates (a no-op if there is nothing to apply).

    Raises:
      RuntimeError: if called in cross-replica context.
      NotImplementedError: for unaggregated grads under parameter-server
        strategies.
    """
    grads_hessian_and_vars = self._filter_grads_hessian(
        grads_hessian_and_vars)
    var_list = [v for (_, _, v) in grads_hessian_and_vars]
    with backend.name_scope(self._name):
        # Create iteration if necessary.
        with ops.init_scope():
            self._create_all_weights(var_list)
        if not grads_hessian_and_vars:
            # Distribution strategy does not support reducing an empty list of
            # gradients
            return control_flow_ops.no_op()
        if distribute_ctx.in_cross_replica_context():
            raise RuntimeError(
                "`apply_gradients() cannot be called in cross-replica context. "
                "Use `tf.distribute.Strategy.run` to enter replica "
                "context.")
        strategy = distribute_ctx.get_strategy()
        if (not experimental_aggregate_gradients and strategy and isinstance(
                strategy.extended,
                parameter_server_strategy.ParameterServerStrategyExtended)):
            raise NotImplementedError(
                "`experimental_aggregate_gradients=False is not supported for "
                "ParameterServerStrategy and CentralStorageStrategy")
        apply_state = self._prepare(var_list)
        if experimental_aggregate_gradients:
            reduced_grads, reduced_hessian = self._aggregate_gradients_hessian(
                grads_hessian_and_vars)
            var_list = [v for _, _, v in grads_hessian_and_vars]
            grads_hessian_and_vars = list(
                zip(reduced_grads, reduced_hessian, var_list))
        # The actual per-variable updates run in replica context via
        # `_distributed_apply`.
        return distribute_ctx.get_replica_context().merge_call(
            functools.partial(self._distributed_apply,
                              apply_state=apply_state),
            args=(grads_hessian_and_vars,),
            kwargs={
                "name": name,
            })
def _aggregate_gradients_hessian(self, grads_hessian_and_vars):
    """All-reduce gradients and Hessian diagonals across replicas.

    Args:
      grads_hessian_and_vars: List of (gradient, hessian, variable) triples.

    Returns:
      Two lists (grads, hessians) aligned with the input order, with None
      placeholders preserved for variables that had no gradient.
    """
    grads_hessian_and_vars = list(grads_hessian_and_vars)
    filtered_grads_hessian_and_vars = self._filter_grads_hessian(
        grads_hessian_and_vars)
    # batch_reduce_to understands (value, destination) pairs only, so the
    # triples are split and reduced in two passes.
    filtered_grads_and_vars = tuple(
        (g, v) for (g, h, v) in filtered_grads_hessian_and_vars)
    filtered_hessian_and_vars = tuple(
        (h, v) for (g, h, v) in filtered_grads_hessian_and_vars)

    def all_reduce_fn(distribution, values_and_vars):
        return distribution.extended.batch_reduce_to(
            ds_reduce_util.ReduceOp.SUM, values_and_vars)

    if filtered_grads_hessian_and_vars:
        reduced_part1 = distribute_ctx.get_replica_context().merge_call(
            all_reduce_fn, args=(filtered_grads_and_vars,))
        reduced_part2 = distribute_ctx.get_replica_context().merge_call(
            all_reduce_fn, args=(filtered_hessian_and_vars,))
    else:
        # BUG FIX: the empty case previously bound an unused name `reduced`,
        # leaving reduced_part1/reduced_part2 undefined (a latent NameError
        # had the loop below ever indexed them).
        reduced_part1 = []
        reduced_part2 = []
    # Copy the reduced values but re-insert None placeholders so the outputs
    # line up with the (unfiltered) input.
    reduced_with_nones_grads = []
    reduced_with_nones_hessian = []
    reduced_pos = 0
    for g, h, _ in grads_hessian_and_vars:
        if g is None:
            reduced_with_nones_grads.append(None)
            reduced_with_nones_hessian.append(None)
        else:
            reduced_with_nones_grads.append(reduced_part1[reduced_pos])
            reduced_with_nones_hessian.append(reduced_part2[reduced_pos])
            reduced_pos += 1
    return reduced_with_nones_grads, reduced_with_nones_hessian
@def_function.function(experimental_compile=True)
def _resource_apply_dense(self, grad, hess, var, apply_state=None):
    """Apply one AdaHessian update to `var` given its grad and Hessian estimate.

    The second moment `v` accumulates a (optionally spatially averaged)
    squared Hessian-diagonal estimate; the denominator is
    sqrt(v / bias_correct2) ** hessian_power + epsilon.  Decoupled
    (AdamW-style) weight decay is applied before the main step.
    """
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
                    self._fallback_apply_state(var_device, var_dtype))
    m = self.get_slot(var, 'm')
    v = self.get_slot(var, 'v')
    # m <- beta1 * m + (1 - beta1) * grad, written as an in-place delta.
    m.assign_add((grad - m) * (1 - coefficients['beta_1_t']))
    # Pick the spatial-averaging setting for this tensor's rank.
    if len(v.shape) == 1:
        resize = self.average_size_1d
    elif len(v.shape) == 2:
        resize = self.average_size_2d
    elif len(v.shape) == 3:
        resize = self.average_size_3d
    elif len(v.shape) == 4:
        resize = self.average_size_4d
    else:
        raise Exception(
            'You need to define the spatial average size by yourself!')
    if resize == None:
        # No spatial averaging: use the raw squared Hessian estimate.
        v.assign_add((math_ops.square(hess) - v) *
                     (1 - coefficients['beta_2_t']))
    elif resize == -1:
        # Suggested averaging: mean over the leading (spatial/input) axes,
        # keeping dims so the result broadcasts back over `v`.
        if len(v.shape) == 1:
            v.assign_add((math_ops.square(hess) - v) *
                         (1 - coefficients['beta_2_t']))
        elif len(v.shape) == 2:
            hess_average = tf.reduce_mean(hess, [0], keepdims=True)
            v.assign_add((math_ops.square(hess_average) - v)
                         * (1 - coefficients['beta_2_t']))
        elif len(v.shape) == 3:
            hess_average = tf.reduce_mean(hess, [0], keepdims=True)
            v.assign_add((math_ops.square(hess_average) - v)
                         * (1 - coefficients['beta_2_t']))
        elif len(v.shape) == 4:
            hess_average = tf.reduce_mean(hess, [0, 1], keepdims=True)
            v.assign_add((math_ops.square(hess_average) - v)
                         * (1 - coefficients['beta_2_t']))
    else:
        # Customized block averaging: mean over blocks of `resize` elements.
        if resize <= 0:
            raise Exception(
                'You need to define the spatial average size >= 1!')
        hess_average = tf.reshape(hess, [resize, -1])
        hess_average = tf.reduce_mean(hess_average, [0])
        hess_average = tf.repeat(hess_average, resize)
        hess_average = tf.reshape(hess_average, v.shape)
        v.assign_add((math_ops.square(hess_average) - v)
                     * (1 - coefficients['beta_2_t']))
    bias_correct1 = 1 - coefficients['beta_1_power']
    bias_correct2 = 1 - coefficients['beta_2_power']
    # Decoupled (AdamW-style) weight decay.
    if self.weight_decay != 0:
        var.assign_sub(coefficients['lr_t'] * self.weight_decay * var)
    denom = tf.math.pow(math_ops.sqrt(v / bias_correct2),
                        self.hessian_power) + coefficients['epsilon']
    var.assign_sub(coefficients['lr_t'] * m / bias_correct1 / denom)
@def_function.function(experimental_compile=True)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    """Sparse updates are not implemented for AdaHessian.

    Raises:
      NotImplementedError: always.  (Subclass of RuntimeError/Exception,
      so callers that caught the previous generic `Exception` still work.)
    """
    raise NotImplementedError('For now, we do not support sparse update yet.')
def get_config(self):
    """Serialize hyperparameters for optimizer reconstruction.

    NOTE(review): `hessian_power` and the `average_size_*` settings are not
    serialized, so a restored optimizer falls back to their defaults —
    confirm whether that is intended.
    """
    config = super(AdaHessian, self).get_config()
    config.update({
        'learning_rate': self._serialize_hyperparameter('learning_rate'),
        'decay': self._serialize_hyperparameter('decay'),
        'beta_1': self._serialize_hyperparameter('beta_1'),
        'beta_2': self._serialize_hyperparameter('beta_2'),
        'epsilon': self.epsilon,
        'weight_decay': self.weight_decay
    })
    return config
def _distributed_apply(self, distribution, grads_hessian_and_vars, name, apply_state):
    """`apply_gradients` using a `DistributionStrategy`.

    Runs the per-variable dense update on each variable's device and then
    increments `iterations` once all updates are in flight.  (FIX: the
    final return line was fused with non-code residue from a file
    concatenation artifact; the stray text has been removed.)
    """
    def apply_grad_to_update_var(var, grad, hess):
        """Apply gradient to variable."""
        if isinstance(var, ops.Tensor):
            raise NotImplementedError("Trying to update a Tensor ", var)
        apply_kwargs = {}
        # Only pass apply_state if the apply method accepts it.
        if "apply_state" in self._dense_apply_args:
            apply_kwargs["apply_state"] = apply_state
        update_op = self._resource_apply_dense(
            grad, hess, var, **apply_kwargs)
        if var.constraint is not None:
            with ops.control_dependencies([update_op]):
                return var.assign(var.constraint(var))
        else:
            return update_op

    eagerly_outside_functions = ops.executing_eagerly_outside_functions()
    update_ops = []
    with ops.name_scope(name or self._name, skip_on_eager=True):
        for grad, hess, var in grads_hessian_and_vars:
            def _assume_mirrored(grad, hess):
                if isinstance(grad, ds_values.PerReplica):
                    return ds_values.Mirrored(grad.values), ds_values.Mirrored(hess.values)
                return grad, hess

            grad, hess = nest.map_structure(_assume_mirrored, grad, hess)
            # Colocate the update with variables to avoid unnecessary communication
            # delays. See b/136304694.
            with distribution.extended.colocate_vars_with(var):
                with ops.name_scope("update" if eagerly_outside_functions else
                                    "update_" + var.op.name, skip_on_eager=True):
                    update_ops.extend(distribution.extended.update(
                        var, apply_grad_to_update_var, args=(grad, hess), group=False))
        any_symbolic = any(isinstance(i, ops.Operation) or
                           tf_utils.is_symbolic_tensor(i) for i in update_ops)
        if not context.executing_eagerly() or any_symbolic:
            # If the current context is graph mode or any of the update ops are
            # symbolic then the step update should be carried out under a graph
            # context. (eager updates execute immediately)
            with ops._get_graph_from_inputs(update_ops).as_default():  # pylint: disable=protected-access
                with ops.control_dependencies(update_ops):
                    return self._iterations.assign_add(1, read_value=False)
        return self._iterations.assign_add(1)
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import keras_export
import abc
import contextlib
import functools
import six
import tensorflow as tf
import numpy as np
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.distribute import values as ds_values
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import revived_types
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.tf_export import tf_export
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Optimizer
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from typeguard import typechecked
from typing import Union, Callable, List, Dict
# TODO: Remove once https://github.com/tensorflow/tensorflow/issues/44613 is resolved
# TODO: Remove once https://github.com/tensorflow/tensorflow/issues/44613 is resolved
# BUG FIX: the original compared version *strings* (`tf.__version__[:3] > "2.5"`),
# which misorders double-digit minors — "2.10.0" sliced to "2.1" compares below
# "2.5" even though 2.10 > 2.5.  Compare (major, minor) numerically instead.
if tuple(int(part) for part in tf.__version__.split(".")[:2]) > (2, 5):
    from keras.engine import keras_tensor
else:
    from tensorflow.python.keras.engine import keras_tensor
# ---- Type aliases (tensorflow_addons style) used by the optimizers below ----

# Any plain-Python or NumPy scalar number.
Number = Union[
    float,
    int,
    np.float16,
    np.float32,
    np.float64,
    np.int8,
    np.int16,
    np.int32,
    np.int64,
    np.uint8,
    np.uint16,
    np.uint32,
    np.uint64,
]
# Specs accepted by the corresponding `tf.keras.*.get(...)` resolvers.
Initializer = Union[None, dict, str, Callable,
                    tf.keras.initializers.Initializer]
Regularizer = Union[None, dict, str, Callable,
                    tf.keras.regularizers.Regularizer]
Constraint = Union[None, dict, str, Callable, tf.keras.constraints.Constraint]
Activation = Union[None, str, Callable]
# NOTE(review): this alias shadows the `Optimizer` class imported from
# tensorflow.keras.optimizers above — presumably intentional; verify.
Optimizer = Union[tf.keras.optimizers.Optimizer, str]
# Anything convertible to (or already) a tensor.
TensorLike = Union[
    List[Union[Number, list]],
    tuple,
    Number,
    np.ndarray,
    tf.Tensor,
    tf.SparseTensor,
    tf.Variable,
    keras_tensor.KerasTensor,
]
FloatTensorLike = Union[tf.Tensor, float, np.float16, np.float32, np.float64]
AcceptableDTypes = Union[tf.DType, np.dtype, type, int, str, None]
def _solve(a, b, c):
    """Return solution of a quadratic minimization.

    The optimization equation is:
        f(a, b, c) = argmin_w{1/2 * a * w^2 + b * w + c * |w|}
    with the closed-form solution:
        w* = -(b - sign(b)*c)/a  if |b| > c  else  w* = 0

    REQUIRES: Dimensionality of a and b must be same.

    Args:
      a: A Tensor (quadratic coefficient).
      b: A Tensor (linear coefficient), same shape as `a`.
      c: A Tensor with one element (L1 strength).

    Returns:
      A Tensor w, which is solution for the equation.
    """
    unconstrained = (c * tf.sign(b) - b) / a
    # Soft-threshold: zero out coordinates where |b| <= c.
    active = tf.cast(tf.abs(b) > c, dtype=b.dtype)
    return active * unconstrained
@tf.keras.utils.register_keras_serializable(package="Addons")
class Yogi(tf.keras.optimizers.Optimizer):
    """Optimizer that implements the Yogi algorithm in Keras.

    See Algorithm 2 of
    https://papers.nips.cc/paper/8186-adaptive-methods-for-nonconvex-optimization.pdf.
    """

    @typechecked
    def __init__(
        self,
        learning_rate: Union[FloatTensorLike, Callable] = 0.01,
        beta1: FloatTensorLike = 0.9,
        beta2: FloatTensorLike = 0.999,
        epsilon: FloatTensorLike = 1e-3,
        l1_regularization_strength: FloatTensorLike = 0.0,
        l2_regularization_strength: FloatTensorLike = 0.0,
        initial_accumulator_value: FloatTensorLike = 1e-6,
        activation: str = "sign",
        name: str = "Yogi",
        **kwargs,
    ):
        """Construct a new Yogi optimizer.

        Args:
          learning_rate: A Tensor or a floating point value.
            The learning rate.
          beta1: A float value or a constant float tensor.
            The exponential decay rate for the 1st moment estimates.
          beta2: A float value or a constant float tensor.
            The exponential decay rate for the 2nd moment estimates.
          epsilon: A constant trading off adaptivity and noise.
          l1_regularization_strength: A float value, must be greater than or
            equal to zero.
          l2_regularization_strength: A float value, must be greater than or
            equal to zero.
          initial_accumulator_value: The starting value for accumulators.
            Only positive values are allowed.
          activation: Use hard sign or soft tanh to determin sign.
          name: Optional name for the operations created when applying
            gradients. Defaults to "Yogi".
          **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`,
            `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue`
            is clip gradients by value, `decay` is included for backward
            compatibility to allow time inverse decay of learning rate. `lr`
            is included for backward compatibility, recommended to use
            `learning_rate` instead.
        """
        super().__init__(name, **kwargs)
        # Legacy `lr` kwarg wins over `learning_rate` for backward compat.
        self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
        self._set_hyper("decay", self._initial_decay)
        self._set_hyper("beta_1", beta1)
        self._set_hyper("beta_2", beta2)
        self._set_hyper("epsilon", epsilon)
        self._set_hyper("l1_regularization_strength",
                        l1_regularization_strength)
        self._set_hyper("l2_regularization_strength",
                        l2_regularization_strength)
        # Kept as plain Python values as well: used for branch decisions at
        # graph-construction time (e.g. beta1 == 0 skips the 'm' slot).
        self._beta1 = beta1
        self._activation = activation
        self._initial_accumulator_value = initial_accumulator_value
        self._l1_regularization_strength = l1_regularization_strength
        self._l2_regularization_strength = l2_regularization_strength

    def _create_slots(self, var_list):
        """See `tf.train.Optimizer._create_slots()`."""
        # Create slots for the first and second moments, and maximum second moments.
        # 'v' starts at a small positive constant; 'm' is only needed when
        # momentum (beta1 > 0) is enabled.
        for var in var_list:
            init = tf.constant_initializer(self._initial_accumulator_value)
            self.add_slot(var, "v", init)
            if self._beta1 > 0.0:
                self.add_slot(var, "m")

    def _resource_apply_dense(self, grad, var):
        """See `tf.train.Optimizer._apply_dense()`."""
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        beta1_t = self._get_hyper("beta_1", var_dtype)
        beta2_t = self._get_hyper("beta_2", var_dtype)
        epsilon_t = self._get_hyper("epsilon", var_dtype)
        l1_t = self._get_hyper("l1_regularization_strength", var_dtype)
        l2_t = self._get_hyper("l2_regularization_strength", var_dtype)
        local_step = tf.cast(self.iterations + 1, var_dtype)
        beta1_power = tf.pow(beta1_t, local_step)
        beta2_power = tf.pow(beta2_t, local_step)
        # Bias-corrected learning rate.
        lr = lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power)
        update_vs = []
        if self._beta1 == 0.0:
            # Momentum-free branch: the step uses the raw gradient.
            # v_t = v + sign(g_t^2-v)(g_t^2)   (Yogi's additive 2nd moment)
            v = self.get_slot(var, "v")
            grad2 = grad * grad
            if self._activation == "sign":
                sign = tf.sign(grad2 - v)
            elif self._activation == "tanh":
                # Soft sign: tanh(10*(g^2 - v)) approximates the hard sign.
                sign = tf.tanh(10 * (grad2 - v))
            else:
                raise NotImplementedError(
                    "Activation function can be sign or tanh")
            v_t = v.assign_add(
                (1 - beta2_t) * sign * grad2, use_locking=self._use_locking
            )
            v_sqrt = tf.sqrt(v_t)
            # Yogi effective LR
            per_coord_lr = lr / (v_sqrt + epsilon_t)
            # Variable update
            # Step 1: Gradient descent
            new_var = var - per_coord_lr * grad
            # Step 2: Prox operator (handles L1/L2 regularization in
            # closed form via _solve).
            if self._l1_regularization_strength > 0:
                new_var = _solve(1 + l2_t * per_coord_lr, -
                                 new_var, l1_t * per_coord_lr)
            elif self._l2_regularization_strength > 0:
                new_var = new_var / (1 + l2_t * per_coord_lr)
            # Step 3: Update
            var_update = var.assign(new_var, use_locking=self._use_locking)
            update_vs.append(var_update)
            update_vs.append(v_t)
        else:
            # Momentum branch: the step uses the gradient EMA m_t.
            # m_t = beta1 * m + (1 - beta1) * g_t
            m = self.get_slot(var, "m")
            m_t = m.assign(
                m * beta1_t + grad * (1 - beta1_t), use_locking=self._use_locking
            )
            # v_t = v + sign(g_t^2-v)(g_t^2)
            v = self.get_slot(var, "v")
            grad2 = grad * grad
            if self._activation == "sign":
                sign = tf.sign(grad2 - v)
            elif self._activation == "tanh":
                sign = tf.tanh(10 * (grad2 - v))
            else:
                raise NotImplementedError(
                    "Activation function can be sign or tanh")
            v_t = v.assign_add(
                (1 - beta2_t) * sign * grad2, use_locking=self._use_locking
            )
            v_sqrt = tf.sqrt(v_t)
            # Yogi effective LR
            per_coord_lr = lr / (v_sqrt + epsilon_t)
            # Variable update
            # Step 1: Gradient descent
            new_var = var - per_coord_lr * m_t
            # Step 2: Prox operator
            if self._l1_regularization_strength > 0:
                new_var = _solve(1 + l2_t * per_coord_lr, -
                                 new_var, l1_t * per_coord_lr)
            elif self._l2_regularization_strength > 0:
                new_var = new_var / (1 + l2_t * per_coord_lr)
            # Step 3: Update
            var_update = var.assign(new_var, use_locking=self._use_locking)
            update_vs.append(var_update)
            update_vs.append(m_t)
            update_vs.append(v_t)
        # Create an op that groups all the above operations
        return tf.group(*update_vs)

    def _resource_apply_sparse(self, grad, var, indices):
        """Applies sparse gradients to a variable.

        Args:
          grad: A tensor for the `values` of `tf.IndexedSlices`.
          var: A `tf.Variable` object.
          indices: A tensor for the `indices` of `tf.IndexedSlices`.

        Returns:
          An op which updates `var` with `grad` and `indices`.
        """
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        beta1_t = self._get_hyper("beta_1", var_dtype)
        beta2_t = self._get_hyper("beta_2", var_dtype)
        epsilon_t = self._get_hyper("epsilon", var_dtype)
        l1_t = self._get_hyper("l1_regularization_strength", var_dtype)
        l2_t = self._get_hyper("l2_regularization_strength", var_dtype)
        local_step = tf.cast(self.iterations + 1, var_dtype)
        beta1_power = tf.pow(beta1_t, local_step)
        beta2_power = tf.pow(beta2_t, local_step)
        # Bias-corrected learning rate.
        lr = lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power)
        update_vs = []
        if self._beta1 == 0.0:
            # Momentum-free sparse branch: operate on the selected rows only.
            # v_t = v + sign(g_t^2-v)(g_t^2)
            v = self.get_slot(var, "v")
            grad2 = grad * grad
            v_slice = tf.gather(v, indices)
            if self._activation == "sign":
                sign = tf.sign(grad2 - v_slice)
            elif self._activation == "tanh":
                sign = tf.tanh(10 * (grad2 - v_slice))
            else:
                raise NotImplementedError(
                    "Activation function can be sign or tanh")
            v_scaled_g_values = v_slice + (1 - beta2_t) * sign * grad2
            v_t = self._resource_scatter_update(v, indices, v_scaled_g_values)
            v_sqrt = tf.sqrt(v_scaled_g_values)
            # Yogi effective LR
            per_coord_lr = lr / (v_sqrt + epsilon_t)
            # Variable update
            # Step 1: Gradient descent
            var_slice = tf.gather(var, indices)
            new_var = var_slice - per_coord_lr * grad
            # Step 2: Prox operator
            if self._l1_regularization_strength > 0:
                new_var = _solve(1 + l2_t * per_coord_lr, -
                                 new_var, l1_t * per_coord_lr)
            elif self._l2_regularization_strength > 0:
                new_var = new_var / (1 + l2_t * per_coord_lr)
            # Step 3: Update
            var_update = self._resource_scatter_update(var, indices, new_var)
            update_vs.append(var_update)
            update_vs.append(v_t)
        else:
            # Momentum sparse branch.
            # m_t = beta1 * m + (1 - beta1) * g_t : decay all rows, add on
            # the touched rows only after the decay has been applied.
            m = self.get_slot(var, "m")
            m_scaled_g_values = grad * (1 - beta1_t)
            m_t = m.assign(m * beta1_t, use_locking=self._use_locking)
            with tf.control_dependencies([m_t]):
                m_slice = tf.gather(m, indices) + m_scaled_g_values
                m_t = self._resource_scatter_update(m, indices, m_slice)
            # v_t = v + sign(g_t^2-v)(g_t^2)
            v = self.get_slot(var, "v")
            grad2 = grad * grad
            v_slice = tf.gather(v, indices)
            if self._activation == "sign":
                sign = tf.sign(grad2 - tf.gather(v, indices))
            elif self._activation == "tanh":
                sign = tf.tanh(10 * (grad2 - tf.gather(v, indices)))
            else:
                raise NotImplementedError(
                    "Activation function can be sign or tanh")
            v_scaled_g_values = v_slice + (1 - beta2_t) * sign * grad2
            v_t = self._resource_scatter_update(v, indices, v_scaled_g_values)
            v_sqrt = tf.sqrt(v_scaled_g_values)
            # Yogi effective LR
            per_coord_lr = lr / (v_sqrt + epsilon_t)
            # Variable update
            # Step 1: Gradient descent
            var_slice = tf.gather(var, indices)
            new_var = var_slice - per_coord_lr * m_slice
            # Step 2: Prox operator
            if self._l1_regularization_strength > 0:
                new_var = _solve(1 + l2_t * per_coord_lr, -
                                 new_var, l1_t * per_coord_lr)
            elif self._l2_regularization_strength > 0:
                new_var = new_var / (1 + l2_t * per_coord_lr)
            # Step 3: Update
            var_update = self._resource_scatter_update(var, indices, new_var)
            update_vs.append(var_update)
            update_vs.append(m_t)
            update_vs.append(v_t)
        # Create an op that groups all the above operations
        return tf.group(*update_vs)

    def get_config(self):
        """Serialize all hyperparameters for optimizer reconstruction."""
        config = super().get_config()
        config.update(
            {
                "learning_rate": self._serialize_hyperparameter("learning_rate"),
                "decay": self._serialize_hyperparameter("decay"),
                "beta1": self._serialize_hyperparameter("beta_1"),
                "beta2": self._serialize_hyperparameter("beta_2"),
                "epsilon": self._serialize_hyperparameter("epsilon"),
                "l1_regularization_strength": self._serialize_hyperparameter(
                    "l1_regularization_strength"
                ),
                "l2_regularization_strength": self._serialize_hyperparameter(
                    "l2_regularization_strength"
                ),
                "activation": self._activation,
                "initial_accumulator_value": self._initial_accumulator_value,
            }
        )
        return config
@tf.keras.utils.register_keras_serializable(package="Addons")
class RectifiedAdam2(tf.keras.optimizers.Optimizer):
"""Variant of the Adam optimizer whose adaptive learning rate is rectified
so as to have a consistent variance.
It implements the Rectified Adam (a.k.a. RAdam) proposed by
<NAME> et al. in [On The Variance Of The Adaptive Learning Rate
And Beyond](https://arxiv.org/pdf/1908.03265v1.pdf).
Example of usage:
```python
opt = tfa.optimizers.RectifiedAdam(lr=1e-3)
```
Note: `amsgrad` is not described in the original paper. Use it with
caution.
RAdam is not a placement of the heuristic warmup, the settings should be
kept if warmup has already been employed and tuned in the baseline method.
You can enable warmup by setting `total_steps` and `warmup_proportion`:
```python
opt = tfa.optimizers.RectifiedAdam(
lr=1e-3,
total_steps=10000,
warmup_proportion=0.1,
min_lr=1e-5,
)
```
In the above example, the learning rate will increase linearly
from 0 to `lr` in 1000 steps, then decrease linearly from `lr` to `min_lr`
in 9000 steps.
Lookahead, proposed by <NAME> et.al in the paper
[Lookahead Optimizer: k steps forward, 1 step back]
(https://arxiv.org/abs/1907.08610v1), can be integrated with RAdam,
which is announced by <NAME> and the new combined optimizer can also
be called "Ranger". The mechanism can be enabled by using the lookahead
wrapper. For example:
```python
radam = tfa.optimizers.RectifiedAdam()
ranger = tfa.optimizers.Lookahead(radam, sync_period=6, slow_step_size=0.5)
```
"""
@typechecked
def __init__(
self,
learning_rate: Union[FloatTensorLike, Callable, Dict] = 0.001,
beta_1: FloatTensorLike = 0.9,
beta_2: FloatTensorLike = 0.999,
epsilon: FloatTensorLike = 1e-7,
weight_decay: Union[FloatTensorLike, Callable, Dict] = 0.0,
amsgrad: bool = False,
sma_threshold: FloatTensorLike = 5.0,
total_steps: int = 0,
warmup_proportion: FloatTensorLike = 0.1,
min_lr: FloatTensorLike = 0.0,
name: str = "RectifiedAdam",
**kwargs,
):
r"""Construct a new RAdam optimizer.
Args:
learning_rate: A `Tensor` or a floating point value, or a schedule
that is a `tf.keras.optimizers.schedules.LearningRateSchedule`.
The learning rate.
beta_1: A float value or a constant float tensor.
The exponential decay rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor.
The exponential decay rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability.
weight_decay: A `Tensor` or a floating point value, or a schedule
that is a `tf.keras.optimizers.schedules.LearningRateSchedule`.
Weight decay for each parameter.
amsgrad: boolean. Whether to apply AMSGrad variant of this
algorithm from the paper "On the Convergence of Adam and
beyond".
sma_threshold. A float value.
The threshold for simple mean average.
total_steps: An integer value. Total number of training steps.
Enable warmup by setting a positive value.
warmup_proportion: A floating point value.
The proportion of increasing steps.
min_lr: A floating point value. Minimum learning rate after warmup.
name: Optional name for the operations created when applying
gradients. Defaults to "RectifiedAdam".
**kwargs: keyword arguments. Allowed to be {`clipnorm`,
`clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients
by norm; `clipvalue` is clip gradients by value, `decay` is
included for backward compatibility to allow time inverse
decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
"""
super().__init__(name, **kwargs)
if isinstance(learning_rate, Dict):
learning_rate = tf.keras.optimizers.schedules.deserialize(
learning_rate)
if isinstance(weight_decay, Dict):
weight_decay = tf.keras.optimizers.schedules.deserialize(
weight_decay)
self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
self._set_hyper("beta_1", beta_1)
self._set_hyper("beta_2", beta_2)
self._set_hyper("decay", self._initial_decay)
self._set_hyper("weight_decay", weight_decay)
self._set_hyper("sma_threshold", sma_threshold)
self._set_hyper("total_steps", float(total_steps))
self._set_hyper("warmup_proportion", warmup_proportion)
self._set_hyper("min_lr", min_lr)
self.epsilon = epsilon or tf.keras.backend.epsilon()
self.amsgrad = amsgrad
self._has_weight_decay = weight_decay != 0.0
self._initial_total_steps = total_steps
def _create_slots(self, var_list):
for var in var_list:
self.add_slot(var, "m")
for var in var_list:
self.add_slot(var, "v")
if self.amsgrad:
for var in var_list:
self.add_slot(var, "vhat")
def set_weights(self, weights):
params = self.weights
num_vars = int((len(params) - 1) / 2)
if len(weights) == 3 * num_vars + 1:
weights = weights[: len(params)]
super().set_weights(weights)
def _decayed_wd(self, var_dtype):
wd_t = self._get_hyper("weight_decay", var_dtype)
if isinstance(wd_t, tf.keras.optimizers.schedules.LearningRateSchedule):
wd_t = tf.cast(wd_t(self.iterations), var_dtype)
return wd_t
def _prepare_local(self, var_device, var_dtype, apply_state):
super()._prepare_local(var_device, var_dtype, apply_state)
lr_t = self._decayed_lr(var_dtype)
wd_t = self._decayed_wd(var_dtype)
beta_1_t = self._get_hyper("beta_1", var_dtype)
beta_2_t = self._get_hyper("beta_2", var_dtype)
local_step = tf.cast(self.iterations + 1, var_dtype)
beta_1_power = tf.pow(beta_1_t, local_step)
beta_2_power = tf.pow(beta_2_t, local_step)
one_minus_beta_1_t = 1.0 - beta_1_t
recip_one_minus_beta_1_power = 1.0 / (1.0 - beta_1_power)
one_minus_beta_2_t = 1.0 - beta_2_t
recip_one_minus_beta_2_power = 1.0 / (1.0 - beta_2_power)
sma_inf = 2.0 / one_minus_beta_2_t - 1.0
sma_t = sma_inf - 2.0 * local_step * beta_2_power * recip_one_minus_beta_2_power
r_t = tf.sqrt(
(sma_t - 4.0)
/ (sma_inf - 4.0)
* (sma_t - 2.0)
/ (sma_inf - 2.0)
* sma_inf
/ sma_t
)
sma_threshold = self._get_hyper("sma_threshold", var_dtype)
sma_t_ge_sma_threshold = sma_t >= sma_threshold
if self._initial_total_steps > 0:
total_steps = self._get_hyper("total_steps", var_dtype)
warmup_steps = total_steps * \
self._get_hyper("warmup_proportion", var_dtype)
min_lr = self._get_hyper("min_lr", var_dtype)
decay_steps = tf.maximum(total_steps - warmup_steps, 1)
decay_rate = (min_lr - lr_t) / decay_steps
lr_t = tf.where(
local_step <= warmup_steps,
lr_t * (local_step / warmup_steps),
lr_t + decay_rate *
tf.minimum(local_step - warmup_steps, decay_steps),
)
apply_state[(var_device, var_dtype)].update(
dict(
lr_t=lr_t,
wd_t=wd_t,
beta_1_t=beta_1_t,
beta_2_t=beta_2_t,
epsilon_t=tf.convert_to_tensor(self.epsilon, var_dtype),
local_step=local_step,
beta_1_power=beta_1_power,
beta_2_power=beta_2_power,
sma_inf=sma_inf,
sma_t=sma_t,
one_minus_beta_1_t=one_minus_beta_1_t,
recip_one_minus_beta_1_power=recip_one_minus_beta_1_power,
one_minus_beta_2_t=one_minus_beta_2_t,
recip_one_minus_beta_2_power=recip_one_minus_beta_2_power,
r_t=r_t,
sma_t_ge_sma_threshold=sma_t_ge_sma_threshold,
)
)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coef = (apply_state or {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
m_t = m.assign(
coef["beta_1_t"] * m + coef["one_minus_beta_1_t"] * grad,
use_locking=self._use_locking,
)
m_corr_t = m_t * coef["recip_one_minus_beta_1_power"]
v_t = v.assign(
coef["beta_2_t"] * v +
coef["one_minus_beta_2_t"] * tf.square(grad),
use_locking=self._use_locking,
)
if self.amsgrad:
vhat = self.get_slot(var, "vhat")
vhat_t = vhat.assign(tf.maximum(vhat, v_t),
use_locking=self._use_locking)
v_corr_t = tf.sqrt(vhat_t * coef["recip_one_minus_beta_2_power"])
else:
vhat_t = None
v_corr_t = tf.sqrt(v_t * coef["recip_one_minus_beta_2_power"])
var_t = tf.where(
coef["sma_t_ge_sma_threshold"],
coef["r_t"] * m_corr_t / (v_corr_t + coef["epsilon_t"]),
m_corr_t,
)
if self._has_weight_decay:
var_t += coef["wd_t"] * var
var_update = var.assign_sub(
coef["lr_t"] * var_t, use_locking=self._use_locking)
updates = [var_update, m_t, v_t]
if self.amsgrad:
updates.append(vhat_t)
return tf.group(*updates)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coef = (apply_state or {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
m = self.get_slot(var, "m")
m_scaled_g_values = grad * coef["one_minus_beta_1_t"]
m_t = m.assign(m * coef["beta_1_t"], use_locking=self._use_locking)
with tf.control_dependencies([m_t]):
m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
m_corr_t = m_t * coef["recip_one_minus_beta_1_power"]
v = self.get_slot(var, "v")
v_scaled_g_values = (grad * grad) * coef["one_minus_beta_2_t"]
v_t = v.assign(v * coef["beta_2_t"], use_locking=self._use_locking)
with tf.control_dependencies([v_t]):
v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
if self.amsgrad:
vhat = self.get_slot(var, "vhat")
vhat_t = vhat.assign(tf.maximum(vhat, v_t),
use_locking=self._use_locking)
v_corr_t = tf.sqrt(vhat_t * coef["recip_one_minus_beta_2_power"])
else:
vhat_t = None
v_corr_t = tf.sqrt(v_t * coef["recip_one_minus_beta_2_power"])
var_t = tf.where(
coef["sma_t_ge_sma_threshold"],
coef["r_t"] * m_corr_t / (v_corr_t + coef["epsilon_t"]),
m_corr_t,
)
if self._has_weight_decay:
var_t += coef["wd_t"] * var
with tf.control_dependencies([var_t]):
var_update = self._resource_scatter_add(
var, indices, tf.gather(-coef["lr_t"] * var_t, indices)
)
updates = [var_update, m_t, v_t]
if self.amsgrad:
updates.append(vhat_t)
return tf.group(*updates)
def get_config(self):
config = super().get_config()
config.update(
{
"learning_rate": self._serialize_hyperparameter("learning_rate"),
"beta_1": self._serialize_hyperparameter("beta_1"),
"beta_2": self._serialize_hyperparameter("beta_2"),
"decay": self._serialize_hyperparameter("decay"),
"weight_decay": self._serialize_hyperparameter("weight_decay"),
"sma_threshold": self._serialize_hyperparameter("sma_threshold"),
"epsilon": self.epsilon,
"amsgrad": self.amsgrad,
"total_steps": int(self._serialize_hyperparameter("total_steps")),
"warmup_proportion": self._serialize_hyperparameter(
"warmup_proportion"
),
"min_lr": self._serialize_hyperparameter("min_lr"),
}
)
@tf_export(v1=["train.AdaBeliefOptimizer"])
class AdaBeliefOptimizer(optimizer.Optimizer):
    """Optimizer implementing an AdaBelief-style variant of Adam (TF1 API).

    Identical to Adam except that the second-moment accumulator tracks the
    squared deviation of the gradient from its first-moment estimate,
    (g_t - m_t)^2 (plus epsilon), instead of g_t^2, and an optional
    AMSGrad-style running maximum ("vhat" slot) can be enabled.
    References:
      Adam - A Method for Stochastic Optimization:
        [Kingma et al., 2015](https://arxiv.org/abs/1412.6980)
        ([pdf](https://arxiv.org/pdf/1412.6980.pdf))
    """

    def __init__(self,
                 learning_rate=0.001,
                 beta1=0.9,
                 beta2=0.999,
                 epsilon=1e-8,
                 use_locking=False,
                 name="AdaBelief", amsgrad=False):
        r"""Construct a new AdaBelief optimizer.
        Initialization:
        $$m_0 := 0 \text{(Initialize initial 1st moment vector)}$$
        $$v_0 := 0 \text{(Initialize initial 2nd moment vector)}$$
        $$t := 0 \text{(Initialize timestep)}$$
        The update rule for `variable` with gradient `g` uses an optimization
        described at the end of section 2 of the Adam paper, with the
        AdaBelief second moment:
        $$t := t + 1$$
        $$\text{lr}_t := \mathrm{learning_rate} *
        \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
        $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
        $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * (g - m_t)^2 + \epsilon$$
        $$\text{variable} := \text{variable} -
        \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$
        The default value of 1e-8 for epsilon might not be a good default in
        general. For example, when training an Inception network on ImageNet a
        current good choice is 1.0 or 0.1. Note that since AdamOptimizer uses the
        formulation just before Section 2.1 of the Kingma and Ba paper rather than
        the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
        hat" in the paper.
        The sparse implementation of this algorithm (used when the gradient is an
        IndexedSlices object, typically because of `tf.gather` or an embedding
        lookup in the forward pass) does apply momentum to variable slices even if
        they were not used in the forward pass (meaning they have a gradient equal
        to zero). Momentum decay (beta1) is also applied to the entire momentum
        accumulator. This means that the sparse behavior is equivalent to the dense
        behavior (in contrast to some momentum implementations which ignore momentum
        unless a variable slice was actually used).
        Args:
          learning_rate: A Tensor or a floating point value. The learning rate.
          beta1: A float value or a constant float tensor. The exponential decay
            rate for the 1st moment estimates.
          beta2: A float value or a constant float tensor. The exponential decay
            rate for the 2nd moment estimates.
          epsilon: A small constant for numerical stability. This epsilon is
            "epsilon hat" in the Kingma and Ba paper (in the formula just before
            Section 2.1), not the epsilon in Algorithm 1 of the paper.
          use_locking: If True use locks for update operations.
          name: Optional name for the operations created when applying gradients.
            Defaults to "AdaBelief".
          amsgrad: If True, keep a per-variable running maximum of v_t
            ("vhat" slot) and use it in the denominator.
        @compatibility(eager)
        When eager execution is enabled, `learning_rate`, `beta1`, `beta2`, and
        `epsilon` can each be a callable that takes no arguments and returns the
        actual value to use. This can be useful for changing these values across
        different invocations of optimizer functions.
        @end_compatibility
        """
        super(AdaBeliefOptimizer, self).__init__(use_locking, name)
        self._lr = learning_rate
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon
        self.amsgrad = amsgrad
        # Tensor versions of the constructor arguments, created in _prepare().
        self._lr_t = None
        self._beta1_t = None
        self._beta2_t = None
        self._epsilon_t = None

    def _get_beta_accumulators(self):
        # Non-slot variables live on the graph (or eagerly); fetch both
        # beta-power accumulators from the right context.
        with ops.init_scope():
            if context.executing_eagerly():
                graph = None
            else:
                graph = ops.get_default_graph()
            return (self._get_non_slot_variable("beta1_power", graph=graph),
                    self._get_non_slot_variable("beta2_power", graph=graph))

    def _create_slots(self, var_list):
        # Create the beta1 and beta2 accumulators on the same device as the first
        # variable. Sort the var_list to make sure this device is consistent across
        # workers (these need to go on the same PS, otherwise some updates are
        # silently ignored).
        first_var = min(var_list, key=lambda x: x.name)
        self._create_non_slot_variable(
            initial_value=self._beta1, name="beta1_power", colocate_with=first_var)
        self._create_non_slot_variable(
            initial_value=self._beta2, name="beta2_power", colocate_with=first_var)
        # Create slots for the first and second moments; "vhat" is created
        # unconditionally so the slot layout is the same with/without amsgrad.
        for v in var_list:
            self._zeros_slot(v, "m", self._name)
            self._zeros_slot(v, "v", self._name)
            self._zeros_slot(v, "vhat", self._name)

    def _prepare(self):
        # Materialize hyperparameters as tensors once per apply_gradients call.
        lr = self._lr
        beta1 = self._beta1
        beta2 = self._beta2
        epsilon = self._epsilon
        self._lr_t = ops.convert_to_tensor(lr, name="learning_rate")
        self._beta1_t = ops.convert_to_tensor(beta1, name="beta1")
        self._beta2_t = ops.convert_to_tensor(beta2, name="beta2")
        self._epsilon_t = ops.convert_to_tensor(epsilon, name="epsilon")

    def _apply_dense(self, grad, var):
        """Dense update for ref variables (graph-mode state_ops path)."""
        graph = None if context.executing_eagerly() else ops.get_default_graph()
        beta1_power = math_ops.cast(self._get_non_slot_variable(
            "beta1_power", graph=graph), var.dtype.base_dtype)
        beta2_power = math_ops.cast(self._get_non_slot_variable(
            "beta2_power", graph=graph), var.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        # Bias-corrected step size.
        step_size = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(
            m, beta1_t * m + m_scaled_g_values, use_locking=self._use_locking)
        # AdaBelief second moment: v_t = beta2 * v + (1 - beta2) * (g_t - m_t)^2 + eps
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad - m_t) * (grad - m_t) * (1 - beta2_t)
        v_t = state_ops.assign(
            v, beta2_t * v + v_scaled_g_values + epsilon_t, use_locking=self._use_locking)
        # amsgrad: use the running maximum of v_t in the denominator.
        vhat = self.get_slot(var, "vhat")
        if self.amsgrad:
            vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
            v_sqrt = math_ops.sqrt(vhat_t)
        else:
            # No-op assign so the returned group always contains a vhat op.
            vhat_t = state_ops.assign(vhat, vhat)
            v_sqrt = math_ops.sqrt(v_t)
        # Compute the bounds
        step_size = step_size / (v_sqrt + epsilon_t)
        bounded_lr = m_t * step_size
        var_update = state_ops.assign_sub(
            var, bounded_lr, use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t])

    def _resource_apply_dense(self, grad, var):
        """Dense update for resource variables; mirrors _apply_dense."""
        graph = None if context.executing_eagerly() else ops.get_default_graph()
        beta1_power = math_ops.cast(self._get_non_slot_variable(
            "beta1_power", graph=graph), grad.dtype.base_dtype)
        beta2_power = math_ops.cast(self._get_non_slot_variable(
            "beta2_power", graph=graph), grad.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, grad.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, grad.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, grad.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, grad.dtype.base_dtype)
        step_size = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(
            m, beta1_t * m + m_scaled_g_values, use_locking=self._use_locking)
        # v_t = beta2 * v + (1 - beta2) * (g_t - m_t)^2 + eps
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad - m_t) * (grad - m_t) * (1 - beta2_t)
        v_t = state_ops.assign(
            v, beta2_t * v + v_scaled_g_values + epsilon_t, use_locking=self._use_locking)
        # amsgrad
        vhat = self.get_slot(var, "vhat")
        if self.amsgrad:
            vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
            v_sqrt = math_ops.sqrt(vhat_t)
        else:
            vhat_t = state_ops.assign(vhat, vhat)
            v_sqrt = math_ops.sqrt(v_t)
        # Compute the bounds
        step_size = step_size / (v_sqrt + epsilon_t)
        bounded_lr = m_t * step_size
        var_update = state_ops.assign_sub(
            var, bounded_lr, use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t])

    def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        """Shared sparse update; `scatter_add` abstracts ref vs resource vars."""
        beta1_power, beta2_power = self._get_beta_accumulators()
        beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
        # m_t = beta1 * m + (1 - beta1) * g_t
        # Decay the whole accumulator first, then scatter-add the slices.
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
        with ops.control_dependencies([m_t]):
            m_t = scatter_add(m, indices, m_scaled_g_values)
        # v_t = beta2 * v + (1 - beta2) * (g_t - m_t)^2 + eps
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad - m_t) * (grad - m_t) * (1 - beta2_t)
        v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = scatter_add(v, indices, v_scaled_g_values + epsilon_t)
        # amsgrad
        vhat = self.get_slot(var, "vhat")
        if self.amsgrad:
            vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
            v_sqrt = math_ops.sqrt(vhat_t)
        else:
            vhat_t = state_ops.assign(vhat, vhat)
            v_sqrt = math_ops.sqrt(v_t)
        var_update = state_ops.assign_sub(
            var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t])

    def _apply_sparse(self, grad, var):
        return self._apply_sparse_shared(
            grad.values,
            var,
            grad.indices,
            lambda x, i, v: state_ops.scatter_add(  # pylint: disable=g-long-lambda
                x,
                i,
                v,
                use_locking=self._use_locking))

    def _resource_scatter_add(self, x, i, v):
        with ops.control_dependencies(
            [resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
            return x.value()

    def _resource_apply_sparse(self, grad, var, indices):
        return self._apply_sparse_shared(grad, var, indices,
                                         self._resource_scatter_add)

    def _finish(self, update_ops, name_scope):
        # Update the power accumulators after all variable updates complete.
        with ops.control_dependencies(update_ops):
            beta1_power, beta2_power = self._get_beta_accumulators()
            with ops.colocate_with(beta1_power):
                update_beta1 = beta1_power.assign(
                    beta1_power * self._beta1_t, use_locking=self._use_locking)
                update_beta2 = beta2_power.assign(
                    beta2_power * self._beta2_t, use_locking=self._use_locking)
        return control_flow_ops.group(
            *update_ops + [update_beta1, update_beta2], name=name_scope)
class AdaHessian(optimizer_v2.OptimizerV2):
_HAS_AGGREGATE_GRAD = True
def __init__(self,
learning_rate=0.1,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-4,
weight_decay=0.,
hessian_power=1.0,
name='AdaHessian',
average_size_1d=None,
average_size_2d=None,
average_size_3d=-1,
average_size_4d=-1,
**kwargs):
"""Construct a new AdaHessian optimizer.
Args:
learning_rate: A `Tensor`, floating point value, or a schedule that is a
`tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable that
takes no arguments and returns the actual value to use, The learning
rate. Defaults to 0.1.
beta_1: A float value or a constant float tensor, or a callable that takes
no arguments and returns the actual value to use. The exponential decay
rate for the 1st moment estimates. Defaults to 0.9.
beta_2: A float value or a constant float tensor, or a callable that takes
no arguments and returns the actual value to use, The exponential decay
rate for the 2nd moment estimates. Defaults to 0.999.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to
1e-7.
weight_decay: We are using AdamW's weight decay scheme. Defaults to 0.
name: Optional name for the operations created when applying gradients.
Defaults to "Adam".
hessian_power: Hessian power to control the optimizer more similar to first/second
order method (default: 1). You can also try 0.5. For some tasks we found this
to result in better performance.
**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
`decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
gradients by value, `decay` is included for backward compatibility to
allow time inverse decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
# average_size_{1,2,3,4}d:
None: use no spatial averaging
-1: use suggested spatial averaging (recommended for conv kernels)
>= 1: use customized size
"""
super(AdaHessian, self).__init__(name, **kwargs)
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('decay', self._initial_decay)
self._set_hyper('beta_1', beta_1)
self._set_hyper('beta_2', beta_2)
self.epsilon = epsilon or backend_config.epsilon()
self.weight_decay = weight_decay
self.hessian_power = hessian_power
self.average_size_1d = average_size_1d
self.average_size_2d = average_size_2d
self.average_size_3d = average_size_3d
self.average_size_4d = average_size_4d
def _create_slots(self, var_list):
# Create slots for the first and second moments.
# Separate for-loops to respect the ordering of slot variables from v1.
for var in var_list:
self.add_slot(var, 'm')
for var in var_list:
self.add_slot(var, 'v')
def _prepare_local(self, var_device, var_dtype, apply_state):
super(AdaHessian, self)._prepare_local(
var_device, var_dtype, apply_state)
local_step = math_ops.cast(self.iterations + 1, var_dtype)
beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
beta_1_power = math_ops.pow(beta_1_t, local_step)
beta_2_power = math_ops.pow(beta_2_t, local_step)
lr = (
apply_state[(var_device, var_dtype)]['lr_t'] *
(math_ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)))
apply_state[(var_device, var_dtype)].update(
dict(
lr=lr,
epsilon=ops.convert_to_tensor_v2(self.epsilon, var_dtype),
beta_1_t=beta_1_t,
beta_1_power=beta_1_power,
one_minus_beta_1_t=1 - beta_1_t,
beta_2_t=beta_2_t,
beta_2_power=beta_2_power,
one_minus_beta_2_t=1 - beta_2_t))
def set_weights(self, weights):
params = self.weights
# If the weights are generated by Keras V1 optimizer, it includes vhats
# even without amsgrad, i.e, V1 optimizer has 3x + 1 variables, while V2
# optimizer has 2x + 1 variables. Filter vhats out for compatibility.
num_vars = int((len(params) - 1) / 2)
if len(weights) == 3 * num_vars + 1:
weights = weights[:len(params)]
super(AdaHessian, self).set_weights(weights)
    def get_gradients_hessian(self, loss, params):
        """Returns gradients and Hessian of `loss` with respect to `params`.

        The Hessian part is an elementwise estimate of the Hessian diagonal,
        obtained from a Hessian-vector product with a random +/-1 vector via
        double backprop (Hutchinson-style estimator). The estimate is
        stochastic: a fresh unseeded NumPy random vector is drawn per call.

        Arguments:
          loss: Loss tensor.
          params: List of variables.
        Returns:
          List of gradient and Hessian tensors.
        Raises:
          ValueError: In case any gradient cannot be computed (e.g. if gradient
            function not implemented).
        """
        params = nest.flatten(params)
        with backend.get_graph().as_default(), backend.name_scope(self._name +
                                                                  "/gradients"):
            grads = gradients.gradients(loss, params)
            for grad, param in zip(grads, params):
                if grad is None:
                    raise ValueError("Variable {} has `None` for gradient. "
                                     "Please make sure that all of your ops have a "
                                     "gradient defined (i.e. are differentiable). "
                                     "Common ops without gradient: "
                                     "K.argmax, K.round, K.eval.".format(param))
            # WARNING: for now we do not support gradient clip
            # grads = self._clip_gradients(grads)
            # Draw a random Rademacher (+/-1) vector per parameter tensor.
            v = [np.random.uniform(0, 1, size=p.shape) for p in params]
            for vi in v:
                vi[vi < 0.5] = -1
                vi[vi >= 0.5] = 1
            # NOTE(review): dtype is hard-coded to float32 — presumably all
            # params are float32; verify for mixed-precision models.
            v = [tf.convert_to_tensor(vi, dtype=tf.dtypes.float32) for vi in v]
            # v . g, then grad of that w.r.t. params gives H v (double backprop).
            vprod = tf.reduce_sum([tf.reduce_sum(vi * grad)
                                   for vi, grad in zip(v, grads)])
            Hv = gradients.gradients(vprod, params)
            # |Hv * v| approximates the absolute Hessian diagonal elementwise.
            Hd = [tf.abs(Hvi * vi) for Hvi, vi in zip(Hv, v)]
        return grads, Hd
def _filter_grads_hessian(self, grads_hessian_and_vars):
"""Filter out iterable with grad equal to None."""
grads_hessian_and_vars = tuple(grads_hessian_and_vars)
if not grads_hessian_and_vars:
return grads_hessian_and_vars
filtered = []
vars_with_empty_grads = []
for grad, hessian, var in grads_hessian_and_vars:
if grad is None:
vars_with_empty_grads.append(var)
else:
filtered.append((grad, hessian, var))
filtered = tuple(filtered)
if not filtered:
raise ValueError("No gradients provided for any variable: %s." %
([v.name for _, v in grads_and_vars],))
if vars_with_empty_grads:
logging.warning(
("Gradients do not exist for variables %s when minimizing the loss."),
([v.name for v in vars_with_empty_grads]))
return filtered
    def apply_gradients_hessian(self,
                                grads_hessian_and_vars,
                                name=None,
                                experimental_aggregate_gradients=True):
        """Apply (gradient, hessian, variable) triples, mirroring
        `OptimizerV2.apply_gradients` but threading the Hessian estimate
        through to `_resource_apply_dense`.

        Args:
          grads_hessian_and_vars: Iterable of (gradient, hessian, variable)
            triples.
          name: Optional name for the returned operation.
          experimental_aggregate_gradients: Whether to sum gradients/Hessians
            across replicas before applying.
        Returns:
          The grouped update op (or a no-op when there is nothing to apply).
        Raises:
          RuntimeError: If called in cross-replica context.
          NotImplementedError: If aggregation is disabled under
            ParameterServerStrategy/CentralStorageStrategy.
        """
        grads_hessian_and_vars = self._filter_grads_hessian(
            grads_hessian_and_vars)
        var_list = [v for (_, _, v) in grads_hessian_and_vars]
        with backend.name_scope(self._name):
            # Create iteration if necessary.
            with ops.init_scope():
                self._create_all_weights(var_list)
            if not grads_hessian_and_vars:
                # Distribution strategy does not support reducing an empty list of
                # gradients
                return control_flow_ops.no_op()
            if distribute_ctx.in_cross_replica_context():
                raise RuntimeError(
                    "`apply_gradients() cannot be called in cross-replica context. "
                    "Use `tf.distribute.Strategy.run` to enter replica "
                    "context.")
            strategy = distribute_ctx.get_strategy()
            if (not experimental_aggregate_gradients and strategy and isinstance(
                    strategy.extended,
                    parameter_server_strategy.ParameterServerStrategyExtended)):
                raise NotImplementedError(
                    "`experimental_aggregate_gradients=False is not supported for "
                    "ParameterServerStrategy and CentralStorageStrategy")
            apply_state = self._prepare(var_list)
            if experimental_aggregate_gradients:
                # Cross-replica sum of both gradients and Hessian estimates.
                reduced_grads, reduced_hessian = self._aggregate_gradients_hessian(
                    grads_hessian_and_vars)
                var_list = [v for _, _, v in grads_hessian_and_vars]
                grads_hessian_and_vars = list(
                    zip(reduced_grads, reduced_hessian, var_list))
            return distribute_ctx.get_replica_context().merge_call(
                functools.partial(self._distributed_apply,
                                  apply_state=apply_state),
                args=(grads_hessian_and_vars,),
                kwargs={
                    "name": name,
                })
def _aggregate_gradients_hessian(self, grads_hessian_and_vars):
"""Returns all-reduced gradients.
Args:
grads_and_vars: List of (gradient, hessian, variable) pairs.
Returns:
Two lists of all-reduced gradients and Hessian.
"""
grads_hessian_and_vars = list(grads_hessian_and_vars)
filtered_grads_hessian_and_vars = self._filter_grads_hessian(
grads_hessian_and_vars)
# split the list so that we can use the all_recude_fn
filtered_grads_and_vars = tuple(
[(g, v) for (g, h, v) in filtered_grads_hessian_and_vars])
filtered_hessian_and_vars = tuple(
[(h, v) for (g, h, v) in filtered_grads_hessian_and_vars])
def all_reduce_fn(distribution, grads_hessian_and_vars):
# WARNING: this ReduceOp.SUM can only support two entries, for now we have three.
# So far now, we do it for two steps to make life easier.
return distribution.extended.batch_reduce_to(
ds_reduce_util.ReduceOp.SUM, grads_hessian_and_vars)
if filtered_grads_hessian_and_vars:
reduced_part1 = distribute_ctx.get_replica_context().merge_call(
all_reduce_fn, args=(filtered_grads_and_vars,))
reduced_part2 = distribute_ctx.get_replica_context().merge_call(
all_reduce_fn, args=(filtered_hessian_and_vars,))
else:
reduced = []
# Copy 'reduced' but add None gradients back in
reduced_with_nones_grads = []
reduced_with_nones_hessian = []
reduced_pos = 0
for g, h, _ in grads_hessian_and_vars:
if g is None:
reduced_with_nones_grads.append(None)
reduced_with_nones_hessian.append(None)
else:
reduced_with_nones_grads.append(reduced_part1[reduced_pos])
reduced_with_nones_hessian.append(reduced_part2[reduced_pos])
reduced_pos += 1
return reduced_with_nones_grads, reduced_with_nones_hessian
@def_function.function(experimental_compile=True)
def _resource_apply_dense(self, grad, hess, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
self._fallback_apply_state(var_device, var_dtype))
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
m.assign_add((grad - m) * (1 - coefficients['beta_1_t']))
# this part need to be changed for spatial averaging
if len(v.shape) == 1:
resize = self.average_size_1d
elif len(v.shape) == 2:
resize = self.average_size_2d
elif len(v.shape) == 3:
resize = self.average_size_3d
elif len(v.shape) == 4:
resize = self.average_size_4d
else:
raise Exception(
'You need to define the spatial average size by yourself!')
if resize == None:
v.assign_add((math_ops.square(hess) - v) *
(1 - coefficients['beta_2_t']))
elif resize == -1:
if len(v.shape) == 1:
v.assign_add((math_ops.square(hess) - v) *
(1 - coefficients['beta_2_t']))
elif len(v.shape) == 2:
hess_average = tf.reduce_mean(hess, [0], keepdims=True)
v.assign_add((math_ops.square(hess_average) - v)
* (1 - coefficients['beta_2_t']))
elif len(v.shape) == 3:
hess_average = tf.reduce_mean(hess, [0], keepdims=True)
v.assign_add((math_ops.square(hess_average) - v)
* (1 - coefficients['beta_2_t']))
elif len(v.shape) == 4:
hess_average = tf.reduce_mean(hess, [0, 1], keepdims=True)
v.assign_add((math_ops.square(hess_average) - v)
* (1 - coefficients['beta_2_t']))
else:
if resize <= 0:
raise Exception(
'You need to define the spatial average size >= 1!')
hess_average = tf.reshape(hess, [resize, -1])
hess_average = tf.reduce_mean(hess_average, [0])
hess_average = tf.repeat(hess_average, resize)
hess_average = tf.reshape(hess_average, v.shape)
v.assign_add((math_ops.square(hess_average) - v)
* (1 - coefficients['beta_2_t']))
bias_correct1 = 1 - coefficients['beta_1_power']
bias_correct2 = 1 - coefficients['beta_2_power']
if self.weight_decay != 0:
var.assign_sub(coefficients['lr_t'] * self.weight_decay * var)
# denom = np.power(math_ops.sqrt(v / bias_correct2), self.hessian_power) + coefficients['epsilon']
denom = tf.math.pow(math_ops.sqrt(v / bias_correct2),
self.hessian_power) + coefficients['epsilon']
var.assign_sub(coefficients['lr_t'] * m / bias_correct1 / denom)
    @def_function.function(experimental_compile=True)
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        # Sparse (IndexedSlices) gradients are not implemented for AdaHessian.
        raise Exception('For now, we do not support sparse update yet.')
def get_config(self):
    """Return the serializable configuration of this optimizer.

    Extends the base-class config with AdaHessian's own hyperparameters
    so the optimizer can be reconstructed from the returned dict.
    """
    base_config = super(AdaHessian, self).get_config()
    own_config = {
        'learning_rate': self._serialize_hyperparameter('learning_rate'),
        'decay': self._serialize_hyperparameter('decay'),
        'beta_1': self._serialize_hyperparameter('beta_1'),
        'beta_2': self._serialize_hyperparameter('beta_2'),
        'epsilon': self.epsilon,
        'weight_decay': self.weight_decay,
    }
    base_config.update(own_config)
    return base_config
def _distributed_apply(self, distribution, grads_hessian_and_vars, name, apply_state):
    """`apply_gradients` using a `DistributionStrategy`.

    Mirrors Keras OptimizerV2._distributed_apply, extended to carry a
    Hessian estimate alongside each gradient.
    """
    def apply_grad_to_update_var(var, grad, hess):
        """Apply gradient to variable."""
        if isinstance(var, ops.Tensor):
            raise NotImplementedError("Trying to update a Tensor ", var)
        apply_kwargs = {}
        # Only forward apply_state if the dense-apply implementation accepts it.
        if "apply_state" in self._dense_apply_args:
            apply_kwargs["apply_state"] = apply_state
        update_op = self._resource_apply_dense(
            grad, hess, var, **apply_kwargs)
        # Re-apply the variable constraint after the optimizer step, ordered
        # after the update op.
        if var.constraint is not None:
            with ops.control_dependencies([update_op]):
                return var.assign(var.constraint(var))
        else:
            return update_op

    eagerly_outside_functions = ops.executing_eagerly_outside_functions()
    update_ops = []
    with ops.name_scope(name or self._name, skip_on_eager=True):
        for grad, hess, var in grads_hessian_and_vars:
            def _assume_mirrored(grad, hess):
                # Treat per-replica values as mirrored so they can be applied
                # inside distribution.extended.update.
                if isinstance(grad, ds_values.PerReplica):
                    return ds_values.Mirrored(grad.values), ds_values.Mirrored(hess.values)
                return grad, hess
            grad, hess = nest.map_structure(_assume_mirrored, grad, hess)
            # Colocate the update with variables to avoid unnecessary communication
            # delays. See b/136304694.
            with distribution.extended.colocate_vars_with(var):
                with ops.name_scope("update" if eagerly_outside_functions else
                                    "update_" + var.op.name, skip_on_eager=True):
                    update_ops.extend(distribution.extended.update(
                        var, apply_grad_to_update_var, args=(grad, hess), group=False))
        any_symbolic = any(isinstance(i, ops.Operation) or
                           tf_utils.is_symbolic_tensor(i) for i in update_ops)
        if not context.executing_eagerly() or any_symbolic:
            # If the current context is graph mode or any of the update ops are
            # symbolic then the step update should be carried out under a graph
            # context. (eager updates execute immediately)
            with ops._get_graph_from_inputs(update_ops).as_default():  # pylint: disable=protected-access
                with ops.control_dependencies(update_ops):
                    return self._iterations.assign_add(1, read_value=False)
        # NOTE(review): the trailing '| ... |' tokens are dataset-extraction
        # residue fused onto this line, not code.
        return self._iterations.assign_add(1) | 0.880707 | 0.243406 |
import os
import json
import logging
import numpy
from osgeo import gdal
from ground_surveyor import gsconfig
def pick_best_pile_layer(pile_md_filename, selection_options):
    """Pick the index of the best layer in a pile according to metadata.

    :param pile_md_filename: path to the pile's *_metadata.json file.
    :param selection_options: dict with optional keys 'order_field',
        'cross_correlation_threshold', 'small_cross_correlation_threshold'
        and 'zero_threshold'.
    :return: index of the selected layer, or -1 if no layer qualifies.
    """
    # Close the metadata file deterministically (original leaked the handle
    # from json.load(open(...))).
    with open(pile_md_filename) as md_file:
        pile_md = json.load(md_file)

    target_field = selection_options.get('order_field', 'normalized_sharpness')
    cc_threshold = selection_options.get('cross_correlation_threshold', None)
    small_cc_threshold = selection_options.get(
        'small_cross_correlation_threshold', None)
    zero_threshold = selection_options.get('zero_threshold', None)

    def _target_value(i):
        # 'normalized_sharpness' is derived; anything else is read straight
        # from the metadata arrays.
        if target_field == 'normalized_sharpness':
            return pile_md['sharpness'][i] / pile_md['intensity_median'][i]
        # BUG FIX: the original returned the whole list (pile_md[target_field])
        # instead of the i-th entry, which raised TypeError on comparison.
        return pile_md[target_field][i]

    best_i = -1
    best_value = 0
    for i in range(len(pile_md['sharpness'])):
        if cc_threshold is not None and \
                pile_md['cross_correlation_raw'][i] < cc_threshold:
            continue
        if small_cc_threshold is not None and \
                pile_md['cross_correlation_small'][i] < small_cc_threshold:
            continue
        if zero_threshold is not None and \
                pile_md['n_pixel_at_zero_intensity'][i] > zero_threshold:
            continue
        value = _target_value(i)
        if value > best_value:
            best_value = value
            best_i = i

    # If nothing met the correlation thresholds, try again without them
    # (the zero-intensity threshold still applies).
    if best_i == -1 and cc_threshold is not None:
        for i in range(len(pile_md['sharpness'])):
            if zero_threshold is not None and \
                    pile_md['n_pixel_at_zero_intensity'][i] > zero_threshold:
                continue
            value = _target_value(i)
            if value > best_value:
                best_value = value
                best_i = i

    logging.debug('Picked input metatile %d for pile %s with %s value of %s.',
                  best_i,
                  os.path.basename(pile_md_filename)[:15],
                  target_field, best_value)
    return best_i
def get_pile_layer(pile_md_filename, i_file):
    """Read layer ``i_file`` of the raw pile matching a metadata filename."""
    raw_path = pile_md_filename.replace('_datacube_metadata.json', '_raw.tif')
    dataset = gdal.Open(raw_path)
    # GDAL band numbering is 1-based.
    band = dataset.GetRasterBand(i_file + 1)
    return band.ReadAsArray()
def merge_pile_into_mosaic(mosaic_ds, pile_md_filename, selected_i,
                           selected_img, processing_options):
    """Write one selected pile layer into its 256x256 slot of the mosaic.

    :param mosaic_ds: open GDAL dataset of the destination mosaic.
    :param pile_md_filename: metadata filename ('uf_<i>_<j>_...'); the i/j
        parts give the tile position inside the mosaic.
    :param selected_i: index of the chosen layer within the pile.
    :param selected_img: 2D array with the chosen layer's pixels.
    :param processing_options: dict; 'normalize_intensity' rescales the tile
        so its median intensity becomes 1000.
    """
    pile_parts = os.path.basename(pile_md_filename).split('_')[0:3]
    assert pile_parts[0] == 'uf'
    uf_i = int(pile_parts[1])
    uf_j = int(pile_parts[2])

    if processing_options.get('normalize_intensity', False):
        # FIX: close the metadata file deterministically (the original leaked
        # the handle from json.load(open(...))).
        with open(pile_md_filename) as md_file:
            pile_md = json.load(md_file)
        selected_img = selected_img * 1000.0 \
            / pile_md['intensity_median'][selected_i]

    mosaic_ds.GetRasterBand(1).WriteArray(
        selected_img, uf_i * 256, uf_j * 256)

    # If the mosaic carries an alpha band (assumed to be the last band),
    # mark this tile fully opaque.
    alpha_band = mosaic_ds.GetRasterBand(mosaic_ds.RasterCount)
    if alpha_band.GetColorInterpretation() == gdal.GCI_AlphaBand:
        opaque = 65535 if alpha_band.DataType == gdal.GDT_UInt16 else 255
        alpha_band.WriteArray(
            numpy.ones(selected_img.shape) * opaque,
            uf_i * 256, uf_j * 256)
def make_metatile(pile_directory):
    """Create an empty 4096x4096 single-band UInt16 GeoTIFF mosaic."""
    mosaic_filename = os.path.join(pile_directory, 'mosaic.tif')
    driver = gdal.GetDriverByName('GTiff')
    mosaic_ds = driver.Create(mosaic_filename, 4096, 4096, 1, gdal.GDT_UInt16)
    # TODO: Try to add georeferencing...
    return mosaic_filename, mosaic_ds
def mosaic_metatile(pile_directory,
selection_options,
processing_options={}):
mosaic_filename, mosaic_ds = make_metatile(pile_directory)
counter = 0
for filename in os.listdir(pile_directory):
if (not filename.startswith('uf_')) or (not filename.endswith('_metadata.json')):
continue
pile_md_filename = os.path.join(pile_directory, filename)
i_file = pick_best_pile_layer(pile_md_filename, selection_options)
if i_file >= 0:
selected_img = get_pile_layer(pile_md_filename, i_file)
merge_pile_into_mosaic(mosaic_ds, pile_md_filename,
i_file, selected_img,
processing_options)
counter += 1
logging.info('%d piles contributed to making %s.',
counter, mosaic_filename)
return mosaic_filename | ground_surveyor/uf_mosaic.py | import os
import json
import logging
import numpy
from osgeo import gdal
from ground_surveyor import gsconfig
def pick_best_pile_layer(pile_md_filename, selection_options):
    """Pick the index of the best layer in a pile according to metadata.

    :param pile_md_filename: path to the pile's *_metadata.json file.
    :param selection_options: dict with optional keys 'order_field',
        'cross_correlation_threshold', 'small_cross_correlation_threshold'
        and 'zero_threshold'.
    :return: index of the selected layer, or -1 if no layer qualifies.
    """
    # Close the metadata file deterministically (original leaked the handle
    # from json.load(open(...))).
    with open(pile_md_filename) as md_file:
        pile_md = json.load(md_file)

    target_field = selection_options.get('order_field', 'normalized_sharpness')
    cc_threshold = selection_options.get('cross_correlation_threshold', None)
    small_cc_threshold = selection_options.get(
        'small_cross_correlation_threshold', None)
    zero_threshold = selection_options.get('zero_threshold', None)

    def _target_value(i):
        # 'normalized_sharpness' is derived; anything else is read straight
        # from the metadata arrays.
        if target_field == 'normalized_sharpness':
            return pile_md['sharpness'][i] / pile_md['intensity_median'][i]
        # BUG FIX: the original returned the whole list (pile_md[target_field])
        # instead of the i-th entry, which raised TypeError on comparison.
        return pile_md[target_field][i]

    best_i = -1
    best_value = 0
    for i in range(len(pile_md['sharpness'])):
        if cc_threshold is not None and \
                pile_md['cross_correlation_raw'][i] < cc_threshold:
            continue
        if small_cc_threshold is not None and \
                pile_md['cross_correlation_small'][i] < small_cc_threshold:
            continue
        if zero_threshold is not None and \
                pile_md['n_pixel_at_zero_intensity'][i] > zero_threshold:
            continue
        value = _target_value(i)
        if value > best_value:
            best_value = value
            best_i = i

    # If nothing met the correlation thresholds, try again without them
    # (the zero-intensity threshold still applies).
    if best_i == -1 and cc_threshold is not None:
        for i in range(len(pile_md['sharpness'])):
            if zero_threshold is not None and \
                    pile_md['n_pixel_at_zero_intensity'][i] > zero_threshold:
                continue
            value = _target_value(i)
            if value > best_value:
                best_value = value
                best_i = i

    logging.debug('Picked input metatile %d for pile %s with %s value of %s.',
                  best_i,
                  os.path.basename(pile_md_filename)[:15],
                  target_field, best_value)
    return best_i
def get_pile_layer(pile_md_filename, i_file):
    """Read layer ``i_file`` of the raw pile matching a metadata filename."""
    raw_path = pile_md_filename.replace('_datacube_metadata.json', '_raw.tif')
    dataset = gdal.Open(raw_path)
    # GDAL band numbering is 1-based.
    band = dataset.GetRasterBand(i_file + 1)
    return band.ReadAsArray()
def merge_pile_into_mosaic(mosaic_ds, pile_md_filename, selected_i,
                           selected_img, processing_options):
    """Write one selected pile layer into its 256x256 slot of the mosaic.

    :param mosaic_ds: open GDAL dataset of the destination mosaic.
    :param pile_md_filename: metadata filename ('uf_<i>_<j>_...'); the i/j
        parts give the tile position inside the mosaic.
    :param selected_i: index of the chosen layer within the pile.
    :param selected_img: 2D array with the chosen layer's pixels.
    :param processing_options: dict; 'normalize_intensity' rescales the tile
        so its median intensity becomes 1000.
    """
    pile_parts = os.path.basename(pile_md_filename).split('_')[0:3]
    assert pile_parts[0] == 'uf'
    uf_i = int(pile_parts[1])
    uf_j = int(pile_parts[2])

    if processing_options.get('normalize_intensity', False):
        # FIX: close the metadata file deterministically (the original leaked
        # the handle from json.load(open(...))).
        with open(pile_md_filename) as md_file:
            pile_md = json.load(md_file)
        selected_img = selected_img * 1000.0 \
            / pile_md['intensity_median'][selected_i]

    mosaic_ds.GetRasterBand(1).WriteArray(
        selected_img, uf_i * 256, uf_j * 256)

    # If the mosaic carries an alpha band (assumed to be the last band),
    # mark this tile fully opaque.
    alpha_band = mosaic_ds.GetRasterBand(mosaic_ds.RasterCount)
    if alpha_band.GetColorInterpretation() == gdal.GCI_AlphaBand:
        opaque = 65535 if alpha_band.DataType == gdal.GDT_UInt16 else 255
        alpha_band.WriteArray(
            numpy.ones(selected_img.shape) * opaque,
            uf_i * 256, uf_j * 256)
def make_metatile(pile_directory):
    """Create an empty 4096x4096 single-band UInt16 GeoTIFF mosaic."""
    mosaic_filename = os.path.join(pile_directory, 'mosaic.tif')
    driver = gdal.GetDriverByName('GTiff')
    mosaic_ds = driver.Create(mosaic_filename, 4096, 4096, 1, gdal.GDT_UInt16)
    # TODO: Try to add georeferencing...
    return mosaic_filename, mosaic_ds
def mosaic_metatile(pile_directory,
selection_options,
processing_options={}):
mosaic_filename, mosaic_ds = make_metatile(pile_directory)
counter = 0
for filename in os.listdir(pile_directory):
if (not filename.startswith('uf_')) or (not filename.endswith('_metadata.json')):
continue
pile_md_filename = os.path.join(pile_directory, filename)
i_file = pick_best_pile_layer(pile_md_filename, selection_options)
if i_file >= 0:
selected_img = get_pile_layer(pile_md_filename, i_file)
merge_pile_into_mosaic(mosaic_ds, pile_md_filename,
i_file, selected_img,
processing_options)
counter += 1
logging.info('%d piles contributed to making %s.',
counter, mosaic_filename)
return mosaic_filename | 0.178669 | 0.251182 |
import select
import socket
from typing import Union
from tmtccmd.utility.logger import get_console_logger
from tmtccmd.com_if.com_interface_base import CommunicationInterface
from tmtccmd.tm.definitions import TelemetryListT
from tmtccmd.utility.tmtc_printer import TmTcPrinter
from tmtccmd.config.definitions import EthernetAddressT, CoreModeList
LOGGER = get_console_logger()
UDP_RECV_WIRETAPPING_ENABLED = False
UDP_SEND_WIRETAPPING_ENABLED = False
# pylint: disable=abstract-method
# pylint: disable=arguments-differ
# pylint: disable=too-many-arguments
class TcpIpUdpComIF(CommunicationInterface):
    """Communication interface for UDP communication."""

    def __init__(
        self,
        com_if_key: str,
        tm_timeout: float,
        tc_timeout_factor: float,
        send_address: EthernetAddressT,
        max_recv_size: int,
        recv_addr: Union[None, EthernetAddressT] = None,
        tmtc_printer: Union[None, TmTcPrinter] = None,
        init_mode: int = CoreModeList.LISTENER_MODE,
    ):
        """Initialize a communication interface to send and receive UDP datagrams.

        :param tm_timeout: Telemetry timeout in seconds; stored for users of this interface.
        :param tc_timeout_factor: Telecommand timeout factor; stored for users of this interface.
        :param send_address: (host, port) tuple telecommands are sent to.
        :param max_recv_size: Maximum number of bytes read per received datagram.
        :param recv_addr: Optional (host, port) tuple to bind the receive socket to.
        :param tmtc_printer: Printer instance, can be passed optionally to allow packet debugging
        :param init_mode: In LISTENER_MODE, open() sends a ping so the remote
            side learns our return address.
        """
        super().__init__(com_if_key=com_if_key, tmtc_printer=tmtc_printer)
        self.tm_timeout = tm_timeout
        self.tc_timeout_factor = tc_timeout_factor
        # Created lazily in open(); None marks the interface as closed.
        self.udp_socket = None
        self.send_address = send_address
        self.recv_addr = recv_addr
        self.max_recv_size = max_recv_size
        self.init_mode = init_mode

    def __del__(self):
        # Best-effort cleanup; never let destructor errors propagate.
        try:
            self.close()
        except IOError:
            LOGGER.warning("Could not close UDP communication interface!")

    def initialize(self, args: any = None) -> any:
        # Nothing to prepare before open() for plain UDP.
        pass

    def open(self, args: any = None):
        """Create the (optionally bound) non-blocking UDP socket."""
        self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Bind is possible but should not be necessary, and introduces risk of port already
        # being used.
        # See: https://docs.microsoft.com/en-us/windows/win32/api/winsock/nf-winsock-bind
        if self.recv_addr is not None:
            LOGGER.info(
                f"Binding UDP socket to {self.recv_addr[0]} and port {self.recv_addr[1]}"
            )
            self.udp_socket.bind(self.recv_addr)
        # Set non-blocking because we use select.
        self.udp_socket.setblocking(False)
        if self.init_mode == CoreModeList.LISTENER_MODE:
            # Local import to avoid a circular dependency at module load time
            # (presumably — TODO confirm).
            from tmtccmd.pus.service_17_test import pack_service17_ping_command
            # Send ping command immediately so the reception address is known for UDP
            ping_cmd = pack_service17_ping_command(ssc=0)
            self.send(ping_cmd.pack())

    def close(self, args: any = None) -> None:
        if self.udp_socket is not None:
            self.udp_socket.close()

    def send(self, data: bytearray):
        # Silently ignore sends while the interface is closed.
        if self.udp_socket is None:
            return
        bytes_sent = self.udp_socket.sendto(data, self.send_address)
        if bytes_sent != len(data):
            LOGGER.warning("Not all bytes were sent!")

    def data_available(self, timeout: float = 0, parameters: any = 0) -> bool:
        """Return True when at least one datagram is ready to be read."""
        if self.udp_socket is None:
            return False
        ready = select.select([self.udp_socket], [], [], timeout)
        if ready[0]:
            return True
        return False

    def receive(self, poll_timeout: float = 0) -> TelemetryListT:
        """Poll for one datagram and return it as a single-entry packet list."""
        if self.udp_socket is None:
            return []
        try:
            ready = self.data_available(poll_timeout)
            if ready:
                # sender_addr is intentionally unused; replies go to send_address.
                data, sender_addr = self.udp_socket.recvfrom(self.max_recv_size)
                packet_list = [bytearray(data)]
                return packet_list
            return []
        except ConnectionResetError:
            # NOTE(review): on Windows a prior send to an unreachable port can
            # surface here — confirm. Trailing '| ... |' tokens below are
            # dataset-extraction residue fused onto this line.
            LOGGER.warning("Connection reset exception occured!")
            return [] | src/tmtccmd/com_if/tcpip_udp_com_if.py | import select
import socket
from typing import Union
from tmtccmd.utility.logger import get_console_logger
from tmtccmd.com_if.com_interface_base import CommunicationInterface
from tmtccmd.tm.definitions import TelemetryListT
from tmtccmd.utility.tmtc_printer import TmTcPrinter
from tmtccmd.config.definitions import EthernetAddressT, CoreModeList
LOGGER = get_console_logger()
UDP_RECV_WIRETAPPING_ENABLED = False
UDP_SEND_WIRETAPPING_ENABLED = False
# pylint: disable=abstract-method
# pylint: disable=arguments-differ
# pylint: disable=too-many-arguments
class TcpIpUdpComIF(CommunicationInterface):
    """Communication interface for UDP communication."""

    def __init__(
        self,
        com_if_key: str,
        tm_timeout: float,
        tc_timeout_factor: float,
        send_address: EthernetAddressT,
        max_recv_size: int,
        recv_addr: Union[None, EthernetAddressT] = None,
        tmtc_printer: Union[None, TmTcPrinter] = None,
        init_mode: int = CoreModeList.LISTENER_MODE,
    ):
        """Initialize a communication interface to send and receive UDP datagrams.

        :param tm_timeout: Telemetry timeout in seconds; stored for users of this interface.
        :param tc_timeout_factor: Telecommand timeout factor; stored for users of this interface.
        :param send_address: (host, port) tuple telecommands are sent to.
        :param max_recv_size: Maximum number of bytes read per received datagram.
        :param recv_addr: Optional (host, port) tuple to bind the receive socket to.
        :param tmtc_printer: Printer instance, can be passed optionally to allow packet debugging
        :param init_mode: In LISTENER_MODE, open() sends a ping so the remote
            side learns our return address.
        """
        super().__init__(com_if_key=com_if_key, tmtc_printer=tmtc_printer)
        self.tm_timeout = tm_timeout
        self.tc_timeout_factor = tc_timeout_factor
        # Created lazily in open(); None marks the interface as closed.
        self.udp_socket = None
        self.send_address = send_address
        self.recv_addr = recv_addr
        self.max_recv_size = max_recv_size
        self.init_mode = init_mode

    def __del__(self):
        # Best-effort cleanup; never let destructor errors propagate.
        try:
            self.close()
        except IOError:
            LOGGER.warning("Could not close UDP communication interface!")

    def initialize(self, args: any = None) -> any:
        # Nothing to prepare before open() for plain UDP.
        pass

    def open(self, args: any = None):
        """Create the (optionally bound) non-blocking UDP socket."""
        self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Bind is possible but should not be necessary, and introduces risk of port already
        # being used.
        # See: https://docs.microsoft.com/en-us/windows/win32/api/winsock/nf-winsock-bind
        if self.recv_addr is not None:
            LOGGER.info(
                f"Binding UDP socket to {self.recv_addr[0]} and port {self.recv_addr[1]}"
            )
            self.udp_socket.bind(self.recv_addr)
        # Set non-blocking because we use select.
        self.udp_socket.setblocking(False)
        if self.init_mode == CoreModeList.LISTENER_MODE:
            # Local import to avoid a circular dependency at module load time
            # (presumably — TODO confirm).
            from tmtccmd.pus.service_17_test import pack_service17_ping_command
            # Send ping command immediately so the reception address is known for UDP
            ping_cmd = pack_service17_ping_command(ssc=0)
            self.send(ping_cmd.pack())

    def close(self, args: any = None) -> None:
        if self.udp_socket is not None:
            self.udp_socket.close()

    def send(self, data: bytearray):
        # Silently ignore sends while the interface is closed.
        if self.udp_socket is None:
            return
        bytes_sent = self.udp_socket.sendto(data, self.send_address)
        if bytes_sent != len(data):
            LOGGER.warning("Not all bytes were sent!")

    def data_available(self, timeout: float = 0, parameters: any = 0) -> bool:
        """Return True when at least one datagram is ready to be read."""
        if self.udp_socket is None:
            return False
        ready = select.select([self.udp_socket], [], [], timeout)
        if ready[0]:
            return True
        return False

    def receive(self, poll_timeout: float = 0) -> TelemetryListT:
        """Poll for one datagram and return it as a single-entry packet list."""
        if self.udp_socket is None:
            return []
        try:
            ready = self.data_available(poll_timeout)
            if ready:
                # sender_addr is intentionally unused; replies go to send_address.
                data, sender_addr = self.udp_socket.recvfrom(self.max_recv_size)
                packet_list = [bytearray(data)]
                return packet_list
            return []
        except ConnectionResetError:
            # NOTE(review): on Windows a prior send to an unreachable port can
            # surface here — confirm. Trailing '| ... |' tokens below are
            # dataset-extraction residue fused onto this line.
            LOGGER.warning("Connection reset exception occured!")
            return [] | 0.698638 | 0.063395 |
import os
from os.path import join, exists
import numpy as np
from pgl.utils.data.dataloader import Dataloader
from pgl.utils.data.dataset import StreamDataset as PglStreamDataset
from pahelix.utils.data_utils import save_data_list_to_npz, load_npz_to_data_list
__all__ = ['StreamDataset']
class StreamDataset(object):
"""tbd"""
def __init__(self,
data_generator=None,
npz_data_path=None):
super(StreamDataset, self).__init__()
assert (data_generator is None) ^ (npz_data_path is None), \
"Only data_generator or npz_data_path should be set."
self.data_generator = data_generator
self.npz_data_path = npz_data_path
if not npz_data_path is None:
self.data_generator = self._load_npz_data(npz_data_path)
def _load_npz_data(self, data_path):
files = [file for file in os.listdir(data_path) if file.endswith('.npz')]
for file in files:
data_list = load_npz_to_data_list(join(data_path, file))
for data in data_list:
yield data
def _save_npz_data(self, data_list, data_path, max_num_per_file=10000):
if not exists(data_path):
os.makedirs(data_path)
sub_data_list = []
count = 0
for data in self.data_generator:
sub_data_list.append(data)
if len(sub_data_list) == 0:
file = 'part-%05d.npz' % count
save_data_list_to_npz(join(data_path, file), sub_data_list)
sub_data_list = []
count += 1
if len(sub_data_list) > 0:
file = 'part-%05d.npz' % count
save_data_list_to_npz(join(data_path, file), sub_data_list)
def save_data(self, data_path):
"""tbd"""
self._save_npz_data(self.data_generator, data_path)
def iter_batch(self, batch_size, num_workers=4, shuffle_size=1000, collate_fn=None):
"""tbd"""
class _TempDataset(PglStreamDataset):
def __init__(self, data_generator):
self.data_generator = data_generator
def __iter__(self):
for data in self.data_generator:
yield data
return Dataloader(_TempDataset(self.data_generator),
batch_size=batch_size,
num_workers=num_workers,
stream_shuffle_size=shuffle_size,
collate_fn=collate_fn) | pahelix/datasets/stream_dataset.py | import os
from os.path import join, exists
import numpy as np
from pgl.utils.data.dataloader import Dataloader
from pgl.utils.data.dataset import StreamDataset as PglStreamDataset
from pahelix.utils.data_utils import save_data_list_to_npz, load_npz_to_data_list
__all__ = ['StreamDataset']
class StreamDataset(object):
"""tbd"""
def __init__(self,
data_generator=None,
npz_data_path=None):
super(StreamDataset, self).__init__()
assert (data_generator is None) ^ (npz_data_path is None), \
"Only data_generator or npz_data_path should be set."
self.data_generator = data_generator
self.npz_data_path = npz_data_path
if not npz_data_path is None:
self.data_generator = self._load_npz_data(npz_data_path)
def _load_npz_data(self, data_path):
files = [file for file in os.listdir(data_path) if file.endswith('.npz')]
for file in files:
data_list = load_npz_to_data_list(join(data_path, file))
for data in data_list:
yield data
def _save_npz_data(self, data_list, data_path, max_num_per_file=10000):
if not exists(data_path):
os.makedirs(data_path)
sub_data_list = []
count = 0
for data in self.data_generator:
sub_data_list.append(data)
if len(sub_data_list) == 0:
file = 'part-%05d.npz' % count
save_data_list_to_npz(join(data_path, file), sub_data_list)
sub_data_list = []
count += 1
if len(sub_data_list) > 0:
file = 'part-%05d.npz' % count
save_data_list_to_npz(join(data_path, file), sub_data_list)
def save_data(self, data_path):
"""tbd"""
self._save_npz_data(self.data_generator, data_path)
def iter_batch(self, batch_size, num_workers=4, shuffle_size=1000, collate_fn=None):
"""tbd"""
class _TempDataset(PglStreamDataset):
def __init__(self, data_generator):
self.data_generator = data_generator
def __iter__(self):
for data in self.data_generator:
yield data
return Dataloader(_TempDataset(self.data_generator),
batch_size=batch_size,
num_workers=num_workers,
stream_shuffle_size=shuffle_size,
collate_fn=collate_fn) | 0.396535 | 0.259896 |
import json
import re
from tornado import httpclient
from tornado.gen import coroutine
from torip import utilities
from torip.exceptions import ToripException
__author__ = 'mendrugory'
IPV4_REGEX_PATTERN = '^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$'
def api_factory(api_name, **config):
    """Return the locate-API client for ``api_name``.

    'ip-api' selects IpApi; any other name falls back to AbstractApi.
    """
    return IpApi(**config) if api_name == 'ip-api' else AbstractApi(**config)
def is_ipv4(ip):
    """Return True when ``ip`` is a dotted-quad IPv4 address."""
    return bool(re.match(IPV4_REGEX_PATTERN, ip))
class LocateApi:
    """Base class for asynchronous (Tornado coroutine) IP-location clients.

    Subclasses set ``original_url`` and implement ``build_url``/``adapt``
    (optionally ``check_address``); ``locate`` drives the shared pipeline.
    """

    def __init__(self, api_token=None, ioloop=None):
        # 'url' is never assigned elsewhere in this class; kept for
        # backwards compatibility. 'original_url' is the backend format string.
        self.url = None
        self.original_url = None
        # Optional explicit IOLoop (mainly for testing); None means default.
        self.ioloop = ioloop
        self.api_token = api_token

    @coroutine
    def locate(self, ip):
        """
        Main function of the class whose output will be a dictionary with the information
        provided by the selected API.
        :param ip: IP or server name (String)
        :return: dict(), or None when the backend returned an empty payload
        :raises ToripException: when check_address rejects the input
        """
        err = self.check_address(ip)
        if err is not None:
            raise ToripException(f"{err}")
        url = self.build_url(ip)
        data = yield self.fetch_data(url)
        result = None
        if data:
            result = LocateApi.enrich(self.adapt(data))
        return result

    @coroutine
    def fetch_data(self, url):
        """
        Fetch and JSON-decode the payload from the given url.
        :return: dict()
        """
        http_client = self.get_http_client()
        try:
            response = yield http_client.fetch(url)
            data = json.loads(response.body.decode('utf-8'))
        finally:
            # Release the client even when the fetch or the decode fails.
            http_client.close()
        return data

    def get_http_client(self):
        """
        It creates an instance of AsyncHTTPClient. You can pass the ioloop (prepared for testing).
        :return:
        """
        return httpclient.AsyncHTTPClient(self.ioloop) if self.ioloop else httpclient.AsyncHTTPClient()

    def check_address(self, ip):
        # Hook: return an error string to reject the input, None to accept it.
        return None

    def build_url(self, ip):
        """
        Build the backend-specific query URL for the given ip.
        :return: str
        """
        pass

    def adapt(self, data):
        """
        It adapt the output of the data
        :param data: dictionary with the data from the API
        :return: dict()
        """
        return data

    @staticmethod
    def enrich(data):
        """
        It enrich the received data with the utilities functions
        :param data: dict()
        :return: dict()
        """
        data['google_maps'] = utilities.get_google_maps_url(data)
        return data
class IpApi(LocateApi):
    """Locate API backed by ip-api.com (no API token required)."""

    def __init__(self, api_token=None, ioloop=None):
        super().__init__(api_token=api_token, ioloop=ioloop)
        self.original_url = 'http://ip-api.com/json/{}'

    def build_url(self, ip):
        """Return the query URL for ``ip``."""
        return self.original_url.format(ip)

    def adapt(self, data):
        """Map an ip-api.com payload onto the common result schema."""
        if data.get('status') == 'fail':
            raise ToripException('Error: {}'.format(data['message']))
        # Output key -> source key in the ip-api.com payload.
        field_map = {
            'region_name': 'regionName',
            'region_code': 'region',
            'isp': 'isp',
            'country_name': 'country',
            'country_code': 'countryCode',
            'city': 'city',
            'lat': 'lat',
            'lon': 'lon',
            'address': 'query',
            'time_zone': 'timezone',
            'zip_code': 'zip',
        }
        return {out_key: data[src_key] for out_key, src_key in field_map.items()}
class AbstractApi(LocateApi):
    """
    IpLocateApi for the api of abstractapi.com
    """

    def __init__(self, api_token=None, ioloop=None):
        super().__init__(api_token=api_token, ioloop=ioloop)
        # Endpoint takes both the address and the account's API key.
        self.original_url = 'https://ipgeolocation.abstractapi.com/v1/?ip_address={}&api_key={}'

    def check_address(self, ip):
        # Unlike ip-api.com, this backend cannot resolve hostnames.
        if not is_ipv4(ip):
            return "Error: Locator AbstractApi only accepts IPs"

    def build_url(self, ip):
        return self.original_url.format(ip, self.api_token)

    def adapt(self, data):
        """Map the abstractapi.com payload onto the common result schema."""
        # A non-empty payload without 'connection' indicates a failed lookup.
        if data and 'connection' not in data:
            raise ToripException(f"Error locating address {data['ip_address']}")
        return {
            'region_name': data['region'],
            'region_code': data['region_iso_code'],
            'isp': data['connection']['isp_name'],
            'country_name': data['country'],
            'country_code': data['country_code'],
            'city': data['city'],
            'lat': data['latitude'],
            'lon': data['longitude'],
            'address': data['ip_address'],
            'time_zone': data['timezone']['name'],
            'zip_code': data['postal_code']
            # NOTE(review): trailing '| ... |' tokens below are extraction residue.
        } | torip/ipapis.py | import json
import re
from tornado import httpclient
from tornado.gen import coroutine
from torip import utilities
from torip.exceptions import ToripException
__author__ = 'mendrugory'
IPV4_REGEX_PATTERN = '^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$'
def api_factory(api_name, **config):
    """Return the locate-API client for ``api_name``.

    'ip-api' selects IpApi; any other name falls back to AbstractApi.
    """
    return IpApi(**config) if api_name == 'ip-api' else AbstractApi(**config)
def is_ipv4(ip):
    """Return True when ``ip`` is a dotted-quad IPv4 address."""
    return bool(re.match(IPV4_REGEX_PATTERN, ip))
class LocateApi:
    """Base class for asynchronous (Tornado coroutine) IP-location clients.

    Subclasses set ``original_url`` and implement ``build_url``/``adapt``
    (optionally ``check_address``); ``locate`` drives the shared pipeline.
    """

    def __init__(self, api_token=None, ioloop=None):
        # 'url' is never assigned elsewhere in this class; kept for
        # backwards compatibility. 'original_url' is the backend format string.
        self.url = None
        self.original_url = None
        # Optional explicit IOLoop (mainly for testing); None means default.
        self.ioloop = ioloop
        self.api_token = api_token

    @coroutine
    def locate(self, ip):
        """
        Main function of the class whose output will be a dictionary with the information
        provided by the selected API.
        :param ip: IP or server name (String)
        :return: dict(), or None when the backend returned an empty payload
        :raises ToripException: when check_address rejects the input
        """
        err = self.check_address(ip)
        if err is not None:
            raise ToripException(f"{err}")
        url = self.build_url(ip)
        data = yield self.fetch_data(url)
        result = None
        if data:
            result = LocateApi.enrich(self.adapt(data))
        return result

    @coroutine
    def fetch_data(self, url):
        """
        Fetch and JSON-decode the payload from the given url.
        :return: dict()
        """
        http_client = self.get_http_client()
        try:
            response = yield http_client.fetch(url)
            data = json.loads(response.body.decode('utf-8'))
        finally:
            # Release the client even when the fetch or the decode fails.
            http_client.close()
        return data

    def get_http_client(self):
        """
        It creates an instance of AsyncHTTPClient. You can pass the ioloop (prepared for testing).
        :return:
        """
        return httpclient.AsyncHTTPClient(self.ioloop) if self.ioloop else httpclient.AsyncHTTPClient()

    def check_address(self, ip):
        # Hook: return an error string to reject the input, None to accept it.
        return None

    def build_url(self, ip):
        """
        Build the backend-specific query URL for the given ip.
        :return: str
        """
        pass

    def adapt(self, data):
        """
        It adapt the output of the data
        :param data: dictionary with the data from the API
        :return: dict()
        """
        return data

    @staticmethod
    def enrich(data):
        """
        It enrich the received data with the utilities functions
        :param data: dict()
        :return: dict()
        """
        data['google_maps'] = utilities.get_google_maps_url(data)
        return data
class IpApi(LocateApi):
    """Locate API backed by ip-api.com (no API token required)."""

    def __init__(self, api_token=None, ioloop=None):
        super().__init__(api_token=api_token, ioloop=ioloop)
        self.original_url = 'http://ip-api.com/json/{}'

    def build_url(self, ip):
        """Return the query URL for ``ip``."""
        return self.original_url.format(ip)

    def adapt(self, data):
        """Map an ip-api.com payload onto the common result schema."""
        if data.get('status') == 'fail':
            raise ToripException('Error: {}'.format(data['message']))
        # Output key -> source key in the ip-api.com payload.
        field_map = {
            'region_name': 'regionName',
            'region_code': 'region',
            'isp': 'isp',
            'country_name': 'country',
            'country_code': 'countryCode',
            'city': 'city',
            'lat': 'lat',
            'lon': 'lon',
            'address': 'query',
            'time_zone': 'timezone',
            'zip_code': 'zip',
        }
        return {out_key: data[src_key] for out_key, src_key in field_map.items()}
class AbstractApi(LocateApi):
    """
    IpLocateApi for the api of abstractapi.com
    """

    def __init__(self, api_token=None, ioloop=None):
        super().__init__(api_token=api_token, ioloop=ioloop)
        # Endpoint takes both the address and the account's API key.
        self.original_url = 'https://ipgeolocation.abstractapi.com/v1/?ip_address={}&api_key={}'

    def check_address(self, ip):
        # Unlike ip-api.com, this backend cannot resolve hostnames.
        if not is_ipv4(ip):
            return "Error: Locator AbstractApi only accepts IPs"

    def build_url(self, ip):
        return self.original_url.format(ip, self.api_token)

    def adapt(self, data):
        """Map the abstractapi.com payload onto the common result schema."""
        # A non-empty payload without 'connection' indicates a failed lookup.
        if data and 'connection' not in data:
            raise ToripException(f"Error locating address {data['ip_address']}")
        return {
            'region_name': data['region'],
            'region_code': data['region_iso_code'],
            'isp': data['connection']['isp_name'],
            'country_name': data['country'],
            'country_code': data['country_code'],
            'city': data['city'],
            'lat': data['latitude'],
            'lon': data['longitude'],
            'address': data['ip_address'],
            'time_zone': data['timezone']['name'],
            'zip_code': data['postal_code']
            # NOTE(review): trailing '| ... |' tokens below are extraction residue.
        } | 0.466846 | 0.208562 |
__author__ = '<NAME>'
import time
import re
import threading
import docker
import tweepy
from config import CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_SECRET
# Docker client bound to the local daemon (honours DOCKER_* env vars).
CLIENT = docker.from_env()
# Twitter OAuth1 handler built from credentials in config.py.
AUTH = tweepy.OAuthHandler(consumer_key=CONSUMER_KEY, consumer_secret=CONSUMER_SECRET)
AUTH.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
# Shared API client; retry transient failures up to 3 times.
API = tweepy.API(auth_handler=AUTH, retry_count=3)
def fav_tweet(api, reply):
    """Attempt to fav a tweet and return True if successful."""
    # tweepy can raise TweepError here even if reply.favorited was False,
    # so treat any failure as "not faved".
    try:
        api.create_favorite(id=reply.id)
        return True
    except tweepy.TweepError:
        return False
def remove_usernames(tweet):
    """Strip @-mentions from a tweet's text and trim surrounding whitespace."""
    # Drop every '@' token together with the non-space run that follows it.
    return re.sub(r'@\S*', '', tweet.text).strip()
def start_container(name):
    """Start a detached alpine container named ``name`` and return it."""
    # 'tail -f /dev/null' keeps the container alive so commands can be
    # exec'd into it later.
    keep_alive_cmd = ['tail', '-f', '/dev/null']
    return CLIENT.containers.run('alpine', keep_alive_cmd,
                                 name=str(name), detach=True)
def run_command(container, cmd):
    """Run ``cmd`` through sh inside ``container``; return (output, exit_code).

    On any failure the exception text is returned as the output with
    exit code 1 (best-effort semantics).
    """
    shell_cmd = 'sh -c "{}"'.format(str(cmd))
    try:
        res = container.exec_run(shell_cmd)
        output = res.output.decode('utf-8')
        print('exit_code: {}, output: {}'.format(res.exit_code, output))
        return output, res.exit_code
    except Exception as exc:
        return str(exc), 1
def response_formatter(text, username, max_length=140):
    """
    Formats response to fit within ``max_length`` characters.

    Args:
        text (str): text to return
        username (str): username to @. ' @<username>' is appended.
        max_length (int): max length of tweet. Default: ``140``

    Returns:
        (str): the tweet text
    """
    # The ' @<username>' suffix costs len(username) + 2 characters.
    # FIX: the original trimmed one character per loop iteration and spun
    # forever once text was empty but the suffix alone exceeded the cap;
    # slicing directly is also O(1) instead of O(n^2).
    budget = max_length - (len(username) + 2)
    if len(text) > budget:
        text = text[:max(budget, 0)]
    return '{} @{}'.format(text, username)
class WorkerThread (threading.Thread):
    """Handles one incoming tweet: runs its text as a shell command inside the
    sender's dedicated container and replies with the command output."""

    def __init__(self, tweet):
        threading.Thread.__init__(self)
        self.tweet = tweet

    def run(self):
        # A favorite marks a tweet as already processed; skip duplicates.
        if self.tweet.favorited:
            return
        print('tweet not fav, continue')
        fav_tweet(API, self.tweet)
        # One container per user; lookups key on the numeric user id.
        user_id = str(self.tweet.user.id)
        username = str(self.tweet.user.screen_name)
        tweet_text = remove_usernames(self.tweet)
        print('text: {}'.format(tweet_text))
        print('container name: {}'.format(user_id))
        try:
            container = CLIENT.containers.get(user_id)
        except docker.errors.NotFound:
            print('Container not found for {}, starting one...'.format(username))
            # NOTE(review): the container is created under the *username* but
            # looked up by user id above, so the lookup will miss again next
            # time — confirm whether this mismatch is intended.
            container = start_container(username)
            print('Started container for {} as {}'.format(username, user_id))
        res_text, res_code = run_command(container, tweet_text)
        res_text = response_formatter(res_text, username, max_length=140)
        print(res_text, res_code)
        API.update_status(res_text, in_reply_to_status_id=self.tweet.id)
class StreamHelper (tweepy.streaming.StreamListener):
    """Stream listener that hands each mention to a worker thread."""

    def on_status(self, status):
        # Process off-thread so the stream loop is never blocked.
        worker = WorkerThread(status)
        worker.start()

    def on_error(self, status_code):
        if status_code != 420:
            return None
        print('Hit rate limit :(')
        # returning False in on_data disconnects the stream
        return False
def main():
    """Main function: stream and handle mentions of the bot's own account."""
    listener = StreamHelper()
    stream = tweepy.Stream(AUTH, listener, timeout=None)
    # Track tweets that mention our own screen name.
    own_name = API.me().screen_name
    stream.filter(track=[own_name])
if __name__ == "__main__":
    # Script entry point. Trailing '| ... |' tokens are extraction residue.
    main() | main.py | __author__ = '<NAME>'
import time
import re
import threading
import docker
import tweepy
from config import CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_SECRET
# Docker client bound to the local daemon (honours DOCKER_* env vars).
CLIENT = docker.from_env()
# Twitter OAuth1 handler built from credentials in config.py.
AUTH = tweepy.OAuthHandler(consumer_key=CONSUMER_KEY, consumer_secret=CONSUMER_SECRET)
AUTH.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
# Shared API client; retry transient failures up to 3 times.
API = tweepy.API(auth_handler=AUTH, retry_count=3)
def fav_tweet(api, reply):
    """Attempt to fav a tweet and return True if successful."""
    # tweepy can raise TweepError here even if reply.favorited was False,
    # so treat any failure as "not faved".
    try:
        api.create_favorite(id=reply.id)
        return True
    except tweepy.TweepError:
        return False
def remove_usernames(tweet):
"""Removes user mentions from tweet"""
tweet_text = tweet.text
return re.sub(r'@\S*', '', tweet_text).strip()
def start_container(name):
"""Starts container with a given container name"""
container = CLIENT.containers.run('alpine', ['tail', '-f', '/dev/null'],
name=str(name), detach=True)
return container
def run_command(container, cmd):
"""Runs command given container obj and cmd string"""
cmd = 'sh -c "{}"'.format(str(cmd))
try:
res = container.exec_run(cmd)
print('exit_code: {}, output: {}'.format(res.exit_code, res.output.decode('utf-8')))
return res.output.decode('utf-8'), res.exit_code
except Exception as exc:
return str(exc), 1
def response_formatter(text, username, max_length=140):
"""
Formats response to be below ``max_length`` characters long.
Args:
text (str): text to return
username (str): username to @. @<username> is tacked on to end of tweet
max_lenght (int): max length of tweet. Default: ``140``
Returns:
(str): the tweet text
"""
while len('{} @{}'.format(text, username)) > max_length:
text = text[:-1]
return '{} @{}'.format(text, username)
class WorkerThread (threading.Thread):
def __init__(self, tweet):
threading.Thread.__init__(self)
self.tweet = tweet
def run(self):
if self.tweet.favorited:
return
print('tweet not fav, continue')
fav_tweet(API, self.tweet)
user_id = str(self.tweet.user.id)
username = str(self.tweet.user.screen_name)
tweet_text = remove_usernames(self.tweet)
print('text: {}'.format(tweet_text))
print('container name: {}'.format(user_id))
try:
container = CLIENT.containers.get(user_id)
except docker.errors.NotFound:
print('Container not found for {}, starting one...'.format(username))
container = start_container(username)
print('Started container for {} as {}'.format(username, user_id))
res_text, res_code = run_command(container, tweet_text)
res_text = response_formatter(res_text, username, max_length=140)
print(res_text, res_code)
API.update_status(res_text, in_reply_to_status_id=self.tweet.id)
class StreamHelper (tweepy.streaming.StreamListener):
def on_status(self, status):
wt = WorkerThread(status)
wt.start()
def on_error(self, status_code):
if status_code == 420:
print('Hit rate limit :(')
#returning False in on_data disconnects the stream
return False
def main():
"""Main function"""
sh = StreamHelper()
stream = tweepy.Stream(AUTH, sh, timeout=None)
screen_name = API.me().screen_name
stream.filter(track=[screen_name])
if __name__ == "__main__":
main() | 0.368178 | 0.066965 |
import signal
import subprocess
from time import sleep
from os.path import join
from dateutil.parser import parse
from datetime import datetime
from autonmap.client import client_server
from autonmap.client import config_manager
from autonmap.client import process_manager
from autonmap.client import report_client
"""
This module controls all client processes of the autonmap program
"""
__LOCKFILE_NAME__ = "processmanager.lock"
class Lock:
def __init__(self):
self.lock = False
self.locate_lock()
def locate_lock(self):
from os import walk
for root, dirs, files in walk(config_manager.get_base()):
if __LOCKFILE_NAME__ in files:
self.lock = True
def release_lock(self):
from os import remove
try:
remove(join(config_manager.get_base(), __LOCKFILE_NAME__))
self.lock = False
except OSError:
return False
def get_lock(self):
with open(join(config_manager.get_base(), __LOCKFILE_NAME__), "w") as file:
pass
self.lock = True
class Timeout:
class Timeout(Exception):
pass
def __init__(self, sec=0):
self.sec = sec
def raise_timeout(self, *args):
raise self.Timeout()
def __enter__(self):
signal.signal(signal.SIGALRM, self.raise_timeout)
signal.alarm(self.sec)
def __exit__(self, exc_type, exc_val, exc_tb):
signal.alarm(0)
class Client:
"""
Module that controls several subprocess that work together to
complete work handed out.
"""
def __init__(self):
self.conn = client_server.ClientServer()
self.process = process_manager.ProcessManager()
self.server_ip = config_manager.get_global("server_ip")
self.server_port = int(config_manager.get_global("server_port"))
self.report_manager = report_client.ReportManger()
self.lock = Lock()
def do_work(self):
"""
This function asks for work from the server and after it receives the work to
be completed it will then initiate the process required to begin the work.
:return: None
"""
sleeps = 0
while self.lock.lock:
print("Process Locked cannot gather work. Sleeping for 1 min.")
sleep(60)
processes = subprocess.Popen(["pgrep", 'autonmap'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
processes = processes.communicate()
processes = str(processes[0], 'ascii').split("\n")
print(processes)
if not processes or len(processes) == 2:
print("Nothing blocking process, releasing lock")
self.lock.release_lock()
break
while processes:
proc = processes.pop()
print("Found Process {}".format(proc))
print("Process ID that is halting the program: ".format(proc))
print("Checking if process has run for too long and is stuck.")
p = subprocess.Popen(['ps', '-o', 'stime,time', proc], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
p = parse(str(p.communicate()[0], 'ascii'))
except ValueError:
continue
if (datetime.now() - p).seconds > 7200:
print("Killing process: {}".format(proc))
com = subprocess.Popen(['kill', '-9', proc], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
com = com.communicate()
print("Process Kill Attempt OUT: {}\tERROR: {}".format(str(com[0], 'ascii'),
str(com[1], 'ascii')))
if str(com[1], 'ascii'):
print("Found error {}, breaking from process search".format(com[1]))
break
else:
break
sleeps += 1
if sleeps > 10:
print("Failed to find get work!!!")
return False
else:
print("No process blocking, releasing lock.")
self.lock.release_lock()
self.lock.get_lock()
print("Acquired Lock.")
self.conn.connect(self.server_ip, self.server_port)
self.conn.send(config_manager.get_client_commands("request_job"))
self.conn.receive()
jobs = self.conn.do_job()
self.conn.close()
print("Adding work to the process manager")
for host in jobs:
print(host)
self.process.add_host(host)
print("Hosts added to process manager, starting work.")
print("----------------------------------------------")
self.process.run()
self.lock.release_lock()
print("Work Completed, Running the report manager.")
print("----------------------------------------------")
self.report_manager.generate_database_report()
print("Report filed, releasing lock.")
self.lock.release_lock()
def stop(self):
"""
This function sends the kill signal to the worker process and tries to shut them down.
:return: None
"""
print("Starting shutdown")
self.process.exit.kill = True
self.conn.close()
self.lock.release_lock()
print("Finished shutdown")
def main():
"""
Function that initiates the sub-processes that find and complete the work.
:return: None
"""
client = Client()
client.do_work()
if __name__ == "__main__":
main() | autonmap/client/client.py | import signal
import subprocess
from time import sleep
from os.path import join
from dateutil.parser import parse
from datetime import datetime
from autonmap.client import client_server
from autonmap.client import config_manager
from autonmap.client import process_manager
from autonmap.client import report_client
"""
This module controls all client processes of the autonmap program
"""
__LOCKFILE_NAME__ = "processmanager.lock"
class Lock:
def __init__(self):
self.lock = False
self.locate_lock()
def locate_lock(self):
from os import walk
for root, dirs, files in walk(config_manager.get_base()):
if __LOCKFILE_NAME__ in files:
self.lock = True
def release_lock(self):
from os import remove
try:
remove(join(config_manager.get_base(), __LOCKFILE_NAME__))
self.lock = False
except OSError:
return False
def get_lock(self):
with open(join(config_manager.get_base(), __LOCKFILE_NAME__), "w") as file:
pass
self.lock = True
class Timeout:
class Timeout(Exception):
pass
def __init__(self, sec=0):
self.sec = sec
def raise_timeout(self, *args):
raise self.Timeout()
def __enter__(self):
signal.signal(signal.SIGALRM, self.raise_timeout)
signal.alarm(self.sec)
def __exit__(self, exc_type, exc_val, exc_tb):
signal.alarm(0)
class Client:
"""
Module that controls several subprocess that work together to
complete work handed out.
"""
def __init__(self):
self.conn = client_server.ClientServer()
self.process = process_manager.ProcessManager()
self.server_ip = config_manager.get_global("server_ip")
self.server_port = int(config_manager.get_global("server_port"))
self.report_manager = report_client.ReportManger()
self.lock = Lock()
def do_work(self):
"""
This function asks for work from the server and after it receives the work to
be completed it will then initiate the process required to begin the work.
:return: None
"""
sleeps = 0
while self.lock.lock:
print("Process Locked cannot gather work. Sleeping for 1 min.")
sleep(60)
processes = subprocess.Popen(["pgrep", 'autonmap'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
processes = processes.communicate()
processes = str(processes[0], 'ascii').split("\n")
print(processes)
if not processes or len(processes) == 2:
print("Nothing blocking process, releasing lock")
self.lock.release_lock()
break
while processes:
proc = processes.pop()
print("Found Process {}".format(proc))
print("Process ID that is halting the program: ".format(proc))
print("Checking if process has run for too long and is stuck.")
p = subprocess.Popen(['ps', '-o', 'stime,time', proc], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
p = parse(str(p.communicate()[0], 'ascii'))
except ValueError:
continue
if (datetime.now() - p).seconds > 7200:
print("Killing process: {}".format(proc))
com = subprocess.Popen(['kill', '-9', proc], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
com = com.communicate()
print("Process Kill Attempt OUT: {}\tERROR: {}".format(str(com[0], 'ascii'),
str(com[1], 'ascii')))
if str(com[1], 'ascii'):
print("Found error {}, breaking from process search".format(com[1]))
break
else:
break
sleeps += 1
if sleeps > 10:
print("Failed to find get work!!!")
return False
else:
print("No process blocking, releasing lock.")
self.lock.release_lock()
self.lock.get_lock()
print("Acquired Lock.")
self.conn.connect(self.server_ip, self.server_port)
self.conn.send(config_manager.get_client_commands("request_job"))
self.conn.receive()
jobs = self.conn.do_job()
self.conn.close()
print("Adding work to the process manager")
for host in jobs:
print(host)
self.process.add_host(host)
print("Hosts added to process manager, starting work.")
print("----------------------------------------------")
self.process.run()
self.lock.release_lock()
print("Work Completed, Running the report manager.")
print("----------------------------------------------")
self.report_manager.generate_database_report()
print("Report filed, releasing lock.")
self.lock.release_lock()
def stop(self):
"""
This function sends the kill signal to the worker process and tries to shut them down.
:return: None
"""
print("Starting shutdown")
self.process.exit.kill = True
self.conn.close()
self.lock.release_lock()
print("Finished shutdown")
def main():
"""
Function that initiates the sub-processes that find and complete the work.
:return: None
"""
client = Client()
client.do_work()
if __name__ == "__main__":
main() | 0.435421 | 0.059976 |
import os
import unittest
import random
import numpy as np
from sklearn.svm import SVC
from sklearn.datasets import load_wine, load_iris
from stree.Splitter import Splitter
from .utils import load_dataset, load_disc_dataset
class Splitter_test(unittest.TestCase):
def __init__(self, *args, **kwargs):
self._random_state = 1
super().__init__(*args, **kwargs)
@staticmethod
def build(
clf=SVC,
min_samples_split=0,
feature_select="random",
criterion="gini",
criteria="max_samples",
random_state=None,
):
return Splitter(
clf=clf(random_state=random_state, kernel="rbf"),
min_samples_split=min_samples_split,
feature_select=feature_select,
criterion=criterion,
criteria=criteria,
random_state=random_state,
)
@classmethod
def setUp(cls):
os.environ["TESTING"] = "1"
def test_init(self):
with self.assertRaises(ValueError):
self.build(criterion="duck")
with self.assertRaises(ValueError):
self.build(feature_select="duck")
with self.assertRaises(ValueError):
self.build(criteria="duck")
with self.assertRaises(ValueError):
_ = Splitter(clf=None)
for feature_select in ["best", "random"]:
for criterion in ["gini", "entropy"]:
for criteria in ["max_samples", "impurity"]:
tcl = self.build(
feature_select=feature_select,
criterion=criterion,
criteria=criteria,
)
self.assertEqual(feature_select, tcl._feature_select)
self.assertEqual(criterion, tcl._criterion)
self.assertEqual(criteria, tcl._criteria)
def test_gini(self):
expected_values = [
([0, 1, 1, 1, 1, 1, 0, 0, 0, 1], 0.48),
([0, 1, 1, 2, 2, 3, 4, 5, 3, 2, 1, 1], 0.7777777777777778),
([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2], 0.520408163265306),
([0, 0, 1, 1, 1, 1, 0, 0], 0.5),
([0, 0, 1, 1, 2, 2, 3, 3], 0.75),
([0, 0, 1, 1, 1, 1, 1, 1], 0.375),
([0], 0),
([1, 1, 1, 1], 0),
]
for labels, expected in expected_values:
self.assertAlmostEqual(expected, Splitter._gini(labels))
tcl = self.build(criterion="gini")
self.assertAlmostEqual(expected, tcl.criterion_function(labels))
def test_entropy(self):
expected_values = [
([0, 1, 1, 1, 1, 1, 0, 0, 0, 1], 0.9709505944546686),
([0, 1, 1, 2, 2, 3, 4, 5, 3, 2, 1, 1], 0.9111886696810589),
([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2], 0.8120406807940999),
([0, 0, 1, 1, 1, 1, 0, 0], 1),
([0, 0, 1, 1, 2, 2, 3, 3], 1),
([0, 0, 1, 1, 1, 1, 1, 1], 0.8112781244591328),
([1], 0),
([0, 0, 0, 0], 0),
]
for labels, expected in expected_values:
self.assertAlmostEqual(expected, Splitter._entropy(labels))
tcl = self.build(criterion="entropy")
self.assertAlmostEqual(expected, tcl.criterion_function(labels))
def test_information_gain(self):
expected_values = [
(
[0, 1, 1, 1, 1, 1],
[0, 0, 0, 1],
0.16333333333333333,
0.25642589168200297,
),
(
[0, 1, 1, 2, 2, 3, 4, 5, 3, 2, 1, 1],
[5, 3, 2, 1, 1],
0.007381776239907684,
-0.03328610916207225,
),
([], [], 0.0, 0.0),
([1], [], 0.0, 0.0),
([], [1], 0.0, 0.0),
([0, 0, 0, 0], [0, 0], 0.0, 0.0),
([], [1, 1, 1, 2], 0.0, 0.0),
(None, [1, 2, 3], 0.0, 0.0),
([1, 2, 3], None, 0.0, 0.0),
]
for yu, yd, expected_gini, expected_entropy in expected_values:
yu = np.array(yu, dtype=np.int32) if yu is not None else None
yd = np.array(yd, dtype=np.int32) if yd is not None else None
if yu is not None and yd is not None:
complete = np.append(yu, yd)
elif yd is not None:
complete = yd
else:
complete = yu
tcl = self.build(criterion="gini")
computed = tcl.information_gain(complete, yu, yd)
self.assertAlmostEqual(expected_gini, computed)
tcl = self.build(criterion="entropy")
computed = tcl.information_gain(complete, yu, yd)
self.assertAlmostEqual(expected_entropy, computed)
def test_max_samples(self):
tcl = self.build(criteria="max_samples")
data = np.array(
[
[-0.1, 0.2, -0.3],
[0.7, 0.01, -0.1],
[0.7, -0.9, 0.5],
[0.1, 0.2, 0.3],
[-0.1, 0.2, 0.3],
[-0.1, 0.2, 0.3],
]
)
expected = data[:, 0]
y = [1, 2, 1, 0, 0, 0]
computed = tcl._max_samples(data, y)
self.assertEqual(0, computed)
computed_data = data[:, computed]
self.assertEqual((6,), computed_data.shape)
self.assertListEqual(expected.tolist(), computed_data.tolist())
def test_impurity(self):
tcl = self.build(criteria="impurity")
data = np.array(
[
[-0.1, 0.2, -0.3],
[0.7, 0.01, -0.1],
[0.7, -0.9, 0.5],
[0.1, 0.2, 0.3],
[-0.1, 0.2, 0.3],
[-0.1, 0.2, 0.3],
]
)
expected = data[:, 2]
y = np.array([1, 2, 1, 0, 0, 0])
computed = tcl._impurity(data, y)
self.assertEqual(2, computed)
computed_data = data[:, computed]
self.assertEqual((6,), computed_data.shape)
self.assertListEqual(expected.tolist(), computed_data.tolist())
def test_generate_subspaces(self):
features = 250
for max_features in range(2, features):
num = len(Splitter._generate_spaces(features, max_features))
self.assertEqual(5, num)
self.assertEqual(3, len(Splitter._generate_spaces(3, 2)))
self.assertEqual(4, len(Splitter._generate_spaces(4, 3)))
def test_best_splitter_few_sets(self):
X, y = load_iris(return_X_y=True)
X = np.delete(X, 3, 1)
tcl = self.build(
feature_select="best", random_state=self._random_state
)
dataset, computed = tcl.get_subspace(X, y, max_features=2)
self.assertListEqual([0, 2], list(computed))
self.assertListEqual(X[:, computed].tolist(), dataset.tolist())
def test_splitter_parameter(self):
expected_values = [
[0, 6, 11, 12], # best entropy max_samples
[0, 6, 11, 12], # best entropy impurity
[0, 6, 11, 12], # best gini max_samples
[0, 6, 11, 12], # best gini impurity
[0, 3, 8, 12], # random entropy max_samples
[0, 3, 7, 12], # random entropy impurity
[1, 7, 9, 12], # random gini max_samples
[1, 5, 8, 12], # random gini impurity
[6, 9, 11, 12], # mutual entropy max_samples
[6, 9, 11, 12], # mutual entropy impurity
[6, 9, 11, 12], # mutual gini max_samples
[6, 9, 11, 12], # mutual gini impurity
]
X, y = load_wine(return_X_y=True)
rn = 0
for feature_select in ["best", "random", "mutual"]:
for criterion in ["entropy", "gini"]:
for criteria in [
"max_samples",
"impurity",
]:
tcl = self.build(
feature_select=feature_select,
criterion=criterion,
criteria=criteria,
)
expected = expected_values.pop(0)
random.seed(rn)
rn += 1
dataset, computed = tcl.get_subspace(X, y, max_features=4)
# print(
# "{}, # {:7s}{:8s}{:15s}".format(
# list(computed),
# feature_select,
# criterion,
# criteria,
# )
# )
self.assertListEqual(expected, sorted(list(computed)))
self.assertListEqual(
X[:, computed].tolist(), dataset.tolist()
)
def test_get_best_subspaces(self):
results = [
(4, [3, 4, 11, 13]),
(7, [1, 3, 4, 5, 11, 13, 16]),
(9, [1, 3, 4, 5, 7, 10, 11, 13, 16]),
]
X, y = load_dataset(n_features=20)
for k, expected in results:
tcl = self.build(
feature_select="best",
)
Xs, computed = tcl.get_subspace(X, y, k)
self.assertListEqual(expected, list(computed))
self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
def test_get_best_subspaces_discrete(self):
results = [
(4, [0, 3, 16, 18]),
(7, [0, 3, 13, 14, 16, 18, 19]),
(9, [0, 3, 7, 13, 14, 15, 16, 18, 19]),
]
X, y = load_disc_dataset(n_features=20)
for k, expected in results:
tcl = self.build(
feature_select="best",
)
Xs, computed = tcl.get_subspace(X, y, k)
self.assertListEqual(expected, list(computed))
self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
def test_get_cfs_subspaces(self):
results = [
(4, [1, 5, 9, 12]),
(6, [1, 5, 9, 12, 4, 2]),
(7, [1, 5, 9, 12, 4, 2, 3]),
]
X, y = load_dataset(n_features=20, n_informative=7)
for k, expected in results:
tcl = self.build(feature_select="cfs")
Xs, computed = tcl.get_subspace(X, y, k)
self.assertListEqual(expected, list(computed))
self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
def test_get_fcbf_subspaces(self):
results = [
(4, [1, 5, 9, 12]),
(6, [1, 5, 9, 12, 4, 2]),
(7, [1, 5, 9, 12, 4, 2, 16]),
]
for rs, expected in results:
X, y = load_dataset(n_features=20, n_informative=7)
tcl = self.build(feature_select="fcbf", random_state=rs)
Xs, computed = tcl.get_subspace(X, y, rs)
self.assertListEqual(expected, list(computed))
self.assertListEqual(X[:, expected].tolist(), Xs.tolist()) | stree/tests/Splitter_test.py | import os
import unittest
import random
import numpy as np
from sklearn.svm import SVC
from sklearn.datasets import load_wine, load_iris
from stree.Splitter import Splitter
from .utils import load_dataset, load_disc_dataset
class Splitter_test(unittest.TestCase):
def __init__(self, *args, **kwargs):
self._random_state = 1
super().__init__(*args, **kwargs)
@staticmethod
def build(
clf=SVC,
min_samples_split=0,
feature_select="random",
criterion="gini",
criteria="max_samples",
random_state=None,
):
return Splitter(
clf=clf(random_state=random_state, kernel="rbf"),
min_samples_split=min_samples_split,
feature_select=feature_select,
criterion=criterion,
criteria=criteria,
random_state=random_state,
)
@classmethod
def setUp(cls):
os.environ["TESTING"] = "1"
def test_init(self):
with self.assertRaises(ValueError):
self.build(criterion="duck")
with self.assertRaises(ValueError):
self.build(feature_select="duck")
with self.assertRaises(ValueError):
self.build(criteria="duck")
with self.assertRaises(ValueError):
_ = Splitter(clf=None)
for feature_select in ["best", "random"]:
for criterion in ["gini", "entropy"]:
for criteria in ["max_samples", "impurity"]:
tcl = self.build(
feature_select=feature_select,
criterion=criterion,
criteria=criteria,
)
self.assertEqual(feature_select, tcl._feature_select)
self.assertEqual(criterion, tcl._criterion)
self.assertEqual(criteria, tcl._criteria)
def test_gini(self):
expected_values = [
([0, 1, 1, 1, 1, 1, 0, 0, 0, 1], 0.48),
([0, 1, 1, 2, 2, 3, 4, 5, 3, 2, 1, 1], 0.7777777777777778),
([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2], 0.520408163265306),
([0, 0, 1, 1, 1, 1, 0, 0], 0.5),
([0, 0, 1, 1, 2, 2, 3, 3], 0.75),
([0, 0, 1, 1, 1, 1, 1, 1], 0.375),
([0], 0),
([1, 1, 1, 1], 0),
]
for labels, expected in expected_values:
self.assertAlmostEqual(expected, Splitter._gini(labels))
tcl = self.build(criterion="gini")
self.assertAlmostEqual(expected, tcl.criterion_function(labels))
def test_entropy(self):
expected_values = [
([0, 1, 1, 1, 1, 1, 0, 0, 0, 1], 0.9709505944546686),
([0, 1, 1, 2, 2, 3, 4, 5, 3, 2, 1, 1], 0.9111886696810589),
([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2], 0.8120406807940999),
([0, 0, 1, 1, 1, 1, 0, 0], 1),
([0, 0, 1, 1, 2, 2, 3, 3], 1),
([0, 0, 1, 1, 1, 1, 1, 1], 0.8112781244591328),
([1], 0),
([0, 0, 0, 0], 0),
]
for labels, expected in expected_values:
self.assertAlmostEqual(expected, Splitter._entropy(labels))
tcl = self.build(criterion="entropy")
self.assertAlmostEqual(expected, tcl.criterion_function(labels))
def test_information_gain(self):
expected_values = [
(
[0, 1, 1, 1, 1, 1],
[0, 0, 0, 1],
0.16333333333333333,
0.25642589168200297,
),
(
[0, 1, 1, 2, 2, 3, 4, 5, 3, 2, 1, 1],
[5, 3, 2, 1, 1],
0.007381776239907684,
-0.03328610916207225,
),
([], [], 0.0, 0.0),
([1], [], 0.0, 0.0),
([], [1], 0.0, 0.0),
([0, 0, 0, 0], [0, 0], 0.0, 0.0),
([], [1, 1, 1, 2], 0.0, 0.0),
(None, [1, 2, 3], 0.0, 0.0),
([1, 2, 3], None, 0.0, 0.0),
]
for yu, yd, expected_gini, expected_entropy in expected_values:
yu = np.array(yu, dtype=np.int32) if yu is not None else None
yd = np.array(yd, dtype=np.int32) if yd is not None else None
if yu is not None and yd is not None:
complete = np.append(yu, yd)
elif yd is not None:
complete = yd
else:
complete = yu
tcl = self.build(criterion="gini")
computed = tcl.information_gain(complete, yu, yd)
self.assertAlmostEqual(expected_gini, computed)
tcl = self.build(criterion="entropy")
computed = tcl.information_gain(complete, yu, yd)
self.assertAlmostEqual(expected_entropy, computed)
def test_max_samples(self):
tcl = self.build(criteria="max_samples")
data = np.array(
[
[-0.1, 0.2, -0.3],
[0.7, 0.01, -0.1],
[0.7, -0.9, 0.5],
[0.1, 0.2, 0.3],
[-0.1, 0.2, 0.3],
[-0.1, 0.2, 0.3],
]
)
expected = data[:, 0]
y = [1, 2, 1, 0, 0, 0]
computed = tcl._max_samples(data, y)
self.assertEqual(0, computed)
computed_data = data[:, computed]
self.assertEqual((6,), computed_data.shape)
self.assertListEqual(expected.tolist(), computed_data.tolist())
def test_impurity(self):
tcl = self.build(criteria="impurity")
data = np.array(
[
[-0.1, 0.2, -0.3],
[0.7, 0.01, -0.1],
[0.7, -0.9, 0.5],
[0.1, 0.2, 0.3],
[-0.1, 0.2, 0.3],
[-0.1, 0.2, 0.3],
]
)
expected = data[:, 2]
y = np.array([1, 2, 1, 0, 0, 0])
computed = tcl._impurity(data, y)
self.assertEqual(2, computed)
computed_data = data[:, computed]
self.assertEqual((6,), computed_data.shape)
self.assertListEqual(expected.tolist(), computed_data.tolist())
def test_generate_subspaces(self):
features = 250
for max_features in range(2, features):
num = len(Splitter._generate_spaces(features, max_features))
self.assertEqual(5, num)
self.assertEqual(3, len(Splitter._generate_spaces(3, 2)))
self.assertEqual(4, len(Splitter._generate_spaces(4, 3)))
def test_best_splitter_few_sets(self):
X, y = load_iris(return_X_y=True)
X = np.delete(X, 3, 1)
tcl = self.build(
feature_select="best", random_state=self._random_state
)
dataset, computed = tcl.get_subspace(X, y, max_features=2)
self.assertListEqual([0, 2], list(computed))
self.assertListEqual(X[:, computed].tolist(), dataset.tolist())
def test_splitter_parameter(self):
expected_values = [
[0, 6, 11, 12], # best entropy max_samples
[0, 6, 11, 12], # best entropy impurity
[0, 6, 11, 12], # best gini max_samples
[0, 6, 11, 12], # best gini impurity
[0, 3, 8, 12], # random entropy max_samples
[0, 3, 7, 12], # random entropy impurity
[1, 7, 9, 12], # random gini max_samples
[1, 5, 8, 12], # random gini impurity
[6, 9, 11, 12], # mutual entropy max_samples
[6, 9, 11, 12], # mutual entropy impurity
[6, 9, 11, 12], # mutual gini max_samples
[6, 9, 11, 12], # mutual gini impurity
]
X, y = load_wine(return_X_y=True)
rn = 0
for feature_select in ["best", "random", "mutual"]:
for criterion in ["entropy", "gini"]:
for criteria in [
"max_samples",
"impurity",
]:
tcl = self.build(
feature_select=feature_select,
criterion=criterion,
criteria=criteria,
)
expected = expected_values.pop(0)
random.seed(rn)
rn += 1
dataset, computed = tcl.get_subspace(X, y, max_features=4)
# print(
# "{}, # {:7s}{:8s}{:15s}".format(
# list(computed),
# feature_select,
# criterion,
# criteria,
# )
# )
self.assertListEqual(expected, sorted(list(computed)))
self.assertListEqual(
X[:, computed].tolist(), dataset.tolist()
)
def test_get_best_subspaces(self):
results = [
(4, [3, 4, 11, 13]),
(7, [1, 3, 4, 5, 11, 13, 16]),
(9, [1, 3, 4, 5, 7, 10, 11, 13, 16]),
]
X, y = load_dataset(n_features=20)
for k, expected in results:
tcl = self.build(
feature_select="best",
)
Xs, computed = tcl.get_subspace(X, y, k)
self.assertListEqual(expected, list(computed))
self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
def test_get_best_subspaces_discrete(self):
results = [
(4, [0, 3, 16, 18]),
(7, [0, 3, 13, 14, 16, 18, 19]),
(9, [0, 3, 7, 13, 14, 15, 16, 18, 19]),
]
X, y = load_disc_dataset(n_features=20)
for k, expected in results:
tcl = self.build(
feature_select="best",
)
Xs, computed = tcl.get_subspace(X, y, k)
self.assertListEqual(expected, list(computed))
self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
def test_get_cfs_subspaces(self):
results = [
(4, [1, 5, 9, 12]),
(6, [1, 5, 9, 12, 4, 2]),
(7, [1, 5, 9, 12, 4, 2, 3]),
]
X, y = load_dataset(n_features=20, n_informative=7)
for k, expected in results:
tcl = self.build(feature_select="cfs")
Xs, computed = tcl.get_subspace(X, y, k)
self.assertListEqual(expected, list(computed))
self.assertListEqual(X[:, expected].tolist(), Xs.tolist())
def test_get_fcbf_subspaces(self):
results = [
(4, [1, 5, 9, 12]),
(6, [1, 5, 9, 12, 4, 2]),
(7, [1, 5, 9, 12, 4, 2, 16]),
]
for rs, expected in results:
X, y = load_dataset(n_features=20, n_informative=7)
tcl = self.build(feature_select="fcbf", random_state=rs)
Xs, computed = tcl.get_subspace(X, y, rs)
self.assertListEqual(expected, list(computed))
self.assertListEqual(X[:, expected].tolist(), Xs.tolist()) | 0.659624 | 0.539347 |
from __future__ import print_function
import XInput
from inputs import get_gamepad
import inputs
import time
import threading
import random
import vgamepad as vg
# Array of the buttons (using 0 for LT and 1 for RT)
button = [None] * 10
button = [vg.XUSB_BUTTON.XUSB_GAMEPAD_A, vg.XUSB_BUTTON.XUSB_GAMEPAD_Y, vg.XUSB_BUTTON.XUSB_GAMEPAD_B,
vg.XUSB_BUTTON.XUSB_GAMEPAD_X, vg.XUSB_BUTTON.XUSB_GAMEPAD_LEFT_SHOULDER,
vg.XUSB_BUTTON.XUSB_GAMEPAD_RIGHT_SHOULDER, 0, 1]
# Array of axis possibilities (I only decided to include normal and
# reversed controls, anything more was too much)
axis = [None] * 2
axis = [0, 1]
# The Vgamepads will always be related by user_index + numControllers
def randomize(controllerNum):
Vgamepad = vg.VX360Gamepad()
global flag
while True:
events = get_gamepad(controllerNum)
for event in events:
if event.code == 'ABS_X' or event.code == 'ABS_Y':
state = XInput.get_state(controllerNum)
thumbValues = XInput.get_thumb_values(state)
# analogX = analogState.Gamepad.sThumbLX
# analogY = analogState.Gamepad.sThumbLY
analogX = thumbValues[0][0]
analogY = thumbValues[0][1]
# There are 2 combinations, (x, y) and (-x, -y)
if axis[0] == 0:
Vgamepad.left_joystick_float(analogX, analogY)
elif axis[0] == 1:
Vgamepad.left_joystick_float(-analogX, -analogY)
Vgamepad.update()
elif event.code == 'BTN_SOUTH' and event.state == 1:
# Checking if the button is LT or RT
if button[0] == 0:
Vgamepad.left_trigger(255)
elif button[0] == 1:
Vgamepad.right_trigger(255)
else:
Vgamepad.press_button(button=button[0])
Vgamepad.update()
elif event.code == 'BTN_SOUTH' and event.state == 0:
if button[0] == 0:
Vgamepad.left_trigger(0)
elif button[0] == 1:
Vgamepad.right_trigger(0)
else:
Vgamepad.release_button(button=button[0])
Vgamepad.update()
elif event.code == 'BTN_NORTH' and event.state == 1:
if button[1] == 0:
Vgamepad.left_trigger(255)
elif button[1] == 1:
Vgamepad.right_trigger(255)
else:
Vgamepad.press_button(button=button[1])
Vgamepad.update()
elif event.code == 'BTN_NORTH' and event.state == 0:
if button[1] == 0:
Vgamepad.left_trigger(0)
elif button[1] == 1:
Vgamepad.right_trigger(0)
else:
Vgamepad.release_button(button=button[1])
Vgamepad.update()
elif event.code == 'BTN_EAST' and event.state == 1:
if button[2] == 0:
Vgamepad.left_trigger(255)
elif button[2] == 1:
Vgamepad.right_trigger(255)
else:
Vgamepad.press_button(button=button[2])
Vgamepad.update()
elif event.code == 'BTN_EAST' and event.state == 0:
if button[2] == 0:
Vgamepad.left_trigger(0)
elif button[2] == 1:
Vgamepad.right_trigger(0)
else:
Vgamepad.release_button(button=button[2])
Vgamepad.update()
elif event.code == 'BTN_WEST' and event.state == 1:
if button[3] == 0:
Vgamepad.left_trigger(255)
elif button[3] == 1:
Vgamepad.right_trigger(255)
else:
Vgamepad.press_button(button=button[3])
Vgamepad.update()
elif event.code == 'BTN_WEST' and event.state == 0:
if button[3] == 0:
Vgamepad.left_trigger(0)
elif button[3] == 1:
Vgamepad.right_trigger(0)
else:
Vgamepad.release_button(button=button[3])
Vgamepad.update()
elif event.code == 'BTN_TL' and event.state == 1:
if button[4] == 0:
Vgamepad.left_trigger(255)
elif button[4] == 1:
Vgamepad.right_trigger(255)
else:
Vgamepad.press_button(button=button[4])
Vgamepad.update()
elif event.code == 'BTN_TL' and event.state == 0:
if button[4] == 0:
Vgamepad.left_trigger(0)
elif button[4] == 1:
Vgamepad.right_trigger(0)
else:
Vgamepad.release_button(button=button[4])
Vgamepad.update()
elif event.code == 'BTN_TR' and event.state == 1:
if button[5] == 0:
Vgamepad.left_trigger(255)
elif button[5] == 1:
Vgamepad.right_trigger(255)
else:
Vgamepad.press_button(button=button[5])
Vgamepad.update()
elif event.code == 'BTN_TR' and event.state == 0:
if button[5] == 0:
Vgamepad.left_trigger(0)
elif button[5] == 1:
Vgamepad.right_trigger(0)
else:
Vgamepad.release_button(button=button[5])
Vgamepad.update()
elif event.code == 'ABS_Z' and event.state != 0:
if button[6] == 0:
Vgamepad.left_trigger(255)
elif button[6] == 1:
Vgamepad.right_trigger(255)
else:
Vgamepad.press_button(button=button[6])
Vgamepad.update()
elif event.code == 'ABS_Z' and event.state == 0:
if button[6] == 0:
Vgamepad.left_trigger(0)
elif button[6] == 1:
Vgamepad.right_trigger(0)
else:
Vgamepad.release_button(button=button[6])
Vgamepad.update()
elif event.code == 'ABS_RZ' and event.state != 0:
if button[7] == 0:
Vgamepad.left_trigger(255)
elif button[7] == 1:
Vgamepad.right_trigger(255)
else:
Vgamepad.press_button(button=button[7])
Vgamepad.update()
elif event.code == 'ABS_RZ' and event.state == 0:
if button[7] == 0:
Vgamepad.left_trigger(0)
elif button[7] == 1:
Vgamepad.right_trigger(0)
else:
Vgamepad.release_button(button=button[7])
Vgamepad.update()
# Start isn't randomized cuz that's dumb
elif event.code == 'BTN_START' and event.state == 1:
Vgamepad.press_button(button=vg.XUSB_BUTTON.XUSB_GAMEPAD_START)
Vgamepad.update()
elif event.code == 'BTN_START' and event.state == 0:
Vgamepad.release_button(button=vg.XUSB_BUTTON.XUSB_GAMEPAD_START)
Vgamepad.update()
# Timer thread, randomizes the buttons every __ seconds
def timing(randomization):
# Once this randomization occurs the length of the button inputs becomes incorrect
if randomization == "button":
random.shuffle(button)
elif randomization == "axis":
random.shuffle(axis)
elif randomization == "both":
random.shuffle(button)
random.shuffle(axis)
def main(randomization, seconds):
global devices
devices = inputs.DeviceManager()
global numControllers
numControllers = 0
# Check the number of controllers
for device in devices.gamepads:
devices.gamepads[numControllers] = device
numControllers += 1
print("numControllers: " + str(numControllers))
for i in range(numControllers):
p = threading.Thread(target=randomize, args=[i], daemon=True)
p.start()
# Infinite loop for the timer and randomizer
while True:
timer = threading.Timer(seconds, timing(randomization))
timer.daemon = True
timer.start()
time.sleep(seconds)
if __name__ == "__main__":
# Call main using default values from config file
import json
# Read config settings
with open("configs/config.json") as f:
config = json.load(f)
randomConfig = config["RANDOM"]
# Set config settings
for randomization in randomConfig["randomization"]:
if randomConfig["randomization"][str(randomization)] == str(True):
randomization = randomConfig["randomization"][str(randomization)]
if randomization is False:
print("At least one gamemode must be chosen")
quit()
# Load config settings
main(randomization, float(randomConfig['seconds'])) | scripts/RC.py | from __future__ import print_function
import XInput
from inputs import get_gamepad
import inputs
import time
import threading
import random
import vgamepad as vg
# Array of the buttons (using 0 for LT and 1 for RT)
button = [None] * 10
button = [vg.XUSB_BUTTON.XUSB_GAMEPAD_A, vg.XUSB_BUTTON.XUSB_GAMEPAD_Y, vg.XUSB_BUTTON.XUSB_GAMEPAD_B,
vg.XUSB_BUTTON.XUSB_GAMEPAD_X, vg.XUSB_BUTTON.XUSB_GAMEPAD_LEFT_SHOULDER,
vg.XUSB_BUTTON.XUSB_GAMEPAD_RIGHT_SHOULDER, 0, 1]
# Array of axis possibilities (I only decided to include normal and
# reversed controls, anything more was too much)
axis = [None] * 2
axis = [0, 1]
# The Vgamepads will always be related by user_index + numControllers
def randomize(controllerNum):
Vgamepad = vg.VX360Gamepad()
global flag
while True:
events = get_gamepad(controllerNum)
for event in events:
if event.code == 'ABS_X' or event.code == 'ABS_Y':
state = XInput.get_state(controllerNum)
thumbValues = XInput.get_thumb_values(state)
# analogX = analogState.Gamepad.sThumbLX
# analogY = analogState.Gamepad.sThumbLY
analogX = thumbValues[0][0]
analogY = thumbValues[0][1]
# There are 2 combinations, (x, y) and (-x, -y)
if axis[0] == 0:
Vgamepad.left_joystick_float(analogX, analogY)
elif axis[0] == 1:
Vgamepad.left_joystick_float(-analogX, -analogY)
Vgamepad.update()
elif event.code == 'BTN_SOUTH' and event.state == 1:
# Checking if the button is LT or RT
if button[0] == 0:
Vgamepad.left_trigger(255)
elif button[0] == 1:
Vgamepad.right_trigger(255)
else:
Vgamepad.press_button(button=button[0])
Vgamepad.update()
elif event.code == 'BTN_SOUTH' and event.state == 0:
if button[0] == 0:
Vgamepad.left_trigger(0)
elif button[0] == 1:
Vgamepad.right_trigger(0)
else:
Vgamepad.release_button(button=button[0])
Vgamepad.update()
elif event.code == 'BTN_NORTH' and event.state == 1:
if button[1] == 0:
Vgamepad.left_trigger(255)
elif button[1] == 1:
Vgamepad.right_trigger(255)
else:
Vgamepad.press_button(button=button[1])
Vgamepad.update()
elif event.code == 'BTN_NORTH' and event.state == 0:
if button[1] == 0:
Vgamepad.left_trigger(0)
elif button[1] == 1:
Vgamepad.right_trigger(0)
else:
Vgamepad.release_button(button=button[1])
Vgamepad.update()
elif event.code == 'BTN_EAST' and event.state == 1:
if button[2] == 0:
Vgamepad.left_trigger(255)
elif button[2] == 1:
Vgamepad.right_trigger(255)
else:
Vgamepad.press_button(button=button[2])
Vgamepad.update()
elif event.code == 'BTN_EAST' and event.state == 0:
if button[2] == 0:
Vgamepad.left_trigger(0)
elif button[2] == 1:
Vgamepad.right_trigger(0)
else:
Vgamepad.release_button(button=button[2])
Vgamepad.update()
elif event.code == 'BTN_WEST' and event.state == 1:
if button[3] == 0:
Vgamepad.left_trigger(255)
elif button[3] == 1:
Vgamepad.right_trigger(255)
else:
Vgamepad.press_button(button=button[3])
Vgamepad.update()
elif event.code == 'BTN_WEST' and event.state == 0:
if button[3] == 0:
Vgamepad.left_trigger(0)
elif button[3] == 1:
Vgamepad.right_trigger(0)
else:
Vgamepad.release_button(button=button[3])
Vgamepad.update()
elif event.code == 'BTN_TL' and event.state == 1:
if button[4] == 0:
Vgamepad.left_trigger(255)
elif button[4] == 1:
Vgamepad.right_trigger(255)
else:
Vgamepad.press_button(button=button[4])
Vgamepad.update()
elif event.code == 'BTN_TL' and event.state == 0:
if button[4] == 0:
Vgamepad.left_trigger(0)
elif button[4] == 1:
Vgamepad.right_trigger(0)
else:
Vgamepad.release_button(button=button[4])
Vgamepad.update()
elif event.code == 'BTN_TR' and event.state == 1:
if button[5] == 0:
Vgamepad.left_trigger(255)
elif button[5] == 1:
Vgamepad.right_trigger(255)
else:
Vgamepad.press_button(button=button[5])
Vgamepad.update()
elif event.code == 'BTN_TR' and event.state == 0:
if button[5] == 0:
Vgamepad.left_trigger(0)
elif button[5] == 1:
Vgamepad.right_trigger(0)
else:
Vgamepad.release_button(button=button[5])
Vgamepad.update()
elif event.code == 'ABS_Z' and event.state != 0:
if button[6] == 0:
Vgamepad.left_trigger(255)
elif button[6] == 1:
Vgamepad.right_trigger(255)
else:
Vgamepad.press_button(button=button[6])
Vgamepad.update()
elif event.code == 'ABS_Z' and event.state == 0:
if button[6] == 0:
Vgamepad.left_trigger(0)
elif button[6] == 1:
Vgamepad.right_trigger(0)
else:
Vgamepad.release_button(button=button[6])
Vgamepad.update()
elif event.code == 'ABS_RZ' and event.state != 0:
if button[7] == 0:
Vgamepad.left_trigger(255)
elif button[7] == 1:
Vgamepad.right_trigger(255)
else:
Vgamepad.press_button(button=button[7])
Vgamepad.update()
elif event.code == 'ABS_RZ' and event.state == 0:
if button[7] == 0:
Vgamepad.left_trigger(0)
elif button[7] == 1:
Vgamepad.right_trigger(0)
else:
Vgamepad.release_button(button=button[7])
Vgamepad.update()
# Start isn't randomized cuz that's dumb
elif event.code == 'BTN_START' and event.state == 1:
Vgamepad.press_button(button=vg.XUSB_BUTTON.XUSB_GAMEPAD_START)
Vgamepad.update()
elif event.code == 'BTN_START' and event.state == 0:
Vgamepad.release_button(button=vg.XUSB_BUTTON.XUSB_GAMEPAD_START)
Vgamepad.update()
# Timer thread, randomizes the buttons every __ seconds
def timing(randomization):
# Once this randomization occurs the length of the button inputs becomes incorrect
if randomization == "button":
random.shuffle(button)
elif randomization == "axis":
random.shuffle(axis)
elif randomization == "both":
random.shuffle(button)
random.shuffle(axis)
def main(randomization, seconds):
global devices
devices = inputs.DeviceManager()
global numControllers
numControllers = 0
# Check the number of controllers
for device in devices.gamepads:
devices.gamepads[numControllers] = device
numControllers += 1
print("numControllers: " + str(numControllers))
for i in range(numControllers):
p = threading.Thread(target=randomize, args=[i], daemon=True)
p.start()
# Infinite loop for the timer and randomizer
while True:
timer = threading.Timer(seconds, timing(randomization))
timer.daemon = True
timer.start()
time.sleep(seconds)
if __name__ == "__main__":
# Call main using default values from config file
import json
# Read config settings
with open("configs/config.json") as f:
config = json.load(f)
randomConfig = config["RANDOM"]
# Set config settings
for randomization in randomConfig["randomization"]:
if randomConfig["randomization"][str(randomization)] == str(True):
randomization = randomConfig["randomization"][str(randomization)]
if randomization is False:
print("At least one gamemode must be chosen")
quit()
# Load config settings
main(randomization, float(randomConfig['seconds'])) | 0.272702 | 0.26341 |
import sympy
from sympy.utilities import lambdify
from ndispers._baseclass import Medium, wl, T
from ndispers.helper import vars2
class FusedSilica(Medium):
"""
Fused Silica glass
Dispersion formula for refractive index
---------------------------------------
n(wl_um) = sqrt(1 + B1 * wl_um**2/(wl_um**2 - C1) + B2 * wl_um**2/(wl_um**2 - C2) + B3 * wl_um**2/(wl_um**2 - C3))
Thermo-optic coefficient
------------------------
dn/dT = 11.3e-6 /K
Validity range
---------------
0.21 to 3.71 um
Ref
----
- <NAME> and <NAME>, "Index of Refraction of Fused-quartz Glass for Ultraviolet, Visible, and Infrared Wavelengths" J. Res. Nat. Bur. Stand. 53:185–189 (1954)
- <NAME>, "Interspecimen Comparison of the Refractive Index of Fused Silica" J. Opt. Soc. Am. 55 :1205-1209 (1965)
- <NAME>., et al. "Measurements of refractive indices and thermo-optical coefficients using a white-light Michelson interferometer." Applied optics 55.24 (2016): 6639-6643.
Examples
---------
"""
__slots__ = ["_B1", "_C1", "_B2", "_C2", "_B3", "_C3", "_dndT"]
def __init__(self):
super().__init__()
""" Constants of dispersion formula """
# For ordinary ray
self._B1 = 0.6961663
self._C1 = 0.0684043**2
self._B2 = 0.4079426
self._C2 = 0.1162414**2
self._B3 = 0.8974794
self._C3 = 9.896161**2
self._dndT = 11.3e-6 #/K
@property
def symbols(self):
return [wl, T]
@property
def constants(self):
print(vars2(self))
def n_expr(self, pol='o'):
""" Sympy expression, dispersion formula """
return sympy.sqrt(1 + self._B1 * wl**2 / (wl**2 - self._C1) + self._B2 * wl**2 / (wl**2 - self._C2) + self._B3 * wl**2 / (wl**2 - self._C3)) + self._dndT * (T - 24)
def n(self, wl_um, T_degC):
"""
Refractive index as a function of wavelength
input
------
wl_um : float or array_like, wavelength in um
T_degC : float or array_like, temperature of crystal in degree C.
return
-------
Refractive index, float
"""
return super().n(wl_um, T_degC, pol='o')
def dn_wl(self, wl_um, T_degC):
return super().dn_wl(wl_um, T_degC, pol='o')
def d2n_wl(self, wl_um, T_degC):
return super().d2n_wl(wl_um, T_degC, pol='o')
def d3n_wl(self, wl_um, T_degC):
return super().d3n_wl(wl_um, T_degC, pol='o')
def GD(self, wl_um, T_degC):
"""Group Delay [fs/mm]"""
return super().GD(wl_um, T_degC, pol='o')
def GV(self, wl_um, T_degC):
"""Group Velocity [um/fs]"""
return super().GV(wl_um, T_degC, pol='o')
def ng(self, wl_um, T_degC):
"""Group index, c/Group velocity"""
return super().ng(wl_um, T_degC, pol='o')
def GVD(self, wl_um, T_degC):
"""Group Delay Dispersion [fs^2/mm]"""
return super().GVD(wl_um, T_degC, pol='o')
def TOD(self, wl_um, T_degC):
"""Third Order Dispersion [fs^3/mm]"""
return super().TOD(wl_um, T_degC, pol='o') | ndispers/media/glasses/_fusedsilica.py | import sympy
from sympy.utilities import lambdify
from ndispers._baseclass import Medium, wl, T
from ndispers.helper import vars2
class FusedSilica(Medium):
"""
Fused Silica glass
Dispersion formula for refractive index
---------------------------------------
n(wl_um) = sqrt(1 + B1 * wl_um**2/(wl_um**2 - C1) + B2 * wl_um**2/(wl_um**2 - C2) + B3 * wl_um**2/(wl_um**2 - C3))
Thermo-optic coefficient
------------------------
dn/dT = 11.3e-6 /K
Validity range
---------------
0.21 to 3.71 um
Ref
----
- <NAME> and <NAME>, "Index of Refraction of Fused-quartz Glass for Ultraviolet, Visible, and Infrared Wavelengths" J. Res. Nat. Bur. Stand. 53:185–189 (1954)
- <NAME>, "Interspecimen Comparison of the Refractive Index of Fused Silica" J. Opt. Soc. Am. 55 :1205-1209 (1965)
- <NAME>., et al. "Measurements of refractive indices and thermo-optical coefficients using a white-light Michelson interferometer." Applied optics 55.24 (2016): 6639-6643.
Examples
---------
"""
__slots__ = ["_B1", "_C1", "_B2", "_C2", "_B3", "_C3", "_dndT"]
def __init__(self):
super().__init__()
""" Constants of dispersion formula """
# For ordinary ray
self._B1 = 0.6961663
self._C1 = 0.0684043**2
self._B2 = 0.4079426
self._C2 = 0.1162414**2
self._B3 = 0.8974794
self._C3 = 9.896161**2
self._dndT = 11.3e-6 #/K
@property
def symbols(self):
return [wl, T]
@property
def constants(self):
print(vars2(self))
def n_expr(self, pol='o'):
""" Sympy expression, dispersion formula """
return sympy.sqrt(1 + self._B1 * wl**2 / (wl**2 - self._C1) + self._B2 * wl**2 / (wl**2 - self._C2) + self._B3 * wl**2 / (wl**2 - self._C3)) + self._dndT * (T - 24)
def n(self, wl_um, T_degC):
"""
Refractive index as a function of wavelength
input
------
wl_um : float or array_like, wavelength in um
T_degC : float or array_like, temperature of crystal in degree C.
return
-------
Refractive index, float
"""
return super().n(wl_um, T_degC, pol='o')
def dn_wl(self, wl_um, T_degC):
return super().dn_wl(wl_um, T_degC, pol='o')
def d2n_wl(self, wl_um, T_degC):
return super().d2n_wl(wl_um, T_degC, pol='o')
def d3n_wl(self, wl_um, T_degC):
return super().d3n_wl(wl_um, T_degC, pol='o')
def GD(self, wl_um, T_degC):
"""Group Delay [fs/mm]"""
return super().GD(wl_um, T_degC, pol='o')
def GV(self, wl_um, T_degC):
"""Group Velocity [um/fs]"""
return super().GV(wl_um, T_degC, pol='o')
def ng(self, wl_um, T_degC):
"""Group index, c/Group velocity"""
return super().ng(wl_um, T_degC, pol='o')
def GVD(self, wl_um, T_degC):
"""Group Delay Dispersion [fs^2/mm]"""
return super().GVD(wl_um, T_degC, pol='o')
def TOD(self, wl_um, T_degC):
"""Third Order Dispersion [fs^3/mm]"""
return super().TOD(wl_um, T_degC, pol='o') | 0.792906 | 0.427337 |
import json
import os
import sys
from os import environ
from pathlib import Path
from typing import Dict, Iterable
import boto3
import pytest
from _pytest.monkeypatch import MonkeyPatch
from moto import mock_ec2
from mypy_boto3_ec2.client import EC2Client
from pytest_cases import fixture
from .manage_ec2 import create_instance, set_name
if sys.version_info < (3, 8):
from typing_extensions import Literal
else:
from typing import Literal
@pytest.fixture(scope="session", autouse=True)
def aws_credentials() -> None:
"""Mocked AWS Credentials for moto."""
# From "How do I avoid tests from mutating my real infrastructure"
# <https://docs.getmoto.org/en/latest/docs/getting_started.html#how-do-i-avoid-tests-from-mutating-my-real-infrastructure>
environ["AWS_ACCESS_KEY_ID"] = "testing"
environ["AWS_SECRET_ACCESS_KEY"] = "testing"
environ["AWS_SECURITY_TOKEN"] = "testing"
environ["AWS_SESSION_TOKEN"] = "testing"
environ["AWS_DEFAULT_REGION"] = "us-east-1"
@pytest.fixture(scope="session")
def monkeypatch_session() -> Iterable[MonkeyPatch]:
"""Monkeypatch object valid in session context."""
# <https://github.com/pytest-dev/pytest/issues/363#issuecomment-289830794>
monkeypatch = MonkeyPatch()
yield monkeypatch
monkeypatch.undo()
@pytest.fixture(scope="session", autouse=True)
def replace_aws_cli_executable(monkeypatch_session: pytest.MonkeyPatch) -> None:
"""Adjust path so that AWS CLI is replaced with our version."""
exe_path = Path(__file__).parent / "mock_aws_executable"
monkeypatch_session.setenv(
"PATH",
str(
exe_path.resolve(),
),
prepend=os.pathsep,
)
@pytest.fixture(scope="session")
def mocked_ec2_client(aws_credentials) -> Iterable[EC2Client]:
with mock_ec2():
yield boto3.client("ec2")
@fixture(scope="function")
def boto3_disabled(monkeypatch) -> Literal["boto3_disabled"]:
monkeypatch.setitem(sys.modules, "boto3", None)
return "boto3_disabled"
boto3_infrastructure_to_mock = {
"named": "my-named-boto3-instance",
"unnamed": None,
"duplicate1": "duplicate-boto3-name",
"duplicate2": "duplicate-boto3-name",
}
infrastructure_to_mock = {
"named": "my-named-instance",
"unnamed": None,
"duplicate1": "duplicate-name",
"duplicate2": "duplicate-name",
}
@pytest.fixture(scope="session")
def mocked_ec2_instances(mocked_ec2_client: EC2Client) -> Dict[str, str]:
ids = {}
for label, name in infrastructure_to_mock.items():
generated_ec2_id = create_instance(mocked_ec2_client)
ids[label] = generated_ec2_id
if name is not None:
set_name(mocked_ec2_client, generated_ec2_id, name)
awscli_output = json.dumps(
mocked_ec2_client.describe_instances(),
default=str,
indent=2,
)
awscli_output_file = Path(__file__).parent / "mock_aws_executable" / "output.json"
awscli_output_file.write_text(awscli_output)
return ids | tests/conftest.py | import json
import os
import sys
from os import environ
from pathlib import Path
from typing import Dict, Iterable
import boto3
import pytest
from _pytest.monkeypatch import MonkeyPatch
from moto import mock_ec2
from mypy_boto3_ec2.client import EC2Client
from pytest_cases import fixture
from .manage_ec2 import create_instance, set_name
if sys.version_info < (3, 8):
from typing_extensions import Literal
else:
from typing import Literal
@pytest.fixture(scope="session", autouse=True)
def aws_credentials() -> None:
"""Mocked AWS Credentials for moto."""
# From "How do I avoid tests from mutating my real infrastructure"
# <https://docs.getmoto.org/en/latest/docs/getting_started.html#how-do-i-avoid-tests-from-mutating-my-real-infrastructure>
environ["AWS_ACCESS_KEY_ID"] = "testing"
environ["AWS_SECRET_ACCESS_KEY"] = "testing"
environ["AWS_SECURITY_TOKEN"] = "testing"
environ["AWS_SESSION_TOKEN"] = "testing"
environ["AWS_DEFAULT_REGION"] = "us-east-1"
@pytest.fixture(scope="session")
def monkeypatch_session() -> Iterable[MonkeyPatch]:
"""Monkeypatch object valid in session context."""
# <https://github.com/pytest-dev/pytest/issues/363#issuecomment-289830794>
monkeypatch = MonkeyPatch()
yield monkeypatch
monkeypatch.undo()
@pytest.fixture(scope="session", autouse=True)
def replace_aws_cli_executable(monkeypatch_session: pytest.MonkeyPatch) -> None:
"""Adjust path so that AWS CLI is replaced with our version."""
exe_path = Path(__file__).parent / "mock_aws_executable"
monkeypatch_session.setenv(
"PATH",
str(
exe_path.resolve(),
),
prepend=os.pathsep,
)
@pytest.fixture(scope="session")
def mocked_ec2_client(aws_credentials) -> Iterable[EC2Client]:
with mock_ec2():
yield boto3.client("ec2")
@fixture(scope="function")
def boto3_disabled(monkeypatch) -> Literal["boto3_disabled"]:
monkeypatch.setitem(sys.modules, "boto3", None)
return "boto3_disabled"
boto3_infrastructure_to_mock = {
"named": "my-named-boto3-instance",
"unnamed": None,
"duplicate1": "duplicate-boto3-name",
"duplicate2": "duplicate-boto3-name",
}
infrastructure_to_mock = {
"named": "my-named-instance",
"unnamed": None,
"duplicate1": "duplicate-name",
"duplicate2": "duplicate-name",
}
@pytest.fixture(scope="session")
def mocked_ec2_instances(mocked_ec2_client: EC2Client) -> Dict[str, str]:
ids = {}
for label, name in infrastructure_to_mock.items():
generated_ec2_id = create_instance(mocked_ec2_client)
ids[label] = generated_ec2_id
if name is not None:
set_name(mocked_ec2_client, generated_ec2_id, name)
awscli_output = json.dumps(
mocked_ec2_client.describe_instances(),
default=str,
indent=2,
)
awscli_output_file = Path(__file__).parent / "mock_aws_executable" / "output.json"
awscli_output_file.write_text(awscli_output)
return ids | 0.450359 | 0.189221 |
from typing import List
from quantity import Quantity
from quantity_space import QuantitySpace
from quantity_state import QuantityState
class QualitativeState:
def __init__(self):
self.qualitative_state_quantities = []
def add_quantity(self, quantity: Quantity, value: QuantitySpace, gradient: str):
quantity_state = QuantityState(quantity, value, gradient)
self.qualitative_state_quantities.append(quantity_state)
def add_quantity_state(self, quantity_state: QuantityState):
self.qualitative_state_quantities.append(quantity_state)
def get_quantities(self) -> List[QuantityState]:
return self.qualitative_state_quantities
def get_quantity(self, quantity_label: str) -> QuantityState:
for quantity_state in self.qualitative_state_quantities:
if quantity_state.quantity.label == quantity_label:
return quantity_state
def get_string_representation(self):
result = ''
for quantity in self.get_quantities():
result = result + \
f'{quantity.quantity.label}[{quantity.value.label},{quantity.gradient}]\n'
return result
def contains_gradient(self, gradient: str) -> bool:
for quantity_state in self.qualitative_state_quantities:
if quantity_state.gradient == gradient:
return True
return False
def __eq__(self, other):
"""Overrides the default implementation"""
if not isinstance(other, QualitativeState):
return False
for i, quantity in enumerate(self.get_quantities()):
if (self.qualitative_state_quantities[i].quantity.label != other.qualitative_state_quantities[i].quantity.label
or self.qualitative_state_quantities[i].value.label != other.qualitative_state_quantities[i].value.label
or self.qualitative_state_quantities[i].gradient != other.qualitative_state_quantities[i].gradient):
return False
return True
def __str__(self):
result = '('
for quantity in self.get_quantities():
result = result + \
f'({quantity.quantity.label}, {quantity.value.label}, {quantity.gradient}), '
result = result + ')'
return result | qualitative-reasoning/qualitative_state.py | from typing import List
from quantity import Quantity
from quantity_space import QuantitySpace
from quantity_state import QuantityState
class QualitativeState:
def __init__(self):
self.qualitative_state_quantities = []
def add_quantity(self, quantity: Quantity, value: QuantitySpace, gradient: str):
quantity_state = QuantityState(quantity, value, gradient)
self.qualitative_state_quantities.append(quantity_state)
def add_quantity_state(self, quantity_state: QuantityState):
self.qualitative_state_quantities.append(quantity_state)
def get_quantities(self) -> List[QuantityState]:
return self.qualitative_state_quantities
def get_quantity(self, quantity_label: str) -> QuantityState:
for quantity_state in self.qualitative_state_quantities:
if quantity_state.quantity.label == quantity_label:
return quantity_state
def get_string_representation(self):
result = ''
for quantity in self.get_quantities():
result = result + \
f'{quantity.quantity.label}[{quantity.value.label},{quantity.gradient}]\n'
return result
def contains_gradient(self, gradient: str) -> bool:
for quantity_state in self.qualitative_state_quantities:
if quantity_state.gradient == gradient:
return True
return False
def __eq__(self, other):
"""Overrides the default implementation"""
if not isinstance(other, QualitativeState):
return False
for i, quantity in enumerate(self.get_quantities()):
if (self.qualitative_state_quantities[i].quantity.label != other.qualitative_state_quantities[i].quantity.label
or self.qualitative_state_quantities[i].value.label != other.qualitative_state_quantities[i].value.label
or self.qualitative_state_quantities[i].gradient != other.qualitative_state_quantities[i].gradient):
return False
return True
def __str__(self):
result = '('
for quantity in self.get_quantities():
result = result + \
f'({quantity.quantity.label}, {quantity.value.label}, {quantity.gradient}), '
result = result + ')'
return result | 0.898383 | 0.438304 |
from urls import requests_url
def test_get_requests_succeeds(valid_request_model, client, request_headers):
"""
Tests that response is okay.
Args:
valid_request_model (Model): a valid model created by a fixture.
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.get(requests_url(), headers=request_headers)
response = res.get_json()
assert response['success']
assert response['message'] == 'requests fetched successfully'
assert len(response['data']) == 1
assert res.status_code == 200
def test_get_request_succeeds_with_valid_request_id_in_params(
valid_request_model, client, request_headers
):
"""
Tests that response is okay when request id exists.
Args:
valid_request_model (Model): a valid model created by a fixture.
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.get(requests_url(1), headers=request_headers)
response = res.get_json()
assert response['success']
assert response['message'] == 'request fetched successfully'
assert response['data']['title'] == 'Improve customer care services'
assert res.status_code == 200
def test_get_request_succeeds_with_non_existent_request_id_in_params(
client, request_headers
):
"""
Tests that response is okay when request id exists.
Args:
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.get(requests_url(1), headers=request_headers)
response = res.get_json()
assert not response['success']
assert response['message'] == 'cannot find specified request'
assert res.status_code == 404
def test_post_requests_succeeds_with_valid_request_body(
valid_request_body, client, request_headers
):
"""
Tests that response is okay when request body is valid.
Args:
valid_request_body (dict): a valid request body created by a fixture.
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(), headers=request_headers, json=valid_request_body
)
response = res.get_json()
assert response['success']
assert response['message'] == 'request created successfully'
assert response['data']['title'] == 'Improve customer care services'
assert res.status_code == 201
def test_post_requests_succeeds_with_existing_priority_in_body(
valid_request_body_with_existing_priority, client, request_headers
):
"""
Tests that response shows failure when request body has conflicting
priority.
Args:
valid_request_body_with_existing_priority (dict): a request body
with existing priority for a client
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(),
headers=request_headers,
json=valid_request_body_with_existing_priority
)
response = res.get_json()
assert response['success']
assert (response['message'] == 'request created successfully')
assert res.status_code == 201
def test_post_requests_fails_with_missing_fields_in_request_body(
invalid_request_body_with_missing_fields, client, request_headers
):
"""
Tests that response shows failure when request body has missing fields.
Args:
invalid_request_body_with_missing_fields (dict): a request body with
missing fields created by a fixture.
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(),
headers=request_headers,
json=invalid_request_body_with_missing_fields
)
response = res.get_json()
assert not response['success']
assert (
response['message']['client_id'][0] ==
'Missing data for required field.'
)
assert (
response['message']['staff_id'][0] ==
'Missing data for required field.'
)
assert res.status_code == 400
def test_post_requests_fails_with_non_existent_client_in_body(
invalid_request_body_with_non_existent_client, client, request_headers
):
"""
Tests that response shows failure when request body has non-existent
client id.
Args:
invalid_request_body_with_non_existent_client (dict): a request body
with non-existent client id
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(),
headers=request_headers,
json=invalid_request_body_with_non_existent_client
)
response = res.get_json()
assert not response['success']
assert response['message'] == 'cannot find specified client'
assert res.status_code == 404
def test_post_requests_fails_with_non_existent_staff_in_body(
invalid_request_body_with_non_existent_staff, client, request_headers
):
"""
Tests that response shows failure when request body has non-existent
staff id.
Args:
invalid_request_body_with_non_existent_staff (dict): a request body
with non-existent staff id
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(),
headers=request_headers,
json=invalid_request_body_with_non_existent_staff
)
response = res.get_json()
assert not response['success']
assert response['message'] == 'cannot find specified staff'
assert res.status_code == 404
def test_post_requests_fails_with_invalid_enum_value_in_body(
invalid_request_body_with_invalid_enum_value, client, request_headers
):
"""
Tests that response shows failure when request body has invalid enum value.
Args:
invalid_request_body_with_invalid_enum_value (dict): a request body
with invalid enum value
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(),
headers=request_headers,
json=invalid_request_body_with_invalid_enum_value
)
response = res.get_json()
assert not response['success']
assert (
response['message']['product_area'][0] == 'Invalid enum value POLITICS'
)
assert res.status_code == 400
def test_post_requests_fails_with_invalid_priority_in_body(
invalid_request_body_with_invalid_priority, client, request_headers
):
"""
Tests that response shows failure when request body has invalid enum value.
Args:
invalid_request_body_with_invalid_enum_value (dict): a request body
with invalid enum value
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(),
headers=request_headers,
json=invalid_request_body_with_invalid_priority
)
response = res.get_json()
assert not response['success']
assert (
response['message'] == 'priority value must be between 0 and 100'
)
assert res.status_code == 400
def test_post_requests_fails_with_bad_json_in_body(
bad_request_json_string, client, request_headers
):
"""
Tests that response shows failure when request body has ill-formed json
string.
Args:
bad_request_json_string (dict): a request body with ill-formed json
string
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(), headers=request_headers, data=bad_request_json_string
)
response = res.get_json()
assert not response['success']
assert res.status_code == 400
def test_post_requests_fails_with_invalid_string_length_in_body(
invalid_request_body_with_invalid_string_length, client, request_headers
):
"""
Tests that response shows failure when request body has invalid
string length in body.
Args:
invalid_request_body_with_invalid_string_length (dict): a request body
with invalid string length
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(),
headers=request_headers,
json=invalid_request_body_with_invalid_string_length
)
response = res.get_json()
assert not response['success']
assert response['message'] == {
'description': ['string cannot be longer than 250']
}
assert res.status_code == 400
def test_post_requests_fails_with_invalid_invalid_target_date_in_body(
invalid_request_body_with_invalid_date, client, request_headers
):
"""
Tests that response shows failure when request body has invalid
target date.
Args:
invalid_request_body_with_invalid_string_length (dict): a request body
with invalid string length
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(),
headers=request_headers,
json=invalid_request_body_with_invalid_date
)
response = res.get_json()
assert not response['success']
assert response['message'] == {
'target_date': ['date can only be a later time in the future']
}
assert res.status_code == 400 | tests/views/test_request_endpoints.py | from urls import requests_url
def test_get_requests_succeeds(valid_request_model, client, request_headers):
"""
Tests that response is okay.
Args:
valid_request_model (Model): a valid model created by a fixture.
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.get(requests_url(), headers=request_headers)
response = res.get_json()
assert response['success']
assert response['message'] == 'requests fetched successfully'
assert len(response['data']) == 1
assert res.status_code == 200
def test_get_request_succeeds_with_valid_request_id_in_params(
valid_request_model, client, request_headers
):
"""
Tests that response is okay when request id exists.
Args:
valid_request_model (Model): a valid model created by a fixture.
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.get(requests_url(1), headers=request_headers)
response = res.get_json()
assert response['success']
assert response['message'] == 'request fetched successfully'
assert response['data']['title'] == 'Improve customer care services'
assert res.status_code == 200
def test_get_request_succeeds_with_non_existent_request_id_in_params(
client, request_headers
):
"""
Tests that response is okay when request id exists.
Args:
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.get(requests_url(1), headers=request_headers)
response = res.get_json()
assert not response['success']
assert response['message'] == 'cannot find specified request'
assert res.status_code == 404
def test_post_requests_succeeds_with_valid_request_body(
valid_request_body, client, request_headers
):
"""
Tests that response is okay when request body is valid.
Args:
valid_request_body (dict): a valid request body created by a fixture.
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(), headers=request_headers, json=valid_request_body
)
response = res.get_json()
assert response['success']
assert response['message'] == 'request created successfully'
assert response['data']['title'] == 'Improve customer care services'
assert res.status_code == 201
def test_post_requests_succeeds_with_existing_priority_in_body(
valid_request_body_with_existing_priority, client, request_headers
):
"""
Tests that response shows failure when request body has conflicting
priority.
Args:
valid_request_body_with_existing_priority (dict): a request body
with existing priority for a client
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(),
headers=request_headers,
json=valid_request_body_with_existing_priority
)
response = res.get_json()
assert response['success']
assert (response['message'] == 'request created successfully')
assert res.status_code == 201
def test_post_requests_fails_with_missing_fields_in_request_body(
invalid_request_body_with_missing_fields, client, request_headers
):
"""
Tests that response shows failure when request body has missing fields.
Args:
invalid_request_body_with_missing_fields (dict): a request body with
missing fields created by a fixture.
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(),
headers=request_headers,
json=invalid_request_body_with_missing_fields
)
response = res.get_json()
assert not response['success']
assert (
response['message']['client_id'][0] ==
'Missing data for required field.'
)
assert (
response['message']['staff_id'][0] ==
'Missing data for required field.'
)
assert res.status_code == 400
def test_post_requests_fails_with_non_existent_client_in_body(
invalid_request_body_with_non_existent_client, client, request_headers
):
"""
Tests that response shows failure when request body has non-existent
client id.
Args:
invalid_request_body_with_non_existent_client (dict): a request body
with non-existent client id
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(),
headers=request_headers,
json=invalid_request_body_with_non_existent_client
)
response = res.get_json()
assert not response['success']
assert response['message'] == 'cannot find specified client'
assert res.status_code == 404
def test_post_requests_fails_with_non_existent_staff_in_body(
invalid_request_body_with_non_existent_staff, client, request_headers
):
"""
Tests that response shows failure when request body has non-existent
staff id.
Args:
invalid_request_body_with_non_existent_staff (dict): a request body
with non-existent staff id
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(),
headers=request_headers,
json=invalid_request_body_with_non_existent_staff
)
response = res.get_json()
assert not response['success']
assert response['message'] == 'cannot find specified staff'
assert res.status_code == 404
def test_post_requests_fails_with_invalid_enum_value_in_body(
invalid_request_body_with_invalid_enum_value, client, request_headers
):
"""
Tests that response shows failure when request body has invalid enum value.
Args:
invalid_request_body_with_invalid_enum_value (dict): a request body
with invalid enum value
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(),
headers=request_headers,
json=invalid_request_body_with_invalid_enum_value
)
response = res.get_json()
assert not response['success']
assert (
response['message']['product_area'][0] == 'Invalid enum value POLITICS'
)
assert res.status_code == 400
def test_post_requests_fails_with_invalid_priority_in_body(
invalid_request_body_with_invalid_priority, client, request_headers
):
"""
Tests that response shows failure when request body has invalid enum value.
Args:
invalid_request_body_with_invalid_enum_value (dict): a request body
with invalid enum value
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(),
headers=request_headers,
json=invalid_request_body_with_invalid_priority
)
response = res.get_json()
assert not response['success']
assert (
response['message'] == 'priority value must be between 0 and 100'
)
assert res.status_code == 400
def test_post_requests_fails_with_bad_json_in_body(
bad_request_json_string, client, request_headers
):
"""
Tests that response shows failure when request body has ill-formed json
string.
Args:
bad_request_json_string (dict): a request body with ill-formed json
string
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(), headers=request_headers, data=bad_request_json_string
)
response = res.get_json()
assert not response['success']
assert res.status_code == 400
def test_post_requests_fails_with_invalid_string_length_in_body(
invalid_request_body_with_invalid_string_length, client, request_headers
):
"""
Tests that response shows failure when request body has invalid
string length in body.
Args:
invalid_request_body_with_invalid_string_length (dict): a request body
with invalid string length
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(),
headers=request_headers,
json=invalid_request_body_with_invalid_string_length
)
response = res.get_json()
assert not response['success']
assert response['message'] == {
'description': ['string cannot be longer than 250']
}
assert res.status_code == 400
def test_post_requests_fails_with_invalid_invalid_target_date_in_body(
invalid_request_body_with_invalid_date, client, request_headers
):
"""
Tests that response shows failure when request body has invalid
target date.
Args:
invalid_request_body_with_invalid_string_length (dict): a request body
with invalid string length
client (FlaskClient): a test client created by a fixture.
request_headers (dict): a header created by a fixture.
"""
res = client.post(
requests_url(),
headers=request_headers,
json=invalid_request_body_with_invalid_date
)
response = res.get_json()
assert not response['success']
assert response['message'] == {
'target_date': ['date can only be a later time in the future']
}
assert res.status_code == 400 | 0.841858 | 0.381623 |
Crypits_0_Dice_Reward_Types = {
'[1]': 'crypits',
'[2]': 'crypits',
'[3]': 'crypits',
'[4]': 'crypits',
'[5]': 'crypits',
'[6]': 'crypits',
'[7]': 'crypits',
'[8]': 'crypits',
'[9]': 'crypits',
'[10]': 'crypits',
'[11]': 'crypits',
'[12]': 'crypits',
'[13]': 'crypits',
'[14]': 'crypits',
'[15]': 'crypits',
'[16]': 'crypits',
'[17]': 'crypits',
'[18]': 'crypits',
'[19]': 'crypits',
'[20]': 'crypits'
}
Crypits_0_Dice_Rewards = {
'reward_0': {
'reward': 10000000000000000,
},
'reward_1': {
'reward': 10000000000000000,
},
'reward_2': {
'reward': 10000000000000000,
},
'reward_3': {
'reward': 10000000000000000,
},
'reward_4': {
'reward': 10000000000000000,
},
'reward_5': {
'reward': 10000000000000000,
},
'reward_6': {
'reward': 10000000000000000,
},
'reward_7': {
'reward': 155555550000000000,
},
'reward_8': {
'reward': 150000000000000000,
},
'reward_9': {
'reward': 10000000000000000,
},
'reward_10': {
'reward': 10000000000000000,
},
'reward_11': {
'reward': 40000000000000000,
},
'reward_12': {
'reward': 90000000000000000,
},
'reward_13': {
'reward': 500000000000000000,
},
'reward_14': {
'reward': 190000000000000000,
},
'reward_15': {
'reward': 170000000000000000,
},
'reward_16': {
'reward': 990000000000000000,
},
'reward_17': {
'reward': 9900000000000000,
},
'reward_18': {
'reward': 7700000000000000,
},
'reward_19': {
'reward': 876000000000000000,
},
}
Crypits_0_Dice_Returns = {
'[1]': {
'public return': 'Rolled A 1!',
'self return': 'You Rolled A 1!',
'reward type': Crypits_0_Dice_Reward_Types['[1]'],
'reward': Crypits_0_Dice_Rewards['reward_0']['reward'],
},
'[2]': {
'self return': 'You Rolled A 2!',
'reward type': Crypits_0_Dice_Reward_Types['[2]'],
'reward': Crypits_0_Dice_Rewards['reward_1']['reward'],
},
'[3]': {
'self return': 'You Rolled A 3!',
'reward type': Crypits_0_Dice_Reward_Types['[3]'],
'reward': Crypits_0_Dice_Rewards['reward_2']['reward'],
},
'[4]': {
'self return': 'You Rolled A 4!',
'reward type': Crypits_0_Dice_Reward_Types['[4]'],
'reward': Crypits_0_Dice_Rewards['reward_3']['reward'],
},
'[5]': {
'self return': 'You Rolled A 5!',
'reward type': Crypits_0_Dice_Reward_Types['[5]'],
'reward': Crypits_0_Dice_Rewards['reward_4']['reward'],
},
'[6]': {
'self return': 'You Rolled A 6!',
'reward type': Crypits_0_Dice_Reward_Types['[6]'],
'reward': Crypits_0_Dice_Rewards['reward_5']['reward'],
},
'[7]': {
'self return': 'You Rolled A 7!',
'reward type': Crypits_0_Dice_Reward_Types['[7]'],
'reward': Crypits_0_Dice_Rewards['reward_6']['reward'],
},
'[8]': {
'self return': 'You Rolled A 8!',
'reward type': Crypits_0_Dice_Reward_Types['[8]'],
'reward': Crypits_0_Dice_Rewards['reward_7']['reward'],
},
'[9]': {
'self return': 'You Rolled A 9!',
'reward type': Crypits_0_Dice_Reward_Types['[9]'],
'reward': Crypits_0_Dice_Rewards['reward_8']['reward'],
},
'[10]': {
'self return': 'You Rolled A 10!',
'reward type': Crypits_0_Dice_Reward_Types['[10]'],
'reward': Crypits_0_Dice_Rewards['reward_9']['reward'],
},
'[11]': {
'self return': 'You Rolled A 11!',
'reward type': Crypits_0_Dice_Reward_Types['[11]'],
'reward': Crypits_0_Dice_Rewards['reward_10']['reward'],
},
'[12]': {
'self return': 'You Rolled A 12!',
'reward type': Crypits_0_Dice_Reward_Types['[12]'],
'reward': Crypits_0_Dice_Rewards['reward_11']['reward'],
},
'[13]': {
'self return': 'You Rolled A 13!',
'reward type': Crypits_0_Dice_Reward_Types['[13]'],
'reward': Crypits_0_Dice_Rewards['reward_12']['reward'],
},
'[14]': {
'self return': 'You Rolled A 14!',
'reward type': Crypits_0_Dice_Reward_Types['[14]'],
'reward': Crypits_0_Dice_Rewards['reward_13']['reward'],
},
'[15]': {
'self return': 'You Rolled A 15!',
'reward type': Crypits_0_Dice_Reward_Types['[15]'],
'reward': Crypits_0_Dice_Rewards['reward_14']['reward'],
},
'[16]': {
'self return': 'You Rolled A 16!',
'reward type': Crypits_0_Dice_Reward_Types['[16]'],
'reward': Crypits_0_Dice_Rewards['reward_15']['reward'],
},
'[17]': {
'self return': 'You Rolled A 17!',
'reward type': Crypits_0_Dice_Reward_Types['[17]'],
'reward': Crypits_0_Dice_Rewards['reward_16']['reward'],
},
'[18]': {
'self return': 'You Rolled A 18!',
'reward type': Crypits_0_Dice_Reward_Types['[18]'],
'reward': Crypits_0_Dice_Rewards['reward_17']['reward'],
},
'[19]': {
'return': 'You Rolled A 19!',
'reward type': Crypits_0_Dice_Reward_Types['[19]'],
'reward': Crypits_0_Dice_Rewards['reward_18']['reward'],
},
'[20]': {
'return': 'You Rolled A 20!',
'reward type': Crypits_0_Dice_Reward_Types['[20]'],
'reward': Crypits_0_Dice_Rewards['reward_19']['reward'],
},
} | crypits_rewards.py | Crypits_0_Dice_Reward_Types = {
'[1]': 'crypits',
'[2]': 'crypits',
'[3]': 'crypits',
'[4]': 'crypits',
'[5]': 'crypits',
'[6]': 'crypits',
'[7]': 'crypits',
'[8]': 'crypits',
'[9]': 'crypits',
'[10]': 'crypits',
'[11]': 'crypits',
'[12]': 'crypits',
'[13]': 'crypits',
'[14]': 'crypits',
'[15]': 'crypits',
'[16]': 'crypits',
'[17]': 'crypits',
'[18]': 'crypits',
'[19]': 'crypits',
'[20]': 'crypits'
}
Crypits_0_Dice_Rewards = {
'reward_0': {
'reward': 10000000000000000,
},
'reward_1': {
'reward': 10000000000000000,
},
'reward_2': {
'reward': 10000000000000000,
},
'reward_3': {
'reward': 10000000000000000,
},
'reward_4': {
'reward': 10000000000000000,
},
'reward_5': {
'reward': 10000000000000000,
},
'reward_6': {
'reward': 10000000000000000,
},
'reward_7': {
'reward': 155555550000000000,
},
'reward_8': {
'reward': 150000000000000000,
},
'reward_9': {
'reward': 10000000000000000,
},
'reward_10': {
'reward': 10000000000000000,
},
'reward_11': {
'reward': 40000000000000000,
},
'reward_12': {
'reward': 90000000000000000,
},
'reward_13': {
'reward': 500000000000000000,
},
'reward_14': {
'reward': 190000000000000000,
},
'reward_15': {
'reward': 170000000000000000,
},
'reward_16': {
'reward': 990000000000000000,
},
'reward_17': {
'reward': 9900000000000000,
},
'reward_18': {
'reward': 7700000000000000,
},
'reward_19': {
'reward': 876000000000000000,
},
}
Crypits_0_Dice_Returns = {
'[1]': {
'public return': 'Rolled A 1!',
'self return': 'You Rolled A 1!',
'reward type': Crypits_0_Dice_Reward_Types['[1]'],
'reward': Crypits_0_Dice_Rewards['reward_0']['reward'],
},
'[2]': {
'self return': 'You Rolled A 2!',
'reward type': Crypits_0_Dice_Reward_Types['[2]'],
'reward': Crypits_0_Dice_Rewards['reward_1']['reward'],
},
'[3]': {
'self return': 'You Rolled A 3!',
'reward type': Crypits_0_Dice_Reward_Types['[3]'],
'reward': Crypits_0_Dice_Rewards['reward_2']['reward'],
},
'[4]': {
'self return': 'You Rolled A 4!',
'reward type': Crypits_0_Dice_Reward_Types['[4]'],
'reward': Crypits_0_Dice_Rewards['reward_3']['reward'],
},
'[5]': {
'self return': 'You Rolled A 5!',
'reward type': Crypits_0_Dice_Reward_Types['[5]'],
'reward': Crypits_0_Dice_Rewards['reward_4']['reward'],
},
'[6]': {
'self return': 'You Rolled A 6!',
'reward type': Crypits_0_Dice_Reward_Types['[6]'],
'reward': Crypits_0_Dice_Rewards['reward_5']['reward'],
},
'[7]': {
'self return': 'You Rolled A 7!',
'reward type': Crypits_0_Dice_Reward_Types['[7]'],
'reward': Crypits_0_Dice_Rewards['reward_6']['reward'],
},
'[8]': {
'self return': 'You Rolled A 8!',
'reward type': Crypits_0_Dice_Reward_Types['[8]'],
'reward': Crypits_0_Dice_Rewards['reward_7']['reward'],
},
'[9]': {
'self return': 'You Rolled A 9!',
'reward type': Crypits_0_Dice_Reward_Types['[9]'],
'reward': Crypits_0_Dice_Rewards['reward_8']['reward'],
},
'[10]': {
'self return': 'You Rolled A 10!',
'reward type': Crypits_0_Dice_Reward_Types['[10]'],
'reward': Crypits_0_Dice_Rewards['reward_9']['reward'],
},
'[11]': {
'self return': 'You Rolled A 11!',
'reward type': Crypits_0_Dice_Reward_Types['[11]'],
'reward': Crypits_0_Dice_Rewards['reward_10']['reward'],
},
'[12]': {
'self return': 'You Rolled A 12!',
'reward type': Crypits_0_Dice_Reward_Types['[12]'],
'reward': Crypits_0_Dice_Rewards['reward_11']['reward'],
},
'[13]': {
'self return': 'You Rolled A 13!',
'reward type': Crypits_0_Dice_Reward_Types['[13]'],
'reward': Crypits_0_Dice_Rewards['reward_12']['reward'],
},
'[14]': {
'self return': 'You Rolled A 14!',
'reward type': Crypits_0_Dice_Reward_Types['[14]'],
'reward': Crypits_0_Dice_Rewards['reward_13']['reward'],
},
'[15]': {
'self return': 'You Rolled A 15!',
'reward type': Crypits_0_Dice_Reward_Types['[15]'],
'reward': Crypits_0_Dice_Rewards['reward_14']['reward'],
},
'[16]': {
'self return': 'You Rolled A 16!',
'reward type': Crypits_0_Dice_Reward_Types['[16]'],
'reward': Crypits_0_Dice_Rewards['reward_15']['reward'],
},
'[17]': {
'self return': 'You Rolled A 17!',
'reward type': Crypits_0_Dice_Reward_Types['[17]'],
'reward': Crypits_0_Dice_Rewards['reward_16']['reward'],
},
'[18]': {
'self return': 'You Rolled A 18!',
'reward type': Crypits_0_Dice_Reward_Types['[18]'],
'reward': Crypits_0_Dice_Rewards['reward_17']['reward'],
},
'[19]': {
'return': 'You Rolled A 19!',
'reward type': Crypits_0_Dice_Reward_Types['[19]'],
'reward': Crypits_0_Dice_Rewards['reward_18']['reward'],
},
'[20]': {
'return': 'You Rolled A 20!',
'reward type': Crypits_0_Dice_Reward_Types['[20]'],
'reward': Crypits_0_Dice_Rewards['reward_19']['reward'],
},
} | 0.428233 | 0.383468 |
from functools import wraps, partial
from typing import Union, Optional, TypeVar, Type
import pytest
import andi
class Foo:
pass
class Bar:
pass
class Baz:
pass
def test_andi():
def func1(x: Foo):
pass
def func2():
pass
def func3(x: Bar, y: Foo):
pass
assert andi.inspect(Foo.__init__) == {}
assert andi.inspect(func1) == {'x': [Foo]}
assert andi.inspect(func2) == {}
assert andi.inspect(func3) == {'x': [Bar], 'y': [Foo]}
def test_union():
def func(x: Union[Foo, Bar]):
pass
assert andi.inspect(func) == {'x': [Foo, Bar]}
def test_optional():
def func(x: Optional[Foo]):
pass
assert andi.inspect(func) == {'x': [Foo, type(None)]}
def test_optional_union():
def func(x: Optional[Union[Foo, Baz]]):
pass
assert andi.inspect(func) == {'x': [Foo, Baz, type(None)]}
def test_not_annotated():
def func(x):
pass
assert andi.inspect(func) == {'x': []}
def test_string_types():
def func(x: 'Bar'):
pass
assert andi.inspect(func) == {'x': [Bar]}
def test_string_types_with_fn():
""" String type references not supported for __init__ in classes declared
within functions """
class Fuu:
def __init__(self, bur :'Bur'):
pass
class Bur:
pass
with pytest.raises(NameError):
andi.inspect(Fuu.__init__)
def test_init_methods():
class MyClass:
def __init__(self, x: Foo):
self.x = x
assert andi.inspect(MyClass.__init__) == {'x': [Foo]}
assert andi.inspect(MyClass) == {'x': [Foo]}
def test_classmethod():
T = TypeVar('T')
class MyClass:
@classmethod
def from_foo(cls: Type[T], foo: Foo) -> T:
return cls()
assert andi.inspect(MyClass.from_foo) == {'foo': [Foo]}
def test_decorated():
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
return fn(*args, **kwargs)
return wrapper
@decorator
def func(x: 'Bar'):
pass
assert andi.inspect(func) == {'x': [Bar]}
@pytest.mark.xfail(reason="functools.partial support is not implemented")
def test_partial():
def func(x: Foo, y: Bar):
pass
func_nofoo = partial(func, x=Foo())
assert andi.inspect(func_nofoo) == {'y': [Bar]}
def test_callable_object():
class MyClass:
def __call__(self, x: Bar):
pass
obj = MyClass()
assert andi.inspect(obj) == {'x': [Bar]} | tests/test_inspect.py | from functools import wraps, partial
from typing import Union, Optional, TypeVar, Type
import pytest
import andi
class Foo:
pass
class Bar:
pass
class Baz:
pass
def test_andi():
def func1(x: Foo):
pass
def func2():
pass
def func3(x: Bar, y: Foo):
pass
assert andi.inspect(Foo.__init__) == {}
assert andi.inspect(func1) == {'x': [Foo]}
assert andi.inspect(func2) == {}
assert andi.inspect(func3) == {'x': [Bar], 'y': [Foo]}
def test_union():
def func(x: Union[Foo, Bar]):
pass
assert andi.inspect(func) == {'x': [Foo, Bar]}
def test_optional():
def func(x: Optional[Foo]):
pass
assert andi.inspect(func) == {'x': [Foo, type(None)]}
def test_optional_union():
def func(x: Optional[Union[Foo, Baz]]):
pass
assert andi.inspect(func) == {'x': [Foo, Baz, type(None)]}
def test_not_annotated():
def func(x):
pass
assert andi.inspect(func) == {'x': []}
def test_string_types():
def func(x: 'Bar'):
pass
assert andi.inspect(func) == {'x': [Bar]}
def test_string_types_with_fn():
""" String type references not supported for __init__ in classes declared
within functions """
class Fuu:
def __init__(self, bur :'Bur'):
pass
class Bur:
pass
with pytest.raises(NameError):
andi.inspect(Fuu.__init__)
def test_init_methods():
class MyClass:
def __init__(self, x: Foo):
self.x = x
assert andi.inspect(MyClass.__init__) == {'x': [Foo]}
assert andi.inspect(MyClass) == {'x': [Foo]}
def test_classmethod():
T = TypeVar('T')
class MyClass:
@classmethod
def from_foo(cls: Type[T], foo: Foo) -> T:
return cls()
assert andi.inspect(MyClass.from_foo) == {'foo': [Foo]}
def test_decorated():
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
return fn(*args, **kwargs)
return wrapper
@decorator
def func(x: 'Bar'):
pass
assert andi.inspect(func) == {'x': [Bar]}
@pytest.mark.xfail(reason="functools.partial support is not implemented")
def test_partial():
def func(x: Foo, y: Bar):
pass
func_nofoo = partial(func, x=Foo())
assert andi.inspect(func_nofoo) == {'y': [Bar]}
def test_callable_object():
class MyClass:
def __call__(self, x: Bar):
pass
obj = MyClass()
assert andi.inspect(obj) == {'x': [Bar]} | 0.893495 | 0.712282 |
import socket
import selectors
import types
import fire
class SocketReader():
def __init__(self, host, port, sep = '\n'):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((host, port))
sock.listen()
sock.setblocking(False)
self.sock = sock
sel = selectors.DefaultSelector()
sel.register(sock, selectors.EVENT_READ, data=None)
self.sel = sel
self.sep = sep
def service_connection(self, key, mask, selector, separator):
sock = key.fileobj
data = key.data
if mask & selectors.EVENT_READ:
recv_data = sock.recv(1024)
if recv_data: # Receive chunk of data from socket -> transform to UTF8 string
msg = recv_data.decode('utf-8')
return msg
else: # Event happened (select fired) but no data -> error
print('closing connection to', data.addr)
selector.unregister(sock)
sock.close()
return None
def accept_wrapper(self, sock, selector):
conn, addr = sock.accept()
print('accepted connection from', addr)
conn.setblocking(False)
selector.register(conn, selectors.EVENT_READ, data=addr) # `data` to distinguish between connection & new to-accept in select (l:50)
def produce(self):
msg = ""
sep = self.sep
sel = self.sel
while True:
events = sel.select(timeout=None)
for key, mask in events:
if key.data is None:
self.accept_wrapper(key.fileobj, sel)
else:
dta = self.service_connection(key, mask, sel, sep) # Get Chunk of data from socket
if not dta: return # Socket is down / got no data / ... exit application
msg += dta
# Go through current msg buffer and yield one full message (delimetered) by one
sep_idx = msg.find(sep)
while sep_idx != -1:
currMsg, msg = msg[:sep_idx], msg[sep_idx+1:] # Cut last finished message (up until first ';') -> yield it
yield currMsg # Process the rest (could contain multiple messages)
sep_idx = msg.find(sep)
def main(host='127.0.0.1', port=4444, sep='\n'):
listener = SocketReader(host, port, sep)
for i in listener.produce():
print('>>>', i)
if __name__ == "__main__":
fire.Fire(main) | wrappers/SocketReader.py | import socket
import selectors
import types
import fire
class SocketReader():
def __init__(self, host, port, sep = '\n'):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((host, port))
sock.listen()
sock.setblocking(False)
self.sock = sock
sel = selectors.DefaultSelector()
sel.register(sock, selectors.EVENT_READ, data=None)
self.sel = sel
self.sep = sep
def service_connection(self, key, mask, selector, separator):
sock = key.fileobj
data = key.data
if mask & selectors.EVENT_READ:
recv_data = sock.recv(1024)
if recv_data: # Receive chunk of data from socket -> transform to UTF8 string
msg = recv_data.decode('utf-8')
return msg
else: # Event happened (select fired) but no data -> error
print('closing connection to', data.addr)
selector.unregister(sock)
sock.close()
return None
def accept_wrapper(self, sock, selector):
conn, addr = sock.accept()
print('accepted connection from', addr)
conn.setblocking(False)
selector.register(conn, selectors.EVENT_READ, data=addr) # `data` to distinguish between connection & new to-accept in select (l:50)
def produce(self):
msg = ""
sep = self.sep
sel = self.sel
while True:
events = sel.select(timeout=None)
for key, mask in events:
if key.data is None:
self.accept_wrapper(key.fileobj, sel)
else:
dta = self.service_connection(key, mask, sel, sep) # Get Chunk of data from socket
if not dta: return # Socket is down / got no data / ... exit application
msg += dta
# Go through current msg buffer and yield one full message (delimetered) by one
sep_idx = msg.find(sep)
while sep_idx != -1:
currMsg, msg = msg[:sep_idx], msg[sep_idx+1:] # Cut last finished message (up until first ';') -> yield it
yield currMsg # Process the rest (could contain multiple messages)
sep_idx = msg.find(sep)
def main(host='127.0.0.1', port=4444, sep='\n'):
listener = SocketReader(host, port, sep)
for i in listener.produce():
print('>>>', i)
if __name__ == "__main__":
fire.Fire(main) | 0.196518 | 0.083106 |
from typing import Any, Union
from collections import deque, OrderedDict
from collections.abc import Mapping
from typing_extensions import Literal
from typing_json.typechecking import is_instance, is_namedtuple
from typing_json.encoding import JSON_BASE_TYPES, is_json_encodable
# Message for code paths that should be impossible to reach after the
# `is_json_encodable` pre-check; seeing it at runtime indicates a decoder bug.
_UNREACHABLE_ERROR_MSG = "Should never reach this point, please open an issue on GitHub."
def from_json_obj(obj: Any, t: Any) -> Any:
    """ Converts an object of json standard type to json encodable type.

        Decodes ``obj`` (a tree of JSON standard values: dict/OrderedDict,
        list, str, int, float, bool, None) into an instance of the
        json-encodable type ``t``, recursing into generic type arguments.

        Args:
            obj: the JSON-standard value to decode.
            t: the json-encodable target type (a base type, None/``...``,
                a typed namedtuple, or a supported generic such as
                List/Set/FrozenSet/Deque/Tuple/Dict/Mapping/OrderedDict,
                Union or Literal).

        Returns:
            An instance of ``t`` reconstructed from ``obj``.

        Raises:
            TypeError: if ``t`` is not json-encodable, or if ``obj`` does not
                conform to the structure required by ``t``.
    """
    # pylint:disable=invalid-name,too-many-branches,too-many-statements,too-many-return-statements
    if not is_json_encodable(t):
        raise TypeError("Type %s is not json-encodable."%str(t))
    if t in JSON_BASE_TYPES:
        # Base types pass through unchanged after an exact isinstance check.
        if not isinstance(obj, t):
            raise TypeError("Object %s is not %s."%(str(obj), str(t)))
        return obj
    if t in (None, type(None)):
        if obj is not None:
            raise TypeError("Object %s is not null (t=%s)."%(str(obj), str(t)))
        return None
    if t is ...:
        # Ellipsis is encoded as JSON null, so only None decodes back to it.
        if obj is not None:
            raise TypeError("Object %s is not null (t=%s)."%(str(obj), str(t)))
        return ...
    if is_namedtuple(t):
        if not isinstance(obj, (dict, OrderedDict, list)):
            raise TypeError("Object %s is not (ordered) dictionary or list (t=%s)."%(str(obj), str(t))) # pylint:disable=line-too-long
        fields = getattr(t, "_fields")
        # NamedTuple._field_types was removed in Python 3.9; typed namedtuples
        # expose the same name->type mapping via __annotations__ everywhere.
        field_types = getattr(t, "_field_types", None) or getattr(t, "__annotations__")
        field_defaults = getattr(t, "_field_defaults")
        if isinstance(obj, list):
            # Positional encoding: values must line up one-to-one with fields.
            if len(fields) != len(obj):
                # Bugfix: the % argument was missing, so the raised message
                # used to contain a literal "%s".
                raise TypeError("Object %s does not provide the right number of values for a namedtuple."%str(obj)) # pylint:disable=line-too-long
            return_val = t(*tuple(from_json_obj(obj[i] if i < len(obj) else field_defaults[field], field_types[field]) for i, field in enumerate(fields))) # pylint:disable=line-too-long
            assert is_instance(return_val, t)
            return return_val
        # Mapping encoding: values come from obj, missing keys from defaults.
        converted_dict = {}
        if set(obj.keys()).union(set(field_defaults.keys())) != set(field_types.keys()):
            key_diff = set(obj.keys()).union(set(field_defaults.keys())) - set(field_types.keys())
            if key_diff:
                raise TypeError("Object %s does not have the required keys: t=%s, extra keys %s."%(str(obj), str(t), str(key_diff))) # pylint:disable=line-too-long
            key_diff = set(field_types.keys()) - set(obj.keys()).union(set(field_defaults.keys()))
            raise TypeError("Object %s does not have the required keys: t=%s, missing keys %s."%(str(obj), str(t), str(key_diff))) # pylint:disable=line-too-long
        for field in fields:
            field_type = field_types[field]
            if not field in obj:
                # Field absent from the JSON object: use the declared default.
                converted_dict[field] = field_defaults[field]
            else:
                converted_dict[field] = from_json_obj(obj[field], field_type)
        return_val = t(**converted_dict)
        assert is_instance(return_val, t)
        return return_val
    if hasattr(t, "__origin__") and hasattr(t, "__args__"): # generics
        if t.__origin__ is Union:
            # Try member types in declaration order; first successful decode wins.
            for s in t.__args__:
                try:
                    return_val = from_json_obj(obj, s)
                    assert is_instance(return_val, t)
                    return return_val
                except TypeError:
                    continue
            raise TypeError("Object %s is not convertible to any of %s."%(str(obj), str(t)))
        if t.__origin__ is Literal:
            if not is_instance(obj, t):
                raise TypeError("Object %s is not allowed (t=%s)."%(str(obj), str(t)))
            return obj
        if t.__origin__ is list:
            if not isinstance(obj, list):
                raise TypeError("Object %s is not list (t=%s)."%(str(obj), str(t)))
            return_val = list(from_json_obj(x, t.__args__[0]) for x in obj)
            assert is_instance(return_val, t)
            return return_val
        if t.__origin__ is deque:
            if not isinstance(obj, list):
                raise TypeError("Object %s is not list (t=%s)."%(str(obj), str(t)))
            return_val = deque(from_json_obj(x, t.__args__[0]) for x in obj)
            assert is_instance(return_val, t)
            return return_val
        if t.__origin__ is set:
            if not isinstance(obj, list):
                raise TypeError("Object %s is not list (t=%s)."%(str(obj), str(t)))
            return_val = set(from_json_obj(x, t.__args__[0]) for x in obj)
            assert is_instance(return_val, t)
            return return_val
        if t.__origin__ is frozenset:
            if not isinstance(obj, list):
                raise TypeError("Object %s is not list (t=%s)."%(str(obj), str(t)))
            return_val = frozenset(from_json_obj(x, t.__args__[0]) for x in obj)
            assert is_instance(return_val, t)
            return return_val
        if t.__origin__ is tuple:
            if not isinstance(obj, list):
                raise TypeError("Object %s is not list (t=%s)."%(str(obj), str(t)))
            if len(t.__args__) == 2 and t.__args__[1] is ...: # pylint:disable=no-else-return
                # Variadic Tuple[X, ...]: homogeneous element type, any length.
                return_val = tuple(from_json_obj(x, t.__args__[0]) for x in obj)
                assert is_instance(return_val, t)
                return return_val
            else:
                # Fixed-arity tuple: length and per-position types must match.
                if len(obj) != len(t.__args__):
                    raise TypeError("List %s is of incorrect length (t=%s)."%(str(obj), str(t)))
                return_val = tuple(from_json_obj(x, t.__args__[i]) for i, x in enumerate(obj))
                assert is_instance(return_val, t)
                return return_val
        if t.__origin__ in (dict, Mapping):
            if not isinstance(obj, (dict, OrderedDict)):
                raise TypeError("Object %s is not dict or OrderedDict (t=%s)."%(str(obj), str(t)))
            converted_dict = dict()
            for field in obj:
                if not isinstance(field, str):
                    # Bugfix: message previously read "is string".
                    raise TypeError("Object key %s is not string (t=%s)."%(field, str(t)))
                converted_dict[field] = from_json_obj(obj[field], t.__args__[1])
            assert is_instance(converted_dict, t)
            return converted_dict
        if t.__origin__ is OrderedDict:
            if not isinstance(obj, OrderedDict):
                # Bugfix: message previously claimed a plain dict was accepted,
                # but the check only admits OrderedDict.
                raise TypeError("Object %s is not OrderedDict (t=%s)."%(str(obj), str(t)))
            converted_dict = OrderedDict()
            for field in obj:
                if not isinstance(field, str):
                    # Bugfix: message previously read "is string".
                    raise TypeError("Object key %s is not string (t=%s)."%(field, str(t)))
                converted_dict[field] = from_json_obj(obj[field], t.__args__[1])
            assert is_instance(converted_dict, t)
            return converted_dict
    raise AssertionError(_UNREACHABLE_ERROR_MSG) # pragma: no cover
from typing import Any, Union
from collections import deque, OrderedDict
from collections.abc import Mapping
from typing_extensions import Literal
from typing_json.typechecking import is_instance, is_namedtuple
from typing_json.encoding import JSON_BASE_TYPES, is_json_encodable
_UNREACHABLE_ERROR_MSG = "Should never reach this point, please open an issue on GitHub."
def from_json_obj(obj: Any, t: Any) -> Any:
    """Convert an object of JSON standard type into an instance of type ``t``.

    Walks the structure of ``t`` branch by branch (base types, ``None``,
    ``...``, namedtuples, and generics: Union, Literal, list, deque, set,
    frozenset, tuple, dict/Mapping, OrderedDict), validating ``obj`` and
    recursively converting its elements.

    Args:
        obj: value composed of JSON standard types (base types, lists,
            (ordered) dicts).
        t: target type; must satisfy ``is_json_encodable``.

    Returns:
        An instance of ``t`` reconstructed from ``obj``.

    Raises:
        TypeError: if ``t`` is not json-encodable, or ``obj`` does not match
            the structure required by ``t``.
    """
    # pylint:disable=invalid-name,too-many-branches,too-many-statements,too-many-return-statements
    if not is_json_encodable(t):
        raise TypeError("Type %s is not json-encodable."%str(t))
    if t in JSON_BASE_TYPES:
        if not isinstance(obj, t):
            raise TypeError("Object %s is not %s."%(str(obj), str(t)))
        return obj
    if t in (None, type(None)):
        if obj is not None:
            raise TypeError("Object %s is not null (t=%s)."%(str(obj), str(t)))
        return None
    if t is ...:
        if obj is not None:
            raise TypeError("Object %s is not null (t=%s)."%(str(obj), str(t)))
        return ...
    if is_namedtuple(t):
        if not isinstance(obj, (dict, OrderedDict, list)):
            raise TypeError("Object %s is not (ordered) dictionary or list (t=%s)."%(str(obj), str(t))) # pylint:disable=line-too-long
        fields = getattr(t, "_fields")
        field_types = getattr(t, "_field_types")
        field_defaults = getattr(t, "_field_defaults")
        if isinstance(obj, list):
            if len(fields) != len(obj):
                # BUGFIX: the %s placeholder was previously left unformatted.
                raise TypeError("Object %s does not provide the right number of values for a namedtuple."%str(obj))
            # NOTE: the defaults fallback below is defensive only — the exact
            # length check above guarantees every field is present in the list.
            return_val = t(*tuple(from_json_obj(obj[i] if i < len(obj) else field_defaults[field], field_types[field]) for i, field in enumerate(fields))) # pylint:disable=line-too-long
            assert is_instance(return_val, t)
            return return_val
        # BUGFIX: was annotated `converted_dict: OrderedDict() = {}` — an
        # instance call used as an annotation, not a type.
        converted_dict = {}  # type:ignore
        # Keys present in obj plus defaulted keys must exactly cover the fields.
        if set(obj.keys()).union(set(field_defaults.keys())) != set(field_types.keys()):
            key_diff = set(obj.keys()).union(set(field_defaults.keys())) - set(field_types.keys())
            if key_diff:
                raise TypeError("Object %s does not have the required keys: t=%s, extra keys %s."%(str(obj), str(t), str(key_diff))) # pylint:disable=line-too-long
            key_diff = set(field_types.keys()) - set(obj.keys()).union(set(field_defaults.keys()))
            raise TypeError("Object %s does not have the required keys: t=%s, missing keys %s."%(str(obj), str(t), str(key_diff))) # pylint:disable=line-too-long
        for field in fields:
            field_type = field_types[field]
            if not field in obj:
                # Field absent from the JSON object: fall back to its default.
                converted_dict[field] = field_defaults[field]
            else:
                converted_dict[field] = from_json_obj(obj[field], field_type)
        return_val = t(**converted_dict)
        assert is_instance(return_val, t)
        return return_val
    if hasattr(t, "__origin__") and hasattr(t, "__args__"): # generics
        if t.__origin__ is Union:
            # Try each member type in declaration order; first success wins.
            for s in t.__args__:
                try:
                    return_val = from_json_obj(obj, s)
                    assert is_instance(return_val, t)
                    return return_val
                except TypeError:
                    continue
            raise TypeError("Object %s is not convertible to any of %s."%(str(obj), str(t)))
        if t.__origin__ is Literal:
            if not is_instance(obj, t):
                raise TypeError("Object %s is not allowed (t=%s)."%(str(obj), str(t)))
            return obj
        if t.__origin__ is list:
            if not isinstance(obj, list):
                raise TypeError("Object %s is not list (t=%s)."%(str(obj), str(t)))
            return_val = list(from_json_obj(x, t.__args__[0]) for x in obj)
            assert is_instance(return_val, t)
            return return_val
        if t.__origin__ is deque:
            if not isinstance(obj, list):
                raise TypeError("Object %s is not list (t=%s)."%(str(obj), str(t)))
            return_val = deque(from_json_obj(x, t.__args__[0]) for x in obj)
            assert is_instance(return_val, t)
            return return_val
        if t.__origin__ is set:
            if not isinstance(obj, list):
                raise TypeError("Object %s is not list (t=%s)."%(str(obj), str(t)))
            return_val = set(from_json_obj(x, t.__args__[0]) for x in obj)
            assert is_instance(return_val, t)
            return return_val
        if t.__origin__ is frozenset:
            if not isinstance(obj, list):
                raise TypeError("Object %s is not list (t=%s)."%(str(obj), str(t)))
            return_val = frozenset(from_json_obj(x, t.__args__[0]) for x in obj)
            assert is_instance(return_val, t)
            return return_val
        if t.__origin__ is tuple:
            if not isinstance(obj, list):
                raise TypeError("Object %s is not list (t=%s)."%(str(obj), str(t)))
            # Tuple[X, ...] is variadic; otherwise arity must match exactly.
            if len(t.__args__) == 2 and t.__args__[1] is ...: # pylint:disable=no-else-return
                return_val = tuple(from_json_obj(x, t.__args__[0]) for x in obj)
                assert is_instance(return_val, t)
                return return_val
            else:
                if len(obj) != len(t.__args__):
                    raise TypeError("List %s is of incorrect length (t=%s)."%(str(obj), str(t)))
                return_val = tuple(from_json_obj(x, t.__args__[i]) for i, x in enumerate(obj))
                assert is_instance(return_val, t)
                return return_val
        if t.__origin__ in (dict, Mapping):
            if not isinstance(obj, (dict, OrderedDict)):
                raise TypeError("Object %s is not dict or OrderedDict (t=%s)."%(str(obj), str(t)))
            converted_dict = dict() # type:ignore
            for field in obj:
                if not isinstance(field, str):
                    # BUGFIX: message previously read "is string", inverting its meaning.
                    raise TypeError("Object key %s is not a string (t=%s)."%(field, str(t)))
                converted_dict[field] = from_json_obj(obj[field], t.__args__[1])
            assert is_instance(converted_dict, t)
            return converted_dict
        if t.__origin__ is OrderedDict:
            if not isinstance(obj, OrderedDict):
                raise TypeError("Object %s is not dict or OrderedDict (t=%s)."%(str(obj), str(t)))
            converted_dict = OrderedDict() # type:ignore
            for field in obj:
                if not isinstance(field, str):
                    # BUGFIX: message previously read "is string", inverting its meaning.
                    raise TypeError("Object key %s is not a string (t=%s)."%(field, str(t)))
                converted_dict[field] = from_json_obj(obj[field], t.__args__[1])
            assert is_instance(converted_dict, t)
            return converted_dict
    raise AssertionError(_UNREACHABLE_ERROR_MSG) # pragma: no cover
from __future__ import print_function
import roslib
roslib.load_manifest('mct_blob_finder')
import rospy
import threading
import sys
from cv_bridge.cv_bridge import CvBridge
from mct_blob_finder import BlobFinder
import mct_introspection
# Messages
from sensor_msgs.msg import Image
from mct_msg_and_srv.msg import SeqAndImage
from mct_msg_and_srv.msg import BlobData
from mct_msg_and_srv.msg import Blob
# Services
from mct_msg_and_srv.srv import BlobFinderSetParam
from mct_msg_and_srv.srv import BlobFinderSetParamResponse
from mct_msg_and_srv.srv import BlobFinderGetParam
from mct_msg_and_srv.srv import BlobFinderGetParamResponse
class BlobFinderNode(object):
    """ROS node wrapping a BlobFinder.

    Subscribes to an image topic (either sensor_msgs/Image or SeqAndImage,
    chosen by introspecting the topic type), runs blob detection on every
    frame, and republishes the annotated image on 'image_blobs' and the blob
    measurements on 'blob_data'.  Exposes set_param/get_param services for
    the detector settings.  Python 2 code (uses dict.iteritems()).
    """
    def __init__(self,topic=None):
        # topic: name of the input image topic; its message type decides
        # which callback is subscribed below.
        self.topic = topic
        # Serialises detector access between image callbacks and the
        # parameter services.
        self.lock = threading.Lock()
        self.bridge = CvBridge()
        self.blobFinder = BlobFinder()
        # Default detector settings (changeable via the set_param service).
        self.blobFinder.threshold = 150
        self.blobFinder.filter_by_area = True
        self.blobFinder.min_area = 0
        self.blobFinder.max_area = 200
        self.topic_type = mct_introspection.get_topic_type(topic)
        rospy.init_node('blob_finder')
        # Guard flag: callbacks may fire as soon as the subscriber exists,
        # so they bail out until construction is complete (set True below).
        self.ready = False
        if self.topic_type == 'sensor_msgs/Image':
            self.image_sub = rospy.Subscriber(self.topic,Image,self.image_callback)
        else:
            self.image_sub = rospy.Subscriber(self.topic,SeqAndImage,self.seq_and_image_callback)
        self.image_pub = rospy.Publisher('image_blobs', Image)
        self.blob_data_pub = rospy.Publisher('blob_data', BlobData)
        node_name = rospy.get_name()
        self.set_param_srv = rospy.Service(
                '{0}/set_param'.format(node_name),
                BlobFinderSetParam,
                self.handle_set_param_srv
                )
        self.get_param_srv = rospy.Service(
                '{0}/get_param'.format(node_name),
                BlobFinderGetParam,
                self.handle_get_param_srv
                )
        self.ready = True

    def handle_set_param_srv(self, req):
        """
        Handles requests to set the blob finder's parameters. Currently this
        is just the threshold used for binarizing the image.
        """
        with self.lock:
            self.blobFinder.threshold = req.threshold
            self.blobFinder.filter_by_area = req.filter_by_area
            self.blobFinder.min_area = req.min_area
            self.blobFinder.max_area = req.max_area
        return BlobFinderSetParamResponse(True,'')

    def handle_get_param_srv(self,req):
        """
        Handles requests for the blob finders parameters
        """
        with self.lock:
            threshold = self.blobFinder.threshold
            filter_by_area = self.blobFinder.filter_by_area
            min_area = self.blobFinder.min_area
            max_area = self.blobFinder.max_area
        resp_args = (threshold, filter_by_area, min_area, max_area)
        return BlobFinderGetParamResponse(*resp_args)

    def publish_blob_data(self,blobs_list, blobs_image, image_header, image_seq):
        """
        Publish image of blobs and blob data.
        """
        # Convert the OpenCV blob image back to a ROS Image message.
        blobs_rosimage = self.bridge.cv_to_imgmsg(blobs_image,encoding="passthrough")
        self.image_pub.publish(blobs_rosimage)
        # Create the blob data message and publish
        blob_data_msg = BlobData()
        blob_data_msg.header = image_header
        blob_data_msg.image_seq = image_seq
        blob_data_msg.number_of_blobs = len(blobs_list)
        for b in blobs_list:
            blob_msg = Blob()
            # Each blob dict's keys mirror the Blob message fields.
            # NOTE: iteritems() makes this Python-2 only.
            for k, v in b.iteritems():
                setattr(blob_msg,k,v)
            blob_data_msg.blob.append(blob_msg)
        self.blob_data_pub.publish(blob_data_msg)

    def image_callback(self,data):
        """
        Callback for image topic subscription.
        """
        # Drop frames that arrive before __init__ has finished.
        if not self.ready:
            return
        with self.lock:
            blobs_list, blobs_image = self.blobFinder.findBlobs(data)
            self.publish_blob_data(blobs_list, blobs_image, data.header, data.header.seq)

    def seq_and_image_callback(self,data):
        """
        Callback for SeqAndImage topic subscription.
        """
        # Drop frames that arrive before __init__ has finished.
        if not self.ready:
            return
        with self.lock:
            blobs_list, blobs_image = self.blobFinder.findBlobs(data.image)
            # SeqAndImage carries its own sequence number alongside the image.
            self.publish_blob_data(blobs_list, blobs_image, data.image.header, data.seq)

    def run(self):
        # Block until shutdown; all work happens in the callbacks above.
        rospy.spin()
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Entry point: first CLI argument is the image topic to subscribe to.
    # BUGFIX/robustness: fail with a usage message instead of a bare
    # IndexError when the argument is missing.
    if len(sys.argv) < 2:
        sys.exit('usage: blob_finder_node.py <image_topic>')
    topic = sys.argv[1]
    node = BlobFinderNode(topic)
    node.run()
import roslib
roslib.load_manifest('mct_blob_finder')
import rospy
import threading
import sys
from cv_bridge.cv_bridge import CvBridge
from mct_blob_finder import BlobFinder
import mct_introspection
# Messages
from sensor_msgs.msg import Image
from mct_msg_and_srv.msg import SeqAndImage
from mct_msg_and_srv.msg import BlobData
from mct_msg_and_srv.msg import Blob
# Services
from mct_msg_and_srv.srv import BlobFinderSetParam
from mct_msg_and_srv.srv import BlobFinderSetParamResponse
from mct_msg_and_srv.srv import BlobFinderGetParam
from mct_msg_and_srv.srv import BlobFinderGetParamResponse
class BlobFinderNode(object):
def __init__(self,topic=None):
self.topic = topic
self.lock = threading.Lock()
self.bridge = CvBridge()
self.blobFinder = BlobFinder()
self.blobFinder.threshold = 150
self.blobFinder.filter_by_area = True
self.blobFinder.min_area = 0
self.blobFinder.max_area = 200
self.topic_type = mct_introspection.get_topic_type(topic)
rospy.init_node('blob_finder')
self.ready = False
if self.topic_type == 'sensor_msgs/Image':
self.image_sub = rospy.Subscriber(self.topic,Image,self.image_callback)
else:
self.image_sub = rospy.Subscriber(self.topic,SeqAndImage,self.seq_and_image_callback)
self.image_pub = rospy.Publisher('image_blobs', Image)
self.blob_data_pub = rospy.Publisher('blob_data', BlobData)
node_name = rospy.get_name()
self.set_param_srv = rospy.Service(
'{0}/set_param'.format(node_name),
BlobFinderSetParam,
self.handle_set_param_srv
)
self.get_param_srv = rospy.Service(
'{0}/get_param'.format(node_name),
BlobFinderGetParam,
self.handle_get_param_srv
)
self.ready = True
def handle_set_param_srv(self, req):
"""
Handles requests to set the blob finder's parameters. Currently this
is just the threshold used for binarizing the image.
"""
with self.lock:
self.blobFinder.threshold = req.threshold
self.blobFinder.filter_by_area = req.filter_by_area
self.blobFinder.min_area = req.min_area
self.blobFinder.max_area = req.max_area
return BlobFinderSetParamResponse(True,'')
def handle_get_param_srv(self,req):
"""
Handles requests for the blob finders parameters
"""
with self.lock:
threshold = self.blobFinder.threshold
filter_by_area = self.blobFinder.filter_by_area
min_area = self.blobFinder.min_area
max_area = self.blobFinder.max_area
resp_args = (threshold, filter_by_area, min_area, max_area)
return BlobFinderGetParamResponse(*resp_args)
def publish_blob_data(self,blobs_list, blobs_image, image_header, image_seq):
"""
Publish image of blobs and blob data.
"""
blobs_rosimage = self.bridge.cv_to_imgmsg(blobs_image,encoding="passthrough")
self.image_pub.publish(blobs_rosimage)
# Create the blob data message and publish
blob_data_msg = BlobData()
blob_data_msg.header = image_header
blob_data_msg.image_seq = image_seq
blob_data_msg.number_of_blobs = len(blobs_list)
for b in blobs_list:
blob_msg = Blob()
for k, v in b.iteritems():
setattr(blob_msg,k,v)
blob_data_msg.blob.append(blob_msg)
self.blob_data_pub.publish(blob_data_msg)
def image_callback(self,data):
"""
Callback for image topic subscription.
"""
if not self.ready:
return
with self.lock:
blobs_list, blobs_image = self.blobFinder.findBlobs(data)
self.publish_blob_data(blobs_list, blobs_image, data.header, data.header.seq)
def seq_and_image_callback(self,data):
"""
Callback for SeqAndImage topic subscription.
"""
if not self.ready:
return
with self.lock:
blobs_list, blobs_image = self.blobFinder.findBlobs(data.image)
self.publish_blob_data(blobs_list, blobs_image, data.image.header, data.seq)
def run(self):
rospy.spin()
# -----------------------------------------------------------------------------
if __name__ == '__main__':
topic = sys.argv[1]
node = BlobFinderNode(topic)
node.run() | 0.350199 | 0.180865 |
import re
from typing import Optional
from wikitextparser import WikiText, Section
from to_python.core.context import ParseFunctionSide, ContextData
from to_python.core.filter import FilterAbstract
from to_python.core.format import colorize_token_list
from to_python.core.signature import SignatureParser, SignatureTokenizer
from to_python.core.types import FunctionSignature
from to_python.filters.data_list.doc import FilterParseDocs
class WikiGetSyntaxSection:
    """
    Picks a syntax section from wiki page.

    Call get() to locate the section, then pick_text() to extract its raw
    media-wiki text.  If no section is found, section stays None and
    pick_text() falls back to offset 0 (start of the page).
    """
    # TODO: Think about transforming that utility class into a filter
    def __init__(self, context: ContextData, f_name: str, raw_data: str,
                 wiki: WikiText):
        self.context = context      # shared parse context (per-function side data)
        self.f_name = f_name        # name of the wiki function being parsed
        self.raw_data = raw_data    # raw media-wiki text of the page
        self.wiki = wiki            # parsed WikiText of the same page
        self.section_index = 0      # index of the chosen section in wiki.sections
        self.start_index = 0        # character offset where the chosen section starts
        self.section: Optional[Section] = None  # set by get() on success

    def no_syntax_section(self):
        """
        Process situation, when the Syntax section have not been found
        """
        # Only warn for client-/server-only functions; shared pages are
        # expected to lack a per-side Syntax section.
        if self.context.side_data[self.f_name].side != \
                ParseFunctionSide.SHARED:
            print(
                f'\u001b[33m[WARN] \u001b[0m'
                f'No Syntax section "{self.f_name}"\u001b[0m'
            )

    def multiple_syntax_section(self, section_list):
        """
        Process situation, when the Syntax section
        have been found multiple time
        """
        # Warn when more than one candidate exists; either way the first
        # candidate is selected below.
        if len(section_list) != 1:
            print(
                f'\u001b[33m[WARN] \u001b[0m'
                f'Multiple Syntax sections "{self.f_name}". \u001b[0m\n'
                f'\u001b[33m[WARN] \u001b[0m'
                f'Selecting the first:\u001b[0m'
            )
            for section, index in section_list:
                print(f'    \u001b[34m{index: 2}.\u001b[0m {section.title}')
        self.section, self.section_index = section_list[0]
        self.start_index = self.section.span[0]

    def get(self, paragraph_title_part: str = 'syntax') -> Optional[Section]:
        """
        Finds syntax section in wiki page (or part of wiki page)
        :return: the chosen Section, or None when no title matched.
        """
        syntax = FilterParseDocs.get_sections_title_contains(
            self.wiki,
            paragraph_title_part
        )
        if not syntax:
            self.no_syntax_section()
        else:
            self.multiple_syntax_section(syntax)
        return self.section

    def pick_text(self) -> str:
        # The chosen section's text runs from its own start offset up to the
        # start of the next section, or to the end of the page if the chosen
        # section is the last one.
        try:
            next_section: Optional[WikiText] = self.wiki.sections[
                self.section_index + 1]
        except IndexError:
            next_section = None
        if next_section is None:
            end_index = len(str(self.wiki))
        else:
            end_index = next_section.span[0]
        return str(self.wiki)[self.start_index:end_index]
class FilterParseFunctionSignature(FilterAbstract):
    """
    Parses function signatures out of each wiki page's Syntax section,
    for both the client and the server side of every function.
    """
    def __init__(self):
        super().__init__('functions')

    @staticmethod
    def clean_code(code: str) -> str:
        """Strip Lua line comments (``-- ...``) and collapse code onto one line."""
        lines = code.split('\n')
        for i, line in enumerate(lines):
            line = re.sub(r'--.+$', '', line)
            lines[i] = line.strip()
        return ' '.join(lines)

    def parse_signature(self, code: str) -> FunctionSignature:
        """
        Parses given code into a FunctionSignature.
        """
        code = self.clean_code(code)
        tokenized = SignatureTokenizer(code).tokenize()
        # NOTE(review): `self.context` here vs `self.context_data` elsewhere
        # in this class — presumably both are supplied by FilterAbstract;
        # confirm they are distinct attributes and not a typo.
        if self.context.verbose:
            colors = colorize_token_list(tokenized)
            print(f'[V] {code: <175}', f'[V] {colors: <175}\n', sep='\n')
        return SignatureParser(
            tokenized=tokenized
        ).parse()

    def pick_signature_container(self, f_name: str, raw_data: str,
                                 wiki: WikiText) -> str:
        """
        Picks media wiki code containing the signature (the text of the
        Syntax section).

        Raises:
            RuntimeError: if the section contains no <syntaxhighlight> tag.
        """
        syntax_picker = WikiGetSyntaxSection(self.context_data, f_name,
                                             raw_data, wiki)
        syntax_picker.get()
        code_inside = syntax_picker.pick_text()
        if 'syntaxhighlight' not in code_inside:
            raise RuntimeError(
                f'[ERROR] Result media wiki code '
                f'does not contain signature. "{f_name}"'
            )
        return code_inside

    # Non-greedy match of the first lua-tagged <syntaxhighlight> block.
    SELECT_CODE_REGEX = re.compile(
        r'<syntaxhighlight[^>]*lua[^>]*>([\s\S]+?)</syntaxhighlight>')

    def pick_signature(self, f_name: str, raw_data: str, wiki: WikiText) -> \
            str:
        """
        Picks out function signature code from an entire data.

        Raises:
            RuntimeError: if no lua <syntaxhighlight> block is present.
        """
        container = self.pick_signature_container(f_name, raw_data, wiki)
        # BUGFIX: re.search may return None (e.g. a syntaxhighlight block
        # not tagged lua); previously this crashed with an opaque
        # AttributeError on .group().  Raise a descriptive error instead,
        # consistent with pick_signature_container.
        match = re.search(self.SELECT_CODE_REGEX, container)
        if match is None:
            raise RuntimeError(
                f'[ERROR] Result media wiki code '
                f'does not contain a lua <syntaxhighlight> block. "{f_name}"'
            )
        return match.group(1).strip()

    def apply(self):
        print('\n\n ============ Parse Functions ============')
        # Parse client and server signatures independently; either may be
        # absent for one-sided functions.
        for f_name in self.context_data.parsed:
            raw_content = self.context_data.side_data[f_name]
            wiki_content = self.context_data.wiki_side[f_name]
            if raw_content.client is not None:
                self.context_data.parsed[f_name].client[
                    0].signature = self.parse_signature(
                    self.pick_signature(f_name, raw_content.client,
                                        wiki_content.client)
                )
            if raw_content.server is not None:
                self.context_data.parsed[f_name].server[
                    0].signature = self.parse_signature(
                    self.pick_signature(f_name, raw_content.server,
                                        wiki_content.server)
                )
        print('Function signature parsing complete\u001b[0m')
from typing import Optional
from wikitextparser import WikiText, Section
from to_python.core.context import ParseFunctionSide, ContextData
from to_python.core.filter import FilterAbstract
from to_python.core.format import colorize_token_list
from to_python.core.signature import SignatureParser, SignatureTokenizer
from to_python.core.types import FunctionSignature
from to_python.filters.data_list.doc import FilterParseDocs
class WikiGetSyntaxSection:
"""
Picks a syntax section from wiki page
"""
# TODO: Think about transforming that utility class into a filter
def __init__(self, context: ContextData, f_name: str, raw_data: str,
wiki: WikiText):
self.context = context
self.f_name = f_name
self.raw_data = raw_data
self.wiki = wiki
self.section_index = 0
self.start_index = 0
self.section: Optional[Section] = None
def no_syntax_section(self):
"""
Process situation, when the Syntax section have not been found
"""
if self.context.side_data[self.f_name].side != \
ParseFunctionSide.SHARED:
print(
f'\u001b[33m[WARN] \u001b[0m'
f'No Syntax section "{self.f_name}"\u001b[0m'
)
def multiple_syntax_section(self, section_list):
"""
Process situation, when the Syntax section
have been found multiple time
"""
if len(section_list) != 1:
print(
f'\u001b[33m[WARN] \u001b[0m'
f'Multiple Syntax sections "{self.f_name}". \u001b[0m\n'
f'\u001b[33m[WARN] \u001b[0m'
f'Selecting the first:\u001b[0m'
)
for section, index in section_list:
print(f' \u001b[34m{index: 2}.\u001b[0m {section.title}')
self.section, self.section_index = section_list[0]
self.start_index = self.section.span[0]
def get(self, paragraph_title_part: str = 'syntax') -> Optional[Section]:
"""
Finds syntax section in wiki page (or part of wiki page)
:return:
"""
syntax = FilterParseDocs.get_sections_title_contains(
self.wiki,
paragraph_title_part
)
if not syntax:
self.no_syntax_section()
else:
self.multiple_syntax_section(syntax)
return self.section
def pick_text(self) -> str:
try:
next_section: Optional[WikiText] = self.wiki.sections[
self.section_index + 1]
except IndexError:
next_section = None
if next_section is None:
end_index = len(str(self.wiki))
else:
end_index = next_section.span[0]
return str(self.wiki)[self.start_index:end_index]
class FilterParseFunctionSignature(FilterAbstract):
"""
Parses function signature
"""
def __init__(self):
super().__init__('functions')
@staticmethod
def clean_code(code: str) -> str:
lines = code.split('\n')
for i, line in enumerate(lines):
line = re.sub(r'--.+$', '', line)
lines[i] = line.strip()
return ' '.join(lines)
def parse_signature(self, code: str) -> FunctionSignature:
"""
Parses given code
"""
code = self.clean_code(code)
tokenized = SignatureTokenizer(code).tokenize()
if self.context.verbose:
colors = colorize_token_list(tokenized)
print(f'[V] {code: <175}', f'[V] {colors: <175}\n', sep='\n')
return SignatureParser(
tokenized=tokenized
).parse()
def pick_signature_container(self, f_name: str, raw_data: str,
wiki: WikiText) -> str:
"""
Picks media wiki code, containing signature
"""
syntax_picker = WikiGetSyntaxSection(self.context_data, f_name,
raw_data, wiki)
syntax_picker.get()
code_inside = syntax_picker.pick_text()
if 'syntaxhighlight' not in code_inside:
raise RuntimeError(
f'[ERROR] Result media wiki code '
f'does not contain signature. "{f_name}"'
)
return code_inside
SELECT_CODE_REGEX = re.compile(
r'<syntaxhighlight[^>]*lua[^>]*>([\s\S]+?)</syntaxhighlight>')
def pick_signature(self, f_name: str, raw_data: str, wiki: WikiText) -> \
str:
"""
Picks out function signature code from an entire data
"""
container = self.pick_signature_container(f_name, raw_data, wiki)
signature = re.search(self.SELECT_CODE_REGEX, container).group(1)
return signature.strip()
def apply(self):
print('\n\n ============ Parse Functions ============')
for f_name in self.context_data.parsed:
raw_content = self.context_data.side_data[f_name]
wiki_content = self.context_data.wiki_side[f_name]
if raw_content.client is not None:
self.context_data.parsed[f_name].client[
0].signature = self.parse_signature(
self.pick_signature(f_name, raw_content.client,
wiki_content.client)
)
if raw_content.server is not None:
self.context_data.parsed[f_name].server[
0].signature = self.parse_signature(
self.pick_signature(f_name, raw_content.server,
wiki_content.server)
)
print('Function signature parsing complete\u001b[0m') | 0.567098 | 0.223504 |
import pyglet
import pyperclip
import keyword
import tokenize
import io
import os
from pyno.utils import x_y_pan_scale, font
from pyno.draw import quad_aligned
highlight = set(list(__builtins__.keys()) +
list(keyword.__dict__.keys()) +
keyword.kwlist +
['call', 'cleanup'])
x_shift = 40
y_shift = 20
layout_padding = 5
resize_button = 10
nline_width = 30
nline_padding = 2
help_offset = 5
class CodeEditor():
    '''
    Code editor is the window you define nodes function.

    Wraps a pyglet FormattedDocument + IncrementalTextLayout attached to a
    node, with rudimentary syntax highlighting, line numbering, a docstring
    hint, clipboard support and mouse-driven resize.
    '''
    def __init__(self, node, highlighting=1):
        self.node = node # node-owner of this codeEditor
        self.document = pyglet.text.document.FormattedDocument(node.code)
        self.highlighting = highlighting # 0: off, 1: python (node), 2: file (sub)

        # Re-highlight whenever the document text changes.
        @self.document.event
        def on_insert_text(start, end):
            self.update_highlighting()

        @self.document.event
        def on_delete_text(start, end):
            self.update_highlighting()
        self.document.set_style(0, len(node.code),
                                dict(font_name=font,
                                     font_size=11, color=(255, 255, 255, 230)))
        self.layout = pyglet.text.layout.IncrementalTextLayout(
            self.document,
            *node.editor_size,
            multiline=True, wrap_lines=False)
        # Hint label shown while there are unsaved changes.
        self.update_label = pyglet.text.Label('CTRL+ENTER to save and execute',
                                              font_name=font,
                                              anchor_y='top',
                                              font_size=9)
        # Gutter with right-aligned line numbers.
        self.line_numbering = pyglet.text.Label('',
                                                font_name=font,
                                                font_size=11,
                                                color=(255, 255, 255, 127),
                                                align='right',
                                                anchor_y='top',
                                                width=nline_width,
                                                multiline=True)
        # One-line docstring hint for the object under the caret.
        self.autocomplete = pyglet.text.Label('',
                                              font_name=font,
                                              font_size=9,
                                              color=(125, 255, 125, 127))
        self.caret = pyglet.text.caret.Caret(self.layout)
        self.caret.color = (255, 255, 255)
        self.caret.visible = False
        self.hover = False        # mouse currently over the editor
        self.hovered = True       # last hover state, used to fade style in/out
        self.resize = False       # corner-drag resize in progress
        self.change = False       # unsaved edits present
        self.pan_scale = [[0.0, 0.0], 1]   # world pan/zoom, set by the host window
        self.screen_size = (800, 600)

    def update_node(self):
        # Push code to node
        self.node.new_code(self.document.text)
        self.node.need_update = True
        self.change = False

    def intersect_point(self, point):
        # Intersection with whole codeEditor
        l = self.layout
        if 0 < point[0] - l.x + 20 < l.width + 20 and \
           0 < point[1] - l.y < l.height + 10:
            self.node.hover = True
            return True
        return False

    def intersect_corner(self, point):
        # Intersection with bottom right corner to resize
        l = self.layout
        return (0 < point[0] - (l.x + l.width - resize_button) < resize_button and
                0 < point[1] - l.y < resize_button)

    def render(self):
        # Draw the editor chrome each frame, positioned relative to its node.
        self.node.make_child_active()
        l = self.layout
        l.x = self.node.x + self.node.cw + x_shift
        l.y = self.node.y - l.height + self.node.ch + y_shift
        if self.change:
            self.update_label.x = l.x
            self.update_label.y = l.y - help_offset
            self.update_label.draw()
        if self.hover:
            # Entering hover: restore full-brightness highlighting.
            if self.document.text and not self.hovered:
                self.hovered = True
                self.update_highlighting()
            # Orange accent while there are unsaved changes.
            color = self.node.color if not self.change else (255, 100, 10)
            # codeEditor background
            quad_aligned(l.x - layout_padding, l.y,
                         l.width + layout_padding, l.height,
                         ((0, 0, 0) if not self.change
                          else (20, 10, 5)) + (230,))
            if self.resize:
                # Ghost rectangle previewing the pending editor size.
                quad_aligned(l.x - layout_padding, l.y + l.height,
                             self.node.editor_size[0] + layout_padding,
                             -self.node.editor_size[1],
                             color + (100,))
            # codeEditor resize corner
            quad_aligned(l.x + l.width - resize_button, l.y,
                         resize_button, resize_button, color + (255,))
            # codeEditor left line
            quad_aligned(l.x - layout_padding - nline_width, l.y,
                         nline_width, l.height, color + (255,))
            # codeEditor left line numbering: derive the visible line range
            # from the layout's scroll position (view_y is negative).
            font_height = self.layout.content_height / self.layout.get_line_count()
            line_offset = (-self.layout.view_y) % font_height
            first_line = int(-self.layout.view_y / font_height)
            count_line = min(int((self.layout.height + line_offset) / font_height), self.layout.get_line_count())
            self.line_numbering.x = l.x - layout_padding - nline_width - nline_padding
            self.line_numbering.y = l.y + l.height + line_offset
            self.line_numbering.text = '\n'.join(['%02i'%i for i in range(first_line + 1, first_line + count_line + 1)])
            self.line_numbering.draw()
            # codeEditor autocomplete hint
            self.autocomplete.x = l.x
            self.autocomplete.y = l.y + l.height + help_offset
            self.autocomplete.draw()
        else:
            # Leaving hover: dim the text once.
            if self.document.text and self.hovered:
                self.hovered = False
                self.document.set_style(0, len(self.node.code),
                                        dict(color=(255, 255, 255, 50)))
        self.layout.draw()

    def update_highlighting(self):
        # Recompute syntax colors and the docstring hint for the whole document.
        if len(self.document.text) == 0:
            return
        # reset highlighting and hint
        self.document.set_style(0, len(self.document.text),
                                dict(color=(255, 255, 255, 255)))
        self.autocomplete.text = ""
        if self.highlighting == 0: # 0: off
            return
        elif self.highlighting == 1: # 1: python
            # rudimentary syntax highlighting and autocomplete hint
            # Map (row, col) token positions to flat document offsets.
            newline_offset = ([-1] +
                              [i for i, ch in enumerate(self.document.text) if ch == '\n'] +
                              [len(self.document.text)])
            try:
                obj_string = ""
                for item in tokenize.tokenize(io.BytesIO(self.document.text.encode('utf-8')).readline):
                    start = newline_offset[item.start[0] - 1] + item.start[1] + 1
                    stopp = newline_offset[item.end[0] - 1] + item.end[1] + 1
                    # rudimentary autocomplete hint: accumulate dotted names
                    if (item.type == tokenize.NAME) or (item.string == "."):
                        obj_string += item.string
                    else:
                        obj_string = ""
                    if (start <= self.caret.position <= stopp):
                        if not obj_string:
                            obj_string = item.string
                        # Best-effort docstring lookup in the node's env;
                        # any eval failure is deliberately ignored.
                        try:
                            obj = eval(obj_string.strip(), self.node.env)
                            #print("Code hint:\n", obj.__doc__)
                            self.autocomplete.text = obj.__doc__.split("\n")[0]
                        except:
                            pass
                    # syntax highlighting
                    if (item.type == tokenize.NAME) and (item.string in highlight):
                        pass
                    elif (item.type in [tokenize.COMMENT, tokenize.OP, tokenize.NUMBER, tokenize.STRING]):
                        pass # (we could e.g. set another color here...)
                    else:
                        continue # do not highlight this token
                    self.document.set_style(start, stopp,
                                            dict(color=(255, 200, 100, 255)))
            except tokenize.TokenError:
                # Incomplete code while typing; leave default colors.
                pass
        elif self.highlighting == 2: # 2: file
            # Highlight the whole text when it names an existing path.
            if os.path.exists(self.document.text):
                self.document.set_style(0, len(self.node.code),
                                        dict(color=(255, 200, 100, 255)))

    # --- Input events ---
    def on_mouse_press(self, x, y, button, modifiers):
        # Convert window coordinates into world coordinates first.
        x, y = x_y_pan_scale(x, y, self.pan_scale, self.screen_size)
        if self.intersect_corner((x, y)):
            self.resize = True
        elif button == 1 and self.hover:
            self.set_focus()
            self.caret.on_mouse_press(x, y, button, modifiers)
            self.update_highlighting()

    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        x, y = x_y_pan_scale(x, y, self.pan_scale, self.screen_size)
        dx, dy = int(dx / self.pan_scale[1]), int(dy / self.pan_scale[1])
        if buttons == 1 and self.resize:
            # Clamp to a minimum editor size of 300x150.
            width = max(self.node.editor_size[0] + dx, 300)
            height = max(self.node.editor_size[1] - dy, 150)
            self.node.editor_size = width, height
        elif buttons == 1 and self.hover:
            self.caret.on_mouse_drag(x, y, dx, dy, buttons, modifiers)

    def on_mouse_release(self, x, y, button, modifiers):
        if self.resize:
            # Apply the pending size chosen during the drag.
            self.layout.width, self.layout.height = self.node.editor_size
            self.resize = False

    def on_text(self, text):
        if self.hover:
            self.change = True
            self.caret.on_text(text)

    def on_text_motion(self, motion):
        if self.hover:
            self.caret.on_text_motion(motion)

    def on_text_motion_select(self, motion):
        if self.hover:
            self.caret.on_text_motion_select(motion)

    def on_key_press(self, symbol, modifiers):
        key = pyglet.window.key
        if symbol == key.TAB:
            self.change = True
            # NOTE(review): the caret advances by 2 below, so the inserted
            # indent string is presumably two characters wide — confirm, the
            # literal here appears to be a single space.
            self.document.insert_text(self.caret.position, ' ')
            self.caret.position += 2
        elif modifiers & key.MOD_CTRL and symbol == key.ENTER:
            print('Reload code')
            self.update_node()
        elif modifiers & key.MOD_CTRL:
            # Clipboard shortcuts: copy / paste (replacing selection) / cut.
            if symbol == key.C and self.caret.mark:
                self.copy_text()
            elif symbol == key.V:
                start = min(self.caret.position, self.caret.mark or self.caret.position)
                end = max(self.caret.position, self.caret.mark or self.caret.position)
                text = pyperclip.paste()
                self.document.delete_text(start, end)
                self.document.insert_text(self.caret.position, text)
                self.caret.position += len(text)
                self.caret.mark = self.caret.position
            elif symbol == key.X and self.caret.mark:
                start, end = self.copy_text()
                self.document.delete_text(start, end)
                self.caret.mark = self.caret.position
        elif symbol == key.BACKSPACE or symbol == key.DELETE:
            self.change = True

    def copy_text(self):
        # Copy the current selection to the clipboard; returns its span.
        start = min(self.caret.position, self.caret.mark)
        end = max(self.caret.position, self.caret.mark)
        text = self.document.text[start:end]
        pyperclip.copy(text)
        return (start, end)

    def set_focus(self):
        # Show the caret and select the whole document (mark 0 .. end).
        self.caret.visible = True
        self.caret.mark = 0
        self.caret.position = len(self.document.text)

    def __del__(self):
        # Release pyglet resources owned by this editor.
        self.layout.delete()
        self.update_label.delete()
        self.caret.delete()
import pyperclip
import keyword
import tokenize
import io
import os
from pyno.utils import x_y_pan_scale, font
from pyno.draw import quad_aligned
highlight = set(list(__builtins__.keys()) +
list(keyword.__dict__.keys()) +
keyword.kwlist +
['call', 'cleanup'])
x_shift = 40
y_shift = 20
layout_padding = 5
resize_button = 10
nline_width = 30
nline_padding = 2
help_offset = 5
class CodeEditor():
'''
Code editor is the window you define nodes function
'''
def __init__(self, node, highlighting=1):
self.node = node # node-owner of this codeEditor
self.document = pyglet.text.document.FormattedDocument(node.code)
self.highlighting = highlighting # 0: off, 1: python (node), 2: file (sub)
@self.document.event
def on_insert_text(start, end):
self.update_highlighting()
@self.document.event
def on_delete_text(start, end):
self.update_highlighting()
self.document.set_style(0, len(node.code),
dict(font_name=font,
font_size=11, color=(255, 255, 255, 230)))
self.layout = pyglet.text.layout.IncrementalTextLayout(
self.document,
*node.editor_size,
multiline=True, wrap_lines=False)
self.update_label = pyglet.text.Label('CTRL+ENTER to save and execute',
font_name=font,
anchor_y='top',
font_size=9)
self.line_numbering = pyglet.text.Label('',
font_name=font,
font_size=11,
color=(255, 255, 255, 127),
align='right',
anchor_y='top',
width=nline_width,
multiline=True)
self.autocomplete = pyglet.text.Label('',
font_name=font,
font_size=9,
color=(125, 255, 125, 127))
self.caret = pyglet.text.caret.Caret(self.layout)
self.caret.color = (255, 255, 255)
self.caret.visible = False
self.hover = False
self.hovered = True
self.resize = False
self.change = False
self.pan_scale = [[0.0, 0.0], 1]
self.screen_size = (800, 600)
def update_node(self):
# Push code to node
self.node.new_code(self.document.text)
self.node.need_update = True
self.change = False
def intersect_point(self, point):
# Intersection with whole codeEditor
l = self.layout
if 0 < point[0] - l.x + 20 < l.width + 20 and \
0 < point[1] - l.y < l.height + 10:
self.node.hover = True
return True
return False
def intersect_corner(self, point):
# Intersection with bottom right corner to resize
l = self.layout
return (0 < point[0] - (l.x + l.width - resize_button) < resize_button and
0 < point[1] - l.y < resize_button)
def render(self):
self.node.make_child_active()
l = self.layout
l.x = self.node.x + self.node.cw + x_shift
l.y = self.node.y - l.height + self.node.ch + y_shift
if self.change:
self.update_label.x = l.x
self.update_label.y = l.y - help_offset
self.update_label.draw()
if self.hover:
if self.document.text and not self.hovered:
self.hovered = True
self.update_highlighting()
color = self.node.color if not self.change else (255, 100, 10)
# codeEditor background
quad_aligned(l.x - layout_padding, l.y,
l.width + layout_padding, l.height,
((0, 0, 0) if not self.change
else (20, 10, 5)) + (230,))
if self.resize:
quad_aligned(l.x - layout_padding, l.y + l.height,
self.node.editor_size[0] + layout_padding,
-self.node.editor_size[1],
color + (100,))
# codeEditor resize corner
quad_aligned(l.x + l.width - resize_button, l.y,
resize_button, resize_button, color + (255,))
# codeEditor left line
quad_aligned(l.x - layout_padding - nline_width, l.y,
nline_width, l.height, color + (255,))
# codeEditor left line numbering
font_height = self.layout.content_height / self.layout.get_line_count()
line_offset = (-self.layout.view_y) % font_height
first_line = int(-self.layout.view_y / font_height)
count_line = min(int((self.layout.height + line_offset) / font_height), self.layout.get_line_count())
self.line_numbering.x = l.x - layout_padding - nline_width - nline_padding
self.line_numbering.y = l.y + l.height + line_offset
self.line_numbering.text = '\n'.join(['%02i'%i for i in range(first_line + 1, first_line + count_line + 1)])
self.line_numbering.draw()
# codeEditor autocomplete hint
self.autocomplete.x = l.x
self.autocomplete.y = l.y + l.height + help_offset
self.autocomplete.draw()
else:
if self.document.text and self.hovered:
self.hovered = False
self.document.set_style(0, len(self.node.code),
dict(color=(255, 255, 255, 50)))
self.layout.draw()
def update_highlighting(self):
if len(self.document.text) == 0:
return
# reset highlighting and hint
self.document.set_style(0, len(self.document.text),
dict(color=(255, 255, 255, 255)))
self.autocomplete.text = ""
if self.highlighting == 0: # 0: off
return
elif self.highlighting == 1: # 1: python
# rudimentary syntax highlighting and autocomplete hint
newline_offset = ([-1] +
[i for i, ch in enumerate(self.document.text) if ch == '\n'] +
[len(self.document.text)])
try:
obj_string = ""
for item in tokenize.tokenize(io.BytesIO(self.document.text.encode('utf-8')).readline):
start = newline_offset[item.start[0] - 1] + item.start[1] + 1
stopp = newline_offset[item.end[0] - 1] + item.end[1] + 1
# rudimentary autocomplete hint
if (item.type == tokenize.NAME) or (item.string == "."):
obj_string += item.string
else:
obj_string = ""
if (start <= self.caret.position <= stopp):
if not obj_string:
obj_string = item.string
try:
obj = eval(obj_string.strip(), self.node.env)
#print("Code hint:\n", obj.__doc__)
self.autocomplete.text = obj.__doc__.split("\n")[0]
except:
pass
# syntax highlighting
if (item.type == tokenize.NAME) and (item.string in highlight):
pass
elif (item.type in [tokenize.COMMENT, tokenize.OP, tokenize.NUMBER, tokenize.STRING]):
pass # (we could e.g. set another color here...)
else:
continue # do not highlight this token
self.document.set_style(start, stopp,
dict(color=(255, 200, 100, 255)))
except tokenize.TokenError:
pass
elif self.highlighting == 2: # 2: file
if os.path.exists(self.document.text):
self.document.set_style(0, len(self.node.code),
dict(color=(255, 200, 100, 255)))
# --- Input events ---
def on_mouse_press(self, x, y, button, modifiers):
x, y = x_y_pan_scale(x, y, self.pan_scale, self.screen_size)
if self.intersect_corner((x, y)):
self.resize = True
elif button == 1 and self.hover:
self.set_focus()
self.caret.on_mouse_press(x, y, button, modifiers)
self.update_highlighting()
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
x, y = x_y_pan_scale(x, y, self.pan_scale, self.screen_size)
dx, dy = int(dx / self.pan_scale[1]), int(dy / self.pan_scale[1])
if buttons == 1 and self.resize:
width = max(self.node.editor_size[0] + dx, 300)
height = max(self.node.editor_size[1] - dy, 150)
self.node.editor_size = width, height
elif buttons == 1 and self.hover:
self.caret.on_mouse_drag(x, y, dx, dy, buttons, modifiers)
def on_mouse_release(self, x, y, button, modifiers):
if self.resize:
self.layout.width, self.layout.height = self.node.editor_size
self.resize = False
def on_text(self, text):
if self.hover:
self.change = True
self.caret.on_text(text)
def on_text_motion(self, motion):
if self.hover:
self.caret.on_text_motion(motion)
def on_text_motion_select(self, motion):
if self.hover:
self.caret.on_text_motion_select(motion)
def on_key_press(self, symbol, modifiers):
key = pyglet.window.key
if symbol == key.TAB:
self.change = True
self.document.insert_text(self.caret.position, ' ')
self.caret.position += 2
elif modifiers & key.MOD_CTRL and symbol == key.ENTER:
print('Reload code')
self.update_node()
elif modifiers & key.MOD_CTRL:
if symbol == key.C and self.caret.mark:
self.copy_text()
elif symbol == key.V:
start = min(self.caret.position, self.caret.mark or self.caret.position)
end = max(self.caret.position, self.caret.mark or self.caret.position)
text = pyperclip.paste()
self.document.delete_text(start, end)
self.document.insert_text(self.caret.position, text)
self.caret.position += len(text)
self.caret.mark = self.caret.position
elif symbol == key.X and self.caret.mark:
start, end = self.copy_text()
self.document.delete_text(start, end)
self.caret.mark = self.caret.position
elif symbol == key.BACKSPACE or symbol == key.DELETE:
self.change = True
def copy_text(self):
start = min(self.caret.position, self.caret.mark)
end = max(self.caret.position, self.caret.mark)
text = self.document.text[start:end]
pyperclip.copy(text)
return (start, end)
def set_focus(self):
self.caret.visible = True
self.caret.mark = 0
self.caret.position = len(self.document.text)
def __del__(self):
self.layout.delete()
self.update_label.delete()
self.caret.delete() | 0.448668 | 0.128416 |
class BoolNode:
def __init__(self,token):
self.token = token
self.position = token.position
def __repr__(self):
return f'{self.token}'
class NumberNode:
def __init__(self,token):
self.token = token
self.position = token.position
def __repr__(self):
return f'{self.token}'
class StringNode:
def __init__(self,token):
self.token = token
self.position = token.position
def __repr__(self):
return f'{self.token}'
class ListNode:
def __init__(self,elementNodes, position):
self.elementNodes = elementNodes
self.position = position
class BinaryOperationNode:
def __init__(self,leftNode,operationToken,rightNode):
self.leftNode = leftNode
self.operationToken = operationToken
self.rightNode = rightNode
self.position = leftNode.position
def __repr__(self):
return f'({self.leftNode},{self.operationToken},{self.rightNode})'
class UnaryOperationNode:
def __init__(self,operationToken,rightNode):
self.operationToken = operationToken
self.rightNode = rightNode
self.position = operationToken.position
def __repr__(self):
return f'({self.operationToken},{self.rightNode})'
class VarAccessNode:
def __init__(self,varToken):
self.varToken = varToken
self.position = varToken.position
class VarAssignNode:
def __init__(self,varToken,expressionNode):
self.varToken = varToken
self.expressionNode = expressionNode
self.position = varToken.position
class IfNode:
def __init__(self,baseCase,elseCase):
self.baseCase = baseCase
self.elseCase = elseCase
self.position = baseCase[0].position
class ForNode:
def __init__(self,varName,startValueNode,endValueNode,stepValueNode ,bodyNode):
self.varName = varName
self.startValueNode = startValueNode
self.endValueNode = endValueNode
self.stepValueNode = stepValueNode
self.bodyNode = bodyNode
self.position = varName.position
class WhileNode:
def __init__(self,conditionNode,bodyNode):
self.conditionNode = conditionNode
self.bodyNode = bodyNode
self.position = conditionNode.position
class PrintNode:
def __init__(self,bodyNode):
self.bodyNode = bodyNode
self.position = bodyNode.position
class SumNode:
def __init__(self,listNode):
self.listNode = listNode
self.position = listNode.position
class StringifyNode:
def __init__(self,bodyNode):
self.bodyNode = bodyNode
self.position = bodyNode.position | server/zeta_basic/PARSER/AbstractSyntaxTreeNodes.py | class BoolNode:
def __init__(self,token):
self.token = token
self.position = token.position
def __repr__(self):
return f'{self.token}'
class NumberNode:
def __init__(self,token):
self.token = token
self.position = token.position
def __repr__(self):
return f'{self.token}'
class StringNode:
def __init__(self,token):
self.token = token
self.position = token.position
def __repr__(self):
return f'{self.token}'
class ListNode:
def __init__(self,elementNodes, position):
self.elementNodes = elementNodes
self.position = position
class BinaryOperationNode:
def __init__(self,leftNode,operationToken,rightNode):
self.leftNode = leftNode
self.operationToken = operationToken
self.rightNode = rightNode
self.position = leftNode.position
def __repr__(self):
return f'({self.leftNode},{self.operationToken},{self.rightNode})'
class UnaryOperationNode:
def __init__(self,operationToken,rightNode):
self.operationToken = operationToken
self.rightNode = rightNode
self.position = operationToken.position
def __repr__(self):
return f'({self.operationToken},{self.rightNode})'
class VarAccessNode:
def __init__(self,varToken):
self.varToken = varToken
self.position = varToken.position
class VarAssignNode:
def __init__(self,varToken,expressionNode):
self.varToken = varToken
self.expressionNode = expressionNode
self.position = varToken.position
class IfNode:
def __init__(self,baseCase,elseCase):
self.baseCase = baseCase
self.elseCase = elseCase
self.position = baseCase[0].position
class ForNode:
def __init__(self,varName,startValueNode,endValueNode,stepValueNode ,bodyNode):
self.varName = varName
self.startValueNode = startValueNode
self.endValueNode = endValueNode
self.stepValueNode = stepValueNode
self.bodyNode = bodyNode
self.position = varName.position
class WhileNode:
def __init__(self,conditionNode,bodyNode):
self.conditionNode = conditionNode
self.bodyNode = bodyNode
self.position = conditionNode.position
class PrintNode:
def __init__(self,bodyNode):
self.bodyNode = bodyNode
self.position = bodyNode.position
class SumNode:
def __init__(self,listNode):
self.listNode = listNode
self.position = listNode.position
class StringifyNode:
def __init__(self,bodyNode):
self.bodyNode = bodyNode
self.position = bodyNode.position | 0.8415 | 0.244916 |
import socket , re , datetime , threading , time
#IRC Information
SERVER = 'irc.freenode.net'
PORT = 6667
NICKNAME = 'write nickname here'
CHANNEL = '#write channel here'
network = SERVER.split('.')
#Terminal Colours
Black = '\x1b[30m'
Red = '\x1b[31m'
Green = '\x1b[32m'
Yellow = '\x1b[33m'
Blue = '\x1b[34m'
Purple = '\x1b[35m'
Cyan = '\x1b[36m'
White = '\x1b[37m'
Cancel = '\x1b[0m'
#Logo
print(Green + '''
╦╦═╗╔═╗ ┌─┐┬ ┬┌─┐┌┐┌┌┬┐
║╠╦╝║ │ │ │├┤ │││ │
╩╩╚═╚═╝ └─┘┴─┘┴└─┘┘└┘ ┴
''' + Cancel)
#----------------------------------------------------------------------------------------------------
#Functions:
def send_data(command):
''' To send data to IRC '''
IRC.send((command + '\n').encode())
def send_txt():
''' To send text to IRC '''
text = input()
IRC.send(('PRIVMSG ' + CHANNEL + ' :' + text + '\r\n').encode())
#print(Cyan + datetime.datetime.now().strftime('%H:%M') + Purple + '\t' + text + Cancel)
def Connection():
''' Maintian Connection and listen for incoming text'''
while True:
buffer = IRC.recv(1024)
msg = buffer.decode().split()
#print(msg)
if msg[1] == 'NOTICE':
print('\x1b[36m' + 'Calling' + msg[0] + '\x1b[0m')
server_a = msg[0].split(':')
server_b = server_a[1]
if msg[1] == '433':
print(Red + '[-] Nickname already in use, try a different one' + Cancel)
break
if msg[1] == 'JOIN':
print(Yellow + '---------------------------')
print('\tCONNECTED')
print('Network:\t' , network[1] , '\nServer:\t\t' , server_b , '\nChannel:\t' , msg[2])
print('---------------------------' + Cancel)
if msg[0] == 'PING': #When server pings answer with pong to maintain connection
server1 = msg[1].split(':')
server2 = server1[1]
send_data('PONG %s' % server2)
#print('Received' , msg[0] , 'from' , msg[1] , 'Sent back:' , 'PONG')
#Receive Text
if msg[1] == 'PRIVMSG':
if any(['ThisISaBoT' in m for m in msg]):
text = ' '.join(msg[3 : ])
text = text.strip(':')
nick1 = msg[0].split('!')
nick2 = nick1[0].split(':')
print(Cyan + datetime.datetime.now().strftime('%H:%M') + '\t' + Red + nick2[1] , '\t' , text + Cancel)
else:
text = ' '.join(msg[3 : ])
text = text.strip(':')
nick1 = msg[0].split('!')
nick2 = nick1[0].split(':')
print(Cyan + datetime.datetime.now().strftime('%H:%M') + Cancel + '\t' + nick2[1] , '\t' , text)
#----------------------------------------------------------------------------------------------------
#Connect To IRC
IRC = socket.socket(socket.AF_INET , socket.SOCK_STREAM)
IRC.connect((SERVER , PORT))
send_data('USER user host server name')
send_data('NICK ' + NICKNAME)
send_data('JOIN ' + CHANNEL)
#MainTain Connection and Receive Text
t1 = threading.Thread(target = Connection)
t1.daemon = True
t1.start()
#Send User Text
while True:
send_txt() | IRC.py |
import socket , re , datetime , threading , time
#IRC Information
SERVER = 'irc.freenode.net'
PORT = 6667
NICKNAME = 'write nickname here'
CHANNEL = '#write channel here'
network = SERVER.split('.')
#Terminal Colours
Black = '\x1b[30m'
Red = '\x1b[31m'
Green = '\x1b[32m'
Yellow = '\x1b[33m'
Blue = '\x1b[34m'
Purple = '\x1b[35m'
Cyan = '\x1b[36m'
White = '\x1b[37m'
Cancel = '\x1b[0m'
#Logo
print(Green + '''
╦╦═╗╔═╗ ┌─┐┬ ┬┌─┐┌┐┌┌┬┐
║╠╦╝║ │ │ │├┤ │││ │
╩╩╚═╚═╝ └─┘┴─┘┴└─┘┘└┘ ┴
''' + Cancel)
#----------------------------------------------------------------------------------------------------
#Functions:
def send_data(command):
''' To send data to IRC '''
IRC.send((command + '\n').encode())
def send_txt():
''' To send text to IRC '''
text = input()
IRC.send(('PRIVMSG ' + CHANNEL + ' :' + text + '\r\n').encode())
#print(Cyan + datetime.datetime.now().strftime('%H:%M') + Purple + '\t' + text + Cancel)
def Connection():
''' Maintian Connection and listen for incoming text'''
while True:
buffer = IRC.recv(1024)
msg = buffer.decode().split()
#print(msg)
if msg[1] == 'NOTICE':
print('\x1b[36m' + 'Calling' + msg[0] + '\x1b[0m')
server_a = msg[0].split(':')
server_b = server_a[1]
if msg[1] == '433':
print(Red + '[-] Nickname already in use, try a different one' + Cancel)
break
if msg[1] == 'JOIN':
print(Yellow + '---------------------------')
print('\tCONNECTED')
print('Network:\t' , network[1] , '\nServer:\t\t' , server_b , '\nChannel:\t' , msg[2])
print('---------------------------' + Cancel)
if msg[0] == 'PING': #When server pings answer with pong to maintain connection
server1 = msg[1].split(':')
server2 = server1[1]
send_data('PONG %s' % server2)
#print('Received' , msg[0] , 'from' , msg[1] , 'Sent back:' , 'PONG')
#Receive Text
if msg[1] == 'PRIVMSG':
if any(['ThisISaBoT' in m for m in msg]):
text = ' '.join(msg[3 : ])
text = text.strip(':')
nick1 = msg[0].split('!')
nick2 = nick1[0].split(':')
print(Cyan + datetime.datetime.now().strftime('%H:%M') + '\t' + Red + nick2[1] , '\t' , text + Cancel)
else:
text = ' '.join(msg[3 : ])
text = text.strip(':')
nick1 = msg[0].split('!')
nick2 = nick1[0].split(':')
print(Cyan + datetime.datetime.now().strftime('%H:%M') + Cancel + '\t' + nick2[1] , '\t' , text)
#----------------------------------------------------------------------------------------------------
#Connect To IRC
IRC = socket.socket(socket.AF_INET , socket.SOCK_STREAM)
IRC.connect((SERVER , PORT))
send_data('USER user host server name')
send_data('NICK ' + NICKNAME)
send_data('JOIN ' + CHANNEL)
#MainTain Connection and Receive Text
t1 = threading.Thread(target = Connection)
t1.daemon = True
t1.start()
#Send User Text
while True:
send_txt() | 0.042196 | 0.078184 |
import os
import sys
from simpleimage import SimpleImage
def get_pixel_dist(pixel, red, green, blue):
"""
Returns the color distance between pixel and mean RGB value
Input:
pixel (Pixel): pixel with RGB values to be compared
red (int): average red value across all images
green (int): average green value across all images
blue (int): average blue value across all images
Returns:
dist^(1/2)(float): color distance between red, green, and blue pixel values
"""
# red_dist_sqr = (pixel.red-red)*(pixel.red-red)
# green_dist_sqr = (pixel.green-green)*(pixel.green-green)
# blue_dist_sqr = (pixel.blue-blue)*(pixel.blue-blue)
# dist_sqr = red_dist_sqr + green_dist_sqr + blue_dist_sqr
return ((pixel.red-red)*(pixel.red-red)+(pixel.green-green)*(pixel.green-green)+(pixel.blue-blue)*(pixel.blue-blue))**0.5
def get_average(pixels):
"""
Given a list of pixels, finds the average red, blue, and green values
Input:
pixels (List[Pixel]): list of pixels to be averaged
Returns:
rgb (List[int]): list of average red, green, blue values across pixels respectively
returning in the order: [red, green, blue]
"""
red_sum = 0
green_sum = 0
blue_sum = 0
for i in range(len(pixels)):
red_sum += pixels[i].red
green_sum += pixels[i].green
blue_sum += pixels[i].blue
rgb = [red_sum//len(pixels), green_sum//len(pixels), blue_sum//len(pixels)]
return rgb
def get_best_pixel(pixels):
"""
Given a list of pixels, returns the pixel with the smallest
distance from the average red, green, and blue values across all pixels.
Input:
pixels (List[Pixel]): list of pixels to be averaged and compared
Returns:
(Pixel): pixel closest to RGB averages
"""
index = 0
min_dist = get_pixel_dist(pixels[0], get_average(pixels)[0], get_average(pixels)[1], get_average(pixels)[2])
for i in range(1, len(pixels)):
if min_dist >= get_pixel_dist(pixels[i], get_average(pixels)[0], get_average(pixels)[1], get_average(pixels)[2]):
min_dist = get_pixel_dist(pixels[i], get_average(pixels)[0], get_average(pixels)[1], get_average(pixels)[2])
index = i
# identify the pixel which has the minimun color distance to the average color
return pixels[index]
def solve(images):
"""
Given a list of image objects, compute and display a Ghost solution image
based on these images. There will be at least 3 images and they will all
be the same size.
Input:
images (List[SimpleImage]): list of images to be processed
"""
width = images[0].width
height = images[0].height
result = SimpleImage.blank(width, height)
pixels = []
# list to contain all the pixels in the image
for i in range(width):
for j in range(height):
for k in range(len(images)):
# images[k].make_as_big_as(images[0])
# To make sure all the list of images have same size thus they can be compared with same pixels
pixels.append(images[k].get_pixel(i, j))
# Get the pixels of all images at (i, j)
best_pix = get_best_pixel(pixels)
result_pix = result.get_pixel(i, j)
result_pix.red = best_pix.red
result_pix.green = best_pix.green
result_pix.blue = best_pix.blue
pixels = []
# Re-assign to void list
print("Displaying image!")
result.show()
def jpgs_in_dir(dir):
"""
(provided, DO NOT MODIFY)
Given the name of a directory, returns a list of the .jpg filenames
within it.
Input:
dir (string): name of directory
Returns:
filenames(List[string]): names of jpg files in directory
"""
filenames = []
for filename in os.listdir(dir):
if filename.endswith('.jpg'):
filenames.append(os.path.join(dir, filename))
return filenames
def load_images(dir):
"""
(provided, DO NOT MODIFY)
Given a directory name, reads all the .jpg files within it into memory and
returns them in a list. Prints the filenames out as it goes.
Input:
dir (string): name of directory
Returns:
images (List[SimpleImages]): list of images in directory
"""
images = []
jpgs = jpgs_in_dir(dir)
for filename in jpgs:
print("Loading", filename)
image = SimpleImage(filename)
images.append(image)
return images
def main():
# (provided, DO NOT MODIFY)
args = sys.argv[1:]
# We just take 1 argument, the folder containing all the images.
# The load_images() capability is provided above.
images = load_images(args[0])
solve(images)
if __name__ == '__main__':
main() | sc_projects/SC101/stanCodoshop/stanCodoshop.py | import os
import sys
from simpleimage import SimpleImage
def get_pixel_dist(pixel, red, green, blue):
"""
Returns the color distance between pixel and mean RGB value
Input:
pixel (Pixel): pixel with RGB values to be compared
red (int): average red value across all images
green (int): average green value across all images
blue (int): average blue value across all images
Returns:
dist^(1/2)(float): color distance between red, green, and blue pixel values
"""
# red_dist_sqr = (pixel.red-red)*(pixel.red-red)
# green_dist_sqr = (pixel.green-green)*(pixel.green-green)
# blue_dist_sqr = (pixel.blue-blue)*(pixel.blue-blue)
# dist_sqr = red_dist_sqr + green_dist_sqr + blue_dist_sqr
return ((pixel.red-red)*(pixel.red-red)+(pixel.green-green)*(pixel.green-green)+(pixel.blue-blue)*(pixel.blue-blue))**0.5
def get_average(pixels):
"""
Given a list of pixels, finds the average red, blue, and green values
Input:
pixels (List[Pixel]): list of pixels to be averaged
Returns:
rgb (List[int]): list of average red, green, blue values across pixels respectively
returning in the order: [red, green, blue]
"""
red_sum = 0
green_sum = 0
blue_sum = 0
for i in range(len(pixels)):
red_sum += pixels[i].red
green_sum += pixels[i].green
blue_sum += pixels[i].blue
rgb = [red_sum//len(pixels), green_sum//len(pixels), blue_sum//len(pixels)]
return rgb
def get_best_pixel(pixels):
"""
Given a list of pixels, returns the pixel with the smallest
distance from the average red, green, and blue values across all pixels.
Input:
pixels (List[Pixel]): list of pixels to be averaged and compared
Returns:
(Pixel): pixel closest to RGB averages
"""
index = 0
min_dist = get_pixel_dist(pixels[0], get_average(pixels)[0], get_average(pixels)[1], get_average(pixels)[2])
for i in range(1, len(pixels)):
if min_dist >= get_pixel_dist(pixels[i], get_average(pixels)[0], get_average(pixels)[1], get_average(pixels)[2]):
min_dist = get_pixel_dist(pixels[i], get_average(pixels)[0], get_average(pixels)[1], get_average(pixels)[2])
index = i
# identify the pixel which has the minimun color distance to the average color
return pixels[index]
def solve(images):
"""
Given a list of image objects, compute and display a Ghost solution image
based on these images. There will be at least 3 images and they will all
be the same size.
Input:
images (List[SimpleImage]): list of images to be processed
"""
width = images[0].width
height = images[0].height
result = SimpleImage.blank(width, height)
pixels = []
# list to contain all the pixels in the image
for i in range(width):
for j in range(height):
for k in range(len(images)):
# images[k].make_as_big_as(images[0])
# To make sure all the list of images have same size thus they can be compared with same pixels
pixels.append(images[k].get_pixel(i, j))
# Get the pixels of all images at (i, j)
best_pix = get_best_pixel(pixels)
result_pix = result.get_pixel(i, j)
result_pix.red = best_pix.red
result_pix.green = best_pix.green
result_pix.blue = best_pix.blue
pixels = []
# Re-assign to void list
print("Displaying image!")
result.show()
def jpgs_in_dir(dir):
"""
(provided, DO NOT MODIFY)
Given the name of a directory, returns a list of the .jpg filenames
within it.
Input:
dir (string): name of directory
Returns:
filenames(List[string]): names of jpg files in directory
"""
filenames = []
for filename in os.listdir(dir):
if filename.endswith('.jpg'):
filenames.append(os.path.join(dir, filename))
return filenames
def load_images(dir):
"""
(provided, DO NOT MODIFY)
Given a directory name, reads all the .jpg files within it into memory and
returns them in a list. Prints the filenames out as it goes.
Input:
dir (string): name of directory
Returns:
images (List[SimpleImages]): list of images in directory
"""
images = []
jpgs = jpgs_in_dir(dir)
for filename in jpgs:
print("Loading", filename)
image = SimpleImage(filename)
images.append(image)
return images
def main():
# (provided, DO NOT MODIFY)
args = sys.argv[1:]
# We just take 1 argument, the folder containing all the images.
# The load_images() capability is provided above.
images = load_images(args[0])
solve(images)
if __name__ == '__main__':
main() | 0.717309 | 0.690957 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.event import event
from flexget.utils.tools import split_title_year
log = logging.getLogger('est_series_tvmaze')
class EstimatesSeriesTVMaze(object):
@plugin.priority(2)
def estimate(self, entry):
if not all(field in entry for field in ['series_name', 'series_season']):
return
series_name = entry['series_name']
season = entry['series_season']
episode_number = entry.get('series_episode')
title, year_match = split_title_year(series_name)
# This value should be added to input plugins to trigger a season lookuo
season_pack = entry.get('season_pack_lookup')
kwargs = {}
kwargs['tvmaze_id'] = entry.get('tvmaze_id')
kwargs['tvdb_id'] = entry.get('tvdb_id') or entry.get('trakt_series_tvdb_id')
kwargs['tvrage_id'] = entry.get('tvrage_id') or entry.get('trakt_series_tvrage_id')
kwargs['imdb_id'] = entry.get('imdb_id')
kwargs['show_name'] = title
kwargs['show_year'] = entry.get('trakt_series_year') or entry.get('year') or entry.get(
'imdb_year') or year_match
kwargs['show_network'] = entry.get('network') or entry.get('trakt_series_network')
kwargs['show_country'] = entry.get('country') or entry.get('trakt_series_country')
kwargs['show_language'] = entry.get('language')
kwargs['series_season'] = season
kwargs['series_episode'] = episode_number
kwargs['series_name'] = series_name
api_tvmaze = plugin.get_plugin_by_name('api_tvmaze').instance
if season_pack:
lookup = api_tvmaze.season_lookup
log.debug('Searching api_tvmaze for season')
else:
log.debug('Searching api_tvmaze for episode')
lookup = api_tvmaze.episode_lookup
for k, v in list(kwargs.items()):
if v:
log.debug('%s: %s', k, v)
try:
entity = lookup(**kwargs)
except LookupError as e:
log.debug(str(e))
return
if entity and entity.airdate:
log.debug('received air-date: %s', entity.airdate)
return entity.airdate
return
@event('plugin.register')
def register_plugin():
plugin.register(EstimatesSeriesTVMaze, 'est_series_tvmaze', interfaces=['estimate_release'], api_ver=2) | flexget/plugins/estimators/est_release_series_tvmaze.py | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.event import event
from flexget.utils.tools import split_title_year
log = logging.getLogger('est_series_tvmaze')
class EstimatesSeriesTVMaze(object):
@plugin.priority(2)
def estimate(self, entry):
if not all(field in entry for field in ['series_name', 'series_season']):
return
series_name = entry['series_name']
season = entry['series_season']
episode_number = entry.get('series_episode')
title, year_match = split_title_year(series_name)
# This value should be added to input plugins to trigger a season lookuo
season_pack = entry.get('season_pack_lookup')
kwargs = {}
kwargs['tvmaze_id'] = entry.get('tvmaze_id')
kwargs['tvdb_id'] = entry.get('tvdb_id') or entry.get('trakt_series_tvdb_id')
kwargs['tvrage_id'] = entry.get('tvrage_id') or entry.get('trakt_series_tvrage_id')
kwargs['imdb_id'] = entry.get('imdb_id')
kwargs['show_name'] = title
kwargs['show_year'] = entry.get('trakt_series_year') or entry.get('year') or entry.get(
'imdb_year') or year_match
kwargs['show_network'] = entry.get('network') or entry.get('trakt_series_network')
kwargs['show_country'] = entry.get('country') or entry.get('trakt_series_country')
kwargs['show_language'] = entry.get('language')
kwargs['series_season'] = season
kwargs['series_episode'] = episode_number
kwargs['series_name'] = series_name
api_tvmaze = plugin.get_plugin_by_name('api_tvmaze').instance
if season_pack:
lookup = api_tvmaze.season_lookup
log.debug('Searching api_tvmaze for season')
else:
log.debug('Searching api_tvmaze for episode')
lookup = api_tvmaze.episode_lookup
for k, v in list(kwargs.items()):
if v:
log.debug('%s: %s', k, v)
try:
entity = lookup(**kwargs)
except LookupError as e:
log.debug(str(e))
return
if entity and entity.airdate:
log.debug('received air-date: %s', entity.airdate)
return entity.airdate
return
@event('plugin.register')
def register_plugin():
plugin.register(EstimatesSeriesTVMaze, 'est_series_tvmaze', interfaces=['estimate_release'], api_ver=2) | 0.577019 | 0.059374 |
def findDecision(obj): #obj[0]: Passanger, obj[1]: Weather, obj[2]: Time, obj[3]: Coupon, obj[4]: Coupon_validity, obj[5]: Gender, obj[6]: Age, obj[7]: Maritalstatus, obj[8]: Children, obj[9]: Education, obj[10]: Occupation, obj[11]: Income, obj[12]: Bar, obj[13]: Coffeehouse, obj[14]: Restaurant20to50, obj[15]: Direction_same, obj[16]: Distance
# {"feature": "Income", "instances": 51, "metric_value": 0.9975, "depth": 1}
if obj[11]<=7:
# {"feature": "Coffeehouse", "instances": 45, "metric_value": 0.971, "depth": 2}
if obj[13]>1.0:
# {"feature": "Occupation", "instances": 26, "metric_value": 0.9829, "depth": 3}
if obj[10]<=10:
# {"feature": "Bar", "instances": 21, "metric_value": 0.9984, "depth": 4}
if obj[12]<=1.0:
# {"feature": "Education", "instances": 14, "metric_value": 0.9403, "depth": 5}
if obj[9]>0:
# {"feature": "Passanger", "instances": 9, "metric_value": 0.9911, "depth": 6}
if obj[0]<=1:
# {"feature": "Coupon", "instances": 5, "metric_value": 0.7219, "depth": 7}
if obj[3]>1:
return 'False'
elif obj[3]<=1:
return 'True'
else: return 'True'
elif obj[0]>1:
# {"feature": "Time", "instances": 4, "metric_value": 0.8113, "depth": 7}
if obj[2]>2:
return 'True'
elif obj[2]<=2:
return 'False'
else: return 'False'
else: return 'True'
elif obj[9]<=0:
return 'True'
else: return 'True'
elif obj[12]>1.0:
# {"feature": "Restaurant20to50", "instances": 7, "metric_value": 0.5917, "depth": 5}
if obj[14]>0.0:
return 'False'
elif obj[14]<=0.0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[10]>10:
return 'True'
else: return 'True'
elif obj[13]<=1.0:
# {"feature": "Age", "instances": 19, "metric_value": 0.6292, "depth": 3}
if obj[6]>0:
# {"feature": "Children", "instances": 18, "metric_value": 0.5033, "depth": 4}
if obj[8]<=0:
return 'False'
elif obj[8]>0:
# {"feature": "Time", "instances": 4, "metric_value": 1.0, "depth": 5}
if obj[2]<=0:
return 'True'
elif obj[2]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[6]<=0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[11]>7:
return 'True'
else: return 'True' | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/17_features/numtrees_20/rule_6.py | def findDecision(obj): #obj[0]: Passanger, obj[1]: Weather, obj[2]: Time, obj[3]: Coupon, obj[4]: Coupon_validity, obj[5]: Gender, obj[6]: Age, obj[7]: Maritalstatus, obj[8]: Children, obj[9]: Education, obj[10]: Occupation, obj[11]: Income, obj[12]: Bar, obj[13]: Coffeehouse, obj[14]: Restaurant20to50, obj[15]: Direction_same, obj[16]: Distance
# {"feature": "Income", "instances": 51, "metric_value": 0.9975, "depth": 1}
if obj[11]<=7:
# {"feature": "Coffeehouse", "instances": 45, "metric_value": 0.971, "depth": 2}
if obj[13]>1.0:
# {"feature": "Occupation", "instances": 26, "metric_value": 0.9829, "depth": 3}
if obj[10]<=10:
# {"feature": "Bar", "instances": 21, "metric_value": 0.9984, "depth": 4}
if obj[12]<=1.0:
# {"feature": "Education", "instances": 14, "metric_value": 0.9403, "depth": 5}
if obj[9]>0:
# {"feature": "Passanger", "instances": 9, "metric_value": 0.9911, "depth": 6}
if obj[0]<=1:
# {"feature": "Coupon", "instances": 5, "metric_value": 0.7219, "depth": 7}
if obj[3]>1:
return 'False'
elif obj[3]<=1:
return 'True'
else: return 'True'
elif obj[0]>1:
# {"feature": "Time", "instances": 4, "metric_value": 0.8113, "depth": 7}
if obj[2]>2:
return 'True'
elif obj[2]<=2:
return 'False'
else: return 'False'
else: return 'True'
elif obj[9]<=0:
return 'True'
else: return 'True'
elif obj[12]>1.0:
# {"feature": "Restaurant20to50", "instances": 7, "metric_value": 0.5917, "depth": 5}
if obj[14]>0.0:
return 'False'
elif obj[14]<=0.0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[10]>10:
return 'True'
else: return 'True'
elif obj[13]<=1.0:
# {"feature": "Age", "instances": 19, "metric_value": 0.6292, "depth": 3}
if obj[6]>0:
# {"feature": "Children", "instances": 18, "metric_value": 0.5033, "depth": 4}
if obj[8]<=0:
return 'False'
elif obj[8]>0:
# {"feature": "Time", "instances": 4, "metric_value": 1.0, "depth": 5}
if obj[2]<=0:
return 'True'
elif obj[2]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[6]<=0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[11]>7:
return 'True'
else: return 'True' | 0.067443 | 0.267947 |
import numpy as np
from os import listdir
from matplotlib import pyplot as plt
def apply_spectrum(data, pca, numinput=256, power=1.0):
colored = data.dot(np.diag(np.power(pca.sValues[:numinput], power)))
return colored/colored.std()
def get_params_and_errors(net, toy, nunits=256, folder='.',
filestart='toy', ds=1.0):
filelist = listdir(folder)
goodfiles = []
firing_rates = []
gains = []
errors = []
modfit = []
peaks = []
peakmodfits = []
exceptions = []
for file in filelist:
dsflag = False
if 'ds' in file:
dsflag = file.split('ds')[1].startswith(str(ds))
if file.endswith('.pickle') and file.startswith(filestart) and dsflag:
file = folder+file
try:
net.load(file)
except BaseException as ee:
exceptions.append(file)
continue
try:
fit = np.load(file + 'fit.npy')
except FileNotFoundError:
fit = net.modfits
ok = net.nunits == nunits
directtest = toy.test_fit(net.Q)
ok = ok and not (directtest - fit[-1]) > 0.01 and fit[-1] != 0
if ok:
modfit.append(fit[-1])
err = np.mean(net.errorhist[-1000:])
goodfiles.append(file)
errors.append(err)
firing_rates.append(net.p)
gains.append(net.gain)
peaks.append(np.min(net.errorhist))
peakmodfits.append(np.max(fit))
else:
exceptions.append(file)
print('Errors on ', str(len(exceptions)), ' files')
if len(goodfiles) == 0:
if len(exceptions) == 0:
raise FileNotFoundError('No valid files found.')
raise BaseException(exceptions[0])
return (goodfiles, firing_rates, gains, errors, peaks, modfit, peakmodfits)
def hp_scatter(firing_rates, gains, modfits, fig=None, ax=None):
if fig is None:
fig = plt.figure()
if ax is None:
ax = fig.add_subplot(111)
modfits = [0 if np.isnan(mf) else mf for mf in modfits]
sc = ax.scatter(firing_rates, gains, c=modfits, cmap='viridis', s=200)
ax.set_xlabel('Firing rate p')
ax.set_ylabel('Gain')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim([np.min(firing_rates)*0.8, np.max(firing_rates)*1.2])
ax.set_ylim([np.min(gains)*0.8, np.max(gains)*1.2])
fig.colorbar(sc, ax=ax)
def err_hp_scatter(firing_rates, gains, errors, fig=None, ax=None):
if fig is None:
fig = plt.figure()
if ax is None:
ax = fig.add_subplot(111)
goodfr = [firing_rates[ii] for ii in range(len(errors)) if errors[ii] < 1.0]
goodg = [gains[ii] for ii in range(len(errors)) if errors[ii] < 1.0]
badfr = [firing_rates[ii] for ii in range(len(errors)) if errors[ii] >= 1.0 or np.isnan(errors[ii])]
badg = [gains[ii] for ii in range(len(errors)) if errors[ii] >= 1.0 or np.isnan(errors[ii])]
errors = [er for er in errors if er < 1.0]
sc = ax.scatter(goodfr, goodg, c=errors, cmap='viridis_r', s=200)
fig.colorbar(sc, ax=ax)
ax.set_xlabel('Firing rate p')
ax.set_ylabel('Gain')
ax.scatter(badfr, badg, c='r', s=50, marker='x')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim([np.min(firing_rates)*0.8, np.max(firing_rates)*1.2])
ax.set_ylim([np.min(gains)*0.8, np.max(gains)*1.2])
def Q_and_svals(Q, pca, ds=1.0, ax=None, errorbars=False):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
means = np.abs(Q).mean(0)
scale = np.max(means)
means /= scale
if errorbars:
stds = np.abs(Q).std(0)/scale
qline, = ax.errorbar(np.arange(Q.shape[0]), means, yerr=stds, fmt='b.')
else:
qline, = ax.plot(np.arange(Q.shape[1]), means, 'b.')
svals = np.power(pca.sValues[:Q.shape[1]], ds)
svals /= np.max(svals)
sline, = ax.plot(svals, 'g')
# ax.set_title('SAILnet PC usage follows singular values')
ax.set_xlabel('Singular value rank')
ax.set_ylabel('Normalized value')
ax.legend([qline, sline], ['Mean ff weight magnitude', 'Singular value'])
def alt_Q_and_svals(Q, pca, ds=1.0, ax=None):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
stds = np.abs(Q).std(0)
stds /= np.max(stds)
ax.plot(stds, 'b.')
svals = np.power(pca.sValues[:256], ds)
svals /= np.max(svals)
ax.plot(svals, 'g')
def desphere_results(net, toy, pca_obj, desphere=1.0, folder='Pickles/4oc/'):
(goodfiles, firing_rates, gains, errors, peaks, modfit,
peakmodfits) = get_params_and_errors(net, toy, folder=folder, ds=desphere)
fig = plt.figure()
fitscat = fig.add_subplot(221)
hp_scatter(firing_rates, gains, modfit, fig=fig, ax=fitscat)
fitscat.set_title('Model fits')
errscat = fig.add_subplot(223)
err_hp_scatter(firing_rates, gains, errors, fig=fig, ax=errscat)
errscat.set_title('Mean squared error')
ind = np.nanargmax(modfit)
print('Parameters of the best-fitting model: ')
print('p = ', firing_rates[ind])
print('gain = ', gains[ind])
print('mse = ', errors[ind])
print('model fit = ', modfit[ind])
winner = goodfiles[ind]
msewinner = goodfiles[np.nanargmin(errors)]
try:
fittrace = net.modfits
except:
fittrace = np.load(winner+'fit.npy')
fitax = fig.add_subplot(222)
fitax.plot(fittrace)
fitax.set_xlabel('Training batches')
fitax.set_ylabel('Model fit')
# fitax.set_title('Time course of best model recovery')
net.load(winner)
Q_and_svals(net.Q, pca_obj, ds=desphere, ax=fig.add_subplot(224))
fig.tight_layout()
return winner, msewinner, fig | whitening/utils.py | import numpy as np
from os import listdir
from matplotlib import pyplot as plt
def apply_spectrum(data, pca, numinput=256, power=1.0):
colored = data.dot(np.diag(np.power(pca.sValues[:numinput], power)))
return colored/colored.std()
def get_params_and_errors(net, toy, nunits=256, folder='.',
filestart='toy', ds=1.0):
filelist = listdir(folder)
goodfiles = []
firing_rates = []
gains = []
errors = []
modfit = []
peaks = []
peakmodfits = []
exceptions = []
for file in filelist:
dsflag = False
if 'ds' in file:
dsflag = file.split('ds')[1].startswith(str(ds))
if file.endswith('.pickle') and file.startswith(filestart) and dsflag:
file = folder+file
try:
net.load(file)
except BaseException as ee:
exceptions.append(file)
continue
try:
fit = np.load(file + 'fit.npy')
except FileNotFoundError:
fit = net.modfits
ok = net.nunits == nunits
directtest = toy.test_fit(net.Q)
ok = ok and not (directtest - fit[-1]) > 0.01 and fit[-1] != 0
if ok:
modfit.append(fit[-1])
err = np.mean(net.errorhist[-1000:])
goodfiles.append(file)
errors.append(err)
firing_rates.append(net.p)
gains.append(net.gain)
peaks.append(np.min(net.errorhist))
peakmodfits.append(np.max(fit))
else:
exceptions.append(file)
print('Errors on ', str(len(exceptions)), ' files')
if len(goodfiles) == 0:
if len(exceptions) == 0:
raise FileNotFoundError('No valid files found.')
raise BaseException(exceptions[0])
return (goodfiles, firing_rates, gains, errors, peaks, modfit, peakmodfits)
def hp_scatter(firing_rates, gains, modfits, fig=None, ax=None):
if fig is None:
fig = plt.figure()
if ax is None:
ax = fig.add_subplot(111)
modfits = [0 if np.isnan(mf) else mf for mf in modfits]
sc = ax.scatter(firing_rates, gains, c=modfits, cmap='viridis', s=200)
ax.set_xlabel('Firing rate p')
ax.set_ylabel('Gain')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim([np.min(firing_rates)*0.8, np.max(firing_rates)*1.2])
ax.set_ylim([np.min(gains)*0.8, np.max(gains)*1.2])
fig.colorbar(sc, ax=ax)
def err_hp_scatter(firing_rates, gains, errors, fig=None, ax=None):
if fig is None:
fig = plt.figure()
if ax is None:
ax = fig.add_subplot(111)
goodfr = [firing_rates[ii] for ii in range(len(errors)) if errors[ii] < 1.0]
goodg = [gains[ii] for ii in range(len(errors)) if errors[ii] < 1.0]
badfr = [firing_rates[ii] for ii in range(len(errors)) if errors[ii] >= 1.0 or np.isnan(errors[ii])]
badg = [gains[ii] for ii in range(len(errors)) if errors[ii] >= 1.0 or np.isnan(errors[ii])]
errors = [er for er in errors if er < 1.0]
sc = ax.scatter(goodfr, goodg, c=errors, cmap='viridis_r', s=200)
fig.colorbar(sc, ax=ax)
ax.set_xlabel('Firing rate p')
ax.set_ylabel('Gain')
ax.scatter(badfr, badg, c='r', s=50, marker='x')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim([np.min(firing_rates)*0.8, np.max(firing_rates)*1.2])
ax.set_ylim([np.min(gains)*0.8, np.max(gains)*1.2])
def Q_and_svals(Q, pca, ds=1.0, ax=None, errorbars=False):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
means = np.abs(Q).mean(0)
scale = np.max(means)
means /= scale
if errorbars:
stds = np.abs(Q).std(0)/scale
qline, = ax.errorbar(np.arange(Q.shape[0]), means, yerr=stds, fmt='b.')
else:
qline, = ax.plot(np.arange(Q.shape[1]), means, 'b.')
svals = np.power(pca.sValues[:Q.shape[1]], ds)
svals /= np.max(svals)
sline, = ax.plot(svals, 'g')
# ax.set_title('SAILnet PC usage follows singular values')
ax.set_xlabel('Singular value rank')
ax.set_ylabel('Normalized value')
ax.legend([qline, sline], ['Mean ff weight magnitude', 'Singular value'])
def alt_Q_and_svals(Q, pca, ds=1.0, ax=None):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
stds = np.abs(Q).std(0)
stds /= np.max(stds)
ax.plot(stds, 'b.')
svals = np.power(pca.sValues[:256], ds)
svals /= np.max(svals)
ax.plot(svals, 'g')
def desphere_results(net, toy, pca_obj, desphere=1.0, folder='Pickles/4oc/'):
(goodfiles, firing_rates, gains, errors, peaks, modfit,
peakmodfits) = get_params_and_errors(net, toy, folder=folder, ds=desphere)
fig = plt.figure()
fitscat = fig.add_subplot(221)
hp_scatter(firing_rates, gains, modfit, fig=fig, ax=fitscat)
fitscat.set_title('Model fits')
errscat = fig.add_subplot(223)
err_hp_scatter(firing_rates, gains, errors, fig=fig, ax=errscat)
errscat.set_title('Mean squared error')
ind = np.nanargmax(modfit)
print('Parameters of the best-fitting model: ')
print('p = ', firing_rates[ind])
print('gain = ', gains[ind])
print('mse = ', errors[ind])
print('model fit = ', modfit[ind])
winner = goodfiles[ind]
msewinner = goodfiles[np.nanargmin(errors)]
try:
fittrace = net.modfits
except:
fittrace = np.load(winner+'fit.npy')
fitax = fig.add_subplot(222)
fitax.plot(fittrace)
fitax.set_xlabel('Training batches')
fitax.set_ylabel('Model fit')
# fitax.set_title('Time course of best model recovery')
net.load(winner)
Q_and_svals(net.Q, pca_obj, ds=desphere, ax=fig.add_subplot(224))
fig.tight_layout()
return winner, msewinner, fig | 0.29584 | 0.318339 |
from DB import DataBase
class Users:
def __init__(self, id_user=0, name="", email="", tel="", postal_code="",
state="", city="", district="", street="", number="", residence_type=""):
self.id_user = id_user
self.name = name
self.email = email
self.tel = tel
self.postal_code = postal_code
self.state = state
self.city = city
self.district = district
self.street = street
self.number = number
self.residence_type = residence_type
def insert_user(self):
sql = DataBase()
try:
query = "INSERT INTO users (name, email, tel, postal_code, state, city, district, street, number, " \
"residence_type) values ('" + self.name + "', '" + self.email + \
"', '" + self.tel + "', '" + self.postal_code + \
"', '" + self.state + "', '" + self.city + \
"', '" + self.district + "', '" + self.street + \
"', '" + self.number + "', '" + self.residence_type + "' )"
sql.dml(query)
return "Usuário cadastrado com sucesso!"
except:
return "Ocorreu um erro na inserção do usuário."
def update_user(self):
sql = DataBase()
try:
query = "update users set name = '" + self.name + "', email = '" + self.email + "', tel = '" + self.tel + \
"', postal_code = '" + self.postal_code + "', state = '" + self.state + \
"', city = '" + self.city + "', district = '" + self.district + \
"', street = '" + self.street + "', number = '" + self.number + \
"', residence_type = '" + self.residence_type + "' where id_user = " + str(self.id_user) + " "
sql.dml(query)
return "Usuário atualizado com sucesso!"
except:
return "Ocorreu um erro na alteração do usuário"
def delete_user(self):
sql = DataBase()
try:
query = "delete from users where id_user = '" + str(self.id_user) + "' "
sql.dml(query)
return "Usuário excluído com sucesso!"
except:
return "Ocorreu um erro na exclusão do usuário"
def select_user(self, id_user):
sql = DataBase()
try:
query = "select * from users where id_user = '" + id_user + "' "
cursor = sql.dql(query)
for row in cursor:
self.id_user = row[0]
self.name = row[1]
self.email = row[2]
self.tel = row[3]
self.postal_code = row[4]
self.state = row[5]
self.city = row[6]
self.district = row[7]
self.street = row[8]
self.number = row[9]
self.residence_type = row[10]
return "Usuário buscado com sucesso!"
except:
return "Ocorreu um erro na busca do usuário" | Users.py | from DB import DataBase
class Users:
def __init__(self, id_user=0, name="", email="", tel="", postal_code="",
state="", city="", district="", street="", number="", residence_type=""):
self.id_user = id_user
self.name = name
self.email = email
self.tel = tel
self.postal_code = postal_code
self.state = state
self.city = city
self.district = district
self.street = street
self.number = number
self.residence_type = residence_type
def insert_user(self):
sql = DataBase()
try:
query = "INSERT INTO users (name, email, tel, postal_code, state, city, district, street, number, " \
"residence_type) values ('" + self.name + "', '" + self.email + \
"', '" + self.tel + "', '" + self.postal_code + \
"', '" + self.state + "', '" + self.city + \
"', '" + self.district + "', '" + self.street + \
"', '" + self.number + "', '" + self.residence_type + "' )"
sql.dml(query)
return "Usuário cadastrado com sucesso!"
except:
return "Ocorreu um erro na inserção do usuário."
def update_user(self):
sql = DataBase()
try:
query = "update users set name = '" + self.name + "', email = '" + self.email + "', tel = '" + self.tel + \
"', postal_code = '" + self.postal_code + "', state = '" + self.state + \
"', city = '" + self.city + "', district = '" + self.district + \
"', street = '" + self.street + "', number = '" + self.number + \
"', residence_type = '" + self.residence_type + "' where id_user = " + str(self.id_user) + " "
sql.dml(query)
return "Usuário atualizado com sucesso!"
except:
return "Ocorreu um erro na alteração do usuário"
def delete_user(self):
sql = DataBase()
try:
query = "delete from users where id_user = '" + str(self.id_user) + "' "
sql.dml(query)
return "Usuário excluído com sucesso!"
except:
return "Ocorreu um erro na exclusão do usuário"
def select_user(self, id_user):
sql = DataBase()
try:
query = "select * from users where id_user = '" + id_user + "' "
cursor = sql.dql(query)
for row in cursor:
self.id_user = row[0]
self.name = row[1]
self.email = row[2]
self.tel = row[3]
self.postal_code = row[4]
self.state = row[5]
self.city = row[6]
self.district = row[7]
self.street = row[8]
self.number = row[9]
self.residence_type = row[10]
return "Usuário buscado com sucesso!"
except:
return "Ocorreu um erro na busca do usuário" | 0.421314 | 0.256122 |
import dash
from dash import html
import dash_bootstrap_components as dbc
from dash import dcc
from dash.dependencies import Input, Output, State
from db import connect_to_db, query_fg, query_tokens, query_futures
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.CYBORG])
CONTAINER_STYLE = {
"margin": "5rem 18rem",
"padding": "2rem 1rem",
}
content = html.Div([
html.Div([
dbc.Row([
dbc.Col([
dbc.Label(
"Select token"
),
dcc.Dropdown(
options=[
{'label': 'Bitcoin', 'value': 'btc-bitcoin'},
{'label': 'Ethereum', 'value': 'eth-ethereum'},
{'label': 'Solana', 'value': 'sol-solana'},
{'label': 'Cardano', 'value': 'ada-cardano'},
{'label': 'XRP', 'value': 'xrp-xrp'},
{'label': 'Doge', 'value': 'doge-dogecoin'},
{'label': 'Dot', 'value': 'dot-polkadot'},
{'label': 'Uniswap', 'value': 'uni-uniswap'},
{'label': 'Litecoin', 'value': 'ltc-litecoin'},
{'label': 'Luna', 'value': 'luna-terra'},
{'label': 'Link', 'value': 'link-chainlink'},
{'label': 'ICP', 'value': 'icp-internet-computer'},
{'label': 'Matic', 'value': 'matic-polygon'},
{'label': 'Avax', 'value': 'avax-avalanche'},
{'label': 'Vechain', 'value': 'vet-vechain'}
],
value=['btc-bitcoin'],
id='dropdown-tokens'
)
], width=3)
], style={"padding-top": "1em"})
]),
dcc.Graph(id='fg')
], id="page-content")
heatmap = html.Div(
[dbc.Row([
dbc.Col([
dcc.Graph(id='heatmap')
], align="center")
], style={"margin": "auto", "width": "50%" })]
)
futures = html.Div([
dbc.Row([
dbc.Col([
dbc.Label(
"Select token"
),
dcc.Dropdown(
options=[
{'label': 'Bitcoin', 'value': 'BTC-PERP'},
{'label': 'Ethereum', 'value': 'ETH-PERP'},
{'label': 'XRP', 'value': 'XRP-PERP'},
{'label': 'Chainlink', 'value': 'LINK-PERP'},
{'label': 'Litecoin', 'value': 'LTC-PERP'},
{'label': 'Cardano', 'value': 'ADA-PERP'},
{'label': 'EOS', 'value': 'EOS-PERP'},
{'label': 'BNB', 'value': 'BNB-PERP'}
],
value=['btc-bitcoin'],
id='dropdown-futures'
),
], width=3)
], style={"padding-top": "1em", "padding-bottom": ".5em"}),
dcc.Graph(id='futures')
])
app.layout = dbc.Container([
html.H2("Crypto Dashboard"),
dcc.Store(id="store"),
dbc.Row([
dbc.Col([
dbc.Label(
"Select timeframe"
),
dcc.Dropdown(
options=[
{'label': '7 days', 'value': 7},
{'label': '1 month', 'value': 31},
{'label': '1 year', 'value': 365}
],
value=7,
id='dropdown'
),
], width='auto')
], style={"padding-bottom": "2em"}),
dbc.Tabs(
[
dbc.Tab(futures, label="Futures", tab_id="futures"),
dbc.Tab(content, label="Fear & Greed", tab_id="fg"),
dbc.Tab(heatmap, label="Heatmap", tab_id="heatmap"),
],
id="tabs",
active_tab="futures",
),
html.Div(id="tab-content", className="p-4"),
dcc.Location(id="url"),
], style=CONTAINER_STYLE)
db = connect_to_db()
# Generating the fear and greed tba
@app.callback(
Output('fg', 'figure'),
Input('dropdown', 'value'),
Input("dropdown-tokens", "value"),)
def update_output(day_value, token_value):
df_fg, df_filtered_tokens = query_fg(day_value, token_value, db)
fig1 = make_subplots(specs=[[{"secondary_y": True}]])
fig1.add_trace(
go.Scatter(x=df_filtered_tokens['date'], y=df_filtered_tokens['price'], name="Price"),
secondary_y=False
)
fig1.add_trace(
go.Scatter(x=df_fg['ts'], y=df_fg['value'], name="Fear & Greed"),
secondary_y=True,
)
fig1.layout.template = 'plotly_dark'
fig1.update_layout(
title_text="Token + Fear & Greed"
)
fig1.update_layout(showlegend=False, paper_bgcolor='rgb(6,6,6)')
fig1.update_yaxes(
title_text="Token",
secondary_y=False)
fig1.update_yaxes(
title_text="Fear & Greed",
secondary_y=True)
return fig1
# Generating the heatmap tab
@app.callback(
Output('heatmap', 'figure'),
Input('dropdown', 'value'))
def generate_heatmap(day_value):
df_tokens = query_tokens(day_value, db)
reshape = df_tokens.pivot(index='date', columns='symbol', values='price')
corrM = reshape.corr()
fig2 = px.imshow(corrM)
fig2.layout.template = 'plotly_dark'
fig2.update_xaxes(side="top")
fig2.update_layout(width=700, height=700, paper_bgcolor='rgb(6,6,6)')
return fig2
# Generating the futures tab plot
@app.callback(
Output('futures', 'figure'),
Input('dropdown', 'value'),
Input('dropdown-futures', 'value'))
def generate_futures(days, value):
df_tokens = query_futures(days, value, db)
fig = px.line(df_tokens,
x="date", y="open", color='exchange', template='plotly_dark')
fig.update_layout(paper_bgcolor='rgb(6,6,6)')
return fig
if __name__ == '__main__':
app.run_server(debug=True) | dash/app.py | import dash
from dash import html
import dash_bootstrap_components as dbc
from dash import dcc
from dash.dependencies import Input, Output, State
from db import connect_to_db, query_fg, query_tokens, query_futures
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.CYBORG])
CONTAINER_STYLE = {
"margin": "5rem 18rem",
"padding": "2rem 1rem",
}
content = html.Div([
html.Div([
dbc.Row([
dbc.Col([
dbc.Label(
"Select token"
),
dcc.Dropdown(
options=[
{'label': 'Bitcoin', 'value': 'btc-bitcoin'},
{'label': 'Ethereum', 'value': 'eth-ethereum'},
{'label': 'Solana', 'value': 'sol-solana'},
{'label': 'Cardano', 'value': 'ada-cardano'},
{'label': 'XRP', 'value': 'xrp-xrp'},
{'label': 'Doge', 'value': 'doge-dogecoin'},
{'label': 'Dot', 'value': 'dot-polkadot'},
{'label': 'Uniswap', 'value': 'uni-uniswap'},
{'label': 'Litecoin', 'value': 'ltc-litecoin'},
{'label': 'Luna', 'value': 'luna-terra'},
{'label': 'Link', 'value': 'link-chainlink'},
{'label': 'ICP', 'value': 'icp-internet-computer'},
{'label': 'Matic', 'value': 'matic-polygon'},
{'label': 'Avax', 'value': 'avax-avalanche'},
{'label': 'Vechain', 'value': 'vet-vechain'}
],
value=['btc-bitcoin'],
id='dropdown-tokens'
)
], width=3)
], style={"padding-top": "1em"})
]),
dcc.Graph(id='fg')
], id="page-content")
heatmap = html.Div(
[dbc.Row([
dbc.Col([
dcc.Graph(id='heatmap')
], align="center")
], style={"margin": "auto", "width": "50%" })]
)
futures = html.Div([
dbc.Row([
dbc.Col([
dbc.Label(
"Select token"
),
dcc.Dropdown(
options=[
{'label': 'Bitcoin', 'value': 'BTC-PERP'},
{'label': 'Ethereum', 'value': 'ETH-PERP'},
{'label': 'XRP', 'value': 'XRP-PERP'},
{'label': 'Chainlink', 'value': 'LINK-PERP'},
{'label': 'Litecoin', 'value': 'LTC-PERP'},
{'label': 'Cardano', 'value': 'ADA-PERP'},
{'label': 'EOS', 'value': 'EOS-PERP'},
{'label': 'BNB', 'value': 'BNB-PERP'}
],
value=['btc-bitcoin'],
id='dropdown-futures'
),
], width=3)
], style={"padding-top": "1em", "padding-bottom": ".5em"}),
dcc.Graph(id='futures')
])
app.layout = dbc.Container([
html.H2("Crypto Dashboard"),
dcc.Store(id="store"),
dbc.Row([
dbc.Col([
dbc.Label(
"Select timeframe"
),
dcc.Dropdown(
options=[
{'label': '7 days', 'value': 7},
{'label': '1 month', 'value': 31},
{'label': '1 year', 'value': 365}
],
value=7,
id='dropdown'
),
], width='auto')
], style={"padding-bottom": "2em"}),
dbc.Tabs(
[
dbc.Tab(futures, label="Futures", tab_id="futures"),
dbc.Tab(content, label="Fear & Greed", tab_id="fg"),
dbc.Tab(heatmap, label="Heatmap", tab_id="heatmap"),
],
id="tabs",
active_tab="futures",
),
html.Div(id="tab-content", className="p-4"),
dcc.Location(id="url"),
], style=CONTAINER_STYLE)
db = connect_to_db()
# Generating the fear and greed tba
@app.callback(
Output('fg', 'figure'),
Input('dropdown', 'value'),
Input("dropdown-tokens", "value"),)
def update_output(day_value, token_value):
df_fg, df_filtered_tokens = query_fg(day_value, token_value, db)
fig1 = make_subplots(specs=[[{"secondary_y": True}]])
fig1.add_trace(
go.Scatter(x=df_filtered_tokens['date'], y=df_filtered_tokens['price'], name="Price"),
secondary_y=False
)
fig1.add_trace(
go.Scatter(x=df_fg['ts'], y=df_fg['value'], name="Fear & Greed"),
secondary_y=True,
)
fig1.layout.template = 'plotly_dark'
fig1.update_layout(
title_text="Token + Fear & Greed"
)
fig1.update_layout(showlegend=False, paper_bgcolor='rgb(6,6,6)')
fig1.update_yaxes(
title_text="Token",
secondary_y=False)
fig1.update_yaxes(
title_text="Fear & Greed",
secondary_y=True)
return fig1
# Generating the heatmap tab
@app.callback(
Output('heatmap', 'figure'),
Input('dropdown', 'value'))
def generate_heatmap(day_value):
df_tokens = query_tokens(day_value, db)
reshape = df_tokens.pivot(index='date', columns='symbol', values='price')
corrM = reshape.corr()
fig2 = px.imshow(corrM)
fig2.layout.template = 'plotly_dark'
fig2.update_xaxes(side="top")
fig2.update_layout(width=700, height=700, paper_bgcolor='rgb(6,6,6)')
return fig2
# Generating the futures tab plot
@app.callback(
Output('futures', 'figure'),
Input('dropdown', 'value'),
Input('dropdown-futures', 'value'))
def generate_futures(days, value):
df_tokens = query_futures(days, value, db)
fig = px.line(df_tokens,
x="date", y="open", color='exchange', template='plotly_dark')
fig.update_layout(paper_bgcolor='rgb(6,6,6)')
return fig
if __name__ == '__main__':
app.run_server(debug=True) | 0.625209 | 0.157687 |
import numpy as np
import pytest
from numpy import testing as npt
from metriculous.evaluators._classification_utils import (
ClassificationData,
ProbabilityMatrix,
)
class TestProbabilityMatrix:
def test_that_it_can_be_initialized_from_nested_list(self) -> None:
_ = ProbabilityMatrix(
distributions=[[0.8, 0.1, 0.1], [0.0, 0.9, 0.1], [0.2, 0.2, 0.6]]
)
def test_that_it_can_be_initialized_from_array(self) -> None:
_ = ProbabilityMatrix(distributions=np.eye(5))
def test_that_it_raises_error_if_not_normalized(self) -> None:
with pytest.raises(AssertionError, match="1.0"):
_ = ProbabilityMatrix(distributions=[[0.9, 0.11, 0.0]])
with pytest.raises(AssertionError, match="1.0"):
_ = ProbabilityMatrix(distributions=[[0.9, 0.0, 0.0]])
with pytest.raises(AssertionError, match="1.0"):
_ = ProbabilityMatrix(distributions=[[]])
def test_that_it_raises_error_if_wrong_shape(self) -> None:
with pytest.raises(ValueError, match="two-dimensional"):
_ = ProbabilityMatrix(distributions=[])
with pytest.raises(ValueError, match="two-dimensional") as info:
# noinspection PyTypeChecker
_ = ProbabilityMatrix(distributions=[[[]]]) # type: ignore
assert info.match("Expected a two-dimensional")
assert info.match("but received array of shape")
assert info.match("(1, 1, 0)")
def test_properties(self) -> None:
pm = ProbabilityMatrix(
distributions=[
[0.8, 0.1, 0.1],
[0.2, 0.2, 0.6],
[0.0, 0.9, 0.1],
[0.0, 0.9, 0.1],
]
)
assert pm.n_classes == 3
assert pm.n_samples == 4
npt.assert_equal(pm.argmaxes, desired=[0, 2, 1, 1])
npt.assert_equal(
pm.argmaxes_one_hot,
np.array(
[[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
),
)
class TestClassificationData:
def test_that_it_does_not_smoke(self) -> None:
cd = ClassificationData(
target=ProbabilityMatrix(
distributions=[
[0.8, 0.1, 0.1],
[0.2, 0.2, 0.6],
[0.0, 0.9, 0.1],
[0.0, 0.9, 0.1],
]
),
pred=ProbabilityMatrix(
distributions=[
[0.8, 0.1, 0.1],
[0.2, 0.2, 0.6],
[0.0, 0.9, 0.1],
[0.0, 0.9, 0.1],
]
),
)
assert cd.n_samples == 4
assert cd.n_classes == 3
def test_that_it_raises_if_n_samples_different(self) -> None:
with pytest.raises(ValueError, match="samples") as info:
_ = ClassificationData(
target=ProbabilityMatrix(distributions=[[0.8, 0.1, 0.1]]),
pred=ProbabilityMatrix(
distributions=[[0.8, 0.1, 0.1], [0.0, 0.9, 0.1]]
),
)
assert info.match("1")
assert info.match("2")
assert info.match("samples")
def test_that_it_raises_if_n_classes_different(self) -> None:
with pytest.raises(ValueError, match="classes") as info:
_ = ClassificationData(
target=ProbabilityMatrix(
distributions=[[0.8, 0.1, 0.1, 0.0], [0.8, 0.1, 0.1, 0.0]]
),
pred=ProbabilityMatrix(
distributions=[[0.8, 0.1, 0.1], [0.0, 0.9, 0.1]]
),
)
assert info.match("4 classes")
assert info.match("3 classes") | src/metriculous/evaluators/_classification_utils_test.py | import numpy as np
import pytest
from numpy import testing as npt
from metriculous.evaluators._classification_utils import (
ClassificationData,
ProbabilityMatrix,
)
class TestProbabilityMatrix:
def test_that_it_can_be_initialized_from_nested_list(self) -> None:
_ = ProbabilityMatrix(
distributions=[[0.8, 0.1, 0.1], [0.0, 0.9, 0.1], [0.2, 0.2, 0.6]]
)
def test_that_it_can_be_initialized_from_array(self) -> None:
_ = ProbabilityMatrix(distributions=np.eye(5))
def test_that_it_raises_error_if_not_normalized(self) -> None:
with pytest.raises(AssertionError, match="1.0"):
_ = ProbabilityMatrix(distributions=[[0.9, 0.11, 0.0]])
with pytest.raises(AssertionError, match="1.0"):
_ = ProbabilityMatrix(distributions=[[0.9, 0.0, 0.0]])
with pytest.raises(AssertionError, match="1.0"):
_ = ProbabilityMatrix(distributions=[[]])
def test_that_it_raises_error_if_wrong_shape(self) -> None:
with pytest.raises(ValueError, match="two-dimensional"):
_ = ProbabilityMatrix(distributions=[])
with pytest.raises(ValueError, match="two-dimensional") as info:
# noinspection PyTypeChecker
_ = ProbabilityMatrix(distributions=[[[]]]) # type: ignore
assert info.match("Expected a two-dimensional")
assert info.match("but received array of shape")
assert info.match("(1, 1, 0)")
def test_properties(self) -> None:
pm = ProbabilityMatrix(
distributions=[
[0.8, 0.1, 0.1],
[0.2, 0.2, 0.6],
[0.0, 0.9, 0.1],
[0.0, 0.9, 0.1],
]
)
assert pm.n_classes == 3
assert pm.n_samples == 4
npt.assert_equal(pm.argmaxes, desired=[0, 2, 1, 1])
npt.assert_equal(
pm.argmaxes_one_hot,
np.array(
[[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
),
)
class TestClassificationData:
def test_that_it_does_not_smoke(self) -> None:
cd = ClassificationData(
target=ProbabilityMatrix(
distributions=[
[0.8, 0.1, 0.1],
[0.2, 0.2, 0.6],
[0.0, 0.9, 0.1],
[0.0, 0.9, 0.1],
]
),
pred=ProbabilityMatrix(
distributions=[
[0.8, 0.1, 0.1],
[0.2, 0.2, 0.6],
[0.0, 0.9, 0.1],
[0.0, 0.9, 0.1],
]
),
)
assert cd.n_samples == 4
assert cd.n_classes == 3
def test_that_it_raises_if_n_samples_different(self) -> None:
with pytest.raises(ValueError, match="samples") as info:
_ = ClassificationData(
target=ProbabilityMatrix(distributions=[[0.8, 0.1, 0.1]]),
pred=ProbabilityMatrix(
distributions=[[0.8, 0.1, 0.1], [0.0, 0.9, 0.1]]
),
)
assert info.match("1")
assert info.match("2")
assert info.match("samples")
def test_that_it_raises_if_n_classes_different(self) -> None:
with pytest.raises(ValueError, match="classes") as info:
_ = ClassificationData(
target=ProbabilityMatrix(
distributions=[[0.8, 0.1, 0.1, 0.0], [0.8, 0.1, 0.1, 0.0]]
),
pred=ProbabilityMatrix(
distributions=[[0.8, 0.1, 0.1], [0.0, 0.9, 0.1]]
),
)
assert info.match("4 classes")
assert info.match("3 classes") | 0.808067 | 0.791902 |
import base64
import os
import os.path
import re
import stat
import string
import subprocess
import sys
import zlib
def _FileMatches(path, excl_regexps):
"""Returns true if the specified path matches none of the
specified exclude regular expresions.
"""
for r in excl_regexps:
if re.match(r, path):
return False
return True
def _GetFilesRecursively(path, excl_regexps=[
r"#.*",
r"\..+",
r".*~$",
r".*\.pyc$",
r".*_test.py$",
r".*_pkg.py$"]):
"""Recursively walks the source directory and locates matching
files. Returns a list of files.
"""
entries = os.listdir(path)
dirs = [e for e in entries if os.path.isdir(os.path.join(path, e))
and _FileMatches(e, excl_regexps)]
files = [os.path.join(path, e) for e in entries if
os.path.isfile(os.path.join(path, e)) and _FileMatches(e, excl_regexps)]
for d in dirs:
files += _GetFilesRecursively(os.path.join(path, d), excl_regexps)
return files
def _ReadFile(path):
return open(path, 'rb').read()
def _Package(template, package_out, files):
"""Package up all files into a self-extracting python script
suffixed by _pkg.py. This script has one potentially long data
string, which is the encoded, gzipped contents of the deployment
files. The files are a .tgz tarball and are un-archived into their
original directory structure and permissions using subprocess to
invoke tar.
Once this is complete, the decode.py script is invoked.
"""
# Roll a tgz tarball with files as arguments.
p = subprocess.Popen([ "tar", "czfH", "-" ] + list(files),
bufsize=4096, stdout=subprocess.PIPE)
(tarball, stderr) = p.communicate()
out = open(package_out, "w")
out.write("#!/usr/bin/env python\n")
out.write("import subprocess, base64, zlib, os, stat, sys\n")
out.write("tarball = base64.b64decode('")
out.write(base64.b64encode(tarball))
out.write("')\n")
out.write("os.mkdir('viewfinder')\n")
out.write("os.chdir('viewfinder')\n")
out.write("p = subprocess.Popen([ \"tar\", \"xzf\", \"-\" ],\n")
out.write(" bufsize=4096, stdin=subprocess.PIPE)\n")
out.write("p.communicate(input=tarball)\n")
out.write("os.execv('./backend/prod/deploy.py', "
"['./backend/prod/deploy.py', '{0}'])\n".format(template))
out.close()
os.chmod(package_out, stat.S_IRWXU)
def main():
"""Extract the deployment template from command line arguments,
import it, gather the list files to package, and run the packer.
"""
try:
# Import the deploy template, as we must access its servers
# list to search for python files to bundle for deployment.
deploy_template = sys.argv[1]
assert os.path.isfile(deploy_template), \
"deployment template {0} is not a file".format(deploy_template)
exec(open(deploy_template, 'rb'))
deploy_name = os.path.splitext(os.path.basename(deploy_template))[0]
package_out = os.path.join(os.path.dirname(deploy_template),
deploy_name + "_pkg.py")
# Get the full list of deployed files
files = _GetFilesRecursively("./")
# Package the contents into a deployment executable.
_Package(deploy_name, package_out, files)
print "{0} deployment packaged into {1}".format(
deploy_template, package_out)
return 0
except (IndexError, AssertionError), err:
print("Error: {0}, Usage: {1} <deploy-template>".format(
str(err), os.path.basename(sys.argv[0])))
return -1
if __name__ == "__main__":
sys.exit(main()) | backend/prod/package.py | import base64
import os
import os.path
import re
import stat
import string
import subprocess
import sys
import zlib
def _FileMatches(path, excl_regexps):
    """Return True iff *path* matches none of the exclude patterns.

    Each entry of *excl_regexps* is tried with ``re.match`` (anchored at
    the start of *path*); a single hit disqualifies the path.
    """
    return not any(re.match(pattern, path) for pattern in excl_regexps)
def _GetFilesRecursively(path, excl_regexps=(
        r"#.*",
        r"\..+",
        r".*~$",
        r".*\.pyc$",
        r".*_test.py$",
        r".*_pkg.py$")):
    """Recursively collect files under *path* that pass the exclude filter.

    Args:
        path: Directory to walk.
        excl_regexps: Iterable of regexps; entries (files and directories)
            whose bare names match any of them are skipped.  The default is
            a tuple rather than the original list, avoiding the shared
            mutable-default-argument pitfall.

    Returns:
        List of matching file paths, each prefixed with *path*; files of a
        directory come before the contents of its subdirectories, as before.
    """
    files = []
    dirs = []
    for entry in os.listdir(path):
        if not _FileMatches(entry, excl_regexps):
            continue
        # Join once per entry instead of twice as in the original.
        full = os.path.join(path, entry)
        if os.path.isdir(full):
            dirs.append(entry)
        elif os.path.isfile(full):
            files.append(full)
    for d in dirs:
        files += _GetFilesRecursively(os.path.join(path, d), excl_regexps)
    return files
def _ReadFile(path):
    """Return the binary contents of *path*.

    Uses a context manager so the file handle is closed deterministically
    (the original leaked it until garbage collection).
    """
    with open(path, 'rb') as f:
        return f.read()
def _Package(template, package_out, files):
    """Package all *files* into a self-extracting python script.

    The generated script (conventionally suffixed ``_pkg.py``) embeds a
    gzipped tarball of the deployment files as one long base64 string; when
    run, it unpacks the tarball into a ``viewfinder`` directory (preserving
    structure and permissions via tar) and then execs the deploy script
    with *template* as its argument.

    Args:
        template: Deployment template name passed through to deploy.py.
        package_out: Path of the self-extracting script to write.
        files: Iterable of file paths to include in the tarball.
    """
    # Roll a tgz tarball with files as arguments ('H' follows hard links).
    p = subprocess.Popen(["tar", "czfH", "-"] + list(files),
                         bufsize=4096, stdout=subprocess.PIPE)
    (tarball, stderr) = p.communicate()
    # b64encode returns bytes on Python 3; decode so the payload can be
    # written into the text-mode script file on either Python version.
    encoded = base64.b64decode and base64.b64encode(tarball).decode('ascii')
    script = (
        "#!/usr/bin/env python\n"
        "import subprocess, base64, zlib, os, stat, sys\n"
        "tarball = base64.b64decode('" + encoded + "')\n"
        "os.mkdir('viewfinder')\n"
        "os.chdir('viewfinder')\n"
        "p = subprocess.Popen([ \"tar\", \"xzf\", \"-\" ],\n"
        " bufsize=4096, stdin=subprocess.PIPE)\n"
        "p.communicate(input=tarball)\n"
        "os.execv('./backend/prod/deploy.py', "
        "['./backend/prod/deploy.py', '{0}'])\n".format(template))
    # 'with' guarantees the script is flushed and closed before chmod.
    with open(package_out, "w") as out:
        out.write(script)
    # Owner read/write/execute: the package is a runnable program.
    os.chmod(package_out, stat.S_IRWXU)
def main():
    """Package a deployment template into a self-extracting script.

    Reads the deployment template path from ``sys.argv[1]``, executes it
    (so its server definitions are loaded), gathers the deployable files
    and writes ``<template>_pkg.py`` next to the template.

    Returns:
        0 on success, -1 on a usage error (missing/invalid argument).
    """
    try:
        # Import the deploy template, as we must access its servers
        # list to search for python files to bundle for deployment.
        deploy_template = sys.argv[1]
        assert os.path.isfile(deploy_template), \
            "deployment template {0} is not a file".format(deploy_template)
        # exec() needs source text; passing the open file object only worked
        # on Python 2.  Read the contents and close the handle promptly.
        with open(deploy_template, 'rb') as template_file:
            exec(template_file.read())
        deploy_name = os.path.splitext(os.path.basename(deploy_template))[0]
        package_out = os.path.join(os.path.dirname(deploy_template),
                                   deploy_name + "_pkg.py")
        # Get the full list of deployed files.
        files = _GetFilesRecursively("./")
        # Package the contents into a deployment executable.
        _Package(deploy_name, package_out, files)
        # print() with a single argument is valid on Python 2 and 3 alike,
        # unlike the original bare print statement.
        print("{0} deployment packaged into {1}".format(
            deploy_template, package_out))
        return 0
    except (IndexError, AssertionError) as err:
        # 'as err' replaces the Python-2-only 'except X, err' spelling.
        print("Error: {0}, Usage: {1} <deploy-template>".format(
            str(err), os.path.basename(sys.argv[0])))
        return -1
if __name__ == "__main__":
sys.exit(main()) | 0.412885 | 0.197174 |
class Restaurant():
    """A simple restaurant with a name, a cuisine and a served-guest tally."""

    def __init__(self, rest_name, rest_cuisine):
        """Remember the restaurant's identity; nobody has been served yet."""
        self.rest_name = rest_name
        self.rest_cuisine = rest_cuisine
        # Running count of guests served so far.
        self.number_served = 0

    def describe_restaurant(self):
        """Print a one-line summary of name and cuisine."""
        summary = f"The restaurant name is {self.rest_name} and cuisine is {self.rest_cuisine}"
        print(summary)

    def open_restaurant(self):
        """Announce that the restaurant is open."""
        print("The restaurant is opened")

    def set_number_served(self, number_served):
        """Overwrite the served-guest tally and report the new value."""
        self.number_served = number_served
        print(f"number of served visitors equal {number_served}")

    def increment_number_served(self, number_served):
        """Add to the served-guest tally and report the increment."""
        self.number_served = self.number_served + number_served
        print(f"number of served visitors incremented on {number_served}")
class IceCreamStand(Restaurant):
    """A Restaurant that additionally stocks a fixed list of flavors."""

    def __init__(self, rest_name, rest_cuisine):
        """Initialise the base restaurant, then stock the flavor list."""
        super().__init__(rest_name, rest_cuisine)
        # NOTE(review): 'icecrea3' looks like a typo for 'icecream3' — kept
        # as-is to preserve behaviour.
        self.flavors = ['icecream1', 'icecream2', 'icecrea3']

    def show_icecreams(self):
        """Print the flavors currently on offer."""
        menu = 'Stand icecreams: ' + str(self.flavors)
        print(menu)
# Demo: create a restaurant and exercise the served-guest tally.
restaurant = Restaurant("Vasilek", 'Rus')
restaurant.describe_restaurant()
print(restaurant.number_served)
# The attribute may be assigned directly ...
restaurant.number_served = 3
print(restaurant.number_served)
# ... or through the setter, which also prints a confirmation.
restaurant.set_number_served(5)
print(restaurant.number_served)
# increment adds to the current tally (5 + 10 == 15).
restaurant.increment_number_served(10)
print(restaurant.number_served)
class User():
    """A user identified by first/last name and age, with a login counter."""

    def __init__(self, fn, ln, age):
        """Store the identity and start the login-attempt counter at zero."""
        self.fn = fn
        self.ln = ln
        self.age = age
        # Number of recorded login attempts.
        self.login_attempt = 0

    def increment_login_attempts(self, attempts):
        """Add *attempts* to the login counter and report the increment."""
        self.login_attempt = self.login_attempt + attempts
        print(f'Login attempts incremented {attempts}')

    def reset_login_attempts(self):
        """Zero the login counter."""
        self.login_attempt = 0
        print('Login attempts reset')

    def show_login_attempts(self):
        """Print the current login-attempt count."""
        print(f'Number of login attempts {self.login_attempt}')

    def describe_user(self):
        """Print the user's identity on one line."""
        print(f'User info: name: {self.fn}, last name: {self.ln}, age: {self.age}')

    def greet_user(self):
        """Print a personalised greeting."""
        print(f'Hello {self.fn} {self.ln}')
class Admin(User):
    """A user with administrator privileges attached."""

    def __init__(self, fn, ln, age):
        """Create the underlying user and attach an AdminPriviliges object."""
        super().__init__(fn, ln, age)
        self.priviliges = AdminPriviliges()

    def show_priviliges(self):
        """Print the admin's full name followed by their privilege list."""
        who = self.fn + ' ' + self.ln
        print(who + " has the next priviliges: " + str(self.priviliges.admin_priviliges))
class Client(User):
    """A user with regular client privileges attached."""

    def __init__(self, fn, ln, age):
        """Create the underlying user and attach a ClientPriviliges object."""
        super().__init__(fn, ln, age)
        self.priviliges = ClientPriviliges()

    def show_priviliges(self):
        """Print the client's full name followed by their privilege list."""
        who = self.fn + ' ' + self.ln
        print(who + " has the next priviliges: " + str(self.priviliges.client_priviliges))
class AdminPriviliges():
    """Holds the privilege strings granted to an administrator."""

    def __init__(self):
        """Populate the fixed admin privilege list."""
        self.admin_priviliges = [
            "разрешено добавлять сообщения",
            "«разрешено удалять пользователей»",
            "«разрешено банить пользователей»",
        ]
class ClientPriviliges():
    """Holds the privilege strings granted to a regular client."""

    def __init__(self):
        """Populate the fixed client privilege list."""
        self.client_priviliges = [
            "Client prvilige 1",
            "Client prvilige 3",
            "Client prvilige 2",
        ]

    def show_priviliges(self):
        """Print the privilege list with a generic prefix."""
        message = "This user has the next priviliges: " + str(self.client_priviliges)
        print(message)
# Demo: plain users, login-attempt bookkeeping, and the subclasses.
me = User('dima', 'nov', 35)
me.describe_user()
me.greet_user()
jake = User("Jake", "Narrow", 34)
jake.describe_user()
# Counter starts at zero.
print(jake.login_attempt)
jake.show_login_attempts()
# Bump it by three, then reset back to zero.
jake.increment_login_attempts(3)
print(jake.login_attempt)
jake.show_login_attempts()
jake.reset_login_attempts()
print(jake.login_attempt)
jake.show_login_attempts()
# IceCreamStand inherits Restaurant behaviour and adds flavors.
my_stand = IceCreamStand('Stand_1', 'Minsk')
print(my_stand.flavors)
my_stand.show_icecreams()
# Admin/Client each carry their own privilege object.
admin = Admin('Igor', 'Ivanov', 23)
admin.show_priviliges()
client = Client('Bob', 'Marley', 38)
client.show_priviliges()
client.priviliges.show_priviliges() | Learn Python Book/Stage 9 (Classes)/restaurant.py | class Restaurant():
    def __init__(self, rest_name, rest_cuisine):
        """Store the restaurant's name and cuisine; served tally starts at 0."""
        self.rest_name = rest_name
        self.rest_cuisine = rest_cuisine
        # Running count of guests served so far.
        self.number_served = 0
    def describe_restaurant(self):
        """Print a one-line summary of name and cuisine."""
        print(f"The restaurant name is {self.rest_name} and cuisine is {self.rest_cuisine}")
    def open_restaurant(self):
        """Announce that the restaurant is open."""
        print("The restaurant is opened")
    def set_number_served(self, number_served):
        """Overwrite the served-guest tally and report the new value."""
        self.number_served = number_served
        print(f"number of served visitors equal {number_served}")
    def increment_number_served(self, number_served):
        """Add to (not replace) the served-guest tally and report the delta."""
        self.number_served += number_served
        print(f"number of served visitors incremented on {number_served}")
class IceCreamStand(Restaurant):
    """A Restaurant that additionally stocks a fixed list of flavors."""

    def __init__(self, rest_name, rest_cuisine):
        """Initialise the base restaurant, then stock the flavor list."""
        super().__init__(rest_name, rest_cuisine)
        # NOTE(review): 'icecrea3' looks like a typo for 'icecream3' — kept
        # as-is to preserve behaviour.
        self.flavors = ['icecream1', 'icecream2', 'icecrea3']

    def show_icecreams(self):
        """Print the flavors currently on offer."""
        menu = 'Stand icecreams: ' + str(self.flavors)
        print(menu)
# Demo: create a restaurant and exercise the served-guest tally.
restaurant = Restaurant("Vasilek", 'Rus')
restaurant.describe_restaurant()
print(restaurant.number_served)
# The attribute may be assigned directly ...
restaurant.number_served = 3
print(restaurant.number_served)
# ... or through the setter, which also prints a confirmation.
restaurant.set_number_served(5)
print(restaurant.number_served)
# increment adds to the current tally (5 + 10 == 15).
restaurant.increment_number_served(10)
print(restaurant.number_served)
class User():
    """A user identified by first/last name and age, with a login counter."""

    def __init__(self, fn, ln, age):
        """Store the identity and start the login-attempt counter at zero."""
        self.fn = fn
        self.ln = ln
        self.age = age
        # Number of recorded login attempts.
        self.login_attempt = 0

    def increment_login_attempts(self, attempts):
        """Add *attempts* to the login counter and report the increment."""
        self.login_attempt = self.login_attempt + attempts
        print(f'Login attempts incremented {attempts}')

    def reset_login_attempts(self):
        """Zero the login counter."""
        self.login_attempt = 0
        print('Login attempts reset')

    def show_login_attempts(self):
        """Print the current login-attempt count."""
        print(f'Number of login attempts {self.login_attempt}')

    def describe_user(self):
        """Print the user's identity on one line."""
        print(f'User info: name: {self.fn}, last name: {self.ln}, age: {self.age}')

    def greet_user(self):
        """Print a personalised greeting."""
        print(f'Hello {self.fn} {self.ln}')
class Admin(User):
    """A user with administrator privileges attached."""

    def __init__(self, fn, ln, age):
        """Create the underlying user and attach an AdminPriviliges object."""
        super().__init__(fn, ln, age)
        self.priviliges = AdminPriviliges()

    def show_priviliges(self):
        """Print the admin's full name followed by their privilege list."""
        who = self.fn + ' ' + self.ln
        print(who + " has the next priviliges: " + str(self.priviliges.admin_priviliges))
class Client(User):
    """A user with regular client privileges attached."""

    def __init__(self, fn, ln, age):
        """Create the underlying user and attach a ClientPriviliges object."""
        super().__init__(fn, ln, age)
        self.priviliges = ClientPriviliges()

    def show_priviliges(self):
        """Print the client's full name followed by their privilege list."""
        who = self.fn + ' ' + self.ln
        print(who + " has the next priviliges: " + str(self.priviliges.client_priviliges))
class AdminPriviliges():
    """Holds the privilege strings granted to an administrator."""

    def __init__(self):
        """Populate the fixed admin privilege list."""
        self.admin_priviliges = [
            "разрешено добавлять сообщения",
            "«разрешено удалять пользователей»",
            "«разрешено банить пользователей»",
        ]
class ClientPriviliges():
    """Holds the privilege strings granted to a regular client."""

    def __init__(self):
        """Populate the fixed client privilege list."""
        self.client_priviliges = [
            "Client prvilige 1",
            "Client prvilige 3",
            "Client prvilige 2",
        ]

    def show_priviliges(self):
        """Print the privilege list with a generic prefix."""
        message = "This user has the next priviliges: " + str(self.client_priviliges)
        print(message)
# Demo: plain users, login-attempt bookkeeping, and the subclasses.
me = User('dima', 'nov', 35)
me.describe_user()
me.greet_user()
jake = User("Jake", "Narrow", 34)
jake.describe_user()
# Counter starts at zero.
print(jake.login_attempt)
jake.show_login_attempts()
# Bump it by three, then reset back to zero.
jake.increment_login_attempts(3)
print(jake.login_attempt)
jake.show_login_attempts()
jake.reset_login_attempts()
print(jake.login_attempt)
jake.show_login_attempts()
# IceCreamStand inherits Restaurant behaviour and adds flavors.
my_stand = IceCreamStand('Stand_1', 'Minsk')
print(my_stand.flavors)
my_stand.show_icecreams()
# Admin/Client each carry their own privilege object.
admin = Admin('Igor', 'Ivanov', 23)
admin.show_priviliges()
client = Client('Bob', 'Marley', 38)
client.show_priviliges()
client.priviliges.show_priviliges() | 0.386648 | 0.213521 |
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from PINSoftware.MachineState import MachineState
def SingleSwitch(id_, label, default=False):
    """Build a lone bootstrap 'switch' (a styled checkbox outside a Checklist).

    Args:
        id_: Component id given to the inner ``dbc.Checkbox``.
        label: Text rendered next to the switch.
        default: Initial checked state.

    Returns:
        An ``html.Div`` wrapping the checkbox and its label.
    """
    checkbox = dbc.Checkbox(id=id_, className='custom-control-input',
                            checked=default)
    caption = html.Label(label, htmlFor=id_, className='custom-control-label')
    return html.Div(
        [checkbox, caption],
        className='custom-control custom-switch ml-2',
        style={'display': 'inline'},
    )
def get_base_graph_callbacks(app : dash.Dash, name : str):
    """Register the two baseline callbacks shared by every graph.

    First, turning "Show graph" off hides the graph row and disables (and
    unchecks) the "Update graph" switch.  Second, "Update graph" simply
    enables/disables the graph's Interval.
    """
    @app.callback([
        Output(name + '-row', 'style'),
        Output(name + '-clock-toggle', 'disabled'),
        Output(name + '-clock-toggle', 'checked'),
    ], [Input(name + '-show', 'checked')], [State(name + '-clock-toggle', 'checked')])
    def disable_update_on_show(show_checked, update_checked):
        if show_checked:
            return [{}, False, update_checked]
        return [{'display': 'none'}, True, False]

    @app.callback(Output(name + '-clock', 'disabled'),
                  [Input(name + '-clock-toggle', 'checked')])
    def toggle_interval(checked):
        return not checked
base_graph_callbacks_done = []
def BaseGraph(app : dash.Dash, name : str, title : str, interval : int = 2000, additional_controls=None, base_fig=None):
    """Create the scaffolding that every updating graph shares.

    Sets up the container, the "Show graph"/"Update graph" switches, the
    `dash_core_components.Graph` and its `dash_core_components.Interval`,
    and registers the baseline callbacks via `get_base_graph_callbacks`.

    Args:
        app: Dash app the callbacks are registered on.
        name: Component id of the `dash_core_components.Graph`.
        title: Heading shown at the top of the container.
        interval: `dash_core_components.Interval` period in milliseconds.
        additional_controls: Extra dash components added to the container
            (graph-specific controls).  Defaults to an empty list.
        base_fig: Initial `figure` of the graph (changeable later).
            Defaults to an empty dict.

    Note: None sentinels replace the original mutable default arguments
    ([] / {}), which are shared across all calls in Python.
    """
    if additional_controls is None:
        additional_controls = []
    if base_fig is None:
        base_fig = {}
    if name not in base_graph_callbacks_done:
        get_base_graph_callbacks(app, name)
        base_graph_callbacks_done.append(name)
    return dbc.Container([
        dbc.Row(html.H4(title), justify='center', className='mt-1'),
        dbc.Row([
            SingleSwitch(name + '-show', "Show graph", default=True),
            SingleSwitch(name + '-clock-toggle', "Update graph", default=True)
        ],
            justify='center',
            className='mt-1')
    ] + additional_controls + [
        dbc.Row(dbc.Col([
            dcc.Graph(
                id=name,
                config={'displayModeBar': True},
                animate=False,
                figure=base_fig
            ),
            dcc.Interval(id=name + '-clock', interval=interval, n_intervals=0)
        ]),
            id=name + '-row',
            justify='center')
    ],
        id=name + '-container',
        className='border mt-3 mb-3 pb-2'
    )
def get_full_redraw_graph_callbacks(app : dash.Dash, ms : MachineState, name : str, fig_func, **kwargs):
    """Register the redraw callback backing a `FullRedrawGraph`.

    On every tick of the graph's Interval, if `ms.data` is available,
    `fig_func` is invoked and its return value becomes the new figure.

    Keyword arguments `fig_func_output` (default: the graph's 'figure'
    property) and `fig_func_state` (default: no state) customise the
    callback; its input (the Interval) is fixed.
    """
    output = kwargs.setdefault('fig_func_output', Output(name, 'figure'))
    state = kwargs.setdefault('fig_func_state', [])

    @app.callback(output, [Input(name + '-clock', 'n_intervals')], state)
    def graph_update(n, *args):
        if ms.data:
            return fig_func(n, *args)
        raise PreventUpdate()
full_redraw_graph_callbacks_done = []
def FullRedrawGraph(app : dash.Dash, ms : MachineState, name : str, title : str, fig_func,
                    interval : int = 2000, additional_controls=None, **kwargs):
    """Build a graph whose whole figure is recomputed and resent on update.

    Args:
        app: Dash app the callbacks are registered on.
        ms: `PINSoftware.MachineState.MachineState` consulted for data.
        name: Component id of the `dash_core_components.Graph`.
        title: Heading shown above the graph.
        fig_func: Called on each Interval tick (only when `ms.data` is set);
            its return value is the new figure by default (see the
            `fig_func_output` / `fig_func_state` keyword arguments).
        interval: `dash_core_components.Interval` period in milliseconds.
        additional_controls: Extra dash components placed in the container.
            Defaults to an empty list (a None sentinel replaces the original
            mutable default argument).
    """
    if additional_controls is None:
        additional_controls = []
    if name not in full_redraw_graph_callbacks_done:
        get_full_redraw_graph_callbacks(app, ms, name, fig_func, **kwargs)
        full_redraw_graph_callbacks_done.append(name)
    return BaseGraph(app, name, title, interval, additional_controls)
def get_extendable_graph_callbacks(app : dash.Dash, ms : MachineState, name : str, extend_func,
                                   base_fig, **kwargs):
    """Register the two callbacks backing an `ExtendableGraph`.

    The first resets the figure to `base_fig` whenever the "Stop" button
    becomes enabled; the second extends the graph via `extend_func` on each
    Interval tick (skipped while `ms.data` is unavailable).

    Keyword arguments `extend_func_output` (default: the graph's
    'extendData' property) and `extend_func_state` (default: no state)
    customise the extend callback; its input (the Interval) is fixed.
    """
    output = kwargs.setdefault('extend_func_output', Output(name, 'extendData'))
    state = kwargs.setdefault('extend_func_state', [])

    @app.callback(Output(name, 'figure'), [
        Input('cp-stop', 'disabled')
    ])
    def set_base_fig(is_unstoppable):
        if is_unstoppable:
            raise PreventUpdate()
        return base_fig

    @app.callback(output, [Input(name + '-clock', 'n_intervals')], state)
    def graph_update(n, *args):
        if ms.data:
            return extend_func(n, *args)
        raise PreventUpdate()
extendable_graph_callbacks_done = []
def ExtendableGraph(app : dash.Dash, ms : MachineState, name : str, title : str,
                    base_fig, extend_func, interval : int = 2000, additional_controls=None, **kwargs):
    """Build a graph that is extended in place instead of fully redrawn.

    The figure starts as `base_fig` (and is reset to it whenever the "Stop"
    button becomes enabled — note this also resets for users who are not
    controlling the setup at that point); afterwards `extend_func` feeds new
    points to the graph's `extendData` property on every Interval tick.

    Args:
        app: Dash app the callbacks are registered on.
        ms: `PINSoftware.MachineState.MachineState` consulted for data.
        name: Component id of the `dash_core_components.Graph`.
        title: Heading shown above the graph.
        base_fig: Initial (and reset) figure for the graph.
        extend_func: Called on each Interval tick; by default its return
            value is sent to 'extendData' (see the `extend_func_output` /
            `extend_func_state` keyword arguments).
        interval: `dash_core_components.Interval` period in milliseconds.
        additional_controls: Extra dash components placed in the container.
            Defaults to an empty list (a None sentinel replaces the original
            mutable default argument).
    """
    if additional_controls is None:
        additional_controls = []
    if name not in extendable_graph_callbacks_done:
        get_extendable_graph_callbacks(app, ms, name, extend_func, base_fig, **kwargs)
        extendable_graph_callbacks_done.append(name)
return BaseGraph(app, name, title, interval, additional_controls, base_fig=base_fig) | PINSoftware/DashComponents.py | import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from PINSoftware.MachineState import MachineState
def SingleSwitch(id_, label, default=False):
    """Build a lone bootstrap 'switch' (a styled checkbox outside a Checklist).

    Args:
        id_: Component id given to the inner ``dbc.Checkbox``.
        label: Text rendered next to the switch.
        default: Initial checked state.

    Returns:
        An ``html.Div`` wrapping the checkbox and its label.
    """
    checkbox = dbc.Checkbox(id=id_, className='custom-control-input',
                            checked=default)
    caption = html.Label(label, htmlFor=id_, className='custom-control-label')
    return html.Div(
        [checkbox, caption],
        className='custom-control custom-switch ml-2',
        style={'display': 'inline'},
    )
def get_base_graph_callbacks(app : dash.Dash, name : str):
    """Register the two baseline callbacks shared by every graph.

    First, turning "Show graph" off hides the graph row and disables (and
    unchecks) the "Update graph" switch.  Second, "Update graph" simply
    enables/disables the graph's Interval.
    """
    @app.callback([
        Output(name + '-row', 'style'),
        Output(name + '-clock-toggle', 'disabled'),
        Output(name + '-clock-toggle', 'checked'),
    ], [Input(name + '-show', 'checked')], [State(name + '-clock-toggle', 'checked')])
    def disable_update_on_show(show_checked, update_checked):
        if show_checked:
            return [{}, False, update_checked]
        return [{'display': 'none'}, True, False]

    @app.callback(Output(name + '-clock', 'disabled'),
                  [Input(name + '-clock-toggle', 'checked')])
    def toggle_interval(checked):
        return not checked
base_graph_callbacks_done = []
def BaseGraph(app : dash.Dash, name : str, title : str, interval : int = 2000, additional_controls=None, base_fig=None):
    """Create the scaffolding that every updating graph shares.

    Sets up the container, the "Show graph"/"Update graph" switches, the
    `dash_core_components.Graph` and its `dash_core_components.Interval`,
    and registers the baseline callbacks via `get_base_graph_callbacks`.

    Args:
        app: Dash app the callbacks are registered on.
        name: Component id of the `dash_core_components.Graph`.
        title: Heading shown at the top of the container.
        interval: `dash_core_components.Interval` period in milliseconds.
        additional_controls: Extra dash components added to the container
            (graph-specific controls).  Defaults to an empty list.
        base_fig: Initial `figure` of the graph (changeable later).
            Defaults to an empty dict.

    Note: None sentinels replace the original mutable default arguments
    ([] / {}), which are shared across all calls in Python.
    """
    if additional_controls is None:
        additional_controls = []
    if base_fig is None:
        base_fig = {}
    if name not in base_graph_callbacks_done:
        get_base_graph_callbacks(app, name)
        base_graph_callbacks_done.append(name)
    return dbc.Container([
        dbc.Row(html.H4(title), justify='center', className='mt-1'),
        dbc.Row([
            SingleSwitch(name + '-show', "Show graph", default=True),
            SingleSwitch(name + '-clock-toggle', "Update graph", default=True)
        ],
            justify='center',
            className='mt-1')
    ] + additional_controls + [
        dbc.Row(dbc.Col([
            dcc.Graph(
                id=name,
                config={'displayModeBar': True},
                animate=False,
                figure=base_fig
            ),
            dcc.Interval(id=name + '-clock', interval=interval, n_intervals=0)
        ]),
            id=name + '-row',
            justify='center')
    ],
        id=name + '-container',
        className='border mt-3 mb-3 pb-2'
    )
def get_full_redraw_graph_callbacks(app : dash.Dash, ms : MachineState, name : str, fig_func, **kwargs):
    """Register the redraw callback backing a `FullRedrawGraph`.

    On every tick of the graph's Interval, if `ms.data` is available,
    `fig_func` is invoked and its return value becomes the new figure.

    Keyword arguments `fig_func_output` (default: the graph's 'figure'
    property) and `fig_func_state` (default: no state) customise the
    callback; its input (the Interval) is fixed.
    """
    output = kwargs.setdefault('fig_func_output', Output(name, 'figure'))
    state = kwargs.setdefault('fig_func_state', [])

    @app.callback(output, [Input(name + '-clock', 'n_intervals')], state)
    def graph_update(n, *args):
        if ms.data:
            return fig_func(n, *args)
        raise PreventUpdate()
full_redraw_graph_callbacks_done = []
def FullRedrawGraph(app : dash.Dash, ms : MachineState, name : str, title : str, fig_func,
                    interval : int = 2000, additional_controls=None, **kwargs):
    """Build a graph whose whole figure is recomputed and resent on update.

    Args:
        app: Dash app the callbacks are registered on.
        ms: `PINSoftware.MachineState.MachineState` consulted for data.
        name: Component id of the `dash_core_components.Graph`.
        title: Heading shown above the graph.
        fig_func: Called on each Interval tick (only when `ms.data` is set);
            its return value is the new figure by default (see the
            `fig_func_output` / `fig_func_state` keyword arguments).
        interval: `dash_core_components.Interval` period in milliseconds.
        additional_controls: Extra dash components placed in the container.
            Defaults to an empty list (a None sentinel replaces the original
            mutable default argument).
    """
    if additional_controls is None:
        additional_controls = []
    if name not in full_redraw_graph_callbacks_done:
        get_full_redraw_graph_callbacks(app, ms, name, fig_func, **kwargs)
        full_redraw_graph_callbacks_done.append(name)
    return BaseGraph(app, name, title, interval, additional_controls)
def get_extendable_graph_callbacks(app : dash.Dash, ms : MachineState, name : str, extend_func,
                                   base_fig, **kwargs):
    """Register the two callbacks backing an `ExtendableGraph`.

    The first resets the figure to `base_fig` whenever the "Stop" button
    becomes enabled; the second extends the graph via `extend_func` on each
    Interval tick (skipped while `ms.data` is unavailable).

    Keyword arguments `extend_func_output` (default: the graph's
    'extendData' property) and `extend_func_state` (default: no state)
    customise the extend callback; its input (the Interval) is fixed.
    """
    output = kwargs.setdefault('extend_func_output', Output(name, 'extendData'))
    state = kwargs.setdefault('extend_func_state', [])

    @app.callback(Output(name, 'figure'), [
        Input('cp-stop', 'disabled')
    ])
    def set_base_fig(is_unstoppable):
        if is_unstoppable:
            raise PreventUpdate()
        return base_fig

    @app.callback(output, [Input(name + '-clock', 'n_intervals')], state)
    def graph_update(n, *args):
        if ms.data:
            return extend_func(n, *args)
        raise PreventUpdate()
extendable_graph_callbacks_done = []
def ExtendableGraph(app : dash.Dash, ms : MachineState, name : str, title : str,
                    base_fig, extend_func, interval : int = 2000, additional_controls=None, **kwargs):
    """Build a graph that is extended in place instead of fully redrawn.

    The figure starts as `base_fig` (and is reset to it whenever the "Stop"
    button becomes enabled — note this also resets for users who are not
    controlling the setup at that point); afterwards `extend_func` feeds new
    points to the graph's `extendData` property on every Interval tick.

    Args:
        app: Dash app the callbacks are registered on.
        ms: `PINSoftware.MachineState.MachineState` consulted for data.
        name: Component id of the `dash_core_components.Graph`.
        title: Heading shown above the graph.
        base_fig: Initial (and reset) figure for the graph.
        extend_func: Called on each Interval tick; by default its return
            value is sent to 'extendData' (see the `extend_func_output` /
            `extend_func_state` keyword arguments).
        interval: `dash_core_components.Interval` period in milliseconds.
        additional_controls: Extra dash components placed in the container.
            Defaults to an empty list (a None sentinel replaces the original
            mutable default argument).
    """
    if additional_controls is None:
        additional_controls = []
    if name not in extendable_graph_callbacks_done:
        get_extendable_graph_callbacks(app, ms, name, extend_func, base_fig, **kwargs)
        extendable_graph_callbacks_done.append(name)
return BaseGraph(app, name, title, interval, additional_controls, base_fig=base_fig) | 0.770378 | 0.272526 |
from revelation.isa import decode
from revelation.instruction import Instruction
import opcode_factory
import pytest
# One case per encodable instruction: the factory builds the opcode and the
# decoder must map it back to the expected mnemonic.
@pytest.mark.parametrize('name,instr',
    [  # Integer ALU, register and immediate forms.
     ('add32', opcode_factory.add32(rd=0, rn=0, rm=0)),
     ('add16', opcode_factory.add16(rd=0, rn=0, rm=0)),
     ('add32', opcode_factory.add32(rd=1, rn=1, rm=1)),
     ('add16', opcode_factory.add16(rd=1, rn=1, rm=1)),
     ('sub32', opcode_factory.sub32(rd=0, rn=0, rm=0)),
     ('sub16', opcode_factory.sub16(rd=0, rn=0, rm=0)),
     ('sub32', opcode_factory.sub32(rd=1, rn=1, rm=1)),
     ('sub16', opcode_factory.sub16(rd=1, rn=1, rm=1)),
     ('add32', opcode_factory.add32_immediate(rd=1, rn=0, imm=0b01010101010)),
     ('add16', opcode_factory.add16_immediate(rd=1, rn=0, imm=0b0101)),
     ('sub32', opcode_factory.sub32_immediate(rd=1, rn=0, imm=0b01010101010)),
     ('sub16', opcode_factory.sub16_immediate(rd=1, rn=0, imm=0b0101)),
     ('and32', opcode_factory.and32(rd=1, rn=1, rm=1)),
     ('and16', opcode_factory.and16(rd=1, rn=1, rm=1)),
     ('orr32', opcode_factory.orr32(rd=1, rn=1, rm=1)),
     ('orr16', opcode_factory.orr16(rd=1, rn=1, rm=1)),
     ('eor32', opcode_factory.eor32(rd=1, rn=1, rm=1)),
     ('eor16', opcode_factory.eor16(rd=1, rn=1, rm=1)),
     # Shifts and bit-reverse, register and immediate forms.
     ('asr32', opcode_factory.asr32(rd=1, rn=1, rm=1)),
     ('asr16', opcode_factory.asr16(rd=1, rn=1, rm=1)),
     ('lsr32', opcode_factory.lsr32(rd=1, rn=1, rm=1)),
     ('lsr16', opcode_factory.lsr16(rd=1, rn=1, rm=1)),
     ('lsl32', opcode_factory.lsl32(rd=1, rn=1, rm=1)),
     ('lsl16', opcode_factory.lsl16(rd=1, rn=1, rm=1)),
     ('lsrimm16', opcode_factory.lsr16_immediate(rd=1, rn=1, imm=1)),
     ('lslimm16', opcode_factory.lsl16_immediate(rd=1, rn=1, imm=1)),
     ('asrimm16', opcode_factory.asr16_immediate(rd=1, rn=1, imm=1)),
     ('bitrimm16', opcode_factory.bitr16_immediate(rd=1, rn=1, imm=1)),
     ('lsrimm32', opcode_factory.lsr32_immediate(rd=1, rn=1, imm=1)),
     ('lslimm32', opcode_factory.lsl32_immediate(rd=1, rn=1, imm=1)),
     ('asrimm32', opcode_factory.asr32_immediate(rd=1, rn=1, imm=1)),
     ('bitrimm32', opcode_factory.bitr32_immediate(rd=1, rn=1, imm=1)),
     # Jumps and conditional branches.
     ('jr32', opcode_factory.jr32(rn=0)),
     ('jr16', opcode_factory.jr16(rn=0)),
     ('jalr32', opcode_factory.jalr32(rn=0)),
     ('jalr16', opcode_factory.jalr16(rn=0)),
     ('bcond32', opcode_factory.bcond32(condition=0b1111, imm=0)),
     ('bcond16', opcode_factory.bcond16(condition=0b1111, imm=0)),
     # Loads/stores (post-modify, displacement, indexed) and test-and-set.
     ('ldstrpmd32', opcode_factory.ldstrpmd32(rd=1, rn=0, sub=1, imm=0b1010101010, bb=0b11, s=1)),
     ('ldstrdisp16', opcode_factory.ldstrdisp16(rd=1, rn=0, imm=0b010, bb=0b11, s=1)),
     ('ldstrdisp32', opcode_factory.ldstrdisp32(rd=1, rn=0, sub=1, imm=0b1010101010, bb=0b11, s=1)),
     ('ldstrpm16', opcode_factory.ldstrpm16(rd=1, rn=0, rm=0, bb=0b11, s=1)),
     ('ldstrpm32', opcode_factory.ldstrpm32(rd=1, rn=0, rm=0, sub=1, bb=0b11, s=1)),
     ('ldstrind16', opcode_factory.ldstrind16(rd=1, rn=0, rm=0, bb=0b11, s=1)),
     ('ldstrind32', opcode_factory.ldstrind32(rd=1, rn=0, rm=0, sub=1, bb=0b11, s=1)),
     ('testset32', opcode_factory.testset32(rd=1, rn=0, rm=0, sub=1, bb=0b11)),
     # Floating-point unit, 16- and 32-bit.
     ('fadd16', opcode_factory.fadd16(rd=1, rn=0, rm=0)),
     ('fsub16', opcode_factory.fsub16(rd=1, rn=0, rm=0)),
     ('fmul16', opcode_factory.fmul16(rd=1, rn=0, rm=0)),
     ('fmadd16', opcode_factory.fmadd16(rd=1, rn=0, rm=0)),
     ('fmsub16', opcode_factory.fmsub16(rd=1, rn=0, rm=0)),
     ('float16', opcode_factory.float16(rd=1, rn=0, rm=0)),
     ('fix16', opcode_factory.fix16(rd=1, rn=0, rm=0)),
     ('fabs16', opcode_factory.fabs16(rd=1, rn=0, rm=0)),
     ('fadd32', opcode_factory.fadd32(rd=1, rn=0, rm=0)),
     ('fsub32', opcode_factory.fsub32(rd=1, rn=0, rm=0)),
     ('fmul32', opcode_factory.fmul32(rd=1, rn=0, rm=0)),
     ('fmadd32', opcode_factory.fmadd32(rd=1, rn=0, rm=0)),
     ('fmsub32', opcode_factory.fmsub32(rd=1, rn=0, rm=0)),
     ('float32', opcode_factory.float32(rd=1, rn=0, rm=0)),
     ('fix32', opcode_factory.fix32(rd=1, rn=0, rm=0)),
     ('fabs32', opcode_factory.fabs32(rd=1, rn=0, rm=0)),
     # Moves, including the special-register MOVTS/MOVFS forms.
     ('movcond32', opcode_factory.movcond32(condition=0b0000, rd=0, rn=0)),
     ('movcond16', opcode_factory.movcond16(condition=0b0000, rd=0, rn=0)),
     ('movtimm32', opcode_factory.movtimm32(rd=0b1111, imm=0)),
     ('movimm32', opcode_factory.movimm32(rd=0b1111, imm=0)),
     ('movimm16', opcode_factory.movimm16(rd=0b1111, imm=0)),
     ('movfs32', opcode_factory.movfs32(rn=0b110, rd='IRET')),
     ('movfs16', opcode_factory.movfs16(rn=0b110, rd='IRET')),
     ('movts32', opcode_factory.movts32(rn='IRET', rd=0b011)),
     ('movts16', opcode_factory.movts16(rn='IRET', rd=0)),
     # System / no-operand instructions.
     ('gie16', opcode_factory.gie16()),
     ('gid16', opcode_factory.gid16()),
     ('nop16', opcode_factory.nop16()),
     ('idle16', opcode_factory.idle16()),
     ('bkpt16', opcode_factory.bkpt16()),
     ('mbkpt16', opcode_factory.mbkpt16()),
     ('sync16', opcode_factory.sync16()),
     ('rti16', opcode_factory.rti16()),
     ('wand16', opcode_factory.wand16()),
     ('trap16', opcode_factory.trap16(trap=0b111111)),
     ('unimpl', opcode_factory.unimpl()),
     ])
def test_decode(name, instr):
    """Each factory-built opcode decodes back to its own mnemonic."""
    decoded_name, _ = decode(instr)
    assert decoded_name == name
def test_bit32_imm():
instr = Instruction(opcode_factory.bitr32_immediate(rd=0b110110, rn=0b101101, imm=0b11111),
None)
assert instr.imm5 == 0b11111
assert instr.rd == 0b110110
assert instr.rn == 0b101101
instr = Instruction(opcode_factory.lsr32_immediate(rd=0, rn=0, imm=0b01011),
None)
assert instr.imm5 == 0b01011
def test_bit16_imm():
instr = Instruction(opcode_factory.bitr16_immediate(rd=0b110, rn=0b101, imm=0b11111),
None)
assert instr.imm5 == 0b11111
assert instr.rd == 0b110
assert instr.rn == 0b101
instr = Instruction(opcode_factory.lsr16_immediate(rd=0, rn=0, imm=0b01011),
None)
assert instr.imm5 == 0b01011
def test_decode_ldstrpmd32():
instr = opcode_factory.ldstrpmd32(rd=1, rn=0, sub=1, imm=0b1010101010, bb=0b11, s=1)
name, executefn = decode(instr)
assert Instruction(instr, '').sub == 1
assert Instruction(instr, '').s == 1
assert Instruction(instr, '').size == 0b11
def test_decode_add32_immediate_argument():
instr = Instruction(opcode_factory.add32_immediate(rd=1, rn=0, imm=0b01010101010), '')
assert instr.rd == 1
assert instr.rn == 0
assert instr.imm11 == 0b01010101010
def test_mov_special_registers():
# Note that in the MOV 'special' instructions rd and rn are swapped.
instr = Instruction(opcode_factory.movfs16(rn=0, rd='CONFIG'), '')
assert instr.rd == 0
assert instr.rn == 0
instr = Instruction(opcode_factory.movfs32(rn=0, rd='IRET'), '')
assert instr.rn == 0
assert instr.rd == 8
instr = Instruction(opcode_factory.movts16(rn='CONFIG', rd=0), '')
assert instr.rd == 0
assert instr.rn == 0
instr = Instruction(opcode_factory.movts32(rn='IRET', rd=0), '')
assert instr.rn == 8
assert instr.rd == 0 | revelation/test/test_decode.py | from revelation.isa import decode
from revelation.instruction import Instruction
import opcode_factory
import pytest
@pytest.mark.parametrize('name,instr',
[('add32', opcode_factory.add32(rd=0, rn=0, rm=0)),
('add16', opcode_factory.add16(rd=0, rn=0, rm=0)),
('add32', opcode_factory.add32(rd=1, rn=1, rm=1)),
('add16', opcode_factory.add16(rd=1, rn=1, rm=1)),
('sub32', opcode_factory.sub32(rd=0, rn=0, rm=0)),
('sub16', opcode_factory.sub16(rd=0, rn=0, rm=0)),
('sub32', opcode_factory.sub32(rd=1, rn=1, rm=1)),
('sub16', opcode_factory.sub16(rd=1, rn=1, rm=1)),
('add32', opcode_factory.add32_immediate(rd=1, rn=0, imm=0b01010101010)),
('add16', opcode_factory.add16_immediate(rd=1, rn=0, imm=0b0101)),
('sub32', opcode_factory.sub32_immediate(rd=1, rn=0, imm=0b01010101010)),
('sub16', opcode_factory.sub16_immediate(rd=1, rn=0, imm=0b0101)),
('and32', opcode_factory.and32(rd=1, rn=1, rm=1)),
('and16', opcode_factory.and16(rd=1, rn=1, rm=1)),
('orr32', opcode_factory.orr32(rd=1, rn=1, rm=1)),
('orr16', opcode_factory.orr16(rd=1, rn=1, rm=1)),
('eor32', opcode_factory.eor32(rd=1, rn=1, rm=1)),
('eor16', opcode_factory.eor16(rd=1, rn=1, rm=1)),
('asr32', opcode_factory.asr32(rd=1, rn=1, rm=1)),
('asr16', opcode_factory.asr16(rd=1, rn=1, rm=1)),
('lsr32', opcode_factory.lsr32(rd=1, rn=1, rm=1)),
('lsr16', opcode_factory.lsr16(rd=1, rn=1, rm=1)),
('lsl32', opcode_factory.lsl32(rd=1, rn=1, rm=1)),
('lsl16', opcode_factory.lsl16(rd=1, rn=1, rm=1)),
('lsrimm16', opcode_factory.lsr16_immediate(rd=1, rn=1, imm=1)),
('lslimm16', opcode_factory.lsl16_immediate(rd=1, rn=1, imm=1)),
('asrimm16', opcode_factory.asr16_immediate(rd=1, rn=1, imm=1)),
('bitrimm16', opcode_factory.bitr16_immediate(rd=1, rn=1, imm=1)),
('lsrimm32', opcode_factory.lsr32_immediate(rd=1, rn=1, imm=1)),
('lslimm32', opcode_factory.lsl32_immediate(rd=1, rn=1, imm=1)),
('asrimm32', opcode_factory.asr32_immediate(rd=1, rn=1, imm=1)),
('bitrimm32', opcode_factory.bitr32_immediate(rd=1, rn=1, imm=1)),
('jr32', opcode_factory.jr32(rn=0)),
('jr16', opcode_factory.jr16(rn=0)),
('jalr32', opcode_factory.jalr32(rn=0)),
('jalr16', opcode_factory.jalr16(rn=0)),
('bcond32', opcode_factory.bcond32(condition=0b1111, imm=0)),
('bcond16', opcode_factory.bcond16(condition=0b1111, imm=0)),
('ldstrpmd32', opcode_factory.ldstrpmd32(rd=1, rn=0, sub=1, imm=0b1010101010, bb=0b11, s=1)),
('ldstrdisp16', opcode_factory.ldstrdisp16(rd=1, rn=0, imm=0b010, bb=0b11, s=1)),
('ldstrdisp32', opcode_factory.ldstrdisp32(rd=1, rn=0, sub=1, imm=0b1010101010, bb=0b11, s=1)),
('ldstrpm16', opcode_factory.ldstrpm16(rd=1, rn=0, rm=0, bb=0b11, s=1)),
('ldstrpm32', opcode_factory.ldstrpm32(rd=1, rn=0, rm=0, sub=1, bb=0b11, s=1)),
('ldstrind16', opcode_factory.ldstrind16(rd=1, rn=0, rm=0, bb=0b11, s=1)),
('ldstrind32', opcode_factory.ldstrind32(rd=1, rn=0, rm=0, sub=1, bb=0b11, s=1)),
('testset32', opcode_factory.testset32(rd=1, rn=0, rm=0, sub=1, bb=0b11)),
('fadd16', opcode_factory.fadd16(rd=1, rn=0, rm=0)),
('fsub16', opcode_factory.fsub16(rd=1, rn=0, rm=0)),
('fmul16', opcode_factory.fmul16(rd=1, rn=0, rm=0)),
('fmadd16', opcode_factory.fmadd16(rd=1, rn=0, rm=0)),
('fmsub16', opcode_factory.fmsub16(rd=1, rn=0, rm=0)),
('float16', opcode_factory.float16(rd=1, rn=0, rm=0)),
('fix16', opcode_factory.fix16(rd=1, rn=0, rm=0)),
('fabs16', opcode_factory.fabs16(rd=1, rn=0, rm=0)),
('fadd32', opcode_factory.fadd32(rd=1, rn=0, rm=0)),
('fsub32', opcode_factory.fsub32(rd=1, rn=0, rm=0)),
('fmul32', opcode_factory.fmul32(rd=1, rn=0, rm=0)),
('fmadd32', opcode_factory.fmadd32(rd=1, rn=0, rm=0)),
('fmsub32', opcode_factory.fmsub32(rd=1, rn=0, rm=0)),
('float32', opcode_factory.float32(rd=1, rn=0, rm=0)),
('fix32', opcode_factory.fix32(rd=1, rn=0, rm=0)),
('fabs32', opcode_factory.fabs32(rd=1, rn=0, rm=0)),
('movcond32', opcode_factory.movcond32(condition=0b0000, rd=0, rn=0)),
('movcond16', opcode_factory.movcond16(condition=0b0000, rd=0, rn=0)),
('movtimm32', opcode_factory.movtimm32(rd=0b1111, imm=0)),
('movimm32', opcode_factory.movimm32(rd=0b1111, imm=0)),
('movimm16', opcode_factory.movimm16(rd=0b1111, imm=0)),
('movfs32', opcode_factory.movfs32(rn=0b110, rd='IRET')),
('movfs16', opcode_factory.movfs16(rn=0b110, rd='IRET')),
('movts32', opcode_factory.movts32(rn='IRET', rd=0b011)),
('movts16', opcode_factory.movts16(rn='IRET', rd=0)),
('gie16', opcode_factory.gie16()),
('gid16', opcode_factory.gid16()),
('nop16', opcode_factory.nop16()),
('idle16', opcode_factory.idle16()),
('bkpt16', opcode_factory.bkpt16()),
('mbkpt16', opcode_factory.mbkpt16()),
('sync16', opcode_factory.sync16()),
('rti16', opcode_factory.rti16()),
('wand16', opcode_factory.wand16()),
('trap16', opcode_factory.trap16(trap=0b111111)),
('unimpl', opcode_factory.unimpl()),
])
def test_decode(name, instr):
decoded_name, _ = decode(instr)
assert decoded_name == name
def test_bit32_imm():
instr = Instruction(opcode_factory.bitr32_immediate(rd=0b110110, rn=0b101101, imm=0b11111),
None)
assert instr.imm5 == 0b11111
assert instr.rd == 0b110110
assert instr.rn == 0b101101
instr = Instruction(opcode_factory.lsr32_immediate(rd=0, rn=0, imm=0b01011),
None)
assert instr.imm5 == 0b01011
def test_bit16_imm():
instr = Instruction(opcode_factory.bitr16_immediate(rd=0b110, rn=0b101, imm=0b11111),
None)
assert instr.imm5 == 0b11111
assert instr.rd == 0b110
assert instr.rn == 0b101
instr = Instruction(opcode_factory.lsr16_immediate(rd=0, rn=0, imm=0b01011),
None)
assert instr.imm5 == 0b01011
def test_decode_ldstrpmd32():
instr = opcode_factory.ldstrpmd32(rd=1, rn=0, sub=1, imm=0b1010101010, bb=0b11, s=1)
name, executefn = decode(instr)
assert Instruction(instr, '').sub == 1
assert Instruction(instr, '').s == 1
assert Instruction(instr, '').size == 0b11
def test_decode_add32_immediate_argument():
instr = Instruction(opcode_factory.add32_immediate(rd=1, rn=0, imm=0b01010101010), '')
assert instr.rd == 1
assert instr.rn == 0
assert instr.imm11 == 0b01010101010
def test_mov_special_registers():
# Note that in the MOV 'special' instructions rd and rn are swapped.
instr = Instruction(opcode_factory.movfs16(rn=0, rd='CONFIG'), '')
assert instr.rd == 0
assert instr.rn == 0
instr = Instruction(opcode_factory.movfs32(rn=0, rd='IRET'), '')
assert instr.rn == 0
assert instr.rd == 8
instr = Instruction(opcode_factory.movts16(rn='CONFIG', rd=0), '')
assert instr.rd == 0
assert instr.rn == 0
instr = Instruction(opcode_factory.movts32(rn='IRET', rd=0), '')
assert instr.rn == 8
assert instr.rd == 0 | 0.332961 | 0.123577 |
import re
from elram.repository.services import CommandException
class CommandParser:
_commands_mapping = {
'add_attendee': (
r'^(?P<nickname>\w+) vino$',
r'^(?P<nickname>\w+) viene$',
r'^(?P<nickname>\w+) va$',
r'^(?P<nickname>\w+) fue$',
r'^vino (?P<nickname>\w+)$',
r'^viene (?P<nickname>\w+)$',
r'^va (?P<nickname>\w+)$',
r'^fue (?P<nickname>\w+)$',
),
'remove_attendee': (
r'^(?P<nickname>\w+) no vino$',
r'^(?P<nickname>\w+) falto$',
r'^(?P<nickname>\w+) no viene$',
r'^(?P<nickname>\w+) no va$',
r'^(?P<nickname>\w+) no fue$',
r'^no vino (?P<nickname>\w+)$',
r'^no viene (?P<nickname>\w+)$',
r'^no va (?P<nickname>\w+)$',
r'^no fue (?P<nickname>\w+)$',
),
'replace_host': (
r'^organiza (?P<nickname>\w+)$',
r'^organizó (?P<nickname>\w+)$',
r'^la hizo (?P<nickname>\w+)$',
r'^la hace (?P<nickname>\w+)$',
r'^es de (?P<nickname>\w+)$',
),
'add_expense': (
r'^(?P<nickname>\w+) gastó (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+))$',
r'^(?P<nickname>\w+) gasto (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+))$',
r'^(?P<nickname>\w+) gastó (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+)) en (?P<description>.+)$',
r'^(?P<nickname>\w+) gasto (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+)) en (?P<description>.+)$',
),
'add_payment': (
r'^(?P<nickname>\w+) pagó (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+))$',
r'^(?P<nickname>\w+) pago (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+))$',
r'^(?P<nickname>\w+) pagó (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+)) a (?P<to_nickname>\w+)$',
r'^(?P<nickname>\w+) pago (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+)) a (?P<to_nickname>\w+)$',
),
'add_refund': (
r'^el fondo pagó (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+)) a (?P<nickname>\w+)$',
r'^el fondo pago (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+)) a (?P<nickname>\w+)$',
),
'next_event': (
r'proxima peña$',
r'próxima peña$',
r'proxima pena$',
r'próxima pena$',
),
'previous_event': (
r'peña anterior$',
r'pena anterior$',
),
'find_event': (
r'^mostrame la peña (?P<event_code>\d+)$',
r'^quiero ver la peña (?P<event_code>\d+)$',
r'^ver peña (?P<event_code>\d+)$',
r'^mostrame la pena (?P<event_code>\d+)$',
r'^quiero ver la pena (?P<event_code>\d+)$',
r'^ver pena (?P<event_code>\d+)$',
),
'active_event': (
r'proxima peña$',
r'próxima peña$',
r'proxima pena$',
r'próxima pena$',
)
}
def __call__(self, message):
message = self._clean_message(message)
for command in self._commands_mapping.keys():
result = self._is_command(command, message)
if result is not None:
return result
raise CommandException('mmmm no entendí')
@staticmethod
def _clean_message(message):
return message.lower().strip()
def _is_command(self, command, message):
for patter in self._commands_mapping[command]:
regex = re.compile(patter)
match = regex.search(message)
if not match:
continue
kwargs = {key: match.group(key) for key in regex.groupindex.keys()}
return command, kwargs | elram/conversations/command_parser.py | import re
from elram.repository.services import CommandException
class CommandParser:
_commands_mapping = {
'add_attendee': (
r'^(?P<nickname>\w+) vino$',
r'^(?P<nickname>\w+) viene$',
r'^(?P<nickname>\w+) va$',
r'^(?P<nickname>\w+) fue$',
r'^vino (?P<nickname>\w+)$',
r'^viene (?P<nickname>\w+)$',
r'^va (?P<nickname>\w+)$',
r'^fue (?P<nickname>\w+)$',
),
'remove_attendee': (
r'^(?P<nickname>\w+) no vino$',
r'^(?P<nickname>\w+) falto$',
r'^(?P<nickname>\w+) no viene$',
r'^(?P<nickname>\w+) no va$',
r'^(?P<nickname>\w+) no fue$',
r'^no vino (?P<nickname>\w+)$',
r'^no viene (?P<nickname>\w+)$',
r'^no va (?P<nickname>\w+)$',
r'^no fue (?P<nickname>\w+)$',
),
'replace_host': (
r'^organiza (?P<nickname>\w+)$',
r'^organizó (?P<nickname>\w+)$',
r'^la hizo (?P<nickname>\w+)$',
r'^la hace (?P<nickname>\w+)$',
r'^es de (?P<nickname>\w+)$',
),
'add_expense': (
r'^(?P<nickname>\w+) gastó (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+))$',
r'^(?P<nickname>\w+) gasto (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+))$',
r'^(?P<nickname>\w+) gastó (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+)) en (?P<description>.+)$',
r'^(?P<nickname>\w+) gasto (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+)) en (?P<description>.+)$',
),
'add_payment': (
r'^(?P<nickname>\w+) pagó (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+))$',
r'^(?P<nickname>\w+) pago (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+))$',
r'^(?P<nickname>\w+) pagó (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+)) a (?P<to_nickname>\w+)$',
r'^(?P<nickname>\w+) pago (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+)) a (?P<to_nickname>\w+)$',
),
'add_refund': (
r'^el fondo pagó (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+)) a (?P<nickname>\w+)$',
r'^el fondo pago (?P<amount>([1-9][0-9]*\.?[0-9]*)|(\.[0-9]+)) a (?P<nickname>\w+)$',
),
'next_event': (
r'proxima peña$',
r'próxima peña$',
r'proxima pena$',
r'próxima pena$',
),
'previous_event': (
r'peña anterior$',
r'pena anterior$',
),
'find_event': (
r'^mostrame la peña (?P<event_code>\d+)$',
r'^quiero ver la peña (?P<event_code>\d+)$',
r'^ver peña (?P<event_code>\d+)$',
r'^mostrame la pena (?P<event_code>\d+)$',
r'^quiero ver la pena (?P<event_code>\d+)$',
r'^ver pena (?P<event_code>\d+)$',
),
'active_event': (
r'proxima peña$',
r'próxima peña$',
r'proxima pena$',
r'próxima pena$',
)
}
def __call__(self, message):
message = self._clean_message(message)
for command in self._commands_mapping.keys():
result = self._is_command(command, message)
if result is not None:
return result
raise CommandException('mmmm no entendí')
@staticmethod
def _clean_message(message):
return message.lower().strip()
def _is_command(self, command, message):
for patter in self._commands_mapping[command]:
regex = re.compile(patter)
match = regex.search(message)
if not match:
continue
kwargs = {key: match.group(key) for key in regex.groupindex.keys()}
return command, kwargs | 0.400046 | 0.305529 |
import pyautogui, os, psutil, sys, subprocess, cv2, signal, patterns
from time import time
from time import sleep
PROCESS_BNET = "Battle.net.exe"
EXE_BNET = "C:\Program Files (x86)\Battle.net\Battle.net Launcher.exe"
PROCESS_WOW = "Wow.exe"
SECONDS_MAX_WAIT = 15
SECONDS_MAX_WAIT_PROCESS = 30
SECONDS_SLEEP = 15
SECONDS_CHAR_SCREEN = 30
SECONDS_LOOP_WAIT = 1
STATE_INIT = "INIT"
STATE_LAUNCH_WOW = "LAUNCHING_WOW"
STATE_WAIT_WOW = "WAITING_WOW"
STATE_LAUNCH_BNET = "LAUNCHING_BNET"
STATE_WAIT_BNET = "WAITING_BNET"
STATE_CHAR_SCREEN = "CHARACTER_SCREEN"
STATE_REALM_WAIT = "REALM_WAIT"
STATE_REALM_LIST = "REALM_LIST"
STATE_REALM_QUE = "REALM_QUE"
STATE_GAMESERVER_WAIT = "GAMESERVER_WAIT"
STATE_CONNECTING_WAIT = "CONNECTING_WAIT"
STATE_HAPPINESS = r"""
WELCOME HOME, FRIENDS!!!
Also, dying now. Enjoy...
.(O). /
o.` `.o
/_ _ _ _ _ _\
|==|===|==|
|==|===|==|
|_=|===|=_|
.-._.-[]\_____/[]-O
-' /=|=|=|=|=\
|o |o|o|o| o|
|o |o|o|o| o|
|o |o|o|o| o|
'-.|o|o|o|.-'
'--.=====.--'
"""
def main():
state = STATE_INIT
timer = time()
last = timer
timeout = 0
while True:
if time() <= last + SECONDS_LOOP_WAIT:
continue
# print("\nSTATE:", state, "\nEXE_TIME:", (time() - last), "\nTIMER:", (timer - time()), "\n")
last = time()
if state == STATE_INIT:
if not get_is_wow_running():
if get_is_bnet_running() and get_is_bnet_visible():
state = STATE_LAUNCH_WOW
else:
state = STATE_LAUNCH_BNET
else:
if get_is_char_screen():
print("Found character screen")
state = STATE_CHAR_SCREEN
if get_is_gameserver_wait():
state = STATE_GAMESERVER_WAIT
if get_is_realm_que():
state = STATE_REALM_QUE
if get_is_realm_list():
state = STATE_REALM_LIST
if get_is_realm_wait():
state = STATE_REALM_WAIT
if get_is_connecting():
state = STATE_CONNECTING_WAIT
if state == STATE_INIT:
state = STATE_LAUNCH_WOW
timer = time() + SECONDS_MAX_WAIT_PROCESS
timeout = 0
continue
elif state == STATE_LAUNCH_BNET:
if launch_bnet():
state = STATE_WAIT_BNET
timer = time() + SECONDS_MAX_WAIT_PROCESS
continue
elif state == STATE_WAIT_BNET:
if get_is_bnet_running() and get_is_bnet_visible():
state = STATE_LAUNCH_WOW
timer = time() + SECONDS_MAX_WAIT_PROCESS
elif time() >= timer:
state = STATE_LAUNCH_BNET
continue
elif state == STATE_LAUNCH_WOW:
if (not get_is_bnet_running() or not get_is_bnet_visible()) and not get_is_wow_running():
state = STATE_LAUNCH_BNET
timer = time() + SECONDS_MAX_WAIT_PROCESS
elif launch_wow():
state = STATE_WAIT_WOW
timer = time() + SECONDS_MAX_WAIT_PROCESS
elif time() >= timer:
state = STATE_LAUNCH_BNET
continue
elif state == STATE_WAIT_WOW:
if get_is_wow_running():
state = STATE_CONNECTING_WAIT
timer = time() + SECONDS_MAX_WAIT
elif time() >= timer:
state = STATE_LAUNCH_WOW
timer = time() + SECONDS_MAX_WAIT_PROCESS
continue
elif state == STATE_CONNECTING_WAIT or state == STATE_REALM_WAIT:
if get_is_connecting() or get_is_realm_wait():
# msleep(SECONDS_SLEEP, "Waiting for connection/realm...")
print("Waiting for connection...")
timer = time() + SECONDS_MAX_WAIT
elif get_is_realm_list():
state = STATE_REALM_LIST
timer = time() + SECONDS_MAX_WAIT
elif time() >= timer:
print("Error waiting for connection/realm listing... Restarting...")
state = STATE_LAUNCH_WOW
timer = time() + SECONDS_MAX_WAIT
continue
elif state == STATE_REALM_LIST:
if load_realm():
state = STATE_GAMESERVER_WAIT
timer = time() + SECONDS_MAX_WAIT
elif time() >= timer:
print("Error finding realm listing... Restarting...")
state = STATE_GAMESERVER_WAIT
timer = time() + SECONDS_MAX_WAIT
continue
elif state == STATE_REALM_QUE:
if get_is_gameserver_wait():
state = STATE_GAMESERVER_WAIT
timer = time() + SECONDS_MAX_WAIT
elif get_is_char_screen():
print("Found character screen")
state = STATE_CHAR_SCREEN
timer = time() + SECONDS_MAX_WAIT
elif get_is_realm_que():
print("Waiting for realm queue...")
timer = time() + SECONDS_MAX_WAIT
elif time() >= timer:
state = STATE_GAMESERVER_WAIT
timer = time() + SECONDS_MAX_WAIT
continue
elif state == STATE_GAMESERVER_WAIT:
if get_is_gameserver_wait():
print("Waiting for gameserver...")
timer = time() + SECONDS_MAX_WAIT
elif get_is_char_screen():
print("Found character screen")
state = STATE_CHAR_SCREEN
timer = time() + SECONDS_MAX_WAIT
elif get_is_realm_list():
state = STATE_REALM_LIST
print("Back to realmlist for some reason.")
elif time() >= timer:
print("Error timed out waiting for gameserver... Restarting...")
state = STATE_INIT
continue
elif state == STATE_CHAR_SCREEN:
salvation = get_is_live()
if salvation:
click(salvation)
print(STATE_HAPPINESS)
sys.exit(0)
elif get_is_char_screen():
if timeout == 0:
timeout = time() + 60*30
print("Time till timeout: 30 minutes")
else:
doom = int((timeout - time()))
if doom % 300 == 0:
print("Time till timeout: %s minutes" % int((timeout - time()) / 60))
timer = time() + SECONDS_CHAR_SCREEN
elif not get_is_wow_running():
state = STATE_INIT
elif time() >= timer and not get_is_char_screen():
print("Error confirming character screen... Restarting...")
state = STATE_INIT
elif time() >= timer - (SECONDS_CHAR_SCREEN/2):
print("Losing character screen...")
def msleep(count=1, msg=""):
print("%s - sleeping for %s seconds" % (msg, count) if msg else "Sleeping for %s seconds..." % count)
sleep(count)
def get_is_live():
return find_pattern(patterns.CHAR_SCREEN_LIVE, .7, grayscale=False)
def get_is_bnet_running():
return process_exists(PROCESS_BNET)
def get_is_bnet_visible():
return find_pattern(patterns.BNET_PLAY, .6)
def get_is_wow_running():
return process_exists(PROCESS_WOW)
def get_is_char_screen():
return find_pattern(patterns.CHAR_SCREEN_DOWN, .5) or get_is_live()
def get_is_realm_que():
return find_pattern(patterns.REALM_QUE, .3)
def get_is_gameserver_wait():
return find_pattern(patterns.GAMESERVER_WAIT, .4)
def get_is_realm_list():
return find_pattern(patterns.REALM_LIST, .3)
def get_is_connecting():
return find_pattern(patterns.CONNECTING, .6)
def get_is_realm_wait():
return find_pattern(patterns.REALM_WAIT, .6)
def process_exists(process):
try:
if psutil.pid_exists(process):
return process
except:
for proc in psutil.process_iter():
try:
if proc.name() == process:
return True
except Exception as ex:
print("Error", ex)
return False
def process_kill(process):
try:
os.system("taskkill /f /im " + process + " >NUL")
print("Killing rogue '%s'" % process)
except:
print("Error can't kill '%s'" % process)
def find_pattern(pattern, confidence=.7, grayscale=True):
try:
return pyautogui.locateCenterOnScreen(pattern, grayscale=grayscale, confidence=confidence)
except:
return False
def click(coords):
try:
pyautogui.doubleClick(coords)
except:
print("Error can't click idk weird ...")
sys.exit(1)
def launch_bnet():
if get_is_bnet_running():
process_kill(PROCESS_BNET)
try:
subprocess.Popen([EXE_BNET])
print("Launching %s" % EXE_BNET)
return True
except Exception as ex:
print("Error launching %s" % EXE_BNET, ex)
sys.exit(1)
return False
def launch_wow():
if get_is_wow_running():
process_kill(PROCESS_WOW)
coords = get_is_bnet_visible()
if coords:
click(coords)
print("Launching %s" % PROCESS_WOW)
return True
return False
def load_realm():
coords = get_is_realm_list()
if coords:
click([coords[0] + 30, coords[1] + 60])
print("Logging into Realm")
return True
return False
if __name__ == "__main__":
main() | wow-launcher-script.py |
import pyautogui, os, psutil, sys, subprocess, cv2, signal, patterns
from time import time
from time import sleep
PROCESS_BNET = "Battle.net.exe"
EXE_BNET = "C:\Program Files (x86)\Battle.net\Battle.net Launcher.exe"
PROCESS_WOW = "Wow.exe"
SECONDS_MAX_WAIT = 15
SECONDS_MAX_WAIT_PROCESS = 30
SECONDS_SLEEP = 15
SECONDS_CHAR_SCREEN = 30
SECONDS_LOOP_WAIT = 1
STATE_INIT = "INIT"
STATE_LAUNCH_WOW = "LAUNCHING_WOW"
STATE_WAIT_WOW = "WAITING_WOW"
STATE_LAUNCH_BNET = "LAUNCHING_BNET"
STATE_WAIT_BNET = "WAITING_BNET"
STATE_CHAR_SCREEN = "CHARACTER_SCREEN"
STATE_REALM_WAIT = "REALM_WAIT"
STATE_REALM_LIST = "REALM_LIST"
STATE_REALM_QUE = "REALM_QUE"
STATE_GAMESERVER_WAIT = "GAMESERVER_WAIT"
STATE_CONNECTING_WAIT = "CONNECTING_WAIT"
STATE_HAPPINESS = r"""
WELCOME HOME, FRIENDS!!!
Also, dying now. Enjoy...
.(O). /
o.` `.o
/_ _ _ _ _ _\
|==|===|==|
|==|===|==|
|_=|===|=_|
.-._.-[]\_____/[]-O
-' /=|=|=|=|=\
|o |o|o|o| o|
|o |o|o|o| o|
|o |o|o|o| o|
'-.|o|o|o|.-'
'--.=====.--'
"""
def main():
state = STATE_INIT
timer = time()
last = timer
timeout = 0
while True:
if time() <= last + SECONDS_LOOP_WAIT:
continue
# print("\nSTATE:", state, "\nEXE_TIME:", (time() - last), "\nTIMER:", (timer - time()), "\n")
last = time()
if state == STATE_INIT:
if not get_is_wow_running():
if get_is_bnet_running() and get_is_bnet_visible():
state = STATE_LAUNCH_WOW
else:
state = STATE_LAUNCH_BNET
else:
if get_is_char_screen():
print("Found character screen")
state = STATE_CHAR_SCREEN
if get_is_gameserver_wait():
state = STATE_GAMESERVER_WAIT
if get_is_realm_que():
state = STATE_REALM_QUE
if get_is_realm_list():
state = STATE_REALM_LIST
if get_is_realm_wait():
state = STATE_REALM_WAIT
if get_is_connecting():
state = STATE_CONNECTING_WAIT
if state == STATE_INIT:
state = STATE_LAUNCH_WOW
timer = time() + SECONDS_MAX_WAIT_PROCESS
timeout = 0
continue
elif state == STATE_LAUNCH_BNET:
if launch_bnet():
state = STATE_WAIT_BNET
timer = time() + SECONDS_MAX_WAIT_PROCESS
continue
elif state == STATE_WAIT_BNET:
if get_is_bnet_running() and get_is_bnet_visible():
state = STATE_LAUNCH_WOW
timer = time() + SECONDS_MAX_WAIT_PROCESS
elif time() >= timer:
state = STATE_LAUNCH_BNET
continue
elif state == STATE_LAUNCH_WOW:
if (not get_is_bnet_running() or not get_is_bnet_visible()) and not get_is_wow_running():
state = STATE_LAUNCH_BNET
timer = time() + SECONDS_MAX_WAIT_PROCESS
elif launch_wow():
state = STATE_WAIT_WOW
timer = time() + SECONDS_MAX_WAIT_PROCESS
elif time() >= timer:
state = STATE_LAUNCH_BNET
continue
elif state == STATE_WAIT_WOW:
if get_is_wow_running():
state = STATE_CONNECTING_WAIT
timer = time() + SECONDS_MAX_WAIT
elif time() >= timer:
state = STATE_LAUNCH_WOW
timer = time() + SECONDS_MAX_WAIT_PROCESS
continue
elif state == STATE_CONNECTING_WAIT or state == STATE_REALM_WAIT:
if get_is_connecting() or get_is_realm_wait():
# msleep(SECONDS_SLEEP, "Waiting for connection/realm...")
print("Waiting for connection...")
timer = time() + SECONDS_MAX_WAIT
elif get_is_realm_list():
state = STATE_REALM_LIST
timer = time() + SECONDS_MAX_WAIT
elif time() >= timer:
print("Error waiting for connection/realm listing... Restarting...")
state = STATE_LAUNCH_WOW
timer = time() + SECONDS_MAX_WAIT
continue
elif state == STATE_REALM_LIST:
if load_realm():
state = STATE_GAMESERVER_WAIT
timer = time() + SECONDS_MAX_WAIT
elif time() >= timer:
print("Error finding realm listing... Restarting...")
state = STATE_GAMESERVER_WAIT
timer = time() + SECONDS_MAX_WAIT
continue
elif state == STATE_REALM_QUE:
if get_is_gameserver_wait():
state = STATE_GAMESERVER_WAIT
timer = time() + SECONDS_MAX_WAIT
elif get_is_char_screen():
print("Found character screen")
state = STATE_CHAR_SCREEN
timer = time() + SECONDS_MAX_WAIT
elif get_is_realm_que():
print("Waiting for realm queue...")
timer = time() + SECONDS_MAX_WAIT
elif time() >= timer:
state = STATE_GAMESERVER_WAIT
timer = time() + SECONDS_MAX_WAIT
continue
elif state == STATE_GAMESERVER_WAIT:
if get_is_gameserver_wait():
print("Waiting for gameserver...")
timer = time() + SECONDS_MAX_WAIT
elif get_is_char_screen():
print("Found character screen")
state = STATE_CHAR_SCREEN
timer = time() + SECONDS_MAX_WAIT
elif get_is_realm_list():
state = STATE_REALM_LIST
print("Back to realmlist for some reason.")
elif time() >= timer:
print("Error timed out waiting for gameserver... Restarting...")
state = STATE_INIT
continue
elif state == STATE_CHAR_SCREEN:
salvation = get_is_live()
if salvation:
click(salvation)
print(STATE_HAPPINESS)
sys.exit(0)
elif get_is_char_screen():
if timeout == 0:
timeout = time() + 60*30
print("Time till timeout: 30 minutes")
else:
doom = int((timeout - time()))
if doom % 300 == 0:
print("Time till timeout: %s minutes" % int((timeout - time()) / 60))
timer = time() + SECONDS_CHAR_SCREEN
elif not get_is_wow_running():
state = STATE_INIT
elif time() >= timer and not get_is_char_screen():
print("Error confirming character screen... Restarting...")
state = STATE_INIT
elif time() >= timer - (SECONDS_CHAR_SCREEN/2):
print("Losing character screen...")
def msleep(count=1, msg=""):
print("%s - sleeping for %s seconds" % (msg, count) if msg else "Sleeping for %s seconds..." % count)
sleep(count)
def get_is_live():
return find_pattern(patterns.CHAR_SCREEN_LIVE, .7, grayscale=False)
def get_is_bnet_running():
return process_exists(PROCESS_BNET)
def get_is_bnet_visible():
return find_pattern(patterns.BNET_PLAY, .6)
def get_is_wow_running():
return process_exists(PROCESS_WOW)
def get_is_char_screen():
return find_pattern(patterns.CHAR_SCREEN_DOWN, .5) or get_is_live()
def get_is_realm_que():
return find_pattern(patterns.REALM_QUE, .3)
def get_is_gameserver_wait():
return find_pattern(patterns.GAMESERVER_WAIT, .4)
def get_is_realm_list():
return find_pattern(patterns.REALM_LIST, .3)
def get_is_connecting():
return find_pattern(patterns.CONNECTING, .6)
def get_is_realm_wait():
return find_pattern(patterns.REALM_WAIT, .6)
def process_exists(process):
try:
if psutil.pid_exists(process):
return process
except:
for proc in psutil.process_iter():
try:
if proc.name() == process:
return True
except Exception as ex:
print("Error", ex)
return False
def process_kill(process):
try:
os.system("taskkill /f /im " + process + " >NUL")
print("Killing rogue '%s'" % process)
except:
print("Error can't kill '%s'" % process)
def find_pattern(pattern, confidence=.7, grayscale=True):
try:
return pyautogui.locateCenterOnScreen(pattern, grayscale=grayscale, confidence=confidence)
except:
return False
def click(coords):
try:
pyautogui.doubleClick(coords)
except:
print("Error can't click idk weird ...")
sys.exit(1)
def launch_bnet():
if get_is_bnet_running():
process_kill(PROCESS_BNET)
try:
subprocess.Popen([EXE_BNET])
print("Launching %s" % EXE_BNET)
return True
except Exception as ex:
print("Error launching %s" % EXE_BNET, ex)
sys.exit(1)
return False
def launch_wow():
if get_is_wow_running():
process_kill(PROCESS_WOW)
coords = get_is_bnet_visible()
if coords:
click(coords)
print("Launching %s" % PROCESS_WOW)
return True
return False
def load_realm():
coords = get_is_realm_list()
if coords:
click([coords[0] + 30, coords[1] + 60])
print("Logging into Realm")
return True
return False
if __name__ == "__main__":
main() | 0.106935 | 0.0545 |
import pickle
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from source.Dataset.PawsDataset import PawsDataset
class PawsDataModule(pl.LightningDataModule):
"""
Paws DataModule
"""
def __init__(self, params, st1_tokenizer, st2_tokenizer, fold):
super(PawsDataModule, self).__init__()
self.params = params
self.st1_tokenizer = st1_tokenizer
self.st2_tokenizer = st2_tokenizer
self.fold = fold
def prepare_data(self):
with open(self.params.dir + f"samples.pkl", "rb") as dataset_file:
self.samples = pickle.load(dataset_file)
def setup(self, stage=None):
if stage == 'fit':
self.train_dataset = PawsDataset(
samples=self.samples,
ids_path=self.params.dir + f"fold_{self.fold}/train.pkl",
st1_tokenizer=self.st1_tokenizer,
st2_tokenizer=self.st2_tokenizer,
st1_max_length=self.params.st1_max_length,
st2_max_length=self.params.st2_max_length
)
self.val_dataset = PawsDataset(
samples=self.samples,
ids_path=self.params.dir + f"fold_{self.fold}/val.pkl",
st1_tokenizer=self.st1_tokenizer,
st2_tokenizer=self.st2_tokenizer,
st1_max_length=self.params.st1_max_length,
st2_max_length=self.params.st2_max_length
)
if stage == 'test' or stage is "predict":
self.test_dataset = PawsDataset(
samples=self.samples,
ids_path=self.params.dir + f"fold_{self.fold}/val.pkl",
st1_tokenizer=self.st1_tokenizer,
st2_tokenizer=self.st2_tokenizer,
st1_max_length=self.params.st1_max_length,
st2_max_length=self.params.st2_max_length
)
def train_dataloader(self):
return DataLoader(
self.train_dataset,
batch_size=self.params.batch_size,
shuffle=True,
num_workers=self.params.num_workers
)
def val_dataloader(self):
return DataLoader(
self.val_dataset,
batch_size=self.params.batch_size,
shuffle=False,
num_workers=self.params.num_workers
)
def test_dataloader(self):
return DataLoader(
self.test_dataset,
batch_size=self.params.batch_size,
shuffle=False,
num_workers=self.params.num_workers
)
def predict_dataloader(self):
return self.test_dataloader() | source/DataModule/PawsDataModule.py | import pickle
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from source.Dataset.PawsDataset import PawsDataset
class PawsDataModule(pl.LightningDataModule):
    """Lightning DataModule for the PAWS paraphrase dataset.

    Loads the pre-pickled sample corpus once in ``prepare_data`` and builds
    fold-specific train/val/test ``PawsDataset`` instances in ``setup``.
    """

    def __init__(self, params, st1_tokenizer, st2_tokenizer, fold):
        """Store hyper-parameters, the two sentence tokenizers and the fold index."""
        super(PawsDataModule, self).__init__()
        self.params = params
        self.st1_tokenizer = st1_tokenizer
        self.st2_tokenizer = st2_tokenizer
        self.fold = fold

    def prepare_data(self):
        # Load the full corpus once; setup() later selects fold-specific ids.
        # (Original used an f-string with no placeholders here.)
        with open(self.params.dir + "samples.pkl", "rb") as dataset_file:
            self.samples = pickle.load(dataset_file)

    def _make_dataset(self, split_file):
        """Build a PawsDataset for the given split pickle of the current fold."""
        return PawsDataset(
            samples=self.samples,
            ids_path=self.params.dir + f"fold_{self.fold}/{split_file}",
            st1_tokenizer=self.st1_tokenizer,
            st2_tokenizer=self.st2_tokenizer,
            st1_max_length=self.params.st1_max_length,
            st2_max_length=self.params.st2_max_length
        )

    def setup(self, stage=None):
        if stage == 'fit':
            self.train_dataset = self._make_dataset("train.pkl")
            self.val_dataset = self._make_dataset("val.pkl")
        # BUG FIX: the original compared `stage is "predict"` — an identity
        # check against a string literal, which is not guaranteed to match an
        # equal string object. Use equality instead.
        if stage == 'test' or stage == 'predict':
            # NOTE(review): test/predict reuse the validation split — confirm
            # this is intentional (no held-out test split).
            self.test_dataset = self._make_dataset("val.pkl")

    def train_dataloader(self):
        return DataLoader(
            self.train_dataset,
            batch_size=self.params.batch_size,
            shuffle=True,  # shuffle only during training
            num_workers=self.params.num_workers
        )

    def val_dataloader(self):
        return DataLoader(
            self.val_dataset,
            batch_size=self.params.batch_size,
            shuffle=False,
            num_workers=self.params.num_workers
        )

    def test_dataloader(self):
        return DataLoader(
            self.test_dataset,
            batch_size=self.params.batch_size,
            shuffle=False,
            num_workers=self.params.num_workers
        )
def predict_dataloader(self):
return self.test_dataloader() | 0.805823 | 0.286144 |
import inspect
import logging
class logger(object):
    """Thin wrapper around :mod:`logging` that prefixes every message with
    the name of the calling function (obtained via frame introspection).
    """

    def __init__(self, name, level):
        """Create a named logger and install one console handler at *level*."""
        self.level = level
        self.name = name
        self.setLogLevel(self.level)

    def setLogLevel(self, level):
        """(Re)configure the underlying logger at *level*.

        Removes handlers installed by a previous call so repeated invocations
        do not duplicate console output.
        """
        try:
            # BUG FIX: iterate over a copy — removing from the live handler
            # list while iterating it skips every other handler.
            for handler in list(self.logger.handlers):
                self.logger.removeHandler(handler)
        except AttributeError:
            # First call (from __init__): self.logger does not exist yet.
            pass
        # create logger
        self.logger = logging.getLogger(self.name)
        self.logger.setLevel(level)
        # create console handler and set level
        self.ch = logging.StreamHandler()
        self.ch.setLevel(level)
        # create formatter and attach it to the handler
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        self.ch.setFormatter(formatter)
        # add the handler to the logger
        self.logger.addHandler(self.ch)

    def debug(self, message):
        """Log *message* at DEBUG level, prefixed with the caller's name."""
        func = inspect.currentframe().f_back.f_code
        self.logger.debug("%s - %s" % (
            func.co_name,
            message
        ))

    def info(self, message):
        """Log *message* at INFO level, prefixed with the caller's name."""
        func = inspect.currentframe().f_back.f_code
        self.logger.info("%s - %s" % (
            func.co_name,
            message
        ))

    def warning(self, message):
        """Log *message* at WARNING level, prefixed with the caller's name."""
        func = inspect.currentframe().f_back.f_code
        self.logger.warning("%s - %s" % (
            func.co_name,
            message
        ))

    def error(self, message):
        """Log *message* at ERROR level, prefixed with the caller's name."""
        func = inspect.currentframe().f_back.f_code
        self.logger.error("%s - %s" % (
            func.co_name,
            message
        ))

    def critical(self, message):
        """Log *message* at CRITICAL level, with caller name, file and line."""
        func = inspect.currentframe().f_back.f_code
        self.logger.critical("%s - %s in %s:%i" % (
            func.co_name,
            message,
            func.co_filename,
            func.co_firstlineno
        ))
CRITICAL = 50
ERROR = 40
WARNING = 30
INFO = 20
DEBUG = 10
NOTSET = 0 | logger.py | import inspect
import logging
class logger(object):
    """Convenience wrapper over :mod:`logging`: every message is prefixed
    with the calling function's name (looked up via the caller's frame).
    """

    def __init__(self, name, level):
        """Create the named logger and wire a stream handler at *level*."""
        self.level = level
        self.name = name
        self.setLogLevel(self.level)

    def setLogLevel(self, level):
        """(Re)configure the wrapped logger at *level*, replacing any
        handlers added by an earlier call (prevents duplicated output)."""
        try:
            # BUG FIX: copy the handler list before removing — mutating
            # it during iteration skips every other handler.
            for handler in list(self.logger.handlers):
                self.logger.removeHandler(handler)
        except AttributeError:
            # self.logger is only missing on the very first call.
            pass
        # create logger
        self.logger = logging.getLogger(self.name)
        self.logger.setLevel(level)
        # create console handler and set level
        self.ch = logging.StreamHandler()
        self.ch.setLevel(level)
        # create formatter and install it on the handler
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        self.ch.setFormatter(formatter)
        # register the handler
        self.logger.addHandler(self.ch)

    def debug(self, message):
        """DEBUG-level message, prefixed with the caller's function name."""
        func = inspect.currentframe().f_back.f_code
        self.logger.debug("%s - %s" % (
            func.co_name,
            message
        ))

    def info(self, message):
        """INFO-level message, prefixed with the caller's function name."""
        func = inspect.currentframe().f_back.f_code
        self.logger.info("%s - %s" % (
            func.co_name,
            message
        ))

    def warning(self, message):
        """WARNING-level message, prefixed with the caller's function name."""
        func = inspect.currentframe().f_back.f_code
        self.logger.warning("%s - %s" % (
            func.co_name,
            message
        ))

    def error(self, message):
        """ERROR-level message, prefixed with the caller's function name."""
        func = inspect.currentframe().f_back.f_code
        self.logger.error("%s - %s" % (
            func.co_name,
            message
        ))

    def critical(self, message):
        """CRITICAL-level message including caller name, file and line."""
        func = inspect.currentframe().f_back.f_code
        self.logger.critical("%s - %s in %s:%i" % (
            func.co_name,
            message,
            func.co_filename,
            func.co_firstlineno
        ))
CRITICAL = 50
ERROR = 40
WARNING = 30
INFO = 20
DEBUG = 10
NOTSET = 0 | 0.336876 | 0.063395 |
# [SET_DRESS_CHANGED] [00 00 ]
# Angelic Buster tutorial cutscene, driven by the server-side script manager
# `sm`. Sequence: equip the starter weapon, enter directed (cutscene) mode,
# spawn three NPCs, play the transformation + dialogue, kill two of the NPCs,
# then (after this span) warp the player to the next tutorial map.
sm.giveAndEquip(1352601)  # give and auto-equip item 1352601 (presumably the starter weapon)
sm.curNodeEventEnd(True)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(True, True, False, False)  # lock the client into cutscene mode
# Scripted character movement; forcedInput codes are client-defined
# (presumably 0 = stop, 2/4 = directional inputs) — TODO confirm against client enums.
sm.forcedInput(0)
sm.forcedInput(2)
sm.sendDelay(30)
sm.forcedInput(0)
sm.forcedInput(4)
# Spawn the three scene NPCs at fixed x/y positions and play their "summon" intro.
OBJECT_1 = sm.sendNpcController(3000141, -150, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_1, "summon", 0)
OBJECT_2 = sm.sendNpcController(3000115, 200, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_2, "summon", 0)
OBJECT_3 = sm.sendNpcController(3000111, 300, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_3, "summon", 0)
sm.sendDelay(1200)
# Transformation effect on OBJECT_1, then scripted dialogue. For each line the
# script sets the speaker NPC, hides the escape button and sends the text
# (sendNext = prompt with a "next" arrow, sendSay = plain line).
sm.reservedEffect("Effect/Direction10.img/angelicTuto/Scene3")
sm.showEffect("Effect/BasicEff.img/Kaiser_Transform4_S", 0, 0, 0, 0, OBJECT_1, False, 0)
sm.setSpeakerID(3000115)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendNext("W-what is this madness?!")
sm.setSpeakerID(3000111)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("How could a mere child have that kind of power?!")
sm.reservedEffect("Effect/Direction10.img/angelicTuto/Scene3")
sm.setSpeakerID(3000115)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("He seems to be unconscious. We are lucky.")
sm.reservedEffect("Effect/Direction10.img/angelicTuto/Scene3")
sm.setSpeakerID(3000111)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("They came out of nowhere. We must eliminate them before more come.")
sm.reservedEffect("Effect/Direction10.img/angelicTuto/Scene3")
# Speech-balloon effect above the player, then a short pause.
sm.showEffect("Effect/Direction10.img/effect/story/BalloonMsg0/1", 1200, 0, -100, -2, -2, False, 0)
sm.sendDelay(900)
sm.reservedEffect("Effect/Direction10.img/angelicTuto/Scene3")
sm.forcedInput(0)
sm.setSpeakerID(3000115)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendNext("He's waking up!")
sm.sendDelay(150)
sm.reservedEffect("Effect/Direction10.img/angelicTuto/Scene3")
# Player fires the Angelic Buster skill 65121002: forced action + VFX + SFX.
sm.forcedAction(497, 0)
sm.showEffect("Skill/6512.img/skill/65121002/effect", 0, 0, 0, -2, -2, False, 0)
sm.playSound("Angelicburster/65121002", 100)
sm.sendDelay(900)
# White-out flash; the two enemy NPCs play their death animation, then despawn.
sm.showFieldEffect("demonSlayer/whiteOut", 0)
sm.showNpcSpecialActionByObjectId(OBJECT_2, "die1", 0)
sm.showNpcSpecialActionByObjectId(OBJECT_3, "die1", 0)
sm.sendDelay(1200)
sm.sendNpcController(OBJECT_2, False)
sm.sendNpcController(OBJECT_3, False)
sm.sendDelay(720)
# Leave cutscene mode and despawn the remaining NPC.
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(False, True, False, False)
sm.sendNpcController(OBJECT_1, False)
sm.warp(940011050, 0) | scripts/field/angelic_tuto4.py |
# [SET_DRESS_CHANGED] [00 00 ]
sm.giveAndEquip(1352601)
sm.curNodeEventEnd(True)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(True, True, False, False)
sm.forcedInput(0)
sm.forcedInput(2)
sm.sendDelay(30)
sm.forcedInput(0)
sm.forcedInput(4)
OBJECT_1 = sm.sendNpcController(3000141, -150, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_1, "summon", 0)
OBJECT_2 = sm.sendNpcController(3000115, 200, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_2, "summon", 0)
OBJECT_3 = sm.sendNpcController(3000111, 300, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_3, "summon", 0)
sm.sendDelay(1200)
sm.reservedEffect("Effect/Direction10.img/angelicTuto/Scene3")
sm.showEffect("Effect/BasicEff.img/Kaiser_Transform4_S", 0, 0, 0, 0, OBJECT_1, False, 0)
sm.setSpeakerID(3000115)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendNext("W-what is this madness?!")
sm.setSpeakerID(3000111)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("How could a mere child have that kind of power?!")
sm.reservedEffect("Effect/Direction10.img/angelicTuto/Scene3")
sm.setSpeakerID(3000115)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("He seems to be unconscious. We are lucky.")
sm.reservedEffect("Effect/Direction10.img/angelicTuto/Scene3")
sm.setSpeakerID(3000111)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("They came out of nowhere. We must eliminate them before more come.")
sm.reservedEffect("Effect/Direction10.img/angelicTuto/Scene3")
sm.showEffect("Effect/Direction10.img/effect/story/BalloonMsg0/1", 1200, 0, -100, -2, -2, False, 0)
sm.sendDelay(900)
sm.reservedEffect("Effect/Direction10.img/angelicTuto/Scene3")
sm.forcedInput(0)
sm.setSpeakerID(3000115)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendNext("He's waking up!")
sm.sendDelay(150)
sm.reservedEffect("Effect/Direction10.img/angelicTuto/Scene3")
sm.forcedAction(497, 0)
sm.showEffect("Skill/6512.img/skill/65121002/effect", 0, 0, 0, -2, -2, False, 0)
sm.playSound("Angelicburster/65121002", 100)
sm.sendDelay(900)
sm.showFieldEffect("demonSlayer/whiteOut", 0)
sm.showNpcSpecialActionByObjectId(OBJECT_2, "die1", 0)
sm.showNpcSpecialActionByObjectId(OBJECT_3, "die1", 0)
sm.sendDelay(1200)
sm.sendNpcController(OBJECT_2, False)
sm.sendNpcController(OBJECT_3, False)
sm.sendDelay(720)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(False, True, False, False)
sm.sendNpcController(OBJECT_1, False)
sm.warp(940011050, 0) | 0.348645 | 0.198666 |
import numpy as np
import OpenGL.GL as gl
import math
import ctypes
import hashlib
cache = {}
class base(object):
"""Base class for 2d geometries with modelview and projection transforms"""
version = """#version 300 es\n"""
vertex_code = """
uniform mat4 modelview;
uniform mat4 projection;
uniform vec4 objcolor;
in highp vec4 color;
in highp vec2 position;
out highp vec4 v_color;
void main()
{
gl_Position = projection * modelview * vec4(position,0.0,1.0);
v_color = objcolor * color;
} """
fragment_code = """
in highp vec4 v_color;
out highp vec4 f_color;
void main()
{
f_color = v_color;
} """
attributes = { 'color' : 4, 'position' : 2 }
instanceAttributes = {}
primitive = gl.GL_TRIANGLES
srcblend = gl.GL_SRC_ALPHA
dstblend = gl.GL_ONE_MINUS_SRC_ALPHA
program = None
    def __init__(self):
        """Compile (or fetch a cached) shader program and upload geometry.

        Programs are shared process-wide: two instances with identical
        vertex+fragment source reuse the same GL program object.
        """
        global cache
        # Key the program cache on a digest of the combined shader source.
        codehash = hashlib.sha256(self.vertex_code.encode('utf-8') + self.fragment_code.encode('utf-8')).digest()
        if codehash in cache:
            self.program = cache[codehash]
        else:
            self.program = self.loadShaderProgram()
            cache[codehash] = self.program
        # Start with identity transforms; callers override via the setters.
        identity = np.eye(4, dtype=np.float32)
        self.setModelView(identity);
        self.setProjection(identity);
        # Upload per-vertex and (optional) per-instance attribute buffers.
        (self.vertexBuffer, self.vertices, self.offsets, self.stride) = self.loadGeometry()
        (self.instanceBuffer, self.instances, self.instanceOffsets, self.instanceStride) = self.loadInstances()
        self.color = (1,1,1,1)  # object tint, multiplied into vertex color by the shader
def __del__(self):
try:
gl.glDeleteBuffers(1, [self.vertexBuffer])
except: pass
    def getVertices(self):
        """Override for useful geometry.

        Returns a mapping of attribute name -> list of per-vertex values,
        matching the keys declared in ``attributes``.
        """
        return { 'color' : [], 'position' : [] }
    def getInstances(self):
        """Override for instancing.

        Returns a mapping of instance-attribute name -> per-instance values,
        matching ``instanceAttributes``.
        """
        return {}
    def reloadInstanceData(self):
        # Re-upload per-instance data (e.g. after instances changed) and keep
        # the instance count in sync with what was uploaded.
        self.instances = self.loadAttribData(self.instanceBuffer, self.instanceAttributes, self.getInstances())
    def loadShaderProgram(self):
        """Compile and link the vertex/fragment shader pair into a GL program.

        Prints the offending shader source and GL info log, then raises
        RuntimeError, when either shader fails to compile.
        """
        # Request a program and shader slots from GPU
        program = gl.glCreateProgram()
        vertex = gl.glCreateShader(gl.GL_VERTEX_SHADER)
        fragment = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)
        # Set shaders source
        gl.glShaderSource(vertex, self.version + self.vertex_code)
        gl.glShaderSource(fragment, self.version + self.fragment_code)
        # Compile shaders
        gl.glCompileShader(vertex)
        gl.glCompileShader(fragment)
        # A non-empty info log is treated as failure here.
        # NOTE(review): some drivers emit warnings on success — confirm this
        # strictness is intended for the supported targets.
        log = gl.glGetShaderInfoLog(vertex)
        if log:
            print('Vertex shader')
            print(self.vertex_code)
            print(log.decode('ascii'))
            raise RuntimeError('Shader compiliation failed')
        log = gl.glGetShaderInfoLog(fragment)
        if log:
            print('Fragment shader')
            print(self.fragment_code)
            print(log.decode('ascii'))
            raise RuntimeError('Shader compiliation failed')
        # Attach shader objects to the program
        gl.glAttachShader(program, vertex)
        gl.glAttachShader(program, fragment)
        # Build program
        gl.glLinkProgram(program)
        # Get rid of shaders (no more needed once linked)
        gl.glDetachShader(program, vertex)
        gl.glDetachShader(program, fragment)
        return program
    def loadGeometry(self):
        """Upload per-vertex attributes; returns (buffer, count, offsets, stride)."""
        attrData = self.getVertices()
        return self.loadAttribs(self.attributes, attrData)
    def loadInstances(self):
        """Upload per-instance attributes; returns (buffer, count, offsets, stride)."""
        instanceData = self.getInstances()
        return self.loadAttribs(self.instanceAttributes, instanceData)
def loadAttribData(self, buffer, attrNames, attrData):
# Make this buffer the default one
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, buffer)
format = []
for attrib in attrNames:
format.append( (attrib, np.float32, attrNames[attrib]) )
size = None
for attrib, data in attrData.items():
if size == None:
size = len(data)
continue
if size != len(data):
raise RuntimeError('not all attribute arrays have the same length')
data = np.zeros(size, format)
for attrib in attrNames:
if attrData[attrib]:
data[attrib] = attrData[attrib]
# Upload data
gl.glBufferData(gl.GL_ARRAY_BUFFER, data.nbytes, data, gl.GL_DYNAMIC_DRAW)
return size
    def loadAttribs(self, attrNames, attrData):
        """Create a GL buffer for the given attributes and upload their data.

        Returns (buffer, record_count, {attrib: byte_offset}, stride).
        When attrNames is empty, returns (None, 1, 0, 0) — a count of 1 so
        non-instanced draws still render once.
        """
        if not attrNames:
            return (None, 1, 0, 0)
        # Request a buffer slot from GPU
        buffer = gl.glGenBuffers(1)
        # Build the interleaved structured dtype to measure field offsets.
        # NOTE(review): `format` shadows the builtin of the same name.
        format = []
        for attrib in attrNames:
            format.append( (attrib, np.float32, attrNames[attrib]) )
        data = np.zeros(1, format)
        # Compute each field's byte offset within one interleaved record.
        offset = 0
        offsets = {}
        for attrib in attrNames:
            offsets[attrib] = ctypes.c_void_p(offset)
            offset += data.dtype[attrib].itemsize
        stride = data.strides[0]
        # Upload data
        if attrNames:  # always true here (guarded by the early return above)
            size = self.loadAttribData(buffer, attrNames, attrData)
        return buffer, size, offsets, stride
    def setModelView(self, M):
        # 4x4 modelview matrix (float32, as produced by np.eye in __init__);
        # uploaded to the "modelview" uniform at render time.
        self.modelview = M
    def setProjection(self, M):
        # 4x4 projection matrix; uploaded to the "projection" uniform at
        # render time.
        self.projection = M
    def render(self):
        """Bind the program, upload uniforms, wire attribute pointers, draw."""
        # Select our shaders
        gl.glUseProgram(self.program)
        # Use correct modelview
        loc = gl.glGetUniformLocation(self.program, "modelview")
        gl.glUniformMatrix4fv(loc, 1, False, self.modelview)
        # Use correct projection
        loc = gl.glGetUniformLocation(self.program, "projection")
        gl.glUniformMatrix4fv(loc, 1, False, self.projection)
        # Use correct color
        loc = gl.glGetUniformLocation(self.program, "objcolor")
        gl.glUniform4fv(loc, 1, self.color)
        gl.glBlendFunc(self.srcblend, self.dstblend)
        # Per-vertex attributes advance every vertex (divisor 0)...
        for attrib in self.attributes:
            loc = gl.glGetAttribLocation(self.program, attrib)
            if loc < 0:
                raise RuntimeError('Attribute %s not found in program' % attrib)
            gl.glEnableVertexAttribArray(loc)
            gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertexBuffer)
            gl.glVertexAttribPointer(loc, self.attributes[attrib], gl.GL_FLOAT, False, self.stride, self.offsets[attrib])
            gl.glVertexAttribDivisor(loc, 0);
        # ...while instance attributes advance once per instance (divisor 1).
        for attrib in self.instanceAttributes:
            loc = gl.glGetAttribLocation(self.program, attrib)
            if loc < 0:
                raise RuntimeError('Instance attribute %s not found in program' % attrib)
            gl.glEnableVertexAttribArray(loc)
            gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.instanceBuffer)
            gl.glVertexAttribPointer(loc, self.instanceAttributes[attrib], gl.GL_FLOAT, False, self.instanceStride, self.instanceOffsets[attrib])
            gl.glVertexAttribDivisor(loc, 1);
        self.draw()
def draw(self):
gl.glDrawArraysInstanced(self.primitive, 0, self.vertices, self.instances) | geometry/__init__.py | import numpy as np
import OpenGL.GL as gl
import math
import ctypes
import hashlib
cache = {}
class base(object):
"""Base class for 2d geometries with modelview and projection transforms"""
version = """#version 300 es\n"""
vertex_code = """
uniform mat4 modelview;
uniform mat4 projection;
uniform vec4 objcolor;
in highp vec4 color;
in highp vec2 position;
out highp vec4 v_color;
void main()
{
gl_Position = projection * modelview * vec4(position,0.0,1.0);
v_color = objcolor * color;
} """
fragment_code = """
in highp vec4 v_color;
out highp vec4 f_color;
void main()
{
f_color = v_color;
} """
attributes = { 'color' : 4, 'position' : 2 }
instanceAttributes = {}
primitive = gl.GL_TRIANGLES
srcblend = gl.GL_SRC_ALPHA
dstblend = gl.GL_ONE_MINUS_SRC_ALPHA
program = None
def __init__(self):
global cache
# Cache the program based on the hash of the sahder
codehash = hashlib.sha256(self.vertex_code.encode('utf-8') + self.fragment_code.encode('utf-8')).digest()
if codehash in cache:
self.program = cache[codehash]
else:
self.program = self.loadShaderProgram()
cache[codehash] = self.program
identity = np.eye(4, dtype=np.float32)
self.setModelView(identity);
self.setProjection(identity);
(self.vertexBuffer, self.vertices, self.offsets, self.stride) = self.loadGeometry()
(self.instanceBuffer, self.instances, self.instanceOffsets, self.instanceStride) = self.loadInstances()
self.color = (1,1,1,1)
def __del__(self):
try:
gl.glDeleteBuffers(1, [self.vertexBuffer])
except: pass
def getVertices(self):
"""Override for useful geometry"""
return { 'color' : [], 'position' : [] }
def getInstances(self):
"""Override for instancing"""
return {}
def reloadInstanceData(self):
self.instances = self.loadAttribData(self.instanceBuffer, self.instanceAttributes, self.getInstances())
def loadShaderProgram(self):
# Request a program and shader slots from GPU
program = gl.glCreateProgram()
vertex = gl.glCreateShader(gl.GL_VERTEX_SHADER)
fragment = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)
# Set shaders source
gl.glShaderSource(vertex, self.version + self.vertex_code)
gl.glShaderSource(fragment, self.version + self.fragment_code)
# Compile shaders
gl.glCompileShader(vertex)
gl.glCompileShader(fragment)
log = gl.glGetShaderInfoLog(vertex)
if log:
print('Vertex shader')
print(self.vertex_code)
print(log.decode('ascii'))
raise RuntimeError('Shader compiliation failed')
log = gl.glGetShaderInfoLog(fragment)
if log:
print('Fragment shader')
print(self.fragment_code)
print(log.decode('ascii'))
raise RuntimeError('Shader compiliation failed')
# Attach shader objects to the program
gl.glAttachShader(program, vertex)
gl.glAttachShader(program, fragment)
# Build program
gl.glLinkProgram(program)
# Get rid of shaders (no more needed)
gl.glDetachShader(program, vertex)
gl.glDetachShader(program, fragment)
return program
def loadGeometry(self):
attrData = self.getVertices()
return self.loadAttribs(self.attributes, attrData)
def loadInstances(self):
instanceData = self.getInstances()
return self.loadAttribs(self.instanceAttributes, instanceData)
def loadAttribData(self, buffer, attrNames, attrData):
# Make this buffer the default one
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, buffer)
format = []
for attrib in attrNames:
format.append( (attrib, np.float32, attrNames[attrib]) )
size = None
for attrib, data in attrData.items():
if size == None:
size = len(data)
continue
if size != len(data):
raise RuntimeError('not all attribute arrays have the same length')
data = np.zeros(size, format)
for attrib in attrNames:
if attrData[attrib]:
data[attrib] = attrData[attrib]
# Upload data
gl.glBufferData(gl.GL_ARRAY_BUFFER, data.nbytes, data, gl.GL_DYNAMIC_DRAW)
return size
def loadAttribs(self, attrNames, attrData):
if not attrNames:
return (None, 1, 0, 0)
# Request a buffer slot from GPU
buffer = gl.glGenBuffers(1)
format = []
for attrib in attrNames:
format.append( (attrib, np.float32, attrNames[attrib]) )
data = np.zeros(1, format)
offset = 0
offsets = {}
for attrib in attrNames:
offsets[attrib] = ctypes.c_void_p(offset)
offset += data.dtype[attrib].itemsize
stride = data.strides[0]
# Upload data
if attrNames:
size = self.loadAttribData(buffer, attrNames, attrData)
return buffer, size, offsets, stride
def setModelView(self, M):
self.modelview = M
def setProjection(self, M):
self.projection = M
def render(self):
# Select our shaders
gl.glUseProgram(self.program)
# Use correct modelview
loc = gl.glGetUniformLocation(self.program, "modelview")
gl.glUniformMatrix4fv(loc, 1, False, self.modelview)
# Use correct projection
loc = gl.glGetUniformLocation(self.program, "projection")
gl.glUniformMatrix4fv(loc, 1, False, self.projection)
# Use correct color
loc = gl.glGetUniformLocation(self.program, "objcolor")
gl.glUniform4fv(loc, 1, self.color)
gl.glBlendFunc(self.srcblend, self.dstblend)
for attrib in self.attributes:
loc = gl.glGetAttribLocation(self.program, attrib)
if loc < 0:
raise RuntimeError('Attribute %s not found in program' % attrib)
gl.glEnableVertexAttribArray(loc)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertexBuffer)
gl.glVertexAttribPointer(loc, self.attributes[attrib], gl.GL_FLOAT, False, self.stride, self.offsets[attrib])
gl.glVertexAttribDivisor(loc, 0);
for attrib in self.instanceAttributes:
loc = gl.glGetAttribLocation(self.program, attrib)
if loc < 0:
raise RuntimeError('Instance attribute %s not found in program' % attrib)
gl.glEnableVertexAttribArray(loc)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.instanceBuffer)
gl.glVertexAttribPointer(loc, self.instanceAttributes[attrib], gl.GL_FLOAT, False, self.instanceStride, self.instanceOffsets[attrib])
gl.glVertexAttribDivisor(loc, 1);
self.draw()
def draw(self):
gl.glDrawArraysInstanced(self.primitive, 0, self.vertices, self.instances) | 0.694717 | 0.221888 |
import boto3
from services.common import *
iam = boto3.client('iam')
def _process_create_user(event: dict, set_tag: bool = False) -> list:
""" Process CreateUser event. """
user_name = event['responseElements']['user']['userName']
if set_tag is True:
tags = iam.list_user_tags(UserName=user_name)['Tags']
if check_contain_mandatory_tag_list(tags) is False:
iam.tag_user(UserName=user_name,
Tags=[{
'Key': 'User',
'Value': get_user_identity(event)
}])
return [user_name]
def _process_create_role(event: dict, set_tag: bool = False) -> list:
""" Process CreateRole event. """
role_name = event['responseElements']['role']['arn'].split(':')[-1]
if set_tag is True:
tags = iam.list_role_tags(RoleName=role_name.split('/')[-1])['Tags']
if check_contain_mandatory_tag_list(tags) is False:
iam.tag_role(RoleName=role_name.split('/')[-1],
Tags=[{
'Key': 'User',
'Value': get_user_identity(event)
}])
return [role_name]
def _process_create_policy(event: dict, set_tag: bool = False) -> list:
""" Process CreatePolicy event. """
if set_tag is True:
policy_arn = event['responseElements']['policy']['arn']
tags = iam.list_policy_tags(PolicyArn=policy_arn)['Tags']
if check_contain_mandatory_tag_list(tags) is False:
iam.tag_policy(PolicyArn=policy_arn,
Tags=[{
'Key': 'User',
'Value': get_user_identity(event)
}])
return [event['responseElements']['policy']['policyName']]
def _process_create_instance_profile(event: dict, set_tag: bool = False) -> list:
""" Process CreateInstanceProfile event. """
instance_profile_name = event['responseElements']['instanceProfile']['instanceProfileName']
if set_tag is True:
tags = iam.list_instance_profile_tags(InstanceProfileName=instance_profile_name)['Tags']
if check_contain_mandatory_tag_list(tags) is False:
iam.tag_instance_profile(InstanceProfileName=instance_profile_name,
Tags=[{
'Key': 'User',
'Value': get_user_identity(event)
}])
return [instance_profile_name]
def _process_create_policy_version(event: dict) -> list:
""" Process CreatePolicyVersion event. This function doesn't set tags. """
policy_name = event['requestParameters']['policyArn'].split('/')[-1]
policy_new_version = event['responseElements']['policyVersion']['versionId']
return [f"{policy_name}:{policy_new_version}"]
def _process_create_group(event: dict) -> list:
""" Process CreateGroup event. This function doesn't set tags. """
return [event['responseElements']['group']['groupName']]
def process_event(event: dict) -> dict:
""" Process CloudTrail event for IAM services """
result = {
"resource_id": None,
"identity": get_user_identity(event),
"region": event['awsRegion'],
"source_ip_address": event['sourceIPAddress'],
"event_name": event['eventName'],
"event_source": get_service_name(event)
}
if event['responseElements'] is None:
result['error'] = f"response is None: check CloudTrail event - {event['eventID']}"
return result
set_tag = check_set_mandatory_tag()
if event['eventName'] == "CreateUser":
result['resource_id'] = _process_create_user(event, set_tag)
elif event['eventName'] == "CreateRole":
result['resource_id'] = _process_create_role(event, set_tag)
elif event['eventName'] == "CreatePolicy":
result['resource_id'] = _process_create_policy(event, set_tag)
elif event['eventName'] == "CreateInstanceProfile":
result['resource_id'] = _process_create_instance_profile(event, set_tag)
elif event['eventName'] == "CreatePolicyVersion":
result['resource_id'] = _process_create_policy_version(event)
elif event['eventName'] == "CreateGroup":
result['resource_id'] = _process_create_group(event)
else:
message = f"Cannot process event: {event['eventName']}, eventID: f{event['eventID']}"
result['error'] = message
return result | functions/watcher/services/iam.py | import boto3
from services.common import *
iam = boto3.client('iam')
def _process_create_user(event: dict, set_tag: bool = False) -> list:
""" Process CreateUser event. """
user_name = event['responseElements']['user']['userName']
if set_tag is True:
tags = iam.list_user_tags(UserName=user_name)['Tags']
if check_contain_mandatory_tag_list(tags) is False:
iam.tag_user(UserName=user_name,
Tags=[{
'Key': 'User',
'Value': get_user_identity(event)
}])
return [user_name]
def _process_create_role(event: dict, set_tag: bool = False) -> list:
""" Process CreateRole event. """
role_name = event['responseElements']['role']['arn'].split(':')[-1]
if set_tag is True:
tags = iam.list_role_tags(RoleName=role_name.split('/')[-1])['Tags']
if check_contain_mandatory_tag_list(tags) is False:
iam.tag_role(RoleName=role_name.split('/')[-1],
Tags=[{
'Key': 'User',
'Value': get_user_identity(event)
}])
return [role_name]
def _process_create_policy(event: dict, set_tag: bool = False) -> list:
""" Process CreatePolicy event. """
if set_tag is True:
policy_arn = event['responseElements']['policy']['arn']
tags = iam.list_policy_tags(PolicyArn=policy_arn)['Tags']
if check_contain_mandatory_tag_list(tags) is False:
iam.tag_policy(PolicyArn=policy_arn,
Tags=[{
'Key': 'User',
'Value': get_user_identity(event)
}])
return [event['responseElements']['policy']['policyName']]
def _process_create_instance_profile(event: dict, set_tag: bool = False) -> list:
""" Process CreateInstanceProfile event. """
instance_profile_name = event['responseElements']['instanceProfile']['instanceProfileName']
if set_tag is True:
tags = iam.list_instance_profile_tags(InstanceProfileName=instance_profile_name)['Tags']
if check_contain_mandatory_tag_list(tags) is False:
iam.tag_instance_profile(InstanceProfileName=instance_profile_name,
Tags=[{
'Key': 'User',
'Value': get_user_identity(event)
}])
return [instance_profile_name]
def _process_create_policy_version(event: dict) -> list:
""" Process CreatePolicyVersion event. This function doesn't set tags. """
policy_name = event['requestParameters']['policyArn'].split('/')[-1]
policy_new_version = event['responseElements']['policyVersion']['versionId']
return [f"{policy_name}:{policy_new_version}"]
def _process_create_group(event: dict) -> list:
""" Process CreateGroup event. This function doesn't set tags. """
return [event['responseElements']['group']['groupName']]
def process_event(event: dict) -> dict:
""" Process CloudTrail event for IAM services """
result = {
"resource_id": None,
"identity": get_user_identity(event),
"region": event['awsRegion'],
"source_ip_address": event['sourceIPAddress'],
"event_name": event['eventName'],
"event_source": get_service_name(event)
}
if event['responseElements'] is None:
result['error'] = f"response is None: check CloudTrail event - {event['eventID']}"
return result
set_tag = check_set_mandatory_tag()
if event['eventName'] == "CreateUser":
result['resource_id'] = _process_create_user(event, set_tag)
elif event['eventName'] == "CreateRole":
result['resource_id'] = _process_create_role(event, set_tag)
elif event['eventName'] == "CreatePolicy":
result['resource_id'] = _process_create_policy(event, set_tag)
elif event['eventName'] == "CreateInstanceProfile":
result['resource_id'] = _process_create_instance_profile(event, set_tag)
elif event['eventName'] == "CreatePolicyVersion":
result['resource_id'] = _process_create_policy_version(event)
elif event['eventName'] == "CreateGroup":
result['resource_id'] = _process_create_group(event)
else:
message = f"Cannot process event: {event['eventName']}, eventID: f{event['eventID']}"
result['error'] = message
return result | 0.375592 | 0.152379 |
import numpy as np
import datetime
from matplotlib import pyplot as plt
from betellib import tweet, build_string, get_mags_from_AAVSO
def make_plot(days_ago, dates, mag):
    """Bin AAVSO magnitude estimates into 10-day means, convert them to
    flux relative to ``baseline_mag`` and save a flux-vs-year plot to the
    module-level ``plot_file``.

    Args:
        days_ago: array of observation ages in days (0 = newest).
        dates: array of date values aligned with ``days_ago`` (kept for
            interface compatibility; not used by the plotting itself).
        mag: array of visual magnitude estimates aligned with ``days_ago``.
    """
    print('Making plot...')
    min_plot = 0
    max_plot = 1.4
    # Bin the observations into fixed 10-day windows.
    bin_width = 10
    nights = np.arange(0, max(days_ago), bin_width)
    bin_mags = []
    errors = []
    for night in nights:
        selector = np.where((days_ago < night + bin_width) & (days_ago > night))
        n_obs = np.size(mag[selector])
        flux = np.mean(mag[selector])
        error = np.std(mag[selector]) / np.sqrt(n_obs)
        if error > 0.2:
            # Suppress the error bar entirely for very noisy bins.
            error = 0
        bin_mags.append(flux)
        errors.append(error)
        print(night, flux, error, n_obs, np.std(mag[selector]))
    # Convert binned magnitudes to flux relative to the baseline magnitude.
    bin_mags = np.array(bin_mags)
    flux = 1 / (10**(0.4 * (bin_mags - baseline_mag)))
    print(flux)
    # Convert "days ago" into a decimal calendar year for the x axis.
    date = datetime.datetime.now()
    digi_year = (float(date.strftime("%j")) - 1) / 366 + float(date.strftime("%Y"))
    days = nights + bin_width / 2
    years_before = digi_year - (days / 365.2524)
    # (Removed unused locals from the original: time_span, x_days, mid.)
    fig, ax = plt.subplots()
    plt.errorbar(years_before, flux, yerr=errors, fmt='.k')
    plt.xlabel('Year')
    plt.ylabel('Normalized flux')
    plt.ylim(min_plot, max_plot)
    plt.xlim(2015, digi_year + 0.25)
    date_text = datetime.datetime.now().strftime("%d %b %Y")
    plt.text(2015.1, 0.03, 'AAVSO visual (by-eye) 10-day bins. Update: '+date_text)
    plt.savefig(plot_file, bbox_inches='tight', dpi=300)
    print('Plot made')
# Pull the last 10 pages from AAVSO and collate the dates and mags
plot_file = 'plot5y.png'
url_base = 'https://www.aavso.org/apps/webobs/results/?star=betelgeuse&num_results=200&obs_types=vis&page='
baseline_mag = 0.5
pages = np.arange(1, 25, 1)
all_dates = np.array([])
all_mags = np.array([])
for page in pages:
url = url_base + str(page)
print(url)
dates, mags = get_mags_from_AAVSO(url)
all_dates = np.concatenate((all_dates, dates))
all_mags = np.concatenate((all_mags, mags))
dates = all_dates
mags = all_mags
days_ago = np.max(dates) - dates
data_last24hrs = np.where(days_ago<1)
mean_last24hrs = np.median(mags[data_last24hrs])
flux = 1 / (10**(0.4 * (mean_last24hrs - baseline_mag)))
percentage = str(int(round(flux * 100, 0)))
text = "Now at " + percentage + r"% of my usual brightness! #Betelgeuse"
print(text)
if text is not None:
make_plot(days_ago, dates, mags)
tweet(text, plot_file) | betel5y.py | import numpy as np
import datetime
from matplotlib import pyplot as plt
from betellib import tweet, build_string, get_mags_from_AAVSO
def make_plot(days_ago, dates, mag):
print('Making plot...')
time_span = np.max(dates) - np.min(dates)
min_plot = 0
max_plot = 1.4
x_days = 2000
# Make bins
bin_width = 10
nights = np.arange(0, max(days_ago), bin_width)
bin_mags = []
errors = []
for night in nights:
selector = np.where((days_ago<night+bin_width) & (days_ago>night))
n_obs = np.size(mag[selector])
flux = np.mean(mag[selector])
error = np.std(mag[selector]) / np.sqrt(n_obs)
if error > 0.2:
error = 0
bin_mags.append(flux)
errors.append(error)
print(night, flux, error, n_obs, np.std(mag[selector]))
# Convert magnitudes to fluxes
bin_mags = np.array(bin_mags)
flux = 1 / (10**(0.4 * (bin_mags - baseline_mag)))
print(flux)
# Convert days to digital years
date = datetime.datetime.now()
digi_year = (float(date.strftime("%j"))-1) / 366 + float(date.strftime("%Y"))
days = nights+bin_width/2
years_before = digi_year - (days / 365.2524)
fig, ax = plt.subplots()
plt.errorbar(years_before, flux, yerr=errors, fmt='.k')
plt.xlabel('Year')
plt.ylabel('Normalized flux')
mid = np.median(mag)
plt.ylim(min_plot, max_plot)
plt.xlim(2015, digi_year+0.25)
date_text = datetime.datetime.now().strftime("%d %b %Y")
plt.text(2015.1, 0.03, 'AAVSO visual (by-eye) 10-day bins. Update: '+date_text)
plt.savefig(plot_file, bbox_inches='tight', dpi=300)
print('Plot made')
# Pull the last 10 pages from AAVSO and collate the dates and mags
plot_file = 'plot5y.png'
url_base = 'https://www.aavso.org/apps/webobs/results/?star=betelgeuse&num_results=200&obs_types=vis&page='
baseline_mag = 0.5
pages = np.arange(1, 25, 1)
all_dates = np.array([])
all_mags = np.array([])
for page in pages:
url = url_base + str(page)
print(url)
dates, mags = get_mags_from_AAVSO(url)
all_dates = np.concatenate((all_dates, dates))
all_mags = np.concatenate((all_mags, mags))
dates = all_dates
mags = all_mags
days_ago = np.max(dates) - dates
data_last24hrs = np.where(days_ago<1)
mean_last24hrs = np.median(mags[data_last24hrs])
flux = 1 / (10**(0.4 * (mean_last24hrs - baseline_mag)))
percentage = str(int(round(flux * 100, 0)))
text = "Now at " + percentage + r"% of my usual brightness! #Betelgeuse"
print(text)
if text is not None:
make_plot(days_ago, dates, mags)
tweet(text, plot_file) | 0.616705 | 0.377512 |
from __future__ import annotations
from dataclasses import dataclass, field
from itertools import chain, count, repeat
from typing import Dict, Iterable, Iterator, List, Optional, Set, Tuple
from dataslots import with_slots
import tsim.core.index as Index
from tsim.core.entity import DeleteResult, EntityRef
from tsim.core.geometry import (BoundingRect, Point, Polygon, Vector,
calc_bounding_rect, line_intersection,
midpoint, point_in_polygon)
from tsim.core.network.entity import NetworkEntity
from tsim.core.network.intersection import Intersection, LaneConnection
from tsim.core.network.lane import LANE_WIDTH, LaneRef
from tsim.core.network.way import Endpoint, OrientedWay, Way
from tsim.utils.cachedproperty import add_cached, cached_property
@add_cached
class Node(NetworkEntity):
"""A node of the network.
A `Node` can be the endpoint of a `Way` or a junction of 3 or more ways. A
node that connects two ways can be dissolved into a waypoint with the
`dissolve` method, unless the two ways have different lane configurations
and can't be merged.
"""
__slots__ = 'position', 'level', 'starts', 'ends'
# `starts` holds refs to ways whose START endpoint is this node and
# `ends` to ways whose END endpoint is this node (see `oriented_ways`).
position: Point
level: int
starts: List[EntityRef[Way]]
ends: List[EntityRef[Way]]
def __init__(self, position: Point, level: int = 0):
"""Create a node at `position` on the given `level`."""
self.position = position
self.level = level
self.starts = []
self.ends = []
super().__init__()
@cached_property
def geometry(self) -> NodeGeometry:
"""Get the geometry info for the node."""
return NodeGeometry(self)
@cached_property
def intersection(self) -> Intersection:
"""Get intersection info for the node."""
return Intersection(self)
@property
def neighbors(self) -> Iterable[NetworkEntity]:
"""Get iterable with entities directly connected to this node."""
return self.ways
@property
def polygon(self) -> Polygon:
"""Get polygon from the node geometry."""
return self.geometry.polygon
@property
def max_lanes(self) -> int:
"""Maximum number of lanes in incident ways."""
return max(e.value.total_lane_count
for e in chain(self.starts, self.ends))
@property
def total_way_connections(self) -> int:
"""Get number of way connections to this node."""
return len(self.starts) + len(self.ends)
@property
def oriented_ways(self) -> Iterator[OrientedWay]:
"""Get incident ways with orientation (endpoint)."""
return (OrientedWay.build(r.value, e) for r, e in chain(
zip(self.starts, repeat(Endpoint.START)),
zip(self.ends, repeat(Endpoint.END))))
@property
def ways(self) -> Set[Way]:
"""Get all incident ways."""
return {r.value for r in chain(self.starts, self.ends)}
def calc_bounding_rect(self,
accumulated: BoundingRect = None) -> BoundingRect:
"""Calculate the bounding rect of the node."""
return self.geometry.calc_bounding_rect(accumulated)
def distance(self, point: Point, squared: bool = False) -> float:
"""Calculate distance from the node to a point."""
if squared:
return self.position.distance_squared(point)
return self.position.distance(point)
def way_connections(self, source: OrientedWay) -> Set[OrientedWay]:
"""Get the outgoing way connections coming from `source`."""
connections = self.intersection.way_connections.get(source, None)
return connections if connections is not None else set()
def sorted_ways(self) -> List[OrientedWay]:
"""Get incident ways sorted in counterclockwise order."""
return sorted(
self.oriented_ways,
key=lambda t: (t.way.direction_from_node(self, t.endpoint)
.sorting_key()))
def ways_closest_to(self, oriented_way: OrientedWay) \
-> Tuple[OrientedWay, OrientedWay]:
"""Get incident way closest to given way in each direction.
Returns a tuple where the first value is the closest way in the
clockwise direction and the second in the counterclockwise direction.
If the way passed is the only way incident to the node, it will be
returned as both values in the tuple. If there is only one other way,
it will likewise be on both values.
"""
sorted_ways = self.sorted_ways()
index = sorted_ways.index(oriented_way)
# Negative index wraps around, so the neighbors are found even when
# `oriented_way` is the first element of the sorted list.
return (sorted_ways[index - 1],
sorted_ways[(index + 1) % len(sorted_ways)])
def get_lane_connection(self, source: LaneRef, dest: OrientedWay) \
-> Optional[LaneConnection]:
"""Get the lane connection leading to the given destination.
The source is a LaneRef, representing the current position for an
agent. The destination is an OrientedWay, considering there's no need
to reach the destination way on a specific lane. This method tries to
get a connection from the source lane, but may return a connection from
a different lane if there is no connection from the given lane.
Useful navigating a path that does not contain lane information.
"""
return self.intersection.connection_map.get((source, dest), None)
def dissolve(self, delete_if_dissolved=False):
"""Remove a node joining the two ways it connects."""
two_ways = len(self.starts) + len(self.ends) == 2
# A single way looping back into this node cannot be merged with itself.
loops = (self.starts and self.ends and
self.starts[0].value is self.ends[0].value)
if not two_ways or loops:
raise ValueError(
'Can only dissolve nodes connected to exactly two ways.')
# The two incident ways, taken from `ends` first, then `starts`.
ways = [r.value for r in chain(self.ends, self.starts)]
assert len(ways) == 2
if any(not w.is_valid for w in ways):
raise ValueError(
'Can only dissolve nodes connected to valid ways.')
# Far endpoints of each way become the endpoints of the merged way.
start, end = (w.other(self) for w in ways)
if not self.level == start.level == end.level:
raise ValueError('Can not dissolve nodes in different levels.')
# True when one way ends here and the other starts here, i.e. both
# flow in the same direction through the node.
same_dir = (((ways[0].end is self) and (ways[1].start is self)) or
((ways[0].start is self) and (ways[1].end is self)))
if ((same_dir and ways[0].lane_count != ways[1].lane_count)
or (not same_dir
and ways[0].lane_count != ways[1].swapped_lane_count)):
raise ValueError('Can not dissolve nodes with lane changes.')
# Orient both waypoint lists so they read toward/away from this node.
waypoints1 = list(ways[0].waypoints if ways[0].end is self
else reversed(ways[0].waypoints))
waypoints2 = list(ways[1].waypoints if ways[1].start is self
else reversed(ways[1].waypoints))
# Only add node as waypoint if it's far enough from neighbors
if waypoints1 and waypoints2 and any(
p1.close_to(p2, threshold=0.5)
for p1, p2 in ((waypoints1[-1], self.position),
(waypoints2[0], self.position))):
waypoint = []
else:
waypoint = [self.position]
waypoints = chain(waypoints1, waypoint, waypoints2)
ways[0].disconnect(start)
ways[1].disconnect(end)
lane_count = (ways[0].lane_count if ways[0].end is self
else ways[0].swapped_lane_count)
# NOTE(review): the merged Way is not returned; it appears to register
# itself with the network on construction — confirm.
way = Way(start, end, lane_count=lane_count,
waypoints=tuple(waypoints))
way.xid = ways[0].xid if ways[0].xid is not None else ways[1].xid
if delete_if_dissolved:
Index.INSTANCE.delete(self)
def clear_intersecting_waypoints(self) -> int:
"""Clear waypoints from incident ways intersecting the node polygon.
With each waypoint removed, the geometry is updated for the node and
for the incident ways. Returns the number of waypoints cleared.
"""
# Removes at most one waypoint per call; returns whether one was found.
def clear_waypoint():
for way in self.ways:
if not way.waypoints:
continue
for index in (0, -1):
if point_in_polygon(way.waypoints[index], self.polygon):
waypoints = list(way.waypoints)
del waypoints[index]
way.waypoints = tuple(waypoints)
self.clear_cache(True)
return True
return False
# `count()` tallies how many waypoints were cleared before fixpoint.
for i in count():
if not clear_waypoint():
return i
def on_delete(self) -> DeleteResult:
"""Disconnect this node from the network.
Returns the ways that must be disconnected to free this node.
"""
# Detach way endpoints first so the ways can be deleted cleanly.
for way in map(lambda r: r.value, self.starts):
way.start = None
for way in map(lambda r: r.value, self.ends):
way.end = None
to_delete = {r.value for r in set(chain(self.starts, self.ends))}
Index.INSTANCE.rebuild_path_map()
return DeleteResult(to_delete, ())
@with_slots
@dataclass(eq=False)
class NodeGeometry:
"""Information on the geometric shape of a node.
The points that form the shape of the node, calculated from the ways that
are adjacent to this node.
"""
# Reference to the owning node; a raw Node is coerced in __post_init__.
node_ref: EntityRef[Node]
# Maps each oriented way to its slot in `way_distances`.
way_indexes: Dict[OrientedWay, int] = field(init=False)
# Distance from node center to where each way should start or end.
way_distances: List[float] = field(init=False)
polygon: Polygon = field(init=False, default_factory=list)
def __post_init__(self) -> None:
"""Derive the geometry data for the referenced node."""
if not isinstance(self.node_ref, EntityRef):
self.node_ref = EntityRef(self.node_ref)
node = self.node_ref()
ways = node.sorted_ways()
self.way_indexes = {w: i for i, w in enumerate(ways)}
# Filled in place by `build_polygon` below.
self.way_distances = [None for _ in ways]
self.polygon = build_polygon(node, ways, self.way_distances)
@property
def node(self) -> Node:
"""Get the referenced node."""
return self.node_ref()
def distance(self, oriented_way: OrientedWay) -> float:
"""Get distance from node center to where way should start or end."""
return self.way_distances[self.way_indexes[oriented_way]]
def calc_bounding_rect(self,
accumulated: BoundingRect = None) -> BoundingRect:
"""Calculate the bounding rect of the node."""
# An isolated node has no polygon; fall back to the point itself.
if not self.polygon:
return self.node.position.calc_bounding_rect(accumulated)
return calc_bounding_rect(self.polygon, accumulated)
def build_polygon(node: Node, ways: List[OrientedWay],
way_distances: List[float]) -> Polygon:
"""Calculate points for the geometric bounds of the node.

Produces two polygon vertices per incident way: odd slots hold the
corner between consecutive ways (first loop), even slots hold the
reflected points computed in the second loop. `way_distances` is filled
in place with the distance from the node center to each way's cut line.
"""
polygon = [None] * (2 * len(ways))
# Unit direction of each way pointing away from the node.
directions = tuple(w().direction_from_node(node, e).normalized()
for w, e in ways)
for i, (way, _) in enumerate(ways):
half_widths = tuple(w().total_lane_count * LANE_WIDTH / 2
for w in (ways[i - 1].way_ref, way))
# Points relative to the node position.
points = (directions[i - 1].rotated_left() * half_widths[0],
directions[i].rotated_right() * half_widths[1])
try:
point = line_intersection(points[0], directions[i - 1],
points[1], directions[i])
proportion = (points[0].distance(point)
/ points[0].distance(points[1]))
# Nearly parallel ways push the intersection far away; fall back
# to the midpoint between the two offset points in that case.
if ((len(directions) == 2 and proportion > 2.0)
or proportion > 5.0):
point = midpoint(*points)
except ZeroDivisionError:
point = midpoint(*points)
# For i == 0 this writes slot -1 (the last), closing the ring.
polygon[2 * i - 1] = point
for i, (_, direction) in enumerate(zip(ways, directions)):
farthest: Vector = max(
polygon[2 * i - 1], polygon[2 * i + 1],
key=lambda v, d=direction: v.dot_product(d))
projection, reflection = farthest.projection_reflection(direction)
way_distances[i] = projection.norm()
polygon[2 * i] = reflection
# Corners were computed relative to the node; translate to world space.
return [p + node.position for p in polygon] | tsim/core/network/node.py |
from __future__ import annotations
from dataclasses import dataclass, field
from itertools import chain, count, repeat
from typing import Dict, Iterable, Iterator, List, Optional, Set, Tuple
from dataslots import with_slots
import tsim.core.index as Index
from tsim.core.entity import DeleteResult, EntityRef
from tsim.core.geometry import (BoundingRect, Point, Polygon, Vector,
calc_bounding_rect, line_intersection,
midpoint, point_in_polygon)
from tsim.core.network.entity import NetworkEntity
from tsim.core.network.intersection import Intersection, LaneConnection
from tsim.core.network.lane import LANE_WIDTH, LaneRef
from tsim.core.network.way import Endpoint, OrientedWay, Way
from tsim.utils.cachedproperty import add_cached, cached_property
@add_cached
class Node(NetworkEntity):
"""A node of the network.
A `Node` can be the endpoint of a `Way` or a junction of 3 or more ways. A
node that connects two ways can be dissolved into a waypoint with the
`dissolve` method, unless the two ways have different lane configurations
and can't be merged.
"""
__slots__ = 'position', 'level', 'starts', 'ends'
# `starts` holds refs to ways whose START endpoint is this node and
# `ends` to ways whose END endpoint is this node (see `oriented_ways`).
position: Point
level: int
starts: List[EntityRef[Way]]
ends: List[EntityRef[Way]]
def __init__(self, position: Point, level: int = 0):
"""Create a node at `position` on the given `level`."""
self.position = position
self.level = level
self.starts = []
self.ends = []
super().__init__()
@cached_property
def geometry(self) -> NodeGeometry:
"""Get the geometry info for the node."""
return NodeGeometry(self)
@cached_property
def intersection(self) -> Intersection:
"""Get intersection info for the node."""
return Intersection(self)
@property
def neighbors(self) -> Iterable[NetworkEntity]:
"""Get iterable with entities directly connected to this node."""
return self.ways
@property
def polygon(self) -> Polygon:
"""Get polygon from the node geometry."""
return self.geometry.polygon
@property
def max_lanes(self) -> int:
"""Maximum number of lanes in incident ways."""
return max(e.value.total_lane_count
for e in chain(self.starts, self.ends))
@property
def total_way_connections(self) -> int:
"""Get number of way connections to this node."""
return len(self.starts) + len(self.ends)
@property
def oriented_ways(self) -> Iterator[OrientedWay]:
"""Get incident ways with orientation (endpoint)."""
return (OrientedWay.build(r.value, e) for r, e in chain(
zip(self.starts, repeat(Endpoint.START)),
zip(self.ends, repeat(Endpoint.END))))
@property
def ways(self) -> Set[Way]:
"""Get all incident ways."""
return {r.value for r in chain(self.starts, self.ends)}
def calc_bounding_rect(self,
accumulated: BoundingRect = None) -> BoundingRect:
"""Calculate the bounding rect of the node."""
return self.geometry.calc_bounding_rect(accumulated)
def distance(self, point: Point, squared: bool = False) -> float:
"""Calculate distance from the node to a point."""
if squared:
return self.position.distance_squared(point)
return self.position.distance(point)
def way_connections(self, source: OrientedWay) -> Set[OrientedWay]:
"""Get the outgoing way connections coming from `source`."""
connections = self.intersection.way_connections.get(source, None)
return connections if connections is not None else set()
def sorted_ways(self) -> List[OrientedWay]:
"""Get incident ways sorted in counterclockwise order."""
return sorted(
self.oriented_ways,
key=lambda t: (t.way.direction_from_node(self, t.endpoint)
.sorting_key()))
def ways_closest_to(self, oriented_way: OrientedWay) \
-> Tuple[OrientedWay, OrientedWay]:
"""Get incident way closest to given way in each direction.
Returns a tuple where the first value is the closest way in the
clockwise direction and the second in the counterclockwise direction.
If the way passed is the only way incident to the node, it will be
returned as both values in the tuple. If there is only one other way,
it will likewise be on both values.
"""
sorted_ways = self.sorted_ways()
index = sorted_ways.index(oriented_way)
# Negative index wraps around, so the neighbors are found even when
# `oriented_way` is the first element of the sorted list.
return (sorted_ways[index - 1],
sorted_ways[(index + 1) % len(sorted_ways)])
def get_lane_connection(self, source: LaneRef, dest: OrientedWay) \
-> Optional[LaneConnection]:
"""Get the lane connection leading to the given destination.
The source is a LaneRef, representing the current position for an
agent. The destination is an OrientedWay, considering there's no need
to reach the destination way on a specific lane. This method tries to
get a connection from the source lane, but may return a connection from
a different lane if there is no connection from the given lane.
Useful navigating a path that does not contain lane information.
"""
return self.intersection.connection_map.get((source, dest), None)
def dissolve(self, delete_if_dissolved=False):
"""Remove a node joining the two ways it connects."""
two_ways = len(self.starts) + len(self.ends) == 2
# A single way looping back into this node cannot be merged with itself.
loops = (self.starts and self.ends and
self.starts[0].value is self.ends[0].value)
if not two_ways or loops:
raise ValueError(
'Can only dissolve nodes connected to exactly two ways.')
# The two incident ways, taken from `ends` first, then `starts`.
ways = [r.value for r in chain(self.ends, self.starts)]
assert len(ways) == 2
if any(not w.is_valid for w in ways):
raise ValueError(
'Can only dissolve nodes connected to valid ways.')
# Far endpoints of each way become the endpoints of the merged way.
start, end = (w.other(self) for w in ways)
if not self.level == start.level == end.level:
raise ValueError('Can not dissolve nodes in different levels.')
# True when one way ends here and the other starts here, i.e. both
# flow in the same direction through the node.
same_dir = (((ways[0].end is self) and (ways[1].start is self)) or
((ways[0].start is self) and (ways[1].end is self)))
if ((same_dir and ways[0].lane_count != ways[1].lane_count)
or (not same_dir
and ways[0].lane_count != ways[1].swapped_lane_count)):
raise ValueError('Can not dissolve nodes with lane changes.')
# Orient both waypoint lists so they read toward/away from this node.
waypoints1 = list(ways[0].waypoints if ways[0].end is self
else reversed(ways[0].waypoints))
waypoints2 = list(ways[1].waypoints if ways[1].start is self
else reversed(ways[1].waypoints))
# Only add node as waypoint if it's far enough from neighbors
if waypoints1 and waypoints2 and any(
p1.close_to(p2, threshold=0.5)
for p1, p2 in ((waypoints1[-1], self.position),
(waypoints2[0], self.position))):
waypoint = []
else:
waypoint = [self.position]
waypoints = chain(waypoints1, waypoint, waypoints2)
ways[0].disconnect(start)
ways[1].disconnect(end)
lane_count = (ways[0].lane_count if ways[0].end is self
else ways[0].swapped_lane_count)
# NOTE(review): the merged Way is not returned; it appears to register
# itself with the network on construction — confirm.
way = Way(start, end, lane_count=lane_count,
waypoints=tuple(waypoints))
way.xid = ways[0].xid if ways[0].xid is not None else ways[1].xid
if delete_if_dissolved:
Index.INSTANCE.delete(self)
def clear_intersecting_waypoints(self) -> int:
"""Clear waypoints from incident ways intersecting the node polygon.
With each waypoint removed, the geometry is updated for the node and
for the incident ways. Returns the number of waypoints cleared.
"""
# Removes at most one waypoint per call; returns whether one was found.
def clear_waypoint():
for way in self.ways:
if not way.waypoints:
continue
for index in (0, -1):
if point_in_polygon(way.waypoints[index], self.polygon):
waypoints = list(way.waypoints)
del waypoints[index]
way.waypoints = tuple(waypoints)
self.clear_cache(True)
return True
return False
# `count()` tallies how many waypoints were cleared before fixpoint.
for i in count():
if not clear_waypoint():
return i
def on_delete(self) -> DeleteResult:
"""Disconnect this node from the network.
Returns the ways that must be disconnected to free this node.
"""
# Detach way endpoints first so the ways can be deleted cleanly.
for way in map(lambda r: r.value, self.starts):
way.start = None
for way in map(lambda r: r.value, self.ends):
way.end = None
to_delete = {r.value for r in set(chain(self.starts, self.ends))}
Index.INSTANCE.rebuild_path_map()
return DeleteResult(to_delete, ())
@with_slots
@dataclass(eq=False)
class NodeGeometry:
"""Information on the geometric shape of a node.
The points that form the shape of the node, calculated from the ways that
are adjacent to this node.
"""
# Reference to the owning node; a raw Node is coerced in __post_init__.
node_ref: EntityRef[Node]
# Maps each oriented way to its slot in `way_distances`.
way_indexes: Dict[OrientedWay, int] = field(init=False)
# Distance from node center to where each way should start or end.
way_distances: List[float] = field(init=False)
polygon: Polygon = field(init=False, default_factory=list)
def __post_init__(self) -> None:
"""Derive the geometry data for the referenced node."""
if not isinstance(self.node_ref, EntityRef):
self.node_ref = EntityRef(self.node_ref)
node = self.node_ref()
ways = node.sorted_ways()
self.way_indexes = {w: i for i, w in enumerate(ways)}
# Filled in place by `build_polygon` below.
self.way_distances = [None for _ in ways]
self.polygon = build_polygon(node, ways, self.way_distances)
@property
def node(self) -> Node:
"""Get the referenced node."""
return self.node_ref()
def distance(self, oriented_way: OrientedWay) -> float:
"""Get distance from node center to where way should start or end."""
return self.way_distances[self.way_indexes[oriented_way]]
def calc_bounding_rect(self,
accumulated: BoundingRect = None) -> BoundingRect:
"""Calculate the bounding rect of the node."""
# An isolated node has no polygon; fall back to the point itself.
if not self.polygon:
return self.node.position.calc_bounding_rect(accumulated)
return calc_bounding_rect(self.polygon, accumulated)
def build_polygon(node: Node, ways: List[OrientedWay],
way_distances: List[float]) -> Polygon:
"""Calculate points for the geometric bounds of the node.

Produces two polygon vertices per incident way: odd slots hold the
corner between consecutive ways (first loop), even slots hold the
reflected points computed in the second loop. `way_distances` is filled
in place with the distance from the node center to each way's cut line.
"""
polygon = [None] * (2 * len(ways))
# Unit direction of each way pointing away from the node.
directions = tuple(w().direction_from_node(node, e).normalized()
for w, e in ways)
for i, (way, _) in enumerate(ways):
half_widths = tuple(w().total_lane_count * LANE_WIDTH / 2
for w in (ways[i - 1].way_ref, way))
# Points relative to the node position.
points = (directions[i - 1].rotated_left() * half_widths[0],
directions[i].rotated_right() * half_widths[1])
try:
point = line_intersection(points[0], directions[i - 1],
points[1], directions[i])
proportion = (points[0].distance(point)
/ points[0].distance(points[1]))
# Nearly parallel ways push the intersection far away; fall back
# to the midpoint between the two offset points in that case.
if ((len(directions) == 2 and proportion > 2.0)
or proportion > 5.0):
point = midpoint(*points)
except ZeroDivisionError:
point = midpoint(*points)
# For i == 0 this writes slot -1 (the last), closing the ring.
polygon[2 * i - 1] = point
for i, (_, direction) in enumerate(zip(ways, directions)):
farthest: Vector = max(
polygon[2 * i - 1], polygon[2 * i + 1],
key=lambda v, d=direction: v.dot_product(d))
projection, reflection = farthest.projection_reflection(direction)
way_distances[i] = projection.norm()
polygon[2 * i] = reflection
# Corners were computed relative to the node; translate to world space.
return [p + node.position for p in polygon] | 0.954223 | 0.464841 |
from collections import deque
class Solution:
    """Convert a binary tree into a BST that keeps the original shape.

    The node values are rewritten in place so that the inorder traversal
    of the tree is sorted, which makes the (unchanged) structure a BST.
    """

    def InOrder(self, root, inorder):
        """Append the inorder traversal of `root` to the list `inorder`."""
        if root is not None:
            self.InOrder(root.left, inorder)
            inorder.append(root.data)
            self.InOrder(root.right, inorder)

    def arrayToBST(self, arr, root):
        """Copy `arr` into the tree in inorder, consuming `arr` from the front.

        Kept for backward compatibility; `arr.pop(0)` makes this O(n^2).
        Internal callers should prefer `_write_inorder`.
        """
        if root is None:
            return
        self.arrayToBST(arr, root.left)
        root.data = arr[0]
        arr.pop(0)
        self.arrayToBST(arr, root.right)

    def _write_inorder(self, values, root):
        """Write values from iterator `values` into the tree in inorder, O(n)."""
        if root is None:
            return
        self._write_inorder(values, root.left)
        root.data = next(values)
        self._write_inorder(values, root.right)

    def binaryTreeToBST(self, root):
        """Rewrite node values in place so the tree satisfies the BST property.

        Returns the (same) root, or None for an empty tree.
        """
        if root is None:
            return
        # Collect the values in inorder, then sort them.
        arr = []
        self.InOrder(root, arr)
        arr.sort()
        # Iterator-based refill avoids the quadratic cost of popping the
        # front of a list at every node (the old arrayToBST approach).
        self._write_inorder(iter(arr), root)
        return root
# {
# Driver Code Starts
# Initial Template for Python 3
# Tree Node
class Node:
    """A binary-tree node holding an integer payload and two child links."""

    def __init__(self, val):
        self.data = val
        self.left = None
        self.right = None
# Function to Build Tree
def buildTree(s):
    """Build a binary tree from a level-order, space-separated string.

    Each token is an integer node value or "N" for a missing child.
    Returns the root `Node`, or None for an empty or null-root input.
    """
    # Corner case: empty input or explicitly null root.
    if len(s) == 0 or s[0] == "N":
        return None
    # split() already yields strings; no map(str, ...) needed.
    ip = s.split()
    root = Node(int(ip[0]))
    q = deque([root])
    i = 1  # index of the next token to consume
    # The deque tracks nodes awaiting children; len(q) replaces the old
    # hand-maintained `size` counter.
    while q and i < len(ip):
        currNode = q.popleft()
        # Left child.
        currVal = ip[i]
        if currVal != "N":
            currNode.left = Node(int(currVal))
            q.append(currNode.left)
        i += 1
        if i >= len(ip):
            break
        # Right child.
        currVal = ip[i]
        if currVal != "N":
            currNode.right = Node(int(currVal))
            q.append(currNode.right)
        i += 1
    return root
def printInorder(root):
    """Print the inorder traversal of the tree, space-separated, no newline."""
    stack = []
    node = root
    while stack or node is not None:
        # Descend to the leftmost unvisited node.
        while node is not None:
            stack.append(node)
            node = node.left
        node = stack.pop()
        print(node.data, end=' ')
        node = node.right
# Script entry point: read `t` test cases from stdin, each a level-order
# tree string; convert each tree to a BST in place and print its inorder
# traversal (which should come out sorted).
if __name__ == "__main__":
t = int(input())
for _ in range(0, t):
s = input()
root = buildTree(s)
Solution().binaryTreeToBST(root)
printInorder(root)
print()  # newline terminating the space-separated output of this case
# } Driver Code Ends | Competitive Programming/Binary Search Trees/Convert Binary tree into BST.py | from collections import deque
class Solution:
    """Convert a binary tree into a BST that keeps the original shape.

    The node values are rewritten in place so that the inorder traversal
    of the tree is sorted, which makes the (unchanged) structure a BST.
    """

    def InOrder(self, root, inorder):
        """Append the inorder traversal of `root` to the list `inorder`."""
        if root is not None:
            self.InOrder(root.left, inorder)
            inorder.append(root.data)
            self.InOrder(root.right, inorder)

    def arrayToBST(self, arr, root):
        """Copy `arr` into the tree in inorder, consuming `arr` from the front.

        Kept for backward compatibility; `arr.pop(0)` makes this O(n^2).
        Internal callers should prefer `_write_inorder`.
        """
        if root is None:
            return
        self.arrayToBST(arr, root.left)
        root.data = arr[0]
        arr.pop(0)
        self.arrayToBST(arr, root.right)

    def _write_inorder(self, values, root):
        """Write values from iterator `values` into the tree in inorder, O(n)."""
        if root is None:
            return
        self._write_inorder(values, root.left)
        root.data = next(values)
        self._write_inorder(values, root.right)

    def binaryTreeToBST(self, root):
        """Rewrite node values in place so the tree satisfies the BST property.

        Returns the (same) root, or None for an empty tree.
        """
        if root is None:
            return
        # Collect the values in inorder, then sort them.
        arr = []
        self.InOrder(root, arr)
        arr.sort()
        # Iterator-based refill avoids the quadratic cost of popping the
        # front of a list at every node (the old arrayToBST approach).
        self._write_inorder(iter(arr), root)
        return root
# {
# Driver Code Starts
# Initial Template for Python 3
# Tree Node
class Node:
    """A binary-tree node holding an integer payload and two child links."""

    def __init__(self, val):
        self.data = val
        self.left = None
        self.right = None
# Function to Build Tree
def buildTree(s):
    """Build a binary tree from a level-order, space-separated string.

    Each token is an integer node value or "N" for a missing child.
    Returns the root `Node`, or None for an empty or null-root input.
    """
    # Corner case: empty input or explicitly null root.
    if len(s) == 0 or s[0] == "N":
        return None
    # split() already yields strings; no map(str, ...) needed.
    ip = s.split()
    root = Node(int(ip[0]))
    q = deque([root])
    i = 1  # index of the next token to consume
    # The deque tracks nodes awaiting children; len(q) replaces the old
    # hand-maintained `size` counter.
    while q and i < len(ip):
        currNode = q.popleft()
        # Left child.
        currVal = ip[i]
        if currVal != "N":
            currNode.left = Node(int(currVal))
            q.append(currNode.left)
        i += 1
        if i >= len(ip):
            break
        # Right child.
        currVal = ip[i]
        if currVal != "N":
            currNode.right = Node(int(currVal))
            q.append(currNode.right)
        i += 1
    return root
def printInorder(root):
    """Print the inorder traversal of the tree, space-separated, no newline."""
    stack = []
    node = root
    while stack or node is not None:
        # Descend to the leftmost unvisited node.
        while node is not None:
            stack.append(node)
            node = node.left
        node = stack.pop()
        print(node.data, end=' ')
        node = node.right
# Script entry point: read `t` test cases from stdin, each a level-order
# tree string; convert each tree to a BST in place and print its inorder
# traversal (which should come out sorted).
if __name__ == "__main__":
t = int(input())
for _ in range(0, t):
s = input()
root = buildTree(s)
Solution().binaryTreeToBST(root)
printInorder(root)
print()  # newline terminating the space-separated output of this case
# } Driver Code Ends | 0.873026 | 0.718881 |
import numpy as np
import scipy as sp
import scipy.linalg
import pybie2d
from qfs.two_d_qfs import QFS_Evaluator
from ....annular.poisson import AnnularPoissonSolver
from ....annular.annular import ApproximateAnnularGeometry, RealAnnularGeometry
from ....derivatives import fourier, fd_x_4, fd_y_4
from ....utilities import affine_transformation
# Short aliases into pybie2d's point-set and Laplace layer-potential API.
PointSet = pybie2d.point_set.PointSet
Laplace_Layer_Form = pybie2d.kernels.high_level.laplace.Laplace_Layer_Form
Laplace_Layer_Apply = pybie2d.kernels.high_level.laplace.Laplace_Layer_Apply
Laplace_Layer_Singular_Form = pybie2d.kernels.high_level.laplace.Laplace_Layer_Singular_Form
# Singular (on-surface) single-layer matrix; the second argument is unused.
Singular_SLP = lambda src, _: Laplace_Layer_Singular_Form(src, ifcharge=True)
# Singular double-layer with the +/-1/2 jump term; `sign` selects side.
Singular_DLP = lambda src, _, sign: Laplace_Layer_Singular_Form(src, ifdipole=True) - sign*0.5*np.eye(src.N)
# Naive (off-surface) single/double-layer formation from src onto trg.
Naive_SLP = lambda src, trg: Laplace_Layer_Form(src, trg, ifcharge=True)
Naive_DLP = lambda src, trg: Laplace_Layer_Form(src, trg, ifdipole=True)
class PoissonSolver(object):
"""
Inhomogeneous Laplace Solver on general domain
"""
def __init__(self, ebdy, bump, bump_loc, solver_type='spectral', APS=None):
"""
Type can either be 'spectral' or 'fourth'
"""
self.ebdy = ebdy
# NOTE(review): `self.type` duplicates `self.solver_type` below — confirm
# which one external code uses before removing either.
self.type = solver_type
self.interior = self.ebdy.interior
self.solver_type = solver_type
if APS is None:
AAG = ApproximateAnnularGeometry(self.ebdy.bdy.N, self.ebdy.M,
self.ebdy.radial_width, self.ebdy.approximate_radius)
APS = AnnularPoissonSolver(AAG)
self.annular_solver = APS
# NOTE(review): `sp` shadows the module-level `scipy as sp` import here.
sp = ebdy.bdy.speed
cur = ebdy.bdy.curvature
self.RAG = RealAnnularGeometry(sp, cur, APS.AAG)
self.sign = 1 if self.interior else -1
self.Singular_DLP = lambda src, _: Singular_DLP(src, _, self.sign)
self._get_qfs()
# Fourier wavenumbers for the regular grid (angular frequency scaling).
kxv = np.fft.fftfreq(self.ebdy.grid.Nx, self.ebdy.grid.xh/(2*np.pi))
kyv = np.fft.fftfreq(self.ebdy.grid.Ny, self.ebdy.grid.yh/(2*np.pi))
self.kx, self.ky = np.meshgrid(kxv, kyv, indexing='ij')
self.ikx, self.iky = 1j*self.kx, 1j*self.ky
# Spectral Laplacian symbol; the (0,0) mode is set to Inf so that its
# reciprocal is 0, i.e. the mean mode is projected out when inverting.
# NOTE(review): `np.Inf` was removed in NumPy 2.0 — prefer `np.inf`.
self.lap = -self.kx*self.kx - self.ky*self.ky
self.lap[0,0] = np.Inf
self.ilap = 1.0/self.lap
self.lap[0,0] = 0.0
# Derivative operators: spectral differentiation or 4th-order FD.
if self.solver_type == 'spectral':
self.dx = lambda x: fourier(x, self.ikx)
self.dy = lambda x: fourier(x, self.iky)
else:
self.dx = lambda x: fd_x_4(x, self.ebdy.grid.xh)
self.dy = lambda x: fd_y_4(x, self.ebdy.grid.yh)
# Target point sets: radial grid, grid points outside the annulus, and
# all physical grid points.
self.radp = PointSet(ebdy.radial_x.ravel(), ebdy.radial_y.ravel())
self.gridp = PointSet(ebdy.grid.xg[ebdy.grid_not_in_annulus].ravel(), ebdy.grid.yg[ebdy.grid_not_in_annulus].ravel())
self.gridpa = PointSet(ebdy.grid.xg[ebdy.phys].ravel(), ebdy.grid.yg[ebdy.phys].ravel())
self.interpolation_order = 3 if self.solver_type == 'fourth' else np.Inf
# Radially symmetric bump around `bump_loc`, normalized to unit integral
# so it can absorb the mean of the forcing (see __call__).
grr = np.sqrt((self.ebdy.grid.xg-bump_loc[0])**2 + (self.ebdy.grid.yg-bump_loc[1])**2)
self.bumpy = bump(affine_transformation(grr, 0, self.ebdy.radial_width, 0, 1))
self.bumpy /= np.sum(self.bumpy)*self.ebdy.grid.xh*self.ebdy.grid.yh
def _get_qfs(self):
# construct qfs evaluators for the interface
self.interface_qfs_1 = QFS_Evaluator(self.ebdy.interface_qfs,
self.interior, [Singular_SLP,],
Naive_SLP, on_surface=True, form_b2c=False)
self.interface_qfs_2 = QFS_Evaluator(self.ebdy.interface_qfs,
not self.interior, [Singular_SLP,],
Naive_SLP, on_surface=True, form_b2c=False)
def get_bv(self, ur):
# Dirichlet trace of a radial field on the outer boundary.
return self.annular_solver.AAG.CO.obc_dirichlet[0].dot(ur)
def get_bn(self, ur):
# Neumann trace of a radial field on the outer boundary.
return self.annular_solver.AAG.CO.obc_neumann[0].dot(ur)
def __call__(self, f, fr, **kwargs):
"""Solve for the potential given grid forcing `f` and radial forcing `fr`.

Returns (uc, ur): the solution on the regular grid (zeroed outside the
physical domain) and on the radial grid.
"""
ebdy = self.ebdy
# get the grid-based solution
fc = f*ebdy.grid_step
# Subtract the mean via the bump so the periodic Poisson solve is solvable.
fc -= self.bumpy*np.sum(fc)*ebdy.grid.xh*ebdy.grid.yh
uc = np.fft.ifft2(np.fft.fft2(fc)*self.ilap).real
# evaluate this on the interface
bv = ebdy.interpolate_grid_to_interface(uc, order=self.interpolation_order)
# NOTE(review): debug print left in a hot path — consider removing.
print(np.abs(bv).sum())
# take the gradient of uc and evaluate on interface
ucx, ucy = self.dx(uc), self.dy(uc)
bx = ebdy.interpolate_grid_to_interface(ucx, order=self.interpolation_order)
by = ebdy.interpolate_grid_to_interface(ucy, order=self.interpolation_order)
ucn = bx*ebdy.bdy.normal_x + by*ebdy.bdy.normal_y
# compute the radial solution
ur = self.annular_solver.solve(self.RAG, fr, bv, bv, **kwargs)
# evaluate the normal derivative of the radial solution
DER = self.annular_solver.AAG.CO.ibc_neumann[0]
urn = np.array(DER.dot(ur))
# get the single layer to smooth the interface
# (density is the jump in normal derivative across the interface)
tau = urn - ucn
# get effective layer potentials for this
sigma1 = self.interface_qfs_1([tau,])
sigma2 = self.interface_qfs_2([tau,])
# evaluate these where they need to go
iq = ebdy.interface_qfs
rslp = Laplace_Layer_Apply(iq.exterior_source_bdy, self.radp, charge=sigma2)
gslp = Laplace_Layer_Apply(iq.interior_source_bdy, self.gridp, charge=sigma1)
# NOTE(review): `bslp` is computed but never used below — confirm dead.
bslp = Laplace_Layer_Apply(iq.exterior_source_bdy, ebdy.bdy, charge=sigma2)
# add these to the current solution
uc[ebdy.grid_not_in_annulus] += gslp
ur += rslp.reshape(ur.shape)
# interpolate ur onto uc
ebdy.interpolate_radial_to_grid(ur, uc)
# Zero out the non-physical region of the grid solution.
uc *= ebdy.phys
return uc, ur | ipde/solvers/single_boundary/interior/poisson.py | import numpy as np
import scipy as sp
import scipy.linalg
import pybie2d
from qfs.two_d_qfs import QFS_Evaluator
from ....annular.poisson import AnnularPoissonSolver
from ....annular.annular import ApproximateAnnularGeometry, RealAnnularGeometry
from ....derivatives import fourier, fd_x_4, fd_y_4
from ....utilities import affine_transformation
# Short aliases into pybie2d's point-set and Laplace layer-potential API.
PointSet = pybie2d.point_set.PointSet
Laplace_Layer_Form = pybie2d.kernels.high_level.laplace.Laplace_Layer_Form
Laplace_Layer_Apply = pybie2d.kernels.high_level.laplace.Laplace_Layer_Apply
Laplace_Layer_Singular_Form = pybie2d.kernels.high_level.laplace.Laplace_Layer_Singular_Form
# Singular (on-surface) single-layer matrix; the second argument is unused.
Singular_SLP = lambda src, _: Laplace_Layer_Singular_Form(src, ifcharge=True)
# Singular double-layer with the +/-1/2 jump term; `sign` selects side.
Singular_DLP = lambda src, _, sign: Laplace_Layer_Singular_Form(src, ifdipole=True) - sign*0.5*np.eye(src.N)
# Naive (off-surface) single/double-layer formation from src onto trg.
Naive_SLP = lambda src, trg: Laplace_Layer_Form(src, trg, ifcharge=True)
Naive_DLP = lambda src, trg: Laplace_Layer_Form(src, trg, ifdipole=True)
class PoissonSolver(object):
    """
    Inhomogeneous Laplace (Poisson) solver on a general domain.

    Couples a uniform-grid FFT Poisson solve with an annular solver in the
    radial region near the embedded boundary (``ebdy``); single-layer
    corrections glue the two solutions together across the interface.

    Parameters (as used by the visible code):
        ebdy        : embedded boundary object (grid, interface, QFS data)
        bump        : callable producing a compactly supported bump profile
        bump_loc    : (x, y) center for the bump used to zero the mean of f
        solver_type : 'spectral' (Fourier derivatives) or 'fourth'
                      (4th-order finite differences)
        APS         : optional pre-built AnnularPoissonSolver
    """
    def __init__(self, ebdy, bump, bump_loc, solver_type='spectral', APS=None):
        """
        Type can either be 'spectral' or 'fourth'
        """
        self.ebdy = ebdy
        self.type = solver_type
        self.interior = self.ebdy.interior
        self.solver_type = solver_type
        # build an annular Poisson solver on an approximate annulus unless the
        # caller supplied one
        if APS is None:
            AAG = ApproximateAnnularGeometry(self.ebdy.bdy.N, self.ebdy.M,
                self.ebdy.radial_width, self.ebdy.approximate_radius)
            APS = AnnularPoissonSolver(AAG)
        self.annular_solver = APS
        sp = ebdy.bdy.speed
        cur = ebdy.bdy.curvature
        self.RAG = RealAnnularGeometry(sp, cur, APS.AAG)
        # orientation sign for the singular double-layer potential
        self.sign = 1 if self.interior else -1
        # binds the module-level Singular_DLP with this solver's sign
        self.Singular_DLP = lambda src, _: Singular_DLP(src, _, self.sign)
        self._get_qfs()
        # Fourier wavenumbers for the background grid
        kxv = np.fft.fftfreq(self.ebdy.grid.Nx, self.ebdy.grid.xh/(2*np.pi))
        kyv = np.fft.fftfreq(self.ebdy.grid.Ny, self.ebdy.grid.yh/(2*np.pi))
        self.kx, self.ky = np.meshgrid(kxv, kyv, indexing='ij')
        self.ikx, self.iky = 1j*self.kx, 1j*self.ky
        self.lap = -self.kx*self.kx - self.ky*self.ky
        # zero mode is set to Inf so 1/lap yields 0 there (mean-zero
        # pseudo-inverse of the Laplacian), then restored to 0
        self.lap[0,0] = np.Inf
        self.ilap = 1.0/self.lap
        self.lap[0,0] = 0.0
        if self.solver_type == 'spectral':
            self.dx = lambda x: fourier(x, self.ikx)
            self.dy = lambda x: fourier(x, self.iky)
        else:
            self.dx = lambda x: fd_x_4(x, self.ebdy.grid.xh)
            self.dy = lambda x: fd_y_4(x, self.ebdy.grid.yh)
        # target point sets: radial grid, grid points outside the annulus,
        # and all physical grid points
        self.radp = PointSet(ebdy.radial_x.ravel(), ebdy.radial_y.ravel())
        self.gridp = PointSet(ebdy.grid.xg[ebdy.grid_not_in_annulus].ravel(), ebdy.grid.yg[ebdy.grid_not_in_annulus].ravel())
        self.gridpa = PointSet(ebdy.grid.xg[ebdy.phys].ravel(), ebdy.grid.yg[ebdy.phys].ravel())
        # spectral interpolation for 'spectral', cubic for 'fourth'
        self.interpolation_order = 3 if self.solver_type == 'fourth' else np.Inf
        # bump function centered at bump_loc, normalized to unit integral;
        # used in __call__ to enforce a mean-zero right-hand side
        grr = np.sqrt((self.ebdy.grid.xg-bump_loc[0])**2 + (self.ebdy.grid.yg-bump_loc[1])**2)
        self.bumpy = bump(affine_transformation(grr, 0, self.ebdy.radial_width, 0, 1))
        self.bumpy /= np.sum(self.bumpy)*self.ebdy.grid.xh*self.ebdy.grid.yh
    def _get_qfs(self):
        # construct qfs evaluators for the interface
        # qfs_1: sources on the side matching self.interior;
        # qfs_2: sources on the opposite side
        self.interface_qfs_1 = QFS_Evaluator(self.ebdy.interface_qfs,
                            self.interior, [Singular_SLP,],
                            Naive_SLP, on_surface=True, form_b2c=False)
        self.interface_qfs_2 = QFS_Evaluator(self.ebdy.interface_qfs,
                            not self.interior, [Singular_SLP,],
                            Naive_SLP, on_surface=True, form_b2c=False)
    def get_bv(self, ur):
        # Dirichlet trace of a radial solution at the outer boundary
        return self.annular_solver.AAG.CO.obc_dirichlet[0].dot(ur)
    def get_bn(self, ur):
        # Neumann trace of a radial solution at the outer boundary
        return self.annular_solver.AAG.CO.obc_neumann[0].dot(ur)
    def __call__(self, f, fr, **kwargs):
        """Solve lap(u) = f; returns (uc, ur): grid and radial solutions.

        f  : right-hand side on the background grid
        fr : right-hand side on the radial (annular) grid
        """
        ebdy = self.ebdy
        # get the grid-based solution
        fc = f*ebdy.grid_step
        # subtract a bump multiple so the RHS has zero mean (required for the
        # periodic FFT solve)
        fc -= self.bumpy*np.sum(fc)*ebdy.grid.xh*ebdy.grid.yh
        uc = np.fft.ifft2(np.fft.fft2(fc)*self.ilap).real
        # evaluate this on the interface
        bv = ebdy.interpolate_grid_to_interface(uc, order=self.interpolation_order)
        # NOTE(review): debug print left in; consider removing or logging
        print(np.abs(bv).sum())
        # take the gradient of uc and evaluate on interface
        ucx, ucy = self.dx(uc), self.dy(uc)
        bx = ebdy.interpolate_grid_to_interface(ucx, order=self.interpolation_order)
        by = ebdy.interpolate_grid_to_interface(ucy, order=self.interpolation_order)
        ucn = bx*ebdy.bdy.normal_x + by*ebdy.bdy.normal_y
        # compute the radial solution
        # NOTE(review): bv is passed for both boundary-condition arguments —
        # presumably matching Dirichlet data on both annulus edges; confirm
        # against AnnularPoissonSolver.solve's signature
        ur = self.annular_solver.solve(self.RAG, fr, bv, bv, **kwargs)
        # evaluate the normal derivative of the radial solution
        DER = self.annular_solver.AAG.CO.ibc_neumann[0]
        urn = np.array(DER.dot(ur))
        # get the single layer to smooth the interface
        tau = urn - ucn
        # get effective layer potentials for this
        sigma1 = self.interface_qfs_1([tau,])
        sigma2 = self.interface_qfs_2([tau,])
        # evaluate these where they need to go
        iq = ebdy.interface_qfs
        rslp = Laplace_Layer_Apply(iq.exterior_source_bdy, self.radp, charge=sigma2)
        gslp = Laplace_Layer_Apply(iq.interior_source_bdy, self.gridp, charge=sigma1)
        # NOTE(review): bslp is computed but never used below — dead code?
        bslp = Laplace_Layer_Apply(iq.exterior_source_bdy, ebdy.bdy, charge=sigma2)
        # add these to the current solution
        uc[ebdy.grid_not_in_annulus] += gslp
        ur += rslp.reshape(ur.shape)
        # interpolate ur onto uc
        ebdy.interpolate_radial_to_grid(ur, uc)
        uc *= ebdy.phys
        return uc, ur
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision
from art.data_generators import *
from art.utils import *
from art.classifiers import *
from art.attacks import *
import numpy as np
import resnet as resnet
import argparse
import models
from tqdm import tqdm
parser = argparse.ArgumentParser()
import warnings
warnings.filterwarnings("ignore")
# ---- command-line options ------------------------------------------------
parser.add_argument('--s', type=str, default='mnist',help='attack on which dataset:mnist, svhn, caltech101 or caltech256')
parser.add_argument('--d', type=str, default='l2',help='attack based on which distance met:inf,l1,l2')
parser.add_argument('--m', type=str, default='fgsm',help='attack based on which method:fgsm,pgd,cw,boundary,deepfool,jsma,bim,zoo(using --a w)')
parser.add_argument('--e', type=float, default=1.5,help='max distance between adv example and the ori:inf--0.3 for mnist 8/255.0 for others,l2--1.5 for mnist')
parser.add_argument('--a',type=str,default='w',help='attack method including whitebox(w) and blackbox(b)')
parser.add_argument('--at',type=str,default=None,help='model under attack with which method to train:None, fgsm ,pgd')
parser.add_argument('--atw',type=str,default=None,help='only in blackbox attack, which method helping model used:None, fgsm, pgd')
args = parser.parse_args()
print(args)
# pin all work to the first GPU (os/torch come via the wildcard art imports)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Hyper-parameters
param = {
    'test_batch_size': 100,
}
# resize every test image to 64x64 to match the classifier input shape
transform_test = transforms.Compose([torchvision.transforms.Resize((64, 64)),
                                     transforms.ToTensor(),
                                     ])
# NOTE(review): only mnist/svhn are supported despite the caltech help text
if args.s=='mnist':
    testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform_test)
elif args.s=='svhn':
    testset = torchvision.datasets.SVHN(root='./data', split='test', download=True, transform=transform_test)
else:
    print('Wrong dataset')
    exit(0)
loader_test = torch.utils.data.DataLoader(testset, batch_size=param['test_batch_size'],
                                          shuffle=True, num_workers=8)
class res_m(nn.Module):
    """Adapter exposing only the second output of a two-output model.

    The wrapped model is expected to return a pair; ``forward`` discards the
    first element and returns the second (the logits used by the attacks).
    """
    def __init__(self, model1):
        super().__init__()
        self.m1 = model1
    def forward(self, input):
        discarded, logits = self.m1(input)
        return logits
# NOTE(review): pydataloader is created but never used below
pydataloader=PyTorchDataGenerator(loader_test,10000,param['test_batch_size'])
# materialize the whole test set as numpy batches for the attack loop
pylist = []
for (x,y) in loader_test:
    x=x.data.cpu().numpy()
    y=y.data.cpu().numpy()
    pylist.append((x, y))
# Setup model to be attacked
# 'w' = whitebox: craft and evaluate on the same (wrapped) model
if args.a=='w':
    net = resnet.resnet101().cuda()
    if args.at is None:
        checkpoint = torch.load('bestmodel_params.pkl')
        net.load_state_dict(checkpoint['state_dict'])
    else:
        # adversarially-trained variant of the target model
        checkpoint = torch.load('bestmodel_params_adv_train_%s.pkl'%args.at)
        net.load_state_dict(checkpoint['state_dict'])
    net.eval()
    res = res_m(net).eval()
    loss = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.Adamax(res.parameters())
    fmodel = PyTorchClassifier(
        res,loss=loss,optimizer=optimizer,input_shape=(3,64,64),nb_classes=10,clip_values=(0.0, 1.0))
    evalmodel = PyTorchClassifier(
        res,loss=loss,optimizer=optimizer,input_shape=(3,64,64),nb_classes=10,clip_values=(0.0, 1.0))
# 'b' = blackbox: craft on a substitute model (net), evaluate on the real
# target (netblack)
elif args.a=='b':
    netblack = resnet.resnet101().cuda()
    net=models.resnet101().cuda()
    if args.atw is None:
        checkpoint = torch.load('bestmodel_params_resnet.pkl')
        net.load_state_dict(checkpoint['state_dict'])
    else:
        checkpoint = torch.load('bestmodel_params_resnet_adv_train_%s.pkl'%args.atw)
        net.load_state_dict(checkpoint['state_dict'])
    net.eval()
    if args.at is None:
        checkpoint = torch.load('bestmodel_params.pkl')
        netblack.load_state_dict(checkpoint['state_dict'])
    else:
        checkpoint = torch.load('bestmodel_params_adv_train_%s.pkl'%args.at)
        netblack.load_state_dict(checkpoint['state_dict'])
    res_black = res_m(netblack).eval()
    loss1 = nn.CrossEntropyLoss().cuda()
    optimizer1 = torch.optim.Adamax(net.parameters())
    loss2 = nn.CrossEntropyLoss().cuda()
    optimizer2 = torch.optim.Adamax(res_black.parameters())
    fmodel = PyTorchClassifier(
        net, loss=loss1,optimizer=optimizer1,input_shape=(3,64,64),nb_classes=10,clip_values=(0.0, 1.0))
    evalmodel = PyTorchClassifier(
        res_black, loss=loss2,optimizer=optimizer2,input_shape=(3,64,64),nb_classes=10,clip_values=(0.0, 1.0))
else:
    print('wrong attack type')
    exit(0)
# running counts of correctly classified clean / adversarial samples
ori_acc=0
adv_acc=0
loadertrain = tqdm(pylist, desc='{}'.format('attack'), ncols=0)
for x_train, y_train in loadertrain:
    # MNIST is single-channel: replicate to 3 channels for the RGB model
    if args.s=='mnist':
        x_train=np.stack((x_train,x_train,x_train),1)
        x_train.shape=(param['test_batch_size'],3,64,64)
    preds = np.argmax(fmodel.predict(x_train,batch_size=param['test_batch_size']), axis=1)
    preds.shape = (param['test_batch_size'])
    # keep integer labels for accuracy; one-hot y_train for the attacks
    y=y_train.copy()
    y.shape = (param['test_batch_size'])
    y_train=to_categorical(y_train,10)
    acc_o = np.sum(preds == y)
    ori_acc+=acc_o
    # Craft adversarial samples with FGSM
    epsilon = args.e # Maximum perturbation
    # NOTE(review): attack objects are re-created every batch; they could be
    # hoisted out of the loop
    if args.m=='fgsm':
        if args.d=='inf':
            adv_crafter = FastGradientMethod(fmodel,norm=np.inf,eps=epsilon,batch_size=param['test_batch_size'])
        elif args.d=='l2':
            adv_crafter = FastGradientMethod(fmodel,norm=2,eps=epsilon,batch_size=param['test_batch_size'])
        elif args.d=='l1':
            adv_crafter = FastGradientMethod(fmodel,norm=1,eps=epsilon,batch_size=param['test_batch_size'])
        else:
            print('wrong distance')
            exit(0)
        x_test_adv = adv_crafter.generate(x_train,y_train)
    elif args.m=='pgd':
        if args.d=='inf':
            if args.s=='mnist':
                adv_crafter = ProjectedGradientDescent(fmodel,norm=np.inf,eps=epsilon,batch_size=param['test_batch_size'])
            elif args.s=='svhn':
                adv_crafter = ProjectedGradientDescent(fmodel, norm=np.inf, eps=epsilon, eps_step=2 / 255.0,
                                                       max_iter=40, batch_size=param['test_batch_size'])
        elif args.d=='l2':
            adv_crafter = ProjectedGradientDescent(fmodel,norm=2,eps=epsilon,batch_size=param['test_batch_size'])
        elif args.d=='l1':
            adv_crafter = ProjectedGradientDescent(fmodel,norm=1,eps=epsilon,batch_size=param['test_batch_size'])
        else:
            print('wrong distance')
            exit(0)
        x_test_adv = adv_crafter.generate(x_train,y_train)
    elif args.m=='boundary':
        # decision-based attack; labels are not needed
        if args.d=='inf':
            adv_crafter = HopSkipJump(fmodel,targeted=False,norm=np.inf,max_eval=100)
        elif args.d=='l2':
            adv_crafter = HopSkipJump(fmodel, targeted=False,norm=2,max_eval=100)
        else:
            print('wrong distance')
            exit(0)
        x_test_adv = adv_crafter.generate(x_train)
    elif args.m=='cw':
        if args.d=='l2':
            adv_crafter = CarliniL2Method(fmodel,batch_size=param['test_batch_size'])
        elif args.d=='inf':
            adv_crafter = CarliniLInfMethod(fmodel,eps=epsilon,batch_size=param['test_batch_size'])
        else:
            print('wrong distance')
            exit(0)
        x_test_adv = adv_crafter.generate(x_train,y_train)
    elif args.m=='deepfool':
        adv_crafter = DeepFool(fmodel,batch_size=param['test_batch_size'])
        x_test_adv = adv_crafter.generate(x_train,y_train)
    elif args.m=='jsma':
        adv_crafter = SaliencyMapMethod(fmodel, batch_size=param['test_batch_size'])
        x_test_adv = adv_crafter.generate(x_train, y_train)
    elif args.m=='bim':
        adv_crafter = BasicIterativeMethod(fmodel, batch_size=param['test_batch_size'])
        x_test_adv = adv_crafter.generate(x_train, y_train)
    elif args.m == 'zoo' and args.a == 'w':
        adv_crafter = ZooAttack(fmodel,nb_parallel=1024, batch_size=param['test_batch_size'])
        x_test_adv = adv_crafter.generate(x_train, y_train)
    elif args.m == 'zoo' and args.a == 'b':
        print('zoo used in --a w condition')
        exit(0)
    else:
        print('wrong method')
        exit(0)
    # NOTE(review): ART generate() does not return None, so the else branch
    # below is effectively dead — presumably defensive; verify
    if x_test_adv is not None:
        preds = np.argmax(evalmodel.predict(x_test_adv), axis=1)
        preds.shape = (param['test_batch_size'])
        acc_a = np.sum(preds == y)
        adv_acc += acc_a
    else:
        preds = np.argmax(evalmodel.predict(x_train), axis=1)
        preds.shape = (param['test_batch_size'])
        acc_a = np.sum(preds == y)
        adv_acc += acc_a
    loadertrain.set_postfix(oriacc=ori_acc,
                            advacc=adv_acc)
# NOTE(review): the 10000 divisor is the MNIST test-set size; SVHN has 26032
# test images, so these percentages are wrong for --s svhn
print("\nTest accuracy: %.2f%%" % (ori_acc/10000 * 100))
print("\nTest accuracy on adversarial sample: %.2f%%" % (adv_acc/10000 * 100))
import torchvision.transforms as transforms
import torchvision
from art.data_generators import *
from art.utils import *
from art.classifiers import *
from art.attacks import *
import numpy as np
import resnet as resnet
import argparse
import models
from tqdm import tqdm
# NOTE(review): everything from here to the end of this script duplicates the
# attack script earlier in this file (dataset/extraction artifact) — consider
# deduplicating.
parser = argparse.ArgumentParser()
import warnings
warnings.filterwarnings("ignore")
parser.add_argument('--s', type=str, default='mnist',help='attack on which dataset:mnist, svhn, caltech101 or caltech256')
parser.add_argument('--d', type=str, default='l2',help='attack based on which distance met:inf,l1,l2')
parser.add_argument('--m', type=str, default='fgsm',help='attack based on which method:fgsm,pgd,cw,boundary,deepfool,jsma,bim,zoo(using --a w)')
parser.add_argument('--e', type=float, default=1.5,help='max distance between adv example and the ori:inf--0.3 for mnist 8/255.0 for others,l2--1.5 for mnist')
parser.add_argument('--a',type=str,default='w',help='attack method including whitebox(w) and blackbox(b)')
parser.add_argument('--at',type=str,default=None,help='model under attack with which method to train:None, fgsm ,pgd')
parser.add_argument('--atw',type=str,default=None,help='only in blackbox attack, which method helping model used:None, fgsm, pgd')
args = parser.parse_args()
print(args)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Hyper-parameters
param = {
    'test_batch_size': 100,
}
transform_test = transforms.Compose([torchvision.transforms.Resize((64, 64)),
                                     transforms.ToTensor(),
                                     ])
if args.s=='mnist':
    testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform_test)
elif args.s=='svhn':
    testset = torchvision.datasets.SVHN(root='./data', split='test', download=True, transform=transform_test)
else:
    print('Wrong dataset')
    exit(0)
loader_test = torch.utils.data.DataLoader(testset, batch_size=param['test_batch_size'],
                                          shuffle=True, num_workers=8)
class res_m(nn.Module):
    """Adapter exposing only the second output of a two-output model.

    The wrapped model is expected to return a pair; ``forward`` discards the
    first element and returns the second (the logits used by the attacks).
    """
    def __init__(self, model1):
        super().__init__()
        self.m1 = model1
    def forward(self, input):
        discarded, logits = self.m1(input)
        return logits
# (duplicate of the model-setup section earlier in this file)
# NOTE(review): pydataloader is created but never used below
pydataloader=PyTorchDataGenerator(loader_test,10000,param['test_batch_size'])
pylist = []
for (x,y) in loader_test:
    x=x.data.cpu().numpy()
    y=y.data.cpu().numpy()
    pylist.append((x, y))
# Setup model to be attacked
if args.a=='w':
    net = resnet.resnet101().cuda()
    if args.at is None:
        checkpoint = torch.load('bestmodel_params.pkl')
        net.load_state_dict(checkpoint['state_dict'])
    else:
        checkpoint = torch.load('bestmodel_params_adv_train_%s.pkl'%args.at)
        net.load_state_dict(checkpoint['state_dict'])
    net.eval()
    res = res_m(net).eval()
    loss = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.Adamax(res.parameters())
    fmodel = PyTorchClassifier(
        res,loss=loss,optimizer=optimizer,input_shape=(3,64,64),nb_classes=10,clip_values=(0.0, 1.0))
    evalmodel = PyTorchClassifier(
        res,loss=loss,optimizer=optimizer,input_shape=(3,64,64),nb_classes=10,clip_values=(0.0, 1.0))
elif args.a=='b':
    netblack = resnet.resnet101().cuda()
    net=models.resnet101().cuda()
    if args.atw is None:
        checkpoint = torch.load('bestmodel_params_resnet.pkl')
        net.load_state_dict(checkpoint['state_dict'])
    else:
        checkpoint = torch.load('bestmodel_params_resnet_adv_train_%s.pkl'%args.atw)
        net.load_state_dict(checkpoint['state_dict'])
    net.eval()
    if args.at is None:
        checkpoint = torch.load('bestmodel_params.pkl')
        netblack.load_state_dict(checkpoint['state_dict'])
    else:
        checkpoint = torch.load('bestmodel_params_adv_train_%s.pkl'%args.at)
        netblack.load_state_dict(checkpoint['state_dict'])
    res_black = res_m(netblack).eval()
    loss1 = nn.CrossEntropyLoss().cuda()
    optimizer1 = torch.optim.Adamax(net.parameters())
    loss2 = nn.CrossEntropyLoss().cuda()
    optimizer2 = torch.optim.Adamax(res_black.parameters())
    fmodel = PyTorchClassifier(
        net, loss=loss1,optimizer=optimizer1,input_shape=(3,64,64),nb_classes=10,clip_values=(0.0, 1.0))
    evalmodel = PyTorchClassifier(
        res_black, loss=loss2,optimizer=optimizer2,input_shape=(3,64,64),nb_classes=10,clip_values=(0.0, 1.0))
else:
    print('wrong attack type')
    exit(0)
# (duplicate of the attack loop earlier in this file)
ori_acc=0
adv_acc=0
loadertrain = tqdm(pylist, desc='{}'.format('attack'), ncols=0)
for x_train, y_train in loadertrain:
    if args.s=='mnist':
        x_train=np.stack((x_train,x_train,x_train),1)
        x_train.shape=(param['test_batch_size'],3,64,64)
    preds = np.argmax(fmodel.predict(x_train,batch_size=param['test_batch_size']), axis=1)
    preds.shape = (param['test_batch_size'])
    y=y_train.copy()
    y.shape = (param['test_batch_size'])
    y_train=to_categorical(y_train,10)
    acc_o = np.sum(preds == y)
    ori_acc+=acc_o
    # Craft adversarial samples with FGSM
    epsilon = args.e # Maximum perturbation
    if args.m=='fgsm':
        if args.d=='inf':
            adv_crafter = FastGradientMethod(fmodel,norm=np.inf,eps=epsilon,batch_size=param['test_batch_size'])
        elif args.d=='l2':
            adv_crafter = FastGradientMethod(fmodel,norm=2,eps=epsilon,batch_size=param['test_batch_size'])
        elif args.d=='l1':
            adv_crafter = FastGradientMethod(fmodel,norm=1,eps=epsilon,batch_size=param['test_batch_size'])
        else:
            print('wrong distance')
            exit(0)
        x_test_adv = adv_crafter.generate(x_train,y_train)
    elif args.m=='pgd':
        if args.d=='inf':
            if args.s=='mnist':
                adv_crafter = ProjectedGradientDescent(fmodel,norm=np.inf,eps=epsilon,batch_size=param['test_batch_size'])
            elif args.s=='svhn':
                adv_crafter = ProjectedGradientDescent(fmodel, norm=np.inf, eps=epsilon, eps_step=2 / 255.0,
                                                       max_iter=40, batch_size=param['test_batch_size'])
        elif args.d=='l2':
            adv_crafter = ProjectedGradientDescent(fmodel,norm=2,eps=epsilon,batch_size=param['test_batch_size'])
        elif args.d=='l1':
            adv_crafter = ProjectedGradientDescent(fmodel,norm=1,eps=epsilon,batch_size=param['test_batch_size'])
        else:
            print('wrong distance')
            exit(0)
        x_test_adv = adv_crafter.generate(x_train,y_train)
    elif args.m=='boundary':
        if args.d=='inf':
            adv_crafter = HopSkipJump(fmodel,targeted=False,norm=np.inf,max_eval=100)
        elif args.d=='l2':
            adv_crafter = HopSkipJump(fmodel, targeted=False,norm=2,max_eval=100)
        else:
            print('wrong distance')
            exit(0)
        x_test_adv = adv_crafter.generate(x_train)
    elif args.m=='cw':
        if args.d=='l2':
            adv_crafter = CarliniL2Method(fmodel,batch_size=param['test_batch_size'])
        elif args.d=='inf':
            adv_crafter = CarliniLInfMethod(fmodel,eps=epsilon,batch_size=param['test_batch_size'])
        else:
            print('wrong distance')
            exit(0)
        x_test_adv = adv_crafter.generate(x_train,y_train)
    elif args.m=='deepfool':
        adv_crafter = DeepFool(fmodel,batch_size=param['test_batch_size'])
        x_test_adv = adv_crafter.generate(x_train,y_train)
    elif args.m=='jsma':
        adv_crafter = SaliencyMapMethod(fmodel, batch_size=param['test_batch_size'])
        x_test_adv = adv_crafter.generate(x_train, y_train)
    elif args.m=='bim':
        adv_crafter = BasicIterativeMethod(fmodel, batch_size=param['test_batch_size'])
        x_test_adv = adv_crafter.generate(x_train, y_train)
    elif args.m == 'zoo' and args.a == 'w':
        adv_crafter = ZooAttack(fmodel,nb_parallel=1024, batch_size=param['test_batch_size'])
        x_test_adv = adv_crafter.generate(x_train, y_train)
    elif args.m == 'zoo' and args.a == 'b':
        print('zoo used in --a w condition')
        exit(0)
    else:
        print('wrong method')
        exit(0)
    if x_test_adv is not None:
        preds = np.argmax(evalmodel.predict(x_test_adv), axis=1)
        preds.shape = (param['test_batch_size'])
        acc_a = np.sum(preds == y)
        adv_acc += acc_a
    else:
        preds = np.argmax(evalmodel.predict(x_train), axis=1)
        preds.shape = (param['test_batch_size'])
        acc_a = np.sum(preds == y)
        adv_acc += acc_a
    loadertrain.set_postfix(oriacc=ori_acc,
                            advacc=adv_acc)
print("\nTest accuracy: %.2f%%" % (ori_acc/10000 * 100))
print("\nTest accuracy on adversarial sample: %.2f%%" % (adv_acc/10000 * 100))
import os, json, logging, requests
# Connection defaults for the local edge service.
PROTOCOL = 'http://'
IP_ADDRESS = '127.0.0.1'
IO_PORT = '8078'
class BaseRequest(object):
    """JSON-over-HTTP helper for the local edge service.

    Builds the base URL from ip/port and loads a bearer token from
    /var/run/python/inside.txt for the Authorization header.
    """
    def __init__(self, ip=IP_ADDRESS, port=IO_PORT):
        self.ip_address = ip
        self.port = port
        self.url_base = PROTOCOL + self.ip_address + ':' + str(self.port)
        self.head_file = '/var/run/python/inside.txt'
        # BUG FIX: __get_headers previously assigned self.headers as a side
        # effect and returned None, so this assignment always reset headers
        # to None.  It now returns the header dict (or None when no token
        # file exists).
        self.headers = self.__get_headers()
    def __get_headers(self):
        """Read the bearer-token file and return the auth header dict.

        Returns:
            dict with an 'Authorization' entry, or None when the token file
            is missing (kept non-fatal, matching the original best-effort).
        Raises:
            ValueError: when the token file exists but is empty.
        """
        if not os.path.exists(self.head_file):
            # BUG FIX: the original built a FileNotFoundError instance
            # without raising it (a Python-2 compatibility shim); warn
            # instead of failing.
            logging.warning('Can not find head file: %s', self.head_file)
            return None
        with open(self.head_file, 'r') as f:
            head = f.read()
        if not head:
            raise ValueError('value is invalid')
        return {'Authorization': 'Bearer ' + head}
    def error_to_string(self, code):
        """Map a negative errno-style code to a message; None when unknown."""
        error_code = {-4: 'Interrupted system call',
         -13: 'Permission denied',
         -16: 'Device Busy',
         -22: 'Invalid argument',
         -23: 'Error Request',
         -110: 'Connection timed out',
         -113: 'IP Invalid'}
        return error_code.get(code)
    def parse_result(self, r):
        """Decode a response body as JSON.

        On malformed JSON the payload is logged and the generic
        'Invalid argument' message is returned instead of raising.
        """
        try:
            return json.loads(r.text)
        except ValueError as e:  # json.JSONDecodeError subclasses ValueError
            logging.error('Error: %s, %s' % (r.text, e))
            return self.error_to_string(-22)
    def get_url_info(self, url, params=''):
        """GET ``url`` and return the parsed JSON body."""
        r = requests.get(url, params=params, headers=self.headers)
        return self.parse_result(r)
    def put_url_info(self, url, json_data):
        """PUT JSON-encoded ``json_data`` to ``url``; return parsed body."""
        r = requests.put(url, data=(json.dumps(json_data)), headers=self.headers)
        return self.parse_result(r)
    def post_url_info(self, url, json_data):
        """POST JSON-encoded ``json_data`` to ``url``; return parsed body."""
        data = json.dumps(json_data)
        r = requests.post(url, data=data, headers=self.headers)
        return self.parse_result(r)
# Connection defaults for the local edge service.
PROTOCOL = 'http://'
IP_ADDRESS = '127.0.0.1'
IO_PORT = '8078'
class BaseRequest(object):
    """JSON-over-HTTP helper for the local edge service.

    Builds the base URL from ip/port and loads a bearer token from
    /var/run/python/inside.txt for the Authorization header.
    """
    def __init__(self, ip=IP_ADDRESS, port=IO_PORT):
        self.ip_address = ip
        self.port = port
        self.url_base = PROTOCOL + self.ip_address + ':' + str(self.port)
        self.head_file = '/var/run/python/inside.txt'
        # BUG FIX: __get_headers previously assigned self.headers as a side
        # effect and returned None, so this assignment always reset headers
        # to None.  It now returns the header dict (or None when no token
        # file exists).
        self.headers = self.__get_headers()
    def __get_headers(self):
        """Read the bearer-token file and return the auth header dict.

        Returns:
            dict with an 'Authorization' entry, or None when the token file
            is missing (kept non-fatal, matching the original best-effort).
        Raises:
            ValueError: when the token file exists but is empty.
        """
        if not os.path.exists(self.head_file):
            # BUG FIX: the original built a FileNotFoundError instance
            # without raising it (a Python-2 compatibility shim); warn
            # instead of failing.
            logging.warning('Can not find head file: %s', self.head_file)
            return None
        with open(self.head_file, 'r') as f:
            head = f.read()
        if not head:
            raise ValueError('value is invalid')
        return {'Authorization': 'Bearer ' + head}
    def error_to_string(self, code):
        """Map a negative errno-style code to a message; None when unknown."""
        error_code = {-4: 'Interrupted system call',
         -13: 'Permission denied',
         -16: 'Device Busy',
         -22: 'Invalid argument',
         -23: 'Error Request',
         -110: 'Connection timed out',
         -113: 'IP Invalid'}
        return error_code.get(code)
    def parse_result(self, r):
        """Decode a response body as JSON.

        On malformed JSON the payload is logged and the generic
        'Invalid argument' message is returned instead of raising.
        """
        try:
            return json.loads(r.text)
        except ValueError as e:  # json.JSONDecodeError subclasses ValueError
            logging.error('Error: %s, %s' % (r.text, e))
            return self.error_to_string(-22)
    def get_url_info(self, url, params=''):
        """GET ``url`` and return the parsed JSON body."""
        r = requests.get(url, params=params, headers=self.headers)
        return self.parse_result(r)
    def put_url_info(self, url, json_data):
        """PUT JSON-encoded ``json_data`` to ``url``; return parsed body."""
        r = requests.put(url, data=(json.dumps(json_data)), headers=self.headers)
        return self.parse_result(r)
    def post_url_info(self, url, json_data):
        """POST JSON-encoded ``json_data`` to ``url``; return parsed body."""
        data = json.dumps(json_data)
        r = requests.post(url, data=data, headers=self.headers)
        return self.parse_result(r)
import copy
import os
import time
from typing import Dict
import torch
import torch.nn as nn
class InputBlock(nn.Conv2d):
    """Input convolution mapping the raw feature planes to the trunk width."""
    def __init__(self, nfilters, input_channels, kernel_size=3):
        # padding=1 preserves spatial size only for the default kernel_size=3;
        # other kernel sizes would change the spatial dims -- TODO confirm intent
        super().__init__(input_channels, nfilters, kernel_size=kernel_size, padding=1)
class BNConv(nn.Module):
    """Pre-activation conv unit: BatchNorm -> ReLU -> Conv2d.

    Padding is kernel_size // 2, so odd kernels preserve spatial size.
    When nfilters_out is falsy the channel count is unchanged.
    """
    def __init__(self, nfilters, nfilters_out=None, kernel_size=3, bias=False):
        super().__init__()
        self.bn = nn.BatchNorm2d(nfilters)
        same_pad = kernel_size // 2
        out_channels = nfilters_out if nfilters_out else nfilters
        self.conv = nn.Conv2d(nfilters, out_channels, kernel_size=kernel_size, padding=same_pad, bias=bias)
    def forward(self, inp):
        normed = self.bn(inp)
        activated = nn.functional.relu(normed)
        return self.conv(activated)
class ResNetBlock(nn.Module):
    """Pre-activation residual block: x + conv2(conv1(x))."""
    def __init__(self, nfilters, kernel_size=3):
        super().__init__()
        self.conv1 = BNConv(nfilters, kernel_size=kernel_size)
        self.conv2 = BNConv(nfilters, kernel_size=kernel_size)
    def forward(self, inp):
        residual = self.conv1(inp)
        residual = self.conv2(residual)
        return inp + residual
class GlobalPoolingMeanMaxBias(nn.Module):
    """Global-pooling bias layer.

    The first ``nfilters_pooled`` channels are BN+ReLU'd and globally pooled
    (mean and max over the spatial dims); a linear layer maps the pooled
    vector to one bias per remaining channel, which is broadcast-added to
    those channels.  Output keeps the full ``nfilters`` channel count.
    """
    def __init__(self, nfilters, nfilters_pooled):
        super().__init__()
        self.nfilters = nfilters
        self.nfilters_pooled = nfilters_pooled
        self.bn = nn.BatchNorm2d(nfilters_pooled)
        # mean + max pooling -> 2 * nfilters_pooled input features
        self.dense = nn.Linear(2 * self.nfilters_pooled, self.nfilters - self.nfilters_pooled)
    def forward(self, inp):
        tg = nn.functional.relu(self.bn(inp[:, : self.nfilters_pooled]))
        # BUG FIX: Tensor.max(dim=...) takes a single int, not a tuple, so
        # tg.max(dim=(2, 3)) raised TypeError.  Chain the two spatial
        # reductions (same pattern as ConvolutionalPolicyHead below).
        pooled = torch.cat([tg.mean(dim=(2, 3)), tg.max(dim=2)[0].max(dim=2)[0]], dim=1)
        biases = self.dense(pooled)
        tx_biased = inp[:, self.nfilters_pooled :] + biases.unsqueeze(2).unsqueeze(3)
        return torch.cat([tg, tx_biased], dim=1)
class GlobalPoolingBlock(nn.Module):
    """Residual block with a global-pooling bias layer between its convs."""
    def __init__(self, nfilters, nfilters_pooled, kernel_size=3, pooling_cls=GlobalPoolingMeanMaxBias):
        super().__init__()
        self.bias = pooling_cls(nfilters=nfilters, nfilters_pooled=nfilters_pooled)
        # BUG FIX: kernel_size was passed positionally into BNConv's second
        # parameter (nfilters_out), silently changing the conv output channel
        # count to `kernel_size` and breaking the residual add.
        self.conv1 = BNConv(nfilters, kernel_size=kernel_size)
        self.conv2 = BNConv(nfilters, kernel_size=kernel_size)
    def forward(self, inp):
        out = self.conv2(self.bias(self.conv1(inp)))
        return inp + out
class ValueHead(nn.Module):
    """Value head: 1x1 BNConv -> BN -> flatten -> 2-layer MLP -> softmax.

    Produces a distribution over ``value_size`` outcomes per sample.
    ``state_plane_size`` is the flattened spatial size (H * W) of the trunk.
    """
    def __init__(self, state_plane_size, nfilters_in, value_size, nfilters_mid=3, nunits_mid=16):
        super().__init__()
        self.value_conv = BNConv(nfilters_in, nfilters_out=nfilters_mid, kernel_size=1)
        self.conv_out_size = state_plane_size * nfilters_mid
        self.value_bn = nn.BatchNorm2d(nfilters_mid)
        self.value_dense1 = nn.Linear(nfilters_mid * state_plane_size, nunits_mid)
        self.value_dense2 = nn.Linear(nunits_mid, value_size)
        self.softmax = nn.Softmax(dim=1)
    def forward(self, inp):
        # NOTE(review): BNConv already normalizes its input; value_bn adds a
        # second BatchNorm after the conv -- confirm this is intentional.
        out = self.value_bn(self.value_conv(inp))
        out = out.view(-1, self.conv_out_size) # batch_size X flattened conv output
        out = nn.functional.relu(self.value_dense1(out))
        return self.softmax(self.value_dense2(out))
class DensePolicyHead(nn.Module):
    """Dense policy head producing main and auxiliary policy distributions.

    Output shape is (batch, 2, policy_size): index 0 is the main policy,
    index 1 the auxiliary one; softmax is taken over policy_size.
    """
    def __init__(self, state_plane_size, nfilters_in, policy_size, nfilters_mid=16):
        super().__init__()
        self.policy_conv = BNConv(nfilters_in, nfilters_out=nfilters_mid, kernel_size=1)
        self.policy_bn = nn.BatchNorm2d(nfilters_mid)
        self.softmax = nn.Softmax(dim=2)
        self.policy_dense = nn.Linear(state_plane_size * nfilters_mid, policy_size)
        self.aux_policy_dense = nn.Linear(state_plane_size * nfilters_mid, policy_size)
    def forward(self, inp):
        p = self.policy_bn(self.policy_conv(inp))
        p = p.view(p.size(0), -1)
        # stack main and auxiliary logits into dim 1, normalize over moves
        p = torch.stack([self.policy_dense(p), self.aux_policy_dense(p)], dim=1)
        return self.softmax(p)
class ConvolutionalPolicyHead(nn.Module):
    """Convolutional policy head with a global-pooling bias path.

    Output shape is (batch, nfilters_out, H*W): one flattened, softmaxed
    policy distribution per output plane.
    """
    def __init__(self, nfilters_in, chead=16, nfilters_out=2):
        super().__init__()
        self.nfilters_out = nfilters_out
        self.Pconv = nn.Conv2d(nfilters_in, chead, kernel_size=1, padding=0)
        self.Gconv = nn.Conv2d(nfilters_in, chead, kernel_size=1, padding=0)
        self.bnG = nn.BatchNorm2d(chead)
        self.dense = nn.Linear(2 * chead, chead)
        # NOTE(review): nfilters_out is hard-coded to 2 here instead of using
        # the constructor argument -- confirm this is intentional.
        self.conv2 = BNConv(chead, nfilters_out=2, kernel_size=1)
        self.softmax = nn.Softmax(dim=2)
    def forward(self, inp):
        p = self.Pconv(inp)
        # global pooling branch: mean + (chained) spatial max per channel
        g = nn.functional.relu(self.bnG(self.Gconv(inp)))
        pooled = torch.cat([g.mean(dim=(2, 3)), g.max(dim=2)[0].max(dim=2)[0]], dim=1)
        biases = self.dense(pooled)
        # broadcast the pooled biases over the spatial dims
        p_biased = p + biases.unsqueeze(2).unsqueeze(3)
        return self.softmax(self.conv2(p_biased).view(inp.size(0), self.nfilters_out, -1))
class ConvolutionalPolicyHeadWithPass(nn.Module):
    """Convolutional policy head with an extra 'pass' logit per output plane.

    Output shape is (batch, nfilters_out, H*W + 1); the last index of the
    move dimension is the pass move, produced from the pooled global branch.
    """
    def __init__(
        self, nfilters_in, state_plane_size, chead=16, nfilters_out=2,
    ):
        super().__init__()
        self.nfilters_out = nfilters_out
        self.Pconv = nn.Conv2d(nfilters_in, chead, kernel_size=1, padding=0)
        self.Gconv = nn.Conv2d(nfilters_in, chead, kernel_size=1, padding=0)
        self.bnG = nn.BatchNorm2d(chead)
        self.dense = nn.Linear(2 * chead, chead)
        self.conv2 = BNConv(chead, nfilters_out=nfilters_out, kernel_size=1)
        self.pass_dense = nn.Linear(chead * state_plane_size, self.nfilters_out)
        self.softmax = nn.Softmax(dim=2)
    def forward(self, inp):
        p = self.Pconv(inp)
        # global pooling branch: mean + (chained) spatial max per channel
        g = nn.functional.relu(self.bnG(self.Gconv(inp)))
        pooled = torch.cat([g.mean(dim=(2, 3)), g.max(dim=2)[0].max(dim=2)[0]], dim=1)
        biases = self.dense(pooled)
        p_biased = p + biases.unsqueeze(2).unsqueeze(3)
        conv2_out = self.conv2(p_biased).view(inp.size(0), self.nfilters_out, -1)
        # pass logit computed from the full (unpooled) global branch
        pass_out = self.pass_dense(g.view(inp.size(0), -1))
        policy_cat = torch.cat([conv2_out, pass_out.unsqueeze(2)], dim=2)
        return self.softmax(policy_cat)
class ConvolutionalSigmoidHead(nn.Module):
    """Per-location sigmoid head: two BNConvs then sigmoid, 1 output plane."""
    def __init__(self, nfilters_in, nfilters_mid=16, kernel_sizes=(3, 1)):
        super().__init__()
        self.conv1 = BNConv(nfilters_in, nfilters_out=nfilters_mid, kernel_size=kernel_sizes[0])
        self.conv2 = BNConv(nfilters_mid, nfilters_out=1, kernel_size=kernel_sizes[1])
    def forward(self, inp):
        return torch.sigmoid(self.conv2(self.conv1(inp)))
class GameNet(nn.Module): # module is the loss
    """ResNet trunk with pluggable heads, plus checkpoint/metadata handling.

    Checkpoints live under data/<GAME_NAME><tag>/<unix_ts>/net.pt; metadata
    (filename, parent, iteration, tag) is saved alongside the state dict.
    """
    def __init__(self, game_cls, input_channels: int, nfilters: int, nblocks: int, heads: dict, cuda=True):
        super().__init__()
        self.input_block = InputBlock(input_channels=input_channels, nfilters=nfilters)
        self.nblocks = nblocks
        self.blocks = nn.ModuleList([ResNetBlock(nfilters=nfilters) for _ in range(nblocks)])
        self.heads = nn.ModuleDict(heads)
        self.metadata = {"filename": None, "parent": None, "game_cls": game_cls, "iteration": 0}
        if cuda and torch.cuda.is_available():
            self.device = torch.device("cuda:0")
            self.cuda()
        else:
            self.device = torch.device("cpu")
    def __str__(self):
        # NOTE(review): metadata['tag'] is only set by load_weights /
        # new_iteration; str() on a freshly constructed net raises KeyError.
        return f"{self.metadata['game_cls'].GAME_NAME}:{self.metadata['tag']}:{self.metadata['iteration']}"
    @staticmethod
    def data_dir(game_class, tag):
        # root directory for this game+tag's checkpoints
        return f"data/{game_class.GAME_NAME}{tag}"
    @classmethod
    def list_weights(cls, game_class, tag=""):
        """Return sorted integer timestamps of saved checkpoints for game/tag."""
        dir = cls.data_dir(game_class, tag)
        if not os.path.isdir(dir):
            os.makedirs(dir, exist_ok=True)
        return sorted(
            [
                int(file)
                for file in os.listdir(dir)
                if file.isdigit() and os.path.isfile(os.path.join(dir, file, "net.pt"))
            ]
        )
    def save(self, data, net_path=None, filename="net.pt"): # save net or related data like games
        """Serialize ``data`` next to the current (or given) checkpoint path."""
        path, file = os.path.split(net_path or self.metadata["filename"])
        os.makedirs(path, exist_ok=True)
        torch.save(data, os.path.join(path, filename))
    def load_weights(self, net_ts="latest", tag=""): # TODO refactor ts->filename or sth
        """Load checkpoint ``net_ts`` (or the newest); bootstrap one if none exist."""
        game_class = self.metadata["game_cls"]
        if net_ts == "latest":
            trained_models = self.list_weights(game_class, tag)
            if trained_models:
                net_ts = trained_models[-1]
            else:
                # no checkpoints yet: save the freshly initialized weights
                return self.new_iteration(tag=tag)
        net_filename = f"{self.data_dir(game_class,tag)}/{net_ts}/net.pt"
        net_data = torch.load(net_filename, map_location=self.device)
        self.load_state_dict(net_data.pop("state_dict"))
        self.metadata.update({**net_data, "tag": tag})
    def new_iteration(self, x_metadata: Dict = None, tag=None):
        """Save current weights as a new timestamped checkpoint, bump metadata."""
        tag = tag or self.metadata.get("tag", "")
        new_filename = f"{self.data_dir(self.metadata['game_cls'],tag)}/{int(time.time())}/net.pt"
        new_metadata = {
            **self.metadata,
            **(x_metadata or {}),
            "tag": tag,
            "parent": self.metadata["filename"],
            "filename": new_filename,
            "iteration": self.metadata.get("iteration", 0) + 1,
        }
        self.save(data={**new_metadata, "state_dict": self.state_dict()}, net_path=new_filename)
        self.metadata = new_metadata # make sure this is AFTER save
    def forward(self, inp):
        """Run the trunk then every head; returns {head_name: head_output}."""
        out = self.input_block(inp)
        for b in self.blocks:
            out = b(out)
        return {k: head(out) for k, head in self.heads.items()}
    def evaluate_sample(self, inp):
        """Evaluate one unbatched sample; returns numpy outputs per head."""
        return {
            k: v.detach().cpu().numpy().squeeze(0)
            for k, v in self(torch.tensor(inp, dtype=torch.float32).unsqueeze(0).to(self.device)).items()
        }
import os
import time
from typing import Dict
import torch
import torch.nn as nn
class InputBlock(nn.Conv2d):
    """Input convolution mapping raw feature planes to the trunk width.
    (Duplicate of the definition earlier in this file.)"""
    def __init__(self, nfilters, input_channels, kernel_size=3):
        super().__init__(input_channels, nfilters, kernel_size=kernel_size, padding=1)
class BNConv(nn.Module):
    """Pre-activation conv unit: BatchNorm -> ReLU -> Conv2d.
    (Duplicate of the definition earlier in this file.)"""
    def __init__(self, nfilters, nfilters_out=None, kernel_size=3, bias=False):
        super().__init__()
        self.bn = nn.BatchNorm2d(nfilters)
        padding = kernel_size // 2
        self.conv = nn.Conv2d(nfilters, nfilters_out or nfilters, kernel_size=kernel_size, padding=padding, bias=bias)
    def forward(self, inp):
        return self.conv(nn.functional.relu(self.bn(inp)))
class ResNetBlock(nn.Module):
    """Pre-activation residual block: x + conv2(conv1(x)).
    (Duplicate of the definition earlier in this file.)"""
    def __init__(self, nfilters, kernel_size=3):
        super().__init__()
        self.conv1 = BNConv(nfilters, kernel_size=kernel_size)
        self.conv2 = BNConv(nfilters, kernel_size=kernel_size)
    def forward(self, inp):
        out = self.conv2(self.conv1(inp))
        return inp + out
class GlobalPoolingMeanMaxBias(nn.Module):
    """Global-pooling bias layer.

    The first ``nfilters_pooled`` channels are BN+ReLU'd and globally pooled
    (mean and max over the spatial dims); a linear layer maps the pooled
    vector to one bias per remaining channel, which is broadcast-added to
    those channels.  Output keeps the full ``nfilters`` channel count.
    """
    def __init__(self, nfilters, nfilters_pooled):
        super().__init__()
        self.nfilters = nfilters
        self.nfilters_pooled = nfilters_pooled
        self.bn = nn.BatchNorm2d(nfilters_pooled)
        # mean + max pooling -> 2 * nfilters_pooled input features
        self.dense = nn.Linear(2 * self.nfilters_pooled, self.nfilters - self.nfilters_pooled)
    def forward(self, inp):
        tg = nn.functional.relu(self.bn(inp[:, : self.nfilters_pooled]))
        # BUG FIX: Tensor.max(dim=...) takes a single int, not a tuple, so
        # tg.max(dim=(2, 3)) raised TypeError.  Chain the two spatial
        # reductions (same pattern as ConvolutionalPolicyHead below).
        pooled = torch.cat([tg.mean(dim=(2, 3)), tg.max(dim=2)[0].max(dim=2)[0]], dim=1)
        biases = self.dense(pooled)
        tx_biased = inp[:, self.nfilters_pooled :] + biases.unsqueeze(2).unsqueeze(3)
        return torch.cat([tg, tx_biased], dim=1)
class GlobalPoolingBlock(nn.Module):
    """Residual block with a global-pooling bias layer between its convs."""
    def __init__(self, nfilters, nfilters_pooled, kernel_size=3, pooling_cls=GlobalPoolingMeanMaxBias):
        super().__init__()
        self.bias = pooling_cls(nfilters=nfilters, nfilters_pooled=nfilters_pooled)
        # BUG FIX: kernel_size was passed positionally into BNConv's second
        # parameter (nfilters_out), silently changing the conv output channel
        # count to `kernel_size` and breaking the residual add.
        self.conv1 = BNConv(nfilters, kernel_size=kernel_size)
        self.conv2 = BNConv(nfilters, kernel_size=kernel_size)
    def forward(self, inp):
        out = self.conv2(self.bias(self.conv1(inp)))
        return inp + out
class ValueHead(nn.Module):
    """Value head: 1x1 BNConv -> BN -> flatten -> 2-layer MLP -> softmax.
    (Duplicate of the definition earlier in this file.)"""
    def __init__(self, state_plane_size, nfilters_in, value_size, nfilters_mid=3, nunits_mid=16):
        super().__init__()
        self.value_conv = BNConv(nfilters_in, nfilters_out=nfilters_mid, kernel_size=1)
        self.conv_out_size = state_plane_size * nfilters_mid
        self.value_bn = nn.BatchNorm2d(nfilters_mid)
        self.value_dense1 = nn.Linear(nfilters_mid * state_plane_size, nunits_mid)
        self.value_dense2 = nn.Linear(nunits_mid, value_size)
        self.softmax = nn.Softmax(dim=1)
    def forward(self, inp):
        out = self.value_bn(self.value_conv(inp))
        out = out.view(-1, self.conv_out_size) # batch_size X flattened conv output
        out = nn.functional.relu(self.value_dense1(out))
        return self.softmax(self.value_dense2(out))
class DensePolicyHead(nn.Module):
    """Dense policy head producing main and auxiliary policy distributions.
    (Duplicate of the definition earlier in this file.)"""
    def __init__(self, state_plane_size, nfilters_in, policy_size, nfilters_mid=16):
        super().__init__()
        self.policy_conv = BNConv(nfilters_in, nfilters_out=nfilters_mid, kernel_size=1)
        self.policy_bn = nn.BatchNorm2d(nfilters_mid)
        self.softmax = nn.Softmax(dim=2)
        self.policy_dense = nn.Linear(state_plane_size * nfilters_mid, policy_size)
        self.aux_policy_dense = nn.Linear(state_plane_size * nfilters_mid, policy_size)
    def forward(self, inp):
        p = self.policy_bn(self.policy_conv(inp))
        p = p.view(p.size(0), -1)
        p = torch.stack([self.policy_dense(p), self.aux_policy_dense(p)], dim=1)
        return self.softmax(p)
class ConvolutionalPolicyHead(nn.Module):
    """Fully convolutional policy head with a global-pooling bias path.

    A 1x1 "P" conv produces per-square policy features while a parallel
    "G" conv is globally pooled (channel-wise mean and max) and passed
    through a dense layer to produce a per-channel bias that is added to
    the policy features. The result is reduced to ``nfilters_out``
    channels and softmax-normalised over board squares.

    Output shape: (batch, nfilters_out, H * W).
    """

    def __init__(self, nfilters_in, chead=16, nfilters_out=2):
        super().__init__()
        self.nfilters_out = nfilters_out
        self.Pconv = nn.Conv2d(nfilters_in, chead, kernel_size=1, padding=0)
        self.Gconv = nn.Conv2d(nfilters_in, chead, kernel_size=1, padding=0)
        self.bnG = nn.BatchNorm2d(chead)
        # Maps the concatenated (mean, max) pooled features to per-channel biases.
        self.dense = nn.Linear(2 * chead, chead)
        # BUGFIX: this was hard-coded as nfilters_out=2, which broke the
        # forward() view for any other nfilters_out (and was inconsistent
        # with ConvolutionalPolicyHeadWithPass).
        self.conv2 = BNConv(chead, nfilters_out=nfilters_out, kernel_size=1)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, inp):
        p = self.Pconv(inp)
        g = nn.functional.relu(self.bnG(self.Gconv(inp)))
        # Channel-wise global mean and max over the spatial dimensions.
        pooled = torch.cat([g.mean(dim=(2, 3)), g.max(dim=2)[0].max(dim=2)[0]], dim=1)
        biases = self.dense(pooled)
        # Broadcast the per-channel bias over the spatial grid.
        p_biased = p + biases.unsqueeze(2).unsqueeze(3)
        return self.softmax(self.conv2(p_biased).view(inp.size(0), self.nfilters_out, -1))
class ConvolutionalPolicyHeadWithPass(nn.Module):
    """Convolutional policy head that additionally emits a "pass" move.

    Same structure as ``ConvolutionalPolicyHead`` (1x1 "P" conv biased by
    globally pooled "G" features), plus a dense layer over the flattened
    G features producing one extra logit per output channel, concatenated
    as an extra pseudo-square.

    Output shape: (batch, nfilters_out, H * W + 1); the last position
    along dim 2 is the pass move.
    """
    def __init__(
        self, nfilters_in, state_plane_size, chead=16, nfilters_out=2,
    ):
        super().__init__()
        self.nfilters_out = nfilters_out
        self.Pconv = nn.Conv2d(nfilters_in, chead, kernel_size=1, padding=0)
        self.Gconv = nn.Conv2d(nfilters_in, chead, kernel_size=1, padding=0)
        self.bnG = nn.BatchNorm2d(chead)
        # Maps the concatenated (mean, max) pooled G features to per-channel biases.
        self.dense = nn.Linear(2 * chead, chead)
        self.conv2 = BNConv(chead, nfilters_out=nfilters_out, kernel_size=1)
        # Dense layer over the flattened G features producing the pass logits.
        self.pass_dense = nn.Linear(chead * state_plane_size, self.nfilters_out)
        self.softmax = nn.Softmax(dim=2)
    def forward(self, inp):
        p = self.Pconv(inp)
        g = nn.functional.relu(self.bnG(self.Gconv(inp)))
        # Channel-wise global mean and max over the spatial dimensions.
        pooled = torch.cat([g.mean(dim=(2, 3)), g.max(dim=2)[0].max(dim=2)[0]], dim=1)
        biases = self.dense(pooled)
        # Broadcast the per-channel bias over the spatial grid.
        p_biased = p + biases.unsqueeze(2).unsqueeze(3)
        conv2_out = self.conv2(p_biased).view(inp.size(0), self.nfilters_out, -1)
        pass_out = self.pass_dense(g.view(inp.size(0), -1))
        # Append the pass logit as one extra "square" before the softmax.
        policy_cat = torch.cat([conv2_out, pass_out.unsqueeze(2)], dim=2)
        return self.softmax(policy_cat)
class ConvolutionalSigmoidHead(nn.Module):
    """Two-conv head producing a single-channel sigmoid map."""

    def __init__(self, nfilters_in, nfilters_mid=16, kernel_sizes=(3, 1)):
        super().__init__()
        self.conv1 = BNConv(nfilters_in, nfilters_out=nfilters_mid, kernel_size=kernel_sizes[0])
        self.conv2 = BNConv(nfilters_mid, nfilters_out=1, kernel_size=kernel_sizes[1])

    def forward(self, inp):
        hidden = self.conv1(inp)
        logits = self.conv2(hidden)
        return torch.sigmoid(logits)
class GameNet(nn.Module):  # module is the loss
    """ResNet-style game network: input block, residual trunk, named heads.

    Also carries persistence metadata (tag, iteration, parent/filename)
    and helpers to save/load weight snapshots under
    ``data/<GAME_NAME><tag>/<timestamp>/net.pt``.
    """

    def __init__(self, game_cls, input_channels: int, nfilters: int, nblocks: int, heads: dict, cuda=True):
        super().__init__()
        self.input_block = InputBlock(input_channels=input_channels, nfilters=nfilters)
        self.nblocks = nblocks
        self.blocks = nn.ModuleList([ResNetBlock(nfilters=nfilters) for _ in range(nblocks)])
        self.heads = nn.ModuleDict(heads)
        # BUGFIX: "tag" was missing here, so __str__ raised KeyError on a
        # freshly constructed net that had never been loaded or saved.
        self.metadata = {"filename": None, "parent": None, "game_cls": game_cls, "iteration": 0, "tag": ""}
        if cuda and torch.cuda.is_available():
            self.device = torch.device("cuda:0")
            self.cuda()
        else:
            self.device = torch.device("cpu")

    def __str__(self):
        return f"{self.metadata['game_cls'].GAME_NAME}:{self.metadata['tag']}:{self.metadata['iteration']}"

    @staticmethod
    def data_dir(game_class, tag):
        # e.g. data/<GAME_NAME><tag>
        return f"data/{game_class.GAME_NAME}{tag}"

    @classmethod
    def list_weights(cls, game_class, tag=""):
        """Return sorted integer timestamps of saved snapshots for a game/tag."""
        weights_dir = cls.data_dir(game_class, tag)
        if not os.path.isdir(weights_dir):
            os.makedirs(weights_dir, exist_ok=True)
        return sorted(
            [
                int(file)
                for file in os.listdir(weights_dir)
                if file.isdigit() and os.path.isfile(os.path.join(weights_dir, file, "net.pt"))
            ]
        )

    def save(self, data, net_path=None, filename="net.pt"):  # save net or related data like games
        """Save ``data`` under the directory of ``net_path`` (or the current snapshot)."""
        path, _ = os.path.split(net_path or self.metadata["filename"])
        os.makedirs(path, exist_ok=True)
        torch.save(data, os.path.join(path, filename))

    def load_weights(self, net_ts="latest", tag=""):  # TODO refactor ts->filename or sth
        """Load snapshot ``net_ts`` ("latest" picks the newest); start a new
        iteration when no snapshot exists yet."""
        game_class = self.metadata["game_cls"]
        if net_ts == "latest":
            trained_models = self.list_weights(game_class, tag)
            if trained_models:
                net_ts = trained_models[-1]
            else:
                return self.new_iteration(tag=tag)
        net_filename = f"{self.data_dir(game_class, tag)}/{net_ts}/net.pt"
        net_data = torch.load(net_filename, map_location=self.device)
        self.load_state_dict(net_data.pop("state_dict"))
        self.metadata.update({**net_data, "tag": tag})

    def new_iteration(self, x_metadata: Dict = None, tag=None):
        """Persist current weights as a new timestamped snapshot and advance metadata."""
        tag = tag or self.metadata.get("tag", "")
        new_filename = f"{self.data_dir(self.metadata['game_cls'], tag)}/{int(time.time())}/net.pt"
        new_metadata = {
            **self.metadata,
            **(x_metadata or {}),
            "tag": tag,
            "parent": self.metadata["filename"],
            "filename": new_filename,
            "iteration": self.metadata.get("iteration", 0) + 1,
        }
        self.save(data={**new_metadata, "state_dict": self.state_dict()}, net_path=new_filename)
        self.metadata = new_metadata  # make sure this is AFTER save

    def forward(self, inp):
        out = self.input_block(inp)
        for b in self.blocks:
            out = b(out)
        # One output per registered head, keyed by head name.
        return {k: head(out) for k, head in self.heads.items()}

    def evaluate_sample(self, inp):
        """Run a single un-batched sample and return numpy outputs per head."""
        return {
            k: v.detach().cpu().numpy().squeeze(0)
            for k, v in self(torch.tensor(inp, dtype=torch.float32).unsqueeze(0).to(self.device)).items()
        }
from pyspark import Row
from pyspark.sql import SparkSession
# Build a local Spark session for this one-off analysis script.
spark = SparkSession \
    .builder \
    .appName("lex-spark") \
    .master("local[*]") \
    .getOrCreate()
sc = spark.sparkContext
file = r".\lex.txt"  # edit point 1: path of the input data file
rdd = sc.textFile(file)
info = rdd.map(lambda x: x.split("\t"))  # lex.txt is tab-separated
info_array = info.collect()  # 270 rows; each row is a list, e.g. info_array[0][1] == "858551"
cal_result = ['动画区', 'LexBurner']  # edit point 2: [area name, uploader name] seed values (runtime data, kept verbatim)
def calculator(item, cal_result):
    """Aggregate per-video stats for videos published from 2018-07 onwards.

    Appends three integers to ``cal_result``, in this order:
      * average view count // 10000
      * average viewer involvement: (danmaku + comments) / views, in units of 1e-4
      * average viewer support: (likes + coins + favourites) / views, in units of 1e-3

    ``item`` is a sequence of rows shaped like
    ``[date "YYYY-MM-...", views, danmaku, comments, likes, coins, favourites]``
    where the numeric fields may be strings.
    """
    view_time_sum = 0        # total view count
    viewer_involve_sum = 0   # total involvement
    viewer_support_sum = 0   # total support
    view_time_count = 0      # number of videos inside the selected period
    # IMPROVED: iterate over the actual data instead of a hard-coded 270 rows.
    for row in item:
        year = int(row[0][0:4])   # publication year
        month = int(row[0][5:7])  # publication month
        if (year == 2018 and month >= 7) or (year >= 2019):  # keep only the target period
            view_time = int(row[1])  # view count of this video
            view_time_sum += view_time
            # involvement = (danmaku + comments) / views, in units of 0.0001
            viewer_involve = (int(row[2]) + int(row[3])) * 10000 // view_time
            viewer_involve_sum += viewer_involve
            # support = (likes + coins + favourites) / views, in units of 0.001
            viewer_support = (int(row[4]) + int(row[5]) + int(row[6])) * 1000 // view_time
            viewer_support_sum += viewer_support
            view_time_count += 1
    cal_result.append(view_time_sum // view_time_count // 10000)
    cal_result.append(viewer_involve_sum // view_time_count)
    cal_result.append(viewer_support_sum // view_time_count)
# Compute the aggregates over all collected rows.
calculator(info_array, cal_result)
print(cal_result)
# Wrap in a list so it can be parallelized as a one-row RDD below.
real_result = [cal_result]
def f(item):
    """Map a result row [area, uploader, traffic, involvement, support] to a dict."""
    return {
        "area_name": item[0],
        "up_name": item[1],
        "traffic": item[2],
        "viewer_involve": item[3],
        "viewer_support": item[4],
    }
# Convert the single result row into a DataFrame via Row objects.
result_rdd = sc.parallelize(real_result)
result_df = result_rdd.map(lambda x: Row(**f(x))).toDF()
result_df.show()
# JDBC connection parameters for the target MySQL database.
conn_param = {}
conn_param['user'] = "root"
conn_param['password'] = "<PASSWORD>"  # NOTE(review): placeholder — inject real credentials securely
conn_param['driver'] = "com.mysql.jdbc.Driver"
result_df.write.jdbc("jdbc:mysql://localhost:3306/bilibili?serverTimezone=UTC", 'represent_up_situ', 'append',
                     conn_param)
print("执行完毕")
# Notes:
# traffic: average view count over the last two years, divided by 10000
# viewer_involve: (danmaku + comments) / views over the last two years, in units of 0.0001
# viewer_support 是近两年 (点赞+投币+收藏)/播放量 然后除以0.001 | School/Spark_exp/pycharm/lex.py | from pyspark import Row
from pyspark.sql import SparkSession
spark = SparkSession \
.builder \
.appName("lex-spark") \
.master("local[*]") \
.getOrCreate()
sc = spark.sparkContext
file = r".\lex.txt" # 修改1
rdd = sc.textFile(file)
info = rdd.map(lambda x: x.split("\t")) # lex.txt的分割方式是/t
info_array = info.collect() # info_array就是有270个元素的数组,而每个元素又是一个数组,比如info_array[0][1]就是"858551"
cal_result = ['动画区', 'LexBurner'] # 修改2
def calculator(item, cal_result):
    """Aggregate per-video stats for videos published from 2018-07 onwards.

    Appends three integers to ``cal_result``, in this order:
      * average view count // 10000
      * average viewer involvement: (danmaku + comments) / views, in units of 1e-4
      * average viewer support: (likes + coins + favourites) / views, in units of 1e-3

    ``item`` is a sequence of rows shaped like
    ``[date "YYYY-MM-...", views, danmaku, comments, likes, coins, favourites]``
    where the numeric fields may be strings.
    """
    view_time_sum = 0        # total view count
    viewer_involve_sum = 0   # total involvement
    viewer_support_sum = 0   # total support
    view_time_count = 0      # number of videos inside the selected period
    # IMPROVED: iterate over the actual data instead of a hard-coded 270 rows.
    for row in item:
        year = int(row[0][0:4])   # publication year
        month = int(row[0][5:7])  # publication month
        if (year == 2018 and month >= 7) or (year >= 2019):  # keep only the target period
            view_time = int(row[1])  # view count of this video
            view_time_sum += view_time
            # involvement = (danmaku + comments) / views, in units of 0.0001
            viewer_involve = (int(row[2]) + int(row[3])) * 10000 // view_time
            viewer_involve_sum += viewer_involve
            # support = (likes + coins + favourites) / views, in units of 0.001
            viewer_support = (int(row[4]) + int(row[5]) + int(row[6])) * 1000 // view_time
            viewer_support_sum += viewer_support
            view_time_count += 1
    cal_result.append(view_time_sum // view_time_count // 10000)
    cal_result.append(viewer_involve_sum // view_time_count)
    cal_result.append(viewer_support_sum // view_time_count)
calculator(info_array, cal_result)
print(cal_result)
real_result = [cal_result]
def f(item):
    """Map a result row [area, uploader, traffic, involvement, support] to a dict."""
    return {
        "area_name": item[0],
        "up_name": item[1],
        "traffic": item[2],
        "viewer_involve": item[3],
        "viewer_support": item[4],
    }
result_rdd = sc.parallelize(real_result)
result_df = result_rdd.map(lambda x: Row(**f(x))).toDF()
result_df.show()
conn_param = {}
conn_param['user'] = "root"
conn_param['password'] = "<PASSWORD>"
conn_param['driver'] = "com.mysql.jdbc.Driver"
result_df.write.jdbc("jdbc:mysql://localhost:3306/bilibili?serverTimezone=UTC", 'represent_up_situ', 'append',
conn_param)
print("执行完毕")
# 备注
# traffic 是近两年 视频平均播放量除以10000
# viewer_involve 是近两年 (弹幕+评论)/播放量 然后除以0.0001
# viewer_support 是近两年 (点赞+投币+收藏)/播放量 然后除以0.001 | 0.277473 | 0.378316 |
import random
import tempfile
from collections import Counter
from concurrent.futures import as_completed, ProcessPoolExecutor
from itertools import product
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pystan
from tqdm import tqdm
from .. import config, estimation
from ..preprocessing import aggregation, conversion
def generate_sequence(k, seed=None):
    """Generate a random genome sequence of length `k`.

    Note: this seeds the *global* ``random`` generator, so subsequent
    callers of module-level ``random`` functions (e.g. ``random.sample``
    inside ``simulate_reads``) become deterministic as a side effect.

    :param k: length of the sequence
    :type k: int
    :param seed: random seed, defaults to `None`
    :type seed: int, optional
    :return: a random sequence
    :rtype: str
    """
    random.seed(seed)
    return ''.join(random.choices(conversion.BASE_COLUMNS, k=k))
def simulate_reads(sequence, p_e, p_c, pi, l=100, n=100, seed=None):  # noqa
    """Simulate `n` reads of length `l` from a sequence.

    Determinism note: positions and mutation draws use the seeded NumPy
    ``RandomState``, but the order in which alternative bases are tried
    uses the *global* ``random`` module (seeded by ``generate_sequence``),
    so full reproducibility requires both seeds.

    :param sequence: sequence to generate the reads from
    :type sequence: str
    :param p_e: background specific mutation rate. This is the rate a specific
        base mutates to another specific base (i.e. T>C, A>G, ...)
    :type p_e: float
    :param p_c: T>C mutation rate in labeled RNA
    :type p_c: float
    :param pi: fraction of labeled RNA
    :type pi: float
    :param l: length of each read, defaults to `100`
    :type l: int, optional
    :param n: number of reads to simulate, defaults to `100`
    :type n: int, optional
    :param seed: random seed, defaults to `None`
    :type seed: int, optional
    :return: a dataframe with each read as a row and the number of conversions and
        base content as the columns
    :rtype: pandas.DataFrame
    """
    generator = np.random.RandomState(seed)
    # Split reads into "new" (labeled, with T>C conversions at rate p_c)
    # and "old" (background mutations only).
    n_new = int(n * pi)
    n_old = n - n_new
    contents = []
    convs = []
    # Generate new sequences
    for _ in range(n_new):
        i = generator.randint(0, len(sequence) - l)  # noqa
        subsequence = sequence[i:i + l]  # noqa
        # Nucleotide content
        content = Counter(subsequence)
        # Create read with mutations
        # (the comprehension variable shadows the ``conversion`` module
        # only inside the comprehension scope)
        conv = {conversion: 0 for conversion in conversion.CONVERSION_COLUMNS}
        for base in subsequence:
            if base == 'T' and generator.random() < p_c:
                conv['TC'] += 1
            else:
                # Try the other bases in random order; at most one
                # background mutation per position.
                other_bases = [b for b in conversion.BASE_COLUMNS if b != base]
                for other_base in random.sample(other_bases, k=len(other_bases)):
                    bases = f'{base}{other_base}'
                    if generator.random() < p_e and bases != 'TC':
                        conv[bases] += 1
                        break
        contents.append(dict(content))
        convs.append(conv)
    # Generate old sequences
    for _ in range(n_old):
        i = generator.randint(0, len(sequence) - l)  # noqa
        subsequence = sequence[i:i + l]  # noqa
        # Nucleotide content
        content = Counter(subsequence)
        # Create read with mutations
        conv = {conversion: 0 for conversion in conversion.CONVERSION_COLUMNS}
        for base in subsequence:
            other_bases = [b for b in conversion.BASE_COLUMNS if b != base]
            for other_base in random.sample(other_bases, k=len(other_bases)):
                if generator.random() < p_e:
                    conv[f'{base}{other_base}'] += 1
                    break
        contents.append(dict(content))
        convs.append(conv)
    df_contents = pd.DataFrame(contents)
    df_conversions = pd.DataFrame(convs)
    # choice(..., replace=False) over all n indices is a random permutation,
    # shuffling new and old reads together.
    df_counts = pd.concat((df_contents, df_conversions),
                          axis=1)[conversion.COLUMNS].iloc[generator.choice(np.arange(n), size=n,
                                                                            replace=False)].reset_index(drop=True)
    return df_counts
# Process-global model handles. ``initializer`` runs in every
# ProcessPoolExecutor worker and stores the pre-compiled Stan model here so
# it does not have to be pickled per task.
# BUGFIX: this was previously spelled ``__model``, so ``_model`` was never
# defined at import time and ``_simulate`` raised NameError when called in a
# process where the pool initializer had not run.
_model = None
_pi_model = None


def initializer(model):
    """Store the (Stan) model in a process-global for use by ``_simulate``."""
    global _model
    _model = model
def estimate(
    df_counts,
    p_e,
    p_c,
    pi,
    estimate_p_e=False,
    estimate_p_c=False,
    estimate_pi=True,
    model=None,
    nasc=False,
):
    """Estimate p_e, p_c and pi from simulated read counts.

    Each parameter is either estimated from ``df_counts`` (when the
    corresponding ``estimate_*`` flag is set) or passed through unchanged,
    so the true simulation values can serve as ground truth for the rest.

    :param df_counts: per-read content/conversion counts as produced by
        ``simulate_reads`` (pi estimation additionally expects a 'GX' column)
    :param p_e: true background mutation rate (returned as-is when not estimated)
    :param p_c: true induced T>C rate (returned as-is when not estimated)
    :param pi: true labeled fraction (returned as-is when not estimated)
    :param estimate_p_e: estimate p_e from the data, defaults to False
    :param estimate_p_c: estimate p_c from the data, defaults to False
    :param estimate_pi: estimate pi with the Stan model, defaults to True
    :param model: compiled Stan model handed to ``estimation.pi.fit_stan_mcmc``
    :param nasc: use the NASC-seq style estimators, defaults to False
    :return: tuple (p_e_estimate, p_c_estimate, guess, alpha, beta, pi_estimate)
    """
    # p_e
    if estimate_p_e:
        if nasc:
            with tempfile.NamedTemporaryFile() as tf:
                rates_path = aggregation.calculate_mutation_rates(df_counts, tf.name)
                df_rates = aggregation.read_rates(rates_path)
            with tempfile.NamedTemporaryFile() as tf:
                p_e_path = estimation.estimate_p_e_nasc(df_rates, tf.name)
                p_e_estimate = estimation.read_p_e(p_e_path)
        else:
            with tempfile.NamedTemporaryFile() as tf:
                p_e_path = estimation.estimate_p_e(df_counts, tf.name)
                p_e_estimate = estimation.read_p_e(p_e_path)
    else:
        p_e_estimate = p_e
    # p_c
    if estimate_p_c:
        # Aggregate reads by (TC conversions, T content) pairs.
        df_aggregates = pd.DataFrame(df_counts.groupby(['TC', 'T'], sort=False, observed=True).size())
        df_aggregates.columns = ['count']
        df_aggregates.reset_index(inplace=True)
        df_aggregates.rename(columns={'TC': 'conversion', 'T': 'base'}, inplace=True)
        with tempfile.NamedTemporaryFile() as tf:
            p_c_path = estimation.estimate_p_c(df_aggregates, p_e_estimate, tf.name, nasc=nasc)
            p_c_estimate = estimation.read_p_c(p_c_path)
    else:
        p_c_estimate = p_c
    # pi
    if estimate_pi:
        # pi is estimated from the first simulated gene only.
        df_aggregates = pd.DataFrame(
            df_counts[df_counts['GX'] == 'gene_0'].groupby(['TC', 'T'], sort=False, observed=True).size()
        )
        df_aggregates.columns = ['count']
        df_aggregates.reset_index(inplace=True)
        df_aggregates = df_aggregates[(df_aggregates[['T', 'count']] > 0).all(axis=1)]
        vals = df_aggregates.values
        # Initial guess: fraction of reads with at least one T>C conversion,
        # clamped to [0.01, 0.99].
        guess = min(max((sum(vals[vals[:, 0] > 0][:, 2]) / sum(vals[:, 2])), 0.01), 0.99)
        guess, alpha, beta, pi_estimate = estimation.pi.fit_stan_mcmc(
            vals,
            p_e_estimate,
            p_c_estimate,
            guess=guess,
            model=model,
        )
        if nasc:
            pi_estimate = estimation.pi.beta_mode(alpha, beta)
    else:
        guess, alpha, beta, pi_estimate = pi, None, None, pi
    return p_e_estimate, p_c_estimate, guess, alpha, beta, pi_estimate
def _simulate(
    p_e,
    p_c,
    pi,
    sequence=None,
    k=10000,
    l=100,  # noqa
    n=100,
    estimate_p_e=False,
    estimate_p_c=False,
    estimate_pi=True,
    seed=None,
    model=None,
    nasc=False,
):
    """Run one simulation + estimation round (executed in a worker process).

    ``pi`` and ``n`` may be scalars or equal-length lists (one entry per
    simulated gene); a scalar is broadcast against the other argument.
    Falls back to the process-global ``_model`` (set by ``initializer``)
    when no model is passed explicitly.
    """
    model = model or _model
    # Normalize pi/n into equal-length lists, one entry per gene.
    pis = pi
    ns = n
    if isinstance(pi, list) and not isinstance(n, list):
        pis = pi
        ns = [n] * len(pis)
    elif not isinstance(pi, list) and isinstance(n, list):
        ns = n
        pis = [pi] * len(ns)
    elif not isinstance(pi, list) and not isinstance(n, list):
        ns = [n]
        pis = [pi]
    assert len(pis) == len(ns)
    dfs = []
    for i, (pi, n) in enumerate(zip(pis, ns)):
        # Generate the sequence once (if not given) and reuse it for later genes.
        sequence = sequence or generate_sequence(k, seed=seed)
        df_counts = simulate_reads(sequence, p_e, p_c, pi, l=l, n=n, seed=seed)
        df_counts['GX'] = f'gene_{i}'
        dfs.append(df_counts)
    df_counts = pd.concat(dfs, ignore_index=True)
    return estimate(
        df_counts,
        p_e,
        p_c,
        pis[0],
        estimate_p_e=estimate_p_e,
        estimate_p_c=estimate_p_c,
        estimate_pi=estimate_pi,
        model=model,
        nasc=nasc,
    )
def simulate(
    p_e,
    p_c,
    pi,
    sequence=None,
    k=10000,
    l=100,  # noqa
    n=100,
    n_runs=16,
    n_threads=8,
    estimate_p_e=False,
    estimate_p_c=False,
    estimate_pi=True,
    model=None,
    nasc=False,
):
    """Run ``n_runs`` independent simulation/estimation rounds in parallel.

    Compiles the Stan model once (unless provided) and ships it to each
    worker process via the pool initializer. Returns six parallel lists,
    one entry per run, in completion order: p_e, p_c, initial guess,
    alpha, beta and pi estimates.
    """
    model = model or pystan.StanModel(file=config.MODEL_PATH, model_name=config.MODEL_NAME)
    p_es = []
    p_cs = []
    guesses = []
    alphas = []
    betas = []
    pis = []
    # The initializer stores the compiled model in each worker process so it
    # does not have to be pickled for every submitted task.
    with ProcessPoolExecutor(max_workers=n_threads, initializer=initializer, initargs=(model,)) as executor:
        futures = [
            executor.submit(
                _simulate,
                p_e,
                p_c,
                pi,
                sequence=sequence,
                k=k,
                l=l,
                n=n,
                estimate_p_e=estimate_p_e,
                estimate_p_c=estimate_p_c,
                estimate_pi=estimate_pi,
                nasc=nasc,
            ) for _ in range(n_runs)
        ]
        for future in as_completed(futures):
            p_e_estimate, p_c_estimate, guess, alpha_estimate, beta_estimate, pi_estimate = future.result()
            p_es.append(p_e_estimate)
            p_cs.append(p_c_estimate)
            guesses.append(guess)
            alphas.append(alpha_estimate)
            betas.append(beta_estimate)
            pis.append(pi_estimate)
    return p_es, p_cs, guesses, alphas, betas, pis
def simulate_batch(
    p_e,
    p_c,
    pi,
    l,  # noqa
    n,
    estimate_p_e,
    estimate_p_c,
    estimate_pi,
    n_runs,
    n_threads,
    model,
    nasc=False
):
    """Helper function to run simulations in batches.

    Runs ``simulate`` for every combination of the given ``p_e``, ``p_c``
    and ``pi`` values (scalars are treated as single-element lists) and
    collects all per-run estimates into one tidy DataFrame with columns
    for the true values and their estimates.
    """
    p_es = p_e
    p_cs = p_c
    pis = pi
    if not isinstance(p_e, list):
        p_es = [p_e]
    if not isinstance(p_c, list):
        p_cs = [p_c]
    if not isinstance(pi, list):
        pis = [pi]
    dfs = []
    # Cartesian product of all requested parameter values.
    for p_e, p_c, pi in tqdm(list(product(p_es, p_cs, pis))):
        p_e_estimates, p_c_estimates, guesses, alphas, betas, pi_estimates = simulate(
            p_e,
            p_c,
            pi,
            l=l,
            n=n,
            estimate_p_e=estimate_p_e,
            estimate_p_c=estimate_p_c,
            estimate_pi=estimate_pi,
            n_runs=n_runs,
            n_threads=n_threads,
            model=model,
            nasc=nasc,
        )
        dfs.append(
            pd.DataFrame({
                'p_e': p_e,
                'p_c': p_c,
                # When pi is a per-gene list, only the first gene's pi is estimated.
                'pi': pi[0] if isinstance(pi, list) else pi,
                'p_e_estimate': p_e_estimates,
                'p_c_estimate': p_c_estimates,
                'guess': guesses,
                'alpha_estimate': alphas,
                'beta_estimate': betas,
                'pi_estimate': pi_estimates
            })
        )
    return pd.concat(dfs, ignore_index=True)
def plot_estimations(
    X, Y, n_runs, means, truth, ax=None, box=True, tick_decimals=1, title=None, xlabel=None, ylabel=None
):
    """Scatter/box plot of estimated values against true parameter values.

    :param X: x-coordinate (true parameter value) for every run
    :param Y: estimated values, grouped in consecutive chunks of ``n_runs``
    :param n_runs: number of runs per distinct X value
    :param means: Series-like of per-X mean estimates (index = X values)
    :param truth: scalar or iterable of true values, drawn as a red line
    :param ax: existing axes to draw on; a new figure is created when None
    :param box: whether to draw a boxplot behind the scatter
    :param tick_decimals: rounding applied to x tick labels
    :param title: optional axes title
    :param xlabel: optional x-axis label
    :param ylabel: optional y-axis label
    :return: the matplotlib axes drawn on
    """
    if ax is not None:
        _ax = ax
    else:
        fig, _ax = plt.subplots(figsize=(5, 5), tight_layout=True)
    if box:
        X_range = max(X) - min(X)
        _ax.boxplot(
            list(np.array(Y).reshape(-1, n_runs)),
            positions=np.sort(np.unique(X)),
            zorder=-1,
            widths=X_range * 0.05,
            medianprops=dict(c='gray', linewidth=1.5),
            boxprops=dict(facecolor='lightgray', color='gray', linewidth=1.5),
            whiskerprops=dict(c='gray', linewidth=1.5),
            capprops=dict(c='gray', linewidth=1.5),
            patch_artist=True,
            showfliers=False,
        )
    _ax.scatter(X, Y, s=3, label=f'n={n_runs}')
    _ax.scatter(means.index, means.values, s=15, label='mean')
    # Draw the ground truth: y = x when `truth` is iterable, otherwise a
    # horizontal line at the scalar truth value.
    try:
        iter(truth)
        _ax.plot(truth, truth, c='red', linewidth=1, label='truth')
    # BUGFIX: was a bare `except`, which also swallowed unrelated errors
    # (including KeyboardInterrupt) raised while plotting. iter() raises
    # TypeError for non-iterables, which is the case being distinguished.
    except TypeError:
        _ax.plot([min(X), max(X)], [truth, truth], c='red', linewidth=1, label='truth')
    if box:
        _ax.set_xlim(left=min(X) - X_range * 0.1, right=max(X) + X_range * 0.1)
    xticks = np.sort(np.unique(X))
    _ax.set_xticks(xticks)
    _ax.set_xticklabels([f'{round(x, tick_decimals)}' for x in xticks])
    _ax.legend()
    if title:
        _ax.set_title(title)
    if xlabel:
        _ax.set_xlabel(xlabel)
    if ylabel:
        _ax.set_ylabel(ylabel)
    if ax is None:
        fig.show()
    return _ax
import tempfile
from collections import Counter
from concurrent.futures import as_completed, ProcessPoolExecutor
from itertools import product
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pystan
from tqdm import tqdm
from .. import config, estimation
from ..preprocessing import aggregation, conversion
def generate_sequence(k, seed=None):
"""Generate a random genome sequence of length `k`.
:param k: length of the sequence
:type k: int
:param seed: random seed, defaults to `None`
:type seed: int, optional
:return: a random sequence
:rtype: str
"""
random.seed(seed)
return ''.join(random.choices(conversion.BASE_COLUMNS, k=k))
def simulate_reads(sequence, p_e, p_c, pi, l=100, n=100, seed=None): # noqa
"""Simulate `n` reads of length `l` from a sequence.
:param sequence: sequence to generate the reads from
:type sequence: str
:param p_e: background specific mutation rate. This is the rate a specific
base mutates to another specific base (i.e. T>C, A>G, ...)
:type p_e: float
:param p_c: T>C mutation rate in labeled RNA
:type p_c: float
:param pi: fraction of labeled RNA
:type pi: float
:param l: length of each read, defaults to `100`
:type l: int, optional
:param n: number of reads to simulate, defaults to `100`
:type n: int, optional
:param seed: random seed, defaults to `None`
:type seed: int, optional
:return: a dataframe with each read as a row and the number of conversions and
base content as the columns
:rtype: pandas.DataFrame
"""
generator = np.random.RandomState(seed)
n_new = int(n * pi)
n_old = n - n_new
contents = []
convs = []
# Generate new sequences
for _ in range(n_new):
i = generator.randint(0, len(sequence) - l) # noqa
subsequence = sequence[i:i + l] # noqa
# Nucleotide content
content = Counter(subsequence)
# Create read with mutations
conv = {conversion: 0 for conversion in conversion.CONVERSION_COLUMNS}
for base in subsequence:
if base == 'T' and generator.random() < p_c:
conv['TC'] += 1
else:
other_bases = [b for b in conversion.BASE_COLUMNS if b != base]
for other_base in random.sample(other_bases, k=len(other_bases)):
bases = f'{base}{other_base}'
if generator.random() < p_e and bases != 'TC':
conv[bases] += 1
break
contents.append(dict(content))
convs.append(conv)
# Generate old sequences
for _ in range(n_old):
i = generator.randint(0, len(sequence) - l) # noqa
subsequence = sequence[i:i + l] # noqa
# Nucleotide content
content = Counter(subsequence)
# Create read with mutations
conv = {conversion: 0 for conversion in conversion.CONVERSION_COLUMNS}
for base in subsequence:
other_bases = [b for b in conversion.BASE_COLUMNS if b != base]
for other_base in random.sample(other_bases, k=len(other_bases)):
if generator.random() < p_e:
conv[f'{base}{other_base}'] += 1
break
contents.append(dict(content))
convs.append(conv)
df_contents = pd.DataFrame(contents)
df_conversions = pd.DataFrame(convs)
df_counts = pd.concat((df_contents, df_conversions),
axis=1)[conversion.COLUMNS].iloc[generator.choice(np.arange(n), size=n,
replace=False)].reset_index(drop=True)
return df_counts
# Process-global model handles. ``initializer`` runs in every
# ProcessPoolExecutor worker and stores the pre-compiled Stan model here so
# it does not have to be pickled per task.
# BUGFIX: this was previously spelled ``__model``, so ``_model`` was never
# defined at import time and ``_simulate`` raised NameError when called in a
# process where the pool initializer had not run.
_model = None
_pi_model = None


def initializer(model):
    """Store the (Stan) model in a process-global for use by ``_simulate``."""
    global _model
    _model = model
def estimate(
df_counts,
p_e,
p_c,
pi,
estimate_p_e=False,
estimate_p_c=False,
estimate_pi=True,
model=None,
nasc=False,
):
"""
"""
# p_e
if estimate_p_e:
if nasc:
with tempfile.NamedTemporaryFile() as tf:
rates_path = aggregation.calculate_mutation_rates(df_counts, tf.name)
df_rates = aggregation.read_rates(rates_path)
with tempfile.NamedTemporaryFile() as tf:
p_e_path = estimation.estimate_p_e_nasc(df_rates, tf.name)
p_e_estimate = estimation.read_p_e(p_e_path)
else:
with tempfile.NamedTemporaryFile() as tf:
p_e_path = estimation.estimate_p_e(df_counts, tf.name)
p_e_estimate = estimation.read_p_e(p_e_path)
else:
p_e_estimate = p_e
# p_c
if estimate_p_c:
df_aggregates = pd.DataFrame(df_counts.groupby(['TC', 'T'], sort=False, observed=True).size())
df_aggregates.columns = ['count']
df_aggregates.reset_index(inplace=True)
df_aggregates.rename(columns={'TC': 'conversion', 'T': 'base'}, inplace=True)
with tempfile.NamedTemporaryFile() as tf:
p_c_path = estimation.estimate_p_c(df_aggregates, p_e_estimate, tf.name, nasc=nasc)
p_c_estimate = estimation.read_p_c(p_c_path)
else:
p_c_estimate = p_c
# pi
if estimate_pi:
df_aggregates = pd.DataFrame(
df_counts[df_counts['GX'] == 'gene_0'].groupby(['TC', 'T'], sort=False, observed=True).size()
)
df_aggregates.columns = ['count']
df_aggregates.reset_index(inplace=True)
df_aggregates = df_aggregates[(df_aggregates[['T', 'count']] > 0).all(axis=1)]
vals = df_aggregates.values
guess = min(max((sum(vals[vals[:, 0] > 0][:, 2]) / sum(vals[:, 2])), 0.01), 0.99)
guess, alpha, beta, pi_estimate = estimation.pi.fit_stan_mcmc(
vals,
p_e_estimate,
p_c_estimate,
guess=guess,
model=model,
)
if nasc:
pi_estimate = estimation.pi.beta_mode(alpha, beta)
else:
guess, alpha, beta, pi_estimate = pi, None, None, pi
return p_e_estimate, p_c_estimate, guess, alpha, beta, pi_estimate
def _simulate(
p_e,
p_c,
pi,
sequence=None,
k=10000,
l=100, # noqa
n=100,
estimate_p_e=False,
estimate_p_c=False,
estimate_pi=True,
seed=None,
model=None,
nasc=False,
):
model = model or _model
pis = pi
ns = n
if isinstance(pi, list) and not isinstance(n, list):
pis = pi
ns = [n] * len(pis)
elif not isinstance(pi, list) and isinstance(n, list):
ns = n
pis = [pi] * len(ns)
elif not isinstance(pi, list) and not isinstance(n, list):
ns = [n]
pis = [pi]
assert len(pis) == len(ns)
dfs = []
for i, (pi, n) in enumerate(zip(pis, ns)):
sequence = sequence or generate_sequence(k, seed=seed)
df_counts = simulate_reads(sequence, p_e, p_c, pi, l=l, n=n, seed=seed)
df_counts['GX'] = f'gene_{i}'
dfs.append(df_counts)
df_counts = pd.concat(dfs, ignore_index=True)
return estimate(
df_counts,
p_e,
p_c,
pis[0],
estimate_p_e=estimate_p_e,
estimate_p_c=estimate_p_c,
estimate_pi=estimate_pi,
model=model,
nasc=nasc,
)
def simulate(
p_e,
p_c,
pi,
sequence=None,
k=10000,
l=100, # noqa
n=100,
n_runs=16,
n_threads=8,
estimate_p_e=False,
estimate_p_c=False,
estimate_pi=True,
model=None,
nasc=False,
):
model = model or pystan.StanModel(file=config.MODEL_PATH, model_name=config.MODEL_NAME)
p_es = []
p_cs = []
guesses = []
alphas = []
betas = []
pis = []
with ProcessPoolExecutor(max_workers=n_threads, initializer=initializer, initargs=(model,)) as executor:
futures = [
executor.submit(
_simulate,
p_e,
p_c,
pi,
sequence=sequence,
k=k,
l=l,
n=n,
estimate_p_e=estimate_p_e,
estimate_p_c=estimate_p_c,
estimate_pi=estimate_pi,
nasc=nasc,
) for _ in range(n_runs)
]
for future in as_completed(futures):
p_e_estimate, p_c_estimate, guess, alpha_estimate, beta_estimate, pi_estimate = future.result()
p_es.append(p_e_estimate)
p_cs.append(p_c_estimate)
guesses.append(guess)
alphas.append(alpha_estimate)
betas.append(beta_estimate)
pis.append(pi_estimate)
return p_es, p_cs, guesses, alphas, betas, pis
def simulate_batch(
p_e,
p_c,
pi,
l, # noqa
n,
estimate_p_e,
estimate_p_c,
estimate_pi,
n_runs,
n_threads,
model,
nasc=False
):
"""Helper function to run simulations in batches.
"""
p_es = p_e
p_cs = p_c
pis = pi
if not isinstance(p_e, list):
p_es = [p_e]
if not isinstance(p_c, list):
p_cs = [p_c]
if not isinstance(pi, list):
pis = [pi]
dfs = []
for p_e, p_c, pi in tqdm(list(product(p_es, p_cs, pis))):
p_e_estimates, p_c_estimates, guesses, alphas, betas, pi_estimates = simulate(
p_e,
p_c,
pi,
l=l,
n=n,
estimate_p_e=estimate_p_e,
estimate_p_c=estimate_p_c,
estimate_pi=estimate_pi,
n_runs=n_runs,
n_threads=n_threads,
model=model,
nasc=nasc,
)
dfs.append(
pd.DataFrame({
'p_e': p_e,
'p_c': p_c,
'pi': pi[0] if isinstance(pi, list) else pi,
'p_e_estimate': p_e_estimates,
'p_c_estimate': p_c_estimates,
'guess': guesses,
'alpha_estimate': alphas,
'beta_estimate': betas,
'pi_estimate': pi_estimates
})
)
return pd.concat(dfs, ignore_index=True)
def plot_estimations(
    X, Y, n_runs, means, truth, ax=None, box=True, tick_decimals=1, title=None, xlabel=None, ylabel=None
):
    """Scatter/box plot of estimated values against true parameter values.

    :param X: x-coordinate (true parameter value) for every run
    :param Y: estimated values, grouped in consecutive chunks of ``n_runs``
    :param n_runs: number of runs per distinct X value
    :param means: Series-like of per-X mean estimates (index = X values)
    :param truth: scalar or iterable of true values, drawn as a red line
    :param ax: existing axes to draw on; a new figure is created when None
    :param box: whether to draw a boxplot behind the scatter
    :param tick_decimals: rounding applied to x tick labels
    :param title: optional axes title
    :param xlabel: optional x-axis label
    :param ylabel: optional y-axis label
    :return: the matplotlib axes drawn on
    """
    if ax is not None:
        _ax = ax
    else:
        fig, _ax = plt.subplots(figsize=(5, 5), tight_layout=True)
    if box:
        X_range = max(X) - min(X)
        _ax.boxplot(
            list(np.array(Y).reshape(-1, n_runs)),
            positions=np.sort(np.unique(X)),
            zorder=-1,
            widths=X_range * 0.05,
            medianprops=dict(c='gray', linewidth=1.5),
            boxprops=dict(facecolor='lightgray', color='gray', linewidth=1.5),
            whiskerprops=dict(c='gray', linewidth=1.5),
            capprops=dict(c='gray', linewidth=1.5),
            patch_artist=True,
            showfliers=False,
        )
    _ax.scatter(X, Y, s=3, label=f'n={n_runs}')
    _ax.scatter(means.index, means.values, s=15, label='mean')
    # Draw the ground truth: y = x when `truth` is iterable, otherwise a
    # horizontal line at the scalar truth value.
    try:
        iter(truth)
        _ax.plot(truth, truth, c='red', linewidth=1, label='truth')
    # BUGFIX: was a bare `except`, which also swallowed unrelated errors
    # (including KeyboardInterrupt) raised while plotting. iter() raises
    # TypeError for non-iterables, which is the case being distinguished.
    except TypeError:
        _ax.plot([min(X), max(X)], [truth, truth], c='red', linewidth=1, label='truth')
    if box:
        _ax.set_xlim(left=min(X) - X_range * 0.1, right=max(X) + X_range * 0.1)
    xticks = np.sort(np.unique(X))
    _ax.set_xticks(xticks)
    _ax.set_xticklabels([f'{round(x, tick_decimals)}' for x in xticks])
    _ax.legend()
    if title:
        _ax.set_title(title)
    if xlabel:
        _ax.set_xlabel(xlabel)
    if ylabel:
        _ax.set_ylabel(ylabel)
    if ax is None:
        fig.show()
    return _ax
import pandas as pd
def data_clean():
    """Load and clean the ClimateChange workbook.

    Steps:
    1. Re-index both sheets by country code.
    2. Drop columns that are not needed.
    3. Replace the non-standard '..' placeholders with NaN and fill gaps.

    :return: cleaned DataFrame with one row per country, the yearly CO2
        emission columns, country info columns and a 'Sum emissions' total.
    """
    # Read the two sheets of the workbook.
    # FIX: `sheetname=` was removed from pandas; the keyword is `sheet_name=`.
    df_data = pd.read_excel("ClimateChange.xlsx", sheet_name='Data')
    df_country = pd.read_excel("ClimateChange.xlsx", sheet_name='Country')
    # --- Data sheet ---
    # Keep only total CO2 emissions (EN.ATM.CO2E.KT) and index by country code.
    df_data_reindex = df_data[df_data['Series code'] == 'EN.ATM.CO2E.KT'].set_index('Country code')
    # Drop descriptive columns that are not needed for the aggregation.
    df_data_drop = df_data_reindex.drop(labels=['Country name', 'Series code', 'Series name', 'SCALE', 'Decimals'], axis=1)
    # Replace the workbook's '..' placeholders with NaN so they can be filled.
    # FIX: `pd.np` was removed from pandas; use a plain float NaN instead.
    df_data_nan = df_data_drop.replace({'..': float('nan')})
    # Fill gaps forward then backward along the year columns.
    # FIX: fillna(method=...) is deprecated; use ffill/bfill directly.
    df_data_fill = df_data_nan.ffill(axis=1).bfill(axis=1)
    # Drop rows that are still entirely empty after filling.
    df_data_dropna = df_data_fill.dropna(how='all')
    # --- Country sheet ---
    # Index by country code.
    df_country_reindex = pd.DataFrame(df_country).set_index('Country code')
    # Drop columns that are not needed.
    df_country_drop = df_country_reindex.drop(labels=['Capital city', 'Region', 'Lending category'], axis=1)
    # --- Merge ---
    # Align the Data and Country tables on the shared country-code index.
    df_combine = pd.concat([df_data_dropna, df_country_drop], axis=1)
    # Total emissions per country (all columns except the two country-info ones).
    df_combine['Sum emissions'] = df_combine[list(df_combine)[:-2]].sum(axis=1)
    # Keep rows with at least 10 non-null values; this is the cleaned dataset.
    df_clean = df_combine.dropna(thresh=10)
    return df_clean
def co2():
    '''Aggregate cleaned emissions data per income group.

    Steps:
    1. Sum emissions per income group with groupby.
    2. Find each group's highest/lowest emitting country and assemble
       the result DataFrame.
    '''
    # Load the cleaned dataset.
    df_clean = data_clean()
    # Total emissions per income group.
    sum_by_groups = df_clean.groupby('Income group')['Sum emissions'].sum()
    # Build the per-group extremes.
    item_high_list = []
    item_low_list = []
    for group_name in list(sum_by_groups.index):
        # Row with the highest emissions in this income group.
        item_high = df_clean[df_clean['Income group'] == group_name].sort_values(by='Sum emissions', ascending=False).iloc[0]
        # Collect (group, country, emissions) for the final DataFrame.
        item_high_list.append((item_high['Income group'], item_high['Country name'], item_high['Sum emissions']))
        # Row with the lowest emissions in this income group.
        item_low = df_clean[df_clean['Income group'] == group_name].sort_values(by='Sum emissions').iloc[0]
        # Collect (group, country, emissions) for the final DataFrame.
        item_low_list.append((item_low['Income group'], item_low['Country name'], item_low['Sum emissions']))
    # Column labels for the result DataFrames.
    high_labels = ['Income group', 'Highest emission country', 'Highest emissions']
    low_labels = ['Income group', 'Lowest emission country', 'Lowest emissions']
    # Build and merge the target DataFrames on the income-group index.
    highest_df = pd.DataFrame.from_records(item_high_list, columns=high_labels).set_index('Income group')
    lowest_df = pd.DataFrame.from_records(item_low_list, columns=low_labels).set_index('Income group')
    results = pd.concat([sum_by_groups, highest_df, lowest_df], axis=1)
return results | Answers/week2-challenge-04/carbon_dioxide.py | import pandas as pd
def data_clean():
    """Load and clean the ClimateChange workbook.

    Steps:
    1. Re-index both sheets by country code.
    2. Drop columns that are not needed.
    3. Replace the non-standard '..' placeholders with NaN and fill gaps.

    :return: cleaned DataFrame with one row per country, the yearly CO2
        emission columns, country info columns and a 'Sum emissions' total.
    """
    # Read the two sheets of the workbook.
    # FIX: `sheetname=` was removed from pandas; the keyword is `sheet_name=`.
    df_data = pd.read_excel("ClimateChange.xlsx", sheet_name='Data')
    df_country = pd.read_excel("ClimateChange.xlsx", sheet_name='Country')
    # --- Data sheet ---
    # Keep only total CO2 emissions (EN.ATM.CO2E.KT) and index by country code.
    df_data_reindex = df_data[df_data['Series code'] == 'EN.ATM.CO2E.KT'].set_index('Country code')
    # Drop descriptive columns that are not needed for the aggregation.
    df_data_drop = df_data_reindex.drop(labels=['Country name', 'Series code', 'Series name', 'SCALE', 'Decimals'], axis=1)
    # Replace the workbook's '..' placeholders with NaN so they can be filled.
    # FIX: `pd.np` was removed from pandas; use a plain float NaN instead.
    df_data_nan = df_data_drop.replace({'..': float('nan')})
    # Fill gaps forward then backward along the year columns.
    # FIX: fillna(method=...) is deprecated; use ffill/bfill directly.
    df_data_fill = df_data_nan.ffill(axis=1).bfill(axis=1)
    # Drop rows that are still entirely empty after filling.
    df_data_dropna = df_data_fill.dropna(how='all')
    # --- Country sheet ---
    # Index by country code.
    df_country_reindex = pd.DataFrame(df_country).set_index('Country code')
    # Drop columns that are not needed.
    df_country_drop = df_country_reindex.drop(labels=['Capital city', 'Region', 'Lending category'], axis=1)
    # --- Merge ---
    # Align the Data and Country tables on the shared country-code index.
    df_combine = pd.concat([df_data_dropna, df_country_drop], axis=1)
    # Total emissions per country (all columns except the two country-info ones).
    df_combine['Sum emissions'] = df_combine[list(df_combine)[:-2]].sum(axis=1)
    # Keep rows with at least 10 non-null values; this is the cleaned dataset.
    df_clean = df_combine.dropna(thresh=10)
    return df_clean
def co2():
'''co2() 函数用于数据统计,大致步骤如下:
1. 使用 groupby 按题目规则求和
2. 对数据进行排序并得到目标 DataFrame
'''
# 读取清洁后数据
df_clean = data_clean()
# 按收入群体对数据进行求和
sum_by_groups = df_clean.groupby('Income group')['Sum emissions'].sum()
# 按要求整理 DataFrame
item_high_list = []
item_low_list = []
for group_name in list(sum_by_groups.index):
# 得到各收入群体最高排放量数据
item_high = df_clean[df_clean['Income group'] == group_name].sort_values(by='Sum emissions', ascending=False).iloc[0]
# 将最高排放量数据存入相应列表方便生成最终 DataFrame
item_high_list.append((item_high['Income group'], item_high['Country name'], item_high['Sum emissions']))
# 得到各收入群体最低排放量数据
item_low = df_clean[df_clean['Income group'] == group_name].sort_values(by='Sum emissions').iloc[0]
# 将最低排放量数据存入相应列表方便生成最终 DataFrame
item_low_list.append((item_low['Income group'], item_low['Country name'], item_low['Sum emissions']))
# 设置 DataFrame 标签
high_labels = ['Income group', 'Highest emission country', 'Highest emissions']
low_labels = ['Income group', 'Lowest emission country', 'Lowest emissions']
# 生成并合并目标 DataFrame
highest_df = pd.DataFrame.from_records(item_high_list, columns=high_labels).set_index('Income group')
lowest_df = pd.DataFrame.from_records(item_low_list, columns=low_labels).set_index('Income group')
results = pd.concat([sum_by_groups, highest_df, lowest_df], axis=1)
return results | 0.218669 | 0.459015 |
import numpy as np
from scipy.optimize import minimize, brute, fmin
from .confidence import (parametric_bootstrap, nonparametric_bootstrap,
delta, contour_walk, increase_bounds,
HomogeneousResult)
from .plotting import plot_probability as pp, plot_confidence_region as pcr
import copy
from .utils import (custom_log, _round, check_bounds, check_diff,
check_success, check_fail)
class Neyer():
    """
    The Neyer model. Given an assumed form for the latent distribution,
    either 'normal', 'logistic', or 'log-logistic', the maximum likelihood
    estimates of the distribution parameters are computed. Neyer also provides
    a sequential design algorithm.
    Parameters
    ----------
    latent : string, optional
        DESCRIPTION. The form of the latent distribution. Either 'normal',
        'logistic', or 'log-logistic'. The default is 'normal'.
    inverted : boolean, optional
        DESCRIPTION. If the probability of a 'go' increases as the stimulus
        level decreases, then the data is 'inverted'. The default is False.
    method : string, optional
        DESCRIPTION. Name of the optimization routine called when computing
        the maximum likelihood estimates. The default is 'L-BFGS-B'.
    num_restarts : int, optional
        DESCRIPTION. The number of random initializations to use when
        maximizing the likelihood function. Note, the available latent
        distributions only use two parameters. Consequently, the resulting
        likelihood function is typically convex. The default is 3.
    t1_min : float, optional
        DESCRIPTION. When using the sequential design algorithm and starting
        with no (or minimal) data, an initial guess on the lower bound of
        the first parameter, theta_1, is required. For the normal and
        logistic distributions theta_1 is mu. For the log-logistic distribution
        theta_1 is alpha. If None is provided and the sequential algorithm
        is called, the program will prompt the user for the value.
        The default is None.
    t1_max : float, optional
        DESCRIPTION. The initial guess for the upper bound of theta_1.
        See t1_min for more details. The default is None.
    t2_guess : float, optional
        DESCRIPTION. The initial guess for theta_2. Required when using the
        sequential design algorithm. See t1_min for more details. For the
        normal and logistic distributions, theta_2 is sigma. For the log-logistic
        distribution, theta_2 is beta. The default is None.
    precision : int, optional
        DESCRIPTION. Number of decimal points to include in the final
        output. The default is 8.
    resolution : float, optional
        DESCRIPTION. The smallest change in stimulus level available. For
        example, a drop-weight apparatus may only have adjustments at
        quarter inch intervals. Thus, the algorithm should not suggest testing
        at 12.105 inches, etc. The default is None.
    lower_bound : float, optional
        DESCRIPTION. The lowest stimulus level a user can physically test.
        The default is None.
    upper_bound : float, optional
        DESCRIPTION. The highest stimulus level a user can physically test.
        The default is None.
    hist : boolean, optional
        DESCRIPTION. If True the determinant of the information matrix is
        computed over a range of stimulus levels at each stage of the
        sequential design. Typically used for debugging only!
        The default is False.
    log_file : str, optional
        DESCRIPTION. File path for a log file. The log consists of the
        steps taken during the sequential design algorithm.
        The default is None.
    """
    # Optimizers accepted for the `method` constructor argument.
    available_opt_methods = ('L-BFGS-B', 'SLSQP', 'TNC')
def __init__(self, latent='normal', inverted=False,
method='L-BFGS-B', num_restarts=3,
t1_min=None, t1_max=None, t2_guess=None,
precision=8, resolution=None,
lower_bound=None, upper_bound=None,
hist=False, log_file=None):
self.inverted = inverted
self.theta = None
self.latent = latent
self.method = method
self.num_restarts = num_restarts
if self.num_restarts < 1:
print('Number of restarts must be greater than or eqaul to 1.')
print('Defaulting to 3.')
self.num_restarts = 3
if self.method not in self.available_opt_methods:
print("""method '{}' not understood.
Defaulting to L-BFGS-B.
Please choose from {}""".format(self.method,
self.available_opt_methods))
self.method = 'L-BFGS-B'
if latent == 'normal':
from .norm_funcs import function_dictionary
elif latent == 'logistic':
from .logistic_funcs import function_dictionary
elif latent == 'log-logistic':
from .log_logistic_funcs import function_dictionary
else:
raise ValueError("""Value for "latent" not understood.
Must be "normal", "logistic", or "log-logistic".""")
self.pred = function_dictionary['pred']
self.opt_config = function_dictionary['opt_config']
self.cost_func = function_dictionary['cost']
self.cost_deriv = function_dictionary['cost_deriv']
self.est_names = function_dictionary['estimate_names']
self.Hessian = function_dictionary['Hessian']
self.cdf_deriv = function_dictionary['cdf_deriv']
self.info = function_dictionary['info']
self.precision = precision
self.start = True
self.binary = True
self.overlap = True
self.mle = True
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.hist = hist
if isinstance(log_file, str):
self.log_file = log_file
file_obj = open(log_file, 'w')
file_obj.close()
if resolution != None:
self.resolution = resolution
if self.hist == True:
self.det_vals = []
self.det_res = []
self.x_pts = []
self.t1_min = t1_min
self.t1_max = t1_max
self.t2_guess = t2_guess
self.X = np.asarray([]).reshape((-1,1))
self.Y = np.asarray([]).reshape((-1,1))
self.theta = np.array([np.nan, np.nan])
self.observed_info = np.empty((2,2))
self.updated = -1
def fit(self, X, Y):
    """Estimate the latent-distribution parameters by maximum likelihood.

    Runs `num_restarts` random initializations of the optimizer and keeps
    the one with the lowest negative log-likelihood.

    Parameters
    ----------
    X : 2D array
        The tested stimulus levels. Must be of shape (n_pts, 1).
    Y : array
        The observed response at each stimulus level: 1 for 'go', 0 for
        'no-go'.

    Returns
    -------
    self
    """
    if X.ndim != 2:
        raise ValueError("X must be of shape [n_examples, 1]")
    if X.shape[0] != Y.shape[0]:
        raise ValueError("""input and output must have the same number of rows!
                         shapes {} and {} do not match.""".format(X.shape, Y.shape))
    Y = Y.reshape((-1, 1))
    self.Y = Y.copy()
    self.X = X
    if self.inverted:
        # Flip responses so the internal model always sees p(go) increasing
        # with the stimulus level.
        Y = np.logical_not(Y).astype(int)
    if check_success(Y) or check_fail(Y):
        raise HomogeneousResult('Need to have positive AND negative responses present in the data in order to call fit.')
    t1_low, t1_high, t2_low, t2_high, bounds = self.opt_config(self.X)
    candidates = []
    for _ in range(self.num_restarts):
        # Random starting point inside the configured box.
        start = np.array([np.random.uniform(t1_low, t1_high),
                          np.random.uniform(t2_low, t2_high)])
        outcome = minimize(self.cost_func, start,
                           args=(self.X, Y),
                           method=self.method,
                           jac=self.cost_deriv,
                           bounds=bounds)
        candidates.append((outcome.fun, outcome.x))
    # Keep the restart with the smallest cost (first on ties, as argmin would).
    best_cost, best_theta = min(candidates, key=lambda pair: pair[0])
    self.theta = best_theta
    self.cost = best_cost
    return self
def get_estimators(self):
"""
Provides access to the stored estimate of theta. For example,
[mu, sigma] or [alpha, beta].
Returns
-------
array
Current parameter estimates. Shape is (2,)
"""
if self.theta is not None:
if check_diff(self.X, self.Y, self.inverted) > 0:
raise Exception('Not enough data to estimate theta.')
return self.theta
else:
raise Exception('Model not yet trained!')
def print_estimators(self, cost=False):
"""
Prints the current parameter estimates to the console.
Parameters
----------
cost : boolean, optional
If true, the value of the negative log-likelihood, or cost, at the
current parameter estimates is also printed to the console.
The default is False.
Returns
-------
None.
"""
if self.theta is not None:
if check_diff(self.X, self.Y, self.inverted) > 0:
raise Exception('Not enough data to estimate theta.')
t1n, t2n = self.est_names()
t1, t2 = self.theta
print('{}: {}\n{}: {}'.format(t1n, t1, t2n, t2))
if cost:
print('cost: {}'.format(self.cost))
else:
raise Exception('Model not yet trained!')
def predict_probability(self, pts=None, confidence=None,
CI_level = [.5, .8, .9, .95],
num_samples=1000, max_iter=5):
"""
Returns the probability of a 'go' at pts. p(y=0|pt)
Parameters
----------
pts : array, optional
The stimulus levels at which to compute probability predictions.
The default is None. If None, range = max(X) - min(X) and
pts = np.linspace(min(X)-0.5*range, max(X)+0.5*range, 100)
confidence : str, optional
The name of the method used to supply confidence intervals.
Options are 'delta', 'perturbation' (same as delta), 'likelihood-ratio',
'parametric-bootstrap', and 'nonparametric-bootstrap'.
The default is None.
CI_level : list, optional
The confidence levels. Ignored if confidence is None.
The default is [.5, .8, .9, .95].
num_samples : int, optional
The number of bootstrapped samples generated. Only used if
confidence = 'parametric-bootstrap' or 'nonparametric=bootstrap'.
The default is 1000.
max_iter : int, optional
The maximum number of attempts to map the likelihood ratio.
Only used if confidence = 'likelihood-ratio'. The default is 5.
Returns
-------
tuple
Consists of the stimulus points, the predicted probability, and
arrays of the lower bounds and upper bounds of the confidence levels
if confidence was requested.
(pts (n_pts, 1), predicted probability (n_pts, 1)) or
(pts (n_pts, 1), predicted probability (n_pts, 1), lower CI bounds, upper CI bounds)
where the shape of lower and upper CI bounds is (n_pts, n_levels)
"""
if self.theta is None:
raise Exception('Model not yet trained!')
if check_diff(self.X, self.Y, self.inverted) > 0:
raise Exception('Not enough data to make a prediction.')
if pts is None:
xmin = np.min(self.X)
xmax = np.max(self.X)
xint = xmax-xmin
xstart = xmin - xint*.05
xend = xmax + xint*.05
pts = np.linspace(xstart, xend, 100)
pts = np.array(pts).reshape((-1,1))
p = self.pred(pts, self.theta, self.inverted)
if confidence is None:
return pts, p
elif confidence == 'parametric-bootstrap':
current_model = copy.deepcopy(self)
lb, ub = parametric_bootstrap(current_model,
pts,
num_samples,
CI_level)
return pts, p, lb, ub
elif confidence == 'nonparametric-bootstrap':
current_model = copy.deepcopy(self)
lb, ub = nonparametric_bootstrap(current_model,
pts,
num_samples,
CI_level)
return pts, p, lb, ub
elif confidence == 'likelihood-ratio':
new_bounds = increase_bounds(self.opt_config(self.X),
'both', 'both')
lb, ub = contour_walk(self, pts, new_bounds, [100],
CI_level, max_iter)
return pts, p, lb, ub
elif confidence == 'delta' or confidence == 'perturbation':
lb, ub = delta(self,
pts,
num_samples,
CI_level, p)
return pts, p, lb, ub
else:
ci_methods = [None, 'parametric-bootstrap',
'nonparametric-bootstrap', 'likelihood-ratio',
'delta', 'perturbation']
raise ValueError("confidence '{}' not understood.\nPlease choose from {}".format(confidence, ci_methods))
def plot_probability(self, include_data=True, xlabel=None, ylabel=None,
                     alpha=1.0, save_dst=None, show=True, **kwargs):
    """Predict probabilities via predict_probability and plot the curve.

    Parameters
    ----------
    include_data : boolean, optional
        Whether to also plot the observed stimuli/responses.
        The default is True.
    xlabel : str, optional
        Text for the plot x-label, if given. The default is None.
    ylabel : str, optional
        Text for the plot y-label, if given. The default is None.
    alpha : float, optional
        Opacity (0-1) of the observed data points; only used when
        include_data is True. The default is 1.0.
    save_dst : str, optional
        File path (including file type) to save the plot to.
        The default is None.
    show : boolean, optional
        When True, calls matplotlib.plt.show(); may be required for some
        IDEs. The default is True.
    **kwargs :
        Forwarded keyword arguments accepted by predict_probability.

    Returns
    -------
    None.
    """
    # Delegate all of the actual drawing to the plotting module.
    pp(self, include_data, xlabel, ylabel,
       alpha, save_dst, show, **kwargs)
def plot_confidence_region(self, limits, n, CI_levels=10,
save_dst=None, show=True):
"""
A high-level function to plot the confidence region of the parameters.
Parameters
----------
limits : list
The plot limits provided as [lower xlim, upper xlim, lower ylim, upper ylim].
n : int or list of length 2
The number locations to sample in the x (theta_1) and y (theta_2) directions.
CI_levels : int or list, optional
If an integer, a filled contour plot will be produced with that
many levels. If it is a list, the list values specify the confidence
levels at which to draw contour lines. The default is 10.
save_dst : str, optional
The file path (including file type) where the plot should be saved.
The default is None
show : boolean, optional
If True, simply calls matplotlib.plt.show(). May be required for
some IDEs. The default is True.
Returns
-------
None.
"""
if self.theta is None:
raise Exception('Model not yet trained!')
if check_diff(self.X, self.Y, self.inverted) > 0:
raise Exception('Not enough data to make a prediction.')
pcr(self, limits, n, CI_levels, save_dst, show)
def __prompt_input(self):
"""
If the sequential design algorithm is used and if there is 1) insufficent
data or 2) t1_min, t1_max, and t2_guess were not specifed, then prompt
the user for those values. Used internally. Should not be called.
Returns
-------
None.
"""
t1n, t2n = self.est_names()
self.t1_min = float(input('Lower bound guess for {}: '.format(t1n)))
self.t1_max = float(input('Upper bound guess for {}: '.format(t1n)))
self.t2_guess = float(input('Initial guess for {}: '.format(t2n)))
def __max_info(self, theta):
def det(level):
X_test = np.vstack((self.X, level))
info = self.info(X_test, theta[0], theta[1])
return -1*(info[0][0] * info[1][1] - info[0][1] * info[1][0])
ranges = self.max_s - self.min_s
if self.lower_bound == None and self.upper_bound == None:
res = brute(det, ((self.min_s - .5*ranges, self.max_s + .5*ranges),),
Ns=100, finish=fmin)
else:
if self.lower_bound == None:
lb = self.min_s - ranges
else: lb = self.lower_bound
if self.upper_bound == None:
ub = self.min_s + ranges
else: ub = self.upper_bound
res = brute(det, ((lb, ub),),
Ns=100, finish=fmin)
if self.hist:
if self.lower_bound == None:
x_pts = np.linspace(self.min_s - 2.5*ranges,
self.max_s + 2.5*ranges,
500)
else:
x_pts = np.linspace(self.lower_bound - .1 * ranges,
self.upper_bound + .1 * ranges,
500)
self.x_pts.append(x_pts)
d_res = []
for i in x_pts:
d_res.append(-1*det(np.asarray(i)))
self.det_vals.append(d_res)
self.det_res.append(float(res))
return float(res)
def __check_initial_theta(self):
if self.t1_max <= self.t1_min:
raise ValueError('t1_max cannot be less than t1_min!')
elif self.t2_guess <= 0:
raise ValueError('t2_guess must be positive!')
def next_pt(self):
    """Sequential design: print/return the next suggested stimulus level.

    The algorithm moves through three phases:
    1. binary search, until the 'go' and 'no-go' regions are within
       t2_guess of each other;
    2. overlap search, until overlapping responses are observed;
    3. maximum likelihood, which fits the model and suggests the level
       that maximizes the determinant of the information matrix.

    Returns
    -------
    float
        The next stimulus level to test.
    """
    Y = self.Y.copy().astype(bool)
    if self.inverted:
        # Internally treat responses as increasing with stimulus level.
        Y = np.logical_not(Y)
    if self.start:
        self.start = False
        if self.X.size == 0:
            # No data yet: start halfway between the user's t1 guesses.
            custom_log(self, 'Starting Sequential Algorithm with No Data', True)
            if (self.t1_min is None) or (self.t1_max is None) or (self.t2_guess is None):
                self.__prompt_input()
            self.__check_initial_theta()
            self.nx = _round(self, (self.t1_min + self.t1_max) / 2.)
            check_bounds(self, self.nx)
            custom_log(self, 'Next Point Requested: {}'.format(self.nx))
            self.updated = 0
            return self.nx
        else:
            diff = check_diff(self.X, self.Y, self.inverted)
            if diff > 0:
                # Overlap not yet achieved: the initial guesses are still
                # required before the search phases can run.
                if (self.t1_min is None) or (self.t1_max is None) or (self.t2_guess is None):
                    print("""Even though data has been provided, overlap has not been achieved.
                          In this case it is necessary to provide parameters for t1_min, t1_max, and t2_guess.
                          """)
                    self.__prompt_input()
                    self.__check_initial_theta()
                    return self.next_pt()
            else:
                # Overlap already present in the supplied data: skip the
                # search phases and go straight to maximum likelihood.
                self.binary = False
                self.overlap = False
                return self.next_pt()
    if self.X.size > self.updated:
        self.updated = self.X.size
    else:
        # No new data since the last suggestion: repeat it.
        return self.nx
    if self.binary:
        self.max_s = np.max(self.X)
        self.min_s = np.min(self.X)
        custom_log(self, 'In Binary Search Section', True)
        custom_log(self, 'Min Stimlus: {}'.format(self.min_s))
        custom_log(self, 'Max Stimulus: {}'.format(self.max_s))
        # all success case
        if Y.size == np.sum(Y):
            custom_log(self, 'In All Success Section', True)
            t1 = (self.t1_min + self.min_s) / 2.
            t2 = self.min_s - 2. * self.t2_guess
            t3 = 2. * self.min_s - self.max_s
            # BUG FIX: was min(t1, t2, t2) — t3 was computed but never
            # used (the all-failure case below uses max(t1, t2, t3)).
            self.nx = _round(self, min(t1, t2, t3))
            check_bounds(self, self.nx)
            custom_log(self, 'Next Point Requested: {}'.format(self.nx))
            return self.nx
        # all failure case
        if np.sum(Y) == 0:
            custom_log(self, 'In All Failure Section', True)
            t1 = (self.t1_max + self.max_s) / 2.
            t2 = self.max_s + 2. * self.t2_guess
            t3 = 2. * self.max_s - self.min_s
            self.nx = _round(self, max(t1, t2, t3))
            check_bounds(self, self.nx)
            custom_log(self, 'Next Point Requested: {}'.format(self.nx))
            return self.nx
        # Mixed responses: bisect the gap between the lowest 'go' and the
        # highest 'no-go' until the gap is at most t2_guess.
        self.min_go = np.min(self.X[Y])
        self.max_no = np.max(self.X[np.logical_not(Y)])
        self.diff = round(self.min_go - self.max_no, self.precision)
        custom_log(self, 'Min Go: {}'.format(self.min_go))
        custom_log(self, 'Max No-Go: {}'.format(self.max_no))
        custom_log(self, 'Difference: {}'.format(self.diff))
        custom_log(self, 'Theta 2 guess: {}'.format(self.t2_guess))
        if self.diff > self.t2_guess:
            self.nx = _round(self, (self.max_no + self.min_go) / 2.)
            check_bounds(self, self.nx)
            custom_log(self, 'Next Point Requested: {}'.format(self.nx))
            return self.nx
        else:
            self.binary = False
    if self.overlap:
        custom_log(self, 'In Overlap Search Section', True)
        self.min_go = np.min(self.X[Y])
        self.max_no = np.max(self.X[np.logical_not(Y)])
        self.diff = round(self.min_go - self.max_no, self.precision)
        custom_log(self, 'Min Go: {}'.format(self.min_go))
        custom_log(self, 'Max No-Go: {}'.format(self.max_no))
        custom_log(self, 'Difference: {}'.format(self.diff))
        custom_log(self, 'Theta 2 guess: {}'.format(self.t2_guess))
        if self.diff > self.t2_guess:
            custom_log(self, 'Reverting Back to Binary Search', True)
            self.binary = True
            self.updated = -1
            return self.next_pt()
        if self.diff < 0:
            custom_log(self, '--- Overlap Achieved! ---', True)
            self.overlap = False
        else:
            # Use provisional estimates (midpoint and shrinking t2 guess)
            # to pick the most informative next level.
            self.theta[0] = (self.max_no + self.min_go) / 2.
            self.theta[1] = self.t2_guess
            custom_log(self, 'Maximize Determinate With...')
            t1n, t2n = self.est_names()
            custom_log(self, '{}: {}'.format(t1n, self.theta[0]))
            custom_log(self, '{}: {}'.format(t2n, self.theta[1]))
            self.nx = _round(self, self.__max_info(self.theta))
            self.t2_guess *= 0.8
            check_bounds(self, self.nx)
            custom_log(self, 'Next Point Requested: {}'.format(self.nx))
            return self.nx
    if self.mle:
        custom_log(self, 'In Maximum Liklihood Section', True)
        # BUG FIX: builtin max/min on the (n, 1) stimulus matrix returned
        # 1-element arrays; np.max/np.min yield scalars as in the binary
        # search section above.
        self.max_s = np.max(self.X)
        self.min_s = np.min(self.X)
        custom_log(self, 'Min Stimlus: {}'.format(self.min_s))
        custom_log(self, 'Max Stimulus: {}'.format(self.max_s))
        self.fit(self.X, self.Y)
        t1n, t2n = self.est_names()
        custom_log(self, 'Estimated {}: {}'.format(t1n, self.theta[0]))
        custom_log(self, 'Estimated {}: {}'.format(t2n, self.theta[1]))
        # Clamp the estimates to the tested range before maximizing the
        # information determinant.
        self.theta[0] = max(self.min_s, min(self.theta[0], self.max_s))
        self.theta[1] = min(self.theta[1], self.max_s - self.min_s)
        custom_log(self, 'Bounded Estimated {}: {}'.format(t1n, self.theta[0]))
        custom_log(self, 'Bounded Estimated {}: {}'.format(t2n, self.theta[1]))
        self.nx = _round(self, self.__max_info(self.theta))
        check_bounds(self, self.nx)
        custom_log(self, 'Next Point Requested: {}'.format(self.nx))
        return self.nx
def post_test_outcome(self, res, pt):
"""
Append a stimulus level and result to the existing data.
Parameters
----------
res : int or boolean
The observed result at the tested stimulus level. Either 0, 1 or
False, True.
pt : float
The stimulus level at which the test was performed.
Returns
-------
None.
"""
if isinstance(res, bool) or (res == 0) or (res == 1):
self.X = np.vstack((self.X, pt))
custom_log(self, 'Tested Points: \n {}'.format(self.X.flatten()))
self.Y = np.vstack((self.Y, int(res)))
custom_log(self, 'Test Results: \n {}'.format(self.Y.flatten()))
else:
raise ValueError('Result must be \{0, 1\} or \{True, False\}!')
def loop(self, iterations=1000000):
    """Run the interactive sequential-testing loop.

    Repeatedly suggests the next test level, reads the actually-tested
    level and its outcome from the console, and records the result. The
    loop ends when the user types 'end' at either prompt, or after
    `iterations` passes — useful when a fixed number of specimens is
    available.

    Parameters
    ----------
    iterations : int, optional
        End the loop automatically after this many iterations.
        The default is 1000000.

    Returns
    -------
    None.
    """
    print('-'*50)
    print("""If the level at which the test is performed is the same as the
          suggested level, then the user can simply press enter (no need for input)
          when queried about the test level.""")
    print('\n')
    # Typo fix in the user-facing instructions ("abou" -> "about").
    print("""When the user does not wish to test any more levels,
          input "end" (without quotes) when queried about the next test.""")
    print('-'*50)
    print('\n')
    for _ in range(iterations):
        nx = self.next_pt()
        print('Specimen number: {}'.format(self.X.size + 1))
        print('The next suggested test point is: {}'.format(nx))
        pt = input('Please input the level at which the test was performed: ')
        # Strip all whitespace and normalize case before interpreting.
        pt = "".join(pt.split()).lower()
        if pt == 'end':
            break
        elif pt == '':
            # Empty input means the suggested level was used as-is.
            pt = nx
        else:
            try:
                pt = float(pt)
            except ValueError:
                # Narrowed from a bare except: float() on a str can only
                # raise ValueError here.
                print("Input level '{}' not understood. Try again. Type 'end' to terminate loop.".format(pt))
                continue
        res = input('Please input the result: ')
        res = "".join(res.split()).lower()
        print('\n')
        if res == 'true' or res == '1':
            self.post_test_outcome(1, pt)
        elif res == 'false' or res == '0':
            self.post_test_outcome(0, pt)
        elif res == '':
            # No result entered: discard this specimen and continue.
            pass
        elif res == 'end':
            break
        else:
            print("Result value '{}' not understood. Input must be 0 or False for a negative response and 1 or True for a positive response. Boolean inputs are not case sensitive. Try again. Type 'end' during input query to terminate loop.".format(res))
import numpy as np
from scipy.optimize import minimize, brute, fmin
from .confidence import (parametric_bootstrap, nonparametric_bootstrap,
delta, contour_walk, increase_bounds,
HomogeneousResult)
from .plotting import plot_probability as pp, plot_confidence_region as pcr
import copy
from .utils import (custom_log, _round, check_bounds, check_diff,
check_success, check_fail)
class Neyer():
"""
The Neyer model. Given an assumed form for the latent distribution,
either 'normal', 'logistic', or 'log-logistic', the maximum likelihood
estimates of the distribution parameters are computed. Neyer also provides
a sequential design algorithm.
Parameters
----------
latent : string, optional
DESCRIPTION. The form of the latent distribution. Either 'normal',
'logistic', or 'log-logistic'. The default is 'normal'.
inverted : boolean, optional
DESCRIPTION. If the probability of a 'go' increases as the stimulus
level decreases, then the data is 'inverted'. The default is False.
method : string, optional
DESCRIPTION. Name of the optimization routine called when computing
the maximum likelihood estimates. The default is 'L-BFGS-B'.
num_restarts : int, optional
DESCRIPTION. The number of random initializations to use when
maximizing the likelihood function. Note, the available latent
distributions only use two parameters. Consequently, the resulting
likelihood function is typically convex. The default is 3.
t1_min : float, optional
DESCRIPTION. When using the sequential design algorithm and starting
with no (or minimal) data, an initial guess on the lower bound of
the first parameter, theta_1, is required. For the normal and
logistic distributions theta_1 is mu. For the log-logistic distribution
theta_1 is alpha. If None is provided and the sequential algorithm
is called, the program will prompt the user for the value.
The default is None.
t1_max : float, optional
DESCRIPTION. The initial guess for the upper bound of theta_1.
See t1_min for more details. The default is None.
t2_guess : float, optional
DESCRIPTION. The initial guess for theta_2. Required when using the
sequential design algorithm. See t1_min for more details. For the
normal and logistic distributions, theta_2 is sigma. For the log-logistic
distribution, theta_2 is beta. The default is None.
precision : int, optional
DESCRIPTION. Number of decimal points to include in the final
output. The default is 8.
resolution : float, optional
DESCRIPTION. The smallest change in stimulus level available. For
example, a drop-weight apparatus may only have adjustments at
quarter inch intervals. Thus, the algorithm should not suggest testing
at 12.105 inches, etc. The default is None.
lower_bound : float, optional
DESCRIPTION. The lowest stimulus level a user can physically test.
The default is None.
upper_bound : float, optional
DESCRIPTION. The highest stimulus level a user can physically test.
The default is None.
hist : boolean, optional
DESCRIPTION. If True the determinant of the information matrix is
computed over a range of stimulus levels at each stage of the
sequential design. Typically used for debugging only!
The default is False.
log_file : str, optional
DESCRIPTION. File path for a log file. The log consists of the
steps taken during the sequential design algorithm.
The default is None.
"""
available_opt_methods = ('L-BFGS-B', 'SLSQP', 'TNC')
def __init__(self, latent='normal', inverted=False,
method='L-BFGS-B', num_restarts=3,
t1_min=None, t1_max=None, t2_guess=None,
precision=8, resolution=None,
lower_bound=None, upper_bound=None,
hist=False, log_file=None):
self.inverted = inverted
self.theta = None
self.latent = latent
self.method = method
self.num_restarts = num_restarts
if self.num_restarts < 1:
print('Number of restarts must be greater than or eqaul to 1.')
print('Defaulting to 3.')
self.num_restarts = 3
if self.method not in self.available_opt_methods:
print("""method '{}' not understood.
Defaulting to L-BFGS-B.
Please choose from {}""".format(self.method,
self.available_opt_methods))
self.method = 'L-BFGS-B'
if latent == 'normal':
from .norm_funcs import function_dictionary
elif latent == 'logistic':
from .logistic_funcs import function_dictionary
elif latent == 'log-logistic':
from .log_logistic_funcs import function_dictionary
else:
raise ValueError("""Value for "latent" not understood.
Must be "normal", "logistic", or "log-logistic".""")
self.pred = function_dictionary['pred']
self.opt_config = function_dictionary['opt_config']
self.cost_func = function_dictionary['cost']
self.cost_deriv = function_dictionary['cost_deriv']
self.est_names = function_dictionary['estimate_names']
self.Hessian = function_dictionary['Hessian']
self.cdf_deriv = function_dictionary['cdf_deriv']
self.info = function_dictionary['info']
self.precision = precision
self.start = True
self.binary = True
self.overlap = True
self.mle = True
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.hist = hist
if isinstance(log_file, str):
self.log_file = log_file
file_obj = open(log_file, 'w')
file_obj.close()
if resolution != None:
self.resolution = resolution
if self.hist == True:
self.det_vals = []
self.det_res = []
self.x_pts = []
self.t1_min = t1_min
self.t1_max = t1_max
self.t2_guess = t2_guess
self.X = np.asarray([]).reshape((-1,1))
self.Y = np.asarray([]).reshape((-1,1))
self.theta = np.array([np.nan, np.nan])
self.observed_info = np.empty((2,2))
self.updated = -1
def fit(self, X, Y):
"""
Compute the maximum likelihood estimates of the distribution parameters.
Parameters
----------
X : 2D array
The tested stimulus levels. Must be of shape (n_pts, 1)
Y : array
The observed response at each stimulus level. 1 for 'go' and 0
for 'no-go'.
Returns
-------
self
"""
if X.ndim != 2:
raise ValueError("X must be of shape [n_examples, 1]")
if X.shape[0] != Y.shape[0]:
raise ValueError("""input and output must have the same number of rows!
shapes {} and {} do not match.""".format(X.shape, Y.shape))
Y = Y.reshape((-1,1))
self.Y = Y.copy()
self.X = X
if self.inverted:
Y = np.logical_not(Y).astype(int)
if check_success(Y) or check_fail(Y):
raise HomogeneousResult('Need to have positive AND negative responses present in the data in order to call fit.')
thetas = []
costs = []
t1_low, t1_high, t2_low, t2_high, bounds = self.opt_config(self.X)
for i in range(self.num_restarts):
theta_0 = [np.random.uniform(t1_low, t1_high),
np.random.uniform(t2_low, t2_high)]
theta_0 = np.array(theta_0)
res = minimize(self.cost_func, theta_0,
args = (self.X, Y),
method=self.method,
jac=self.cost_deriv,
bounds=bounds)
thetas.append(res.x)
costs.append(res.fun)
thetas = np.asarray(thetas)
costs = np.asarray(costs)
best_run = np.argmin(costs)
self.theta = thetas[best_run]
self.cost = costs[best_run]
return self
def get_estimators(self):
"""
Provides access to the stored estimate of theta. For example,
[mu, sigma] or [alpha, beta].
Returns
-------
array
Current parameter estimates. Shape is (2,)
"""
if self.theta is not None:
if check_diff(self.X, self.Y, self.inverted) > 0:
raise Exception('Not enough data to estimate theta.')
return self.theta
else:
raise Exception('Model not yet trained!')
def print_estimators(self, cost=False):
"""
Prints the current parameter estimates to the console.
Parameters
----------
cost : boolean, optional
If true, the value of the negative log-likelihood, or cost, at the
current parameter estimates is also printed to the console.
The default is False.
Returns
-------
None.
"""
if self.theta is not None:
if check_diff(self.X, self.Y, self.inverted) > 0:
raise Exception('Not enough data to estimate theta.')
t1n, t2n = self.est_names()
t1, t2 = self.theta
print('{}: {}\n{}: {}'.format(t1n, t1, t2n, t2))
if cost:
print('cost: {}'.format(self.cost))
else:
raise Exception('Model not yet trained!')
def predict_probability(self, pts=None, confidence=None,
CI_level = [.5, .8, .9, .95],
num_samples=1000, max_iter=5):
"""
Returns the probability of a 'go' at pts. p(y=0|pt)
Parameters
----------
pts : array, optional
The stimulus levels at which to compute probability predictions.
The default is None. If None, range = max(X) - min(X) and
pts = np.linspace(min(X)-0.5*range, max(X)+0.5*range, 100)
confidence : str, optional
The name of the method used to supply confidence intervals.
Options are 'delta', 'perturbation' (same as delta), 'likelihood-ratio',
'parametric-bootstrap', and 'nonparametric-bootstrap'.
The default is None.
CI_level : list, optional
The confidence levels. Ignored if confidence is None.
The default is [.5, .8, .9, .95].
num_samples : int, optional
The number of bootstrapped samples generated. Only used if
confidence = 'parametric-bootstrap' or 'nonparametric=bootstrap'.
The default is 1000.
max_iter : int, optional
The maximum number of attempts to map the likelihood ratio.
Only used if confidence = 'likelihood-ratio'. The default is 5.
Returns
-------
tuple
Consists of the stimulus points, the predicted probability, and
arrays of the lower bounds and upper bounds of the confidence levels
if confidence was requested.
(pts (n_pts, 1), predicted probability (n_pts, 1)) or
(pts (n_pts, 1), predicted probability (n_pts, 1), lower CI bounds, upper CI bounds)
where the shape of lower and upper CI bounds is (n_pts, n_levels)
"""
if self.theta is None:
raise Exception('Model not yet trained!')
if check_diff(self.X, self.Y, self.inverted) > 0:
raise Exception('Not enough data to make a prediction.')
if pts is None:
xmin = np.min(self.X)
xmax = np.max(self.X)
xint = xmax-xmin
xstart = xmin - xint*.05
xend = xmax + xint*.05
pts = np.linspace(xstart, xend, 100)
pts = np.array(pts).reshape((-1,1))
p = self.pred(pts, self.theta, self.inverted)
if confidence is None:
return pts, p
elif confidence == 'parametric-bootstrap':
current_model = copy.deepcopy(self)
lb, ub = parametric_bootstrap(current_model,
pts,
num_samples,
CI_level)
return pts, p, lb, ub
elif confidence == 'nonparametric-bootstrap':
current_model = copy.deepcopy(self)
lb, ub = nonparametric_bootstrap(current_model,
pts,
num_samples,
CI_level)
return pts, p, lb, ub
elif confidence == 'likelihood-ratio':
new_bounds = increase_bounds(self.opt_config(self.X),
'both', 'both')
lb, ub = contour_walk(self, pts, new_bounds, [100],
CI_level, max_iter)
return pts, p, lb, ub
elif confidence == 'delta' or confidence == 'perturbation':
lb, ub = delta(self,
pts,
num_samples,
CI_level, p)
return pts, p, lb, ub
else:
ci_methods = [None, 'parametric-bootstrap',
'nonparametric-bootstrap', 'likelihood-ratio',
'delta', 'perturbation']
raise ValueError("confidence '{}' not understood.\nPlease choose from {}".format(confidence, ci_methods))
    def plot_probability(self, include_data=True, xlabel=None, ylabel=None,
                         alpha=1.0, save_dst=None, show=True, **kwargs):
        """
        A high-level method to call self.predict_probability and plot the result.

        Parameters
        ----------
        include_data : boolean, optional
            Whether or not to plot the data (stimuli and responses).
            The default is True.
        xlabel : str, optional
            If provided, the text for the plot xlabel. The default is None.
        ylabel : str, optional
            If provided, the text for the plot ylabel. The default is None.
        alpha : float, optional
            opacity of the observed data points. Must be between 0 and 1.
            Only used if include_data is True. Useful to visualize many overlapping
            data points. The default is 1.0.
        save_dst : str, optional
            The file path (including file type) where the plot should be saved.
            The default is None.
        show : boolean, optional
            If True, simply calls matplotlib.plt.show(). May be required for
            some IDEs. The default is True.
        **kwargs :
            All keyword arguments provided to predict_probability can also be
            provided here.

        Returns
        -------
        None.
        """
        # All prediction and drawing work is delegated to the module-level
        # helper ``pp`` (imported elsewhere in this file).
        pp(self, include_data, xlabel, ylabel,
           alpha, save_dst, show, **kwargs)
    def plot_confidence_region(self, limits, n, CI_levels=10,
                               save_dst=None, show=True):
        """
        A high-level function to plot the confidence region of the parameters.

        Parameters
        ----------
        limits : list
            The plot limits provided as [lower xlim, upper xlim, lower ylim, upper ylim].
        n : int or list of length 2
            The number of locations to sample in the x (theta_1) and y (theta_2) directions.
        CI_levels : int or list, optional
            If an integer, a filled contour plot will be produced with that
            many levels. If it is a list, the list values specify the confidence
            levels at which to draw contour lines. The default is 10.
        save_dst : str, optional
            The file path (including file type) where the plot should be saved.
            The default is None
        show : boolean, optional
            If True, simply calls matplotlib.plt.show(). May be required for
            some IDEs. The default is True.

        Raises
        ------
        Exception
            If the model has not been fit yet, or there is not enough data.

        Returns
        -------
        None.
        """
        # Guard: a confidence region is only meaningful for a fitted model
        # with overlapping go/no-go data.
        if self.theta is None:
            raise Exception('Model not yet trained!')
        if check_diff(self.X, self.Y, self.inverted) > 0:
            raise Exception('Not enough data to make a prediction.')
        # Plotting is delegated to the module-level helper ``pcr``.
        pcr(self, limits, n, CI_levels, save_dst, show)
def __prompt_input(self):
"""
If the sequential design algorithm is used and if there is 1) insufficent
data or 2) t1_min, t1_max, and t2_guess were not specifed, then prompt
the user for those values. Used internally. Should not be called.
Returns
-------
None.
"""
t1n, t2n = self.est_names()
self.t1_min = float(input('Lower bound guess for {}: '.format(t1n)))
self.t1_max = float(input('Upper bound guess for {}: '.format(t1n)))
self.t2_guess = float(input('Initial guess for {}: '.format(t2n)))
def __max_info(self, theta):
def det(level):
X_test = np.vstack((self.X, level))
info = self.info(X_test, theta[0], theta[1])
return -1*(info[0][0] * info[1][1] - info[0][1] * info[1][0])
ranges = self.max_s - self.min_s
if self.lower_bound == None and self.upper_bound == None:
res = brute(det, ((self.min_s - .5*ranges, self.max_s + .5*ranges),),
Ns=100, finish=fmin)
else:
if self.lower_bound == None:
lb = self.min_s - ranges
else: lb = self.lower_bound
if self.upper_bound == None:
ub = self.min_s + ranges
else: ub = self.upper_bound
res = brute(det, ((lb, ub),),
Ns=100, finish=fmin)
if self.hist:
if self.lower_bound == None:
x_pts = np.linspace(self.min_s - 2.5*ranges,
self.max_s + 2.5*ranges,
500)
else:
x_pts = np.linspace(self.lower_bound - .1 * ranges,
self.upper_bound + .1 * ranges,
500)
self.x_pts.append(x_pts)
d_res = []
for i in x_pts:
d_res.append(-1*det(np.asarray(i)))
self.det_vals.append(d_res)
self.det_res.append(float(res))
return float(res)
def __check_initial_theta(self):
if self.t1_max <= self.t1_min:
raise ValueError('t1_max cannot be less than t1_min!')
elif self.t2_guess <= 0:
raise ValueError('t2_guess must be positive!')
def next_pt(self):
"""
The sequential design algorithm. When this method is called, the next
suggested stimulus level for testing is printed to the console.
Returns
-------
self
"""
Y = self.Y.copy().astype(bool)
if self.inverted:
Y = np.logical_not(Y)
if self.start:
self.start = False
if self.X.size == 0:
custom_log(self, 'Starting Sequential Algorithm with No Data', True)
if (self.t1_min == None) or (self.t1_max == None) or (self.t2_guess == None):
self.__prompt_input()
self.__check_initial_theta()
self.nx = _round(self, (self.t1_min + self.t1_max) / 2.)
check_bounds(self, self.nx)
custom_log(self, 'Next Point Requested: {}'.format(self.nx))
self.updated = 0
return self.nx
else:
diff = check_diff(self.X, self.Y, self.inverted)
if diff > 0:
if (self.t1_min == None) or (self.t1_max == None) or (self.t2_guess == None):
print("""Even though data has been provided, overlap has not been achieved.
In this case it is necessary to provide parameters for t1_min, t1_max, and t2_guess.
""")
self.__prompt_input()
self.__check_initial_theta()
return self.next_pt()
else:
self.binary = False
self.overlap = False
return self.next_pt()
else:
if self.X.size > self.updated:
self.updated = self.X.size
else:
return self.nx
if self.binary:
self.max_s = np.max(self.X)
self.min_s = np.min(self.X)
custom_log(self, 'In Binary Search Section', True)
custom_log(self, 'Min Stimlus: {}'.format(self.min_s))
custom_log(self, 'Max Stimulus: {}'.format(self.max_s))
# all success case
if Y.size == np.sum(Y):
custom_log(self, 'In All Success Section', True)
t1 = (self.t1_min + self.min_s) / 2.
t2 = self.min_s - 2. * self.t2_guess
t3 = 2. * self.min_s - self.max_s
self.nx = _round(self, min(t1, t2, t2))
check_bounds(self, self.nx)
custom_log(self, 'Next Point Requested: {}'.format(self.nx))
return self.nx
# all failure case
if np.sum(Y) == 0:
custom_log(self, 'In All Failure Section', True)
t1 = (self.t1_max + self.max_s) / 2.
t2 = self.max_s + 2. * self.t2_guess
t3 = 2. * self.max_s - self.min_s
self.nx = _round(self, max(t1, t2, t3))
check_bounds(self, self.nx)
custom_log(self, 'Next Point Requested: {}'.format(self.nx))
return self.nx
self.min_go = np.min(self.X[Y])
self.max_no = np.max(self.X[np.logical_not(Y)])
self.diff = round(self.min_go - self.max_no, self.precision)
custom_log(self, 'Min Go: {}'.format(self.min_go))
custom_log(self, 'Max No-Go: {}'.format(self.max_no))
custom_log(self, 'Difference: {}'.format(self.diff))
custom_log(self, 'Theta 2 guess: {}'.format(self.t2_guess))
if self.diff > self.t2_guess:
self.nx = _round(self, (self.max_no + self.min_go) / 2.)
check_bounds(self, self.nx)
custom_log(self, 'Next Point Requested: {}'.format(self.nx))
return self.nx
else:
self.binary = False
if self.overlap:
custom_log(self, 'In Overlap Search Section', True)
self.min_go = np.min(self.X[Y])
self.max_no = np.max(self.X[np.logical_not(Y)])
self.diff = round(self.min_go - self.max_no, self.precision)
custom_log(self, 'Min Go: {}'.format(self.min_go))
custom_log(self, 'Max No-Go: {}'.format(self.max_no))
custom_log(self, 'Difference: {}'.format(self.diff))
custom_log(self, 'Theta 2 guess: {}'.format(self.t2_guess))
if self.diff > self.t2_guess:
custom_log(self, 'Reverting Back to Binary Search', True)
self.binary = True
self.updated = -1
return self.next_pt()
if self.diff < 0:
custom_log(self, '--- Overlap Achieved! ---', True)
self.overlap = False
else:
self.theta[0] = (self.max_no + self.min_go) / 2.
self.theta[1] = self.t2_guess
custom_log(self, 'Maximize Determinate With...')
t1n, t2n = self.est_names()
custom_log(self, '{}: {}'.format(t1n, self.theta[0]))
custom_log(self, '{}: {}'.format(t2n, self.theta[1]))
self.nx = _round(self, self.__max_info(self.theta))
self.t2_guess *= 0.8
check_bounds(self, self.nx)
custom_log(self, 'Next Point Requested: {}'.format(self.nx))
return self.nx
if self.mle:
custom_log(self, 'In Maximum Liklihood Section', True)
self.max_s = max(self.X)
self.min_s = min(self.X)
custom_log(self, 'Min Stimlus: {}'.format(self.min_s))
custom_log(self, 'Max Stimulus: {}'.format(self.max_s))
self.fit(self.X, self.Y)
t1n, t2n = self.est_names()
custom_log(self, 'Estimated {}: {}'.format(t1n, self.theta[0]))
custom_log(self, 'Estimated {}: {}'.format(t2n, self.theta[1]))
self.theta[0] = max(self.min_s, min(self.theta[0], self.max_s))
self.theta[1] = min(self.theta[1], self.max_s - self.min_s)
custom_log(self, 'Bounded Estimated {}: {}'.format(t1n, self.theta[0]))
custom_log(self, 'Bounded Estimated {}: {}'.format(t2n, self.theta[1]))
self.nx = _round(self, self.__max_info(self.theta))
check_bounds(self, self.nx)
custom_log(self, 'Next Point Requested: {}'.format(self.nx))
return self.nx
def post_test_outcome(self, res, pt):
"""
Append a stimulus level and result to the existing data.
Parameters
----------
res : int or boolean
The observed result at the tested stimulus level. Either 0, 1 or
False, True.
pt : float
The stimulus level at which the test was performed.
Returns
-------
None.
"""
if isinstance(res, bool) or (res == 0) or (res == 1):
self.X = np.vstack((self.X, pt))
custom_log(self, 'Tested Points: \n {}'.format(self.X.flatten()))
self.Y = np.vstack((self.Y, int(res)))
custom_log(self, 'Test Results: \n {}'.format(self.Y.flatten()))
else:
raise ValueError('Result must be \{0, 1\} or \{True, False\}!')
def loop(self, iterations=1000000):
"""
This method suggests new test levels and accepts user input to calculate
maximum likelihood estimates. That is, this method constitutes a loop.
Loop will continue indefinitely until 'end' is received as user input
during the either the test level or result input queries. Alternatively,
if a set number of specimens is to be used then the number of loops can
be specified with the 'iterations' keyword argument.
Parameters
----------
iterations : int, optional
End the loop automatically after n iterations. The default is 1000000.
Returns
-------
None.
"""
print('-'*50)
print("""If the level at which the test is performed is the same as the
suggested level, then the user can simply press enter (no need for input)
when queried about the test level.""")
print('\n')
print("""When the user does not wish to test any more levels,
input "end" (without quotes) when queried abou the next test.""")
print('-'*50)
print('\n')
for _ in range(iterations):
nx = self.next_pt()
print('Specimen number: {}'.format(self.X.size + 1))
print('The next suggested test point is: {}'.format(nx))
pt = input('Please input the level at which the test was performed: ')
pt = "".join(pt.split()).lower()
if pt == 'end':
break
elif pt == '':
pt = nx
else:
try:
pt = float(pt)
except:
print("Input level '{}' not understood. Try again. Type 'end' to terminate loop.".format(pt))
continue
res = input('Please input the result: ')
res = "".join(res.split()).lower()
print('\n')
if res == 'true' or res == '1':
self.post_test_outcome(1, pt)
elif res == 'false' or res == '0':
self.post_test_outcome(0, pt)
elif res == '':
pass
elif res == 'end':
break
else:
print("Result value '{}' not understood. Input must be 0 or False for a negative response and 1 or True for a positive response. Boolean inputs are not case sensitive. Try again. Type 'end' during input query to terminate loop.".format(res)) | 0.893379 | 0.538923 |
from datetime import datetime
from typing import Optional
import attr
@attr.dataclass
class Sanitasi:
    """
    School sanitation ("sanitasi") record.

    NOTE(review): field semantics are inferred from the Indonesian field
    names and appear to mirror an upstream API payload (most values arrive
    as strings even when numeric) — confirm against the API documentation.
    """
    # school identity and reporting period
    sekolah_id: str
    semester_id: str
    # water sources, availability and drinking water handling
    sumber_air_id: str
    sumber_air_minum_id: str
    ketersediaan_air: str
    kecukupan_air: str
    minum_siswa: str
    memproses_air: str
    siswa_bawa_air: str
    # student toilets and latrine ("jamban") counts
    toilet_siswa_laki: str
    toilet_siswa_perempuan: str
    toilet_siswa_kk: str
    toilet_siswa_kecil: str
    jml_jamban_l_g: str
    jml_jamban_l_tg: str
    jml_jamban_p_g: str
    jml_jamban_p_tg: str
    jml_jamban_lp_g: str
    jml_jamban_lp_tg: str
    # hand-washing facilities and hygiene provisions
    tempat_cuci_tangan: str
    tempat_cuci_tangan_rusak: str
    a_sabun_air_mengalir: str
    jamban_difabel: str
    tipe_jamban: str
    a_sedia_pembalut: str
    kegiatan_cuci_tangan: Optional[str]
    # waste water and garbage handling
    pembuangan_air_limbah: Optional[str]
    a_kuras_septitank: Optional[str]
    a_memiliki_solokan: Optional[str]
    a_tempat_sampah_kelas: str
    a_tempat_sampah_tutup_p: str
    a_cermin_jamban_p: str
    a_memiliki_tps: str
    a_tps_angkut_rutin: str
    a_anggaran_sanitasi: str
    a_melibatkan_sanitasi_siswa: str
    # sanitation partnerships (government, health centre, private, NGO)
    a_kemitraan_san_daerah: Optional[str]
    a_kemitraan_san_puskesmas: Optional[str]
    a_kemitraan_san_swasta: Optional[str]
    a_kemitraan_san_non_pem: Optional[str]
    # KIE (information/education/communication) materials per location
    kie_guru_cuci_tangan: Optional[str]
    kie_guru_haid: Optional[str]
    kie_guru_perawatan_toilet: Optional[str]
    kie_guru_keamanan_pangan: Optional[str]
    kie_guru_minum_air: Optional[str]
    kie_kelas_cuci_tangan: Optional[str]
    kie_kelas_haid: Optional[str]
    kie_kelas_perawatan_toilet: Optional[str]
    kie_kelas_keamanan_pangan: Optional[str]
    kie_kelas_minum_air: Optional[str]
    kie_toilet_cuci_tangan: Optional[str]
    kie_toilet_haid: Optional[str]
    kie_toilet_perawatan_toilet: Optional[str]
    kie_toilet_keamanan_pangan: Optional[str]
    kie_toilet_minum_air: Optional[str]
    kie_selasar_cuci_tangan: Optional[str]
    kie_selasar_haid: Optional[str]
    kie_selasar_perawatan_toilet: Optional[str]
    kie_selasar_keamanan_pangan: Optional[str]
    kie_selasar_minum_air: Optional[str]
    kie_uks_cuci_tangan: Optional[str]
    kie_uks_haid: Optional[str]
    kie_uks_perawatan_toilet: Optional[str]
    kie_uks_keamanan_pangan: Optional[str]
    kie_uks_minum_air: Optional[str]
    kie_kantin_cuci_tangan: Optional[str]
    kie_kantin_haid: Optional[str]
    kie_kantin_perawatan_toilet: Optional[str]
    kie_kantin_keamanan_pangan: Optional[str]
    kie_kantin_minum_air: Optional[str]
    # record bookkeeping / sync metadata
    create_date: datetime
    last_update: datetime
    soft_delete: str
    last_sync: datetime
    updater_id: str
    sekolah_id_str: str
    semester_id_str: str
sanitasi_id: str | dapodik/sekolah/sanitasi.py | from datetime import datetime
from typing import Optional
import attr
@attr.dataclass
class Sanitasi:
sekolah_id: str
semester_id: str
sumber_air_id: str
sumber_air_minum_id: str
ketersediaan_air: str
kecukupan_air: str
minum_siswa: str
memproses_air: str
siswa_bawa_air: str
toilet_siswa_laki: str
toilet_siswa_perempuan: str
toilet_siswa_kk: str
toilet_siswa_kecil: str
jml_jamban_l_g: str
jml_jamban_l_tg: str
jml_jamban_p_g: str
jml_jamban_p_tg: str
jml_jamban_lp_g: str
jml_jamban_lp_tg: str
tempat_cuci_tangan: str
tempat_cuci_tangan_rusak: str
a_sabun_air_mengalir: str
jamban_difabel: str
tipe_jamban: str
a_sedia_pembalut: str
kegiatan_cuci_tangan: Optional[str]
pembuangan_air_limbah: Optional[str]
a_kuras_septitank: Optional[str]
a_memiliki_solokan: Optional[str]
a_tempat_sampah_kelas: str
a_tempat_sampah_tutup_p: str
a_cermin_jamban_p: str
a_memiliki_tps: str
a_tps_angkut_rutin: str
a_anggaran_sanitasi: str
a_melibatkan_sanitasi_siswa: str
a_kemitraan_san_daerah: Optional[str]
a_kemitraan_san_puskesmas: Optional[str]
a_kemitraan_san_swasta: Optional[str]
a_kemitraan_san_non_pem: Optional[str]
kie_guru_cuci_tangan: Optional[str]
kie_guru_haid: Optional[str]
kie_guru_perawatan_toilet: Optional[str]
kie_guru_keamanan_pangan: Optional[str]
kie_guru_minum_air: Optional[str]
kie_kelas_cuci_tangan: Optional[str]
kie_kelas_haid: Optional[str]
kie_kelas_perawatan_toilet: Optional[str]
kie_kelas_keamanan_pangan: Optional[str]
kie_kelas_minum_air: Optional[str]
kie_toilet_cuci_tangan: Optional[str]
kie_toilet_haid: Optional[str]
kie_toilet_perawatan_toilet: Optional[str]
kie_toilet_keamanan_pangan: Optional[str]
kie_toilet_minum_air: Optional[str]
kie_selasar_cuci_tangan: Optional[str]
kie_selasar_haid: Optional[str]
kie_selasar_perawatan_toilet: Optional[str]
kie_selasar_keamanan_pangan: Optional[str]
kie_selasar_minum_air: Optional[str]
kie_uks_cuci_tangan: Optional[str]
kie_uks_haid: Optional[str]
kie_uks_perawatan_toilet: Optional[str]
kie_uks_keamanan_pangan: Optional[str]
kie_uks_minum_air: Optional[str]
kie_kantin_cuci_tangan: Optional[str]
kie_kantin_haid: Optional[str]
kie_kantin_perawatan_toilet: Optional[str]
kie_kantin_keamanan_pangan: Optional[str]
kie_kantin_minum_air: Optional[str]
create_date: datetime
last_update: datetime
soft_delete: str
last_sync: datetime
updater_id: str
sekolah_id_str: str
semester_id_str: str
sanitasi_id: str | 0.723993 | 0.275733 |
from pyspark.sql import SparkSession
from pyspark import SparkContext
from pyspark.sql.types import StructType, StructField, StringType, IntegerType
from pyspark.sql.functions import udf, col, from_json, flatten, explode, desc, count
from datetime import datetime
import argparse
def getChart(df):
    """
    Flatten the nested patient/visit/hospital structs into chart columns.

    :param df: input DataFrame with nested ``patient``, ``hospital``,
        ``treatment`` and ``visit.date`` struct columns
    :return: DataFrame with one flat row per visit, sorted by patient_ID
        descending
    """
    df = df.withColumn("patient_ID", df["patient.patientID"])
    df = df.withColumn("patient_age", df["patient.age"])
    df = df.withColumn("patient_sex", df["patient.sex"])
    df = df.withColumn("year", df["visit.date.year"])
    df = df.withColumn("month", df["visit.date.month"])
    df = df.withColumn("day", df["visit.date.day"])
    df = df.withColumn("hospital_name", df["hospital.hospitalID"])
    df = df.withColumn("treatment_name", df["treatment.name"])
    df = df.select(
        df["patient_ID"],
        df["patient_age"],
        df["patient_sex"],
        df["hospital_name"],
        df["doctor"],
        df["treatment_name"],
        df["year"],
        df["month"],
        df["day"],
    # BUG FIX: orderBy previously referenced "patient", the struct column
    # dropped by the select above; sort by the flattened key instead.
    ).orderBy("patient_ID", ascending=False)
    return df
def getReceipt(df):
    """
    Flatten the nested structs into receipt columns (hospital, visit date,
    patient, treatment and payment information).

    :param df: input DataFrame with nested ``patient``, ``hospital``,
        ``treatment`` and ``visit.date`` struct columns
    :return: DataFrame with one flat receipt row per visit, sorted by
        patient_ID descending
    """
    df = df.withColumn("hospital_name", df["hospital.hospitalID"])
    df = df.withColumn("hospital_address", df["hospital.address"])
    df = df.withColumn("hospital_contact", df["hospital.contact"])
    df = df.withColumn("year", df["visit.date.year"])
    df = df.withColumn("month", df["visit.date.month"])
    df = df.withColumn("day", df["visit.date.day"])
    df = df.withColumn("patient_ID", df["patient.patientID"])
    df = df.withColumn("treatment_name", df["treatment.name"])
    df = df.withColumn("treatment_price", df["treatment.price"])
    df = df.select(
        df["hospital_name"],
        df["hospital_address"],
        df["hospital_contact"],
        df["year"],
        df["month"],
        df["day"],
        df["patient_ID"],
        df["treatment_name"],
        df["treatment_price"],
        df["payment"],
    # BUG FIX: orderBy previously referenced "patient", the struct column
    # dropped by the select above; sort by the flattened key instead.
    ).orderBy("patient_ID", ascending=False)
    return df
if __name__=="__main__":
    sc = SparkContext("local", "etl")
    spark = SparkSession.builder.appName("etl process").getOrCreate()
    # Expected input schema.
    # NOTE(review): this schema is declared but never passed to spark.read;
    # the reader currently infers the schema from the JSON data. Add
    # .schema(schema) to the read below if strict typing is desired.
    schema = StructType([
        StructField("patient", StructType([
            StructField("patientID", StringType(), True), StructField("age", IntegerType(), True), StructField("sex", StringType(), True)]), True),
        StructField("doctor", StringType(), True),
        StructField("hospital", StructType([
            StructField("hospitalID", StringType(), True), StructField("address", StringType(), True), StructField("contact", StringType(), True)]), True),
        StructField("treatment", StructType([
            StructField("name", StringType(), True), StructField("price", IntegerType(), True)]), True),
        StructField("visit", StructType([
            StructField("date", StructType([
                StructField("year", IntegerType(), True), StructField("month", IntegerType(), True), StructField("day", IntegerType(), True)]), True)]), True),
        StructField("payment", StringType(), True)])
    # command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--target", help="target info", default="chart")
    parser.add_argument("-if", "--inputformat", help="input format", default="json")
    parser.add_argument("-of", "--outputformat", help="output format", default="parquet")
    parser.add_argument("-o", "--output", help="output path", default="parquet")
    args = parser.parse_args()
    df = spark.read.format(args.inputformat).load("./data")
    spark.sparkContext.setLogLevel("ERROR")
    print("Original Data")
    df.show(50)
    df.printSchema()
    if args.target == "chart":
        df = getChart(df)
        # patients per doctor
        df1 = df.groupBy("doctor").agg(count("patient_ID")).withColumnRenamed("count(patient_ID)", "patient_num")
    elif args.target == "receipt":
        df = getReceipt(df)
        # revenue per hospital
        df1 = df.groupBy("hospital_name").sum("treatment_price").withColumnRenamed("sum(treatment_price)", "profit").\
            withColumnRenamed("hospital_name", "hospital")
        print("Hospital Profit")
    else:
        # BUG FIX: an unknown target previously fell through and crashed
        # later with a NameError on df1; fail fast with a clear message.
        raise ValueError("unknown target '{}', expected 'chart' or 'receipt'".format(args.target))
    print("Parsed Data")
    df.show(50)
    df.printSchema()
    print("Filtered Data")
    df1.show(50)
    df1.printSchema()
    # NOTE(review): this persists the flattened df, not the aggregated df1 —
    # confirm which dataset is meant to be written out.
    df.coalesce(1).write.format(args.outputformat).mode("overwrite").save(args.output)
from pyspark import SparkContext
from pyspark.sql.types import StructType, StructField, StringType, IntegerType
from pyspark.sql.functions import udf, col, from_json, flatten, explode, desc, count
from datetime import datetime
import argparse
def getChart(df):
df=df.withColumn("patient_ID",df["patient.patientID"])
df=df.withColumn("patient_age",df["patient.age"])
df=df.withColumn("patient_sex",df["patient.sex"])
df=df.withColumn("year",df["visit.date.year"])
df=df.withColumn("month",df["visit.date.month"])
df=df.withColumn("day",df["visit.date.day"])
df=df.withColumn("hospital_name",df["hospital.hospitalID"])
df=df.withColumn("treatment_name",df["treatment.name"])
df = df.select(
df["patient_ID"],
df["patient_age"],
df["patient_sex"],
df["hospital_name"],
df["doctor"],
df["treatment_name"],
df["year"],
df["month"],
df["day"],
).orderBy("patient", ascending=False)
return df
def getReceipt(df):
df=df.withColumn("hospital_name",df["hospital.hospitalID"])
df=df.withColumn("hospital_address",df["hospital.address"])
df=df.withColumn("hospital_contact",df["hospital.contact"])
df=df.withColumn("year",df["visit.date.year"])
df=df.withColumn("month",df["visit.date.month"])
df=df.withColumn("day",df["visit.date.day"])
df=df.withColumn("patient_ID",df["patient.patientID"])
df=df.withColumn("treatment_name",df["treatment.name"])
df=df.withColumn("treatment_price",df["treatment.price"])
df = df.select(
df["hospital_name"],
df["hospital_address"],
df["hospital_contact"],
df["year"],
df["month"],
df["day"],
df["patient_ID"],
df["treatment_name"],
df["treatment_price"],
df["payment"],
).orderBy("patient", ascending=False)
return df
if __name__=="__main__":
sc = SparkContext("local","etl")
spark=SparkSession.builder.appName("etl process").getOrCreate()
schema = StructType([
StructField("patient", StructType([
StructField("patientID", StringType(), True), StructField("age", IntegerType(), True), StructField("sex", StringType(), True)]), True),
StructField("doctor", StringType(), True),
StructField("hospital", StructType([
StructField("hospitalID", StringType(), True), StructField("address", StringType(), True), StructField("contact", StringType(), True)]), True),
StructField("treatment", StructType([
StructField("name", StringType(), True), StructField("price", IntegerType(), True)]), True),
StructField("visit", StructType([
StructField("date", StructType([
StructField("year", IntegerType(), True), StructField("month", IntegerType(), True), StructField("day", IntegerType(), True)]), True)]), True),
StructField("payment", StringType(), True)])
#인자
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--target", help="target info", default="chart")
parser.add_argument("-if","--inputformat", help="input format", default="json")
parser.add_argument("-of", "--outputformat", help="output format", default="parquet")
parser.add_argument("-o", "--output", help="output path", default="parquet")
args = parser.parse_args()
df = spark.read.format(args.inputformat).load("./data")
spark.sparkContext.setLogLevel("ERROR")
print("Original Data")
df.show(50)
df.printSchema()
if args.target == "chart":
df = getChart(df)
df1 = df.groupBy("doctor").agg(count("patient_ID")).withColumnRenamed("count(patient_ID)", "patient_num")
elif args.target == "receipt":
df = getReceipt(df)
df1 = df.groupBy("hospital_name").sum("treatment_price").withColumnRenamed("sum(treatment_price)", "profit").\
withColumnRenamed("hospital_name", "hospital")
print("Hospital Profit")
print("Parsed Data")
df.show(50)
df.printSchema()
print("Filtered Data")
df1.show(50)
df1.printSchema()
df.coalesce(1).write.format(args.outputformat).mode("overwrite").save(args.output) | 0.590425 | 0.286762 |
import codecs
import io
import os
import re
import shutil
import typing as tp
__all__ = ['read_re_sub_and_write', 'find_files', 'split', 'read_in_file', 'write_to_file',
'write_out_file_if_different', 'make_noncolliding_name', 'try_unlink',
'DevNullFilelikeObject', 'read_lines']
from satella.coding.recast_exceptions import silence_excs
from satella.coding.typing import Predicate
SEPARATORS = {'\\', '/'}
SEPARATORS.add(os.path.sep)
class DevNullFilelikeObject:
    """
    A file-like sink that discards everything written to it, modelled on
    /dev/null. Reusable until :meth:`close` is called; afterwards every
    read/write/flush raises ValueError.
    """
    __slots__ = 'is_closed',

    def __init__(self):
        self.is_closed = False

    def read(self, byte_count: tp.Optional[int] = None):
        """
        Reading is never supported.

        :raises ValueError: this object has been closed
        :raises io.UnsupportedOperation: since reading from this is forbidden
        """
        if self.is_closed:
            raise ValueError('Reading from closed /dev/null!')
        raise io.UnsupportedOperation('read')

    def write(self, x: tp.Union[str, bytes]) -> int:
        """
        Swallow the given data, reporting it as fully written.

        :raises ValueError: this object has been closed
        :return: length of written content
        """
        if not self.is_closed:
            return len(x)
        raise ValueError('Writing to closed /dev/null!')

    def flush(self) -> None:
        """
        A no-op while open.

        :raises ValueError: when this object has been closed
        """
        if self.is_closed:
            raise ValueError('flush of closed file')

    def close(self) -> None:
        """
        Close this stream. Further write()s and flush()es will raise a ValueError.
        Safe to invoke multiple times.
        """
        self.is_closed = True

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
def _has_separator(path: str) -> bool:
    """
    Return True if *path* still contains a path separator.

    Three-character Windows drive roots such as 'C:/' or 'C:\\\\' are treated
    as separator-free so callers iterating with os.path.split terminate.
    """
    # handle Windows case
    if len(path) == 3 and (path.endswith(':/') or path.endswith(':\\')):
        return False
    return any(sep in path for sep in SEPARATORS)
def read_lines(path: str, delete_empty_lines: bool = True,
               encoding: str = 'utf-8') -> tp.List[str]:
    """
    Read a text file and return its lines, stripped of end-of-line characters
    and surrounding whitespace. Empty lines are dropped unless
    delete_empty_lines is set to False.

    :param path: path of file to read
    :param delete_empty_lines: set to False if empty lines are not to be removed
    :param encoding: encoding to read the file with
    :return: each line as a separate entry
    """
    with codecs.open(path, 'r', encoding) as f_in:
        stripped = [line.strip() for line in f_in.readlines()]
    if not delete_empty_lines:
        return stripped
    return [line for line in stripped if line]
def make_noncolliding_name(path: str,
                           exists_checker: Predicate[str] = os.path.exists) -> str:
    """
    Produce a file name that does not collide with an existing one.

    Counters '.1', '.2', '.3', ... are inserted right before the extension
    (yielding e.g. test.1.txt), or appended at the end of the file name when
    there is no extension, until exists_checker reports the name as free.

    :param path: path of the file that has not to exist
    :param exists_checker: a callable to check with if the file exists
    :return: name mutated in such a way that exists_checker returned False on it
    """
    directory, filename = os.path.split(path)
    stem, dot, ext = filename.rpartition('.')
    if dot:
        extension = '.' + ext
    else:
        stem, extension = filename, ''
    counter = 0
    candidate = os.path.join(directory, stem + extension)
    while exists_checker(candidate):
        counter += 1
        candidate = os.path.join(directory, '{}.{}{}'.format(stem, counter, extension))
    return candidate
def split(path: str) -> tp.List[str]:
    """
    An exact reverse of os.path.join.

    It is true that

    >>> os.path.join(*split(a)) == a

    :param path: path to decompose
    :return: list of path components
    """
    # Repeatedly split off the head while it still contains a separator,
    # accumulating the tail components on the right.
    data = list(os.path.split(path))
    while _has_separator(data[0]):
        data = list(os.path.split(data[0])) + data[1:]
    return data
def write_to_file(path: str, data: tp.Union[bytes, str],
                  encoding: tp.Optional[str] = None) -> None:
    """
    Write provided content as a file, applying given encoding (or data is bytes, if none given)

    :param path: Path to put the file under
    :param data: Data to write. Must be bytes if no encoding is given, str otherwise
    :param encoding: Encoding. Default is None, which means no encoding (bytes will be written)
    """
    # A context manager guarantees the handle is closed even if write()
    # raises, replacing the previous manual try/finally.
    if encoding is None:
        with open(path, 'wb') as file:
            file.write(data)
    else:
        with codecs.open(path, 'wb', encoding) as file:
            file.write(data)
class _NOTSET:
    """Sentinel type marking "no default supplied" for read_in_file."""
    ...
def read_in_file(path: str, encoding: tp.Optional[str] = None,
                 default: tp.Optional[tp.Union[bytes, str]] = _NOTSET) -> tp.Union[bytes, str]:
    """
    Opens a file for reading, reads it in, converts to given encoding (or returns as bytes
    if not given), and closes it.

    :param path: path of file to read
    :param encoding: optional encoding. If default, this will be returned as bytes
    :param default: value to return when the file does not exist (or is a
        directory). If not given, a FileNotFoundError will be raised instead.
    :return: file content, either decoded as a str, or not as bytes
    :raises FileNotFoundError: file did not exist and default was not set
    """
    # A directory is treated the same as a missing file.
    if os.path.isdir(path):
        if default is not _NOTSET:
            return default
        raise FileNotFoundError('%s found and is a directory' % (path,))
    try:
        # Context managers replace the previous manual try/finally close.
        if encoding is None:
            with open(path, 'rb') as file:
                return file.read()
        with codecs.open(path, 'r', encoding) as file:
            return file.read()
    except FileNotFoundError:
        if default is not _NOTSET:
            return default
        raise
def read_re_sub_and_write(path: str, pattern: tp.Union[tp.Pattern, str],
                          repl: tp.Union[str, tp.Callable[[tp.Match], str]]) -> None:
    """
    Read a text file, treat with re.sub and write the contents.

    Note that this is not thread or multiprocess safe.

    :param path: path of file to treat
    :param pattern: regexp compiled pattern or a string, a pattern to match the file contents
    :param repl: string or a callable(re.Match)->str to replace the contents
    """
    # BUG FIX: the annotations previously said Union[re.compile, str]
    # (re.compile is a function, not a type) and a one-armed
    # Union[Callable[[Any], str]] that omitted the documented str option.
    with open(path, 'r') as f_in:
        data = f_in.read()
    if isinstance(pattern, str):
        data = re.sub(pattern, repl, data)
    else:
        data = pattern.sub(repl, data)
    with open(path, 'w') as f_out:
        f_out.write(data)
@silence_excs(OSError, returns=False)
def try_unlink(path: str) -> bool:
    """
    Delete a file or directory tree, reporting success instead of raising.

    Roughly a syntactic sugar for:

    >>> try:
    >>>     os.unlink(path)
    >>>     return True
    >>> except OSError:
    >>>     return False

    Note that the decorator silences every OSError (not just
    FileNotFoundError) and reports the deletion as False in that case.
    If path is a directory, rmtree from shutil will be called on it instead
    of os.unlink.

    :param path: path of file to delete
    :return: whether the deletion happened
    """
    if os.path.isdir(path):
        shutil.rmtree(path)
    else:
        os.unlink(path)
    return True
def _cond_join(prefix: tp.Optional[str], filename: str) -> str:
"""or a conditional os.path.join"""
if prefix is None:
return filename
else:
return os.path.join(prefix, filename)
def find_files(path: str, wildcard: str = r'(.*)',
               prefix_with: tp.Optional[str] = None,
               scan_subdirectories: bool = True,
               apply_wildcard_to_entire_path: bool = False,
               prefix_with_path: bool = True) -> tp.Iterator[str]:
    """
    Look at given path's files and all subdirectories and return an iterator of
    file names (paths included) that conform to given wildcard.

    Note that wildcard is only applied to the file name if apply_wildcard_to_entire_path
    is False, else the wildcard is applied to entire path (including the application of
    prefix_with!).

    Files will be additionally prefixed with path, but only if prefix_with_path is True

    .. warning:: Note that this will try to match only the start of the path. For a complete match
        remember to put a $ at the end of the string!

    :param path: path to look into.
    :param wildcard: a regular expression to match
    :param prefix_with: an optional path component to prefix before the filename with os.path.join
    :param scan_subdirectories: whether to scan subdirectories
    :param apply_wildcard_to_entire_path: whether to take the entire relative path into account
        when checking wildcard
    :param prefix_with_path: whether to add path to the resulting path
    :return: paths with the files. They will be relative paths, relative to path
    """
    if prefix_with_path:
        prefix_with = _cond_join(prefix_with, path)
    for filename in os.listdir(path):
        if scan_subdirectories and os.path.isdir(os.path.join(path, filename)):
            new_prefix = _cond_join(prefix_with, filename)
            # BUG FIX: apply_wildcard_to_entire_path was not forwarded to the
            # recursive call, so it silently reset to False for every
            # subdirectory.
            yield from find_files(os.path.join(path, filename), wildcard,
                                  prefix_with=new_prefix,
                                  apply_wildcard_to_entire_path=apply_wildcard_to_entire_path,
                                  prefix_with_path=False)
        else:
            if apply_wildcard_to_entire_path:
                fn_path = _cond_join(prefix_with, filename)
            else:
                fn_path = filename
            if re.match(wildcard, fn_path):
                yield _cond_join(prefix_with, filename)
def write_out_file_if_different(path: str, data: tp.Union[bytes, str],
                                encoding: tp.Optional[str] = None) -> bool:
    """
    Syntactic sugar for

    >>> try:
    >>>     if read_in_file(path, encoding) != data:
    >>>         write_to_file(path, data, encoding)
    >>>         return True
    >>>     else:
    >>>         return False
    >>> except FileNotFoundError:
    >>>     write_to_file(path, data, encoding)
    >>>     return True

    :param path: Path to put the file under
    :param data: Data to write. Must be bytes if no encoding is given, str otherwise
    :param encoding: Encoding. Default is None, which means no encoding (bytes will be written)
    :return: if write has happened
    """
    try:
        # Only rewrite the file when the stored content actually differs,
        # avoiding needless mtime churn.
        if read_in_file(path, encoding) != data:
            write_to_file(path, data, encoding)
            return True
        else:
            return False
    except FileNotFoundError:
        # Missing file: create it unconditionally.
        write_to_file(path, data, encoding)
        return True
import io
import os
import re
import shutil
import typing as tp
# Public API of this module.
__all__ = ['read_re_sub_and_write', 'find_files', 'split', 'read_in_file', 'write_to_file',
           'write_out_file_if_different', 'make_noncolliding_name', 'try_unlink',
           'DevNullFilelikeObject', 'read_lines']

from satella.coding.recast_exceptions import silence_excs
from satella.coding.typing import Predicate

# Path separator characters recognized by _has_separator()/split();
# os.path.sep is added in case the platform uses something unusual.
SEPARATORS = {'\\', '/'}
SEPARATORS.add(os.path.sep)
class DevNullFilelikeObject:
    """
    A /dev/null file-like sink that discards everything written to it.

    May be reused any number of times until :meth:`close` is called.
    """
    __slots__ = ('is_closed',)

    def __init__(self):
        # Becomes True once close() has been called.
        self.is_closed = False

    def _check_open(self, message: str) -> None:
        """Raise ValueError with *message* if this object has been closed."""
        if self.is_closed:
            raise ValueError(message)

    def read(self, byte_count: tp.Optional[int] = None):
        """
        Always fails: reading from /dev/null is not supported.

        :raises ValueError: this object has been closed
        :raises io.UnsupportedOperation: since reading from this is forbidden
        """
        self._check_open('Reading from closed /dev/null!')
        raise io.UnsupportedOperation('read')

    def write(self, x: tp.Union[str, bytes]) -> int:
        """
        Discard any amount of bytes.

        :raises ValueError: this object has been closed
        :return: length of written content
        """
        self._check_open('Writing to closed /dev/null!')
        return len(x)

    def flush(self) -> None:
        """
        No-op.

        :raises ValueError: when this object has been closed
        """
        self._check_open('flush of closed file')

    def close(self) -> None:
        """
        Close this stream. Further write()s and flush()es will raise a ValueError.
        No-op if invoked multiple times.
        """
        self.is_closed = True

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
def _has_separator(path: str) -> bool:
    """
    Tell whether *path* contains a path separator.

    A bare Windows drive root such as ``C:/`` or ``C:\\`` is reported as
    having no separator, so that split() can terminate on it.
    """
    # Windows drive roots are exactly three characters long.
    if len(path) == 3 and path.endswith((':/', ':\\')):
        return False
    return any(sep in path for sep in SEPARATORS)
def read_lines(path: str, delete_empty_lines: bool = True,
               encoding: str = 'utf-8') -> tp.List[str]:
    """
    Read a text file and return its lines with surrounding whitespace
    (including end-of-line characters) stripped, optionally dropping
    empty lines.

    :param path: path of file to read
    :param delete_empty_lines: set to False if empty lines are not to be removed
    :param encoding: encoding to read the file with
    :return: each line as a separate entry
    """
    with codecs.open(path, 'r', encoding) as f_in:
        stripped = (line.strip() for line in f_in)
        if delete_empty_lines:
            return [line for line in stripped if line]
        return list(stripped)
def make_noncolliding_name(path: str,
                           exists_checker: Predicate[str] = os.path.exists) -> str:
    """
    Mutate *path* until *exists_checker* reports it as nonexistent.

    Successive suffixes ``.1``, ``.2``, ... are inserted right before the
    extension (yielding ``test.1.txt``) or appended at the end of the file
    name if the extension isn't present.

    :param path: path of the file that has not to exist
    :param exists_checker: a callable to check with if the file exists
    :return: name mutated in such a way that exists_checker returned False on it
    """
    directory, base = os.path.split(path)
    if '.' in base:
        # Everything after the last dot counts as the extension.
        stem, _, ext = base.rpartition('.')
        extension = '.' + ext
    else:
        stem, extension = base, ''
    counter = 0
    candidate = os.path.join(directory, stem + extension)
    while exists_checker(candidate):
        counter += 1
        candidate = os.path.join(directory, stem + '.' + str(counter) + extension)
    return candidate
def split(path: str) -> tp.List[str]:
    """
    An exact reverse of os.path.join.

    It is true that
    >>> os.path.join(*split(a)) == a
    """
    head, tail = os.path.split(path)
    parts = [tail]
    # Keep peeling components off the head until no separator remains.
    while _has_separator(head):
        head, tail = os.path.split(head)
        parts.append(tail)
    parts.append(head)
    parts.reverse()
    return parts
def write_to_file(path: str, data: tp.Union[bytes, str],
                  encoding: tp.Optional[str] = None) -> None:
    """
    Write provided content as a file, applying given encoding
    (or data is bytes, if none given).

    :param path: Path to put the file under
    :param data: Data to write. Must be bytes if no encoding is given, str otherwise
    :param encoding: Encoding. Default is None, which means no encoding (bytes will be written)
    """
    if encoding is None:
        handle = open(path, 'wb')
    else:
        handle = codecs.open(path, 'wb', encoding)
    # The with-block guarantees the handle is closed even if write() raises.
    with handle:
        handle.write(data)
class _NOTSET:
    # Sentinel type used as the default of read_in_file()'s ``default``
    # parameter, so an explicit ``default=None`` can be distinguished from
    # "no default given".
    ...
def read_in_file(path: str, encoding: tp.Optional[str] = None,
                 default: tp.Optional[tp.Union[bytes, str]] = _NOTSET) -> tp.Union[bytes, str]:
    """
    Opens a file for reading, reads it in, converts to given encoding (or returns as bytes
    if not given), and closes it.

    :param path: path of file to read
    :param encoding: optional encoding. If default, this will be returned as bytes
    :param default: value to return when the file does not exist. If not given, a
        FileNotFoundError will be raised instead (the default is a private sentinel,
        so an explicit ``default=None`` is honoured and returned)
    :return: file content, either decoded as a str, or not as bytes
    :raises FileNotFoundError: file did not exist and default was not set
    """
    # A directory cannot be read in; treat it the same as a missing file.
    if os.path.isdir(path):
        if default is not _NOTSET:
            return default
        raise FileNotFoundError('%s found and is a directory' % (path,))
    try:
        if encoding is None:
            file = open(path, 'rb')
        else:
            file = codecs.open(path, 'r', encoding)
    except FileNotFoundError:
        # Missing file: hand back the caller's default, if one was supplied.
        if default is not _NOTSET:
            return default
        raise
    try:
        return file.read()
    finally:
        file.close()
def read_re_sub_and_write(path: str, pattern: tp.Union[re.Pattern, str],
                          repl: tp.Union[str, tp.Callable[[tp.Any], str]]) -> None:
    """
    Read a text file, treat with re.sub and write the contents.

    Note that this is not thread or multiprocess safe.

    :param path: path of file to treat
    :param pattern: regexp compiled pattern or a string, a pattern to match the file contents
    :param repl: string or a callable(re.Match)->str to replace the contents
    """
    with open(path, 'r') as f_in:
        data = f_in.read()

    # Dispatch explicitly so a compiled pattern keeps its flags.
    if isinstance(pattern, str):
        data = re.sub(pattern, repl, data)
    else:
        data = pattern.sub(repl, data)

    with open(path, 'w') as f_out:
        f_out.write(data)
@silence_excs(OSError, returns=False)
def try_unlink(path: str) -> bool:
    """
    Best-effort removal of a file or a directory tree.

    A syntactic sugar for
    >>> try:
    >>>     os.unlink(path)
    >>>     return True
    >>> except FileNotFoundError:
    >>>     return False

    Note that if path is a directory, rmtree from shutil will be called on it, and
    any OSErrors will report the deletion as False

    :param path: path of file to delete
    :return: whether the deletion happened
    """
    # Directories need recursive removal; plain files are simply unlinked.
    # Any OSError is swallowed by the decorator, which makes this return False.
    remover = shutil.rmtree if os.path.isdir(path) else os.unlink
    remover(path)
    return True
def _cond_join(prefix: tp.Optional[str], filename: str) -> str:
"""or a conditional os.path.join"""
if prefix is None:
return filename
else:
return os.path.join(prefix, filename)
def find_files(path: str, wildcard: str = r'(.*)',
               prefix_with: tp.Optional[str] = None,
               scan_subdirectories: bool = True,
               apply_wildcard_to_entire_path: bool = False,
               prefix_with_path: bool = True) -> tp.Iterator[str]:
    """
    Look at given path's files and all subdirectories and return an iterator of
    file names (paths included) that conform to given wildcard.

    Note that wildcard is only applied to the file name if apply_wildcard_to_entire_path
    is False, else the wildcard is applied to entire path (including the application of
    prefix_with!).

    Files will be additionally prefixed with path, but only if prefix_with_path is True

    .. warning:: Note that this will try to match only the start of the path. For a complete match
        remember to put a $ at the end of the string!

    :param path: path to look into.
    :param wildcard: a regular expression to match
    :param prefix_with: an optional path component to prefix before the filename with os.path.join
    :param scan_subdirectories: whether to scan subdirectories
    :param apply_wildcard_to_entire_path: whether to take the entire relative path into account
        when checking wildcard
    :param prefix_with_path: whether to add path to the resulting path
    :return: paths with the files. They will be relative paths, relative to path
    """
    if prefix_with_path:
        prefix_with = _cond_join(prefix_with, path)
    for entry in os.listdir(path):
        entry_path = os.path.join(path, entry)
        if scan_subdirectories and os.path.isdir(entry_path):
            # Recurse with the accumulated prefix; path has already been
            # folded into prefix_with, hence prefix_with_path=False.
            yield from find_files(entry_path, wildcard,
                                  prefix_with=_cond_join(prefix_with, entry),
                                  prefix_with_path=False)
            continue
        candidate = _cond_join(prefix_with, entry) if apply_wildcard_to_entire_path else entry
        if re.match(wildcard, candidate):
            yield _cond_join(prefix_with, entry)
def write_out_file_if_different(path: str, data: tp.Union[bytes, str],
                                encoding: tp.Optional[str] = None) -> bool:
    """
    Write *data* to *path* only when the current file content differs.

    Syntactic sugar for
    >>> try:
    >>>     if read_in_file(path, encoding) != data:
    >>>         write_to_file(path, data, encoding)
    >>>         return True
    >>>     else:
    >>>         return False
    >>> except FileNotFoundError:
    >>>     write_to_file(path, data, encoding)
    >>>     return True

    :param path: Path to put the file under
    :param data: Data to write. Must be bytes if no encoding is given, str otherwise
    :param encoding: Encoding. Default is None, which means no encoding (bytes will be written)
    :return: if write has happened
    """
    try:
        # Compare existing content first, so an identical file is left untouched.
        if read_in_file(path, encoding) != data:
            write_to_file(path, data, encoding)
            return True
        else:
            return False
    except FileNotFoundError:
        # No file yet -- write unconditionally.
        write_to_file(path, data, encoding)
        return True | 0.588889 | 0.279189 |
import base64
import binascii
import hashlib
import hmac
import requests
import scrypt
class InvalidRequestException(Exception):
    """Exception containing information about failed request."""

    def __init__(self, message, status=None):
        """Instantiate exception with message and (optional) status object.

        Arguments:
        message -- error message
        status -- keybase.io status object (default None)
        """
        super().__init__(message)
        # Keep the raw status object so callers can inspect the error code.
        self.status = status
def _make_request(method, url, params):
"""Send and process an API call to keybase.io.
Arguments:
method -- requests method to use for the call
url -- full URL to call
params -- request parameters to send with the call
Returns:
If successful, full response object
If failed, InvalidRequestException with an error message and potentially
the keybase.io status object
"""
response = method(url, params=params)
if response.status_code != 200:
raise InvalidRequestException(response.text)
response_json = response.json()
if response_json['status']['code'] != 0:
raise InvalidRequestException(response_json['status']['desc'],
response_json['status'])
return response
def get_salt(username):
    """Retrieve salt, token, and session for user with provided username.

    Arguments:
    username -- username for the desired user

    Returns:
    If successful, tuple with salt, csrf token and login session
    If failed, InvalidRequestException
    """
    reply = _make_request(requests.get,
                          'https://keybase.io/_/api/1.0/getsalt.json',
                          params={'email_or_username': username}).json()
    return reply['salt'], reply['csrf_token'], reply['login_session']
def _generate_hmac_pwh(password, salt, login_session):
    """Generate password hash consisting of the password, salt, and session.

    Arguments:
    password -- password to use as hash key
    salt -- hex encoded salt to use as hash key
    login_session -- base64 encoded session to hash

    Returns:
    Hashed login session
    """
    # scrypt output is 224 bytes; bytes 192..224 serve as the HMAC key.
    scrypt_hash = scrypt.hash(password, binascii.unhexlify(salt),
                              1 << 15, 8, 1, 224)
    key = scrypt_hash[192:224]
    return hmac.new(key, base64.b64decode(login_session),
                    hashlib.sha512).hexdigest()
def login(username, password):
    """Login user with the given username and password.

    Arguments:
    username -- username for the user to login
    password -- password for the user to login

    Returns:
    If successful, tuple containing session and user object
    If failed, InvalidRequestException
    """
    # First fetch the per-user salt plus CSRF token and login session...
    salt, csrf_token, login_session = get_salt(username)
    # ...then derive the HMAC'd password hash the login endpoint expects.
    hmac_pwh = _generate_hmac_pwh(password, salt, login_session)
    login_obj = _make_request(requests.post,
                              'https://keybase.io/_/api/1.0/login.json',
                              params={'email_or_username': username,
                                      'csrf_token': csrf_token,
                                      'hmac_pwh': hmac_pwh,
                                      'login_session': login_session}).json()
    return login_obj['session'], login_obj['me'] | keybaseclient/raw_api.py | import base64
import binascii
import hashlib
import hmac
import requests
import scrypt
class InvalidRequestException(Exception):
    """Exception containing information about failed request."""

    def __init__(self, message, status=None):
        """Instantiate exception with message and (optional) status object.

        Arguments:
        message -- error message
        status -- keybase.io status object (default None)
        """
        super(InvalidRequestException, self).__init__(message)
        # Keep the raw status object so callers can inspect the error code.
        self.status = status
def _make_request(method, url, params):
    """Send and process an API call to keybase.io.

    Arguments:
    method -- requests method to use for the call
    url -- full URL to call
    params -- request parameters to send with the call

    Returns:
    If successful, full response object
    If failed, InvalidRequestException with an error message and potentially
    the keybase.io status object
    """
    response = method(url, params=params)
    # HTTP-level failure.
    if response.status_code != 200:
        raise InvalidRequestException(response.text)
    response_json = response.json()
    # Application-level failure: keybase.io signals errors with a nonzero
    # status code even when the HTTP layer reports 200.
    if response_json['status']['code'] != 0:
        raise InvalidRequestException(response_json['status']['desc'],
                                      response_json['status'])
    return response
def get_salt(username):
    """Retrieve salt, token, and session for user with provided username.

    Arguments:
    username -- username for the desired user

    Returns:
    If successful, tuple with salt, csrf token and login session
    If failed, InvalidRequestException
    """
    salt_obj = _make_request(requests.get,
                             'https://keybase.io/_/api/1.0/getsalt.json',
                             params={'email_or_username': username}).json()
    salt = salt_obj['salt']
    csrf_token = salt_obj['csrf_token']
    login_session = salt_obj['login_session']
    return salt, csrf_token, login_session
def _generate_hmac_pwh(password, salt, login_session):
    """Generate password hash consisting of the password, salt, and session.

    Arguments:
    password -- password to use as hash key
    salt -- hex encoded salt to use as hash key
    login_session -- base64 encoded session to hash

    Returns:
    Hashed login session
    """
    # scrypt produces 224 bytes; bytes 192..224 serve as the HMAC key.
    pwh = scrypt.hash(password, binascii.unhexlify(salt),
                      1 << 15, 8, 1, 224)[192:224]
    hmac_pwh = hmac.new(pwh, base64.b64decode(login_session),
                        hashlib.sha512).hexdigest()
    return hmac_pwh
def login(username, password):
    """Login user with the given username and password.

    Arguments:
    username -- username for the user to login
    password -- password for the user to login

    Returns:
    If successful, tuple containing session and user object
    If failed, InvalidRequestException
    """
    # First fetch the per-user salt plus CSRF token and login session...
    salt, csrf_token, login_session = get_salt(username)
    # ...then derive the HMAC'd password hash the login endpoint expects.
    hmac_pwh = _generate_hmac_pwh(password, salt, login_session)
    login_obj = _make_request(requests.post,
                              'https://keybase.io/_/api/1.0/login.json',
                              params={'email_or_username': username,
                                      'csrf_token': csrf_token,
                                      'hmac_pwh': hmac_pwh,
                                      'login_session': login_session}).json()
    return login_obj['session'], login_obj['me'] | 0.767429 | 0.116061 |
from unittest import TestCase
from neo.Prompt import Utils
from neocore.Fixed8 import Fixed8
from neocore.UInt160 import UInt160
class TestInputParser(TestCase):
    """Unit tests for the neo.Prompt.Utils argument-parsing helpers
    get_asset_attachments() and get_owners_from_params().
    """

    def test_utils_1(self):
        # Plain arguments pass through unchanged; no attachments detected.
        args = [1, 2, 3]
        args, neo, gas = Utils.get_asset_attachments(args)
        self.assertEqual(args, [1, 2, 3])
        self.assertIsNone(neo)
        self.assertIsNone(gas)

    def test_utils_2(self):
        # An empty argument list yields empty output and no attachments.
        args = []
        args, neo, gas = Utils.get_asset_attachments(args)
        self.assertEqual(args, [])
        self.assertIsNone(neo)
        self.assertIsNone(gas)

    def test_utils_3(self):
        # None is not iterable -- the helper is expected to raise.
        args = None
        with self.assertRaises(Exception):
            Utils.get_asset_attachments(args)

    def test_utils_4(self):
        # --attach-neo is consumed from args and parsed as a Fixed8.
        args = [1, 2, '--attach-neo=100']
        args, neo, gas = Utils.get_asset_attachments(args)
        self.assertEqual(args, [1, 2])
        self.assertEqual(neo, Fixed8.FromDecimal(100))
        self.assertIsNone(gas)

    def test_utils_5(self):
        # --attach-gas supports fractional amounts.
        args = [1, 2, '--attach-gas=100.0003']
        args, neo, gas = Utils.get_asset_attachments(args)
        self.assertEqual(args, [1, 2])
        self.assertEqual(gas, Fixed8.FromDecimal(100.0003))
        self.assertIsNone(neo)

    def test_utils_6(self):
        # A misspelled switch is left in args untouched.
        args = [1, 2, '--attachgas=100.0003']
        args, neo, gas = Utils.get_asset_attachments(args)
        self.assertEqual(args, [1, 2, '--attachgas=100.0003'])
        self.assertIsNone(neo)
        self.assertIsNone(gas)

    def test_utils_7(self):
        # A fractional NEO amount yields neo=None -- presumably rejected
        # because whole-unit NEO is expected; verify against Utils.
        args = [1, 2, '--attach-gas=100.0003', '--attach-neo=5.7']
        args, neo, gas = Utils.get_asset_attachments(args)
        self.assertEqual(args, [1, 2])
        self.assertEqual(neo, None)
        self.assertEqual(gas, Fixed8.FromDecimal(100.0003))

    def test_utils_8(self):
        # Both assets can be attached in a single call.
        args = [1, 2, '--attach-gas=100.0003', '--attach-neo=6']
        args, neo, gas = Utils.get_asset_attachments(args)
        self.assertEqual(args, [1, 2])
        self.assertEqual(neo, Fixed8.FromDecimal(6))
        self.assertEqual(gas, Fixed8.FromDecimal(100.0003))

    def test_owner_1(self):
        # No --owners switch: owners is None.
        args = [1, 2]
        args, owners = Utils.get_owners_from_params(args)
        self.assertEqual(args, [1, 2])
        self.assertIsNone(owners)

    def test_owner_2(self):
        # Invalid addresses are dropped, leaving an empty set.
        args = [1, 2, "--owners=['ABC','DEF',]"]
        args, owners = Utils.get_owners_from_params(args)
        self.assertEqual(args, [1, 2])
        self.assertEqual(owners, set())

    def test_owner_3(self):
        # Valid addresses become UInt160 script hashes.
        args = [1, 2, "--owners=['<KEY>','<KEY>',]"]
        args, owners = Utils.get_owners_from_params(args)
        self.assertEqual(args, [1, 2])
        self.assertEqual(len(owners), 2)
        self.assertIsInstance(list(owners)[0], UInt160)

    def test_owner_and_assets(self):
        # Owner and attachment switches can be combined.
        args = [1, 2, "--owners=['<KEY>','<KEY>',]", '--attach-neo=10']
        args, owners = Utils.get_owners_from_params(args)
        args, neo, gas = Utils.get_asset_attachments(args)
        self.assertEqual(args, [1, 2])
        self.assertEqual(len(owners), 2)
        self.assertIsInstance(list(owners)[0], UInt160)
        self.assertEqual(neo, Fixed8.FromDecimal(10)) | neo/Prompt/test_utils.py | from unittest import TestCase
from neo.Prompt import Utils
from neocore.Fixed8 import Fixed8
from neocore.UInt160 import UInt160
class TestInputParser(TestCase):
    """Unit tests for the neo.Prompt.Utils argument-parsing helpers
    get_asset_attachments() and get_owners_from_params().
    """

    def test_utils_1(self):
        # Plain arguments pass through unchanged; no attachments detected.
        args = [1, 2, 3]
        args, neo, gas = Utils.get_asset_attachments(args)
        self.assertEqual(args, [1, 2, 3])
        self.assertIsNone(neo)
        self.assertIsNone(gas)

    def test_utils_2(self):
        # An empty argument list yields empty output and no attachments.
        args = []
        args, neo, gas = Utils.get_asset_attachments(args)
        self.assertEqual(args, [])
        self.assertIsNone(neo)
        self.assertIsNone(gas)

    def test_utils_3(self):
        # None is not iterable -- the helper is expected to raise.
        args = None
        with self.assertRaises(Exception):
            Utils.get_asset_attachments(args)

    def test_utils_4(self):
        # --attach-neo is consumed from args and parsed as a Fixed8.
        args = [1, 2, '--attach-neo=100']
        args, neo, gas = Utils.get_asset_attachments(args)
        self.assertEqual(args, [1, 2])
        self.assertEqual(neo, Fixed8.FromDecimal(100))
        self.assertIsNone(gas)

    def test_utils_5(self):
        # --attach-gas supports fractional amounts.
        args = [1, 2, '--attach-gas=100.0003']
        args, neo, gas = Utils.get_asset_attachments(args)
        self.assertEqual(args, [1, 2])
        self.assertEqual(gas, Fixed8.FromDecimal(100.0003))
        self.assertIsNone(neo)

    def test_utils_6(self):
        # A misspelled switch is left in args untouched.
        args = [1, 2, '--attachgas=100.0003']
        args, neo, gas = Utils.get_asset_attachments(args)
        self.assertEqual(args, [1, 2, '--attachgas=100.0003'])
        self.assertIsNone(neo)
        self.assertIsNone(gas)

    def test_utils_7(self):
        # A fractional NEO amount yields neo=None -- presumably rejected
        # because whole-unit NEO is expected; verify against Utils.
        args = [1, 2, '--attach-gas=100.0003', '--attach-neo=5.7']
        args, neo, gas = Utils.get_asset_attachments(args)
        self.assertEqual(args, [1, 2])
        self.assertEqual(neo, None)
        self.assertEqual(gas, Fixed8.FromDecimal(100.0003))

    def test_utils_8(self):
        # Both assets can be attached in a single call.
        args = [1, 2, '--attach-gas=100.0003', '--attach-neo=6']
        args, neo, gas = Utils.get_asset_attachments(args)
        self.assertEqual(args, [1, 2])
        self.assertEqual(neo, Fixed8.FromDecimal(6))
        self.assertEqual(gas, Fixed8.FromDecimal(100.0003))

    def test_owner_1(self):
        # No --owners switch: owners is None.
        args = [1, 2]
        args, owners = Utils.get_owners_from_params(args)
        self.assertEqual(args, [1, 2])
        self.assertIsNone(owners)

    def test_owner_2(self):
        # Invalid addresses are dropped, leaving an empty set.
        args = [1, 2, "--owners=['ABC','DEF',]"]
        args, owners = Utils.get_owners_from_params(args)
        self.assertEqual(args, [1, 2])
        self.assertEqual(owners, set())

    def test_owner_3(self):
        # Valid addresses become UInt160 script hashes.
        args = [1, 2, "--owners=['<KEY>','<KEY>',]"]
        args, owners = Utils.get_owners_from_params(args)
        self.assertEqual(args, [1, 2])
        self.assertEqual(len(owners), 2)
        self.assertIsInstance(list(owners)[0], UInt160)

    def test_owner_and_assets(self):
        # Owner and attachment switches can be combined.
        args = [1, 2, "--owners=['<KEY>','<KEY>',]", '--attach-neo=10']
        args, owners = Utils.get_owners_from_params(args)
        args, neo, gas = Utils.get_asset_attachments(args)
        self.assertEqual(args, [1, 2])
        self.assertEqual(len(owners), 2)
        self.assertIsInstance(list(owners)[0], UInt160)
        self.assertEqual(neo, Fixed8.FromDecimal(10)) | 0.726231 | 0.641296 |
import numpy as np
from numpy.lib.stride_tricks import as_strided
import numbers
def rolled(arr_in, window_shape, step=1):
    """Rolling window view of the input n-dimensional array.

    Windows are overlapping views of the input array, with adjacent windows
    shifted by a single row or column (or an index of a higher dimension).

    Parameters
    ----------
    arr_in : ndarray
        N-d input array.
    window_shape : integer or tuple of length arr_in.ndim
        Defines the shape of the elementary n-dimensional orthotope
        (better known as hyperrectangle [1]_) of the rolling window view.
        If an integer is given, the shape will be a hypercube of
        sidelength given by its value.
    step : integer or tuple of length arr_in.ndim
        Indicates step size at which extraction shall be performed.
        If integer is given, then the step is uniform in all dimensions.

    Returns
    -------
    arr_out : ndarray
        (rolling) window view of the input array.

    Notes
    -----
    One should be very careful with rolling views when it comes to
    memory usage. Indeed, although a 'view' has the same memory
    footprint as its base array, the actual array that emerges when this
    'view' is used in a computation is generally a (much) larger array
    than the original, especially for 2-dimensional arrays and above.

    For example, let us consider a 3 dimensional array of size (100,
    100, 100) of ``float64``. This array takes about 8*100**3 Bytes for
    storage which is just 8 MB. If one decides to build a rolling view
    on this array with a window of (3, 3, 3) the hypothetical size of
    the rolling view (if one was to reshape the view for example) would
    be 8*(100-3+1)**3*3**3 which is about 203 MB! The scaling becomes
    even worse as the dimension of the input array becomes larger.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Hyperrectangle

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.util.shape import view_as_windows
    >>> A = np.arange(4*4).reshape(4,4)
    >>> A
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15]])
    >>> window_shape = (2, 2)
    >>> B = view_as_windows(A, window_shape)
    >>> B[0, 0]
    array([[0, 1],
           [4, 5]])
    >>> B[0, 1]
    array([[1, 2],
           [5, 6]])

    >>> A = np.arange(10)
    >>> A
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> window_shape = (3,)
    >>> B = view_as_windows(A, window_shape)
    >>> B.shape
    (8, 3)
    >>> B
    array([[0, 1, 2],
           [1, 2, 3],
           [2, 3, 4],
           [3, 4, 5],
           [4, 5, 6],
           [5, 6, 7],
           [6, 7, 8],
           [7, 8, 9]])

    >>> A = np.arange(5*4).reshape(5, 4)
    >>> A
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15],
           [16, 17, 18, 19]])
    >>> window_shape = (4, 3)
    >>> B = view_as_windows(A, window_shape)
    >>> B.shape
    (2, 2, 4, 3)
    >>> B  # doctest: +NORMALIZE_WHITESPACE
    array([[[[ 0,  1,  2],
             [ 4,  5,  6],
             [ 8,  9, 10],
             [12, 13, 14]],
            [[ 1,  2,  3],
             [ 5,  6,  7],
             [ 9, 10, 11],
             [13, 14, 15]]],
           [[[ 4,  5,  6],
             [ 8,  9, 10],
             [12, 13, 14],
             [16, 17, 18]],
            [[ 5,  6,  7],
             [ 9, 10, 11],
             [13, 14, 15],
             [17, 18, 19]]]])
    """
    # -- basic checks on arguments
    if not isinstance(arr_in, np.ndarray):
        raise TypeError("`arr_in` must be a numpy ndarray")
    ndim = arr_in.ndim
    # A scalar window means a hypercube of that side length.
    if isinstance(window_shape, numbers.Number):
        window_shape = (window_shape,) * ndim
    if not (len(window_shape) == ndim):
        raise ValueError("`window_shape` is incompatible with `arr_in.shape`")
    # A scalar step is applied uniformly across all dimensions.
    if isinstance(step, numbers.Number):
        if step < 1:
            raise ValueError("`step` must be >= 1")
        step = (step,) * ndim
    if len(step) != ndim:
        raise ValueError("`step` is incompatible with `arr_in.shape`")
    arr_shape = np.array(arr_in.shape)
    window_shape = np.array(window_shape, dtype=arr_shape.dtype)
    if ((arr_shape - window_shape) < 0).any():
        raise ValueError("`window_shape` is too large")
    if ((window_shape - 1) < 0).any():
        raise ValueError("`window_shape` is too small")
    # -- build rolling window view
    # Strides of a step-sliced view give the offset between window origins.
    slices = tuple(slice(None, None, st) for st in step)
    window_strides = np.array(arr_in.strides)
    indexing_strides = arr_in[slices].strides
    # Number of windows along each axis.
    win_indices_shape = (((np.array(arr_in.shape) - np.array(window_shape))
                          // np.array(step)) + 1)
    new_shape = tuple(list(win_indices_shape) + list(window_shape))
    strides = tuple(list(indexing_strides) + list(window_strides))
    # NOTE(review): as_strided performs no bounds checking; writing through
    # the returned view can corrupt memory -- treat the result as read-only.
    arr_out = as_strided(arr_in, shape=new_shape, strides=strides)
    return arr_out | deddiag_loader/utils.py |
import numpy as np
from numpy.lib.stride_tricks import as_strided
import numbers
def rolled(arr_in, window_shape, step=1):
    """Rolling window view of the input n-dimensional array.

    Windows are overlapping views of the input array, with adjacent windows
    shifted by a single row or column (or an index of a higher dimension).

    Parameters
    ----------
    arr_in : ndarray
        N-d input array.
    window_shape : integer or tuple of length arr_in.ndim
        Defines the shape of the elementary n-dimensional orthotope
        (better known as hyperrectangle [1]_) of the rolling window view.
        If an integer is given, the shape will be a hypercube of
        sidelength given by its value.
    step : integer or tuple of length arr_in.ndim
        Indicates step size at which extraction shall be performed.
        If integer is given, then the step is uniform in all dimensions.

    Returns
    -------
    arr_out : ndarray
        (rolling) window view of the input array.

    Notes
    -----
    One should be very careful with rolling views when it comes to
    memory usage. Indeed, although a 'view' has the same memory
    footprint as its base array, the actual array that emerges when this
    'view' is used in a computation is generally a (much) larger array
    than the original, especially for 2-dimensional arrays and above.

    For example, let us consider a 3 dimensional array of size (100,
    100, 100) of ``float64``. This array takes about 8*100**3 Bytes for
    storage which is just 8 MB. If one decides to build a rolling view
    on this array with a window of (3, 3, 3) the hypothetical size of
    the rolling view (if one was to reshape the view for example) would
    be 8*(100-3+1)**3*3**3 which is about 203 MB! The scaling becomes
    even worse as the dimension of the input array becomes larger.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Hyperrectangle

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.util.shape import view_as_windows
    >>> A = np.arange(4*4).reshape(4,4)
    >>> A
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15]])
    >>> window_shape = (2, 2)
    >>> B = view_as_windows(A, window_shape)
    >>> B[0, 0]
    array([[0, 1],
           [4, 5]])
    >>> B[0, 1]
    array([[1, 2],
           [5, 6]])

    >>> A = np.arange(10)
    >>> A
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> window_shape = (3,)
    >>> B = view_as_windows(A, window_shape)
    >>> B.shape
    (8, 3)
    >>> B
    array([[0, 1, 2],
           [1, 2, 3],
           [2, 3, 4],
           [3, 4, 5],
           [4, 5, 6],
           [5, 6, 7],
           [6, 7, 8],
           [7, 8, 9]])

    >>> A = np.arange(5*4).reshape(5, 4)
    >>> A
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15],
           [16, 17, 18, 19]])
    >>> window_shape = (4, 3)
    >>> B = view_as_windows(A, window_shape)
    >>> B.shape
    (2, 2, 4, 3)
    >>> B  # doctest: +NORMALIZE_WHITESPACE
    array([[[[ 0,  1,  2],
             [ 4,  5,  6],
             [ 8,  9, 10],
             [12, 13, 14]],
            [[ 1,  2,  3],
             [ 5,  6,  7],
             [ 9, 10, 11],
             [13, 14, 15]]],
           [[[ 4,  5,  6],
             [ 8,  9, 10],
             [12, 13, 14],
             [16, 17, 18]],
            [[ 5,  6,  7],
             [ 9, 10, 11],
             [13, 14, 15],
             [17, 18, 19]]]])
    """
    # -- basic checks on arguments
    if not isinstance(arr_in, np.ndarray):
        raise TypeError("`arr_in` must be a numpy ndarray")
    ndim = arr_in.ndim
    # A scalar window means a hypercube of that side length.
    if isinstance(window_shape, numbers.Number):
        window_shape = (window_shape,) * ndim
    if not (len(window_shape) == ndim):
        raise ValueError("`window_shape` is incompatible with `arr_in.shape`")
    # A scalar step is applied uniformly across all dimensions.
    if isinstance(step, numbers.Number):
        if step < 1:
            raise ValueError("`step` must be >= 1")
        step = (step,) * ndim
    if len(step) != ndim:
        raise ValueError("`step` is incompatible with `arr_in.shape`")
    arr_shape = np.array(arr_in.shape)
    window_shape = np.array(window_shape, dtype=arr_shape.dtype)
    if ((arr_shape - window_shape) < 0).any():
        raise ValueError("`window_shape` is too large")
    if ((window_shape - 1) < 0).any():
        raise ValueError("`window_shape` is too small")
    # -- build rolling window view
    # Strides of a step-sliced view give the offset between window origins.
    slices = tuple(slice(None, None, st) for st in step)
    window_strides = np.array(arr_in.strides)
    indexing_strides = arr_in[slices].strides
    # Number of windows along each axis.
    win_indices_shape = (((np.array(arr_in.shape) - np.array(window_shape))
                          // np.array(step)) + 1)
    new_shape = tuple(list(win_indices_shape) + list(window_shape))
    strides = tuple(list(indexing_strides) + list(window_strides))
    # NOTE(review): as_strided performs no bounds checking; writing through
    # the returned view can corrupt memory -- treat the result as read-only.
    arr_out = as_strided(arr_in, shape=new_shape, strides=strides)
    return arr_out | 0.92433 | 0.872782 |
import numpy as np
import pandas as pd
import joblib
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.base import BaseEstimator, TransformerMixin
from classifier import config
from classifier.utils.process_data import text_processors
class SentimentClassifier(BaseEstimator, TransformerMixin):
    """Sentiment classifier that can be trained on text data with
    corresponding sentiment labels.
    """

    # NOTE(review): three trailing underscores -- this is NOT __init__ and is
    # never invoked, so instances have no ``clf`` attribute until fit() runs.
    def __init___(self):
        self.clf = None

    def fit(self, X: pd.Series, y: pd.Series) -> "SentimentClassifier":
        """Fit the sentiment classification pipeline

        Parameters
        ----------
        X : pd.Series
            Pandas series containing text data
        y : Pandas series containing the sentiment label corresponding

        Returns
        -------
        SentimentClassifier
            Returns the trained sentiment classification model
        """
        # Declaring model pipeline
        self.clf = Pipeline(steps=[
            ("text_processors", text_processors),
            ("classifier", LogisticRegression(C=4, n_jobs=-1))
        ])
        # Training sentiment model
        self.clf.fit(X, y)
        # NOTE(review): returns the inner Pipeline, not self, despite the
        # annotated return type -- confirm intent with callers.
        return self.clf

    def predict(self, X: pd.Series) -> np.array:
        """Predict sentiment based on input data

        Parameters
        ----------
        X : pd.Series
            Input data containing text

        Returns
        -------
        np.array
            Returns numpy array with sentiment prediction
        """
        # Get sentiment prediction
        y_pred = self.clf.predict(X)
        return y_pred

    def predict_proba(self, X) -> np.array:
        """Predict probabilities of sentiment input data

        Parameters
        ----------
        X : pd.Series
            Input data containing text

        Returns
        -------
        np.array
            Returns numpy array with sentiment prediction probabilities
        """
        # Get sentiment probabilities of sentiment classes
        y_proba = self.clf.predict_proba(X)
        return y_proba

    def save(self, path: str) -> None:
        """Save the trained sentiment model

        Parameters
        ----------
        path : str
            Save trained model path
        """
        # Create trained model directory
        config.TRAINED_MODEL_DIR.mkdir(exist_ok=True, parents=True)
        # NOTE(review): the ``path`` parameter is ignored; the model is
        # always written to config.MODEL_NAME.
        joblib.dump(self.clf, config.MODEL_NAME, compress=3)

    def load(self, path: str) -> "SentimentClassifier":
        """Load the trained sentiment model

        Returns
        -------
        [type]
            Returns the trained sentiment models
        """
        # Declare model instance
        model = SentimentClassifier()
        # Load model
        model.clf = joblib.load(path)
        # NOTE(review): returns the loaded Pipeline, not the wrapper
        # instance, despite the annotated return type.
        return model.clf | src/classifier/sentiment_model.py | import numpy as np
import pandas as pd
import joblib
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.base import BaseEstimator, TransformerMixin
from classifier import config
from classifier.utils.process_data import text_processors
class SentimentClassifier(BaseEstimator, TransformerMixin):
    """Sentiment classifier that can be trained on text data with
    corresponding sentiment labels.
    """

    # NOTE(review): three trailing underscores -- this is NOT __init__ and is
    # never invoked, so instances have no ``clf`` attribute until fit() runs.
    def __init___(self):
        self.clf = None

    def fit(self, X: pd.Series, y: pd.Series) -> "SentimentClassifier":
        """Fit the sentiment classification pipeline

        Parameters
        ----------
        X : pd.Series
            Pandas series containing text data
        y : Pandas series containing the sentiment label corresponding

        Returns
        -------
        SentimentClassifier
            Returns the trained sentiment classification model
        """
        # Declaring model pipeline
        self.clf = Pipeline(steps=[
            ("text_processors", text_processors),
            ("classifier", LogisticRegression(C=4, n_jobs=-1))
        ])
        # Training sentiment model
        self.clf.fit(X, y)
        # NOTE(review): returns the inner Pipeline, not self, despite the
        # annotated return type -- confirm intent with callers.
        return self.clf

    def predict(self, X: pd.Series) -> np.array:
        """Predict sentiment based on input data

        Parameters
        ----------
        X : pd.Series
            Input data containing text

        Returns
        -------
        np.array
            Returns numpy array with sentiment prediction
        """
        # Get sentiment prediction
        y_pred = self.clf.predict(X)
        return y_pred

    def predict_proba(self, X) -> np.array:
        """Predict probabilities of sentiment input data

        Parameters
        ----------
        X : pd.Series
            Input data containing text

        Returns
        -------
        np.array
            Returns numpy array with sentiment prediction probabilities
        """
        # Get sentiment probabilities of sentiment classes
        y_proba = self.clf.predict_proba(X)
        return y_proba

    def save(self, path: str) -> None:
        """Save the trained sentiment model

        Parameters
        ----------
        path : str
            Save trained model path
        """
        # Create trained model directory
        config.TRAINED_MODEL_DIR.mkdir(exist_ok=True, parents=True)
        # NOTE(review): the ``path`` parameter is ignored; the model is
        # always written to config.MODEL_NAME.
        joblib.dump(self.clf, config.MODEL_NAME, compress=3)

    def load(self, path: str) -> "SentimentClassifier":
        """Load the trained sentiment model

        Returns
        -------
        [type]
            Returns the trained sentiment models
        """
        # Declare model instance
        model = SentimentClassifier()
        # Load model
        model.clf = joblib.load(path)
        # NOTE(review): returns the loaded Pipeline, not the wrapper
        # instance, despite the annotated return type.
        return model.clf | 0.888934 | 0.654978 |
import scanpy as sc
from sctriangulate import *
from sctriangulate.colors import *
from sctriangulate.preprocessing import *
print('===================\nimport modules test:',u'\u2713','\n====================')
sctriangulate_setting(backend='Agg')
adata = sc.read('input.h5ad')
sctri = ScTriangulate(dir='output',adata=adata,add_metrics={},query=['sctri_rna_leiden_1','sctri_rna_leiden_2','sctri_rna_leiden_3'])
print('====================\ninstantiation test:',u'\u2713','\n====================')
sctri.lazy_run(scale_sccaf=False,viewer_cluster=False,viewer_heterogeneity=False)
print('=====================\nlazy_run test:',u'\u2713','\n========================')
sctri.plot_winners_statistics(col='raw',fontsize=6)
print('======================\nplot_winners_statistics test:',u'\u2713','\n=======================')
sctri.plot_clusterability(key='sctri_rna_leiden_1',col='raw',fontsize=8)
print('======================\nplot_clusterability test:',u'\u2713','\n=========================')
sctri.display_hierarchy(ref_col='sctri_rna_leiden_1',query_col='raw')
print('======================\ndisplay_hierarchy test:',u'\u2713','\n=====================')
sctri.plot_umap(col='pruned',kind='category')
sctri.plot_umap(col='confidence',kind='continuous',umap_cmap='viridis')
print('======================\nplot_umap test:',u'\u2713','\n===================')
sctri.plot_cluster_feature(key='sctri_rna_leiden_1',cluster='3',feature='enrichment')
sctri.plot_cluster_feature(key='sctri_rna_leiden_1',cluster='3',feature='marker_genes')
sctri.plot_cluster_feature(key='sctri_rna_leiden_1',cluster='3',feature='exclusive_genes')
sctri.plot_cluster_feature(key='sctri_rna_leiden_1',cluster='3',feature='location')
print('======================\nplot_cluster_feature test:',u'\u2713','\n=====================')
sctri.plot_long_heatmap(key='pruned',n_features=20,figsize=(20,20))
print('=======================\nplot_long_heatmap test:',u'\u2713','\n========================')
sctri.plot_confusion(name='confusion_reassign',key='sctri_rna_leiden_1',cmap=retrieve_pretty_cmap('shap'))
print('=======================\nplot_confusion test:',u'\u2713','\n============================') | test/mini_test.py | import scanpy as sc
from sctriangulate import *
from sctriangulate.colors import *
from sctriangulate.preprocessing import *
print('===================\nimport modules test:',u'\u2713','\n====================')
sctriangulate_setting(backend='Agg')
adata = sc.read('input.h5ad')
sctri = ScTriangulate(dir='output',adata=adata,add_metrics={},query=['sctri_rna_leiden_1','sctri_rna_leiden_2','sctri_rna_leiden_3'])
print('====================\ninstantiation test:',u'\u2713','\n====================')
sctri.lazy_run(scale_sccaf=False,viewer_cluster=False,viewer_heterogeneity=False)
print('=====================\nlazy_run test:',u'\u2713','\n========================')
sctri.plot_winners_statistics(col='raw',fontsize=6)
print('======================\nplot_winners_statistics test:',u'\u2713','\n=======================')
sctri.plot_clusterability(key='sctri_rna_leiden_1',col='raw',fontsize=8)
print('======================\nplot_clusterability test:',u'\u2713','\n=========================')
sctri.display_hierarchy(ref_col='sctri_rna_leiden_1',query_col='raw')
print('======================\ndisplay_hierarchy test:',u'\u2713','\n=====================')
sctri.plot_umap(col='pruned',kind='category')
sctri.plot_umap(col='confidence',kind='continuous',umap_cmap='viridis')
print('======================\nplot_umap test:',u'\u2713','\n===================')
sctri.plot_cluster_feature(key='sctri_rna_leiden_1',cluster='3',feature='enrichment')
sctri.plot_cluster_feature(key='sctri_rna_leiden_1',cluster='3',feature='marker_genes')
sctri.plot_cluster_feature(key='sctri_rna_leiden_1',cluster='3',feature='exclusive_genes')
sctri.plot_cluster_feature(key='sctri_rna_leiden_1',cluster='3',feature='location')
print('======================\nplot_cluster_feature test:',u'\u2713','\n=====================')
sctri.plot_long_heatmap(key='pruned',n_features=20,figsize=(20,20))
print('=======================\nplot_long_heatmap test:',u'\u2713','\n========================')
sctri.plot_confusion(name='confusion_reassign',key='sctri_rna_leiden_1',cmap=retrieve_pretty_cmap('shap'))
print('=======================\nplot_confusion test:',u'\u2713','\n============================') | 0.291888 | 0.159054 |
import os
import unittest
import logging
import shutil
import tempfile
import ciftify.config
from ciftify.utils import run
import pytest
from pytest import raises
from unittest.mock import patch
import pandas as pd
import numpy as np
def get_test_data_path():
return os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data')
test_dtseries = os.path.join(get_test_data_path(),
'sub-50005_task-rest_Atlas_s0.dtseries.nii')
test_nifti = os.path.join(get_test_data_path(),
'sub-50005_task-rest_bold_space-MNI152NLin2009cAsym_preproc.nii.gz')
left_surface = os.path.join(get_test_data_path(),
'sub-50005.L.midthickness.32k_fs_LR.surf.gii')
right_surface = os.path.join(get_test_data_path(),
'sub-50005.R.midthickness.32k_fs_LR.surf.gii')
custom_dlabel = os.path.join(get_test_data_path(),
'rois',
'rois_for_tests.dlabel.nii')
weighted_dscalar = os.path.join(get_test_data_path(),
'rois',
'weighted_test_roi.dscalar.nii')
custom_dlabel_meants = os.path.join(get_test_data_path(),
'rois',
'sub-50005_task-rest_Atlas_s0_rois_for_tests_meants.csv')
# ciftify_meants (weighted)
# ciftify_meants (with a label)
# ciftify_meants (with multiple entries) - dlabel (no labels)
# ciftify_meants (with multiple entries) - dlabel (w labels)
# ciftify_meants (with multiple entries) - dscalar (no labels)
# ciftify_meants (with multiple entries) - dscalar (w labels)
# ciftify_meants (with mask)
# known correlations with the seed
# 0 0.087475
# 1 0.488206
# 2 0.049888
# 3 0.187913
# 4 0.078139
@pytest.fixture(scope = "function")
def output_dir():
with ciftify.utils.TempDir() as outputdir:
yield outputdir
@pytest.fixture(scope = "module")
def left_hemisphere_dir():
'''build a cleaned and smoothed dtseries file that multiple tests use as input'''
with ciftify.utils.TempDir() as mask_dir:
left_mask_gii = os.path.join(mask_dir, 'mask.L.shape.gii')
left_mask_dscalar = os.path.join(mask_dir, 'mask_L.dscalar.nii')
left_roi_dscalar = os.path.join(mask_dir, 'roi_L.dscalar.nii')
left_roi_gii = os.path.join(mask_dir, 'roi.L.shape.gii')
left_func = os.path.join(mask_dir, 'func.L.func.gii')
run(['wb_command', '-cifti-label-to-roi',
custom_dlabel, left_roi_dscalar,
'-key 1 -map 1'])
run(['wb_command', '-cifti-separate',
left_roi_dscalar, 'COLUMN',
'-metric', 'CORTEX_LEFT', left_roi_gii,
'-roi', left_mask_gii])
run(['wb_command', '-cifti-separate',
test_dtseries, 'COLUMN',
'-metric', 'CORTEX_LEFT', left_func])
run(['wb_command', '-cifti-create-dense-from-template',
test_dtseries, left_mask_dscalar,
'-metric', 'CORTEX_LEFT', left_mask_gii])
yield mask_dir
@pytest.fixture(scope = "module")
def subcort_images_dir():
'''create the '''
with ciftify.utils.TempDir() as subcort_images_dir:
subcort_mask = os.path.join(subcort_images_dir, 'mask.nii.gz')
subcort_rois = os.path.join(subcort_images_dir, 'rois.nii.gz')
subcort_func = os.path.join(subcort_images_dir, 'func.nii.gz')
run(['wb_command', '-cifti-separate',
custom_dlabel, 'COLUMN',
'-volume-all', subcort_rois,
'-roi', subcort_mask])
run(['wb_command', '-cifti-separate',
test_dtseries, 'COLUMN',
'-volume-all', subcort_func])
yield subcort_images_dir
def read_meants_and_transpose(meants_csv):
return pd.read_csv(meants_csv, header = None).transpose()
def get_the_5_rois_meants_outputs(input_file, tmpdir, seed_dscalar):
meants_csv = os.path.join(tmpdir, 'mts.csv')
meants_labels = os.path.join(tmpdir, 'labels.csv')
run(['ciftify_meants',
'--outputcsv', meants_csv,
'--outputlabels', meants_labels,
input_file, seed_dscalar])
meants_pd = pd.read_csv(meants_csv, header = None)
labels_pd = pd.read_csv(meants_labels)
return meants_pd, labels_pd
@pytest.fixture(scope = "module")
def custom_dlabel_timeseries():
meants_pd = read_meants_and_transpose(custom_dlabel_meants)
yield meants_pd
# ciftify_meants <cifti_func> <cifti_seed> (cifti_seed - cortical)
# ciftify_meants <cifti_func> <gifti_seed>
# ciftify_meants <gifti_func> <gifti_seed>
# results of a, b, and c should match
def test_ciftify_meants_cifti_func_custom_dlabel_left_gii(output_dir, custom_dlabel_timeseries, left_hemisphere_dir):
meants_out = os.path.join(output_dir, 'meants.csv')
run(['ciftify_meants',
'--hemi L',
'--outputcsv', meants_out,
test_dtseries,
os.path.join(left_hemisphere_dir, 'roi.L.shape.gii')])
assert os.path.isfile(meants_out)
meants_pd_out = read_meants_and_transpose(meants_out).loc[:,0]
expected_pd = custom_dlabel_timeseries.loc[:,0]
assert np.allclose(meants_pd_out.values, expected_pd.values, atol = 0.01)
def test_ciftify_meants_gii_func_custom_dlabel_left_gii(output_dir, custom_dlabel_timeseries, left_hemisphere_dir):
meants_out = os.path.join(output_dir, 'meants.csv')
run(['ciftify_meants', '--debug',
'--outputcsv', meants_out,
'--hemi L',
os.path.join(left_hemisphere_dir, 'func.L.func.gii'),
os.path.join(left_hemisphere_dir, 'roi.L.shape.gii')])
assert os.path.isfile(meants_out)
meants_pd_out = read_meants_and_transpose(meants_out).loc[:,0]
expected_pd = custom_dlabel_timeseries.loc[:,0]
assert np.allclose(meants_pd_out.values, expected_pd.values, atol = 0.01)
# ciftify_meants <nifti_func> <nifti_seed>
# ciftify_meants <cifti_func> <cifti_seed> (cifti_seed - subcortical)
# results of a and b should match (as long as the nifti came from them cifti)
def test_ciftify_meants_cifti_func_custom_dlabel(output_dir, custom_dlabel_timeseries):
meants_out = os.path.join(output_dir, 'meants.csv')
run(['ciftify_meants',
'--outputcsv', meants_out,
test_dtseries, custom_dlabel])
assert os.path.isfile(meants_out)
meants_pd_out = read_meants_and_transpose(meants_out)
results_cormat = meants_pd_out.corr().values
print(results_cormat)
expected_cormat = custom_dlabel_timeseries.corr().values
print(expected_cormat)
assert np.allclose(results_cormat, expected_cormat, atol = 0.001)
assert np.allclose(meants_pd_out, custom_dlabel_timeseries, atol = 0.001)
def test_ciftify_meants_cifti_func_custom_dlabel_some_missing(output_dir, custom_dlabel_timeseries, left_hemisphere_dir, subcort_images_dir):
''' set to make sure that the correct number of ROIs are extracted if the data is missing'''
## build a cifti file with only one hemisphere of data
one_hemi_func = os.path.join(output_dir, 'func_L_hemi.dtseries.nii')
run(['wb_command', '-cifti-create-dense-timeseries',
one_hemi_func,
'-left-metric', os.path.join(left_hemisphere_dir, 'func.L.func.gii'),
'-volume',
os.path.join(subcort_images_dir, 'func.nii.gz'),
os.path.join(ciftify.config.find_ciftify_global(),'standard_mesh_atlases','Atlas_ROIs.2.nii.gz')])
meants_out = os.path.join(output_dir, 'meants.csv')
labels_out = os.path.join(output_dir, 'labels.csv')
run(['ciftify_meants',
'--outputcsv', meants_out,
'--outputlabels',labels_out,
one_hemi_func, custom_dlabel])
assert os.path.isfile(meants_out)
assert os.path.isfile(labels_out)
meants_pd_out = read_meants_and_transpose(meants_out)
meants_labels = pd.read_csv(labels_out)
assert meants_pd_out.shape[1] == meants_labels.shape[0]
print(meants_pd_out)
print(meants_labels)
assert np.allclose(meants_pd_out.loc[:,0].values, custom_dlabel_timeseries.loc[:,0].values, atol = 0.001)
assert np.allclose(meants_pd_out.loc[:,1:2].values, np.zeros((meants_pd_out.shape[0],2)), atol = 0.001)
def test_ciftify_meants_cifti_func_custom_dlabel_subcort(output_dir, custom_dlabel_timeseries, subcort_images_dir):
meants_out = os.path.join(output_dir, 'meants.csv')
run(['ciftify_meants',
'--outputcsv', meants_out,
test_dtseries,
os.path.join(subcort_images_dir, 'rois.nii.gz')])
assert os.path.isfile(meants_out)
meants_pd_out = read_meants_and_transpose(meants_out)
expected_pd = custom_dlabel_timeseries.loc[:,3:4]
corr_test = meants_pd_out.corr().values
corr_expected = expected_pd.corr().values
assert np.allclose(corr_test[0,1], corr_expected[0,1], atol = 0.001)
assert np.allclose(meants_pd_out.values, expected_pd.values, atol = 0.001)
def test_ciftify_meants_nifti_func_custom_dlabel_subcort(output_dir, custom_dlabel_timeseries, subcort_images_dir):
meants_out = os.path.join(output_dir, 'meants.csv')
run(['ciftify_meants',
'--outputcsv', meants_out,
os.path.join(subcort_images_dir, 'func.nii.gz'),
os.path.join(subcort_images_dir, 'rois.nii.gz')])
assert os.path.isfile(meants_out)
meants_pd_out = read_meants_and_transpose(meants_out)
expected_pd = custom_dlabel_timeseries.loc[:,3:4]
corr_test = meants_pd_out.corr().values
corr_expected = expected_pd.corr().values
assert np.allclose(corr_test[0,1], corr_expected[0,1], atol = 0.001)
assert np.allclose(meants_pd_out.values, expected_pd.values, atol = 0.001)
def test_ciftify_meants_nifti_func_custom_dlabel_subcort_one_label(output_dir, custom_dlabel_timeseries, subcort_images_dir):
meants_out = os.path.join(output_dir, 'meants.csv')
run(['ciftify_meants',
'--outputcsv', meants_out,
'--roi-label 4',
os.path.join(subcort_images_dir, 'func.nii.gz'),
os.path.join(subcort_images_dir, 'rois.nii.gz')])
assert os.path.isfile(meants_out)
meants_pd_out = read_meants_and_transpose(meants_out).loc[:,0]
expected_pd = custom_dlabel_timeseries.loc[:,3]
assert np.allclose(meants_pd_out.values, expected_pd.values, atol = 0.001)
def test_ciftify_seedcorr_with_cifti_output_no_mask(output_dir, left_hemisphere_dir):
new_test_dtseries = os.path.join(output_dir, 'sub-xx_test.dtseries.nii')
run(['cp', test_dtseries, new_test_dtseries])
seedcorr_output = os.path.join(output_dir,
'sub-xx_test_weighted_test_roi.dscalar.nii')
run(['ciftify_seed_corr', '--debug',
'--weighted',
new_test_dtseries,
weighted_dscalar])
meants5, labels5 = get_the_5_rois_meants_outputs(seedcorr_output, output_dir, custom_dlabel)
assert os.path.isfile(seedcorr_output)
assert pytest.approx(meants5.loc[0,0], 0.001) == 0.0875
assert pytest.approx(meants5.loc[1,0], 0.001) == 0.488
assert pytest.approx(meants5.loc[3,0], 0.001) == 0.1879
def test_ciftify_seedcorr_cifti_output_with_mask(output_dir, left_hemisphere_dir):
seedcorr_output = os.path.join(output_dir,
'seedcorr.dscalar.nii')
run(['ciftify_seed_corr', '--debug',
'--outputname', seedcorr_output,
'--weighted',
'--mask', os.path.join(left_hemisphere_dir, 'mask_L.dscalar.nii'),
test_dtseries,
weighted_dscalar])
meants5, labels5 = get_the_5_rois_meants_outputs(seedcorr_output, output_dir, custom_dlabel)
assert os.path.isfile(seedcorr_output)
print(meants5)
print(labels5)
assert pytest.approx(meants5.loc[0,0], 0.001) == 0.0875
assert pytest.approx(meants5.loc[1,0], 0.001) == 0
assert pytest.approx(meants5.loc[3,0], 0.001) == 0
def test_ciftify_seedcorr_cifti_output_nifti_seed(output_dir, subcort_images_dir):
seedcorr_output = os.path.join(output_dir,
'seedcorr.dscalar.nii')
run(['ciftify_seed_corr',
'--debug',
'--roi-label 4',
'--outputname', seedcorr_output,
test_dtseries,
os.path.join(subcort_images_dir, 'rois.nii.gz')])
meants5, labels5 = get_the_5_rois_meants_outputs(
seedcorr_output, output_dir, custom_dlabel)
print(meants5)
print(labels5)
assert os.path.isfile(seedcorr_output)
assert pytest.approx(meants5.loc[0,0], 0.001) == 0.1256
assert pytest.approx(meants5.loc[1,0], 0.001) == 0.3094
assert pytest.approx(meants5.loc[3,0], 0.001) == 0.3237
assert pytest.approx(meants5.loc[4,0], 0.001) == 0.1458
def test_ciftify_seedcorr_nifti_output_no_mask(output_dir, subcort_images_dir):
seedcorr_output = os.path.join(output_dir,
'seedcorr.nii.gz')
run(['ciftify_seed_corr',
'--roi-label 4',
'--outputname', seedcorr_output,
os.path.join(subcort_images_dir, 'func.nii.gz'),
os.path.join(subcort_images_dir, 'rois.nii.gz')])
meants5, labels5 = get_the_5_rois_meants_outputs(seedcorr_output, output_dir, os.path.join(subcort_images_dir, 'rois.nii.gz'))
print(meants5)
print(labels5)
assert os.path.isfile(seedcorr_output)
assert pytest.approx(meants5.loc[0,0], 0.001) == 0.3237
assert pytest.approx(meants5.loc[1,0], 0.001) == 0.1458
def test_ciftify_seedcorr_nifti_output_with_mask(output_dir, subcort_images_dir):
seedcorr_output = os.path.join(output_dir,
'seedcorr.nii.gz')
run(['ciftify_seed_corr',
'--roi-label 4',
'--outputname', seedcorr_output,
'--mask', os.path.join(subcort_images_dir, 'mask.nii.gz'),
os.path.join(subcort_images_dir, 'func.nii.gz'),
os.path.join(subcort_images_dir, 'rois.nii.gz')])
meants5, labels5 = get_the_5_rois_meants_outputs(seedcorr_output, output_dir, os.path.join(subcort_images_dir, 'rois.nii.gz'))
print(meants5)
assert os.path.isfile(seedcorr_output)
assert pytest.approx(meants5.loc[0,0], 0.001) == 0.3237
assert pytest.approx(meants5.loc[1,0], 0.001) == 0.1458
def test_ciftify_seedcorr_cifti_output_with_fisherz(output_dir):
seedcorr_output = os.path.join(output_dir,
'seedcorr.dscalar.nii')
meants_output = os.path.join(output_dir,
'seedcorr_meants.csv')
run(['ciftify_seed_corr', '--debug',
'--weighted', '--fisher-z',
'--outputname', seedcorr_output,
'--output-ts',
test_dtseries,
weighted_dscalar])
assert os.path.isfile(seedcorr_output)
assert os.path.isfile(meants_output)
meants5, labels5 = get_the_5_rois_meants_outputs(
seedcorr_output, output_dir, custom_dlabel)
assert pytest.approx(meants5.loc[0,0], 0.001) == 0.0894
assert pytest.approx(meants5.loc[1,0], 0.001) == 0.547
assert pytest.approx(meants5.loc[3,0], 0.001) == 0.194
def test_ciftify_seedcorr_cifti_output_with_TRfile(output_dir):
seedcorr_output = os.path.join(output_dir,
'seedcorr.dscalar.nii')
TR_file = os.path.join(output_dir, 'TR_file.txt')
with open(TR_file, "w") as text_file:
text_file.write('''1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30''')
run(['ciftify_seed_corr',
'--outputname', seedcorr_output,
'--use-TRs', TR_file,
'--weighted',
test_dtseries,
weighted_dscalar])
assert os.path.isfile(seedcorr_output)
meants5, labels5 = get_the_5_rois_meants_outputs(
seedcorr_output, output_dir, custom_dlabel)
assert pytest.approx(meants5.loc[0,0], 0.001) == 0.0929
assert pytest.approx(meants5.loc[1,0], 0.001) == 0.482
assert pytest.approx(meants5.loc[3,0], 0.001) == 0.220 | tests/functional/test_ciftify_seed_corr.py | import os
import unittest
import logging
import shutil
import tempfile
import ciftify.config
from ciftify.utils import run
import pytest
from pytest import raises
from unittest.mock import patch
import pandas as pd
import numpy as np
def get_test_data_path():
return os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data')
test_dtseries = os.path.join(get_test_data_path(),
'sub-50005_task-rest_Atlas_s0.dtseries.nii')
test_nifti = os.path.join(get_test_data_path(),
'sub-50005_task-rest_bold_space-MNI152NLin2009cAsym_preproc.nii.gz')
left_surface = os.path.join(get_test_data_path(),
'sub-50005.L.midthickness.32k_fs_LR.surf.gii')
right_surface = os.path.join(get_test_data_path(),
'sub-50005.R.midthickness.32k_fs_LR.surf.gii')
custom_dlabel = os.path.join(get_test_data_path(),
'rois',
'rois_for_tests.dlabel.nii')
weighted_dscalar = os.path.join(get_test_data_path(),
'rois',
'weighted_test_roi.dscalar.nii')
custom_dlabel_meants = os.path.join(get_test_data_path(),
'rois',
'sub-50005_task-rest_Atlas_s0_rois_for_tests_meants.csv')
# ciftify_meants (weighted)
# ciftify_meants (with a label)
# ciftify_meants (with multiple entries) - dlabel (no labels)
# ciftify_meants (with multiple entries) - dlabel (w labels)
# ciftify_meants (with multiple entries) - dscalar (no labels)
# ciftify_meants (with multiple entries) - dscalar (w labels)
# ciftify_meants (with mask)
# known correlations with the seed
# 0 0.087475
# 1 0.488206
# 2 0.049888
# 3 0.187913
# 4 0.078139
@pytest.fixture(scope = "function")
def output_dir():
with ciftify.utils.TempDir() as outputdir:
yield outputdir
@pytest.fixture(scope = "module")
def left_hemisphere_dir():
'''build a cleaned and smoothed dtseries file that multiple tests use as input'''
with ciftify.utils.TempDir() as mask_dir:
left_mask_gii = os.path.join(mask_dir, 'mask.L.shape.gii')
left_mask_dscalar = os.path.join(mask_dir, 'mask_L.dscalar.nii')
left_roi_dscalar = os.path.join(mask_dir, 'roi_L.dscalar.nii')
left_roi_gii = os.path.join(mask_dir, 'roi.L.shape.gii')
left_func = os.path.join(mask_dir, 'func.L.func.gii')
run(['wb_command', '-cifti-label-to-roi',
custom_dlabel, left_roi_dscalar,
'-key 1 -map 1'])
run(['wb_command', '-cifti-separate',
left_roi_dscalar, 'COLUMN',
'-metric', 'CORTEX_LEFT', left_roi_gii,
'-roi', left_mask_gii])
run(['wb_command', '-cifti-separate',
test_dtseries, 'COLUMN',
'-metric', 'CORTEX_LEFT', left_func])
run(['wb_command', '-cifti-create-dense-from-template',
test_dtseries, left_mask_dscalar,
'-metric', 'CORTEX_LEFT', left_mask_gii])
yield mask_dir
@pytest.fixture(scope = "module")
def subcort_images_dir():
'''create the '''
with ciftify.utils.TempDir() as subcort_images_dir:
subcort_mask = os.path.join(subcort_images_dir, 'mask.nii.gz')
subcort_rois = os.path.join(subcort_images_dir, 'rois.nii.gz')
subcort_func = os.path.join(subcort_images_dir, 'func.nii.gz')
run(['wb_command', '-cifti-separate',
custom_dlabel, 'COLUMN',
'-volume-all', subcort_rois,
'-roi', subcort_mask])
run(['wb_command', '-cifti-separate',
test_dtseries, 'COLUMN',
'-volume-all', subcort_func])
yield subcort_images_dir
def read_meants_and_transpose(meants_csv):
return pd.read_csv(meants_csv, header = None).transpose()
def get_the_5_rois_meants_outputs(input_file, tmpdir, seed_dscalar):
meants_csv = os.path.join(tmpdir, 'mts.csv')
meants_labels = os.path.join(tmpdir, 'labels.csv')
run(['ciftify_meants',
'--outputcsv', meants_csv,
'--outputlabels', meants_labels,
input_file, seed_dscalar])
meants_pd = pd.read_csv(meants_csv, header = None)
labels_pd = pd.read_csv(meants_labels)
return meants_pd, labels_pd
@pytest.fixture(scope = "module")
def custom_dlabel_timeseries():
meants_pd = read_meants_and_transpose(custom_dlabel_meants)
yield meants_pd
# ciftify_meants <cifti_func> <cifti_seed> (cifti_seed - cortical)
# ciftify_meants <cifti_func> <gifti_seed>
# ciftify_meants <gifti_func> <gifti_seed>
# results of a, b, and c should match
def test_ciftify_meants_cifti_func_custom_dlabel_left_gii(output_dir, custom_dlabel_timeseries, left_hemisphere_dir):
meants_out = os.path.join(output_dir, 'meants.csv')
run(['ciftify_meants',
'--hemi L',
'--outputcsv', meants_out,
test_dtseries,
os.path.join(left_hemisphere_dir, 'roi.L.shape.gii')])
assert os.path.isfile(meants_out)
meants_pd_out = read_meants_and_transpose(meants_out).loc[:,0]
expected_pd = custom_dlabel_timeseries.loc[:,0]
assert np.allclose(meants_pd_out.values, expected_pd.values, atol = 0.01)
def test_ciftify_meants_gii_func_custom_dlabel_left_gii(output_dir, custom_dlabel_timeseries, left_hemisphere_dir):
meants_out = os.path.join(output_dir, 'meants.csv')
run(['ciftify_meants', '--debug',
'--outputcsv', meants_out,
'--hemi L',
os.path.join(left_hemisphere_dir, 'func.L.func.gii'),
os.path.join(left_hemisphere_dir, 'roi.L.shape.gii')])
assert os.path.isfile(meants_out)
meants_pd_out = read_meants_and_transpose(meants_out).loc[:,0]
expected_pd = custom_dlabel_timeseries.loc[:,0]
assert np.allclose(meants_pd_out.values, expected_pd.values, atol = 0.01)
# ciftify_meants <nifti_func> <nifti_seed>
# ciftify_meants <cifti_func> <cifti_seed> (cifti_seed - subcortical)
# results of a and b should match (as long as the nifti came from them cifti)
def test_ciftify_meants_cifti_func_custom_dlabel(output_dir, custom_dlabel_timeseries):
meants_out = os.path.join(output_dir, 'meants.csv')
run(['ciftify_meants',
'--outputcsv', meants_out,
test_dtseries, custom_dlabel])
assert os.path.isfile(meants_out)
meants_pd_out = read_meants_and_transpose(meants_out)
results_cormat = meants_pd_out.corr().values
print(results_cormat)
expected_cormat = custom_dlabel_timeseries.corr().values
print(expected_cormat)
assert np.allclose(results_cormat, expected_cormat, atol = 0.001)
assert np.allclose(meants_pd_out, custom_dlabel_timeseries, atol = 0.001)
def test_ciftify_meants_cifti_func_custom_dlabel_some_missing(output_dir, custom_dlabel_timeseries, left_hemisphere_dir, subcort_images_dir):
''' set to make sure that the correct number of ROIs are extracted if the data is missing'''
## build a cifti file with only one hemisphere of data
one_hemi_func = os.path.join(output_dir, 'func_L_hemi.dtseries.nii')
run(['wb_command', '-cifti-create-dense-timeseries',
one_hemi_func,
'-left-metric', os.path.join(left_hemisphere_dir, 'func.L.func.gii'),
'-volume',
os.path.join(subcort_images_dir, 'func.nii.gz'),
os.path.join(ciftify.config.find_ciftify_global(),'standard_mesh_atlases','Atlas_ROIs.2.nii.gz')])
meants_out = os.path.join(output_dir, 'meants.csv')
labels_out = os.path.join(output_dir, 'labels.csv')
run(['ciftify_meants',
'--outputcsv', meants_out,
'--outputlabels',labels_out,
one_hemi_func, custom_dlabel])
assert os.path.isfile(meants_out)
assert os.path.isfile(labels_out)
meants_pd_out = read_meants_and_transpose(meants_out)
meants_labels = pd.read_csv(labels_out)
assert meants_pd_out.shape[1] == meants_labels.shape[0]
print(meants_pd_out)
print(meants_labels)
assert np.allclose(meants_pd_out.loc[:,0].values, custom_dlabel_timeseries.loc[:,0].values, atol = 0.001)
assert np.allclose(meants_pd_out.loc[:,1:2].values, np.zeros((meants_pd_out.shape[0],2)), atol = 0.001)
def test_ciftify_meants_cifti_func_custom_dlabel_subcort(output_dir, custom_dlabel_timeseries, subcort_images_dir):
meants_out = os.path.join(output_dir, 'meants.csv')
run(['ciftify_meants',
'--outputcsv', meants_out,
test_dtseries,
os.path.join(subcort_images_dir, 'rois.nii.gz')])
assert os.path.isfile(meants_out)
meants_pd_out = read_meants_and_transpose(meants_out)
expected_pd = custom_dlabel_timeseries.loc[:,3:4]
corr_test = meants_pd_out.corr().values
corr_expected = expected_pd.corr().values
assert np.allclose(corr_test[0,1], corr_expected[0,1], atol = 0.001)
assert np.allclose(meants_pd_out.values, expected_pd.values, atol = 0.001)
def test_ciftify_meants_nifti_func_custom_dlabel_subcort(output_dir, custom_dlabel_timeseries, subcort_images_dir):
meants_out = os.path.join(output_dir, 'meants.csv')
run(['ciftify_meants',
'--outputcsv', meants_out,
os.path.join(subcort_images_dir, 'func.nii.gz'),
os.path.join(subcort_images_dir, 'rois.nii.gz')])
assert os.path.isfile(meants_out)
meants_pd_out = read_meants_and_transpose(meants_out)
expected_pd = custom_dlabel_timeseries.loc[:,3:4]
corr_test = meants_pd_out.corr().values
corr_expected = expected_pd.corr().values
assert np.allclose(corr_test[0,1], corr_expected[0,1], atol = 0.001)
assert np.allclose(meants_pd_out.values, expected_pd.values, atol = 0.001)
def test_ciftify_meants_nifti_func_custom_dlabel_subcort_one_label(output_dir, custom_dlabel_timeseries, subcort_images_dir):
meants_out = os.path.join(output_dir, 'meants.csv')
run(['ciftify_meants',
'--outputcsv', meants_out,
'--roi-label 4',
os.path.join(subcort_images_dir, 'func.nii.gz'),
os.path.join(subcort_images_dir, 'rois.nii.gz')])
assert os.path.isfile(meants_out)
meants_pd_out = read_meants_and_transpose(meants_out).loc[:,0]
expected_pd = custom_dlabel_timeseries.loc[:,3]
assert np.allclose(meants_pd_out.values, expected_pd.values, atol = 0.001)
def test_ciftify_seedcorr_with_cifti_output_no_mask(output_dir, left_hemisphere_dir):
new_test_dtseries = os.path.join(output_dir, 'sub-xx_test.dtseries.nii')
run(['cp', test_dtseries, new_test_dtseries])
seedcorr_output = os.path.join(output_dir,
'sub-xx_test_weighted_test_roi.dscalar.nii')
run(['ciftify_seed_corr', '--debug',
'--weighted',
new_test_dtseries,
weighted_dscalar])
meants5, labels5 = get_the_5_rois_meants_outputs(seedcorr_output, output_dir, custom_dlabel)
assert os.path.isfile(seedcorr_output)
assert pytest.approx(meants5.loc[0,0], 0.001) == 0.0875
assert pytest.approx(meants5.loc[1,0], 0.001) == 0.488
assert pytest.approx(meants5.loc[3,0], 0.001) == 0.1879
def test_ciftify_seedcorr_cifti_output_with_mask(output_dir, left_hemisphere_dir):
seedcorr_output = os.path.join(output_dir,
'seedcorr.dscalar.nii')
run(['ciftify_seed_corr', '--debug',
'--outputname', seedcorr_output,
'--weighted',
'--mask', os.path.join(left_hemisphere_dir, 'mask_L.dscalar.nii'),
test_dtseries,
weighted_dscalar])
meants5, labels5 = get_the_5_rois_meants_outputs(seedcorr_output, output_dir, custom_dlabel)
assert os.path.isfile(seedcorr_output)
print(meants5)
print(labels5)
assert pytest.approx(meants5.loc[0,0], 0.001) == 0.0875
assert pytest.approx(meants5.loc[1,0], 0.001) == 0
assert pytest.approx(meants5.loc[3,0], 0.001) == 0
def test_ciftify_seedcorr_cifti_output_nifti_seed(output_dir, subcort_images_dir):
seedcorr_output = os.path.join(output_dir,
'seedcorr.dscalar.nii')
run(['ciftify_seed_corr',
'--debug',
'--roi-label 4',
'--outputname', seedcorr_output,
test_dtseries,
os.path.join(subcort_images_dir, 'rois.nii.gz')])
meants5, labels5 = get_the_5_rois_meants_outputs(
seedcorr_output, output_dir, custom_dlabel)
print(meants5)
print(labels5)
assert os.path.isfile(seedcorr_output)
assert pytest.approx(meants5.loc[0,0], 0.001) == 0.1256
assert pytest.approx(meants5.loc[1,0], 0.001) == 0.3094
assert pytest.approx(meants5.loc[3,0], 0.001) == 0.3237
assert pytest.approx(meants5.loc[4,0], 0.001) == 0.1458
def test_ciftify_seedcorr_nifti_output_no_mask(output_dir, subcort_images_dir):
seedcorr_output = os.path.join(output_dir,
'seedcorr.nii.gz')
run(['ciftify_seed_corr',
'--roi-label 4',
'--outputname', seedcorr_output,
os.path.join(subcort_images_dir, 'func.nii.gz'),
os.path.join(subcort_images_dir, 'rois.nii.gz')])
meants5, labels5 = get_the_5_rois_meants_outputs(seedcorr_output, output_dir, os.path.join(subcort_images_dir, 'rois.nii.gz'))
print(meants5)
print(labels5)
assert os.path.isfile(seedcorr_output)
assert pytest.approx(meants5.loc[0,0], 0.001) == 0.3237
assert pytest.approx(meants5.loc[1,0], 0.001) == 0.1458
def test_ciftify_seedcorr_nifti_output_with_mask(output_dir, subcort_images_dir):
seedcorr_output = os.path.join(output_dir,
'seedcorr.nii.gz')
run(['ciftify_seed_corr',
'--roi-label 4',
'--outputname', seedcorr_output,
'--mask', os.path.join(subcort_images_dir, 'mask.nii.gz'),
os.path.join(subcort_images_dir, 'func.nii.gz'),
os.path.join(subcort_images_dir, 'rois.nii.gz')])
meants5, labels5 = get_the_5_rois_meants_outputs(seedcorr_output, output_dir, os.path.join(subcort_images_dir, 'rois.nii.gz'))
print(meants5)
assert os.path.isfile(seedcorr_output)
assert pytest.approx(meants5.loc[0,0], 0.001) == 0.3237
assert pytest.approx(meants5.loc[1,0], 0.001) == 0.1458
def test_ciftify_seedcorr_cifti_output_with_fisherz(output_dir):
seedcorr_output = os.path.join(output_dir,
'seedcorr.dscalar.nii')
meants_output = os.path.join(output_dir,
'seedcorr_meants.csv')
run(['ciftify_seed_corr', '--debug',
'--weighted', '--fisher-z',
'--outputname', seedcorr_output,
'--output-ts',
test_dtseries,
weighted_dscalar])
assert os.path.isfile(seedcorr_output)
assert os.path.isfile(meants_output)
meants5, labels5 = get_the_5_rois_meants_outputs(
seedcorr_output, output_dir, custom_dlabel)
assert pytest.approx(meants5.loc[0,0], 0.001) == 0.0894
assert pytest.approx(meants5.loc[1,0], 0.001) == 0.547
assert pytest.approx(meants5.loc[3,0], 0.001) == 0.194
def test_ciftify_seedcorr_cifti_output_with_TRfile(output_dir):
    """--use-TRs restricts the correlation to the TRs listed in a text file."""
    seedcorr_output = os.path.join(output_dir,
        'seedcorr.dscalar.nii')
    # Write a TR-selection file naming all 30 TRs of the test dtseries.
    TR_file = os.path.join(output_dir, 'TR_file.txt')
    with open(TR_file, "w") as text_file:
        text_file.write('''1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30''')
    run(['ciftify_seed_corr',
        '--outputname', seedcorr_output,
        '--use-TRs', TR_file,
        '--weighted',
        test_dtseries,
        weighted_dscalar])
    assert os.path.isfile(seedcorr_output)
    # Sample the resulting map through the 5-ROI helper and spot-check values.
    meants5, labels5 = get_the_5_rois_meants_outputs(
        seedcorr_output, output_dir, custom_dlabel)
    assert pytest.approx(meants5.loc[0,0], 0.001) == 0.0929
    assert pytest.approx(meants5.loc[1,0], 0.001) == 0.482
assert pytest.approx(meants5.loc[3,0], 0.001) == 0.220 | 0.335024 | 0.254796 |
from collections import defaultdict
import itertools
from cdecimal import Decimal
from gryphon.lib.logger import get_logger
from gryphon.lib.models.emeraldhavoc.orderbook import Orderbook
from gryphon.lib.money import Money
from gryphon.lib.metrics import midpoint
logger = get_logger(__name__)
CURRENCY_MISMATCH_ERROR_MESSAGE = """\
Orderbooks do not have the same volume currency. A: %s, B: %s\
"""
class MismatchedVolumeCurrenciesError(Exception):
    """Raised when two orderbooks quote volume in different currencies.

    Crosses between two books are only meaningful when both measure volume
    in the same currency.
    """

    def __init__(self, cur_a, cur_b):
        message = CURRENCY_MISMATCH_ERROR_MESSAGE % (cur_a, cur_b)
        # Pass the message to Exception so str(exc) and tracebacks show it;
        # previously only self.message was set, leaving str(exc) empty.
        super(MismatchedVolumeCurrenciesError, self).__init__(message)
        self.message = message
class Cross(object):
    """
    Represents a cross between the bids and asks of two orderbooks, whether
    or not that cross is a profitable arbitrage opportunity.
    """

    def __init__(self, volume, revenue, fees, buy_ob=None, sell_ob=None, buy_ex=None, sell_ex=None):
        self.volume = volume    # Overlap volume (in the volume currency).
        self.revenue = revenue  # Gross revenue of taking the full overlap.
        self.fees = fees        # Expected market-order fees on both legs.
        self.buy_orderbook = buy_ob
        self.sell_orderbook = sell_ob

        # Honour explicitly-passed exchanges; otherwise derive them from the
        # orderbooks. (Previously buy_ex/sell_ex were accepted but ignored.)
        if buy_ex is not None:
            self.buy_exchange = buy_ex
        else:
            self.buy_exchange = buy_ob['asks'][0].exchange if buy_ob else None

        if sell_ex is not None:
            self.sell_exchange = sell_ex
        else:
            self.sell_exchange = sell_ob['bids'][0].exchange if sell_ob else None

    @property
    def volume_currency(self):
        return self.buy_exchange.volume_currency

    @property
    def price_currency(self):
        return self.buy_exchange.currency

    @property
    def profit(self):
        """Net result of taking the cross: revenue minus fees."""
        return self.revenue - self.fees

    def __nonzero__(self):
        """
        A cross is falsy if there is no overlap volume.
        """
        return bool(self.volume)

    # Python 3 spells the truthiness hook __bool__; alias it so truth tests
    # behave the same on both interpreters.
    __bool__ = __nonzero__
def detect_cross(ob1, ob2, ignore_unprofitable=True):
    """
    Look for orderbook overlap between two exchanges that could be arbitraged
    in either direction.

    Returns a Cross, or None if no overlap was found.
    """
    # Try buying on ob1 and selling on ob2 first; fall back to the reverse.
    for buy_side, sell_side in ((ob1, ob2), (ob2, ob1)):
        cross = detect_directional_cross(buy_side, sell_side, ignore_unprofitable)

        if cross is not None:
            return cross

    return None
def detect_directional_cross(buy_ob, sell_ob, ignore_unprofitable=True):
    """
    Calculates the volume by which buy_ob's asks cut into sell_ob's bids, and
    the profit that could be gleaned if one could take the arbitrage. By
    default will not return a cross if it is not profitable to take advantage
    of given the fee brackets on the two pairs; pass ignore_unprofitable=False
    to also get unprofitable crosses back.

    Returns a Cross object containing the total volume of the overlap and the
    expected revenue and fees if one were to take the full opportunity, or
    None when there is no overlap.

    Notes:
    - The orderbook arguments are named for the action you take, not the type
      of order you are looking at. If you want to buy on ob1 and sell on ob2,
      you look at the asks on ob1 (buy_ob) and the bids on ob2 (sell_ob).
    - This supports considering orderbooks with different price currencies.
      For price currencies with high short-term volatility (BTC, ETH, etc.)
      the builtin exchange rate service--OpenExchangeRates--may not update
      fast enough; users should do their own research.
    - It's important that the market order fees for the exchanges you use
      have been accurately configured.
    - In this usage 'volume' refers to the area of the overlap, which is
      one-half the total volume that would be required in orders to take the
      opportunity, since there are two exchanges.
    """
    if not buy_ob['asks'] or not sell_ob['bids']:  # A degenerate orderbook.
        return None

    buy_ex = buy_ob['asks'][0].exchange
    sell_ex = sell_ob['bids'][0].exchange

    # Detect the volume currency and check for a mismatch.
    if buy_ex.volume_currency != sell_ex.volume_currency:
        raise MismatchedVolumeCurrenciesError(
            buy_ex.volume_currency,
            sell_ex.volume_currency,
        )

    volume_currency = buy_ex.volume_currency

    # We use the buy exchange's price currency as our ground.
    base_price_currency = buy_ex.currency

    # Initialize the variables we use in the iteration phase.
    total_volume = Money('0', volume_currency)
    total_revenue = Money('0', base_price_currency)
    total_fees = Money('0', base_price_currency)

    ask_index = -1
    bid_index = -1
    ask_remaining_volume = Money('0', volume_currency)
    bid_remaining_volume = Money('0', volume_currency)
    ask = None
    bid = None

    # Walk down both sides, consuming whichever of the current ask/bid has
    # less remaining volume at each step.
    # BUGFIX: the bid index must be bounded by the number of *bids* on the
    # sell book; this previously compared against len(sell_ob['asks']).
    while (ask_index < len(buy_ob['asks']) - 1
            and bid_index < len(sell_ob['bids']) - 1):
        if ask_remaining_volume == Money('0', volume_currency):
            ask_index += 1
            ask = buy_ob['asks'][ask_index]
            ask_price = ask.price.to(base_price_currency)
            ask_remaining_volume = ask.volume

        if bid_remaining_volume == Money('0', volume_currency):
            bid_index += 1
            bid = sell_ob['bids'][bid_index]
            bid_price = bid.price.to(base_price_currency)
            bid_remaining_volume = bid.volume

        if bid_price > ask_price:  # Found a cross.
            margin = bid_price - ask_price

            # The overlap at this level is bounded by the thinner side.
            if bid_remaining_volume > ask_remaining_volume:
                # This bid eats the whole ask.
                volume = ask_remaining_volume
            else:
                # This bid only eats part of the ask.
                volume = bid_remaining_volume

            revenue = margin * volume.amount
            total_revenue += revenue

            # Market-order fees are paid on both legs of the arbitrage.
            total_fees += bid_price * volume.amount * bid.exchange.market_order_fee
            total_fees += ask_price * volume.amount * ask.exchange.market_order_fee

            total_volume += volume
            bid_remaining_volume -= volume
            ask_remaining_volume -= volume
        else:
            # Books are sorted best-first, so no deeper level can cross.
            break

    if not total_volume:
        return None

    cross = Cross(total_volume, total_revenue, total_fees, buy_ob, sell_ob)

    if ignore_unprofitable is False or total_revenue > total_fees:
        return cross

    return None
def get_executable_volume(cross, buy_ex_balance, sell_ex_balance):
    """
    Given a cross between two exchanges and balance information for accounts
    on both exchanges, determine how much volume of the opportunity could be
    taken by the trader.
    """
    if not cross:
        return None

    # On the sell side we are limited by the smaller of the opportunity's
    # volume and our balance in the volume currency.
    sell_limit = min(cross.volume, sell_ex_balance[cross.volume_currency])

    # The buy side has to account for slippage and fees.
    buy_limit = max_buy_volume(
        buy_ex_balance[cross.price_currency],
        cross.buy_orderbook,
    )

    return min(sell_limit, buy_limit)
def detect_crosses_between_many_orderbooks(orderbooks, ignore_unprofitable=True):
    """
    Takes in a list of orderbooks and returns a list of crosses between those
    orderbooks, sorted most-profitable first.
    """
    candidates = [
        detect_cross(ob_a, ob_b, ignore_unprofitable)
        for ob_a, ob_b in itertools.combinations(orderbooks, 2)
    ]

    crosses = [cross for cross in candidates if cross is not None]
    crosses.sort(key=lambda cross: cross.profit, reverse=True)

    return crosses
def max_buy_volume(balance, buy_orderbook):
    """
    What is the maximum volume we can buy on the given orderbook with the
    given balance? This is more complicated than it initially appears due to
    slippage and fees.

    It feels like this belongs in its own library, but there isn't a clear
    place for it I can think of, so it remains here for now, nearby its main
    usage.
    """
    buy_ex = buy_orderbook['asks'][0].exchange
    fee = buy_ex.market_order_fee
    vol_currency = buy_ex.volume_currency
    balance_remaining = balance
    volume_available = Money('0', vol_currency)
    # Walk the asks from the top of the book, consuming whole levels while
    # we can afford them (price plus market-order fee).
    for order in buy_orderbook['asks']:
        total_order_price = order.price * order.volume.amount
        total_order_fee = total_order_price * fee
        total_order_cost = total_order_price + total_order_fee
        if total_order_cost <= balance_remaining:
            balance_remaining -= total_order_cost
            volume_available += order.volume
        else:
            # We can only partially fill this level.
            # With our given balance we want to use all of it, so we have:
            # (1 + fee) * (volume * price) = balance
            # volume = (balance / (1 + fee)) / price
            last_volume = (
                (balance_remaining.amount / (Decimal('1') + fee)) / order.price.amount
            )
            last_volume = Money(last_volume, vol_currency)
            volume_available += last_volume
            break
return volume_available | gryphon/lib/arbitrage.py | from collections import defaultdict
import itertools
from cdecimal import Decimal
from gryphon.lib.logger import get_logger
from gryphon.lib.models.emeraldhavoc.orderbook import Orderbook
from gryphon.lib.money import Money
from gryphon.lib.metrics import midpoint
logger = get_logger(__name__)
CURRENCY_MISMATCH_ERROR_MESSAGE = """\
Orderbooks do not have the same volume currency. A: %s, B: %s\
"""
class MismatchedVolumeCurrenciesError(Exception):
    """Raised when two orderbooks quote volume in different currencies.

    Crosses between two books are only meaningful when both measure volume
    in the same currency.
    """

    def __init__(self, cur_a, cur_b):
        message = CURRENCY_MISMATCH_ERROR_MESSAGE % (cur_a, cur_b)
        # Pass the message to Exception so str(exc) and tracebacks show it;
        # previously only self.message was set, leaving str(exc) empty.
        super(MismatchedVolumeCurrenciesError, self).__init__(message)
        self.message = message
class Cross(object):
    """
    Represents a cross between the bids and asks of two orderbooks, whether
    or not that cross is a profitable arbitrage opportunity.
    """

    def __init__(self, volume, revenue, fees, buy_ob=None, sell_ob=None, buy_ex=None, sell_ex=None):
        self.volume = volume    # Overlap volume (in the volume currency).
        self.revenue = revenue  # Gross revenue of taking the full overlap.
        self.fees = fees        # Expected market-order fees on both legs.
        self.buy_orderbook = buy_ob
        self.sell_orderbook = sell_ob

        # Honour explicitly-passed exchanges; otherwise derive them from the
        # orderbooks. (Previously buy_ex/sell_ex were accepted but ignored.)
        if buy_ex is not None:
            self.buy_exchange = buy_ex
        else:
            self.buy_exchange = buy_ob['asks'][0].exchange if buy_ob else None

        if sell_ex is not None:
            self.sell_exchange = sell_ex
        else:
            self.sell_exchange = sell_ob['bids'][0].exchange if sell_ob else None

    @property
    def volume_currency(self):
        return self.buy_exchange.volume_currency

    @property
    def price_currency(self):
        return self.buy_exchange.currency

    @property
    def profit(self):
        """Net result of taking the cross: revenue minus fees."""
        return self.revenue - self.fees

    def __nonzero__(self):
        """
        A cross is falsy if there is no overlap volume.
        """
        return bool(self.volume)

    # Python 3 spells the truthiness hook __bool__; alias it so truth tests
    # behave the same on both interpreters.
    __bool__ = __nonzero__
def detect_cross(ob1, ob2, ignore_unprofitable=True):
    """
    Look for orderbook overlap between two exchanges that could be arbitraged
    in either direction.

    Returns a Cross, or None if no overlap was found.
    """
    # Try buying on ob1 and selling on ob2 first; fall back to the reverse.
    for buy_side, sell_side in ((ob1, ob2), (ob2, ob1)):
        cross = detect_directional_cross(buy_side, sell_side, ignore_unprofitable)

        if cross is not None:
            return cross

    return None
def detect_directional_cross(buy_ob, sell_ob, ignore_unprofitable=True):
    """
    Calculates the volume by which buy_ob's asks cut into sell_ob's bids, and
    the profit that could be gleaned if one could take the arbitrage. By
    default will not return a cross if it is not profitable to take advantage
    of given the fee brackets on the two pairs; pass ignore_unprofitable=False
    to also get unprofitable crosses back.

    Returns a Cross object containing the total volume of the overlap and the
    expected revenue and fees if one were to take the full opportunity, or
    None when there is no overlap.

    Notes:
    - The orderbook arguments are named for the action you take, not the type
      of order you are looking at. If you want to buy on ob1 and sell on ob2,
      you look at the asks on ob1 (buy_ob) and the bids on ob2 (sell_ob).
    - This supports considering orderbooks with different price currencies.
      For price currencies with high short-term volatility (BTC, ETH, etc.)
      the builtin exchange rate service--OpenExchangeRates--may not update
      fast enough; users should do their own research.
    - It's important that the market order fees for the exchanges you use
      have been accurately configured.
    - In this usage 'volume' refers to the area of the overlap, which is
      one-half the total volume that would be required in orders to take the
      opportunity, since there are two exchanges.
    """
    if not buy_ob['asks'] or not sell_ob['bids']:  # A degenerate orderbook.
        return None

    buy_ex = buy_ob['asks'][0].exchange
    sell_ex = sell_ob['bids'][0].exchange

    # Detect the volume currency and check for a mismatch.
    if buy_ex.volume_currency != sell_ex.volume_currency:
        raise MismatchedVolumeCurrenciesError(
            buy_ex.volume_currency,
            sell_ex.volume_currency,
        )

    volume_currency = buy_ex.volume_currency

    # We use the buy exchange's price currency as our ground.
    base_price_currency = buy_ex.currency

    # Initialize the variables we use in the iteration phase.
    total_volume = Money('0', volume_currency)
    total_revenue = Money('0', base_price_currency)
    total_fees = Money('0', base_price_currency)

    ask_index = -1
    bid_index = -1
    ask_remaining_volume = Money('0', volume_currency)
    bid_remaining_volume = Money('0', volume_currency)
    ask = None
    bid = None

    # Walk down both sides, consuming whichever of the current ask/bid has
    # less remaining volume at each step.
    # BUGFIX: the bid index must be bounded by the number of *bids* on the
    # sell book; this previously compared against len(sell_ob['asks']).
    while (ask_index < len(buy_ob['asks']) - 1
            and bid_index < len(sell_ob['bids']) - 1):
        if ask_remaining_volume == Money('0', volume_currency):
            ask_index += 1
            ask = buy_ob['asks'][ask_index]
            ask_price = ask.price.to(base_price_currency)
            ask_remaining_volume = ask.volume

        if bid_remaining_volume == Money('0', volume_currency):
            bid_index += 1
            bid = sell_ob['bids'][bid_index]
            bid_price = bid.price.to(base_price_currency)
            bid_remaining_volume = bid.volume

        if bid_price > ask_price:  # Found a cross.
            margin = bid_price - ask_price

            # The overlap at this level is bounded by the thinner side.
            if bid_remaining_volume > ask_remaining_volume:
                # This bid eats the whole ask.
                volume = ask_remaining_volume
            else:
                # This bid only eats part of the ask.
                volume = bid_remaining_volume

            revenue = margin * volume.amount
            total_revenue += revenue

            # Market-order fees are paid on both legs of the arbitrage.
            total_fees += bid_price * volume.amount * bid.exchange.market_order_fee
            total_fees += ask_price * volume.amount * ask.exchange.market_order_fee

            total_volume += volume
            bid_remaining_volume -= volume
            ask_remaining_volume -= volume
        else:
            # Books are sorted best-first, so no deeper level can cross.
            break

    if not total_volume:
        return None

    cross = Cross(total_volume, total_revenue, total_fees, buy_ob, sell_ob)

    if ignore_unprofitable is False or total_revenue > total_fees:
        return cross

    return None
def get_executable_volume(cross, buy_ex_balance, sell_ex_balance):
    """
    Given a cross between two exchanges and balance information for accounts
    on both exchanges, determine how much volume of the opportunity could be
    taken by the trader.
    """
    if not cross:
        return None

    # On the sell side we are limited by the smaller of the opportunity's
    # volume and our balance in the volume currency.
    sell_limit = min(cross.volume, sell_ex_balance[cross.volume_currency])

    # The buy side has to account for slippage and fees.
    buy_limit = max_buy_volume(
        buy_ex_balance[cross.price_currency],
        cross.buy_orderbook,
    )

    return min(sell_limit, buy_limit)
def detect_crosses_between_many_orderbooks(orderbooks, ignore_unprofitable=True):
    """
    Takes in a list of orderbooks and returns a list of crosses between those
    orderbooks, sorted most-profitable first.
    """
    candidates = [
        detect_cross(ob_a, ob_b, ignore_unprofitable)
        for ob_a, ob_b in itertools.combinations(orderbooks, 2)
    ]

    crosses = [cross for cross in candidates if cross is not None]
    crosses.sort(key=lambda cross: cross.profit, reverse=True)

    return crosses
def max_buy_volume(balance, buy_orderbook):
    """
    What is the maximum volume we can buy on the given orderbook with the
    given balance? This is more complicated than it initially appears due to
    slippage and fees.

    It feels like this belongs in its own library, but there isn't a clear
    place for it I can think of, so it remains here for now, nearby its main
    usage.
    """
    exchange = buy_orderbook['asks'][0].exchange
    fee = exchange.market_order_fee
    vol_currency = exchange.volume_currency

    balance_remaining = balance
    volume_available = Money('0', vol_currency)

    # Consume whole ask levels from the top of the book while affordable.
    for order in buy_orderbook['asks']:
        level_price = order.price * order.volume.amount
        level_cost = level_price + level_price * fee

        if level_cost > balance_remaining:
            # We can only partially fill this level. We want to spend the
            # whole remaining balance, so:
            #   (1 + fee) * (volume * price) = balance
            #   volume = (balance / (1 + fee)) / price
            partial_volume = (
                (balance_remaining.amount / (Decimal('1') + fee)) / order.price.amount
            )
            volume_available += Money(partial_volume, vol_currency)
            break

        balance_remaining -= level_cost
        volume_available += order.volume

    return volume_available
import sys
sys.path.extend(('lib', 'db'))
from kleros import db, Dispute, Round, Vote, Config, Court, JurorStake, Deposit, Juror
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
from datetime import datetime
from time import gmtime, strftime
import statistics
app = Flask(__name__)
# SQLite database file, resolved relative to the process's working directory.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///../db/kleros.db'
# Disable Flask-SQLAlchemy's modification-tracking signals (unneeded overhead).
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# NOTE(review): this rebinds the name `db` imported from `kleros` above with a
# fresh SQLAlchemy instance — confirm the kleros models use this same instance.
db = SQLAlchemy(app)
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
@app.route('/court/<int:id>', methods=['GET'])
def court(id):
    """Court detail page: its disputes plus per-juror staking/voting data."""
    court = Court.query.get(id)
    court.disputes = court.disputes()

    staking_data = []
    for juror in court.jurors:
        amounts = juror.current_amount_in_court(court.id)
        vote_count = juror.votes_in_court(court.id)

        # Skip jurors with no stake in this subtree and no votes here.
        if amounts['court_and_children'] == 0 and vote_count == 0:
            continue

        staking_data.append({
            'address': juror.address,
            'votes_in_court': vote_count,
            'court_only': amounts['court_only'],
            'court_and_children': amounts['court_and_children'],
        })

    court.staking_data = sorted(
        staking_data,
        key=lambda row: (row['court_and_children'], row['votes_in_court']),
        reverse=True,
    )

    # NOTE(review): the -1 presumably excludes a placeholder entry — confirm.
    num_Jurors = len(court.jurors) - 1

    return render_template(
        'monitor/court.html',
        court=court,
        last_updated=Config.get('updated'),
        num_Jurors=num_Jurors,
        jurors_stats=[], full_jurors=[], full_jurors_stats=[], voting_jurors_num=[],
    )
'''
court_num = court
voting_jurors = court_num.jurors
voting_jurors_num = len(voting_jurors)
jurors = court.jurors_stakings()
jurors_stats = court.juror_stats()
court_mapping = {
0: [2, 3, 4],
2: [3, 4,]
}
full_jurors = []
full_jurors_stats = {}
if id in court_mapping:
amounts = []
unique_jurors = {}
for j in jurors:
if j['address'] not in unique_jurors:
unique_jurors[j['address']] = {"staking_amount": j['staking_amount']}
else:
unique_jurors[j['address']]["staking_amount"] += j['staking_amount']
amounts.append(j['staking_amount'])
courts = Court.query.filter(Court.id.in_(court_mapping[id]))
for c in courts:
court_jurors = c.jurors_stakings()
for cj in court_jurors:
if cj['address'] not in unique_jurors:
unique_jurors[cj['address']] = {"staking_amount": cj['staking_amount']}
else:
unique_jurors[cj['address']]["staking_amount"] += cj['staking_amount']
amounts.append(cj['staking_amount'])
full_jurors_stats['length'] = len(amounts)
full_jurors_stats['mean'] = statistics.mean(amounts)
full_jurors_stats['median'] = statistics.median(amounts)
full_jurors = [{'address': address, 'staking_amount': juror['staking_amount']} for address, juror in unique_jurors.items()]
full_jurors = sorted(full_jurors, key=lambda j: j['staking_amount'], reverse=True)
return render_template('monitor/court.html', court=court, disputes=disputes, jurors=jurors, last_updated=Config.get('updated'),
jurors_stats=jurors_stats, full_jurors=full_jurors, full_jurors_stats=full_jurors_stats, voting_jurors_num=voting_jurors_num)
'''
@app.route('/', methods=['GET'])
@app.route('/disputes', methods=['GET'])
def disputes():
    """Landing page: all disputes, newest first, with deposit/price totals."""
    disputes = Dispute.query.order_by(Dispute.id.desc()).all()

    round_eth = round(Deposit.total(), 2)
    round_price = round(float(Config.get('eth_price')), 2)
    total_in_USD = round(round_eth * round_price, 2)

    unique_voting_jurors = Juror.list()

    return render_template(
        'monitor/disputes.html',
        disputes=disputes,
        last_updated=Config.get('updated'),
        round_eth=round_eth,
        round_price=round_price,
        total_in_USD=total_in_USD,
        voting_jurors=len(unique_voting_jurors),
    )
@app.route('/dispute/<int:id>', methods=['GET'])
def dispute(id):
    """Dispute detail page: rounds and per-vote labels/display colours."""
    dispute = Dispute.query.get(id)
    dispute.rounds = dispute.rounds()

    # Palettes: saturated when the round reached a majority, pale otherwise.
    majority_colors = {'pending': '#F7DC6F', 'winner': '#27AE60', 'loser': '#E74C3C'}
    no_majority_colors = {'pending': '#FCF3CF', 'winner': '#D1F2EB', 'loser': '#F5B7B1'}
    choice_labels = {1: 'Yes', 2: 'No'}

    for r in dispute.rounds:
        r.votes = r.votes()
        palette = majority_colors if r.majority_reached else no_majority_colors

        for v in r.votes:
            # choice 0 (or anything unrecognised) displays as 'Pending'.
            v.vote_str = choice_labels.get(v.choice, 'Pending')

            if v.choice == 0:
                v.color = palette['pending']
            elif v.choice == r.winning_choice:
                v.color = palette['winner']
            else:
                v.color = palette['loser']

    return render_template(
        'monitor/dispute.html',
        dispute=dispute,
        last_updated=Config.get('updated')
    )
@app.route('/juror/<string:address>', methods=['GET'])
def juror(address):
    """Juror profile: their votes, staking history, and created disputes."""
    # Votes cast by this address, newest round first. Address comparison is
    # lowercased on both sides since stored casing may vary.
    votes = (db.session.query(Vote, Round)
        .filter(func.lower(Vote.account) == address.lower())
        .filter(Vote.round_id == Round.id)
        .order_by(Vote.round_id.desc())
        .all()
    )
    # Colour each (Vote, Round) pair: vote == 0 (no vote cast, matching the
    # 'Pending' handling in dispute()) is yellow; otherwise green when the
    # juror voted with the winning side, red when with the losing side.
    for v in votes:
        if v[0].vote == 0: v[0].color = '#F7DC6F'
        else:
            if v[0].is_winner: v[0].color = '#27AE60'
            else: v[0].color = '#F5B7B1'
    # Staking history, most recent first.
    stakes = (db.session.query(JurorStake, Court)
        .filter(func.lower(JurorStake.address) == address.lower())
        .filter(Court.id == JurorStake.court_id)
        .order_by(JurorStake.staking_date.desc())
        .all())
    # Disputes this address created, newest first.
    disputes = Dispute.query.filter(func.lower(Dispute.created_by) == address.lower()).order_by(Dispute.created_date.desc())
return render_template('monitor/juror.html', address=address, votes=votes, stakes = stakes, disputes=disputes, last_updated=Config.get('updated')) | flaskr/monitor.py |
import sys
sys.path.extend(('lib', 'db'))
from kleros import db, Dispute, Round, Vote, Config, Court, JurorStake, Deposit, Juror
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
from datetime import datetime
from time import gmtime, strftime
import statistics
app = Flask(__name__)
# SQLite database file, resolved relative to the process's working directory.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///../db/kleros.db'
# Disable Flask-SQLAlchemy's modification-tracking signals (unneeded overhead).
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# NOTE(review): this rebinds the name `db` imported from `kleros` above with a
# fresh SQLAlchemy instance — confirm the kleros models use this same instance.
db = SQLAlchemy(app)
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
@app.route('/court/<int:id>', methods=['GET'])
def court(id):
    """Court detail page: its disputes plus per-juror staking/voting data."""
    court = Court.query.get(id)
    court.disputes = court.disputes()

    staking_data = []
    for juror in court.jurors:
        amounts = juror.current_amount_in_court(court.id)
        vote_count = juror.votes_in_court(court.id)

        # Skip jurors with no stake in this subtree and no votes here.
        if amounts['court_and_children'] == 0 and vote_count == 0:
            continue

        staking_data.append({
            'address': juror.address,
            'votes_in_court': vote_count,
            'court_only': amounts['court_only'],
            'court_and_children': amounts['court_and_children'],
        })

    court.staking_data = sorted(
        staking_data,
        key=lambda row: (row['court_and_children'], row['votes_in_court']),
        reverse=True,
    )

    # NOTE(review): the -1 presumably excludes a placeholder entry — confirm.
    num_Jurors = len(court.jurors) - 1

    return render_template(
        'monitor/court.html',
        court=court,
        last_updated=Config.get('updated'),
        num_Jurors=num_Jurors,
        jurors_stats=[], full_jurors=[], full_jurors_stats=[], voting_jurors_num=[],
    )
'''
court_num = court
voting_jurors = court_num.jurors
voting_jurors_num = len(voting_jurors)
jurors = court.jurors_stakings()
jurors_stats = court.juror_stats()
court_mapping = {
0: [2, 3, 4],
2: [3, 4,]
}
full_jurors = []
full_jurors_stats = {}
if id in court_mapping:
amounts = []
unique_jurors = {}
for j in jurors:
if j['address'] not in unique_jurors:
unique_jurors[j['address']] = {"staking_amount": j['staking_amount']}
else:
unique_jurors[j['address']]["staking_amount"] += j['staking_amount']
amounts.append(j['staking_amount'])
courts = Court.query.filter(Court.id.in_(court_mapping[id]))
for c in courts:
court_jurors = c.jurors_stakings()
for cj in court_jurors:
if cj['address'] not in unique_jurors:
unique_jurors[cj['address']] = {"staking_amount": cj['staking_amount']}
else:
unique_jurors[cj['address']]["staking_amount"] += cj['staking_amount']
amounts.append(cj['staking_amount'])
full_jurors_stats['length'] = len(amounts)
full_jurors_stats['mean'] = statistics.mean(amounts)
full_jurors_stats['median'] = statistics.median(amounts)
full_jurors = [{'address': address, 'staking_amount': juror['staking_amount']} for address, juror in unique_jurors.items()]
full_jurors = sorted(full_jurors, key=lambda j: j['staking_amount'], reverse=True)
return render_template('monitor/court.html', court=court, disputes=disputes, jurors=jurors, last_updated=Config.get('updated'),
jurors_stats=jurors_stats, full_jurors=full_jurors, full_jurors_stats=full_jurors_stats, voting_jurors_num=voting_jurors_num)
'''
@app.route('/', methods=['GET'])
@app.route('/disputes', methods=['GET'])
def disputes():
    """Landing page: all disputes, newest first, with deposit/price totals."""
    disputes = Dispute.query.order_by(Dispute.id.desc()).all()

    round_eth = round(Deposit.total(), 2)
    round_price = round(float(Config.get('eth_price')), 2)
    total_in_USD = round(round_eth * round_price, 2)

    unique_voting_jurors = Juror.list()

    return render_template(
        'monitor/disputes.html',
        disputes=disputes,
        last_updated=Config.get('updated'),
        round_eth=round_eth,
        round_price=round_price,
        total_in_USD=total_in_USD,
        voting_jurors=len(unique_voting_jurors),
    )
@app.route('/dispute/<int:id>', methods=['GET'])
def dispute(id):
    """Dispute detail page: rounds and per-vote labels/display colours."""
    dispute = Dispute.query.get(id)
    dispute.rounds = dispute.rounds()

    # Palettes: saturated when the round reached a majority, pale otherwise.
    majority_colors = {'pending': '#F7DC6F', 'winner': '#27AE60', 'loser': '#E74C3C'}
    no_majority_colors = {'pending': '#FCF3CF', 'winner': '#D1F2EB', 'loser': '#F5B7B1'}
    choice_labels = {1: 'Yes', 2: 'No'}

    for r in dispute.rounds:
        r.votes = r.votes()
        palette = majority_colors if r.majority_reached else no_majority_colors

        for v in r.votes:
            # choice 0 (or anything unrecognised) displays as 'Pending'.
            v.vote_str = choice_labels.get(v.choice, 'Pending')

            if v.choice == 0:
                v.color = palette['pending']
            elif v.choice == r.winning_choice:
                v.color = palette['winner']
            else:
                v.color = palette['loser']

    return render_template(
        'monitor/dispute.html',
        dispute=dispute,
        last_updated=Config.get('updated')
    )
@app.route('/juror/<string:address>', methods=['GET'])
def juror(address):
    """Juror profile: their votes, staking history, and created disputes."""
    # Votes cast by this address, newest round first. Address comparison is
    # lowercased on both sides since stored casing may vary.
    votes = (db.session.query(Vote, Round)
        .filter(func.lower(Vote.account) == address.lower())
        .filter(Vote.round_id == Round.id)
        .order_by(Vote.round_id.desc())
        .all()
    )
    # Colour each (Vote, Round) pair: vote == 0 (no vote cast, matching the
    # 'Pending' handling in dispute()) is yellow; otherwise green when the
    # juror voted with the winning side, red when with the losing side.
    for v in votes:
        if v[0].vote == 0: v[0].color = '#F7DC6F'
        else:
            if v[0].is_winner: v[0].color = '#27AE60'
            else: v[0].color = '#F5B7B1'
    # Staking history, most recent first.
    stakes = (db.session.query(JurorStake, Court)
        .filter(func.lower(JurorStake.address) == address.lower())
        .filter(Court.id == JurorStake.court_id)
        .order_by(JurorStake.staking_date.desc())
        .all())
    # Disputes this address created, newest first.
    disputes = Dispute.query.filter(func.lower(Dispute.created_by) == address.lower()).order_by(Dispute.created_date.desc())
return render_template('monitor/juror.html', address=address, votes=votes, stakes = stakes, disputes=disputes, last_updated=Config.get('updated')) | 0.302803 | 0.095392 |
from collections import defaultdict
from docopt import docopt
import logging
import logzero
from logzero import logger as log
import re
import os
SCRIPTDIR = os.path.dirname(os.path.realpath(__file__))
def re_match_group(pattern, string):
    """Search `string` for `pattern`; return the first capture group.

    Returns False (a falsy sentinel, so callers can write `if result:`)
    when the pattern does not match.
    """
    match = re.search(pattern, string)

    if match is None:
        return False

    return match.group(1)
def gather_pdftotext_log(logfile):
    """Tally pdftotext warnings and errors per paper ID.

    `logfile` is an iterable of log lines. A line ending in ".pdf" opens the
    section for that paper; subsequent Warning/Error lines are counted
    against it. Returns {paper_id: {counter_name: count}}.
    """
    logs = defaultdict(lambda: defaultdict(int))
    curr_id = None

    for line in logfile:
        line = line.strip()

        if line.endswith(".pdf"):
            curr_id = line[:-4]
            continue

        if "Warning" in line:
            logs[curr_id]["pdftotext_warning"] += 1
        elif "Error" in line:
            logs[curr_id]["pdftotext_error"] += 1
        else:
            log.warning(f"pdftotext log: ignoring message: {line}")

    return logs
def gather_parscit_log(logfile):
    """Classify ParsCit log messages per paper ID.

    Returns (pdf_list, logs): the ordered list of paper IDs seen, and a
    per-ID counter dict over the failure categories.
    """
    logs = defaultdict(lambda: defaultdict(int))
    pdf_list = []
    curr_id = None

    for line in logfile:
        line = line.strip()

        if line.endswith(".pdf"):
            # A filename line starts the section for a new paper.
            curr_id = line[:-4]
            pdf_list.append(curr_id)
            continue

        if line.startswith("Die in"):
            category = "parscit_died"
        elif line.startswith("Citation text longer than article body"):
            category = "parscit_cite_too_long"
        else:
            category = "parscit_other"

        logs[curr_id][category] += 1

    return pdf_list, logs
def gather_parsetei_log(logfile):
    """Collect TEI-parsing issues keyed by file ID (or summary key).

    Only error/warning lines ("[E...]"/"[W...]") carry information; other
    lines are skipped.
    """
    logs = defaultdict(lambda: defaultdict(int))

    for line in logfile:
        line = line.strip()

        if not (line.startswith("[E") or line.startswith("[W")):
            continue

        key = re_match_group("] ([^ ]+): ", line)
        assert key
        if key.endswith(".xml"):
            key = key[:-4]

        if "Could not find any" in line:
            logs[key]["tei_no_dates"] += 1
        elif "Could not parse dates for" in line:
            no_date_entries = re_match_group("for ([0-9]+) entries", line)
            if no_date_entries:
                logs[key]["tei_no_date_entries"] = int(no_date_entries)

            num_files = re_match_group("entries in ([0-9]+)/[0-9]+ files", line)
            if num_files:
                logs[key]["tei_files_with_probs"] = int(num_files)

    return logs
if __name__ == "__main__":
    # CLI flags come from the module docstring's docopt usage block.
    args = docopt(__doc__)
    log_level = logging.DEBUG if args["--debug"] else logging.INFO
    logzero.loglevel(log_level)
    logzero.formatter(logzero.LogFormatter(datefmt="%Y-%m-%d %H:%M:%S"))
    # Per-stage parse results, keyed "pdftotext" / "parscit" / "tei".
    parsed_logs = {}
    # Stage 1: pdftotext log (optional — only warn when absent).
    log_pdftotext = f"{SCRIPTDIR}/run_parscit_pipeline.pdftotext.log"
    if not os.path.exists(log_pdftotext):
        log.warning(f"Couldn't find pdf2totext log! (expected under: {log_pdftotext})")
    else:
        with open(log_pdftotext, "r") as f:
            parsed_logs["pdftotext"] = gather_pdftotext_log(f)
    # Stage 2: ParsCit log (required — it defines the full list of IDs).
    log_parscit = f"{SCRIPTDIR}/run_parscit_pipeline.parscit.log"
    if not os.path.exists(log_parscit):
        log.error(f"Couldn't find ParsCit log! (expected under: {log_parscit})")
        exit(1)
    else:
        with open(log_parscit, "r") as f:
            all_ids, parsed_logs["parscit"] = gather_parscit_log(f)
    # Stage 3: TEI parse log (optional).
    log_parsetei = f"{SCRIPTDIR}/run_parscit_pipeline.tei.log"
    if not os.path.exists(log_parsetei):
        log.warning(f"Couldn't find parse_tei log! (expected under: {log_parsetei})")
    else:
        with open(log_parsetei, "r") as f:
            parsed_logs["tei"] = gather_parsetei_log(f)
failures = set()
warnings = 0
for file_id, problems in parsed_logs["parscit"].items():
# errors that cause no output file to be generated
if problems["parscit_died"] or problems["parscit_other"]:
failures.add(file_id)
for file_id, problems in parsed_logs["tei"].items():
# top-level stats
if len(file_id) == 3:
warnings += problems["tei_files_with_probs"]
# file parsed, but no dates found
elif problems["tei_no_dates"]:
failures.add(file_id)
    # Summary block: labels are space-padded so the counts line up.
    log.info(f"               Total files processed: {len(all_ids):6d}")
    log.info(f" Total files w/ date parsing issues: {warnings:6d}")
    log.info(f"Total files that couldn't be parsed: {len(failures):6d}")
    log.warning(f"List of IDs that couldn't be parsed:")
    # Print the failed IDs sorted, eight per line.
    failures = sorted(failures)
    while failures:
        log.warning("    " + ", ".join(failures[:8]) + ",")
failures = failures[8:] | bin/summarize_logs.py | from collections import defaultdict
from docopt import docopt
import logging
import logzero
from logzero import logger as log
import re
import os
SCRIPTDIR = os.path.dirname(os.path.realpath(__file__))
def re_match_group(pattern, string):
    """Search `string` for `pattern`; return the first capture group.

    Returns False (a falsy sentinel, so callers can write `if result:`)
    when the pattern does not match.
    """
    match = re.search(pattern, string)

    if match is None:
        return False

    return match.group(1)
def gather_pdftotext_log(logfile):
    """Tally pdftotext warnings and errors per paper ID.

    `logfile` is an iterable of log lines. A line ending in ".pdf" opens the
    section for that paper; subsequent Warning/Error lines are counted
    against it. Returns {paper_id: {counter_name: count}}.
    """
    logs = defaultdict(lambda: defaultdict(int))
    curr_id = None

    for line in logfile:
        line = line.strip()

        if line.endswith(".pdf"):
            curr_id = line[:-4]
            continue

        if "Warning" in line:
            logs[curr_id]["pdftotext_warning"] += 1
        elif "Error" in line:
            logs[curr_id]["pdftotext_error"] += 1
        else:
            log.warning(f"pdftotext log: ignoring message: {line}")

    return logs
def gather_parscit_log(logfile):
    """Classify ParsCit log messages per paper ID.

    Returns (pdf_list, logs): the ordered list of paper IDs seen, and a
    per-ID counter dict over the failure categories.
    """
    logs = defaultdict(lambda: defaultdict(int))
    pdf_list = []
    curr_id = None

    for line in logfile:
        line = line.strip()

        if line.endswith(".pdf"):
            # A filename line starts the section for a new paper.
            curr_id = line[:-4]
            pdf_list.append(curr_id)
            continue

        if line.startswith("Die in"):
            category = "parscit_died"
        elif line.startswith("Citation text longer than article body"):
            category = "parscit_cite_too_long"
        else:
            category = "parscit_other"

        logs[curr_id][category] += 1

    return pdf_list, logs
def gather_parsetei_log(logfile):
    """Parse a parse_tei log stream into per-key date-parsing statistics.

    Only error/warning lines (prefixed "[E" / "[W") are considered; each
    carries a key (a file id or a short top-level stats key) after the "]".
    """
    logs = defaultdict(lambda: defaultdict(int))
    for raw_line in logfile:
        line = raw_line.strip()
        if not (line.startswith("[E") or line.startswith("[W")):
            continue
        key = re_match_group("] ([^ ]+): ", line)
        assert key
        if key.endswith(".xml"):
            key = key[:-4]
        if "Could not find any" in line:
            logs[key]["tei_no_dates"] += 1
        elif "Could not parse dates for" in line:
            entry_count = re_match_group("for ([0-9]+) entries", line)
            if entry_count:
                logs[key]["tei_no_date_entries"] = int(entry_count)
            file_count = re_match_group("entries in ([0-9]+)/[0-9]+ files", line)
            if file_count:
                logs[key]["tei_files_with_probs"] = int(file_count)
    return logs
if __name__ == "__main__":
    args = docopt(__doc__)
    log_level = logging.DEBUG if args["--debug"] else logging.INFO
    logzero.loglevel(log_level)
    logzero.formatter(logzero.LogFormatter(datefmt="%Y-%m-%d %H:%M:%S"))
    parsed_logs = {}
    # pdftotext log is optional; it is parsed for completeness but the
    # summary below does not currently use it.
    log_pdftotext = f"{SCRIPTDIR}/run_parscit_pipeline.pdftotext.log"
    if not os.path.exists(log_pdftotext):
        log.warning(f"Couldn't find pdf2totext log! (expected under: {log_pdftotext})")
    else:
        with open(log_pdftotext, "r") as f:
            parsed_logs["pdftotext"] = gather_pdftotext_log(f)
    # ParsCit log is mandatory: without it we cannot report failures at all.
    log_parscit = f"{SCRIPTDIR}/run_parscit_pipeline.parscit.log"
    if not os.path.exists(log_parscit):
        log.error(f"Couldn't find ParsCit log! (expected under: {log_parscit})")
        exit(1)
    else:
        with open(log_parscit, "r") as f:
            all_ids, parsed_logs["parscit"] = gather_parscit_log(f)
    log_parsetei = f"{SCRIPTDIR}/run_parscit_pipeline.tei.log"
    if not os.path.exists(log_parsetei):
        log.warning(f"Couldn't find parse_tei log! (expected under: {log_parsetei})")
    else:
        with open(log_parsetei, "r") as f:
            parsed_logs["tei"] = gather_parsetei_log(f)
    failures = set()
    warnings = 0
    for file_id, problems in parsed_logs["parscit"].items():
        # errors that cause no output file to be generated
        if problems["parscit_died"] or problems["parscit_other"]:
            failures.add(file_id)
    # BUG FIX: the tei log is optional (see above), so guard against the
    # "tei" key being absent instead of raising KeyError here.
    for file_id, problems in parsed_logs.get("tei", {}).items():
        # top-level stats
        if len(file_id) == 3:
            warnings += problems["tei_files_with_probs"]
        # file parsed, but no dates found
        elif problems["tei_no_dates"]:
            failures.add(file_id)
    log.info(f"  Total files processed: {len(all_ids):6d}")
    log.info(f"  Total files w/ date parsing issues: {warnings:6d}")
    log.info(f"Total files that couldn't be parsed: {len(failures):6d}")
    log.warning("List of IDs that couldn't be parsed:")
    failures = sorted(failures)
    # Emit in chunks of 8 ids per line to keep the log readable.
    while failures:
        log.warning("  " + ", ".join(failures[:8]) + ",")
        failures = failures[8:]
import cv2
import io
import base64
import numpy as np
import tempfile
from flask import flash, send_file
from makeup_service.server.face_makeup_facade import FaceMakeupFacade
face_makeup = FaceMakeupFacade()
def transform_image(request):
    """Apply makeup to the image uploaded in *request* and stream it back.

    Expects the upload under the 'source' form field and the makeup colors
    in the form data (see get_colors).
    """
    allowed_extensions = {'png', 'jpg', 'jpeg'}
    colors = get_colors(request)
    image, file_name = get_image_from_request(request, allowed_extensions)
    processed = face_makeup.apply_makeup_on_image(image, colors)
    extension = get_file_extension(file_name)
    _, encoded = cv2.imencode("." + extension, processed)
    return send_file(io.BytesIO(encoded), mimetype='image/' + extension)
def transform_video(request):
    # Apply makeup to an uploaded .avi video and stream the result back.
    allowed_extensions = {'avi'}
    colors = get_colors(request)
    video_file = get_video_file_from_request(request, allowed_extensions)
    # Output goes to a named temp file; NamedTemporaryFile deletes the file
    # when the object is garbage collected, so the object must stay alive
    # until send_file has consumed the path.
    # NOTE(review): reopening a NamedTemporaryFile by .name works on POSIX
    # but not on Windows while the file is still open -- confirm the
    # deployment target.
    transformed_temp_file = tempfile.NamedTemporaryFile(suffix=get_file_extension(video_file.name, True))
    face_makeup.apply_makeup_on_video(video_file.name, colors, save_to_file=True,
                                      out_file_path=transformed_temp_file.name)
    extension = get_file_extension(video_file.name)
    return send_file(transformed_temp_file.name, mimetype='video/' + extension)
def get_segmentation_for_image(data):
    """Decode the base64-encoded image in data['image'] and return its face
    segmentation as a nested Python list (JSON-serializable)."""
    raw_bytes = base64.b64decode(data['image'])
    pixel_buffer = np.frombuffer(raw_bytes, np.uint8)
    image = cv2.imdecode(pixel_buffer, cv2.IMREAD_COLOR)
    return face_makeup.get_segmentation(image).tolist()
def color_string_to_list(string):
    """Convert a comma-separated color string like '255,0,128' to [255, 0, 128]."""
    return [int(component) for component in string.split(',')]
def get_file_extension(file_name, with_dot=False):
    """Return the final extension of *file_name* (without a leading dot
    unless with_dot is True).

    BUG FIX: the original used split('.')[1], which returned the wrong
    segment for names containing several dots ('my.show.s01.mkv' -> 'show')
    and raised IndexError for names with no dot.  rsplit on the last dot
    returns the real extension, and the whole name when there is no dot.
    """
    extension = file_name.rsplit('.', 1)[-1]
    return '.' + extension if with_dot else extension
def is_allowed_file(file_name, allowed_extensions):
    """Return True if *file_name*'s extension is in *allowed_extensions*.

    The check is case-insensitive (allowed_extensions are lowercase
    everywhere in this module), so 'photo.JPG' is accepted like 'photo.jpg'
    -- the original comparison rejected upper-case extensions.
    """
    return get_file_extension(file_name).lower() in allowed_extensions
def get_colors(request):
    """Read the three makeup colors (hair, upper lip, lower lip) from the
    request's form data and return them as a list of int lists."""
    form = request.form
    return [
        color_string_to_list(form['hair_color']),
        color_string_to_list(form['upper_lip_color']),
        color_string_to_list(form['lower_lip_color']),
    ]
def get_source_from_request(request, allowed_extensions):
    """Fetch and validate the uploaded file from the 'source' form field.

    Flashes a user-facing message and raises RuntimeError when the field is
    missing, no file was selected, or the extension is not allowed.
    """
    if 'source' not in request.files:
        flash('No file part')
        raise RuntimeError("No file was provided")
    uploaded = request.files['source']
    if uploaded.filename == '':
        flash('No selected file')
        raise RuntimeError("File was not selected")
    if not uploaded or not is_allowed_file(uploaded.filename, allowed_extensions):
        flash('Invalid file')
        raise RuntimeError("Invalid file")
    return uploaded
def get_video_file_from_request(request, allowed_extensions):
    # Save the uploaded video into a named temp file and return the
    # NamedTemporaryFile object; callers access the data via .name.
    source = get_source_from_request(request, allowed_extensions)
    # The temp file is deleted when the returned object is garbage
    # collected, so the caller must hold a reference while the path is in
    # use.
    source_video_temp_file = tempfile.NamedTemporaryFile(suffix=get_file_extension(source.filename, True))
    source.save(source_video_temp_file.name)
    return source_video_temp_file
def get_image_from_request(request, allowed_extensions):
    """Decode the uploaded image and return (image_array, original_filename)."""
    source = get_source_from_request(request, allowed_extensions)
    buffer = np.frombuffer(source.read(), np.uint8)
    decoded = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
    return decoded, source.filename
import io
import base64
import numpy as np
import tempfile
from flask import flash, send_file
from makeup_service.server.face_makeup_facade import FaceMakeupFacade
face_makeup = FaceMakeupFacade()
def transform_image(request):
allowed_extensions = {'png', 'jpg', 'jpeg'}
colors = get_colors(request)
image, file_name = get_image_from_request(request, allowed_extensions)
transformed_image = face_makeup.apply_makeup_on_image(image, colors)
extension = get_file_extension(file_name)
retval, buffer = cv2.imencode("." + extension, transformed_image)
return send_file(io.BytesIO(buffer), mimetype='image/' + extension)
def transform_video(request):
allowed_extensions = {'avi'}
colors = get_colors(request)
video_file = get_video_file_from_request(request, allowed_extensions)
transformed_temp_file = tempfile.NamedTemporaryFile(suffix=get_file_extension(video_file.name, True))
face_makeup.apply_makeup_on_video(video_file.name, colors, save_to_file=True,
out_file_path=transformed_temp_file.name)
extension = get_file_extension(video_file.name)
return send_file(transformed_temp_file.name, mimetype='video/' + extension)
def get_segmentation_for_image(data):
decoded = base64.b64decode(data['image'])
np_arr = np.frombuffer(decoded, np.uint8)
img_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
segmentation = face_makeup.get_segmentation(img_np)
return segmentation.tolist()
def color_string_to_list(string):
return list(map(int, string.split(',')))
def get_file_extension(file_name, with_dot=False):
    """Return the final extension of *file_name* (without a leading dot
    unless with_dot is True).

    BUG FIX: the original used split('.')[1], which returned the wrong
    segment for names containing several dots ('my.show.s01.mkv' -> 'show')
    and raised IndexError for names with no dot.  rsplit on the last dot
    returns the real extension, and the whole name when there is no dot.
    """
    extension = file_name.rsplit('.', 1)[-1]
    return '.' + extension if with_dot else extension
def is_allowed_file(file_name, allowed_extensions):
extension = get_file_extension(file_name)
return extension in allowed_extensions
def get_colors(request):
data = request.form
hair_color = color_string_to_list(data['hair_color'])
upper_lip_color = color_string_to_list(data['upper_lip_color'])
lower_lip_color = color_string_to_list(data['lower_lip_color'])
colors = [hair_color, upper_lip_color, lower_lip_color]
return colors
def get_source_from_request(request, allowed_extensions):
data_key = 'source'
if data_key not in request.files:
flash('No file part')
raise RuntimeError("No file was provided")
source = request.files[data_key]
if source.filename == '':
flash('No selected file')
raise RuntimeError("File was not selected")
if not source or not is_allowed_file(source.filename, allowed_extensions):
flash('Invalid file')
raise RuntimeError("Invalid file")
return source
def get_video_file_from_request(request, allowed_extensions):
source = get_source_from_request(request, allowed_extensions)
source_video_temp_file = tempfile.NamedTemporaryFile(suffix=get_file_extension(source.filename, True))
source.save(source_video_temp_file.name)
return source_video_temp_file
def get_image_from_request(request, allowed_extensions):
source = get_source_from_request(request, allowed_extensions)
np_arr = np.frombuffer(source.read(), np.uint8)
img_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
return img_np, source.filename | 0.40486 | 0.109491 |
from __future__ import absolute_import
from __future__ import print_function
import cv2
import numpy as np
import tensorflow as tf
import imagedt
from .DataInterface import *
from ...image.process import noise_padd
slim = tf.contrib.slim
def dynamically_loaded_data(image_paths, labels, height=224, width=224):
    """Load images from disk into a single float32 batch array.

    Images that are not already 224x224 are padded to a square via
    noise_padd.  Returns (images, labels) with labels coerced to int.

    NOTE(review): the batch is allocated as max(len(image_paths), 128)
    rows, so short path lists yield trailing all-zero rows -- confirm that
    callers expect this (the original comment says "prevent memory bomb",
    which max() does not actually do).
    """
    # list(...) so the result is reusable and indexable under Python 3,
    # where a bare map() is a one-shot iterator.
    labels = list(map(int, labels))
    images = np.zeros([max(len(image_paths), 128), height, width, 3], np.float32)
    for index, image_path in enumerate(image_paths):
        cvmat = cv2.imread(image_path)
        if cvmat is None:
            # cv2.imread signals failure by returning None; fail loudly
            # instead of with an opaque AttributeError on .shape below.
            raise ValueError("Could not read image: {0}".format(image_path))
        h, w, c = cvmat.shape
        if h != 224 or w != 224:
            cvmat = noise_padd(cvmat, edge_size=224, start_pixel_value=0)
        images[index] = np.array(cvmat)
    return images, labels
def resize_image_keep_aspect(image, max_edge=224):
    """Resize *image* so its longer edge equals *max_edge*, keeping aspect.

    The shorter edge is scaled by the same ratio; the longer edge is pinned
    to exactly max_edge to avoid rounding drift.
    """
    initial_height = tf.shape(image)[0]
    initial_width = tf.shape(image)[1]
    # Scale factor derived from the longer edge.
    max_value = tf.maximum(initial_width, initial_height)
    ratio = tf.to_float(max_value) / tf.constant(max_edge, dtype=tf.float32)
    new_width = tf.to_int32(tf.to_float(initial_width) / ratio)
    new_height = tf.to_int32(tf.to_float(initial_height) / ratio)

    def set_w():
        # width is the longer edge -> pin it to max_edge exactly
        return max_edge, new_height

    def set_h():
        # height is the longer edge -> pin it to max_edge exactly
        return new_width, max_edge

    new_width, new_height = tf.cond(tf.greater(new_width, new_height), set_w, set_h)
    # BUG FIX: resize_images takes [new_height, new_width].  The original
    # passed [max_edge, new_width], which forced the output height to
    # max_edge even when the *width* was the longer edge, distorting
    # landscape images instead of preserving their aspect ratio.
    return tf.image.resize_images(image, [new_height, new_width])
def tf_noise_padd(images, max_edge=224, start_pixel=0, end_pixel=255):
    """Resize keeping aspect ratio, then pad the short edge with uniform
    noise so the result is a square max_edge x max_edge image.

    Pad pixels are drawn from tf.random_uniform over
    [start_pixel, end_pixel); assumes a 3-channel image.
    """
    # resize image with scale
    images = resize_image_keep_aspect(images, max_edge)
    height = tf.shape(images)[0]
    width = tf.shape(images)[1]
    channels = 3

    # # height > width: pad noise columns left and right of the image
    def case_height_width():
        # split the deficit in two; the right side absorbs any odd pixel
        left_pad_size = tf.div(tf.subtract(max_edge, width), 2)
        right_pad_size = tf.subtract(tf.subtract(max_edge, width), left_pad_size)
        noise_left = tf.random_uniform((height, left_pad_size, channels), minval=start_pixel,
                                       maxval=end_pixel, dtype=tf.float32)
        noise_right = tf.random_uniform((height, right_pad_size, channels), minval=start_pixel,
                                        maxval=end_pixel, dtype=tf.float32)
        # noise_left = tf.ones((height, left_pad_size, channels)) * start_pixel
        # noise_right = tf.ones((height, right_pad_size, channels)) * start_pixel
        merge = tf.concat([noise_left, images, noise_right], axis=1)
        return merge

    # width > height: pad noise rows above and below the image
    def case_width_height():
        top_padd_size = tf.div(tf.subtract(max_edge, height), 2)
        bottom_padd_size = tf.subtract(tf.subtract(max_edge, height), top_padd_size)
        noise_top = tf.random_uniform((top_padd_size, width, channels), minval=start_pixel,
                                      maxval=end_pixel, dtype=tf.float32)
        noise_bottom = tf.random_uniform((bottom_padd_size, width, channels), minval=start_pixel,
                                         maxval=end_pixel, dtype=tf.float32)
        # noise_top = tf.ones((top_padd_size, width, channels)) * start_pixel
        # noise_bottom = tf.ones((bottom_padd_size, width, channels)) * start_pixel
        merge = tf.concat([noise_top, images, noise_bottom], axis=0)
        return merge

    padd_noise_op = tf.cond(tf.greater(height, width), case_height_width, case_width_height)
    return padd_noise_op
def tf_JitterCut(images, jitter=0.05):
    """Randomly crop up to *jitter* fraction off an image (TF port of the
    C++ JitterCut reference kept in the comment below).

    A crop ratio is drawn uniformly from [0, jitter); the crop window is
    (1 - ratio) of each edge and positioned uniformly inside the image,
    matching the reference's random start_x / start_y.

    BUG FIX: the original unpacked images.shape as (width, height,
    channels) -- swapped -- passed a scalar where tf.random_uniform
    requires a shape, and sliced with float tensors; none of that runs.
    tf.random_crop implements the size + random offset logic directly.
    """
    shape = tf.shape(images)
    ratio = tf.random_uniform([], minval=0, maxval=jitter, dtype=tf.float32)
    new_h = tf.to_int32(tf.to_float(shape[0]) * (1.0 - ratio))
    new_w = tf.to_int32(tf.to_float(shape[1]) * (1.0 - ratio))
    return tf.random_crop(images, [new_h, new_w, shape[2]])
# cv::Mat JitterCut(cv::Mat &src, float &jitter){
# cv::Mat cv_img = src.clone();
# uint64 timeseed =(double)cv::getTickCount();
# cv::RNG rng(timeseed);
# unsigned int height = cv_img.rows;
# unsigned int width = cv_img.cols;
# float ratio = rng.uniform(0., jitter);
# unsigned int new_h = height*(1-ratio);
# unsigned int new_w = width*(1-ratio);
# unsigned int start_x = rng.uniform(0, width-new_w);
# unsigned int start_y = rng.uniform(0, height-new_h);
# cv::Rect roi(start_x,start_y,new_w,new_h);
# cv_img = cv_img(roi);
# return cv_img;
# }
class Data_Provider(object):
    """Queue-based (TF1) training-data provider.

    Scans *data_dir* for .tfrecord files and serves shuffled
    (image, one-hot label) batches.  If a label .txt file is found in
    data_dir, num_classes is overridden by its line count.
    """

    def __init__(self, data_dir, num_classes):
        super(Data_Provider, self).__init__()
        self.data_dir = data_dir
        self.num_classes = num_classes
        self._init_dataset_infos()
        self._init_reader()

    @property
    def _get_tfrecords(self, file_format=['.tfrecord']):
        # NOTE(review): declared as a property, so file_format can never be
        # passed by callers -- the (mutable) default is effectively a
        # constant here.
        return imagedt.dir.loop(self.data_dir, file_format)

    def _search_laebls_file(self):
        # (sic: "laebls") Find a label list file under data_dir; if one
        # exists, derive num_classes from its number of lines.
        sear_files = imagedt.dir.loop(self.data_dir, ['.txt'])
        sear_label_file = [item for item in sear_files if ('label' in item)
                           and item.endswith('.txt')]
        if sear_label_file:
            lines = imagedt.file.readlines(sear_label_file[0])
            self.num_classes = len(lines)

    def _init_dataset_infos(self):
        self._search_laebls_file()

    @property
    def _log_dataset_infos(self):
        # Print a short human-readable summary of the dataset configuration.
        print('##' * 20)
        print("Dataset infos:\ndata_dir: {0} \nnum_classes: {1}".format(self.data_dir, self.num_classes))
        print('##' * 20)

    def _init_reader(self):
        # read all tfrecord files into a TF1 input queue
        self.reader = tf.TFRecordReader()
        record_files = self._get_tfrecords
        self.filequeue = tf.train.string_input_producer(record_files)

    def read_from_tfrecord(self, batch_size=32, image_shape=(224, 224, 3)):
        # Build the decode -> pad -> shuffle-batch pipeline and return the
        # (image_batch, label_batch) ops.  JPEG-decoded images are padded
        # to a square of edge image_shape[0] with noise (start_pixel=255)
        # and labels are one-hot encoded to num_classes.
        self.image_shape = image_shape
        _, fetch_tensors = self.reader.read(self.filequeue)
        load_features = tf.parse_single_example(
            fetch_tensors, features={
                'image/height': tf.FixedLenFeature([], dtype=tf.int64),
                'image/width': tf.FixedLenFeature([], dtype=tf.int64),
                'image/class/label': tf.FixedLenFeature([], dtype=tf.int64),
                'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
            }
        )
        # NOTE(review): the decoded height/width are never used below --
        # the image is force-reshaped to image_shape instead.
        height = tf.cast(load_features['image/height'], tf.int32)
        width = tf.cast(load_features['image/width'], tf.int32)
        label = tf.cast(load_features['image/class/label'], tf.int64)
        image = tf.image.decode_jpeg(load_features['image/encoded'], channels=image_shape[2])
        image = tf_noise_padd(image, max_edge=image_shape[0], start_pixel=255)
        image = tf.reshape(image, image_shape, name=None)
        # make a batch
        label = slim.one_hot_encoding(label, self.num_classes)
        image_batch, label_batch = tf.train.shuffle_batch([image, label],
                                                          batch_size=batch_size,
                                                          capacity=500,
                                                          min_after_dequeue=100,
                                                          num_threads=4)
        return image_batch, label_batch
from __future__ import print_function
import cv2
import numpy as np
import tensorflow as tf
import imagedt
from .DataInterface import *
from ...image.process import noise_padd
slim = tf.contrib.slim
def dynamically_loaded_data(image_paths, labels, height=224, width=224):
labels = map(int, labels)
# max(x. 128): prevent memory bomb
images = np.zeros([max(len(image_paths), 128), height, width, 3], np.float32)
for index, image_path in enumerate(image_paths):
cvmat = cv2.imread(image_path)
h, w, c = cvmat.shape
if h != 224 or w != 224:
cvmat = noise_padd(cvmat, edge_size=224,start_pixel_value=0)
images[index] = np.array(cvmat)
return images, labels
def resize_image_keep_aspect(image, max_edge=224):
    """Resize *image* so its longer edge equals *max_edge*, keeping aspect.

    The shorter edge is scaled by the same ratio; the longer edge is pinned
    to exactly max_edge to avoid rounding drift.
    """
    initial_height = tf.shape(image)[0]
    initial_width = tf.shape(image)[1]
    # Scale factor derived from the longer edge.
    max_value = tf.maximum(initial_width, initial_height)
    ratio = tf.to_float(max_value) / tf.constant(max_edge, dtype=tf.float32)
    new_width = tf.to_int32(tf.to_float(initial_width) / ratio)
    new_height = tf.to_int32(tf.to_float(initial_height) / ratio)

    def set_w():
        # width is the longer edge -> pin it to max_edge exactly
        return max_edge, new_height

    def set_h():
        # height is the longer edge -> pin it to max_edge exactly
        return new_width, max_edge

    new_width, new_height = tf.cond(tf.greater(new_width, new_height), set_w, set_h)
    # BUG FIX: resize_images takes [new_height, new_width].  The original
    # passed [max_edge, new_width], which forced the output height to
    # max_edge even when the *width* was the longer edge, distorting
    # landscape images instead of preserving their aspect ratio.
    return tf.image.resize_images(image, [new_height, new_width])
def tf_noise_padd(images, max_edge=224, start_pixel=0, end_pixel=255):
# resize image with scale
images = resize_image_keep_aspect(images, max_edge)
height = tf.shape(images)[0]
width = tf.shape(images)[1]
channels = 3
# # height > width
def case_height_width():
left_pad_size = tf.div(tf.subtract(max_edge, width), 2)
right_pad_size = tf.subtract(tf.subtract(max_edge, width), left_pad_size)
noise_left = tf.random_uniform((height, left_pad_size, channels), minval=start_pixel,
maxval=end_pixel,dtype=tf.float32)
noise_right = tf.random_uniform((height, right_pad_size, channels), minval=start_pixel,
maxval=end_pixel, dtype=tf.float32)
# noise_left = tf.ones((height, left_pad_size, channels)) * start_pixel
# noise_right = tf.ones((height, right_pad_size, channels)) * start_pixel
merge = tf.concat([noise_left, images, noise_right], axis=1)
return merge
# width > height
def case_width_height():
top_padd_size = tf.div(tf.subtract(max_edge, height), 2)
bottom_padd_size = tf.subtract(tf.subtract(max_edge, height), top_padd_size)
noise_top = tf.random_uniform((top_padd_size, width, channels), minval=start_pixel,
maxval=end_pixel, dtype=tf.float32)
noise_bottom = tf.random_uniform((bottom_padd_size, width, channels), minval=start_pixel,
maxval=end_pixel, dtype=tf.float32)
# noise_top = tf.ones((top_padd_size, width, channels)) * start_pixel
# noise_bottom = tf.ones((bottom_padd_size, width, channels)) * start_pixel
merge = tf.concat([noise_top, images, noise_bottom], axis=0)
return merge
padd_noise_op = tf.cond(tf.greater(height, width), case_height_width, case_width_height)
return padd_noise_op
def tf_JitterCut(images, jitter=0.05):
width, height ,channels = images.shape
ratio = tf.random_uniform(1, minval=0, maxval=jitter, dtype=tf.float32)
new_h = height*(1-ratio)
new_w = width*(1-ratio)
start_x = tf.random_uniform(0, minval=0, maxval=width-new_w)
start_y = tf.random_uniform(0, minval=0, maxval=height-new_h)
return images[start_y:new_h, start_x:new_w]
# cv::Mat JitterCut(cv::Mat &src, float &jitter){
# cv::Mat cv_img = src.clone();
# uint64 timeseed =(double)cv::getTickCount();
# cv::RNG rng(timeseed);
# unsigned int height = cv_img.rows;
# unsigned int width = cv_img.cols;
# float ratio = rng.uniform(0., jitter);
# unsigned int new_h = height*(1-ratio);
# unsigned int new_w = width*(1-ratio);
# unsigned int start_x = rng.uniform(0, width-new_w);
# unsigned int start_y = rng.uniform(0, height-new_h);
# cv::Rect roi(start_x,start_y,new_w,new_h);
# cv_img = cv_img(roi);
# return cv_img;
# }
class Data_Provider(object):
"""docstring for Data_Provider"""
def __init__(self, data_dir, num_classes):
super(Data_Provider, self).__init__()
self.data_dir = data_dir
self.num_classes = num_classes
self._init_dataset_infos()
self._init_reader()
@property
def _get_tfrecords(self, file_format=['.tfrecord']):
return imagedt.dir.loop(self.data_dir, file_format)
def _search_laebls_file(self):
sear_files = imagedt.dir.loop(self.data_dir, ['.txt'])
sear_label_file = [item for item in sear_files if ('label' in item)
and item.endswith('.txt')]
if sear_label_file:
lines = imagedt.file.readlines(sear_label_file[0])
self.num_classes = len(lines)
def _init_dataset_infos(self):
self._search_laebls_file()
@property
def _log_dataset_infos(self):
print('##'*20)
print ("Dataset infos:\ndata_dir: {0} \nnum_classes: {1}".format(self.data_dir, self.num_classes))
print('##'*20)
def _init_reader(self):
# read all tfrecord files
self.reader = tf.TFRecordReader()
record_files = self._get_tfrecords
self.filequeue = tf.train.string_input_producer(record_files)
def read_from_tfrecord(self, batch_size=32, image_shape=(224, 224, 3)):
self.image_shape = image_shape
_, fetch_tensors = self.reader.read(self.filequeue)
load_features = tf.parse_single_example(
fetch_tensors,features={
'image/height': tf.FixedLenFeature([], dtype=tf.int64),
'image/width': tf.FixedLenFeature([], dtype=tf.int64),
'image/class/label': tf.FixedLenFeature([], dtype=tf.int64),
'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
}
)
height = tf.cast(load_features['image/height'], tf.int32)
width = tf.cast(load_features['image/width'], tf.int32)
label = tf.cast(load_features['image/class/label'], tf.int64)
image = tf.image.decode_jpeg(load_features['image/encoded'], channels=image_shape[2])
image = tf_noise_padd(image, max_edge=image_shape[0], start_pixel=255)
image = tf.reshape(image, image_shape, name=None)
# make a batch
label = slim.one_hot_encoding(label, self.num_classes)
image_batch, label_batch = tf.train.shuffle_batch([image, label],
batch_size=batch_size,
capacity=500,
min_after_dequeue=100,
num_threads=4)
return image_batch, label_batch | 0.74055 | 0.260389 |
import sys
from flask import Flask, render_template, json, request
import logging as l
import os
from datetime import datetime
from stat import *
URL_BASE = "https://satai.dk"
app = Flask(__name__)
items = [{"url": ("%s" % URL_BASE), "name": "Loading ...", "date": "...", "size": "..."}]
source = '/mnt/Series/'
def walk_tree(top, callback, file_list, to_visit, override=False):
    """Recursively walk *top*, invoking callback(path, file_list) for files.

    Only directories named in *to_visit* are entered at the top level; once
    inside one, *override* keeps the recursion unconditional.  Entries
    whose stat() fails (typically broken symlinks) are deleted.
    """
    for f in os.listdir(top):
        pathname = os.path.join(top, f)
        try:
            mode = os.stat(pathname).st_mode
        except FileNotFoundError as e:
            # stat() follows symlinks, so this usually means a broken link:
            # remove it.  BUG FIX: guard the unlink too -- if the entry
            # vanished entirely (race with another process), unlink itself
            # raised FileNotFoundError and aborted the whole walk.
            l.error("Deleting file '{0}', because of error '{1}'".format(pathname, e))
            try:
                os.unlink(pathname)
            except FileNotFoundError:
                pass
            continue
        if S_ISDIR(mode):
            # It's a directory, recurse into it
            if f in to_visit or override:
                walk_tree(pathname, callback, file_list, to_visit, override=True)
            else:
                print("Not traversing into " + f, file=sys.stdout)
        elif S_ISREG(mode):
            # It's a file, call the callback function
            callback(pathname, file_list)
        else:
            l.error('Skipping {}'.format(pathname))
@app.route('/get_shows', methods=['POST'])
def get_folders():
    """Return the sorted list of show directories as JSON."""
    entries = os.listdir(source)
    entries.sort()
    return json.dumps(entries)
def record_file_stats(file, file_list):
    """Record (mtime, size) for *file* in *file_list* if it is a video file."""
    if file.endswith((".mkv", ".avi", ".mp4")):
        # Single stat() call instead of the original's two.
        info = os.stat(file)
        file_list[file] = (info.st_mtime, info.st_size,)
@app.route('/')
def shows():
    # Landing page; *items* is the module-level placeholder that the client
    # replaces by calling the /get_content endpoint.
    return render_template('shows.html', items=items, edit_url="/edit")

@app.route('/edit')
def edit():
    # Show-selection editor page.
    return render_template('edit.html')
def formatSize(size):
    """Format a byte count as a short human-readable string, e.g. '1500k'.

    Divides by 1024 until the value is at most 9000, so results keep at
    most four digits.
    """
    prefixes = {
        0: "",
        1: "k",
        2: "M",
        3: "G",
        # BUG FIX: terabyte-scale sizes previously overran the prefix table
        # and raised KeyError; cap the loop at the largest known prefix.
        4: "T",
    }
    cnt = 0
    while size > 9000 and cnt < max(prefixes):
        size /= 1024
        cnt += 1
    return f"{int(size)}{prefixes[cnt]}"
@app.route('/get_content', methods=['POST'])
def get_content():
    """Return the 30 newest video files of the requested shows as JSON."""
    requested_shows = request.json["shows"]
    found = dict()
    walk_tree(source, record_file_stats, found, requested_shows)
    print(found.items())
    entries = []
    for path, (mtime, size) in found.items():
        entries.append({
            "url": URL_BASE + "/tv/Series/" + "/".join(path.split("/")[3:]),
            "name": path.split("/")[-1],
            "date": datetime.utcfromtimestamp(mtime).strftime("%Y-%m-%d"),
            "size": formatSize(size),
        })
    # Newest first; the date string is ISO-formatted so lexicographic order
    # equals chronological order.
    entries.sort(key=lambda e: e["date"], reverse=True)
    return json.dumps(entries[0:30])
if __name__ == '__main__':
    # Development server settings: reload templates on change.
    app.jinja_env.auto_reload = True
    app.config['TEMPLATES_AUTO_RELOAD'] = True
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # Werkzeug debugger (arbitrary code execution) to the whole network --
    # never run this configuration in production.
    app.run(debug=True, host='0.0.0.0')
from flask import Flask, render_template, json, request
import logging as l
import os
from datetime import datetime
from stat import *
URL_BASE = "https://satai.dk"
app = Flask(__name__)
items = [{"url": ("%s" % URL_BASE), "name": "Loading ...", "date": "...", "size": "..."}]
source = '/mnt/Series/'
def walk_tree(top, callback, file_list, to_visit, override=False):
for f in os.listdir(top):
pathname = os.path.join(top, f)
try:
mode = os.stat(pathname).st_mode
except FileNotFoundError as e:
l.error("Deleting file '{0}', because of error '{1}'".format(pathname, e))
os.unlink(pathname)
continue
if S_ISDIR(mode):
# It's a directory, recurse into it
if f in to_visit or override:
walk_tree(pathname, callback, file_list, to_visit, override=True)
else:
print("Not traversing into " + f, file=sys.stdout)
elif S_ISREG(mode):
# It's a file, call the callback function
callback(pathname, file_list)
else:
l.error('Skipping {}'.format(pathname))
@app.route('/get_shows', methods=['POST'])
def get_folders():
return json.dumps(sorted(os.listdir(source)))
def record_file_stats(file, file_list):
if file.endswith(".mkv") or file.endswith(".avi") or file.endswith(".mp4"):
file_list[file] = (os.stat(file).st_mtime, os.stat(file).st_size,)
@app.route('/')
def shows():
return render_template('shows.html', items=items, edit_url="/edit")
@app.route('/edit')
def edit():
return render_template('edit.html')
def formatSize(size):
    """Format a byte count as a short human-readable string, e.g. '1500k'.

    Divides by 1024 until the value is at most 9000, so results keep at
    most four digits.
    """
    prefixes = {
        0: "",
        1: "k",
        2: "M",
        3: "G",
        # BUG FIX: terabyte-scale sizes previously overran the prefix table
        # and raised KeyError; cap the loop at the largest known prefix.
        4: "T",
    }
    cnt = 0
    while size > 9000 and cnt < max(prefixes):
        size /= 1024
        cnt += 1
    return f"{int(size)}{prefixes[cnt]}"
@app.route('/get_content', methods=['POST'])
def get_content():
content = request.json
to_visit = content["shows"]
all_files = dict()
walk_tree(source, record_file_stats, all_files, to_visit)
print(all_files.items())
res = []
for k, v in list(all_files.items()):
ts, size = v
res.append({
"url": URL_BASE + "/tv/Series/" + "/".join(k.split("/")[3:]),
"name": k.split("/")[-1],
"date": datetime.utcfromtimestamp(ts).strftime("%Y-%m-%d"),
"size": formatSize(size)
})
newest = sorted(res, key=lambda x: x["date"], reverse=True)
return json.dumps(newest[0:30])
if __name__ == '__main__':
app.jinja_env.auto_reload = True
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.run(debug=True, host='0.0.0.0') | 0.137272 | 0.081155 |
import inspect
from fedoralink.fedorans import RDF
from fedoralink.utils import fullname
def _type_matches(types_from_metadata, types_being_matched):
for a_type in types_being_matched:
if a_type not in types_from_metadata:
return False
return True
def _has_predicates(metadata, predicates):
for predicate in predicates:
if not metadata[predicate]:
return False
return True
class FedoraTypeManager:
    """
    A singleton responsible for creating instance of FedoraObject (and subclasses) out of RDFMetadata
    """

    # All registered model classes (class-level, shared by all callers).
    models = set()

    # clazz -> (rdf_types, priority)
    on_rdf_types = {}

    # clazz -> (rdf_predicates, priority)
    on_rdf_predicates = {}

    @staticmethod
    def register_model(model_class, on_rdf_type=(), on_has_predicate=(), priority=1.0):
        """
        Register a model class
        :param model_class: model class, must be inherited from FedoraObject
        :param on_rdf_type: a tuple of rdf types which this class requires in the metadata
        :param on_has_predicate: a tuple of predicates which this class required in the metadata
        :param priority: priority which this class has in mro
        :return:
        """
        if model_class in FedoraTypeManager.models:
            return  # already registered
        FedoraTypeManager.models.add(model_class)
        if on_rdf_type:
            FedoraTypeManager.on_rdf_types[model_class] = (on_rdf_type, priority)
        if on_has_predicate:
            FedoraTypeManager.on_rdf_predicates[model_class] = (on_has_predicate, priority)

    @staticmethod
    def get_model_class(classname):
        # Look up a registered model by its bare class name.
        for model in FedoraTypeManager.models:
            if model.__name__ == classname:
                return model
        raise TypeError('Class with name %s is not registered as a model' % classname)

    @staticmethod
    def get_model_class_from_fullname(classname):
        # Look up a registered model by its fully qualified (module.Class) name.
        for model in FedoraTypeManager.models:
            if fullname(model) == classname:
                return model
        raise TypeError('Class with name %s is not registered as a model' % classname)

    @staticmethod
    def get_object_class(metadata, model_class=None):
        """
        Returns the best python class for the given metadata
        :param metadata: the metadata
        :return: python class which fits the metadata
        """
        from .models import FedoraObject
        types = metadata[RDF.type]
        # candidate class -> best priority seen so far; FedoraObject is the
        # always-present fallback with priority 0
        possible_classes = {FedoraObject: 0}
        if model_class:
            possible_classes[model_class] = 1
        # look at classes registered on rdf types and if the class match, add it to the dict of possible classes
        for clz, rdf_and_priority in FedoraTypeManager.on_rdf_types.items():
            if _type_matches(types, rdf_and_priority[0]):
                possible_classes[clz] = max(possible_classes.get(clz, 0), rdf_and_priority[1])
        # look at classes registered on rdf predicates and if the class match, add it to the dict of possible classes
        for clz, rdf_and_priority in FedoraTypeManager.on_rdf_predicates.items():
            if _has_predicates(metadata, rdf_and_priority[0]):
                possible_classes[clz] = max(possible_classes.get(clz, 0), rdf_and_priority[1])
        # call class method handles_metadata and if it returns a priority, add the class as well
        for clz in FedoraTypeManager.models:
            priority = getattr(clz, 'handles_metadata')(metadata)
            if priority is not None and priority >= 0:
                possible_classes[clz] = max(possible_classes.get(clz, 0), priority)
        # convert to a list, add priorities from superclasses as well
        # (i.e. 2 * current_priority + sum of priorities of superclasses)
        propagated_possible_classes = []
        for clazz, priority in possible_classes.items():
            for clz in inspect.getmro(clazz):
                if clz in possible_classes:
                    priority += possible_classes[clz]
            propagated_possible_classes.append((clazz, priority))
        # sort by priority
        propagated_possible_classes.sort(key=lambda x: -x[1])
        # remove classes that are in mro of other classes
        classes = []
        seen_classes = set()
        for clazz, priority in propagated_possible_classes:
            if clazz in seen_classes:
                continue
            classes.append(clazz)
            for clz in inspect.getmro(clazz):
                seen_classes.add(clz)
        # got a list of classes, create a new type (or use a cached one ...)
        return FedoraTypeManager.generate_class(classes)

    @staticmethod
    def generate_class(classes):
        """
        generates a class which has the passed classes as superclasses
        :param classes: list of superclasses
        :return: dynamically generated class
        """
        # TODO: class cache
        return type('_'.join([x.__name__ for x in classes]) + "_bound", tuple(classes), {'_is_bound': True,
                                                                                         '_type': classes})

    @staticmethod
    def populate():
        from django.apps import apps
        # loads all models.py files so that repository objects are configured ...
        apps.get_models()
from fedoralink.fedorans import RDF
from fedoralink.utils import fullname
def _type_matches(types_from_metadata, types_being_matched):
for a_type in types_being_matched:
if a_type not in types_from_metadata:
return False
return True
def _has_predicates(metadata, predicates):
for predicate in predicates:
if not metadata[predicate]:
return False
return True
class FedoraTypeManager:
    """
    A singleton responsible for creating instance of FedoraObject (and subclasses) out of RDFMetadata
    """

    # all model classes ever registered via register_model()
    models = set()

    # clazz -> (rdf_types, priority)
    on_rdf_types = {}

    # clazz -> (rdf_predicates, priority)
    on_rdf_predicates = {}

    @staticmethod
    def register_model(model_class, on_rdf_type=(), on_has_predicate=(), priority=1.0):
        """
        Register a model class

        :param model_class:      model class, must be inherited from FedoraObject
        :param on_rdf_type:      a tuple of rdf types which this class requires in the metadata
        :param on_has_predicate: a tuple of predicates which this class required in the metadata
        :param priority:         priority which this class has in mro
        :return: None
        """
        if model_class in FedoraTypeManager.models:
            return  # already registered
        FedoraTypeManager.models.add(model_class)
        if on_rdf_type:
            FedoraTypeManager.on_rdf_types[model_class] = (on_rdf_type, priority)
        if on_has_predicate:
            FedoraTypeManager.on_rdf_predicates[model_class] = (on_has_predicate, priority)

    @staticmethod
    def get_model_class(classname):
        """Return the registered model whose simple class name is *classname*.

        :raises TypeError: when no registered model has that name.
        """
        for model in FedoraTypeManager.models:
            if model.__name__ == classname:
                return model
        raise TypeError('Class with name %s is not registered as a model' % classname)

    @staticmethod
    def get_model_class_from_fullname(classname):
        """Return the registered model whose fully qualified name is *classname*.

        :raises TypeError: when no registered model has that full name.
        """
        for model in FedoraTypeManager.models:
            if fullname(model) == classname:
                return model
        raise TypeError('Class with name %s is not registered as a model' % classname)

    @staticmethod
    def get_object_class(metadata, model_class=None):
        """
        Returns the best python class for the given metadata

        :param metadata:    the metadata
        :param model_class: optional class always added to the candidate set
        :return: python class which fits the metadata
        """
        from .models import FedoraObject

        types = metadata[RDF.type]
        possible_classes = {FedoraObject: 0}
        if model_class:
            possible_classes[model_class] = 1

        # look at classes registered on rdf types and if the class match, add it to the dict of possible classes
        for clz, rdf_and_priority in FedoraTypeManager.on_rdf_types.items():
            if _type_matches(types, rdf_and_priority[0]):
                possible_classes[clz] = max(possible_classes.get(clz, 0), rdf_and_priority[1])

        # look at classes registered on rdf predicates and if the class match, add it to the dict of possible classes
        for clz, rdf_and_priority in FedoraTypeManager.on_rdf_predicates.items():
            if _has_predicates(metadata, rdf_and_priority[0]):
                possible_classes[clz] = max(possible_classes.get(clz, 0), rdf_and_priority[1])

        # call class method handles_metadata and if it returns a priority, add the class as well
        # (direct attribute access replaces the needless getattr(clz, 'handles_metadata'))
        for clz in FedoraTypeManager.models:
            priority = clz.handles_metadata(metadata)
            if priority is not None and priority >= 0:
                possible_classes[clz] = max(possible_classes.get(clz, 0), priority)

        # convert to a list, add priorities from superclasses as well
        # (i.e. 2 * current_priority + sum of priorities of superclasses)
        propagated_possible_classes = []
        for clazz, priority in possible_classes.items():
            for clz in inspect.getmro(clazz):
                if clz in possible_classes:
                    priority += possible_classes[clz]
            propagated_possible_classes.append((clazz, priority))

        # sort by priority, highest first
        propagated_possible_classes.sort(key=lambda x: -x[1])

        # remove classes that are in mro of other (higher-priority) classes
        classes = []
        seen_classes = set()
        for clazz, priority in propagated_possible_classes:
            if clazz in seen_classes:
                continue
            classes.append(clazz)
            for clz in inspect.getmro(clazz):
                seen_classes.add(clz)

        # got a list of classes, create a new type (or use a cached one ...)
        return FedoraTypeManager.generate_class(classes)

    @staticmethod
    def generate_class(classes):
        """
        generates a class which has the passed classes as superclasses

        :param classes: list of superclasses
        :return: dynamically generated class (marked with _is_bound/_type attributes)
        """
        # TODO: class cache
        return type('_'.join(x.__name__ for x in classes) + "_bound", tuple(classes),
                    {'_is_bound': True, '_type': classes})

    @staticmethod
    def populate():
        """Import every installed Django app's models so registration side effects run."""
        from django.apps import apps
        # loads all models.py files so that repository objects are configured ...
        apps.get_models()
import sys
from webots_ros2_core.utils import append_webots_python_lib_to_path
from tf2_msgs.msg import TFMessage
from sensor_msgs.msg import LaserScan
from builtin_interfaces.msg import Time
from geometry_msgs.msg import TransformStamped
import transforms3d
try:
append_webots_python_lib_to_path()
from controller import Node
except Exception as e:
sys.stderr.write('"WEBOTS_HOME" is not correctly set.')
raise e
class LaserPublisher():
    """Publish as ROS topics the laser scans of the lidars."""

    def __init__(self, robot, node, prefix='', parameters=None):
        """
        Initialize the lidars and the topics.

        Arguments:
        robot: the Webots robot instance enumerating the devices
        node: the ROS2 node used to create publishers and timers
        prefix: prefix for the topic names
        parameters: customization parameters dictionnary the key are the device names
                    the value are dictionaries with the following key:
                    'timestep' and 'topic name'
        """
        # Avoid the shared mutable-default-argument pitfall ({} default).
        parameters = {} if parameters is None else parameters
        self.robot = robot
        self.node = node
        self.prefix = prefix
        self.lidars = []
        self.publishers = {}
        self.lastUpdate = {}
        self.timestep = int(robot.getBasicTimeStep())
        self.tfPublisher = self.node.create_publisher(TFMessage, 'tf', 10)
        for i in range(robot.getNumberOfDevices()):
            device = robot.getDeviceByIndex(i)
            if device.getNodeType() == Node.LIDAR:
                self.lidars.append(device)
                # Enable the lidar, honouring a per-device timestep override.
                if device.getName() in parameters and 'timestep' in parameters[device.getName()]:
                    device.enable(parameters[device.getName()]['timestep'])
                else:
                    device.enable(self.timestep)
                topicName = device.getName()
                if device.getName() in parameters and 'topic name' in parameters[topicName]:
                    topicName = parameters[topicName]['topic name']
                if device.getNumberOfLayers() > 1:
                    # One scan topic per layer of a multi-layer lidar.
                    self.publishers[device] = {}
                    for j in range(device.getNumberOfLayers()):
                        name = prefix + device.getName() + '_' + str(j)
                        indexedTopicName = prefix + topicName + '_' + str(j)
                        publisher = self.node.create_publisher(LaserScan, indexedTopicName, 1)
                        self.publishers[device][name] = publisher
                else:
                    self.publishers[device] = self.node.create_publisher(LaserScan,
                                                                         prefix + topicName, 1)
                # Sentinel far in the past so the first callback always publishes.
                self.lastUpdate[device] = -100
        self.jointStateTimer = self.node.create_timer(0.001 * self.timestep, self.callback)

    def callback(self):
        """Publish every lidar whose sampling period has elapsed since its last publication."""
        tFMessage = TFMessage()
        for lidar in self.lidars:
            # getTime() is in seconds while getSamplingPeriod() is in milliseconds:
            # convert before comparing.  Also record the publication time, otherwise
            # lastUpdate is never refreshed and the rate limit has no effect.
            if self.robot.getTime() - self.lastUpdate[lidar] >= lidar.getSamplingPeriod() / 1000.0:
                self.publish(lidar, tFMessage.transforms)
                self.lastUpdate[lidar] = self.robot.getTime()
        if tFMessage.transforms:
            self.tfPublisher.publish(tFMessage)

    def publish(self, lidar, transforms):
        """Publish the laser scan topics with up to date value."""
        nextTime = self.robot.getTime() + 0.001 * self.timestep
        nextSec = int(nextTime)
        # rounding prevents precision issues that can cause problems with ROS timers
        nextNanosec = int(round(1000 * (nextTime - nextSec)) * 1.0e+6)
        for i in range(lidar.getNumberOfLayers()):
            name = self.prefix + lidar.getName() + '_scan'
            if lidar.getNumberOfLayers() > 1:
                name += '_' + str(i)
            # publish the lidar to scan transform
            transformStamped = TransformStamped()
            transformStamped.header.stamp = Time(sec=nextSec, nanosec=nextNanosec)
            transformStamped.header.frame_id = self.prefix + lidar.getName()
            transformStamped.child_frame_id = name
            # Rotate the Webots lidar frame into the ROS scan frame.
            q1 = transforms3d.quaternions.axangle2quat([0, 1, 0], -1.5708)
            q2 = transforms3d.quaternions.axangle2quat([1, 0, 0], 1.5708)
            result = transforms3d.quaternions.qmult(q1, q2)
            if lidar.getNumberOfLayers() > 1:
                # Tilt each layer by its share of the vertical field of view.
                angleStep = lidar.getVerticalFov() / (lidar.getNumberOfLayers() - 1)
                angle = -0.5 * lidar.getVerticalFov() + i * angleStep
                q3 = transforms3d.quaternions.axangle2quat([0, 0, 1], angle)
                result = transforms3d.quaternions.qmult(result, q3)
            # transforms3d quaternions are ordered [w, x, y, z]; map the
            # components accordingly (previously w was assigned to x, etc.).
            transformStamped.transform.rotation.w = result[0]
            transformStamped.transform.rotation.x = result[1]
            transformStamped.transform.rotation.y = result[2]
            transformStamped.transform.rotation.z = result[3]
            transforms.append(transformStamped)
            # publish the actual laser scan
            msg = LaserScan()
            # NOTE(review): the transform above is stamped with nextSec/nextNanosec
            # but the scan uses self.node.sec/nanosec -- confirm the node exposes
            # these attributes and that the two clocks agree.
            msg.header.stamp = Time(sec=self.node.sec, nanosec=self.node.nanosec)
            msg.header.frame_id = name
            msg.angle_min = -0.5 * lidar.getFov()
            msg.angle_max = 0.5 * lidar.getFov()
            msg.angle_increment = lidar.getFov() / (lidar.getHorizontalResolution() - 1)
            msg.scan_time = lidar.getSamplingPeriod() / 1000.0
            msg.range_min = lidar.getMinRange()
            msg.range_max = lidar.getMaxRange()
            lidarValues = lidar.getLayerRangeImage(i)
            for j in range(lidar.getHorizontalResolution()):
                msg.ranges.append(lidarValues[j])
            if lidar.getNumberOfLayers() > 1:
                key = self.prefix + lidar.getName() + '_' + str(i)
                self.publishers[lidar][key].publish(msg)
            else:
                self.publishers[lidar].publish(msg)
from webots_ros2_core.utils import append_webots_python_lib_to_path
from tf2_msgs.msg import TFMessage
from sensor_msgs.msg import LaserScan
from builtin_interfaces.msg import Time
from geometry_msgs.msg import TransformStamped
import transforms3d
try:
append_webots_python_lib_to_path()
from controller import Node
except Exception as e:
sys.stderr.write('"WEBOTS_HOME" is not correctly set.')
raise e
class LaserPublisher():
"""Publish as ROS topics the laser scans of the lidars."""
def __init__(self, robot, node, prefix='', parameters={}):
"""
Initialize the lidars and the topics.
Arguments:
prefix: prefix for the topic names
parameters: customization parameters dictionnary the key are the device names
the value are dictionaries with the following key:
'timestep' and 'topic name'
"""
self.robot = robot
self.node = node
self.prefix = prefix
self.lidars = []
self.publishers = {}
self.lastUpdate = {}
self.timestep = int(robot.getBasicTimeStep())
self.tfPublisher = self.node.create_publisher(TFMessage, 'tf', 10)
for i in range(robot.getNumberOfDevices()):
device = robot.getDeviceByIndex(i)
if device.getNodeType() == Node.LIDAR:
self.lidars.append(device)
if device.getName() in parameters and 'timestep' in parameters[device.getName()]:
device.enable(parameters[device.getName()]['timestep'])
else:
device.enable(self.timestep)
topicName = device.getName()
if device.getName() in parameters and 'topic name' in parameters[topicName]:
topicName = parameters[topicName]['topic name']
if device.getNumberOfLayers() > 1:
self.publishers[device] = {}
for j in range(device.getNumberOfLayers()):
name = prefix + device.getName() + '_' + str(j)
indexedTopicName = prefix + topicName + '_' + str(j)
publisher = self.node.create_publisher(LaserScan, indexedTopicName, 1)
self.publishers[device][name] = publisher
else:
self.publishers[device] = self.node.create_publisher(LaserScan,
prefix + topicName, 1)
self.lastUpdate[device] = -100
self.jointStateTimer = self.node.create_timer(0.001 * self.timestep, self.callback)
def callback(self):
tFMessage = TFMessage()
for lidar in self.lidars:
if self.robot.getTime() - self.lastUpdate[lidar] >= lidar.getSamplingPeriod():
self.publish(lidar, tFMessage.transforms)
if tFMessage.transforms:
self.tfPublisher.publish(tFMessage)
def publish(self, lidar, transforms):
"""Publish the laser scan topics with up to date value."""
nextTime = self.robot.getTime() + 0.001 * self.timestep
nextSec = int(nextTime)
# rounding prevents precision issues that can cause problems with ROS timers
nextNanosec = int(round(1000 * (nextTime - nextSec)) * 1.0e+6)
for i in range(lidar.getNumberOfLayers()):
name = self.prefix + lidar.getName() + '_scan'
if lidar.getNumberOfLayers() > 1:
name += '_' + str(i)
# publish the lidar to scan transform
transformStamped = TransformStamped()
transformStamped.header.stamp = Time(sec=nextSec, nanosec=nextNanosec)
transformStamped.header.frame_id = self.prefix + lidar.getName()
transformStamped.child_frame_id = name
q1 = transforms3d.quaternions.axangle2quat([0, 1, 0], -1.5708)
q2 = transforms3d.quaternions.axangle2quat([1, 0, 0], 1.5708)
result = transforms3d.quaternions.qmult(q1, q2)
if lidar.getNumberOfLayers() > 1:
angleStep = lidar.getVerticalFov() / (lidar.getNumberOfLayers() - 1)
angle = -0.5 * lidar.getVerticalFov() + i * angleStep
q3 = transforms3d.quaternions.axangle2quat([0, 0, 1], angle)
result = transforms3d.quaternions.qmult(result, q3)
transformStamped.transform.rotation.x = result[0]
transformStamped.transform.rotation.y = result[1]
transformStamped.transform.rotation.z = result[2]
transformStamped.transform.rotation.w = result[3]
transforms.append(transformStamped)
# publish the actual laser scan
msg = LaserScan()
msg.header.stamp = Time(sec=self.node.sec, nanosec=self.node.nanosec)
msg.header.frame_id = name
msg.angle_min = -0.5 * lidar.getFov()
msg.angle_max = 0.5 * lidar.getFov()
msg.angle_increment = lidar.getFov() / (lidar.getHorizontalResolution() - 1)
msg.scan_time = lidar.getSamplingPeriod() / 1000.0
msg.range_min = lidar.getMinRange()
msg.range_max = lidar.getMaxRange()
lidarValues = lidar.getLayerRangeImage(i)
for j in range(lidar.getHorizontalResolution()):
msg.ranges.append(lidarValues[j])
if lidar.getNumberOfLayers() > 1:
key = self.prefix + lidar.getName() + '_' + str(i)
self.publishers[lidar][key].publish(msg)
else:
self.publishers[lidar].publish(msg) | 0.486088 | 0.193281 |
import calendar
import datetime
import re
from typing import Generator, List, Optional, Tuple, Union
# Matches an embedded date range such as "[201701:201712]" or "[2000:2010:2]".
# Raw string avoids invalid escape sequences (\[ and \d in a plain string are a
# DeprecationWarning and slated to become a SyntaxError).
RE_DATE_RANGE = re.compile(r'\[(?P<start>\d+):(?P<stop>\d+)(:(?P<step>\d+))?\]')

# Month numbers 1..12.
all_months = range(1, 13)
def is_yearmon(s: str) -> bool:
    """
    Check if a string represents a valid date in YYYYMM format
    """
    if re.match('^[0-9]{6}$', s) is None:
        return False
    month = parse_yearmon(s)[1]
    return 1 <= month <= 12
def parse_yearmon(yearmon: str) -> Tuple[int, int]:
    """
    Parse a YYYYMM string and return a (year, month) integer tuple
    """
    year, month = yearmon[:4], yearmon[4:]
    return int(year), int(month)
def format_yearmon(year: int, month: int) -> str:
    """
    Format a year and month as a zero-padded YYYYMM string
    """
    return '{y:04d}{m:02d}'.format(y=year, m=month)
def get_yearmons(start: str, stop: str) -> Generator[str, None, None]:
    """
    Generate all YYYYMM strings between "start" and "stop", inclusive
    """
    # Tuple comparison covers both "later year" and "same year, later month".
    if parse_yearmon(start) > parse_yearmon(stop):
        raise ValueError("Stop date is before start date.")
    current = start
    yield current
    while current != stop:
        current = get_next_yearmon(current)
        yield current
def get_last_day_of_month(yearmon: str) -> int:
    """
    Get integer last day of the month for a YYYYMM string
    """
    year, month = parse_yearmon(yearmon)
    return calendar.monthrange(year, month)[1]
def get_previous_yearmon(yearmon: str) -> str:
    """
    Get previous YYYYMM to input
    """
    year, month = parse_yearmon(yearmon)
    if month == 1:
        # Wrap from January into December of the previous year.
        year, month = year - 1, 12
    else:
        month -= 1
    return format_yearmon(year, month)
def get_next_yearmon(yearmon: str) -> str:
    """
    Get next YYYYMM to input
    """
    year, month = parse_yearmon(yearmon)
    if month == 12:
        # Wrap from December into January of the following year.
        year, month = year + 1, 1
    else:
        month += 1
    return format_yearmon(year, month)
def get_previous_yearmons(yearmon: str, n: int) -> List[str]:
    """
    Get the n previous YYYYMMs before input, in chronological order
    """
    months = [get_previous_yearmon(yearmon)]
    while len(months) < n:
        months.append(get_previous_yearmon(months[-1]))
    months.reverse()
    return months
def get_next_yearmons(yearmon: str, n: int) -> List[str]:
    """
    Get next n YYYYMMs after input
    """
    months = [get_next_yearmon(yearmon)]
    while len(months) < n:
        months.append(get_next_yearmon(months[-1]))
    return months
def rolling_window(yearmon: str, n: int) -> List[str]:
    """
    Return n months ending with (and including) input
    """
    # Walk backwards from the input, then reverse into chronological order.
    window = [yearmon]
    while len(window) < n:
        window.append(get_previous_yearmon(window[-1]))
    window.reverse()
    return window
def days_in_month(yearmon: str) -> List[str]:
    """
    Return YYYYMMDD strings for each day in input YYYYMM
    """
    last_day = calendar.monthrange(*parse_yearmon(yearmon))[1]
    return [yearmon + '{:02d}'.format(day) for day in range(1, last_day + 1)]
def add_years(yyyy: Union[str, int], n: int) -> str:
    """
    Add n years to YYYY and return a zero-padded string
    """
    total = int(yyyy) + n
    return '{:04d}'.format(total)
def add_months(yyyymm: str, n: int) -> str:
    """
    Add n months (possibly negative) to YYYYMM
    """
    year = int(yyyymm[:4])
    month = int(yyyymm[4:])
    # Work with a zero-based month so a single divmod handles wrapping in
    # both directions.
    carry, month_zero_based = divmod(month - 1 + n, 12)
    return '{:04d}{:02d}'.format(year + carry, month_zero_based + 1)
def add_days(yyyymmdd: str, n: int) -> str:
    """
    Add n days (possibly negative) to YYYYMMDD
    """
    year, month, day = int(yyyymmdd[0:4]), int(yyyymmdd[4:6]), int(yyyymmdd[6:8])
    shifted = datetime.date(year, month, day) + datetime.timedelta(days=n)
    return shifted.strftime('%Y%m%d')
def expand_date_range(start: str, stop: str, step: int) -> List[str]:
    """
    Return all dates in the list >= start and <= stop, separated by step.
    Inputs may be YYYY, YYYYMM, or YYYYMMDD strings
    """
    if len(start) != len(stop):
        raise ValueError("Start and stop dates must be in same format")
    # Dispatch on the string length to the matching increment function.
    incrementers = {4: add_years, 6: add_months, 8: add_days}
    if len(start) not in incrementers:
        raise ValueError("Unknown date format")
    increment_date = incrementers[len(start)]
    dates = [start]
    while True:
        following = increment_date(dates[-1], step)
        if following > stop:
            break
        dates.append(following)
    return dates
def next_occurence_of_month(yearmon: str, month_b: int) -> str:
    """Return the first YYYYMM with month month_b that is >= yearmon."""
    # (Name spelling kept as-is for interface compatibility.)
    assert 0 < month_b <= 12
    year, month = parse_yearmon(yearmon)
    target_year = year if month <= month_b else year + 1
    return format_yearmon(target_year, month_b)
def format_range(start, end, step=None) -> str:
    """Format a bracketed range string: [start:end] or [start:end:step]."""
    fields = [start, end]
    if step:
        fields.append(step)
    return '[{}]'.format(':'.join('{}'.format(f) for f in fields))
def available_yearmon_range(*, window: int, month: Optional[int] = None, start_year: int, end_year: int) -> str:
    """Bracketed yearmon range string for which a full window of data exists."""
    assert start_year + (window - 1) // 12 <= end_year
    # Earliest yearmon with a complete window of history behind it.
    available_start = add_months(format_yearmon(start_year, 1), window - 1)
    if month is None:
        begin = available_start
        end = format_yearmon(end_year, 12)
        step = 1
    else:
        begin = next_occurence_of_month(available_start, month)
        end = format_yearmon(end_year, month)
        step = 12
    return '[{begin}:{end}:{step}]'.format(begin=begin, end=end, step=step)
def get_lead_months(yearmon: str, target: str) -> int:
    """Number of whole months from yearmon forward to target."""
    assert target >= yearmon
    from_year, from_month = parse_yearmon(yearmon)
    to_year, to_month = parse_yearmon(target)
    return 12 * (to_year - from_year) + (to_month - from_month)
def expand_filename_dates(filename: str) -> List[str]:
    """
    Expand an embedded [start:stop(:step)] date range in *filename* into one
    filename per date; return [filename] unchanged when no range is present.
    (Dataset-corruption tokens fused onto the final line have been removed.)
    """
    # Short-circuit regex test.
    # This statement improves program runtime by ~50%.
    if '[' not in filename:
        return [filename]

    match = re.search(RE_DATE_RANGE, filename)
    if not match:
        return [filename]

    start = match.group('start')
    stop = match.group('stop')
    step = int(match.group('step') or 1)

    filenames = []
    for d in expand_date_range(start, stop, step):
        filenames.append(filename[:match.start()] + d + filename[match.end():])

    return filenames
import calendar
import datetime
import re
from typing import Generator, List, Optional, Tuple, Union
RE_DATE_RANGE = re.compile('\[(?P<start>\d+):(?P<stop>\d+)(:(?P<step>\d+))?\]')
all_months = range(1, 13)
def is_yearmon(s: str) -> bool:
"""
Check if a string represents a valid date in YYYYMM format
"""
if not re.match('^[0-9]{6}$', s):
return False
_, month = parse_yearmon(s)
return 1 <= month <= 12
def parse_yearmon(yearmon: str) -> Tuple[int, int]:
"""
Parse a YYYYMM string and return a (year, month) integer tuple
"""
return int(yearmon[:4]), int(yearmon[4:])
def format_yearmon(year: int, month: int) -> str:
"""
Format a year and month as YYYYMM
"""
return '{:04d}{:02d}'.format(year, month)
def get_yearmons(start: str, stop: str) -> Generator[str, None, None]:
"""
Generate all YYYYMM strings between "start" and "stop"
"""
start_year, start_month = parse_yearmon(start)
stop_year, stop_month = parse_yearmon(stop)
if start_year > stop_year:
raise ValueError("Stop date is before start date.")
if start_year == stop_year and start_month > stop_month:
raise ValueError("Stop date is before start date.")
yield start
while start != stop:
start = get_next_yearmon(start)
yield start
def get_last_day_of_month(yearmon: str) -> int:
"""
Get integer last day or month for YYYYMM
"""
return calendar.monthrange(*parse_yearmon(yearmon))[1]
def get_previous_yearmon(yearmon: str) -> str:
"""
Get previous YYYYMM to input
"""
year, month = parse_yearmon(yearmon)
month -= 1
if month == 0:
month = 12
year -= 1
return format_yearmon(year, month)
def get_next_yearmon(yearmon: str) -> str:
"""
Get next YYYYMM to input
"""
year, month = parse_yearmon(yearmon)
month += 1
if month == 13:
month = 1
year += 1
return format_yearmon(year, month)
def get_previous_yearmons(yearmon: str, n: int) -> List[str]:
"""
Get previous YYYYMMs to input
"""
targets = [get_previous_yearmon(yearmon)]
for _ in range(n - 1):
targets.append(get_previous_yearmon(targets[-1]))
targets.reverse()
return targets
def get_next_yearmons(yearmon: str, n: int) -> List[str]:
"""
Get next n YYYYMMs after input
"""
targets = [get_next_yearmon(yearmon)]
for _ in range(n - 1):
targets.append(get_next_yearmon(targets[-1]))
return targets
def rolling_window(yearmon: str, n: int) -> List[str]:
"""
Return n months ending with (and including) input
"""
window = [yearmon]
while len(window) < n:
window.insert(0, get_previous_yearmon(window[0]))
return window
def days_in_month(yearmon: str) -> List[str]:
"""
Return YYYYMMDD strings for each day in input YYYYMM
"""
return [yearmon + '{:02d}'.format(day + 1) for day in range(calendar.monthrange(*parse_yearmon(yearmon))[1])]
def add_years(yyyy: Union[str, int], n: int) -> str:
"""
Add n years to YYYY
"""
return '{:04d}'.format(int(yyyy) + n)
def add_months(yyyymm: str, n: int) -> str:
"""
Add n months to YYYYMM
"""
year = int(yyyymm[:4])
month = int(yyyymm[4:])
month += n
while month > 12:
month -= 12
year += 1
while month <= 0:
month += 12
year -= 1
return '{:04d}{:02d}'.format(year, month)
def add_days(yyyymmdd: str, n: int) -> str:
"""
Add n days to YYYYMMDD
"""
date = datetime.date(int(yyyymmdd[0:4]),
int(yyyymmdd[4:6]),
int(yyyymmdd[6:8]))
date += datetime.timedelta(days=n)
return date.strftime('%Y%m%d')
def expand_date_range(start: str, stop: str, step: int) -> List[str]:
"""
Return all dates in the list >= start and <= stop, separated by step.
Inputs may be YYYY, YYYYMM, or YYYYMMDD strings
"""
dates = [start]
if len(start) != len(stop):
raise ValueError("Start and stop dates must be in same format")
if len(start) == 4:
increment_date = add_years
elif len(start) == 6:
increment_date = add_months
elif len(start) == 8:
increment_date = add_days
else:
raise ValueError("Unknown date format")
while True:
nxt = increment_date(dates[-1], step)
if nxt <= stop:
dates.append(nxt)
else:
break
return dates
def next_occurence_of_month(yearmon: str, month_b: int) -> str:
assert 0 < month_b <= 12
year, month = parse_yearmon(yearmon)
if month <= month_b:
return format_yearmon(year, month_b)
else:
return format_yearmon(year+1, month_b)
def format_range(start, end, step=None) -> str:
if step:
return '[{}:{}:{}]'.format(start, end, step)
else:
return '[{}:{}]'.format(start, end)
def available_yearmon_range(*, window: int, month: Optional[int] = None, start_year: int, end_year: int) -> str:
assert start_year + (window-1) // 12 <= end_year
range_str = '[{begin}:{end}:{step}]'
available_start = add_months(format_yearmon(start_year, 1), window-1)
start_yearmon = available_start if month is None else next_occurence_of_month(available_start, month)
return range_str.format(begin=start_yearmon,
end=format_yearmon(end_year, 12 if month is None else month),
step=1 if month is None else 12)
def get_lead_months(yearmon: str, target: str) -> int:
assert target >= yearmon
y1, m1 = parse_yearmon(yearmon)
y2, m2 = parse_yearmon(target)
return (y2 - y1)*12 + (m2-m1)
def expand_filename_dates(filename: str) -> List[str]:
# Short-circuit regex test.
# This statement improves program runtime by ~50%.
if '[' not in filename:
return [filename]
match = re.search(RE_DATE_RANGE, filename)
if not match:
return [filename]
start = match.group('start')
stop = match.group('stop')
step = int(match.group('step') or 1)
filenames = []
for d in expand_date_range(start, stop, step):
filenames.append(filename[:match.start()] + d + filename[match.end():])
return filenames | 0.858481 | 0.563798 |
from os import listdir,chdir
import warnings
import csv
from random import shuffle
import logging
import cv2
from PIL import Image
from torchvision import transforms
import torch
warnings.filterwarnings('ignore')
loader = transforms.Compose([transforms.ToTensor()])
unloader = transforms.ToPILImage()
def load_label(filename):
    """
    process the .csv label file
    Args:
        filename: /path/to/file/of/csvlabel/ (csv)
    Return:
        dirname_label_map: a hashmap {dirname <--> label} (dict: str->list[int])
    Example:
        {..., INDEX10097: [3, 7] ,}
    """
    dirname_label_map = {}
    with open(filename, 'r') as cfile:
        for row in csv.reader(cfile):
            # Column 0 is the directory name, column 1 the ';'-joined labels.
            dirname_label_map[row[0]] = [int(part) for part in row[1].split(';')]
    return dirname_label_map
def load_dataset(path_to_trainset, dev_ratio=0.08, num_epoch=40):
    """Generator, generate the partitioned training set and dev set for an epoch
    Args:
        path_to_trainset: The directory where the training dataset locate
                          For example: /Users/apple/Downloads/ml_dataset/train/
        dev_ratio: the ratio of dev set in the total set
        num_epoch: the number of epoches for loop
    Return:(generator)
        training_set: list of directory_names for train (str in list)
        dev_set: list of directory_names for eval (str in list)
    """
    if not path_to_trainset:
        raise IOError("[ERROR] Empty input!")
    total_sets = listdir(path_to_trainset)
    logging.info(' Load directory path done.')
    training_size = int(len(total_sets) * (1 - dev_ratio))  # train set size
    for _ in range(num_epoch):
        # Re-shuffle every epoch so each epoch sees a fresh train/dev partition.
        shuffle(total_sets)
        yield total_sets[:training_size], total_sets[training_size:]
def unpack_directory(dirname, path_to_trainset, label_map=None):
    """directory_name -> images with label
    Each photo in the same bag has SAME label
    str ->
        list of torchTensor(unsqueezed at dim=0), list of torchTensor(unsqueezed at dim=0)
    *Reversable by unloader
    Args:
        str(local), str(shared), dict(shared)
    Examples:
        unpack_directory('ENSG00000001630','/Users/apple/Downloads/ml_dataset/train/', label_map)
    Returns:
        raw_images(Tensors) when label_map is falsy, else (raw_images, labels)
    """
    # NOTE(review): chdir mutates process-wide state; callers relying on the
    # previous working directory must restore it themselves.
    chdir(path_to_trainset)
    training_samples = listdir(dirname)
    raw_images = []
    labels = []
    if not label_map:
        # Inference mode: no labels available, return the images only.
        for sample in training_samples:
            # imagefiles --> (float but not defined) tensors
            image = Image.open("./%s/%s" % (dirname, sample)).convert('RGB')
            raw_images.append(loader(image).unsqueeze(0))  # dim: 0
        return raw_images
    # The gold label is identical for every sample in the bag: look it up once
    # (was recomputed inside the loop).
    gold_label = label_map[dirname]
    for sample in training_samples:
        # One fresh one-hot float label tensor per sample (10 classes).
        label = torch.zeros((10), dtype=torch.float32)
        for index in gold_label:
            label[index] = 1.
        labels.append(label.unsqueeze(0))  # dim: 0
        # imagefiles --> (float but not defined) tensors
        image = Image.open("./%s/%s" % (dirname, sample)).convert('RGB')
        raw_images.append(loader(image).unsqueeze(0))  # dim: 0
    return raw_images, labels
def csv_record(filename, rows):
    """One time write of *rows* to *filename* as comma-separated CSV.
    Args:
        filename: destination path (overwritten if it exists)
        rows: python list of row sequences
    """
    # (Dataset-corruption tokens fused onto the final line have been removed.)
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        for row in rows:
            writer.writerow(row)
import warnings
import csv
from random import shuffle
import logging
import cv2
from PIL import Image
from torchvision import transforms
import torch
warnings.filterwarnings('ignore')
loader = transforms.Compose([transforms.ToTensor()])
unloader = transforms.ToPILImage()
def load_label(filename):
"""
process the .csv label file
Args:
filename: /path/to/file/of/csvlabel/ (csv)
Return:
dirname_label_map: a hashmap {dirname <--> label} (dict: str->list[int])
Example:
{..., INDEX10097: [3, 7] ,}
"""
dirname_label_map = {}
with open(filename,'r') as cfile:
reader = csv.reader(cfile)
for row in reader:
name = row[0]
label = (row[1]).split(';')
int_label = [int(lab) for lab in label]
dirname_label_map[name] = int_label
return dirname_label_map
def load_dataset(path_to_trainset, dev_ratio=0.08, num_epoch=40):
"""Generator, generate the partitioned training set and dev set for an epoch
Args:
path_to_trainset: The directory where the training dataset locate
For example: /Users/apple/Downloads/ml_dataset/train/
dev_ratio: the ratio of dev set in the total set
num_epoch: the number of epoches for loop
Return:(generator)
training_set: list of directory_names for train (str in list)
dev_set: list of directory_names for eval (str in list)
"""
if not path_to_trainset:
raise IOError("[ERROR] Empty input!")
total_sets = listdir(path_to_trainset) # list
#dir2label = load_csvLabel(directory_name + "/train.csv") # dict: dirname(str) -> labels(list)
logging.info(' Load directory path done.')
training_size = int(len(total_sets) * (1 - dev_ratio)) # train set size
for epoch in range(num_epoch):
# shuffle(total_sets) ==> generate a partiton: trainingset and devset
shuffle(total_sets)
training_set = total_sets[:training_size]
dev_set = total_sets[training_size:]
yield training_set, dev_set
def unpack_directory(dirname, path_to_trainset, label_map=None):
"""directory_name -> images with label
Each photo in the same bag has SAME label
str ->
list of torchTensor(unsqueezed at dim=0), list of torchTensor(unsqueezed at dim=0)
*Reversable by unloader
Args:
str(local),str(shared),dict(shared)
Examples:
unpack_directory('ENSG00000001630','/Users/apple/Downloads/ml_dataset/train/', label_map)
Returns:
raw_images(Tensors), labels(Tensors)
"""
chdir(path_to_trainset)
training_samples = listdir(dirname)
raw_images = []
labels = []
sample_size = len(training_samples)
if not label_map:
for sample in training_samples:
# imagefiles --> (float but not defined) tensors
image = Image.open("./%s/%s" % (dirname,sample)).convert('RGB')
image = loader(image).unsqueeze(0) # dim: 0
raw_images.append(image)
return raw_images
for sample in training_samples:
gold_label = label_map[dirname]
# Float format
label = torch.zeros((10),dtype=torch.float32)
for index in gold_label:
label[index] = 1.
label = label.unsqueeze(0) # dim: 0
labels.append(label)
# imagefiles --> (float but not defined) tensors
image = Image.open("./%s/%s" % (dirname,sample)).convert('RGB')
image = loader(image).unsqueeze(0) # dim: 0
raw_images.append(image)
return raw_images, labels
def csv_record(filename, rows):
"""One time write
Args:
rows: python list
"""
with open(filename, 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
for row in rows:
writer.writerow(row) | 0.697815 | 0.329338 |
import os
import shutil
import unittest
import tempfile
from bento.core \
import \
PackageDescription, PackageOptions
from bento.core.node \
import \
create_root_with_source_tree
from bento.commands.tests.utils \
import \
create_fake_package_from_bento_infos, prepare_configure
from bento.commands.hooks \
import \
get_pre_hooks, create_hook_module, get_post_hooks
from bento.commands.options \
import \
OptionsContext
from bento.commands.context \
import \
ConfigureYakuContext, BuildYakuContext
from bento.commands.build \
import \
BuildCommand
class TestRecurseBase(unittest.TestCase):
    """Tests for recursive handling of bento.info files (Recurse: and HookFile: sections).

    NOTE(review): the embedded bento.info/bscript literals lost their inner
    indentation in the dataset dump; they are reconstructed here following
    bento's indentation-sensitive format -- verify against upstream.
    """

    def setUp(self):
        # Work inside a throwaway directory holding a bento source tree.
        self.old_dir = os.getcwd()
        self.d = tempfile.mkdtemp()
        self.root = create_root_with_source_tree(self.d, os.path.join(self.d, "build"))
        self.run_node = self.root.find_node(self.d)
        self.top_node = self.run_node
        os.chdir(self.d)

    def tearDown(self):
        os.chdir(self.old_dir)
        shutil.rmtree(self.d)

    def test_simple(self):
        """Configure and build a package whose bento.info recurses two levels deep."""
        run_node = self.run_node

        bento_info = """\
Name: foo

Recurse:
    bar
"""
        bento_info2 = """\
Recurse:
    foo

Library:
    Modules: fubar
    Extension: _foo
        Sources: foo.c
"""
        bento_info3 = """\
Library:
    Modules: foufoufou
    Packages: sub2
"""
        bentos = {"bento.info": bento_info,
                  os.path.join("bar", "bento.info"): bento_info2,
                  os.path.join("bar", "foo", "bento.info"): bento_info3}
        create_fake_package_from_bento_infos(run_node, bentos)

        conf, configure = prepare_configure(run_node, bento_info, ConfigureYakuContext)
        configure.run(conf)
        conf.shutdown()

        build = BuildCommand()
        opts = OptionsContext.from_command(build)
        cmd_argv = []
        bld = BuildYakuContext(cmd_argv, opts, conf.pkg, run_node)
        build.run(bld)

    def test_hook(self):
        """A pre-configure hook declared in a recursed bscript runs against the local package."""
        root = self.root
        top_node = self.top_node

        bento_info = """\
Name: foo

HookFile:
    bar/bscript

Recurse:
    bar
"""
        bento_info2 = """\
Library:
    Modules: fubar
"""
        bscript = """\
from bento.commands import hooks

@hooks.pre_configure
def configure(ctx):
    py_modules = ctx.local_pkg.py_modules
    ctx.local_node.make_node("test").write(str(py_modules))
"""
        bentos = {"bento.info": bento_info, os.path.join("bar", "bento.info"): bento_info2}
        bscripts = {os.path.join("bar", "bscript"): bscript}
        create_fake_package_from_bento_infos(top_node, bentos, bscripts)

        conf, configure = prepare_configure(self.run_node, bento_info, ConfigureYakuContext)

        hook = top_node.search("bar/bscript")
        # Importing the hook module registers its pre-configure hooks as a side effect.
        create_hook_module(hook.abspath())
        for hook, local_dir, help_bypass in get_pre_hooks("configure"):
            conf.pre_recurse(root.find_dir(local_dir))
            try:
                hook(conf)
            finally:
                conf.post_recurse()

        test = top_node.search("bar/test")
        if test:
            # assertEqual replaces failUnlessEqual, deprecated since Python 2.7.
            self.assertEqual(test.read(), "['fubar']")
        else:
            self.fail("test dummy not found")
import shutil
import unittest
import tempfile
from bento.core \
import \
PackageDescription, PackageOptions
from bento.core.node \
import \
create_root_with_source_tree
from bento.commands.tests.utils \
import \
create_fake_package_from_bento_infos, prepare_configure
from bento.commands.hooks \
import \
get_pre_hooks, create_hook_module, get_post_hooks
from bento.commands.options \
import \
OptionsContext
from bento.commands.context \
import \
ConfigureYakuContext, BuildYakuContext
from bento.commands.build \
import \
BuildCommand
class TestRecurseBase(unittest.TestCase):
# Integration tests for bento's "Recurse" feature: sub-directory bento.info
# files and per-directory hook files, exercised through the Yaku contexts.
def setUp(self):
# Work inside a throwaway tree; restore cwd in tearDown.
self.old_dir = os.getcwd()
self.d = tempfile.mkdtemp()
self.root = create_root_with_source_tree(self.d, os.path.join(self.d, "build"))
self.run_node = self.root.find_node(self.d)
self.top_node = self.run_node
os.chdir(self.d)
def tearDown(self):
os.chdir(self.old_dir)
shutil.rmtree(self.d)
def test_simple(self):
# Two levels of recursion (bar, then bar/foo) must configure and build.
root = self.root
top_node = self.top_node
run_node = self.run_node
bento_info = """\
Name: foo
Recurse:
bar
"""
bento_info2 = """\
Recurse:
foo
Library:
Modules: fubar
Extension: _foo
Sources: foo.c
"""
bento_info3 = """\
Library:
Modules: foufoufou
Packages: sub2
"""
bentos = {"bento.info": bento_info, os.path.join("bar", "bento.info"): bento_info2,
os.path.join("bar", "foo", "bento.info"): bento_info3}
create_fake_package_from_bento_infos(run_node, bentos)
conf, configure = prepare_configure(run_node, bento_info, ConfigureYakuContext)
configure.run(conf)
conf.shutdown()
# Build with the package description produced by configure.
build = BuildCommand()
opts = OptionsContext.from_command(build)
cmd_argv = []
bld = BuildYakuContext(cmd_argv, opts, conf.pkg, run_node)
build.run(bld)
def test_hook(self):
# A HookFile bscript in a recursed dir runs at pre-configure time.
root = self.root
top_node = self.top_node
bento_info = """\
Name: foo
HookFile:
bar/bscript
Recurse:
bar
"""
bento_info2 = """\
Library:
Modules: fubar
"""
bscript = """\
from bento.commands import hooks
@hooks.pre_configure
def configure(ctx):
py_modules = ctx.local_pkg.py_modules
ctx.local_node.make_node("test").write(str(py_modules))
"""
bentos = {"bento.info": bento_info, os.path.join("bar", "bento.info"): bento_info2}
bscripts = {os.path.join("bar", "bscript"): bscript}
create_fake_package_from_bento_infos(top_node, bentos, bscripts)
conf, configure = prepare_configure(self.run_node, bento_info, ConfigureYakuContext)
hook = top_node.search("bar/bscript")
m = create_hook_module(hook.abspath())
for hook, local_dir, help_bypass in get_pre_hooks("configure"):
conf.pre_recurse(root.find_dir(local_dir))
try:
hook(conf)
finally:
conf.post_recurse()
# The bscript should have dumped the sub-package's modules into bar/test.
test = top_node.search("bar/test")
if test:
self.failUnlessEqual(test.read(), "['fubar']")
else:
self.fail("test dummy not found") | 0.271155 | 0.170646 |
import torch
from torch import nn
import math
class LanguageTransformer(nn.Module):
    """Sequence-to-sequence Transformer over a target vocabulary.

    Encodes pre-embedded source features and decodes target token ids,
    projecting decoder states back onto the vocabulary.
    """

    def __init__(self, vocab_size,
                 d_model, nhead,
                 num_encoder_layers, num_decoder_layers,
                 dim_feedforward, max_seq_length,
                 pos_dropout, trans_dropout):
        super().__init__()
        self.d_model = d_model
        self.embed_tgt = nn.Embedding(vocab_size, d_model)
        self.pos_enc = PositionalEncoding(d_model, pos_dropout, max_seq_length)
        # Keyword form of the same positional arguments used originally.
        self.transformer = nn.Transformer(
            d_model=d_model,
            nhead=nhead,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            dim_feedforward=dim_feedforward,
            dropout=trans_dropout,
        )
        self.fc = nn.Linear(d_model, vocab_size)

    def forward(self, src, tgt, src_key_padding_mask=None,
                tgt_key_padding_mask=None, memory_key_padding_mask=None):
        """
        Shape:
            - src: (W, N, C)
            - tgt: (T, N)
            - src_key_padding_mask: (N, S)
            - tgt_key_padding_mask: (N, T)
            - memory_key_padding_mask: (N, S)
            - output: (N, T, E)
        """
        scale = math.sqrt(self.d_model)
        causal_mask = self.gen_nopeek_mask(tgt.shape[0]).to(src.device)
        src_in = self.pos_enc(src * scale)
        tgt_in = self.pos_enc(self.embed_tgt(tgt) * scale)
        decoded = self.transformer(src_in, tgt_in, tgt_mask=causal_mask,
                                   src_key_padding_mask=src_key_padding_mask,
                                   tgt_key_padding_mask=tgt_key_padding_mask,
                                   memory_key_padding_mask=memory_key_padding_mask)
        # (T, N, E) -> (N, T, E) before the vocabulary projection.
        return self.fc(decoded.transpose(0, 1))

    def gen_nopeek_mask(self, length):
        """Additive causal mask: 0.0 on/below the diagonal, -inf above it."""
        visible = torch.tril(torch.ones(length, length)).bool()
        return torch.zeros(length, length).masked_fill(~visible, float('-inf'))
class PositionalEncoding(nn.Module):
# Sinusoidal positional encoding (sin on even dims, cos on odd dims)
# followed by dropout; the table is precomputed once in __init__.
def __init__(self, d_model, dropout=0.1, max_len=100):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Precompute a (1, max_len, d_model) table of sin/cos waves.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (
-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
# register_buffer => Tensor which is not a parameter, but should be part of the modules state.
# Used for tensors that need to be on the same device as the module.
# persistent=False tells PyTorch to not add the buffer to the state dict (e.g. when we save the model)
self.register_buffer('pe', pe, persistent=False)
def forward(self, x):
# NOTE(review): pe[:, :x.size(1)] indexes positions by dim 1, i.e. assumes
# batch-first input, but the caller feeds seq-first (W, N, C) tensors —
# confirm which layout is intended.
x = x + self.pe[:, :x.size(1)]
return self.dropout(x) | model/sequence/transformer.py | import torch
from torch import nn
import math
class LanguageTransformer(nn.Module):
    """Sequence-to-sequence Transformer over a target vocabulary.

    Encodes pre-embedded source features and decodes target token ids,
    projecting decoder states back onto the vocabulary.
    """

    def __init__(self, vocab_size,
                 d_model, nhead,
                 num_encoder_layers, num_decoder_layers,
                 dim_feedforward, max_seq_length,
                 pos_dropout, trans_dropout):
        super().__init__()
        self.d_model = d_model
        self.embed_tgt = nn.Embedding(vocab_size, d_model)
        self.pos_enc = PositionalEncoding(d_model, pos_dropout, max_seq_length)
        # Keyword form of the same positional arguments used originally.
        self.transformer = nn.Transformer(
            d_model=d_model,
            nhead=nhead,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            dim_feedforward=dim_feedforward,
            dropout=trans_dropout,
        )
        self.fc = nn.Linear(d_model, vocab_size)

    def forward(self, src, tgt, src_key_padding_mask=None,
                tgt_key_padding_mask=None, memory_key_padding_mask=None):
        """
        Shape:
            - src: (W, N, C)
            - tgt: (T, N)
            - src_key_padding_mask: (N, S)
            - tgt_key_padding_mask: (N, T)
            - memory_key_padding_mask: (N, S)
            - output: (N, T, E)
        """
        scale = math.sqrt(self.d_model)
        causal_mask = self.gen_nopeek_mask(tgt.shape[0]).to(src.device)
        src_in = self.pos_enc(src * scale)
        tgt_in = self.pos_enc(self.embed_tgt(tgt) * scale)
        decoded = self.transformer(src_in, tgt_in, tgt_mask=causal_mask,
                                   src_key_padding_mask=src_key_padding_mask,
                                   tgt_key_padding_mask=tgt_key_padding_mask,
                                   memory_key_padding_mask=memory_key_padding_mask)
        # (T, N, E) -> (N, T, E) before the vocabulary projection.
        return self.fc(decoded.transpose(0, 1))

    def gen_nopeek_mask(self, length):
        """Additive causal mask: 0.0 on/below the diagonal, -inf above it."""
        visible = torch.tril(torch.ones(length, length)).bool()
        return torch.zeros(length, length).masked_fill(~visible, float('-inf'))
class PositionalEncoding(nn.Module):
# Sinusoidal positional encoding (sin on even dims, cos on odd dims)
# followed by dropout; the table is precomputed once in __init__.
def __init__(self, d_model, dropout=0.1, max_len=100):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Precompute a (1, max_len, d_model) table of sin/cos waves.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (
-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
# register_buffer => Tensor which is not a parameter, but should be part of the modules state.
# Used for tensors that need to be on the same device as the module.
# persistent=False tells PyTorch to not add the buffer to the state dict (e.g. when we save the model)
self.register_buffer('pe', pe, persistent=False)
def forward(self, x):
# NOTE(review): pe[:, :x.size(1)] indexes positions by dim 1, i.e. assumes
# batch-first input, but the caller feeds seq-first (W, N, C) tensors —
# confirm which layout is intended.
x = x + self.pe[:, :x.size(1)]
return self.dropout(x) | 0.938927 | 0.361334 |
from keras.applications import InceptionV3
from keras.models import Model, load_model
from keras.layers import Input, Dropout, Dense, BatchNormalization
from keras.layers import GlobalAveragePooling2D, Concatenate
from keras.utils import to_categorical
import tensorflow as tf
import pandas as pd
import os
import numpy as np
from sklearn.model_selection import train_test_split
from data_generator import DataGenerator
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau
import pickle
n_classes = 34
input_shape = (75, 75, 3)
feat_shape = (16,)
class TrainValTensorBoard(TensorBoard):
# Keras TensorBoard callback that writes training and validation scalars to
# two sibling log directories so they overlay on one TensorBoard chart.
# NOTE(review): tf.summary.FileWriter / tf.Summary are TF 1.x APIs — confirm
# the target TensorFlow version.
def __init__(self, log_dir='./logs', **kwargs):
# Make the original `TensorBoard` log to a subdirectory 'training'
training_log_dir = os.path.join(log_dir, 'inception_v3_training')
super(TrainValTensorBoard, self).__init__(training_log_dir, **kwargs)
# Log the validation metrics to a separate subdirectory
self.val_log_dir = os.path.join(log_dir, 'inception_v3_validation')
def set_model(self, model):
# Setup writer for validation metrics
self.val_writer = tf.summary.FileWriter(self.val_log_dir)
super(TrainValTensorBoard, self).set_model(model)
def on_epoch_end(self, epoch, logs=None):
# Pop the validation logs and handle them separately with
# `self.val_writer`. Also rename the keys so that they can
# be plotted on the same figure with the training metrics
logs = logs or {}
val_logs = {k.replace('val_', ''): v for k, v in logs.items() if k.startswith('val_')}
for name, value in val_logs.items():
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value.item()
summary_value.tag = name
self.val_writer.add_summary(summary, epoch)
self.val_writer.flush()
# Pass the remaining logs to `TensorBoard.on_epoch_end`
logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
super(TrainValTensorBoard, self).on_epoch_end(epoch, logs)
def get_single_cls_model():
    """Build and compile the classifier: InceptionV3 image features
    concatenated with a 16-dim hand-crafted feature vector, then a small
    dense head with softmax over `n_classes`.
    """
    backbone = InceptionV3(
        include_top=False,
        weights=None,
        input_shape=input_shape)
    image_in = Input(shape=input_shape)
    net = GlobalAveragePooling2D()(backbone(image_in))
    net = Dropout(0.5)(net)
    net = Dense(512, activation='relu')(net)
    net = BatchNormalization()(net)
    # Sized so that image features + extra features total 256 dims.
    image_feats = Dense(256 - feat_shape[0], activation='relu')(net)
    extra_in = Input(shape=feat_shape)
    merged = Concatenate(axis=-1)([image_feats, extra_in])
    head = BatchNormalization()(merged)
    head = Dense(64, activation='relu')(head)
    head = BatchNormalization()(head)
    probs = Dense(n_classes, activation='softmax')(head)
    model = Model([image_in, extra_in], probs)
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(1e-3),
                  metrics=['acc'])
    model.summary()
    return model
def check_dirs(dirs):
    """Ensure each directory in *dirs* exists (relative paths are resolved
    against the current working directory).

    Missing directories are created, including intermediate ones; existing
    directories are left untouched.
    """
    for d in dirs:
        path = os.path.join(os.getcwd(), d)
        # exist_ok avoids the check-then-create race of the original
        # `if os.path.isdir(...) is False: os.mkdir(...)` sequence, and
        # makedirs also handles nested paths.
        os.makedirs(path, exist_ok=True)
check_dirs(['logs', 'models'])
# Load the label -> class-directory map produced elsewhere.
with open('class_map.p', 'rb') as handle:
class_map = pickle.load(handle)
df = pd.read_csv('plankton.csv')
df.drop_duplicates(subset='im_name', inplace=True, keep=False)
params = {'n_classes': n_classes,
'shape': input_shape,
'feat_shape': feat_shape,
'batch_size': 128,
'shuffle': True}
# Balance classes: resample every label to 5000 rows (with replacement).
frames = []
for c in np.unique(df.label):
frames.append(df[df.label==c].sample(n=5000, replace=True, random_state=0))
df_sample = pd.concat(frames)
paths = []
labels = []
data_path = os.path.join(os.getcwd(), 'pad')
for im_name, label in zip(df_sample.im_name, df_sample.label):
im_dir = os.path.join(data_path, class_map[label])
im_path = os.path.join(im_dir, im_name)
paths.append(im_path)
labels.append(to_categorical(y=label, num_classes=n_classes))
paths = np.array(paths)
labels = np.array(labels)
X_train, X_val, y_train, y_val = train_test_split(paths, labels, test_size=0.05, random_state=0)
checkpoint = ModelCheckpoint('./models/inception_v3.model', monitor='val_acc', verbose=1, mode='max',
save_best_only=True, save_weights_only=False, period=1)
reduceLROnPlato = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
patience=3, verbose=1, mode='min')
tensorboard = TrainValTensorBoard(write_graph=False)
tg = DataGenerator(paths=X_train, labels=y_train, augment=True, **params)
vg = DataGenerator(paths=X_val, labels=y_val, **params)
model = get_single_cls_model()
#model = load_model('./models/inception_v3.model')
# NOTE(review): reduceLROnPlato is constructed but never passed to
# fit_generator's callbacks — confirm whether it should be included.
model.fit_generator(generator=tg, validation_data=vg,
steps_per_epoch=len(tg)/10, validation_steps=len(vg),
epochs=1000, verbose=1,
callbacks=[tensorboard, checkpoint]) | model_generator.py | from keras.applications import InceptionV3
from keras.models import Model, load_model
from keras.layers import Input, Dropout, Dense, BatchNormalization
from keras.layers import GlobalAveragePooling2D, Concatenate
from keras.utils import to_categorical
import tensorflow as tf
import pandas as pd
import os
import numpy as np
from sklearn.model_selection import train_test_split
from data_generator import DataGenerator
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau
import pickle
n_classes = 34
input_shape = (75, 75, 3)
feat_shape = (16,)
class TrainValTensorBoard(TensorBoard):
# Keras TensorBoard callback that writes training and validation scalars to
# two sibling log directories so they overlay on one TensorBoard chart.
# NOTE(review): tf.summary.FileWriter / tf.Summary are TF 1.x APIs — confirm
# the target TensorFlow version.
def __init__(self, log_dir='./logs', **kwargs):
# Make the original `TensorBoard` log to a subdirectory 'training'
training_log_dir = os.path.join(log_dir, 'inception_v3_training')
super(TrainValTensorBoard, self).__init__(training_log_dir, **kwargs)
# Log the validation metrics to a separate subdirectory
self.val_log_dir = os.path.join(log_dir, 'inception_v3_validation')
def set_model(self, model):
# Setup writer for validation metrics
self.val_writer = tf.summary.FileWriter(self.val_log_dir)
super(TrainValTensorBoard, self).set_model(model)
def on_epoch_end(self, epoch, logs=None):
# Pop the validation logs and handle them separately with
# `self.val_writer`. Also rename the keys so that they can
# be plotted on the same figure with the training metrics
logs = logs or {}
val_logs = {k.replace('val_', ''): v for k, v in logs.items() if k.startswith('val_')}
for name, value in val_logs.items():
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value.item()
summary_value.tag = name
self.val_writer.add_summary(summary, epoch)
self.val_writer.flush()
# Pass the remaining logs to `TensorBoard.on_epoch_end`
logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
super(TrainValTensorBoard, self).on_epoch_end(epoch, logs)
def get_single_cls_model():
    """Build and compile the classifier: InceptionV3 image features
    concatenated with a 16-dim hand-crafted feature vector, then a small
    dense head with softmax over `n_classes`.
    """
    backbone = InceptionV3(
        include_top=False,
        weights=None,
        input_shape=input_shape)
    image_in = Input(shape=input_shape)
    net = GlobalAveragePooling2D()(backbone(image_in))
    net = Dropout(0.5)(net)
    net = Dense(512, activation='relu')(net)
    net = BatchNormalization()(net)
    # Sized so that image features + extra features total 256 dims.
    image_feats = Dense(256 - feat_shape[0], activation='relu')(net)
    extra_in = Input(shape=feat_shape)
    merged = Concatenate(axis=-1)([image_feats, extra_in])
    head = BatchNormalization()(merged)
    head = Dense(64, activation='relu')(head)
    head = BatchNormalization()(head)
    probs = Dense(n_classes, activation='softmax')(head)
    model = Model([image_in, extra_in], probs)
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(1e-3),
                  metrics=['acc'])
    model.summary()
    return model
def check_dirs(dirs):
    """Ensure each directory in *dirs* exists (relative paths are resolved
    against the current working directory).

    Missing directories are created, including intermediate ones; existing
    directories are left untouched.
    """
    for d in dirs:
        path = os.path.join(os.getcwd(), d)
        # exist_ok avoids the check-then-create race of the original
        # `if os.path.isdir(...) is False: os.mkdir(...)` sequence, and
        # makedirs also handles nested paths.
        os.makedirs(path, exist_ok=True)
check_dirs(['logs', 'models'])
# Load the label -> class-directory map produced elsewhere.
with open('class_map.p', 'rb') as handle:
class_map = pickle.load(handle)
df = pd.read_csv('plankton.csv')
df.drop_duplicates(subset='im_name', inplace=True, keep=False)
params = {'n_classes': n_classes,
'shape': input_shape,
'feat_shape': feat_shape,
'batch_size': 128,
'shuffle': True}
# Balance classes: resample every label to 5000 rows (with replacement).
frames = []
for c in np.unique(df.label):
frames.append(df[df.label==c].sample(n=5000, replace=True, random_state=0))
df_sample = pd.concat(frames)
paths = []
labels = []
data_path = os.path.join(os.getcwd(), 'pad')
for im_name, label in zip(df_sample.im_name, df_sample.label):
im_dir = os.path.join(data_path, class_map[label])
im_path = os.path.join(im_dir, im_name)
paths.append(im_path)
labels.append(to_categorical(y=label, num_classes=n_classes))
paths = np.array(paths)
labels = np.array(labels)
X_train, X_val, y_train, y_val = train_test_split(paths, labels, test_size=0.05, random_state=0)
checkpoint = ModelCheckpoint('./models/inception_v3.model', monitor='val_acc', verbose=1, mode='max',
save_best_only=True, save_weights_only=False, period=1)
reduceLROnPlato = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
patience=3, verbose=1, mode='min')
tensorboard = TrainValTensorBoard(write_graph=False)
tg = DataGenerator(paths=X_train, labels=y_train, augment=True, **params)
vg = DataGenerator(paths=X_val, labels=y_val, **params)
model = get_single_cls_model()
#model = load_model('./models/inception_v3.model')
# NOTE(review): reduceLROnPlato is constructed but never passed to
# fit_generator's callbacks — confirm whether it should be included.
model.fit_generator(generator=tg, validation_data=vg,
steps_per_epoch=len(tg)/10, validation_steps=len(vg),
epochs=1000, verbose=1,
callbacks=[tensorboard, checkpoint]) | 0.85814 | 0.393909 |
def selection_sort(array: list) -> int:
    """Sort *array* in place in ascending order via selection sort.

    Returns the number of element comparisons performed
    (always n*(n+1)//2 for n elements).
    """
    comparisons = 0
    for i in range(len(array)):
        min_index = i
        for j in range(i, len(array)):
            comparisons += 1
            # Bug fix: the original test `array[min_index] < array[j]` tracked
            # the *maximum*, producing a descending order — inconsistent with
            # the ascending insertion/merge/shell sorts in this module.
            if array[j] < array[min_index]:
                min_index = j
        array[i], array[min_index] = array[min_index], array[i]
    return comparisons
def insertion_sort(array: list) -> int:
    """Insertion-sort *array* in place (ascending); return the number of
    element comparisons performed."""
    comparisons = 0
    for i in range(1, len(array)):
        key = array[i]
        pos = i
        # Shift larger elements one slot right until key's position is found.
        while pos > 0 and array[pos - 1] > key:
            array[pos] = array[pos - 1]
            pos -= 1
            comparisons += 1
        comparisons += 1  # the final (failed) probe is counted as well
        array[pos] = key
    return comparisons
def merge_sort(array: list) -> int:
    """Merge-sort *array* in place (ascending); return the number of element
    comparisons performed during the merges."""
    comparisons = 0
    if len(array) <= 1:
        return comparisons
    mid = len(array) // 2
    left, right = array[:mid], array[mid:]
    comparisons += merge_sort(left)
    comparisons += merge_sort(right)
    i = j = 0
    # Merge the two sorted halves back into `array`.
    for k in range(len(array)):
        if i < len(left) and j < len(right):
            comparisons += 1
            if left[i] < right[j]:
                array[k] = left[i]
                i += 1
            else:
                array[k] = right[j]
                j += 1
        elif i < len(left):
            array[k] = left[i]
            i += 1
        else:
            array[k] = right[j]
            j += 1
    return comparisons
def shell_sort(array: list) -> int:
# Shell sort: gap-insertion-sort the array with gaps n//2, n//4, ..., 1.
# Returns the number of element comparisons performed.
def __shell_sort(array: list) -> int:
comparisons = 0
sublist_count = len(array) // 2
while sublist_count > 0:
for start_position in range(sublist_count):
comparisons += gap_insertion_sort(array, start_position, sublist_count)
sublist_count = sublist_count // 2
return comparisons
def gap_insertion_sort(array: list, start: int, gap: int) -> int:
# Insertion sort over the subsequence array[start::gap]; returns its
# comparison count (shifts plus one final failed probe per element).
comparisons = 0
for i in range(start + gap, len(array), gap):
current_value = array[i]
position = i
while position >= gap and array[position - gap] > current_value:
comparisons += 1
array[position] = array[position - gap]
position = position - gap
array[position] = current_value
comparisons += 1
return comparisons
return __shell_sort(array) | sorting_algorytms.py | def selection_sort(array: list) -> int:
comparisons = 0
for i in range(len(array)):
min_index = i
for j in range(i, len(array)):
if array[min_index] < array[j]:
min_index = j
comparisons += 1
array[i], array[min_index] = array[min_index], array[i]
return comparisons
def insertion_sort(array: list) -> int:
    """Insertion-sort *array* in place (ascending); return the number of
    element comparisons performed."""
    comparisons = 0
    for i in range(1, len(array)):
        key = array[i]
        pos = i
        # Shift larger elements one slot right until key's position is found.
        while pos > 0 and array[pos - 1] > key:
            array[pos] = array[pos - 1]
            pos -= 1
            comparisons += 1
        comparisons += 1  # the final (failed) probe is counted as well
        array[pos] = key
    return comparisons
def merge_sort(array: list) -> int:
    """Merge-sort *array* in place (ascending); return the number of element
    comparisons performed during the merges."""
    comparisons = 0
    if len(array) <= 1:
        return comparisons
    mid = len(array) // 2
    left, right = array[:mid], array[mid:]
    comparisons += merge_sort(left)
    comparisons += merge_sort(right)
    i = j = 0
    # Merge the two sorted halves back into `array`.
    for k in range(len(array)):
        if i < len(left) and j < len(right):
            comparisons += 1
            if left[i] < right[j]:
                array[k] = left[i]
                i += 1
            else:
                array[k] = right[j]
                j += 1
        elif i < len(left):
            array[k] = left[i]
            i += 1
        else:
            array[k] = right[j]
            j += 1
    return comparisons
def shell_sort(array: list) -> int:
# Shell sort: gap-insertion-sort the array with gaps n//2, n//4, ..., 1.
# Returns the number of element comparisons performed.
def __shell_sort(array: list) -> int:
comparisons = 0
sublist_count = len(array) // 2
while sublist_count > 0:
for start_position in range(sublist_count):
comparisons += gap_insertion_sort(array, start_position, sublist_count)
sublist_count = sublist_count // 2
return comparisons
def gap_insertion_sort(array: list, start: int, gap: int) -> int:
# Insertion sort over the subsequence array[start::gap]; returns its
# comparison count (shifts plus one final failed probe per element).
comparisons = 0
for i in range(start + gap, len(array), gap):
current_value = array[i]
position = i
while position >= gap and array[position - gap] > current_value:
comparisons += 1
array[position] = array[position - gap]
position = position - gap
array[position] = current_value
comparisons += 1
return comparisons
return __shell_sort(array) | 0.303629 | 0.594963 |
MARKDOWN = "markdown"  # format identifier for Markdown output
HTML = "html"  # format identifier for HTML output
# List of supported documentation formats
SUPPORTED_DOC_FORMATS = [
MARKDOWN,
HTML,
]
class FileExtension:
    """Maps a documentation-format identifier to its file extension."""

    @classmethod
    def get(cls, docFormat):
        """Return the file extension string (e.g., "html") for *docFormat*,
        or None when the format is not recognized.

        * docFormat -- the documentation format identifier
        """
        return {MARKDOWN: "md", HTML: "html"}.get(docFormat)
class MarkdownToHtml:
"""The MarkdownToHtml class deals with converting markdown content
to HTML content.
"""
@classmethod
def convert(cls, lines):
"""Convert the given markdown data (as a list of strings) into HTML
content (also as a list of strings).
* lines -- the input list of markdown lines
"""
htmlLines = [
"<html>",
# NOTE(review): no comma after "<head>" — the adjacent literals
# concatenate into a single "<head></head>" entry; confirm intended.
"<head>"
"</head>",
]
writingList = False
for line in lines:
isList = False
if line.startswith("## "):
# Subsection
data = line[3:]
line = "<h2>%s</h2>" % data
elif line.startswith("# "):
# Section
data = line[2:]
line = "<h1>%s</h1>" % data
elif line.startswith("- "):
# List entry
isList = True
if not writingList:
htmlLines.append("<ul>")
writingList = True
data = line[2:]
line = "<li>%s</li>" % data
elif len(line.strip()) > 0:
# Normal text content
line = "<p>%s</p>" % line
# Close a list that has ended
if not isList and writingList:
htmlLines.append("</ul>")
writingList = False
htmlLines.append(line)
# Close the list if one was being created
if writingList:
htmlLines.append("</ul>")
htmlLines.extend([
"</html>",
])
return htmlLines | rosautodoc/formatConverters.py | MARKDOWN = "markdown"
HTML = "html"
# List of supported documentation formats
SUPPORTED_DOC_FORMATS = [
MARKDOWN,
HTML,
]
class FileExtension:
    """Maps a documentation-format identifier to its file extension."""

    @classmethod
    def get(cls, docFormat):
        """Return the file extension string (e.g., "html") for *docFormat*,
        or None when the format is not recognized.

        * docFormat -- the documentation format identifier
        """
        return {MARKDOWN: "md", HTML: "html"}.get(docFormat)
class MarkdownToHtml:
"""The MarkdownToHtml class deals with converting markdown content
to HTML content.
"""
@classmethod
def convert(cls, lines):
"""Convert the given markdown data (as a list of strings) into HTML
content (also as a list of strings).
* lines -- the input list of markdown lines
"""
htmlLines = [
"<html>",
# NOTE(review): no comma after "<head>" — the adjacent literals
# concatenate into a single "<head></head>" entry; confirm intended.
"<head>"
"</head>",
]
writingList = False
for line in lines:
isList = False
if line.startswith("## "):
# Subsection
data = line[3:]
line = "<h2>%s</h2>" % data
elif line.startswith("# "):
# Section
data = line[2:]
line = "<h1>%s</h1>" % data
elif line.startswith("- "):
# List entry
isList = True
if not writingList:
htmlLines.append("<ul>")
writingList = True
data = line[2:]
line = "<li>%s</li>" % data
elif len(line.strip()) > 0:
# Normal text content
line = "<p>%s</p>" % line
# Close a list that has ended
if not isList and writingList:
htmlLines.append("</ul>")
writingList = False
htmlLines.append(line)
# Close the list if one was being created
if writingList:
htmlLines.append("</ul>")
htmlLines.extend([
"</html>",
])
return htmlLines | 0.665519 | 0.303758 |
import os
import pytest
import sqlalchemy as sa
import pandas as pd
from pandas.testing import assert_frame_equal
import pemi
from pemi.fields import *
class TestPdDataSubject():
# Unit tests for pemi.PdDataSubject schema/DataFrame handling.
def test_it_creates_an_empty_dataframe(self):
dsubj = pemi.PdDataSubject(schema=pemi.Schema(
f1=StringField(),
f2=StringField()
))
assert_frame_equal(dsubj.df, pd.DataFrame(columns=['f1', 'f2']))
def test_it_raises_schema_invalid_on_connection(self):
# ds2 requires field f3, which ds1's data does not provide.
ds1 = pemi.PdDataSubject(schema=pemi.Schema(
f1=StringField(),
f2=StringField()
))
ds2 = pemi.PdDataSubject(schema=pemi.Schema(
f1=StringField(),
f3=StringField()
))
ds1.df = pd.DataFrame({
'f1': [1, 2, 3],
'f2': [4, 5, 6]
})
with pytest.raises(pemi.data_subject.MissingFieldsError):
ds2.connect_from(ds1)
def test_it_catch_inexact_schema(self):
# With strict matching, an extra DataFrame column (f3) must fail validation.
ds1 = pemi.PdDataSubject(schema=pemi.Schema(
f1=StringField(),
f2=StringField()
),
strict_match_schema=True)
ds1.df = pd.DataFrame({
'f1': [1, 2, 3],
'f2': [1, 2, 3],
'f3': [1, 2, 3]
})
with pytest.raises(pemi.data_subject.MissingFieldsError):
ds1.validate_schema()
def test_it_pass_exact_df_and_scema(self):  # (typo in name: "scema" -> "schema")
ds1 = pemi.PdDataSubject(schema=pemi.Schema(
f1=StringField(),
f2=StringField(),
f3=StringField()
),
strict_match_schema=True)
ds1.df = pd.DataFrame({
'f1': [1, 2, 3],
'f2': [1, 2, 3],
'f3': [1, 2, 3]
})
assert ds1.validate_schema()
def test_it_creates_an_empty_df_with_schema_when_connected_to_empty(self):
ds1 = pemi.PdDataSubject()
ds2 = pemi.PdDataSubject(schema=pemi.Schema(
f1=StringField(),
f3=StringField()
))
ds2.connect_from(ds1)
assert_frame_equal(ds2.df, pd.DataFrame(columns=['f1', 'f3']))
class TestSaDataSubject:
    @pytest.fixture
    def sa_engine(self):
        """SQLAlchemy engine for the test Postgres instance, configured
        entirely from POSTGRES_* environment variables.
        """
        # Fix: the password lookup had been mangled into a '<PASSWORD>'
        # redaction placeholder, which is not valid Python; restore the
        # env-var lookup matching the other connection parameters.
        return sa.create_engine('postgresql://{user}:{password}@{host}/{dbname}'.format(
            user=os.environ.get('POSTGRES_USER'),
            password=os.environ.get('POSTGRES_PASSWORD'),
            host=os.environ.get('POSTGRES_HOST'),
            dbname=os.environ.get('POSTGRES_DB')
        ))
@pytest.fixture
def sa_subject(self, sa_engine):
# SaDataSubject bound to the `some_data` table with a single JSON field.
return pemi.SaDataSubject(
engine=sa_engine,
schema=pemi.Schema(
json_field=JsonField()
),
table='some_data'
)
@pytest.fixture(autouse=True)
def dbconn(self, sa_engine):
# Recreate the `some_data` table before every test and drop it afterwards.
with sa_engine.connect() as conn:
conn.execute(
'''
DROP TABLE IF EXISTS some_data;
CREATE TABLE some_data (
afield TEXT,
bfield TEXT,
json_field JSON
);
'''
)
yield
with sa_engine.connect() as conn:
conn.execute('DROP TABLE IF EXISTS some_data;')
def test_it_loads_some_json_data(self, sa_subject):
# JSON values should round-trip through the database unchanged.
df = pd.DataFrame({
'json_field': [{'a': 'alpha', 'three': 3}] * 2
})
sa_subject.from_pd(df)
assert_frame_equal(df, sa_subject.to_pd())
def test_it_includes_all_columns(self, sa_subject):
'even if they are not in the pemi schema'
df = sa_subject.to_pd()
assert sorted(df.columns) == sorted(['afield', 'bfield', 'json_field']) | tests/test_data_subject.py | import os
import pytest
import sqlalchemy as sa
import pandas as pd
from pandas.testing import assert_frame_equal
import pemi
from pemi.fields import *
class TestPdDataSubject():
# Unit tests for pemi.PdDataSubject schema/DataFrame handling.
def test_it_creates_an_empty_dataframe(self):
dsubj = pemi.PdDataSubject(schema=pemi.Schema(
f1=StringField(),
f2=StringField()
))
assert_frame_equal(dsubj.df, pd.DataFrame(columns=['f1', 'f2']))
def test_it_raises_schema_invalid_on_connection(self):
# ds2 requires field f3, which ds1's data does not provide.
ds1 = pemi.PdDataSubject(schema=pemi.Schema(
f1=StringField(),
f2=StringField()
))
ds2 = pemi.PdDataSubject(schema=pemi.Schema(
f1=StringField(),
f3=StringField()
))
ds1.df = pd.DataFrame({
'f1': [1, 2, 3],
'f2': [4, 5, 6]
})
with pytest.raises(pemi.data_subject.MissingFieldsError):
ds2.connect_from(ds1)
def test_it_catch_inexact_schema(self):
# With strict matching, an extra DataFrame column (f3) must fail validation.
ds1 = pemi.PdDataSubject(schema=pemi.Schema(
f1=StringField(),
f2=StringField()
),
strict_match_schema=True)
ds1.df = pd.DataFrame({
'f1': [1, 2, 3],
'f2': [1, 2, 3],
'f3': [1, 2, 3]
})
with pytest.raises(pemi.data_subject.MissingFieldsError):
ds1.validate_schema()
def test_it_pass_exact_df_and_scema(self):  # (typo in name: "scema" -> "schema")
ds1 = pemi.PdDataSubject(schema=pemi.Schema(
f1=StringField(),
f2=StringField(),
f3=StringField()
),
strict_match_schema=True)
ds1.df = pd.DataFrame({
'f1': [1, 2, 3],
'f2': [1, 2, 3],
'f3': [1, 2, 3]
})
assert ds1.validate_schema()
def test_it_creates_an_empty_df_with_schema_when_connected_to_empty(self):
ds1 = pemi.PdDataSubject()
ds2 = pemi.PdDataSubject(schema=pemi.Schema(
f1=StringField(),
f3=StringField()
))
ds2.connect_from(ds1)
assert_frame_equal(ds2.df, pd.DataFrame(columns=['f1', 'f3']))
class TestSaDataSubject:
    @pytest.fixture
    def sa_engine(self):
        """SQLAlchemy engine for the test Postgres instance, configured
        entirely from POSTGRES_* environment variables.
        """
        # Fix: the password lookup had been mangled into a '<PASSWORD>'
        # redaction placeholder, which is not valid Python; restore the
        # env-var lookup matching the other connection parameters.
        return sa.create_engine('postgresql://{user}:{password}@{host}/{dbname}'.format(
            user=os.environ.get('POSTGRES_USER'),
            password=os.environ.get('POSTGRES_PASSWORD'),
            host=os.environ.get('POSTGRES_HOST'),
            dbname=os.environ.get('POSTGRES_DB')
        ))
@pytest.fixture
def sa_subject(self, sa_engine):
# SaDataSubject bound to the `some_data` table with a single JSON field.
return pemi.SaDataSubject(
engine=sa_engine,
schema=pemi.Schema(
json_field=JsonField()
),
table='some_data'
)
@pytest.fixture(autouse=True)
def dbconn(self, sa_engine):
# Recreate the `some_data` table before every test and drop it afterwards.
with sa_engine.connect() as conn:
conn.execute(
'''
DROP TABLE IF EXISTS some_data;
CREATE TABLE some_data (
afield TEXT,
bfield TEXT,
json_field JSON
);
'''
)
yield
with sa_engine.connect() as conn:
conn.execute('DROP TABLE IF EXISTS some_data;')
def test_it_loads_some_json_data(self, sa_subject):
# JSON values should round-trip through the database unchanged.
df = pd.DataFrame({
'json_field': [{'a': 'alpha', 'three': 3}] * 2
})
sa_subject.from_pd(df)
assert_frame_equal(df, sa_subject.to_pd())
def test_it_includes_all_columns(self, sa_subject):
'even if they are not in the pemi schema'
df = sa_subject.to_pd()
assert sorted(df.columns) == sorted(['afield', 'bfield', 'json_field']) | 0.358241 | 0.473414 |
from contextlib import closing
from dataclasses import dataclass, field
import hashlib
import os
from pathlib import Path
import re
import shutil
import socket
import subprocess
import sys
import textwrap
from threading import Thread
from time import sleep
import traceback
from typing import Any, Callable, Dict, List, Optional, Tuple
from urllib.parse import urljoin, urlparse
import dill as pickle
import requests.exceptions
from stickybeak import utils
from stickybeak.handle_requests import INJECT_ENDPOINT, SERVER_DATA_ENDPOINT, InjectData, Requirement, get_requirements
from stickybeak.utils import Client
from stickybeak.vendored import inspect, pip
__all__ = ["InjectorException", "Injector", "DjangoInjector", "FlaskInjector", "ConnectionError"]
class InjectorException(Exception):
    """Raised when the injector is misused, e.g. executing before connect()."""
class ConnectionError(Exception):
    """Raised when the stickybeak server cannot be reached.

    NOTE(review): shadows the builtin ConnectionError; it is exported via
    __all__, so renaming would break the public API — left as-is.
    """
@dataclass
class DependencyInstallError(Exception):
    """Raised when installing remote requirements fails; carries the
    installer's non-zero exit code."""
    return_code: int
@dataclass
class BaseInjector:
"""Provides interface for code injection."""
name: str # injector name. Will be used as id in stickybeak_dir
host: str  # server address passed to Client for HTTP requests
port: int  # NOTE(review): stored but not used in the visible code — confirm
download_deps: bool  # when True, mirror the server's pip requirements into a local venv
stickybeak_dir: Path = field(init=False) # directory where remote dependencies and project source is kept
_client: Optional[Client] = field(init=False)
_server_data: Dict[str, Any] = field(
init=False, default_factory=dict
) # server data like source or pip freeze requirements
connected: bool = field(init=False, default=False)  # set True once connect() finishes
def __post_init__(self) -> None:
self._init()
def _init(self) -> None:
# Create the HTTP client and the per-injector cache dir under ~/.stickybeak.
self._client = Client(self.host)
self.stickybeak_dir = Path.home() / ".stickybeak" / Path(f"{self.name}")
def connect(self, blocking: bool = True, timeout: float = 5.0) -> None:
# Fetch server metadata, mirror the remote source tree locally and
# (optionally) install the server's requirements; sets `connected` on success.
# With blocking=False the work runs on a background Thread — callers should
# poll `connected` or call wait_until_connected().
# NOTE(review): with blocking=False a ConnectionError raised inside the
# thread is not propagated to the caller — confirm this is intended.
def target() -> None:
try:
# ########## Get data
self._server_data = self._client.get(SERVER_DATA_ENDPOINT, timeout=timeout)
# ########## Collect remote code
sources: Dict[str, str] = self._server_data["source"]
for path, source in sources.items():
abs_path: Path = self.stickybeak_dir / Path(path)
abs_path.parent.mkdir(parents=True, exist_ok=True)
abs_path.touch()
abs_path.write_text(source, "utf-8")
# ########## collect requirements
if self.download_deps:
self._do_download_deps()
self.connected = True
except requests.exceptions.ConnectionError as e:
raise ConnectionError from None
if blocking:
target()
else:
Thread(target=target).start()
def wait_until_connected(self, timeout: float = 5.0) -> None:
# Poll `connected` every 0.1 s; raise TimeoutError after ~`timeout` seconds.
waited = 0.0
one_sleep = 0.1
while not self.connected:
sleep(one_sleep)
waited += one_sleep
if waited >= timeout:
raise TimeoutError
def _do_download_deps(self) -> None:
# Create (once) a private virtualenv under stickybeak_dir and make its
# installed package versions match the server's reported requirements.
venv_dir = (self.stickybeak_dir / ".venv").absolute()
if not venv_dir.exists():
subprocess.check_output([f"virtualenv", f"{venv_dir}"], stderr=subprocess.DEVNULL)
remote_reqs = self._server_data["requirements"]
local_reqs = get_requirements(venv_dir)
@dataclass
class ReqDiff:
# Pairs a locally installed requirement (if any) with the remote one.
local: Optional[Requirement]
remote: Requirement
reqs_diff = {}
# Collect packages that are missing locally or have a version mismatch.
for p, v in remote_reqs.items():
remote = remote_reqs[p]
local = local_reqs.get(p)
if not local or remote["version"] != local["version"]:
reqs_diff[p] = ReqDiff(local=local, remote=remote)
if reqs_diff:
# delete packages manually (sometimes pip doesn't downgrade for some reason)
site_packages = utils.get_site_packages_dir_from_venv(venv_dir)
for p, r in reqs_diff.items():
if not r.local:
continue
package_dir = site_packages / r.local["key"]
shutil.rmtree(package_dir, ignore_errors=True)
shutil.rmtree(r.local["egg_info"], ignore_errors=True)
reqs = [f"{p}=={r.remote['version']}" for p, r in reqs_diff.items()]
ret = pip.main(["install", f"--target={str(site_packages)}", "--upgrade", *reqs])
if ret:
raise DependencyInstallError(return_code=ret)
def _raise_if_not_connected(self) -> None:
# Guard used by execution paths that require a completed connect().
if not self.connected:
raise InjectorException("Injector not connected! Run connect() first.")
def _run_remote_fun(self, source: str, filename: str, offset: int, call: str, args: Any, kwargs: Any) -> object:
    """Execute code.

    Returns:
        Dictionary containing all local variables.
    Raises:
        All exceptions from the code run remotely.
    """
    self._raise_if_not_connected()
    # we have to unload all the django modules so django accepts the new configuration
    # make a module copy so we can iterate over it and delete modules from the original one
    modules_before: List[str] = list(sys.modules.keys())[:]
    sys_path_before = sys.path[:]
    envs_before: os._Environ = os.environ.copy()  # type: ignore
    os.environ = self._server_data["envs"]  # type: ignore
    try:
        if self.download_deps:
            sys.path = [p for p in sys.path if "site-packages" not in p]
            # remove project dir from sys.path so there's no conflicts
            sys.path.pop(0)
            sys.path.insert(0, str(self.stickybeak_dir.absolute()))
            site_packages = utils.get_site_packages_dir_from_venv(self.stickybeak_dir.absolute() / ".venv")
            sys.path = [str(site_packages), *sys.path]
        self._before_execute()
        data = InjectData(source=source, filename=filename, offset=offset, call=call, args=list(args), kwargs=kwargs)
        pickled_data = pickle.dumps(data)
        try:
            content: bytes = self._client.post(INJECT_ENDPOINT, data=pickled_data).content
        except requests.exceptions.ConnectionError:
            raise ConnectionError from None
        ret: object = pickle.loads(content)
    finally:
        # BUGFIX: restore interpreter state even when the request or the
        # unpickling fails; previously a failure leaked the server's env
        # vars, the mangled sys.path and freshly imported modules into
        # this process.
        os.environ = envs_before
        sys.path = sys_path_before
        for m in set(sys.modules.keys()) - set(modules_before):
            sys.modules.pop(m, None)
    if isinstance(ret, Exception):
        sys.stderr.write(ret.__traceback_str__)  # type: ignore
        raise ret
    return ret
def _before_execute(self) -> None:
    """Hook run just before code is posted for remote execution; no-op here, overridden by subclasses (see DjangoInjector)."""
    pass
def _get_fun_src(self, fun: Callable[[], None]) -> Tuple[str, int]:
code: str = inspect.getsource(fun)
offset = inspect.getsourcelines(fun)[1]
code_lines: List[str] = code.splitlines(True)
if "@" in code_lines[0]:
code_lines.pop(0)
offset += 1
code = "".join(code_lines)
# remove indent that's left
code = textwrap.dedent(code)
return code, offset
def run_fun(self, fun: Callable, *args: Any, **kwargs: Any) -> object:
    """Execute *fun* on the remote side and return its result."""
    self._raise_if_not_connected()
    code, line_offset = self._get_fun_src(fun)
    return self._run_remote_fun(
        code,
        filename=inspect.getsourcefile(fun),
        offset=line_offset,
        call=fun.__name__,
        args=args,
        kwargs=kwargs,
    )
def run_klass_fun(
    self,
    klass: type,
    fun: Callable,
    args: Tuple[Any],
    kwargs: Dict[str, Any],
) -> object:
    """Execute method *fun* of the injected class *klass* remotely."""
    self._raise_if_not_connected()
    return self._run_remote_fun(
        klass.__source__,  # type: ignore
        filename=inspect.getsourcefile(klass),
        offset=inspect.getsourcelines(klass)[1],
        call=f"{klass.__name__}.{fun.__name__}",
        args=args,
        kwargs=kwargs,
    )
def _get_class_source(self, klass: type) -> str:
mro = reversed(klass.mro()[:-1])
sources = []
for c in mro:
sources.append(textwrap.dedent(inspect.getsource(c)) + "\n")
ret = "".join(sources)
return ret
def klass(self, cls: type) -> type:
    """Return a copy of *cls* whose public methods run remotely via this injector."""
    # re execute class to get copy
    # this way original class can yield multiple injected classes
    # first_instance = injector1.klass(Klass)
    # second_instance = injector2.klass(Klass)
    source = self._get_class_source(cls)
    definition_module = sys.modules[cls.__module__]
    sandbox: Dict[str, Any] = {}
    # exec in the defining module's globals so names used by the class body resolve
    exec(source, definition_module.__dict__, sandbox)
    cls_cpy = sandbox[cls.__name__]
    cls_cpy._injector = self  # type: ignore
    cls_cpy.__source__ = source
    # every public callable attribute gets wrapped; NOTE(review): this also
    # catches classmethods/staticmethods and callable class attributes --
    # confirm that is intended
    methods: List[str] = [a for a in dir(cls_cpy) if not a.startswith("__") and callable(getattr(cls_cpy, a))]
    for m in methods:
        # factory call binds `func` per iteration, avoiding the
        # late-binding-closure pitfall
        def decorator(func: Callable[[], None]) -> Callable:
            def wrapped(*args: Any, **kwargs: Any) -> object:
                return cls_cpy._injector.run_klass_fun(cls_cpy, func, args, kwargs)  # type: ignore
            return wrapped
        method: Callable[[], None] = getattr(cls_cpy, m)
        setattr(cls_cpy, m, decorator(method))
    return cls_cpy
def function(self, fun: Callable[[], None]) -> Callable:
    """Decorator: make *fun* execute remotely through this injector.

    :param fun: function to be decorated
    :return: decorated function
    """

    def remote_call(*args: Any, **kwargs: Any) -> object:
        return self.run_fun(fun, *args, **kwargs)

    return remote_call
@dataclass
class Injector(BaseInjector):
    """Generic injector whose host:port is finalized by prepare()."""

    port: int = field(init=False)

    def __post_init__(self) -> None:
        # deliberately skip BaseInjector's _init(); it runs in prepare()
        # once the port is known
        pass

    def _get_free_port(self) -> int:
        """Ask the OS for a currently-free TCP port number."""
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
            # BUGFIX: SO_REUSEADDR must be set *before* bind() to have any
            # effect on the binding; it was previously set afterwards.
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            s.bind(("", 0))
            return s.getsockname()[1]

    def prepare(self, port: Optional[int] = None) -> None:
        """Fix the port (given or OS-picked), extend host with it and init the client."""
        self.port = port or self._get_free_port()
        self.host = f"{self.host}:{self.port}"
        self._init()
@dataclass
class DjangoInjector(BaseInjector):
    """Injector targeting Django apps; re-initializes local Django before each remote run."""

    port: int = field(init=False)
    django_settings_module: str  # dotted path handed to django.setup() via DJANGO_SETTINGS_MODULE

    def _before_execute(self) -> None:
        """Reload Django against the remote project's settings (only when deps are mirrored)."""
        if not self.download_deps:
            return
        # unload every django module so django accepts the new configuration;
        # iterate over a snapshot because we mutate sys.modules
        modules = list(sys.modules.keys())[:]
        for m in modules:
            if "django" in m:
                sys.modules.pop(m)
        os.environ["DJANGO_SETTINGS_MODULE"] = self.django_settings_module
        # imported lazily: django only needs to exist when this injector is used
        import django
        django.setup()
@dataclass
class FlaskInjector(BaseInjector):
port: int = field(init=False) | stickybeak/injector.py | from contextlib import closing
from dataclasses import dataclass, field
import hashlib
import os
from pathlib import Path
import re
import shutil
import socket
import subprocess
import sys
import textwrap
from threading import Thread
from time import sleep
import traceback
from typing import Any, Callable, Dict, List, Optional, Tuple
from urllib.parse import urljoin, urlparse
import dill as pickle
import requests.exceptions
from stickybeak import utils
from stickybeak.handle_requests import INJECT_ENDPOINT, SERVER_DATA_ENDPOINT, InjectData, Requirement, get_requirements
from stickybeak.utils import Client
from stickybeak.vendored import inspect, pip
__all__ = ["InjectorException", "Injector", "DjangoInjector", "FlaskInjector", "ConnectionError"]
class InjectorException(Exception):
pass
class ConnectionError(Exception):
pass
@dataclass
class DependencyInstallError(Exception):
return_code: int
@dataclass
class BaseInjector:
"""Provides interface for code injection."""
name: str # injector name. Will be used as id in stickybeak_dir
host: str
port: int
download_deps: bool
stickybeak_dir: Path = field(init=False) # directory where remote dependencies and project source is kept
_client: Optional[Client] = field(init=False)
_server_data: Dict[str, Any] = field(
init=False, default_factory=dict
) # server data like source or pip freeze requirements
connected: bool = field(init=False, default=False)
def __post_init__(self) -> None:
self._init()
def _init(self) -> None:
self._client = Client(self.host)
self.stickybeak_dir = Path.home() / ".stickybeak" / Path(f"{self.name}")
def connect(self, blocking: bool = True, timeout: float = 5.0) -> None:
def target() -> None:
try:
# ########## Get data
self._server_data = self._client.get(SERVER_DATA_ENDPOINT, timeout=timeout)
# ########## Collect remote code
sources: Dict[str, str] = self._server_data["source"]
for path, source in sources.items():
abs_path: Path = self.stickybeak_dir / Path(path)
abs_path.parent.mkdir(parents=True, exist_ok=True)
abs_path.touch()
abs_path.write_text(source, "utf-8")
# ########## collect requirements
if self.download_deps:
self._do_download_deps()
self.connected = True
except requests.exceptions.ConnectionError as e:
raise ConnectionError from None
if blocking:
target()
else:
Thread(target=target).start()
def wait_until_connected(self, timeout: float = 5.0) -> None:
waited = 0.0
one_sleep = 0.1
while not self.connected:
sleep(one_sleep)
waited += one_sleep
if waited >= timeout:
raise TimeoutError
def _do_download_deps(self) -> None:
venv_dir = (self.stickybeak_dir / ".venv").absolute()
if not venv_dir.exists():
subprocess.check_output([f"virtualenv", f"{venv_dir}"], stderr=subprocess.DEVNULL)
remote_reqs = self._server_data["requirements"]
local_reqs = get_requirements(venv_dir)
@dataclass
class ReqDiff:
local: Optional[Requirement]
remote: Requirement
reqs_diff = {}
for p, v in remote_reqs.items():
remote = remote_reqs[p]
local = local_reqs.get(p)
if not local or remote["version"] != local["version"]:
reqs_diff[p] = ReqDiff(local=local, remote=remote)
if reqs_diff:
# delete packages manualy (sometimes pip doesn't downgrade for some reason)
site_packages = utils.get_site_packages_dir_from_venv(venv_dir)
for p, r in reqs_diff.items():
if not r.local:
continue
package_dir = site_packages / r.local["key"]
shutil.rmtree(package_dir, ignore_errors=True)
shutil.rmtree(r.local["egg_info"], ignore_errors=True)
reqs = [f"{p}=={r.remote['version']}" for p, r in reqs_diff.items()]
ret = pip.main(["install", f"--target={str(site_packages)}", "--upgrade", *reqs])
if ret:
raise DependencyInstallError(return_code=ret)
def _raise_if_not_connected(self) -> None:
if not self.connected:
raise InjectorException("Injector not connected! Run connect() first.")
def _run_remote_fun(self, source: str, filename: str, offset: int, call: str, args: Any, kwargs: Any) -> object:
"""Execute code.
Returns:
Dictionary containing all local variables.
Raises:
All exceptions from the code run remotely.
"""
self._raise_if_not_connected()
# we have to unload all the django modules so django accepts the new configuration
# make a module copy so we can iterate over it and delete modules from the original one
modules_before: List[str] = list(sys.modules.keys())[:]
sys_path_before = sys.path[:]
envs_before: os._Environ = os.environ.copy() # type: ignore
os.environ = self._server_data["envs"] # type: ignore
if self.download_deps:
sys.path = [p for p in sys.path if "site-packages" not in p]
# remove project dir from sys.path so there's no conflicts
sys.path.pop(0)
sys.path.insert(0, str(self.stickybeak_dir.absolute()))
site_packages = utils.get_site_packages_dir_from_venv(self.stickybeak_dir.absolute() / ".venv")
sys.path = [str(site_packages), *sys.path]
self._before_execute()
data = InjectData(source=source, filename=filename, offset=offset, call=call, args=list(args), kwargs=kwargs)
pickled_data = pickle.dumps(data)
try:
content: bytes = self._client.post(INJECT_ENDPOINT, data=pickled_data).content
except requests.exceptions.ConnectionError:
raise ConnectionError from None
ret: object = pickle.loads(content)
os.environ = envs_before
sys.path = sys_path_before
modules_after: List[str] = list(sys.modules.keys())[:]
diff: List[str] = list(set(modules_after) - set(modules_before))
for m in diff:
sys.modules.pop(m)
if isinstance(ret, Exception):
sys.stderr.write(ret.__traceback_str__) # type: ignore
raise ret
return ret
def _before_execute(self) -> None:
pass
def _get_fun_src(self, fun: Callable[[], None]) -> Tuple[str, int]:
code: str = inspect.getsource(fun)
offset = inspect.getsourcelines(fun)[1]
code_lines: List[str] = code.splitlines(True)
if "@" in code_lines[0]:
code_lines.pop(0)
offset += 1
code = "".join(code_lines)
# remove indent that's left
code = textwrap.dedent(code)
return code, offset
def run_fun(self, fun: Callable, *args: Any, **kwargs: Any) -> object:
self._raise_if_not_connected()
source, offset = self._get_fun_src(fun)
filename = inspect.getsourcefile(fun)
ret = self._run_remote_fun(
source, filename=filename, offset=offset, call=fun.__name__, args=args, kwargs=kwargs
)
return ret
def run_klass_fun(
self,
klass: type,
fun: Callable,
args: Tuple[Any],
kwargs: Dict[str, Any],
) -> object:
self._raise_if_not_connected()
filename = inspect.getsourcefile(klass)
offset = inspect.getsourcelines(klass)[1]
ret = self._run_remote_fun(
klass.__source__, # type: ignore
filename=filename,
offset=offset,
call=f"{klass.__name__}.{fun.__name__}",
args=args,
kwargs=kwargs,
)
return ret
def _get_class_source(self, klass: type) -> str:
mro = reversed(klass.mro()[:-1])
sources = []
for c in mro:
sources.append(textwrap.dedent(inspect.getsource(c)) + "\n")
ret = "".join(sources)
return ret
def klass(self, cls: type) -> type:
# re execute class to get copy
# this way original class can yield multiple injected classes
# first_instance = injector1.klass(Klass)
# second_instance = injector2.klass(Klass)
source = self._get_class_source(cls)
definition_module = sys.modules[cls.__module__]
sandbox: Dict[str, Any] = {}
exec(source, definition_module.__dict__, sandbox)
cls_cpy = sandbox[cls.__name__]
cls_cpy._injector = self # type: ignore
cls_cpy.__source__ = source
methods: List[str] = [a for a in dir(cls_cpy) if not a.startswith("__") and callable(getattr(cls_cpy, a))]
for m in methods:
def decorator(func: Callable[[], None]) -> Callable:
def wrapped(*args: Any, **kwargs: Any) -> object:
return cls_cpy._injector.run_klass_fun(cls_cpy, func, args, kwargs) # type: ignore
return wrapped
method: Callable[[], None] = getattr(cls_cpy, m)
setattr(cls_cpy, m, decorator(method))
return cls_cpy
def function(self, fun: Callable[[], None]) -> Callable:
"""
Decorator
:param fun: function to be decorated:
:return decorated function:
"""
def wrapped(*args: Any, **kwargs: Any) -> object:
ret = self.run_fun(fun, *args, **kwargs)
return ret
return wrapped
@dataclass
class Injector(BaseInjector):
port: int = field(init=False)
def __post_init__(self) -> None:
pass
def _get_free_port(self) -> int:
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
def prepare(self, port: Optional[int] = None) -> None:
self.port = port or self._get_free_port()
self.host = f"{self.host}:{self.port}"
self._init()
@dataclass
class DjangoInjector(BaseInjector):
port: int = field(init=False)
django_settings_module: str
def _before_execute(self) -> None:
if not self.download_deps:
return
modules = list(sys.modules.keys())[:]
for m in modules:
if "django" in m:
sys.modules.pop(m)
os.environ["DJANGO_SETTINGS_MODULE"] = self.django_settings_module
import django
django.setup()
@dataclass
class FlaskInjector(BaseInjector):
port: int = field(init=False) | 0.548915 | 0.123339 |
import torch
import re
import os
import collections
from torch._six import string_classes, int_classes
import cv2
from opt import opt
from tqdm import tqdm
import time
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import math
import copy
from put_gif import put_gif
import statistics
# BGR color constants (OpenCV channel order).
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
CYAN = (255, 255, 0)
YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
PURPLE = (255, 0, 255)
# Maps numpy dtype names to the matching torch CPU tensor constructors.
numpy_type_map = {
    'float64': torch.DoubleTensor,
    'float32': torch.FloatTensor,
    'float16': torch.HalfTensor,
    'int64': torch.LongTensor,
    'int32': torch.IntTensor,
    'int16': torch.ShortTensor,
    'int8': torch.CharTensor,
    'uint8': torch.ByteTensor,
}
# When True, collate_fn stacks tensors into shared-memory storage
# (useful when collation happens in DataLoader worker processes).
_use_shared_memory = True
def collate_fn(batch):
    r"""Puts each data field into a tensor with outer dimension batch size"""
    # BUGFIX: collections.Mapping / collections.Sequence were removed in
    # Python 3.10; import the ABCs from collections.abc (identical behavior
    # on older versions).
    from collections.abc import Mapping, Sequence

    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    elem_type = type(batch[0])
    if isinstance(batch[0], torch.Tensor):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # array of string classes and object
            if re.search('[SaUO]', elem.dtype.str) is not None:
                raise TypeError(error_msg.format(elem.dtype))
            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int_classes):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        # strings are Sequences too, so this check must come first
        return batch
    elif isinstance(batch[0], Mapping):
        return {key: collate_fn([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], Sequence):
        transposed = zip(*batch)
        return [collate_fn(samples) for samples in transposed]
    raise TypeError((error_msg.format(type(batch[0]))))
def collate_fn_list(batch):
    """Collate a batch of (img, inp, im_name) triples; `inp` is left as a tuple."""
    imgs, inps, names = zip(*batch)
    return collate_fn(imgs), inps, collate_fn(names)
def vis_frame_fast(frame, im_res, format='coco'):
    '''
    frame: frame image
    im_res: im_res of predictions
    format: coco or mpii
    return rendered image

    Draws keypoints and limbs for every detected person directly onto the
    frame (no gif overlays, unlike vis_frame).
    '''
    if format == 'coco':
        l_pair = [
            (0, 1), (0, 2), (1, 3), (2, 4),  # Head
            (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
            (17, 11), (17, 12),  # Body
            (11, 13), (12, 14), (13, 15), (14, 16)
        ]
        p_color = [(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),  # Nose, LEye, REye, LEar, REar
                   (77, 255, 255), (77, 255, 204), (77, 204, 255), (191, 255, 77), (77, 191, 255), (191, 255, 77),  # LShoulder, RShoulder, LElbow, RElbow, LWrist, RWrist
                   (204, 77, 255), (77, 255, 204), (191, 77, 255), (77, 255, 191), (127, 77, 255), (77, 255, 127), (0, 255, 255)]  # LHip, RHip, LKnee, Rknee, LAnkle, RAnkle, Neck
        line_color = [(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
                      (77, 255, 222), (77, 196, 255), (77, 135, 255), (191, 255, 77), (77, 255, 77),
                      (77, 222, 255), (255, 156, 127),
                      (0, 127, 255), (255, 127, 77), (0, 77, 255), (255, 77, 36)]
    elif format == 'mpii':
        l_pair = [
            (8, 9), (11, 12), (11, 10), (2, 1), (1, 0),
            (13, 14), (14, 15), (3, 4), (4, 5),
            (8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)
        ]
        p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
        # BUGFIX: line_color was missing in the mpii branch, so drawing limbs
        # raised NameError; use the same palette as vis_frame does.
        line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
    else:
        # BUGFIX: the exception object was created but never raised.
        raise NotImplementedError
    im_name = im_res['imgname'].split('/')[-1]
    img = frame
    for human in im_res['result']:
        part_line = {}
        kp_preds = human['keypoints']
        kp_scores = human['kp_score']
        # append the neck keypoint (index 17) as the shoulder midpoint
        kp_preds = torch.cat((kp_preds, torch.unsqueeze((kp_preds[5, :] + kp_preds[6, :]) / 2, 0)))
        kp_scores = torch.cat((kp_scores, torch.unsqueeze((kp_scores[5, :] + kp_scores[6, :]) / 2, 0)))
        # Draw keypoints
        for n in range(kp_scores.shape[0]):
            if kp_scores[n] <= 0.05:
                continue
            cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
            part_line[n] = (cor_x, cor_y)
            cv2.circle(img, (cor_x, cor_y), 4, p_color[n], -1)
        # Draw limbs
        for i, (start_p, end_p) in enumerate(l_pair):
            if start_p in part_line and end_p in part_line:
                start_xy = part_line[start_p]
                end_xy = part_line[end_p]
                # BUGFIX: cv2.line requires an int thickness; the raw tensor
                # expression was rejected by OpenCV.
                cv2.line(img, start_xy, end_xy, line_color[i], int(2 * (kp_scores[start_p] + kp_scores[end_p]) + 1))
    return img
def vis_frame(frame, im_res, format='coco'):
    '''
    frame: frame image
    im_res: im_res of predictions
    format: coco or mpii
    return rendered image

    Works at half resolution, derives extra anchor points from the detected
    keypoints, overlays a hard-coded frame-number-driven schedule of gif
    effects (via put_gif) and scales the result back to (width, height).
    '''
    if format == 'coco':
        l_pair = [
            (0, 1), (0, 2), (1, 3), (2, 4), # Head
            (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
            (17, 11), (17, 12), # Body
            (11, 13), (12, 14), (13, 15), (14, 16)
        ]
        p_color = [(0, 255, 255), (0, 191, 255),(0, 255, 102),(0, 77, 255), (0, 255, 0), #Nose, LEye, REye, LEar, REar
                   (77,255,255), (77, 255, 204), (77,204,255), (191, 255, 77), (77,191,255), (191, 255, 77), #LShoulder, RShoulder, LElbow, RElbow, LWrist, RWrist
                   (204,77,255), (77,255,204), (191,77,255), (77,255,191), (127,77,255), (77,255,127), (0, 255, 255)] #LHip, RHip, LKnee, Rknee, LAnkle, RAnkle, Neck
        line_color = [(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
                      (77,255,222), (77,196,255), (77,135,255), (191,255,77), (77,255,77),
                      (77,222,255), (255,156,127),
                      (0,127,255), (255,127,77), (0,77,255), (255,77,36)]
    elif format == 'mpii':
        l_pair = [
            (8, 9), (11, 12), (11, 10), (2, 1), (1, 0),
            (13, 14), (14, 15), (3, 4), (4, 5),
            (8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)
        ]
        p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
        line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
    else:
        raise NotImplementedError
    im_name = im_res['imgname'].split('/')[-1]
    img = frame
    height,width = img.shape[:2]
    # work at half resolution; upscaled again before returning
    img = cv2.resize(img,(int(width/2), int(height/2)))
    for human in im_res['result']:
        part_line = {}
        kp_preds = human['keypoints']
        kp_scores = human['kp_score']
        # append the neck keypoint (index 17) as the shoulder midpoint
        kp_preds = torch.cat((kp_preds, torch.unsqueeze((kp_preds[5,:]+kp_preds[6,:])/2,0)))
        kp_scores = torch.cat((kp_scores, torch.unsqueeze((kp_scores[5,:]+kp_scores[6,:])/2,0)))
        # Draw keypoints
        for n in range(kp_scores.shape[0]):
            if kp_scores[n] <= 0.05:
                continue
            cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
            # coordinates are halved to match the downscaled image
            part_line[n] = (int(cor_x/2), int(cor_y/2))
            # snapshot of the frame before overlays; used as KeyError fallback below
            bg = img.copy()
            # print(part_line[n][1])
            # cv2.circle(bg, (int(cor_x/2), int(cor_y/2)), 2, p_color[n], -1)
            # Now create a mask of logo and create its inverse mask also
            # transparency = max(0, min(1, kp_scores[n]))
            # img = cv2.addWeighted(bg, transparency, img, 1-transparency, 0)
            # if n==1:
        try:
            # Derived/virtual anchor points (indices >= 18) for gif placement.
            # both chin 25 is left part 26 is right (Example: Face chin)
            part_line[25] = (statistics.mean([part_line[0][0],part_line[3][0]]), statistics.mean([part_line[0][1],part_line[3][1]]))
            part_line[26] = (statistics.mean([part_line[0][0],part_line[4][0]]), statistics.mean([part_line[0][1],part_line[4][1]]))
            # middle of eyes (Example: Sunglasses)
            part_line[18] = (statistics.mean([part_line[1][0], part_line[2][0]]), statistics.mean([part_line[1][1],part_line[2][1]]))
            # mean of ears
            part_line[19] = (statistics.mean([part_line[3][0], part_line[4][0]]), statistics.mean([part_line[3][1],part_line[4][1]]))
            # nose -> eye-midpoint vector, used to extrapolate head/mouth points
            part_line[20] = (part_line[18][0]-part_line[0][0], part_line[18][1]-part_line[0][1])
            # expected head, mouth (Example: Crowns for 21 Fire for 31)
            part_line[21] = (part_line[0][0]+5*part_line[20][0], part_line[0][1]+5*part_line[20][1])
            part_line[31] = (part_line[0][0]+6*part_line[20][0], part_line[0][1]+6*part_line[20][1])
            part_line[23] = (part_line[0][0]-2*part_line[20][0], part_line[0][1]-2*part_line[20][1])
            # hip part calulation
            part_line[40] = (part_line[11][0]-part_line[12][0], part_line[11][1]-part_line[12][1])
            part_line[41] = (part_line[11][0]+2*part_line[40][0], part_line[11][1]+2*part_line[40][1])
            part_line[42] = (part_line[11][0]+part_line[40][0], part_line[11][1]+part_line[40][1])
            part_line[43] = (part_line[12][0]-2*part_line[40][0], part_line[12][1]-2*part_line[40][1])
            part_line[44] = (part_line[12][0]-part_line[40][0], part_line[12][1]-part_line[40][1])
            # expected static
            part_line[50] = (int(3*width/7), int(height/4))
            part_line[51] = (int(width/8), int(height/4))
            part_line[52] = (int(width/4), int(height/4))
            part_line[53] = (int(3*width/8), int(height/4))
            part_line[54] = (int(width/6.2), int(height/9.157))
            part_line[55] = (int(width/3.326), int(height/58))
            part_line[56] = (int(width/6.514), int(height/6.842))
            part_line[57] = (int(width/3.406), int(height/8.533))
            part_line[58] = (int(width/2.319), int(height/9.846))
            part_line[59] = (int(width/4.24), int(height/3.052))
            part_line[60] = (int(width/3.212), int(height/2.74))
            # Each (file_name, scale, replay_speed, start, end) tuple schedules one
            # gif overlay for a frame-number window [start, end); section headers
            # below are timestamps in seconds of the target video.
            # 2~3.5
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/devil_mask.gif', 15, 2, 64, 112)
            img = put_gif(im_name, img, part_line, 0, 1, 2, scale, replay_speed, start, end, file_name)
            # 3.5~5
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/money2_SA.gif', 3, 2, 112, 160)
            img = put_gif(im_name, img, part_line, 1, 1, 2, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/money2_SA.gif', 3, 2, 112, 160)
            img = put_gif(im_name, img, part_line, 2, 1, 2, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/money2_SA.gif', 3, 2, 112, 160)
            img = put_gif(im_name, img, part_line, 2, 1, 2, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/moneyrain_crop.gif', 130, 1, 112, 160)
            img = put_gif(im_name, img, part_line, 51, 51, 51, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/moneyrain_crop.gif', 130, 1, 128, 160)
            img = put_gif(im_name, img, part_line, 52, 52, 52, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/moneyrain_crop.gif', 130, 1, 144, 160)
            img = put_gif(im_name, img, part_line, 53, 53, 53, scale, replay_speed, start, end, file_name)
            # 5~6
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/spark2.gif', 20, 1, 174, 189)
            img = put_gif(im_name, img, part_line, 10, 10, 10, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/flame_edited.gif', 20, 1, 189, 207)
            img = put_gif(im_name, img, part_line, 10, 10, 10, scale, replay_speed, start, end, file_name)
            # 6~7
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/angel_ring.gif', 25, 1, 223, 233)
            img = put_gif(im_name, img, part_line, 31, 31, 31, scale, replay_speed, start, end, file_name)
            # 7~9
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/sunglass.gif', 7, 2, 240, 257)
            img = put_gif(im_name, img, part_line, 0, 1, 2, scale, replay_speed, start, end, file_name)
            # 9~11
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/skull_yellow.gif', 7, 3, 277, 329)
            img = put_gif(im_name, img, part_line, 0, 1, 2, scale, replay_speed, start, end, file_name)
            # 11~13
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/bear_Line.gif', 78, 2, 351, 401)
            img = put_gif(im_name, img, part_line, 51, 51, 51, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/bear_Line.gif', 78, 2, 351, 401)
            img = put_gif(im_name, img, part_line, 50, 50, 50, scale, replay_speed, start, end, file_name)
            # 13~14
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/spark2.gif', 20, 1, 406, 432)
            img = put_gif(im_name, img, part_line, 9, 9, 9, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/spark2.gif', 20, 1, 406, 432)
            img = put_gif(im_name, img, part_line, 10, 10, 10, scale, replay_speed, start, end, file_name)
            # 14~15
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/mad_rabbit.gif', 15, 2, 446, 467)
            img = put_gif(im_name, img, part_line, 18, 1, 2, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/triple_scratch2.gif', 15, 2, 446, 467)
            img = put_gif(im_name, img, part_line, 41, 1, 2, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/triple_scratch2_mirrored.gif', 15, 2, 446, 467)
            img = put_gif(im_name, img, part_line, 43, 1, 2, scale, replay_speed, start, end, file_name)
            # 15~16
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/smoke.gif', 10, 2, 473, 488)
            img = put_gif(im_name, img, part_line, 54, 1, 2, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/smoke.gif', 10, 2, 473, 488)
            img = put_gif(im_name, img, part_line, 55, 1, 2, scale, replay_speed, start, end, file_name)
            # 16~17
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/wing_1.gif', 25, 1, 483, 511)
            img = put_gif(im_name, img, part_line, 31, 5, 6, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/money_edited.gif', 2, 2, 483, 511)
            img = put_gif(im_name, img, part_line, 1, 1, 2, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/money_edited.gif', 2, 2, 483, 511)
            img = put_gif(im_name, img, part_line, 2, 1, 2, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/spark.gif', 10, 2, 483, 511)
            img = put_gif(im_name, img, part_line, 9, 9, 9, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/spark.gif', 10, 2, 483, 511)
            img = put_gif(im_name, img, part_line, 10, 10, 10, scale, replay_speed, start, end, file_name)
            # 17~18
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/X2.gif', 5, 2, 511, 564)
            img = put_gif(im_name, img, part_line, 1, 1, 2, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/X2.gif', 5, 2, 511, 564)
            img = put_gif(im_name, img, part_line, 2, 1, 2, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/crown.gif', 13, 2, 511, 533)
            img = put_gif(im_name, img, part_line, 31, 1, 2, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/spread_down.gif', 13, 2, 540, 550)
            img = put_gif(im_name, img, part_line, 10, 10, 8, scale, replay_speed, start, end, file_name)
            # 18~19
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/increase.gif', 30, 2, 564, 579)
            img = put_gif(im_name, img, part_line, 59, 59, 59, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/increase.gif', 30, 2, 564, 579)
            img = put_gif(im_name, img, part_line, 60, 60, 60, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/flame2.gif', 7, 2, 564, 579)
            img = put_gif(im_name, img, part_line, 31, 31, 31, scale, replay_speed, start, end, file_name)
            # 19~20
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/devil_tail.gif', 10, 2, 579, 603)
            img = put_gif(im_name, img, part_line, 42, 11, 12, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/fixed_monster_1.gif', 16, 2, 593, 616)
            img = put_gif(im_name, img, part_line, 56, 56, 56, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/fixed_monster_2.gif', 16, 2, 598, 616)
            img = put_gif(im_name, img, part_line, 57, 57, 57, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/fixed_monster_3.gif', 16, 2, 602, 616)
            img = put_gif(im_name, img, part_line, 58, 58, 58, scale, replay_speed, start, end, file_name)
            # 20~20
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/pop_text.gif', 3, 2, 610, 626)
            img = put_gif(im_name, img, part_line, 10, 10, 10, scale, replay_speed, start, end, file_name)
            # 20~22
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/spark.gif', 10, 2, 644, 653)
            img = put_gif(im_name, img, part_line, 9, 9, 9, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/spark.gif', 10, 2, 644, 653)
            img = put_gif(im_name, img, part_line, 10, 10, 10, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/X.gif', 5, 1, 625, 666)
            img = put_gif(im_name, img, part_line, 1, 1, 2, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/X.gif', 5, 1, 625, 666)
            img = put_gif(im_name, img, part_line, 2, 1, 2, scale, replay_speed, start, end, file_name)
            # 22~23
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/flame_edited.gif', 13, 1, 678, 689)
            img = put_gif(im_name, img, part_line, 10, 10, 10, scale, replay_speed, start, end, file_name)
            # end
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/error.gif', 100, 1, 696, 736)
            img = put_gif(im_name, img, part_line, 51, 51, 51, scale, replay_speed, start, end, file_name)
            (file_name, scale, replay_speed, start, end) = ('examples/gif/YG/error.gif', 100, 1, 696, 736)
            img = put_gif(im_name, img, part_line, 53, 53, 53, scale, replay_speed, start, end, file_name)
        # a required anchor keypoint was below threshold -> fall back to snapshot
        # NOTE(review): `bg` is only assigned inside the keypoint loop; if no
        # keypoint passes the 0.05 threshold this raises NameError -- confirm
        # inputs always contain at least one confident keypoint
        except KeyError:
            img = bg
    # scale back up to the original resolution
    img = cv2.resize(img,(width,height),interpolation=cv2.INTER_CUBIC)
    return img
def getTime(time1=0):
    """With no argument, return the current time.

    With a start time *time1*, return a (now, elapsed_seconds) pair.
    """
    if not time1:
        return time.time()
    interval = time.time() - time1
    return time.time(), interval
import re
import os
import collections
from torch._six import string_classes, int_classes
import cv2
from opt import opt
from tqdm import tqdm
import time
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import math
import copy
from put_gif import put_gif
import statistics
# OpenCV colour constants, in BGR channel order (not RGB).
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
CYAN = (255, 255, 0)
YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
PURPLE = (255, 0, 255)
# Maps numpy dtype names to the matching torch tensor constructors;
# consumed by collate_fn when collating batches of numpy scalars.
numpy_type_map = {
'float64': torch.DoubleTensor,
'float32': torch.FloatTensor,
'float16': torch.HalfTensor,
'int64': torch.LongTensor,
'int32': torch.IntTensor,
'int16': torch.ShortTensor,
'int8': torch.CharTensor,
'uint8': torch.ByteTensor,
}
# When True, collate_fn stacks tensors into shared-memory storage so
# batches can cross process boundaries without an extra copy.
_use_shared_memory = True
def collate_fn(batch):
    r"""Puts each data field into a tensor with outer dimension batch size.

    Mirrors torch's default_collate: dispatches on the type of the first
    element and recursively collates mappings and sequences.

    Arguments:
        batch -- list of samples (tensors, numpy arrays/scalars, ints,
            floats, strings, or mappings/sequences thereof).

    Returns:
        The collated batch with a leading batch dimension added.

    Raises:
        TypeError -- if the element type is not supported.
    """
    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    elem_type = type(batch[0])
    if isinstance(batch[0], torch.Tensor):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy.
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # Reject arrays of strings/objects (dtype kinds S, a, U, O).
            if re.search('[SaUO]', elem.dtype.str) is not None:
                raise TypeError(error_msg.format(elem.dtype))
            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # numpy scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int_classes):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    # Bugfix: collections.Mapping / collections.Sequence were deprecated
    # aliases and removed in Python 3.10; use collections.abc (Python 3.3+).
    elif isinstance(batch[0], collections.abc.Mapping):
        return {key: collate_fn([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], collections.abc.Sequence):
        transposed = zip(*batch)
        return [collate_fn(samples) for samples in transposed]
    raise TypeError((error_msg.format(type(batch[0]))))
def collate_fn_list(batch):
    """Collate a batch of (img, inp, im_name) triples.

    `img` and `im_name` are collated with collate_fn; `inp` is passed
    through untouched as the tuple produced by zip.
    """
    imgs, inps, names = zip(*batch)
    return collate_fn(imgs), inps, collate_fn(names)
def vis_frame_fast(frame, im_res, format='coco'):
    '''
    frame: frame image
    im_res: im_res of predictions
    format: coco or mpii
    return rendered image

    Draws detected keypoints and skeleton limbs directly onto `frame`
    (mutates and returns it). A synthetic neck joint (index 17) is added
    as the midpoint of the two shoulders.
    '''
    if format == 'coco':
        l_pair = [
            (0, 1), (0, 2), (1, 3), (2, 4),  # Head
            (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
            (17, 11), (17, 12),  # Body
            (11, 13), (12, 14), (13, 15), (14, 16)
        ]
        p_color = [(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),  # Nose, LEye, REye, LEar, REar
                   (77, 255, 255), (77, 255, 204), (77, 204, 255), (191, 255, 77), (77, 191, 255), (191, 255, 77),  # LShoulder, RShoulder, LElbow, RElbow, LWrist, RWrist
                   (204, 77, 255), (77, 255, 204), (191, 77, 255), (77, 255, 191), (127, 77, 255), (77, 255, 127), (0, 255, 255)]  # LHip, RHip, LKnee, Rknee, LAnkle, RAnkle, Neck
        line_color = [(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
                      (77, 255, 222), (77, 196, 255), (77, 135, 255), (191, 255, 77), (77, 255, 77),
                      (77, 222, 255), (255, 156, 127),
                      (0, 127, 255), (255, 127, 77), (0, 77, 255), (255, 77, 36)]
    elif format == 'mpii':
        l_pair = [
            (8, 9), (11, 12), (11, 10), (2, 1), (1, 0),
            (13, 14), (14, 15), (3, 4), (4, 5),
            (8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)
        ]
        p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
        # Consistency fix: this branch previously defined no line_color, so
        # the limb-drawing loop below raised NameError for mpii input. Use
        # the same palette vis_frame uses for mpii.
        line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
    else:
        # Bugfix: the original evaluated the exception class without raising it.
        raise NotImplementedError
    im_name = im_res['imgname'].split('/')[-1]
    img = frame
    for human in im_res['result']:
        part_line = {}
        kp_preds = human['keypoints']
        kp_scores = human['kp_score']
        # Append synthetic neck joint (index 17) at the shoulder midpoint.
        kp_preds = torch.cat((kp_preds, torch.unsqueeze((kp_preds[5, :] + kp_preds[6, :]) / 2, 0)))
        kp_scores = torch.cat((kp_scores, torch.unsqueeze((kp_scores[5, :] + kp_scores[6, :]) / 2, 0)))
        # Draw keypoints above the confidence threshold.
        for n in range(kp_scores.shape[0]):
            if kp_scores[n] <= 0.05:
                continue
            cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
            part_line[n] = (cor_x, cor_y)
            cv2.circle(img, (cor_x, cor_y), 4, p_color[n], -1)
        # Draw limbs between keypoints that were both detected.
        for i, (start_p, end_p) in enumerate(l_pair):
            if start_p in part_line and end_p in part_line:
                start_xy = part_line[start_p]
                end_xy = part_line[end_p]
                # Bugfix: OpenCV requires an int thickness; the original
                # passed a tensor/float expression, which recent cv2 rejects.
                thickness = int(2 * (kp_scores[start_p] + kp_scores[end_p]) + 1)
                cv2.line(img, start_xy, end_xy, line_color[i], thickness)
    return img
def vis_frame(frame, im_res, format='coco'):
    '''
    frame: frame image
    im_res: im_res of predictions
    format: coco or mpii
    return rendered image

    Computes pose keypoints plus derived anchor points on a half-resolution
    copy of the frame, overlays timed GIF effects (put_gif) keyed by the
    video frame name, and resizes back to the original resolution.

    NOTE(review): the source dump had its indentation stripped; the
    try/except placement below (effects applied once per human, after the
    keypoint loop) is the most plausible reconstruction -- confirm against
    the upstream file.
    '''
    if format == 'coco':
        l_pair = [
            (0, 1), (0, 2), (1, 3), (2, 4),  # Head
            (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
            (17, 11), (17, 12),  # Body
            (11, 13), (12, 14), (13, 15), (14, 16)
        ]
        p_color = [(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),  # Nose, LEye, REye, LEar, REar
                   (77, 255, 255), (77, 255, 204), (77, 204, 255), (191, 255, 77), (77, 191, 255), (191, 255, 77),  # LShoulder, RShoulder, LElbow, RElbow, LWrist, RWrist
                   (204, 77, 255), (77, 255, 204), (191, 77, 255), (77, 255, 191), (127, 77, 255), (77, 255, 127), (0, 255, 255)]  # LHip, RHip, LKnee, Rknee, LAnkle, RAnkle, Neck
        line_color = [(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
                      (77, 255, 222), (77, 196, 255), (77, 135, 255), (191, 255, 77), (77, 255, 77),
                      (77, 222, 255), (255, 156, 127),
                      (0, 127, 255), (255, 127, 77), (0, 77, 255), (255, 77, 36)]
    elif format == 'mpii':
        l_pair = [
            (8, 9), (11, 12), (11, 10), (2, 1), (1, 0),
            (13, 14), (14, 15), (3, 4), (4, 5),
            (8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)
        ]
        p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
        line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
    else:
        raise NotImplementedError
    im_name = im_res['imgname'].split('/')[-1]
    img = frame
    height, width = img.shape[:2]
    # Work at half resolution for speed; keypoint coordinates are halved too.
    img = cv2.resize(img, (int(width / 2), int(height / 2)))
    # Timed GIF overlays, in application order. Each row is
    # (gif file, scale, replay_speed, start_frame, end_frame, p1, p2, p3)
    # and is applied as put_gif(im_name, img, part_line, p1, p2, p3,
    # scale, replay_speed, start, end, gif_dir + gif). This table replaces
    # ~90 copy-pasted assignment/call pairs with identical argument values.
    gif_dir = 'examples/gif/YG/'
    effects = [
        # 2~3.5
        ('devil_mask.gif', 15, 2, 64, 112, 0, 1, 2),
        # 3.5~5
        ('money2_SA.gif', 3, 2, 112, 160, 1, 1, 2),
        ('money2_SA.gif', 3, 2, 112, 160, 2, 1, 2),
        ('money2_SA.gif', 3, 2, 112, 160, 2, 1, 2),
        ('moneyrain_crop.gif', 130, 1, 112, 160, 51, 51, 51),
        ('moneyrain_crop.gif', 130, 1, 128, 160, 52, 52, 52),
        ('moneyrain_crop.gif', 130, 1, 144, 160, 53, 53, 53),
        # 5~6
        ('spark2.gif', 20, 1, 174, 189, 10, 10, 10),
        ('flame_edited.gif', 20, 1, 189, 207, 10, 10, 10),
        # 6~7
        ('angel_ring.gif', 25, 1, 223, 233, 31, 31, 31),
        # 7~9
        ('sunglass.gif', 7, 2, 240, 257, 0, 1, 2),
        # 9~11
        ('skull_yellow.gif', 7, 3, 277, 329, 0, 1, 2),
        # 11~13
        ('bear_Line.gif', 78, 2, 351, 401, 51, 51, 51),
        ('bear_Line.gif', 78, 2, 351, 401, 50, 50, 50),
        # 13~14
        ('spark2.gif', 20, 1, 406, 432, 9, 9, 9),
        ('spark2.gif', 20, 1, 406, 432, 10, 10, 10),
        # 14~15
        ('mad_rabbit.gif', 15, 2, 446, 467, 18, 1, 2),
        ('triple_scratch2.gif', 15, 2, 446, 467, 41, 1, 2),
        ('triple_scratch2_mirrored.gif', 15, 2, 446, 467, 43, 1, 2),
        # 15~16
        ('smoke.gif', 10, 2, 473, 488, 54, 1, 2),
        ('smoke.gif', 10, 2, 473, 488, 55, 1, 2),
        # 16~17
        ('wing_1.gif', 25, 1, 483, 511, 31, 5, 6),
        ('money_edited.gif', 2, 2, 483, 511, 1, 1, 2),
        ('money_edited.gif', 2, 2, 483, 511, 2, 1, 2),
        ('spark.gif', 10, 2, 483, 511, 9, 9, 9),
        ('spark.gif', 10, 2, 483, 511, 10, 10, 10),
        # 17~18
        ('X2.gif', 5, 2, 511, 564, 1, 1, 2),
        ('X2.gif', 5, 2, 511, 564, 2, 1, 2),
        ('crown.gif', 13, 2, 511, 533, 31, 1, 2),
        ('spread_down.gif', 13, 2, 540, 550, 10, 10, 8),
        # 18~19
        ('increase.gif', 30, 2, 564, 579, 59, 59, 59),
        ('increase.gif', 30, 2, 564, 579, 60, 60, 60),
        ('flame2.gif', 7, 2, 564, 579, 31, 31, 31),
        # 19~20
        ('devil_tail.gif', 10, 2, 579, 603, 42, 11, 12),
        ('fixed_monster_1.gif', 16, 2, 593, 616, 56, 56, 56),
        ('fixed_monster_2.gif', 16, 2, 598, 616, 57, 57, 57),
        ('fixed_monster_3.gif', 16, 2, 602, 616, 58, 58, 58),
        # 20~20
        ('pop_text.gif', 3, 2, 610, 626, 10, 10, 10),
        # 20~22
        ('spark.gif', 10, 2, 644, 653, 9, 9, 9),
        ('spark.gif', 10, 2, 644, 653, 10, 10, 10),
        ('X.gif', 5, 1, 625, 666, 1, 1, 2),
        ('X.gif', 5, 1, 625, 666, 2, 1, 2),
        # 22~23
        ('flame_edited.gif', 13, 1, 678, 689, 10, 10, 10),
        # end
        ('error.gif', 100, 1, 696, 736, 51, 51, 51),
        ('error.gif', 100, 1, 696, 736, 53, 53, 53),
    ]
    for human in im_res['result']:
        part_line = {}
        kp_preds = human['keypoints']
        kp_scores = human['kp_score']
        # Append synthetic neck joint (index 17) at the shoulder midpoint.
        kp_preds = torch.cat((kp_preds, torch.unsqueeze((kp_preds[5, :] + kp_preds[6, :]) / 2, 0)))
        kp_scores = torch.cat((kp_scores, torch.unsqueeze((kp_scores[5, :] + kp_scores[6, :]) / 2, 0)))
        # Collect confident keypoints (in half-resolution coordinates).
        for n in range(kp_scores.shape[0]):
            if kp_scores[n] <= 0.05:
                continue
            cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
            part_line[n] = (int(cor_x / 2), int(cor_y / 2))
            # Snapshot used to roll back this human's overlays on failure.
            # NOTE(review): if no keypoint clears the threshold, bg is never
            # bound and the except below would NameError -- same as original.
            bg = img.copy()
        try:
            # Both chin anchors: 25 = left, 26 = right (e.g. face-chin effects).
            part_line[25] = (statistics.mean([part_line[0][0], part_line[3][0]]), statistics.mean([part_line[0][1], part_line[3][1]]))
            part_line[26] = (statistics.mean([part_line[0][0], part_line[4][0]]), statistics.mean([part_line[0][1], part_line[4][1]]))
            # Middle of the eyes (e.g. sunglasses).
            part_line[18] = (statistics.mean([part_line[1][0], part_line[2][0]]), statistics.mean([part_line[1][1], part_line[2][1]]))
            # Midpoint of the ears.
            part_line[19] = (statistics.mean([part_line[3][0], part_line[4][0]]), statistics.mean([part_line[3][1], part_line[4][1]]))
            # Nose-to-eye-center offset vector, used to extrapolate the head.
            part_line[20] = (part_line[18][0] - part_line[0][0], part_line[18][1] - part_line[0][1])
            # Expected head / above-head / mouth (crowns: 21, flames: 31).
            part_line[21] = (part_line[0][0] + 5 * part_line[20][0], part_line[0][1] + 5 * part_line[20][1])
            part_line[31] = (part_line[0][0] + 6 * part_line[20][0], part_line[0][1] + 6 * part_line[20][1])
            part_line[23] = (part_line[0][0] - 2 * part_line[20][0], part_line[0][1] - 2 * part_line[20][1])
            # Hip-derived anchors (40 = hip offset vector).
            part_line[40] = (part_line[11][0] - part_line[12][0], part_line[11][1] - part_line[12][1])
            part_line[41] = (part_line[11][0] + 2 * part_line[40][0], part_line[11][1] + 2 * part_line[40][1])
            part_line[42] = (part_line[11][0] + part_line[40][0], part_line[11][1] + part_line[40][1])
            part_line[43] = (part_line[12][0] - 2 * part_line[40][0], part_line[12][1] - 2 * part_line[40][1])
            part_line[44] = (part_line[12][0] - part_line[40][0], part_line[12][1] - part_line[40][1])
            # Fixed screen-space anchors (fractions of the *full-res* frame
            # size, as in the original).
            part_line[50] = (int(3 * width / 7), int(height / 4))
            part_line[51] = (int(width / 8), int(height / 4))
            part_line[52] = (int(width / 4), int(height / 4))
            part_line[53] = (int(3 * width / 8), int(height / 4))
            part_line[54] = (int(width / 6.2), int(height / 9.157))
            part_line[55] = (int(width / 3.326), int(height / 58))
            part_line[56] = (int(width / 6.514), int(height / 6.842))
            part_line[57] = (int(width / 3.406), int(height / 8.533))
            part_line[58] = (int(width / 2.319), int(height / 9.846))
            part_line[59] = (int(width / 4.24), int(height / 3.052))
            part_line[60] = (int(width / 3.212), int(height / 2.74))
            for gif, scale, replay_speed, start, end, p1, p2, p3 in effects:
                img = put_gif(im_name, img, part_line, p1, p2, p3,
                              scale, replay_speed, start, end, gif_dir + gif)
        except KeyError:
            # A required keypoint was missing -- drop this human's overlays.
            img = bg
    img = cv2.resize(img, (width, height), interpolation=cv2.INTER_CUBIC)
    return img
def getTime(time1=0):
    """Simple stopwatch helper.

    Arguments:
        time1 -- a previous time.time() value, or 0/falsy to start timing.

    Returns:
        time.time() when called without a prior timestamp, otherwise a
        (now, elapsed_seconds) tuple.
    """
    # Fix: the final line of this block carried fused dataset-dump residue
    # ("| 0.473414 | 0.225822") that is not valid Python; removed.
    if not time1:
        return time.time()
    # Elapsed seconds since the supplied timestamp.
    interval = time.time() - time1
    return time.time(), interval
# Python 2 script: subscribes to Kinect body frames published over ZeroMQ
# (protobuf KinectBodies messages) and prints them to stdout. Subscriber
# host/port/topic default to a local endpoint and are optionally resolved
# from an Indriya parameter server when -p/-n are given.
__author__ = 'Praveenkumar'
import os
import zmq
import sys
import time
import argparse
import thread
import json
from naoqi import ALProxy
from os.path import dirname
from os.path import abspath
# Locate the generated protobuf message modules under $INDRIYA_ROOT.
dev = os.environ["INDRIYA_ROOT"]
dir1 = os.path.join(dev,"scripts","msgs")
sys.path.append(dir1)
import kinect_body_pb2
useSensors = False
if __name__ == "__main__":
# Real Robot
# Default subscriber endpoint, used when no parameter server is given.
SUB_IP = "tcp://localhost"
SUB_PORT = "5570"
SUB_TOPIC = "KSP"
try:
# With -p/-n arguments, look up the subscriber endpoint on the
# parameter server; otherwise fall back to the local defaults.
if (len(sys.argv) >= 3):
print sys.argv
parser = argparse.ArgumentParser(description='Kinect state listener')
parser.add_argument('-p','--param', help='Parameter server address', required=True)
parser.add_argument('-n','--name', help='Name of the node', required=True)
args = vars(parser.parse_args())
name = args["name"]
paramServer = args["param"]
# Utils
import parameter_utils
node = parameter_utils.getNodeParameters(name,paramServer,1000)
if node != None:
print node
sub = parameter_utils.getSubscriberInfo(node,"KinectBodies")
if sub != None:
SUB_IP = sub.host.encode('utf-8')
SUB_PORT = sub.port
SUB_TOPIC = sub.topic.encode('utf-8')
print "Subscriber info retrieved"
else:
print "Start locally"
# ZeroMQ SUB socket filtered to the Kinect topic; messages arrive as
# (topic frame, payload frame) pairs.
context = zmq.Context()
sock = context.socket(zmq.SUB)
sock.connect("%s:%s" % (SUB_IP,SUB_PORT))
sock.setsockopt(zmq.SUBSCRIBE, SUB_TOPIC)
while True:
topic = sock.recv()
message = sock.recv()
bodies = kinect_body_pb2.KinectBodies()
#print message
bodies.ParseFromString(message)
print bodies
# NOTE(review): bare except swallows every error (including Ctrl-C on
# Python 2) -- consider narrowing to Exception and re-raising.
except:
print "Exception occured : ", sys.exc_info()
raw_input("Press enter to continue ... ") | src/indriya_robot_interface/KinectStateListener.py | __author__ = 'Praveenkumar'
import os
import zmq
import sys
import time
import argparse
import thread
import json
from naoqi import ALProxy
from os.path import dirname
from os.path import abspath
dev = os.environ["INDRIYA_ROOT"]
dir1 = os.path.join(dev,"scripts","msgs")
sys.path.append(dir1)
import kinect_body_pb2
useSensors = False
if __name__ == "__main__":
# Real Robot
SUB_IP = "tcp://localhost"
SUB_PORT = "5570"
SUB_TOPIC = "KSP"
try:
if (len(sys.argv) >= 3):
print sys.argv
parser = argparse.ArgumentParser(description='Kinect state listener')
parser.add_argument('-p','--param', help='Parameter server address', required=True)
parser.add_argument('-n','--name', help='Name of the node', required=True)
args = vars(parser.parse_args())
name = args["name"]
paramServer = args["param"]
# Utils
import parameter_utils
node = parameter_utils.getNodeParameters(name,paramServer,1000)
if node != None:
print node
sub = parameter_utils.getSubscriberInfo(node,"KinectBodies")
if sub != None:
SUB_IP = sub.host.encode('utf-8')
SUB_PORT = sub.port
SUB_TOPIC = sub.topic.encode('utf-8')
print "Subscriber info retrieved"
else:
print "Start locally"
context = zmq.Context()
sock = context.socket(zmq.SUB)
sock.connect("%s:%s" % (SUB_IP,SUB_PORT))
sock.setsockopt(zmq.SUBSCRIBE, SUB_TOPIC)
while True:
topic = sock.recv()
message = sock.recv()
bodies = kinect_body_pb2.KinectBodies()
#print message
bodies.ParseFromString(message)
print bodies
except:
print "Exception occured : ", sys.exc_info()
raw_input("Press enter to continue ... ") | 0.086925 | 0.037999 |
import json
import requests
# 7
class Organization:
"""`Organization` is for handling activity for an organization.
This class handles registering a license, creating a user within an organization, and
logging in an organization.
https://dfxapiversion10.docs.apiary.io/#reference/0/organizations
*Currently incomplete and more endpoints will be added in subsequent updates.*
"""
def __init__(self, license_key: str, server_url: str):
"""[summary]
Arguments:
license_key {str} -- DFX API license key
server_url {str} -- DFX API REST server URL
"""
self.license_key = license_key
self.server_url = server_url
# 705
def registerLicense(self, device_name: str):
"""Register a license given a DFX API license key and a device name
in `string` format, using a REST `post` request.
https://dfxapiversion10.docs.apiary.io/#reference/0/organizations/register-license
Arguments:
device_name {str} -- Device name
Returns:
{str} -- JSON encoded response
"""
# [ 705, "1.0", "POST", "registerLicense", "/organizations/licenses" ],
values = {
"Key": self.license_key,
"DeviceTypeID": "LINUX",
"Name": device_name,
"Identifier": "DFXCLIENT",
"Version": "1.0.0"
}
values = json.dumps(values)
headers = {'Content-Type': 'application/json'}
uri = self.server_url + '/organizations/licenses'
r = requests.post(uri, data=values, headers=headers)
return r.json()
# 713
def createUser(self, api_token: str, data: dict):
"""Create a user with the current organization given user data using a POST.
https://dfxapiversion10.docs.apiary.io/#reference/0/organizations/create-user
Arguments:
api_token {str} -- DFX API token
data {dict} -- User data
Returns:
{str} -- JSON encoded response
"""
# [ 713, "1.0", "POST", "createUser", "/organizations/users" ],
# Data format:
# values = """
# {
# "FirstName": "John",
# "LastName": "Appleseed",
# "Email": "<EMAIL>",
# "Password": "<PASSWORD>",
# "Gender": "male",
# "DateOfBirth": "1986-02-10",
# "HeightCm": "180",
# "WeightKg": "70"
# }
values = {}
for key, val in data:
values[key] = str(val)
values = json.dumps(values)
auth = 'Bearer ' + api_token
header = {'Content-Type': 'application/json', 'Authorization': auth}
uri = self.server_url + '/organizations/users'
r = requests.post(uri, data=values, headers=header)
return r.json()
# 717
def login(self, api_token, email, pw, orgID):
"""Login into an organization using a POST.
https://dfxapiversion10.docs.apiary.io/#reference/0/organizations/login
Arguments:
api_token {str} -- DFX token
email {str} -- Email address
pw {str} -- Password
orgID {str} -- Organisation ID
Returns:
str -- JSON encoded response
"""
# [ 717, "1.0", "POST", "login", "/organizations/auth" ],
values = {"Email": email, "Password": pw, "Identifier": orgID}
auth = 'Bearer ' + api_token
header = {'Content-Type': 'application/json', 'Authorization': auth}
uri = self.server_url + '/organizations/auth'
r = requests.post(uri, data=values, headers=header)
return r.json() | dfxapiclient/organizations.py | import json
import requests
# 7
class Organization:
    """`Organization` is for handling activity for an organization.

    This class handles registering a license, creating a user within an
    organization, and logging in an organization.
    https://dfxapiversion10.docs.apiary.io/#reference/0/organizations

    *Currently incomplete and more endpoints will be added in subsequent updates.*
    """

    def __init__(self, license_key: str, server_url: str):
        """Store the credentials used by every endpoint call.

        Arguments:
            license_key {str} -- DFX API license key
            server_url {str} -- DFX API REST server URL
        """
        self.license_key = license_key
        self.server_url = server_url

    # 705
    def registerLicense(self, device_name: str):
        """Register a license given a DFX API license key and a device name
        in `string` format, using a REST `post` request.
        https://dfxapiversion10.docs.apiary.io/#reference/0/organizations/register-license

        Arguments:
            device_name {str} -- Device name

        Returns:
            {dict} -- decoded JSON response
        """
        # [ 705, "1.0", "POST", "registerLicense", "/organizations/licenses" ],
        values = {
            "Key": self.license_key,
            "DeviceTypeID": "LINUX",
            "Name": device_name,
            "Identifier": "DFXCLIENT",
            "Version": "1.0.0"
        }
        payload = json.dumps(values)
        headers = {'Content-Type': 'application/json'}
        uri = self.server_url + '/organizations/licenses'
        r = requests.post(uri, data=payload, headers=headers)
        return r.json()

    # 713
    def createUser(self, api_token: str, data: dict):
        """Create a user with the current organization given user data using a POST.
        https://dfxapiversion10.docs.apiary.io/#reference/0/organizations/create-user

        Arguments:
            api_token {str} -- DFX API token
            data {dict} -- User data, e.g. FirstName, LastName, Email,
                Password, Gender, DateOfBirth, HeightCm, WeightKg

        Returns:
            {dict} -- decoded JSON response
        """
        # [ 713, "1.0", "POST", "createUser", "/organizations/users" ],
        values = {}
        # BUGFIX: iterating a dict directly yields only its keys, so the
        # original `for key, val in data:` raised ValueError on normal
        # string keys; .items() yields the (key, value) pairs intended here.
        for key, val in data.items():
            values[key] = str(val)
        payload = json.dumps(values)
        auth = 'Bearer ' + api_token
        header = {'Content-Type': 'application/json', 'Authorization': auth}
        uri = self.server_url + '/organizations/users'
        r = requests.post(uri, data=payload, headers=header)
        return r.json()

    # 717
    def login(self, api_token, email, pw, orgID):
        """Login into an organization using a POST.
        https://dfxapiversion10.docs.apiary.io/#reference/0/organizations/login

        Arguments:
            api_token {str} -- DFX token
            email {str} -- Email address
            pw {str} -- Password
            orgID {str} -- Organisation ID

        Returns:
            {dict} -- decoded JSON response
        """
        # [ 717, "1.0", "POST", "login", "/organizations/auth" ],
        values = {"Email": email, "Password": pw, "Identifier": orgID}
        auth = 'Bearer ' + api_token
        header = {'Content-Type': 'application/json', 'Authorization': auth}
        uri = self.server_url + '/organizations/auth'
        # BUGFIX: the body must be JSON-encoded to match the declared
        # Content-Type; passing the raw dict made requests form-encode it
        # (inconsistent with the other endpoints in this class).
        r = requests.post(uri, data=json.dumps(values), headers=header)
        return r.json()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from catboost import CatBoostClassifier
from sklearn.preprocessing import StandardScaler
import sklearn.metrics as metrics
import streamlit as st
from bokeh.models.widgets import Div
from PIL import Image
# ---- Streamlit page setup and sidebar controls -------------------------
st.set_option('deprecation.showPyplotGlobalUse', False)
model_results = st.sidebar.checkbox('SHOW MODEL RESULTS')
algorithms = ('', 'DecisionTreeClassifier', 'LogisticRegression',
              'RandomForestClassifier', 'CatBoostClassifier', 'SVMCLassifier')
ml_algorithm = st.sidebar.selectbox("ML ALGORITHM", algorithms)
st.write("""
# Football Player Ranking Project
""")
st.text("")
image = Image.open('mbappe.png')
st.image(image, use_column_width=True)
st.text("")

# ---- Load data and binarise the target ---------------------------------
df = pd.read_csv("Players.csv")
abc = sns.histplot(data=df, x="overall_rating")
# overall_rating becomes 1 for top players (>= 85) and 0 otherwise.
for row in range(df.shape[0]):
    rating = df.loc[row, "overall_rating"]
    if (rating >= 42.0) & (rating < 85.0):
        df.loc[row, "overall_rating"] = 0
    elif rating >= 85.0:
        df.loc[row, "overall_rating"] = 1
df["overall_rating"] = df["overall_rating"].astype("int64")

# ---- Impute missing values ---------------------------------------------
df["attacking_work_rate"].fillna("medium", inplace=True)
col_list = ["volleys", "curve", "agility", "balance", "jumping", "vision", "sliding_tackle"]
for column in col_list:
    df[column].fillna(round(df[column].mean(), 0), inplace=True)

# ---- Feature selection and train/test split ----------------------------
selected_columns = ["potential", "reactions", "short_passing", "vision",
                    "long_passing", "ball_control", "shot_power", "long_shots",
                    "curve", "dribbling", "crossing", "volleys", "positioning",
                    "free_kick_accuracy", "penalties", "aggression", "finishing",
                    "stamina", "heading_accuracy", "overall_rating"]
modelling_df = df[selected_columns]
# Last 8678 rows train, first 2170 rows test (fixed, ordered split).
trainData = modelling_df.tail(8678)
testData = modelling_df.head(2170)
X = trainData.drop("overall_rating", 1)
y = trainData.overall_rating
X_test = testData.drop("overall_rating", 1)
y_test = testData.overall_rating

# ---- Standardise (fit on train only) and balance with SMOTE ------------
sc = StandardScaler()
X = pd.DataFrame(sc.fit_transform(X))
X_test = pd.DataFrame(sc.transform(X_test))
oversample = SMOTE(random_state=42)
X_train, y_train = oversample.fit_resample(X, y)
chck = pd.DataFrame()
chck['overall_rating'] = y_train
def DTClassifier(X,y,X1,y1,valueList) :
st.text("")
st.text("")
st.subheader("Decision Tree Classifier Model Results")
dtc=DecisionTreeClassifier(random_state=42)
dtc.fit(X, y)
preds = dtc.predict(X1)
score = dtc.score(X1, y1)
st.text('Model Report:\n ' + classification_report(y1, preds))
cm = confusion_matrix(y1, preds)
plt.figure(figsize=(10,8))
sns.heatmap(cm, annot=True)
st.pyplot()
st.text("")
st.text("")
st.text("")
st.text("")
fpr, tpr, threshold = metrics.roc_curve(y1, preds)
roc_auc = metrics.auc(fpr, tpr)
plt.title('ROC Curve')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
st.pyplot()
st.text("")
st.text("")
st.text("")
st.text("")
st.subheader("Decision Tree Classifier Prediction Result")
prediction = dtc.predict([valueList])
if prediction == 1 :
st.write("A class football player !")
else :
st.write("B class football player !")
def LRClassifier(X,y,X1,y1,valueList) :
st.text("")
st.text("")
st.subheader("Logistic Regression Model Results")
lgr=LogisticRegression(random_state=42 , max_iter = 200)
lgr.fit(X, y)
preds = lgr.predict(X1)
score = lgr.score(X1, y1)
st.text('Model Report:\n ' + classification_report(y1, preds))
cm = confusion_matrix(y1, preds)
plt.figure(figsize=(10,8))
sns.heatmap(cm, annot=True)
st.pyplot()
st.text("")
st.text("")
st.text("")
st.text("")
fpr, tpr, threshold = metrics.roc_curve(y1, preds)
roc_auc = metrics.auc(fpr, tpr)
plt.title('ROC Curve')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
st.pyplot()
st.subheader("Logistic Regression Prediction Result")
prediction = lgr.predict([valueList])
if prediction == 1 :
st.write("A class football player !")
else :
st.write("B class football player !")
def RFClassifier(X,y,X1,y1,valueList) :
st.text("")
st.text("")
st.subheader("Random Forest Classifier Model Results")
rfc = RandomForestClassifier(random_state=42)
rfc.fit(X, y)
preds = rfc.predict(X1)
score = rfc.score(X1, y1)
st.text('Model Report:\n ' + classification_report(y1, preds))
cm = confusion_matrix(y1, preds)
plt.figure(figsize=(10,8))
sns.heatmap(cm, annot=True)
st.pyplot()
st.text("")
st.text("")
st.text("")
st.text("")
fpr, tpr, threshold = metrics.roc_curve(y1, preds)
roc_auc = metrics.auc(fpr, tpr)
plt.title('ROC Curve')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
st.pyplot()
st.subheader("Random Forest Classifier Prediction Result")
prediction = rfc.predict([valueList])
if prediction == 1 :
st.write("A class football player !")
else :
st.write("B class football player !")
def CBClassifier(X, y, X1, y1, valueList):
    """Train a CatBoost classifier, render its evaluation in Streamlit
    (classification report, confusion matrix, ROC curve), and show the
    class predicted for ``valueList``.

    Args:
        X, y: training features / labels.
        X1, y1: test features / labels.
        valueList: one feature row (list of attribute values) to classify.
    """
    st.text("")
    st.text("")
    st.subheader("CatBoost Classifier Model Results")
    cbc = CatBoostClassifier(n_estimators=200, max_depth=5, verbose=0, random_state=42)
    cbc.fit(X, y)
    preds = cbc.predict(X1)
    # (removed unused `score = cbc.score(X1, y1)` — value was never displayed)
    st.text('Model Report:\n ' + classification_report(y1, preds))
    # Confusion-matrix heatmap.
    cm = confusion_matrix(y1, preds)
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=True)
    st.pyplot()
    st.text("")
    st.text("")
    st.text("")
    st.text("")
    # ROC curve built from hard label predictions (single-threshold ROC).
    fpr, tpr, threshold = metrics.roc_curve(y1, preds)
    roc_auc = metrics.auc(fpr, tpr)
    plt.title('ROC Curve')
    plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
    st.pyplot()
    st.subheader("CatBoost Classifier Prediction Result")
    prediction = cbc.predict([valueList])
    if prediction == 1:
        st.write("A class football player !")
    else:
        st.write("B class football player !")
def SVMClassifier(X, y, X1, y1, valueList):
    """Train a support-vector classifier, render its evaluation in
    Streamlit (classification report, confusion matrix, ROC curve), and
    show the class predicted for ``valueList``.

    Args:
        X, y: training features / labels.
        X1, y1: test features / labels.
        valueList: one feature row (list of attribute values) to classify.
    """
    st.text("")
    st.text("")
    st.subheader("SVM Classifier Model Results")
    svc = SVC(random_state=42)
    svc.fit(X, y)
    preds = svc.predict(X1)
    # (removed unused `score = svc.score(X1, y1)` — value was never displayed)
    st.text('Model Report:\n ' + classification_report(y1, preds))
    # Confusion-matrix heatmap.
    cm = confusion_matrix(y1, preds)
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=True)
    st.pyplot()
    st.text("")
    st.text("")
    st.text("")
    st.text("")
    # ROC curve built from hard label predictions (single-threshold ROC).
    fpr, tpr, threshold = metrics.roc_curve(y1, preds)
    roc_auc = metrics.auc(fpr, tpr)
    plt.title('ROC Curve')
    plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
    st.pyplot()
    st.subheader("SVM CLassifier Prediction Result")
    prediction = svc.predict([valueList])
    if prediction == 1:
        st.write("A class football player !")
    else:
        st.write("B class football player !")
# Landing page: dataset description and exploratory charts, shown while the
# "SHOW MODEL RESULTS" checkbox is unticked.
if (model_results == False) :
    st.subheader("About Data")
    st.text("")
    st.write("""
    ##### The Ultimate Soccer database for data analysis and machine learning
    What you get:
    • +25,000 matches
    • +10,000 players
    • 11 European Countries with their lead championship
    • Seasons 2008 to 2016
    • Players and Teams' attributes* sourced from EA Sports' FIFA video game series, including the weekly updates
    • Team line up with squad formation (X, Y coordinates)
    • Betting odds from up to 10 providers
    • Detailed match events (goal types, possession, corner, cross, fouls, cards etc…) for +10,000 matches
    """)
    st.text("")
    if st.button('👉🏻 Data Source 👈🏻'):
        # Two JS variants; the second assignment wins, so the redirect
        # happens in the current tab (the first line is kept for reference).
        js = "window.open('https://www.kaggle.com/hugomathien/soccer')" # New tab or window
        js = "window.location.href = 'https://www.kaggle.com/hugomathien/soccer'" # Current tab
        # Bokeh Div with an <img onerror=...> trick to execute JS in Streamlit.
        html = '<img src onerror="{}">'.format(js)
        div = Div(text=html)
        st.bokeh_chart(div)
    st.text("")
    st.text("")
    st.text("")
    st.subheader("Overall Rating Distribution")
    # `abc` is the module-level histplot Axes; st.pyplot() renders the
    # current matplotlib figure it was drawn on.
    abc
    st.pyplot()
    st.text("")
    st.text("")
    st.text("")
    st.text("")
    st.text("")
    st.subheader("Overall Rating Correlation Heatmap")
    # Heatmap of the k features most correlated with overall_rating.
    k = 10 #number of variables for heatmap
    corrmat = df.corr()
    cols = corrmat.nlargest(k, 'overall_rating')['overall_rating'].index
    cm = np.corrcoef(df[cols].values.T)
    sns.set(font_scale=1.5)
    plt.figure(figsize=(20,20))
    hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 13}, yticklabels=cols.values, xticklabels=cols.values)
    plt.show()
    st.pyplot()
    st.text("")
    st.text("")
    st.text("")
    st.text("")
    st.subheader("Imbalanced Data Distribution")
    # Class balance before SMOTE oversampling.
    plt.figure(figsize=(10,8))
    sns.countplot(modelling_df['overall_rating'])
    plt.show()
    st.pyplot()
    st.text("")
    st.text("")
    st.text("")
    st.text("")
    st.subheader("Oversampled Data Distribution")
    # Class balance after SMOTE oversampling.
    plt.figure(figsize=(10,8))
    sns.countplot(chck['overall_rating'])
    plt.show()
    st.pyplot()
# Modelling page: 7 attributes come from sidebar sliders, the other 12
# default to 50, and the chosen classifier function is invoked.
elif model_results == True :
    potential = st.sidebar.slider(' potential ', 0, 100)
    reactions = st.sidebar.slider(' reactions ', 0, 100)
    vision = st.sidebar.slider(' vision ', 0, 100)
    volleys = st.sidebar.slider(' volleys ', 0, 100)
    penalties = st.sidebar.slider(' penalties ', 0, 100)
    long_passing = st.sidebar.slider(' long_passing ', 0, 100)
    short_passing = st.sidebar.slider(' short_passing ', 0, 100)
    ball_control = 50
    curve = 50
    finishing = 50
    free_kick_accuracy = 50
    positioning = 50
    long_shots = 50
    dribbling = 50
    crossing = 50
    shot_power = 50
    aggression = 50
    stamina = 50
    heading_accuracy = 50
    # NOTE(review): this feature order does NOT match `selected_columns`
    # used for training, and the raw 0-100 values are not passed through
    # the fitted StandardScaler — predictions are likely computed on
    # misordered, unscaled features; verify against the training pipeline.
    new = [potential,reactions,vision,volleys,penalties,long_passing,short_passing,ball_control,curve,finishing,free_kick_accuracy,positioning,long_shots,dribbling,crossing,shot_power,aggression,stamina,heading_accuracy ]
    if (ml_algorithm == 'DecisionTreeClassifier') :
        DTClassifier(X_train,y_train,X_test,y_test,new)
    elif (ml_algorithm == 'LogisticRegression') :
        LRClassifier(X_train,y_train,X_test,y_test,new)
    elif (ml_algorithm == 'RandomForestClassifier') :
        RFClassifier(X_train,y_train,X_test,y_test,new)
    elif (ml_algorithm == 'CatBoostClassifier') :
        CBClassifier(X_train,y_train,X_test,y_test,new)
    elif (ml_algorithm == 'SVMCLassifier') :
        SVMClassifier(X_train,y_train,X_test,y_test,new)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from catboost import CatBoostClassifier
from sklearn.preprocessing import StandardScaler
import sklearn.metrics as metrics
import streamlit as st
from bokeh.models.widgets import Div
from PIL import Image
# Silence the warning for calling st.pyplot() without an explicit figure.
st.set_option('deprecation.showPyplotGlobalUse', False)
# Sidebar controls: toggle between the EDA page and the modelling page,
# and pick the algorithm to run.
model_results = st.sidebar.checkbox('SHOW MODEL RESULTS')
algorithms = ('','DecisionTreeClassifier' ,'LogisticRegression' ,'RandomForestClassifier' ,'CatBoostClassifier' ,'SVMCLassifier')
ml_algorithm = st.sidebar.selectbox("ML ALGORITHM" , algorithms)
st.write("""
# Football Player Ranking Project
""")
st.text("")
image = Image.open('mbappe.png')
st.image(image, use_column_width = True)
st.text("")
df = pd.read_csv("Players.csv")
# Histogram of the raw ratings; re-rendered later on the landing page.
abc = sns.histplot(data=df, x="overall_rating")
# Binarise the target: rating in [42, 85) -> class 0 ("B"), >= 85 -> class 1 ("A").
for i in range(df.shape[0]) :
    if (df.loc[i, "overall_rating"] >= 42.0) & (df.loc[i , "overall_rating"] < 85.0) :
        df.loc[i, "overall_rating"] = 0
    elif (df.loc[i, "overall_rating"] >= 85.0) :
        df.loc[i, "overall_rating"] = 1
df["overall_rating"] = df["overall_rating"].astype("int64")
# Missing-value handling: constant for work rate, rounded column mean elsewhere.
df["attacking_work_rate"].fillna("medium", inplace = True)
col_list = ["volleys","curve","agility","balance","jumping","vision","sliding_tackle"]
for item in col_list :
    df[item].fillna(round(df[item].mean() , 0) , inplace = True)
selected_columns = ["potential" , "reactions" , "short_passing" , "vision" , "long_passing" , "ball_control" , "shot_power" , "long_shots" , "curve" , "dribbling" , "crossing" , "volleys" , "positioning" , "free_kick_accuracy" , "penalties" ,
"aggression" , "finishing" , "stamina" , "heading_accuracy" , "overall_rating"]
modelling_df = df[selected_columns]
# Hold-out split without shuffling: last 8678 rows train, first 2170 test.
trainData = modelling_df.tail(8678)
testData = modelling_df.head(2170)
X = trainData.drop("overall_rating", 1)
y = trainData.overall_rating
X_test = testData.drop("overall_rating", 1)
y_test = testData.overall_rating
# Standardise (scaler fitted on train only), then balance the training
# classes with SMOTE.
sc = StandardScaler()
X = pd.DataFrame(sc.fit_transform(X))
X_test = pd.DataFrame(sc.transform(X_test))
oversample = SMOTE( random_state=42)
X_train, y_train = oversample.fit_resample(X, y)
# `chck` only exists to plot the post-SMOTE class distribution.
chck = pd.DataFrame()
chck['overall_rating'] = y_train
def DTClassifier(X, y, X1, y1, valueList):
    """Train a decision-tree classifier, render its evaluation in
    Streamlit (classification report, confusion matrix, ROC curve), and
    show the class predicted for ``valueList``.

    Args:
        X, y: training features / labels.
        X1, y1: test features / labels.
        valueList: one feature row (list of attribute values) to classify.
    """
    st.text("")
    st.text("")
    st.subheader("Decision Tree Classifier Model Results")
    dtc = DecisionTreeClassifier(random_state=42)
    dtc.fit(X, y)
    preds = dtc.predict(X1)
    # (removed unused `score = dtc.score(X1, y1)` — value was never displayed)
    st.text('Model Report:\n ' + classification_report(y1, preds))
    # Confusion-matrix heatmap.
    cm = confusion_matrix(y1, preds)
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=True)
    st.pyplot()
    st.text("")
    st.text("")
    st.text("")
    st.text("")
    # ROC curve built from hard label predictions (single-threshold ROC).
    fpr, tpr, threshold = metrics.roc_curve(y1, preds)
    roc_auc = metrics.auc(fpr, tpr)
    plt.title('ROC Curve')
    plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
    st.pyplot()
    st.text("")
    st.text("")
    st.text("")
    st.text("")
    st.subheader("Decision Tree Classifier Prediction Result")
    prediction = dtc.predict([valueList])
    if prediction == 1:
        st.write("A class football player !")
    else:
        st.write("B class football player !")
def LRClassifier(X, y, X1, y1, valueList):
    """Train a logistic-regression model, render its evaluation in
    Streamlit (classification report, confusion matrix, ROC curve), and
    show the class predicted for ``valueList``.

    Args:
        X, y: training features / labels.
        X1, y1: test features / labels.
        valueList: one feature row (list of attribute values) to classify.
    """
    st.text("")
    st.text("")
    st.subheader("Logistic Regression Model Results")
    lgr = LogisticRegression(random_state=42, max_iter=200)
    lgr.fit(X, y)
    preds = lgr.predict(X1)
    # (removed unused `score = lgr.score(X1, y1)` — value was never displayed)
    st.text('Model Report:\n ' + classification_report(y1, preds))
    # Confusion-matrix heatmap.
    cm = confusion_matrix(y1, preds)
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=True)
    st.pyplot()
    st.text("")
    st.text("")
    st.text("")
    st.text("")
    # ROC curve built from hard label predictions (single-threshold ROC).
    fpr, tpr, threshold = metrics.roc_curve(y1, preds)
    roc_auc = metrics.auc(fpr, tpr)
    plt.title('ROC Curve')
    plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
    st.pyplot()
    st.subheader("Logistic Regression Prediction Result")
    prediction = lgr.predict([valueList])
    if prediction == 1:
        st.write("A class football player !")
    else:
        st.write("B class football player !")
def RFClassifier(X, y, X1, y1, valueList):
    """Train a random-forest classifier, render its evaluation in
    Streamlit (classification report, confusion matrix, ROC curve), and
    show the class predicted for ``valueList``.

    Args:
        X, y: training features / labels.
        X1, y1: test features / labels.
        valueList: one feature row (list of attribute values) to classify.
    """
    st.text("")
    st.text("")
    st.subheader("Random Forest Classifier Model Results")
    rfc = RandomForestClassifier(random_state=42)
    rfc.fit(X, y)
    preds = rfc.predict(X1)
    # (removed unused `score = rfc.score(X1, y1)` — value was never displayed)
    st.text('Model Report:\n ' + classification_report(y1, preds))
    # Confusion-matrix heatmap.
    cm = confusion_matrix(y1, preds)
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=True)
    st.pyplot()
    st.text("")
    st.text("")
    st.text("")
    st.text("")
    # ROC curve built from hard label predictions (single-threshold ROC).
    fpr, tpr, threshold = metrics.roc_curve(y1, preds)
    roc_auc = metrics.auc(fpr, tpr)
    plt.title('ROC Curve')
    plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
    st.pyplot()
    st.subheader("Random Forest Classifier Prediction Result")
    prediction = rfc.predict([valueList])
    if prediction == 1:
        st.write("A class football player !")
    else:
        st.write("B class football player !")
def CBClassifier(X, y, X1, y1, valueList):
    """Train a CatBoost classifier, render its evaluation in Streamlit
    (classification report, confusion matrix, ROC curve), and show the
    class predicted for ``valueList``.

    Args:
        X, y: training features / labels.
        X1, y1: test features / labels.
        valueList: one feature row (list of attribute values) to classify.
    """
    st.text("")
    st.text("")
    st.subheader("CatBoost Classifier Model Results")
    cbc = CatBoostClassifier(n_estimators=200, max_depth=5, verbose=0, random_state=42)
    cbc.fit(X, y)
    preds = cbc.predict(X1)
    # (removed unused `score = cbc.score(X1, y1)` — value was never displayed)
    st.text('Model Report:\n ' + classification_report(y1, preds))
    # Confusion-matrix heatmap.
    cm = confusion_matrix(y1, preds)
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=True)
    st.pyplot()
    st.text("")
    st.text("")
    st.text("")
    st.text("")
    # ROC curve built from hard label predictions (single-threshold ROC).
    fpr, tpr, threshold = metrics.roc_curve(y1, preds)
    roc_auc = metrics.auc(fpr, tpr)
    plt.title('ROC Curve')
    plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
    st.pyplot()
    st.subheader("CatBoost Classifier Prediction Result")
    prediction = cbc.predict([valueList])
    if prediction == 1:
        st.write("A class football player !")
    else:
        st.write("B class football player !")
def SVMClassifier(X, y, X1, y1, valueList):
    """Train a support-vector classifier, render its evaluation in
    Streamlit (classification report, confusion matrix, ROC curve), and
    show the class predicted for ``valueList``.

    Args:
        X, y: training features / labels.
        X1, y1: test features / labels.
        valueList: one feature row (list of attribute values) to classify.
    """
    st.text("")
    st.text("")
    st.subheader("SVM Classifier Model Results")
    svc = SVC(random_state=42)
    svc.fit(X, y)
    preds = svc.predict(X1)
    # (removed unused `score = svc.score(X1, y1)` — value was never displayed)
    st.text('Model Report:\n ' + classification_report(y1, preds))
    # Confusion-matrix heatmap.
    cm = confusion_matrix(y1, preds)
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=True)
    st.pyplot()
    st.text("")
    st.text("")
    st.text("")
    st.text("")
    # ROC curve built from hard label predictions (single-threshold ROC).
    fpr, tpr, threshold = metrics.roc_curve(y1, preds)
    roc_auc = metrics.auc(fpr, tpr)
    plt.title('ROC Curve')
    plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
    st.pyplot()
    st.subheader("SVM CLassifier Prediction Result")
    prediction = svc.predict([valueList])
    if prediction == 1:
        st.write("A class football player !")
    else:
        st.write("B class football player !")
# Landing page: dataset description and exploratory charts, shown while the
# "SHOW MODEL RESULTS" checkbox is unticked.
if (model_results == False) :
    st.subheader("About Data")
    st.text("")
    st.write("""
    ##### The Ultimate Soccer database for data analysis and machine learning
    What you get:
    • +25,000 matches
    • +10,000 players
    • 11 European Countries with their lead championship
    • Seasons 2008 to 2016
    • Players and Teams' attributes* sourced from EA Sports' FIFA video game series, including the weekly updates
    • Team line up with squad formation (X, Y coordinates)
    • Betting odds from up to 10 providers
    • Detailed match events (goal types, possession, corner, cross, fouls, cards etc…) for +10,000 matches
    """)
    st.text("")
    if st.button('👉🏻 Data Source 👈🏻'):
        # Two JS variants; the second assignment wins, so the redirect
        # happens in the current tab (the first line is kept for reference).
        js = "window.open('https://www.kaggle.com/hugomathien/soccer')" # New tab or window
        js = "window.location.href = 'https://www.kaggle.com/hugomathien/soccer'" # Current tab
        # Bokeh Div with an <img onerror=...> trick to execute JS in Streamlit.
        html = '<img src onerror="{}">'.format(js)
        div = Div(text=html)
        st.bokeh_chart(div)
    st.text("")
    st.text("")
    st.text("")
    st.subheader("Overall Rating Distribution")
    # `abc` is the module-level histplot Axes; st.pyplot() renders the
    # current matplotlib figure it was drawn on.
    abc
    st.pyplot()
    st.text("")
    st.text("")
    st.text("")
    st.text("")
    st.text("")
    st.subheader("Overall Rating Correlation Heatmap")
    # Heatmap of the k features most correlated with overall_rating.
    k = 10 #number of variables for heatmap
    corrmat = df.corr()
    cols = corrmat.nlargest(k, 'overall_rating')['overall_rating'].index
    cm = np.corrcoef(df[cols].values.T)
    sns.set(font_scale=1.5)
    plt.figure(figsize=(20,20))
    hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 13}, yticklabels=cols.values, xticklabels=cols.values)
    plt.show()
    st.pyplot()
    st.text("")
    st.text("")
    st.text("")
    st.text("")
    st.subheader("Imbalanced Data Distribution")
    # Class balance before SMOTE oversampling.
    plt.figure(figsize=(10,8))
    sns.countplot(modelling_df['overall_rating'])
    plt.show()
    st.pyplot()
    st.text("")
    st.text("")
    st.text("")
    st.text("")
    st.subheader("Oversampled Data Distribution")
    # Class balance after SMOTE oversampling.
    plt.figure(figsize=(10,8))
    sns.countplot(chck['overall_rating'])
    plt.show()
    st.pyplot()
# Modelling page: 7 attributes come from sidebar sliders, the other 12
# default to 50, and the chosen classifier function is invoked.
elif model_results == True :
    potential = st.sidebar.slider(' potential ', 0, 100)
    reactions = st.sidebar.slider(' reactions ', 0, 100)
    vision = st.sidebar.slider(' vision ', 0, 100)
    volleys = st.sidebar.slider(' volleys ', 0, 100)
    penalties = st.sidebar.slider(' penalties ', 0, 100)
    long_passing = st.sidebar.slider(' long_passing ', 0, 100)
    short_passing = st.sidebar.slider(' short_passing ', 0, 100)
    ball_control = 50
    curve = 50
    finishing = 50
    free_kick_accuracy = 50
    positioning = 50
    long_shots = 50
    dribbling = 50
    crossing = 50
    shot_power = 50
    aggression = 50
    stamina = 50
    heading_accuracy = 50
    # NOTE(review): this feature order does NOT match `selected_columns`
    # used for training, and the raw 0-100 values are not passed through
    # the fitted StandardScaler — predictions are likely computed on
    # misordered, unscaled features; verify against the training pipeline.
    new = [potential,reactions,vision,volleys,penalties,long_passing,short_passing,ball_control,curve,finishing,free_kick_accuracy,positioning,long_shots,dribbling,crossing,shot_power,aggression,stamina,heading_accuracy ]
    if (ml_algorithm == 'DecisionTreeClassifier') :
        DTClassifier(X_train,y_train,X_test,y_test,new)
    elif (ml_algorithm == 'LogisticRegression') :
        LRClassifier(X_train,y_train,X_test,y_test,new)
    elif (ml_algorithm == 'RandomForestClassifier') :
        RFClassifier(X_train,y_train,X_test,y_test,new)
    elif (ml_algorithm == 'CatBoostClassifier') :
        CBClassifier(X_train,y_train,X_test,y_test,new)
    elif (ml_algorithm == 'SVMCLassifier') :
        SVMClassifier(X_train,y_train,X_test,y_test,new)
import poplib
import sys
from importlib import reload
from email.parser import Parser
from email.parser import BytesParser
from email.header import decode_header
from email.utils import parseaddr
import email.iterators
def decode_str(s):
    """Decode an RFC 2047 MIME-encoded header word into a str.

    Only the first decoded fragment is used (multi-fragment headers lose
    their tail, matching the original behaviour). Raw bytes returned
    without a declared charset are decoded as UTF-8 with replacement.
    """
    value, charset = decode_header(s)[0]
    if charset:
        value = value.decode(charset)
    elif isinstance(value, bytes):
        # decode_header may return bytes with no charset for malformed input.
        value = value.decode('utf-8', errors='replace')
    return value
def savefile(filename, data, path):
    """Write attachment bytes ``data`` to ``path + filename``.

    Failures are reported on stdout rather than raised (best-effort save,
    as in the original). Fixes the original's missing colon after
    ``finally`` (SyntaxError) and the ``f.close()`` NameError that occurred
    when ``open`` itself failed.
    """
    filepath = path + filename
    print('Save as: ' + filepath)
    try:
        # `with` guarantees the handle is closed on every path.
        with open(filepath, 'wb') as f:
            f.write(data)
    except OSError:
        print(filepath + ' open failed')
def guess_charset(msg):
    """Best-effort charset detection for a message part.

    Prefers the explicit Message charset; otherwise extracts the
    ``charset=`` parameter from the Content-Type header. Fixes the
    original bug where trailing parameters and surrounding quotes were
    kept (e.g. 'charset="GB2312"; format=flowed' -> '"gb2312"; format=flowed').
    """
    charset = msg.get_charset()
    if charset is None:
        content_type = msg.get('Content-Type', '').lower()
        pos = content_type.find('charset=')
        if pos >= 0:
            # Take only the parameter value: stop at ';' and strip quotes.
            charset = content_type[pos + 8:].split(';')[0].strip().strip('"\'')
    return charset
def print_info(msg):
    """Print From/To/Subject headers of ``msg``, save attachments under the
    module-level ``mypath`` directory, and print text/html body parts."""
    # Headers: decode RFC 2047 words; re-format address headers.
    for header in ['From', 'To', 'Subject']:
        value = msg.get(header, '')
        if value:
            if header == 'Subject':
                value = decode_str(value)
            else:
                hdr, addr = parseaddr(value)
                name = decode_str(addr)
                value = name + ' < ' + addr + ' > '
            print(header + ':' + value)
    # Walk every MIME part: attachments get saved, text parts get printed.
    for part in msg.walk():
        filename = part.get_filename()
        content_type = part.get_content_type()
        charset = guess_charset(part)
        if filename:
            filename = decode_str(filename)
            data = part.get_payload(decode=True)
            # BUG FIX: original tested `filename != None or filename != ''`,
            # which is always true; require a genuinely non-empty name.
            if filename:
                print('Accessory: ' + filename)
                savefile(filename, data, mypath)
        else:
            email_content_type = ''
            content = ''
            if content_type == 'text/plain':
                email_content_type = 'text'
            elif content_type == 'text/html':
                email_content_type = 'html'
            if charset:
                content = part.get_payload(decode=True).decode(charset)
            print(email_content_type + ' ' + content)
# --- Connection settings (credentials are redacted placeholders) ---
email = '<EMAIL>'
password = '<PASSWORD>'
pop3_server = 'pop.163.com'
# Directory attachments are written to by savefile().
mypath = 'D://email/'
# Connect and authenticate over plain POP3 (port 110, unencrypted).
server = poplib.POP3(pop3_server, 110)
server.user(email)
server.pass_(password)
print('Message: %s. Size: %s' % server.stat())
resp, mails, objects = server.list()
# Fetch the most recent message (POP3 message numbers are 1-based).
index = len(mails)
resp, lines, octets = server.retr(index)
lists = []
for e in lines:
    lists.append(e.decode())
# Reassemble the raw RFC 822 text and parse it into a Message tree.
msg_content = '\r\n'.join(lists)
msg = Parser().parsestr(msg_content)
print_info(msg)
#server.dele(index)
server.quit()
class POP3Customize(object):
    """Thin wrapper around poplib.POP3 that caches one authenticated
    connection (credentials come from the module-level email / password /
    pop3_server globals)."""

    def __init__(self):
        # Cached connection; created lazily by execute().
        self.pop3 = None

    def execute(self) -> poplib.POP3:
        """Return the cached connection, logging in on first use.

        BUG FIX: the annotation was ``-> POP3`` (NameError: only ``poplib``
        is imported), and the new connection was never stored on
        ``self.pop3``, so caching never worked and the other methods
        crashed on ``None``.
        """
        if self.pop3:
            return self.pop3
        server = poplib.POP3(pop3_server, 110)
        server.user(email)
        server.pass_(password)
        self.pop3 = server  # cache so later calls reuse the session
        return server

    def get_stat_of_mail(self):
        """Return the LIST response for the mailbox."""
        return self.pop3.list()

    def get_all_info_one_of_mail(self, index: int):
        """RETR message number ``index`` (1-based)."""
        assert type(index) == int and index > 0
        return self.pop3.retr(index)

    def quit(self):
        """Close the connection and drop the cached handle."""
        self.pop3.quit()
        self.pop3 = None
import sys
from importlib import reload
from email.parser import Parser
from email.parser import BytesParser
from email.header import decode_header
from email.utils import parseaddr
import email.iterators
def decode_str(s):
    """Decode an RFC 2047 MIME-encoded header word into a str.

    Only the first decoded fragment is used (multi-fragment headers lose
    their tail, matching the original behaviour). Raw bytes returned
    without a declared charset are decoded as UTF-8 with replacement.
    """
    value, charset = decode_header(s)[0]
    if charset:
        value = value.decode(charset)
    elif isinstance(value, bytes):
        # decode_header may return bytes with no charset for malformed input.
        value = value.decode('utf-8', errors='replace')
    return value
def savefile(filename, data, path):
    """Write attachment bytes ``data`` to ``path + filename``.

    Failures are reported on stdout rather than raised (best-effort save,
    as in the original). Fixes the original's missing colon after
    ``finally`` (SyntaxError) and the ``f.close()`` NameError that occurred
    when ``open`` itself failed.
    """
    filepath = path + filename
    print('Save as: ' + filepath)
    try:
        # `with` guarantees the handle is closed on every path.
        with open(filepath, 'wb') as f:
            f.write(data)
    except OSError:
        print(filepath + ' open failed')
def guess_charset(msg):
    """Best-effort charset detection for a message part.

    Prefers the explicit Message charset; otherwise extracts the
    ``charset=`` parameter from the Content-Type header. Fixes the
    original bug where trailing parameters and surrounding quotes were
    kept (e.g. 'charset="GB2312"; format=flowed' -> '"gb2312"; format=flowed').
    """
    charset = msg.get_charset()
    if charset is None:
        content_type = msg.get('Content-Type', '').lower()
        pos = content_type.find('charset=')
        if pos >= 0:
            # Take only the parameter value: stop at ';' and strip quotes.
            charset = content_type[pos + 8:].split(';')[0].strip().strip('"\'')
    return charset
def print_info(msg):
    """Print From/To/Subject headers of ``msg``, save attachments under the
    module-level ``mypath`` directory, and print text/html body parts."""
    # Headers: decode RFC 2047 words; re-format address headers.
    for header in ['From', 'To', 'Subject']:
        value = msg.get(header, '')
        if value:
            if header == 'Subject':
                value = decode_str(value)
            else:
                hdr, addr = parseaddr(value)
                name = decode_str(addr)
                value = name + ' < ' + addr + ' > '
            print(header + ':' + value)
    # Walk every MIME part: attachments get saved, text parts get printed.
    for part in msg.walk():
        filename = part.get_filename()
        content_type = part.get_content_type()
        charset = guess_charset(part)
        if filename:
            filename = decode_str(filename)
            data = part.get_payload(decode=True)
            # BUG FIX: original tested `filename != None or filename != ''`,
            # which is always true; require a genuinely non-empty name.
            if filename:
                print('Accessory: ' + filename)
                savefile(filename, data, mypath)
        else:
            email_content_type = ''
            content = ''
            if content_type == 'text/plain':
                email_content_type = 'text'
            elif content_type == 'text/html':
                email_content_type = 'html'
            if charset:
                content = part.get_payload(decode=True).decode(charset)
            print(email_content_type + ' ' + content)
# --- Connection settings (credentials are redacted placeholders) ---
email = '<EMAIL>'
password = '<PASSWORD>'
pop3_server = 'pop.163.com'
# Directory attachments are written to by savefile().
mypath = 'D://email/'
# Connect and authenticate over plain POP3 (port 110, unencrypted).
server = poplib.POP3(pop3_server, 110)
server.user(email)
server.pass_(password)
print('Message: %s. Size: %s' % server.stat())
resp, mails, objects = server.list()
# Fetch the most recent message (POP3 message numbers are 1-based).
index = len(mails)
resp, lines, octets = server.retr(index)
lists = []
for e in lines:
    lists.append(e.decode())
# Reassemble the raw RFC 822 text and parse it into a Message tree.
msg_content = '\r\n'.join(lists)
msg = Parser().parsestr(msg_content)
print_info(msg)
#server.dele(index)
server.quit()
class POP3Customize(object):
    """Thin wrapper around poplib.POP3 that caches one authenticated
    connection (credentials come from the module-level email / password /
    pop3_server globals)."""

    def __init__(self):
        # Cached connection; created lazily by execute().
        self.pop3 = None

    def execute(self) -> poplib.POP3:
        """Return the cached connection, logging in on first use.

        BUG FIX: the annotation was ``-> POP3`` (NameError: only ``poplib``
        is imported), and the new connection was never stored on
        ``self.pop3``, so caching never worked and the other methods
        crashed on ``None``.
        """
        if self.pop3:
            return self.pop3
        server = poplib.POP3(pop3_server, 110)
        server.user(email)
        server.pass_(password)
        self.pop3 = server  # cache so later calls reuse the session
        return server

    def get_stat_of_mail(self):
        """Return the LIST response for the mailbox."""
        return self.pop3.list()

    def get_all_info_one_of_mail(self, index: int):
        """RETR message number ``index`` (1-based)."""
        assert type(index) == int and index > 0
        return self.pop3.retr(index)

    def quit(self):
        """Close the connection and drop the cached handle."""
        self.pop3.quit()
        self.pop3 = None
from rewpapi.common.http import Request
class RemoteListingResidential(Request):
    """API client for a REW site's residential-listings endpoint."""

    def __init__(self, base_site, auth):
        super(RemoteListingResidential, self).__init__(auth)
        self._base_site = base_site
        self._auth = auth
        self._endpoint = base_site + "/api/listings/residential/"

    def get_all(self):
        """
        Returns a list of Listings, or None when the API returns nothing.
        """
        remote_listings = self.execute()
        listings = []
        if remote_listings:
            for a in remote_listings:
                # Materialise each API dict as a ListingResidential,
                # recording which keys were set in FIELDS.
                new_listing = ListingResidential(self._base_site, self._auth)
                new_listing.FIELDS = []
                for k, v in a.items():
                    setattr(new_listing, k, v)
                    new_listing.FIELDS.append(k)
                listings.append(new_listing)
            return listings
        return None

    def get(self, uuid):
        """
        Returns a single Listing instance, matching uuid.
        Raises a DoesNotExist exception if the object does not exist.
        """
        # NOTE(review): stub — returns a placeholder instead of fetching
        # `uuid` from the API.
        # BUG FIX: was `ListingResidential()` with no arguments, which
        # raises TypeError (the constructor requires base_site and auth).
        b = ListingResidential(self._base_site, self._auth)
        b.branch_name = "Foo"
        return b
class ListingResidential(RemoteListingResidential):
    """
    A Listing object represents a Listing. Once instantiated, you can:
    - Change its values and send an update()
    - Delete it
    - Create it if it doesn't exist
    """

    def set_fields(self, listing_object):
        """Copy every tracked field from ``listing_object`` onto self."""
        self.FIELDS = listing_object.FIELDS
        for field in listing_object.FIELDS:
            setattr(self, field, getattr(listing_object, field))

    def _serialize(self):
        """Build the payload dict the API expects.

        Flattens `location` into country/province/area/suburb and drops
        read-only, server-generated keys. (Shared by update() and
        create(), which previously duplicated this logic.)
        """
        listing_dict = {a: getattr(self, a) for a in self.FIELDS}
        location = listing_dict['location']
        listing_dict['country'] = location['country']
        listing_dict['province'] = location['province']
        listing_dict['area'] = location['region']
        listing_dict['suburb'] = location['suburb']
        # pop(..., None) instead of del: missing keys no longer KeyError.
        for key in ('agent', 'location', 'website_url', 'images', 'floorplans'):
            listing_dict.pop(key, None)
        return listing_dict

    def update(self):
        """
        Update this listing.
        """
        self._endpoint = self._base_site + "/api/listings/residential/%s/" % self.uuid
        self.execute("PUT", self._serialize())

    def delete(self):
        """
        Delete this listing.
        """
        pass

    def create(self):
        """
        Create a new listing.
        """
        self._endpoint = self._base_site + "/api/listings/residential/"
        self.execute("POST", self._serialize())
class RemoteListingResidential(Request):
    """API client for a REW site's residential-listings endpoint."""

    def __init__(self, base_site, auth):
        super(RemoteListingResidential, self).__init__(auth)
        self._base_site = base_site
        self._auth = auth
        self._endpoint = base_site + "/api/listings/residential/"

    def get_all(self):
        """
        Returns a list of Listings, or None when the API returns nothing.
        """
        remote_listings = self.execute()
        listings = []
        if remote_listings:
            for a in remote_listings:
                # Materialise each API dict as a ListingResidential,
                # recording which keys were set in FIELDS.
                new_listing = ListingResidential(self._base_site, self._auth)
                new_listing.FIELDS = []
                for k, v in a.items():
                    setattr(new_listing, k, v)
                    new_listing.FIELDS.append(k)
                listings.append(new_listing)
            return listings
        return None

    def get(self, uuid):
        """
        Returns a single Listing instance, matching uuid.
        Raises a DoesNotExist exception if the object does not exist.
        """
        # NOTE(review): stub — returns a placeholder instead of fetching
        # `uuid` from the API.
        # BUG FIX: was `ListingResidential()` with no arguments, which
        # raises TypeError (the constructor requires base_site and auth).
        b = ListingResidential(self._base_site, self._auth)
        b.branch_name = "Foo"
        return b
class ListingResidential(RemoteListingResidential):
    """
    A Listing object represents a Listing. Once instantiated, you can:
    - Change its values and send an update()
    - Delete it
    - Create it if it doesn't exist
    """

    def set_fields(self, listing_object):
        """Copy every tracked field from ``listing_object`` onto self."""
        self.FIELDS = listing_object.FIELDS
        for field in listing_object.FIELDS:
            setattr(self, field, getattr(listing_object, field))

    def _serialize(self):
        """Build the payload dict the API expects.

        Flattens `location` into country/province/area/suburb and drops
        read-only, server-generated keys. (Shared by update() and
        create(), which previously duplicated this logic.)
        """
        listing_dict = {a: getattr(self, a) for a in self.FIELDS}
        location = listing_dict['location']
        listing_dict['country'] = location['country']
        listing_dict['province'] = location['province']
        listing_dict['area'] = location['region']
        listing_dict['suburb'] = location['suburb']
        # pop(..., None) instead of del: missing keys no longer KeyError.
        for key in ('agent', 'location', 'website_url', 'images', 'floorplans'):
            listing_dict.pop(key, None)
        return listing_dict

    def update(self):
        """
        Update this listing.
        """
        self._endpoint = self._base_site + "/api/listings/residential/%s/" % self.uuid
        self.execute("PUT", self._serialize())

    def delete(self):
        """
        Delete this listing.
        """
        pass

    def create(self):
        """
        Create a new listing.
        """
        self._endpoint = self._base_site + "/api/listings/residential/"
        self.execute("POST", self._serialize())
from collections import namedtuple
import tensorflow as tf
import numpy as np
from mvc.action_output import ActionOutput
from mvc.models.networks.base_network import BaseNetwork
from mvc.models.networks.ddpg import initializer, build_target_update
from mvc.models.networks.ddpg import build_optim
from mvc.parametric_function import deterministic_policy_function
from mvc.parametric_function import q_function
from mvc.misc.assertion import assert_scalar
def build_smoothed_target(policy_tp1, sigma, c):
    """Target-policy smoothing (TD3): add clipped Gaussian noise to the
    target action and clip the result back into [-1, 1].

    NOTE(review): the noise tensor is shaped ``policy_tp1.shape[1:]``
    (batch dim dropped), so one noise draw is broadcast across the whole
    batch rather than sampled per transition — confirm this is intended.
    """
    smoothing_noise = tf.random.normal(policy_tp1.shape[1:], 0.0, sigma)
    clipped_noise = tf.clip_by_value(smoothing_noise, -c, c)
    return tf.clip_by_value(policy_tp1 + clipped_noise, -1.0, 1.0)
def build_target(rewards_tp1, q1_tp1, q2_tp1, dones_tp1, gamma):
    """Clipped double-Q TD target: r + gamma * min(Q1', Q2') * (1 - done),
    with gradients blocked from flowing into the target networks."""
    for tensor in (rewards_tp1, q1_tp1, q2_tp1, dones_tp1):
        assert_scalar(tensor)
    min_q = tf.minimum(q1_tp1, q2_tp1)
    continuation = 1.0 - dones_tp1
    return tf.stop_gradient(rewards_tp1 + gamma * min_q * continuation)
def build_critic_loss(q1_t, q2_t, target):
    """Sum of the mean-squared errors of both critics against the shared
    TD target."""
    per_critic = [tf.reduce_mean(tf.square(target - q)) for q in (q1_t, q2_t)]
    return per_critic[0] + per_critic[1]
def build_actor_loss(q1_t, q2_t):
    """Mean of the element-wise minimum of both critics evaluated at the
    actor's action; the caller negates this to form the actor objective."""
    assert_scalar(q1_t)
    assert_scalar(q2_t)
    return tf.reduce_mean(tf.minimum(q1_t, q2_t))
# Hyper-parameter bundle consumed by TD3Network._build.
TD3NetworkParams = namedtuple(
    'TD3NetworkParams', ('fcs', 'concat_index', 'state_shape', 'num_actions',
                         'gamma', 'tau', 'actor_lr', 'critic_lr',
                         'target_noise_sigma', 'target_noise_clip'))
# Small uniform init for the final layers (standard DDPG/TD3 convention).
last_initializer = tf.random_uniform_initializer(-3e-3, 3e-3)
def _q_function(params, obs, action, scope):
    """Build a critic Q(s, a) from params.fcs layer sizes, concatenating
    the action at params.concat_index; final layer uses the small uniform
    initializer."""
    return q_function(params.fcs, obs, action, params.concat_index,
                      tf.nn.tanh, w_init=initializer,
                      last_w_init=last_initializer,
                      last_b_init=last_initializer, scope=scope)
def _policy_function(params, obs, scope):
    """Build a deterministic actor producing params.num_actions pre-tanh
    outputs (callers in _build apply tf.nn.tanh to the result)."""
    return deterministic_policy_function(
        params.fcs, obs, params.num_actions, tf.nn.tanh, w_init=initializer,
        last_w_init=last_initializer, last_b_init=last_initializer,
        scope=scope)
class TD3Network(BaseNetwork):
    """Twin Delayed DDPG (TD3) computation graph: twin critics,
    target-policy smoothing, delayed actor updates and soft target
    updates, built as a TF1 static graph under the 'td3' scope."""

    def __init__(self, params):
        # params: TD3NetworkParams hyper-parameter bundle.
        self._build(params)

    def _infer(self, **kwargs):
        """Run the deterministic policy on a single observation.

        Returns ActionOutput(action, log_prob=None, value); log_prob is
        None because the policy is deterministic, value is Q1(s, pi(s)).
        """
        feed_dict = {
            self.obs_t_ph: np.array([kwargs['obs_t']])
        }
        sess = tf.get_default_session()
        ops = [self.action, self.value]
        action, value = sess.run(ops, feed_dict=feed_dict)
        return ActionOutput(action=action[0], log_prob=None, value=value[0])

    def _update(self, **kwargs):
        """One training step on a minibatch.

        Critics are updated every call; the actor and both target
        networks only when kwargs['update_actor'] is truthy (delayed
        policy updates). Returns (critic_loss, actor_loss), where
        actor_loss is None on critic-only steps.
        """
        sess = tf.get_default_session()
        # critic update
        critic_feed_dict = {
            self.obs_t_ph: kwargs['obs_t'],
            self.actions_t_ph: kwargs['actions_t'],
            self.rewards_tp1_ph: kwargs['rewards_tp1'],
            self.obs_tp1_ph: kwargs['obs_tp1'],
            self.dones_tp1_ph: kwargs['dones_tp1']
        }
        critic_ops = [self.critic_loss, self.critic_optimize_expr]
        critic_loss, _ = sess.run(critic_ops, feed_dict=critic_feed_dict)
        # actor update (delayed policy update)
        if kwargs['update_actor']:
            actor_feed_dict = {
                self.obs_t_ph: kwargs['obs_t']
            }
            actor_ops = [self.actor_loss, self.actor_optimize_expr]
            actor_loss, _ = sess.run(actor_ops, feed_dict=actor_feed_dict)
            # target update
            sess.run([self.update_target_critic, self.update_target_actor])
        else:
            actor_loss = None
        return critic_loss, actor_loss

    def _build(self, params):
        """Construct placeholders, actor/critic/target networks, losses,
        target-update and optimize ops."""
        with tf.variable_scope('td3', reuse=tf.AUTO_REUSE):
            self.obs_t_ph = tf.placeholder(
                tf.float32, [None] + list(params.state_shape), name='obs_t')
            self.actions_t_ph = tf.placeholder(
                tf.float32, [None, params.num_actions], name='actions_t')
            self.rewards_tp1_ph = tf.placeholder(
                tf.float32, [None], name='rewards_tp1')
            self.obs_tp1_ph = tf.placeholder(
                tf.float32, [None] + list(params.state_shape), name='obs_tp1')
            self.dones_tp1_ph = tf.placeholder(
                tf.float32, [None], name='dones_tp1')
            # policy function
            raw_policy_t = _policy_function(params, self.obs_t_ph, 'actor')
            policy_t = tf.nn.tanh(raw_policy_t)
            # target policy function
            raw_policy_tp1 = _policy_function(params, self.obs_tp1_ph,
                                              'target_actor')
            policy_tp1 = tf.nn.tanh(raw_policy_tp1)
            # target policy smoothing reguralization
            smoothed_policy_tp1 = build_smoothed_target(
                policy_tp1, params.target_noise_sigma,
                params.target_noise_clip)
            # first critic
            q1_t = _q_function(
                params, self.obs_t_ph, self.actions_t_ph, 'critic/1')
            q1_t_with_actor = _q_function(
                params, self.obs_t_ph, policy_t, 'critic/1')
            # first target critic
            q1_tp1 = _q_function(params, self.obs_tp1_ph, smoothed_policy_tp1,
                                 'target_critic/1')
            # second critic
            q2_t = _q_function(
                params, self.obs_t_ph, self.actions_t_ph, 'critic/2')
            q2_t_with_actor = _q_function(
                params, self.obs_t_ph, policy_t, 'critic/2')
            # second target critic
            q2_tp1 = _q_function(params, self.obs_tp1_ph, smoothed_policy_tp1,
                                 'target_critic/2')
            # prepare for loss calculation
            rewards_tp1 = tf.reshape(self.rewards_tp1_ph, [-1, 1])
            dones_tp1 = tf.reshape(self.dones_tp1_ph, [-1, 1])
            # critic loss
            target = build_target(
                rewards_tp1, q1_tp1, q2_tp1, dones_tp1, params.gamma)
            self.critic_loss = build_critic_loss(q1_t, q2_t, target)
            # actor loss (negated: gradient ascent on min of both critics)
            self.actor_loss = -build_actor_loss(
                q1_t_with_actor, q2_t_with_actor)
            # target update
            self.update_target_critic = build_target_update(
                'td3/critic', 'td3/target_critic', params.tau)
            self.update_target_actor = build_target_update(
                'td3/actor', 'td3/target_actor', params.tau)
            # optimization
            self.critic_optimize_expr = build_optim(
                self.critic_loss, params.critic_lr, 'td3/critic')
            self.actor_optimize_expr = build_optim(
                self.actor_loss, params.actor_lr, 'td3/actor')
            # action
            self.action = policy_t
            self.value = tf.reshape(q1_t_with_actor, [-1])

    def _infer_arguments(self):
        """kwargs keys _infer expects."""
        return ['obs_t']

    def _update_arguments(self):
        """kwargs keys _update expects."""
        return [
            'obs_t', 'actions_t', 'rewards_tp1', 'obs_tp1', 'dones_tp1',
            'update_actor'
        ]
import tensorflow as tf
import numpy as np
from mvc.action_output import ActionOutput
from mvc.models.networks.base_network import BaseNetwork
from mvc.models.networks.ddpg import initializer, build_target_update
from mvc.models.networks.ddpg import build_optim
from mvc.parametric_function import deterministic_policy_function
from mvc.parametric_function import q_function
from mvc.misc.assertion import assert_scalar
def build_smoothed_target(policy_tp1, sigma, c):
    """Target-policy smoothing (TD3): add clipped Gaussian noise to the
    target action and clip the result back into the [-1, 1] action range.

    Args:
        policy_tp1: target-policy action tensor.
        sigma: stddev of the smoothing noise.
        c: absolute clip value for the noise.
    """
    noise = tf.clip_by_value(
        tf.random.normal(policy_tp1.shape[1:], 0.0, sigma), -c, c)
    smoothed = policy_tp1 + noise
    return tf.clip_by_value(smoothed, -1.0, 1.0)
def build_target(rewards_tp1, q1_tp1, q2_tp1, dones_tp1, gamma):
    """Clipped double-Q TD target: ``r + gamma * min(Q1, Q2) * (1 - done)``.

    Gradients are stopped so the critics do not back-propagate through
    the target value.
    """
    for tensor in (rewards_tp1, q1_tp1, q2_tp1, dones_tp1):
        assert_scalar(tensor)
    min_q_tp1 = tf.minimum(q1_tp1, q2_tp1)
    continuation = 1.0 - dones_tp1
    return tf.stop_gradient(rewards_tp1 + gamma * min_q_tp1 * continuation)
def build_critic_loss(q1_t, q2_t, target):
    """Sum of the mean squared TD errors of both critics."""
    per_critic = [
        tf.reduce_mean(tf.square(target - q_t)) for q_t in (q1_t, q2_t)
    ]
    return per_critic[0] + per_critic[1]
def build_actor_loss(q1_t, q2_t):
    """Mean of the element-wise minimum of the two critic values.

    NOTE: this returns the (positive) mean Q value; the caller negates it
    to obtain the actual actor loss to minimise.
    """
    assert_scalar(q1_t)
    assert_scalar(q2_t)
    return tf.reduce_mean(tf.minimum(q1_t, q2_t))
# Hyper-parameter bundle for TD3Network:
#   fcs: hidden-layer sizes shared by actor and critic builders
#   concat_index: index passed to q_function (presumably the layer where the
#       action is concatenated — confirm against mvc.parametric_function)
#   state_shape / num_actions: environment dimensions
#   gamma: discount factor; tau: soft target-update rate
#   actor_lr / critic_lr: learning rates
#   target_noise_sigma / target_noise_clip: target-policy smoothing noise
TD3NetworkParams = namedtuple(
    'TD3NetworkParams', ('fcs', 'concat_index', 'state_shape', 'num_actions',
                         'gamma', 'tau', 'actor_lr', 'critic_lr',
                         'target_noise_sigma', 'target_noise_clip'))
# Small uniform initializer for final-layer weights/biases (keeps initial
# outputs near zero, the usual DDPG/TD3 convention).
last_initializer = tf.random_uniform_initializer(-3e-3, 3e-3)
def _q_function(params, obs, action, scope):
    """Build a critic network Q(s, a) under ``scope`` with tanh hidden units."""
    return q_function(
        params.fcs, obs, action, params.concat_index, tf.nn.tanh,
        w_init=initializer, last_w_init=last_initializer,
        last_b_init=last_initializer, scope=scope)
def _policy_function(params, obs, scope):
    """Build a deterministic policy network pi(s) under ``scope``."""
    return deterministic_policy_function(
        params.fcs, obs, params.num_actions, tf.nn.tanh,
        w_init=initializer, last_w_init=last_initializer,
        last_b_init=last_initializer, scope=scope)
class TD3Network(BaseNetwork):
    """TF1 computation graph for TD3 (Twin Delayed DDPG).

    Implements the three TD3 ingredients visible below: clipped double-Q
    targets (two critics + min in build_target), target-policy smoothing
    (build_smoothed_target) and delayed actor/target updates (the
    ``update_actor`` flag in ``_update``).
    """

    def __init__(self, params):
        # params: TD3NetworkParams hyper-parameter bundle.
        self._build(params)

    def _infer(self, **kwargs):
        """Run the actor (and critic 1) for a single observation."""
        feed_dict = {
            self.obs_t_ph: np.array([kwargs['obs_t']])
        }
        sess = tf.get_default_session()
        ops = [self.action, self.value]
        action, value = sess.run(ops, feed_dict=feed_dict)
        # Strip the singleton batch dimension; a deterministic policy has
        # no log probability.
        return ActionOutput(action=action[0], log_prob=None, value=value[0])

    def _update(self, **kwargs):
        """One training step on a batch of transitions.

        The critics are always updated; the actor and both target networks
        are updated only when ``update_actor`` is True (delayed policy
        update). Returns ``(critic_loss, actor_loss)`` with
        ``actor_loss=None`` when the actor step was skipped.
        """
        sess = tf.get_default_session()
        # critic update
        critic_feed_dict = {
            self.obs_t_ph: kwargs['obs_t'],
            self.actions_t_ph: kwargs['actions_t'],
            self.rewards_tp1_ph: kwargs['rewards_tp1'],
            self.obs_tp1_ph: kwargs['obs_tp1'],
            self.dones_tp1_ph: kwargs['dones_tp1']
        }
        critic_ops = [self.critic_loss, self.critic_optimize_expr]
        critic_loss, _ = sess.run(critic_ops, feed_dict=critic_feed_dict)
        # actor update (delayed policy update)
        if kwargs['update_actor']:
            actor_feed_dict = {
                self.obs_t_ph: kwargs['obs_t']
            }
            actor_ops = [self.actor_loss, self.actor_optimize_expr]
            actor_loss, _ = sess.run(actor_ops, feed_dict=actor_feed_dict)
            # target update: soft updates happen only on actor steps
            sess.run([self.update_target_critic, self.update_target_actor])
        else:
            actor_loss = None
        return critic_loss, actor_loss

    def _build(self, params):
        """Construct placeholders, networks, losses and update ops."""
        # AUTO_REUSE lets 'critic/1' and 'critic/2' be built twice below
        # (once on replay actions, once on the actor output) sharing weights.
        with tf.variable_scope('td3', reuse=tf.AUTO_REUSE):
            self.obs_t_ph = tf.placeholder(
                tf.float32, [None] + list(params.state_shape), name='obs_t')
            self.actions_t_ph = tf.placeholder(
                tf.float32, [None, params.num_actions], name='actions_t')
            self.rewards_tp1_ph = tf.placeholder(
                tf.float32, [None], name='rewards_tp1')
            self.obs_tp1_ph = tf.placeholder(
                tf.float32, [None] + list(params.state_shape), name='obs_tp1')
            self.dones_tp1_ph = tf.placeholder(
                tf.float32, [None], name='dones_tp1')
            # policy function
            raw_policy_t = _policy_function(params, self.obs_t_ph, 'actor')
            # NOTE(review): tanh is applied here on top of the network
            # output; confirm deterministic_policy_function does not already
            # squash its last layer, otherwise this is a double tanh.
            policy_t = tf.nn.tanh(raw_policy_t)
            # target policy function
            raw_policy_tp1 = _policy_function(params, self.obs_tp1_ph,
                                              'target_actor')
            policy_tp1 = tf.nn.tanh(raw_policy_tp1)
            # target policy smoothing regularization
            smoothed_policy_tp1 = build_smoothed_target(
                policy_tp1, params.target_noise_sigma,
                params.target_noise_clip)
            # first critic (two graph instances sharing the same scope)
            q1_t = _q_function(
                params, self.obs_t_ph, self.actions_t_ph, 'critic/1')
            q1_t_with_actor = _q_function(
                params, self.obs_t_ph, policy_t, 'critic/1')
            # first target critic
            q1_tp1 = _q_function(params, self.obs_tp1_ph, smoothed_policy_tp1,
                                 'target_critic/1')
            # second critic
            q2_t = _q_function(
                params, self.obs_t_ph, self.actions_t_ph, 'critic/2')
            q2_t_with_actor = _q_function(
                params, self.obs_t_ph, policy_t, 'critic/2')
            # second target critic
            q2_tp1 = _q_function(params, self.obs_tp1_ph, smoothed_policy_tp1,
                                 'target_critic/2')
            # reshape rewards/dones to column vectors to match the Q outputs
            rewards_tp1 = tf.reshape(self.rewards_tp1_ph, [-1, 1])
            dones_tp1 = tf.reshape(self.dones_tp1_ph, [-1, 1])
            # critic loss (clipped double-Q target)
            target = build_target(
                rewards_tp1, q1_tp1, q2_tp1, dones_tp1, params.gamma)
            self.critic_loss = build_critic_loss(q1_t, q2_t, target)
            # actor loss: maximise min(Q1, Q2) -> minimise its negation
            self.actor_loss = -build_actor_loss(
                q1_t_with_actor, q2_t_with_actor)
            # soft target-update ops (rate tau)
            self.update_target_critic = build_target_update(
                'td3/critic', 'td3/target_critic', params.tau)
            self.update_target_actor = build_target_update(
                'td3/actor', 'td3/target_actor', params.tau)
            # optimization (per-scope variable lists)
            self.critic_optimize_expr = build_optim(
                self.critic_loss, params.critic_lr, 'td3/critic')
            self.actor_optimize_expr = build_optim(
                self.actor_loss, params.actor_lr, 'td3/actor')
            # tensors read by _infer
            self.action = policy_t
            self.value = tf.reshape(q1_t_with_actor, [-1])

    def _infer_arguments(self):
        # Keyword-argument names consumed by _infer.
        return ['obs_t']

    def _update_arguments(self):
        # Keyword-argument names consumed by _update.
        return [
            'obs_t', 'actions_t', 'rewards_tp1', 'obs_tp1', 'dones_tp1',
            'update_actor'
        ] | 0.923605 | 0.444384 |
# --- PupDB (flat-file JSON database) filename and record keys --------------
PupDB_FILENAME = "SVTB-DB.json_db"
PupDB_MRTkey = "MostRecentTweet"
PupDB_MRLkey = "MostRecentRiverLevel"
PupDB_MRFkey = "MostRecentForecastLevel"
PupDB_ACTIONkey = "CurrentFloodingActionLevel"
# Text tags used to label observations scraped from the NWS pages.
HIGHEST_TAG = "Highest Observation:"
LATEST_TAG = "Latest observed"
FORECAST_TAG = "Highest Forecast:"
OBSERVATION_TAGS = [LATEST_TAG, HIGHEST_TAG, FORECAST_TAG]
# Keys of the per-gauge detail dicts below.
# NOTE(review): "guage" is a misspelling of "gauge", but it is baked into
# identifiers AND stored string keys, so it must be kept for compatibility.
GUAGE_URL_KEY = "guage_URL"
GUAGE_MILEMARKER_KEY = "milemarker"
GUAGE_ELEVATION_KEY = "guage_elevation"
GUAGE_NAME_KEY = "Friendly_Name"
# NWS flood action levels, ordered least to most severe.
ACTION_LABELS = [
    "No Flooding",
    "First-action",
    "Minor-flood",
    "Moderate-flood",
    "Major-flood",
]
# Level below which no alerting is of interest (presumably feet on the
# gauge — TODO confirm units against the scraper).
MINIMUM_CONCERN_LEVEL = 30
TWEET_FREQUENCY = [
    18000,
    9000,
    8000,
    7000,
    6000,
    5000,
    4000,
    3600,
]  # delay time in seconds
# Time between tweets decreases as flooding increases
# ACTION_LEVELS = [21, 23, 30, 38]
# ACTION_DICT = dict(zip(ACTION_LEVELS, ACTION_LABELS))
LOCATION_OF_INTEREST = 584  # river mile marker @ Bushman's Lake
# NWS AHPS URL fragments; a gauge ID is inserted between base and tail.
# NOTE(review): the base contains a double slash ("gov//ahps2"); servers
# usually tolerate it but it looks unintended — confirm before changing,
# since the string is used verbatim to build request URLs.
NWS_website_baseaddress = (
    "https://water.weather.gov/"
    "/ahps2/river.php?wfo=lmk&wfoid=18699&"
    "riverid=204624&pt%5B%5D="
)
NWS_website_tailaddress = (
    "&pt%5B%5D=144523&allpoints=150960&data%5B%5D=obs&data%5B%5D=xml"
)
# river guage ID is inserted between these strings
NWS_OHIO_RIVER_ID = "204624"
MARKLAND_DAM_URL = "https://water.weather.gov/ahps2/hydrograph.php?wfo=iln&gage=mklk2"
MARKLAND_DAM_NAME = "Markland"
MARKLAND_DAM_LOWER_GUAGE_ID = "144523"
MARKLAND_GUAGE_XML_URL = (
    f"{NWS_website_baseaddress}{MARKLAND_DAM_LOWER_GUAGE_ID}{NWS_website_tailaddress}"
)
MCALPINE_DAM_NAME = "McAlpine"
MCALPINE_DAM_UPPER_GUAGE_ID = "142935"
MCALPINE_GUAGE_XML_URL = (
    f"{NWS_website_baseaddress}{MCALPINE_DAM_UPPER_GUAGE_ID}{NWS_website_tailaddress}"
)
MCALPINE_DAM_DETAILS = {  # some of these values can be used to test and verify data scraped from website
    GUAGE_NAME_KEY: "McAlpine Dam Upper Guage",
    GUAGE_URL_KEY: MCALPINE_GUAGE_XML_URL,
    GUAGE_MILEMARKER_KEY: 606.8,
    GUAGE_ELEVATION_KEY: 407.18,
    # Thresholds keyed by the first four action labels ("Major-flood"
    # deliberately has no entry here).
    ACTION_LABELS[0]: 21,
    ACTION_LABELS[1]: 23,
    ACTION_LABELS[2]: 30,
    ACTION_LABELS[3]: 38,
}
MARKLAND_DAM_DETAILS = {
    GUAGE_NAME_KEY: "Markland Dam Lower Guage",
    GUAGE_URL_KEY: MARKLAND_GUAGE_XML_URL,
    GUAGE_MILEMARKER_KEY: 531,
    GUAGE_ELEVATION_KEY: 408,
    ACTION_LABELS[0]: 49,
    ACTION_LABELS[1]: 51,
    ACTION_LABELS[2]: 62,
    ACTION_LABELS[3]: 74,
}
# All monitored points, plus the convenience list of their names.
RIVER_MONITORING_POINTS = {
    MCALPINE_DAM_NAME: MCALPINE_DAM_DETAILS,
    MARKLAND_DAM_NAME: MARKLAND_DAM_DETAILS,
}
RIVER_GUAGES = list(RIVER_MONITORING_POINTS.keys())
if __name__ == "__main__":
    # Smoke test: dump every known monitoring point to stdout.
    for point_name, details in RIVER_MONITORING_POINTS.items():
        print()
        print(point_name)
        print(details)
print(f'Total guages known: {len(RIVER_GUAGES)}') | RiverGuages.py | PupDB_FILENAME = "SVTB-DB.json_db"
PupDB_MRTkey = "MostRecentTweet"
PupDB_MRLkey = "MostRecentRiverLevel"
PupDB_MRFkey = "MostRecentForecastLevel"
PupDB_ACTIONkey = "CurrentFloodingActionLevel"
HIGHEST_TAG = "Highest Observation:"
LATEST_TAG = "Latest observed"
FORECAST_TAG = "Highest Forecast:"
OBSERVATION_TAGS = [LATEST_TAG, HIGHEST_TAG, FORECAST_TAG]
GUAGE_URL_KEY = "guage_URL"
GUAGE_MILEMARKER_KEY = "milemarker"
GUAGE_ELEVATION_KEY = "guage_elevation"
GUAGE_NAME_KEY = "Friendly_Name"
ACTION_LABELS = [
"No Flooding",
"First-action",
"Minor-flood",
"Moderate-flood",
"Major-flood",
]
MINIMUM_CONCERN_LEVEL = 30
TWEET_FREQUENCY = [
18000,
9000,
8000,
7000,
6000,
5000,
4000,
3600,
] # delay time in seconds
# Time between tweets decreases as flooding increases
# ACTION_LEVELS = [21, 23, 30, 38]
# ACTION_DICT = dict(zip(ACTION_LEVELS, ACTION_LABELS))
LOCATION_OF_INTEREST = 584 # river mile marker @ Bushman's Lake
NWS_website_baseaddress = (
"https://water.weather.gov/"
"/ahps2/river.php?wfo=lmk&wfoid=18699&"
"riverid=204624&pt%5B%5D="
)
NWS_website_tailaddress = (
"&pt%5B%5D=144523&allpoints=150960&data%5B%5D=obs&data%5B%5D=xml"
)
# river guage ID is inserted between these strings
NWS_OHIO_RIVER_ID = "204624"
MARKLAND_DAM_URL = "https://water.weather.gov/ahps2/hydrograph.php?wfo=iln&gage=mklk2"
MARKLAND_DAM_NAME = "Markland"
MARKLAND_DAM_LOWER_GUAGE_ID = "144523"
MARKLAND_GUAGE_XML_URL = (
f"{NWS_website_baseaddress}{MARKLAND_DAM_LOWER_GUAGE_ID}{NWS_website_tailaddress}"
)
MCALPINE_DAM_NAME = "McAlpine"
MCALPINE_DAM_UPPER_GUAGE_ID = "142935"
MCALPINE_GUAGE_XML_URL = (
f"{NWS_website_baseaddress}{MCALPINE_DAM_UPPER_GUAGE_ID}{NWS_website_tailaddress}"
)
MCALPINE_DAM_DETAILS = { # some of these values can be used to test and verify data scraped from website
GUAGE_NAME_KEY: "McAlpine Dam Upper Guage",
GUAGE_URL_KEY: MCALPINE_GUAGE_XML_URL,
GUAGE_MILEMARKER_KEY: 606.8,
GUAGE_ELEVATION_KEY: 407.18,
ACTION_LABELS[0]: 21,
ACTION_LABELS[1]: 23,
ACTION_LABELS[2]: 30,
ACTION_LABELS[3]: 38,
}
MARKLAND_DAM_DETAILS = {
GUAGE_NAME_KEY: "Markland Dam Lower Guage",
GUAGE_URL_KEY: MARKLAND_GUAGE_XML_URL,
GUAGE_MILEMARKER_KEY: 531,
GUAGE_ELEVATION_KEY: 408,
ACTION_LABELS[0]: 49,
ACTION_LABELS[1]: 51,
ACTION_LABELS[2]: 62,
ACTION_LABELS[3]: 74,
}
RIVER_MONITORING_POINTS = {
MCALPINE_DAM_NAME: MCALPINE_DAM_DETAILS,
MARKLAND_DAM_NAME: MARKLAND_DAM_DETAILS,
}
RIVER_GUAGES = list(RIVER_MONITORING_POINTS.keys())
if __name__ == "__main__":
    # Smoke test: dump every known monitoring point, then the count.
    for point_name, details in RIVER_MONITORING_POINTS.items():
        print()
        print(point_name)
        print(details)
    print(f'Total guages known: {len(RIVER_GUAGES)}')
from __future__ import annotations
from typing import Any, Iterable, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from .odm.basefield import Field
class Key:
    """Datastore-style entity key: project / namespace / kind / (id|name).

    A key starts out *partial* (no identifier); completing it with either a
    numeric ``id`` or a string ``name`` makes it a full key.
    """

    def __init__(self, project: str, kind: str, namespace: Optional[str] = None, id: Optional[int] = None, name: Optional[str] = None) -> None:
        self.kind = kind
        self.project = project
        self.namespace = namespace
        self.partial = True
        self._backup_id = None
        self._backup_id_type = None
        try:
            # Complete immediately when exactly one identifier was given;
            # otherwise (none, or both) the key stays partial.
            self._complete(id, name)
        except ValueError:
            self.id = None
            self.id_type = None

    def _backup(self) -> None:
        # Snapshot the identifier so a failed mutation can be rolled back.
        self._backup_id = self.id
        self._backup_id_type = self.id_type

    def _rollback(self) -> None:
        # NOTE(review): ``partial`` is not part of the backup; a rollback
        # only restores the identifier fields.
        if self._backup_id is not None:
            self.id = self._backup_id
        if self._backup_id_type is not None:
            self.id_type = self._backup_id_type

    def _clear_backup(self) -> None:
        self._backup_id = None
        self._backup_id_type = None

    @property
    def _entity(self) -> dict:
        """REST (Datastore-style) representation of this key."""
        partition = {"projectId": self.project, "namespaceId": self.namespace}
        path = [{"kind": self.kind}]
        if self.id_type is not None:
            path[0].update({self.id_type: self.id})
        return {"partitionId": partition, "path": path}

    @classmethod
    def _from_entity(cls, entity: dict) -> Key:
        """Rebuild a Key from its REST representation."""
        partition = entity.get("partitionId")
        path = entity.get("path")[0]
        return cls(project=partition.get("projectId"), namespace=partition.get("namespaceId"), id=path.get("id"), name=path.get("name"),
                   kind=path.get("kind"))

    def _complete(self, id: Optional[int] = None, name: Optional[str] = None) -> None:
        """Turn a partial key into a full key with ``id`` XOR ``name``.

        Raises:
            TypeError: if the key is already complete.
            ValueError: if both or neither identifier is given.
        """
        if not self.partial:
            raise TypeError("Key is not partial. Only partial key can be completed")
        if not (id is None or name is None) or (id is None and name is None):
            raise ValueError("Either \"id\" or \"name\" should be specified to complete a Key")
        if id is not None:
            self.id = id
            self.id_type = "id"
        else:
            self.id = name
            self.id_type = "name"
        # BUG FIX: a completed key is no longer partial. Previously this flag
        # was never cleared, so the "already complete" guard above could
        # never fire and completed keys still reported partial=True.
        self.partial = False

    def _uncomplete(self) -> None:
        """Drop the identifier and mark the key partial again."""
        self.id = None
        self.id_type = None
        self.partial = True
class Array(list):
    """List that coerces every element through ``content._mold``.

    All mutation paths (construction, item/slice assignment, append,
    extend, insert, ``+=``) mold incoming values, so the list only ever
    holds molded elements.
    """

    def __init__(self, content: Field, iterable: Iterable) -> None:
        self._content = content
        super().__init__(self._content._mold(value) for value in iterable)

    def __setitem__(self, key: Any, value: Any) -> None:
        # FIX: slice assignment used to mold the iterable object itself;
        # mold each element instead.
        if isinstance(key, slice):
            super().__setitem__(key, (self._content._mold(v) for v in value))
        else:
            super().__setitem__(key, self._content._mold(value))

    def __iadd__(self, iterable: Iterable) -> Array:
        # FIX: list.__iadd__ bypasses the overridden extend() at C level,
        # which would skip molding; route it through extend() explicitly.
        self.extend(iterable)
        return self

    def append(self, value: Any) -> None:
        super().append(self._content._mold(value))

    def extend(self, iterable: Iterable) -> None:
        super().extend(self._content._mold(value) for value in iterable)

    def insert(self, index: int, value: Any) -> None:
        super().insert(index, self._content._mold(value))
class Location:
def __init__(self, latitude: float, longitude: float) -> None:
self.latitude = latitude
self.longitude = longitude | datatypes.py | from __future__ import annotations
from typing import Any, Iterable, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from .odm.basefield import Field
class Key:
    """Datastore-style entity key: project / namespace / kind / (id|name).

    A key starts out *partial* (no identifier); completing it with either a
    numeric ``id`` or a string ``name`` makes it a full key.
    """

    def __init__(self, project: str, kind: str, namespace: Optional[str] = None, id: Optional[int] = None, name: Optional[str] = None) -> None:
        self.kind = kind
        self.project = project
        self.namespace = namespace
        self.partial = True
        self._backup_id = None
        self._backup_id_type = None
        try:
            # Complete immediately when exactly one identifier was given;
            # otherwise (none, or both) the key stays partial.
            self._complete(id, name)
        except ValueError:
            self.id = None
            self.id_type = None

    def _backup(self) -> None:
        # Snapshot the identifier so a failed mutation can be rolled back.
        self._backup_id = self.id
        self._backup_id_type = self.id_type

    def _rollback(self) -> None:
        # NOTE(review): ``partial`` is not part of the backup; a rollback
        # only restores the identifier fields.
        if self._backup_id is not None:
            self.id = self._backup_id
        if self._backup_id_type is not None:
            self.id_type = self._backup_id_type

    def _clear_backup(self) -> None:
        self._backup_id = None
        self._backup_id_type = None

    @property
    def _entity(self) -> dict:
        """REST (Datastore-style) representation of this key."""
        partition = {"projectId": self.project, "namespaceId": self.namespace}
        path = [{"kind": self.kind}]
        if self.id_type is not None:
            path[0].update({self.id_type: self.id})
        return {"partitionId": partition, "path": path}

    @classmethod
    def _from_entity(cls, entity: dict) -> Key:
        """Rebuild a Key from its REST representation."""
        partition = entity.get("partitionId")
        path = entity.get("path")[0]
        return cls(project=partition.get("projectId"), namespace=partition.get("namespaceId"), id=path.get("id"), name=path.get("name"),
                   kind=path.get("kind"))

    def _complete(self, id: Optional[int] = None, name: Optional[str] = None) -> None:
        """Turn a partial key into a full key with ``id`` XOR ``name``.

        Raises:
            TypeError: if the key is already complete.
            ValueError: if both or neither identifier is given.
        """
        if not self.partial:
            raise TypeError("Key is not partial. Only partial key can be completed")
        if not (id is None or name is None) or (id is None and name is None):
            raise ValueError("Either \"id\" or \"name\" should be specified to complete a Key")
        if id is not None:
            self.id = id
            self.id_type = "id"
        else:
            self.id = name
            self.id_type = "name"
        # BUG FIX: a completed key is no longer partial. Previously this flag
        # was never cleared, so the "already complete" guard above could
        # never fire and completed keys still reported partial=True.
        self.partial = False

    def _uncomplete(self) -> None:
        """Drop the identifier and mark the key partial again."""
        self.id = None
        self.id_type = None
        self.partial = True
class Array(list):
    """List that coerces every element through ``content._mold``.

    All mutation paths (construction, item/slice assignment, append,
    extend, insert, ``+=``) mold incoming values, so the list only ever
    holds molded elements.
    """

    def __init__(self, content: Field, iterable: Iterable) -> None:
        self._content = content
        super().__init__(self._content._mold(value) for value in iterable)

    def __setitem__(self, key: Any, value: Any) -> None:
        # FIX: slice assignment used to mold the iterable object itself;
        # mold each element instead.
        if isinstance(key, slice):
            super().__setitem__(key, (self._content._mold(v) for v in value))
        else:
            super().__setitem__(key, self._content._mold(value))

    def __iadd__(self, iterable: Iterable) -> Array:
        # FIX: list.__iadd__ bypasses the overridden extend() at C level,
        # which would skip molding; route it through extend() explicitly.
        self.extend(iterable)
        return self

    def append(self, value: Any) -> None:
        super().append(self._content._mold(value))

    def extend(self, iterable: Iterable) -> None:
        super().extend(self._content._mold(value) for value in iterable)

    def insert(self, index: int, value: Any) -> None:
        super().insert(index, self._content._mold(value))
class Location:
    """Geographic coordinate pair (latitude/longitude; presumably decimal
    degrees — units are not stated in this file, confirm against callers)."""

    def __init__(self, latitude: float, longitude: float) -> None:
        self.latitude = latitude
        self.longitude = longitude | 0.889433 | 0.121712 |
import numpy
import numpy.matlib
import style
import matplotlib.pyplot
WATER_DENSITY = 1000  # kg/m^3 (fresh water; SI units assumed throughout)
GRAVITATIONAL_ACCELERATION = 9.8  # m/s^2
class Beam:
'''
Get natural frequencies and shapes of modes
'''
def __init__(self, node_number, mass_ratio, top_tension,
             length, diameter, bending_stiffness, horizontal, tension_include_boyancy):
    """Tensioned beam modelled by FEM; modes are computed on construction.

    Args:
        node_number: number of FEM nodes along the beam.
        mass_ratio: structural mass / displaced-water mass per unit length.
        top_tension: axial tension at the top end (height = length).
        length: beam length.
        diameter: outer diameter (sets the displaced-water mass).
        bending_stiffness: EI of the cross-section.
        horizontal: if True the tension is uniform along the beam;
            otherwise it decreases with depth due to self-weight.
        tension_include_boyancy: if True, buoyancy is subtracted from the
            weight when building the tension profile ("boyancy" spelling
            kept from the original API).
    """
    self._node_number = node_number
    self._mass_ratio = mass_ratio
    self._top_tension = top_tension
    self._horizontal = horizontal
    self._tension_include_boyancy = tension_include_boyancy
    self._length = length
    self._diameter = diameter
    self._bending_stiffness = bending_stiffness
    # Eagerly solve the eigenproblem so the properties are always valid.
    self._calculate_modes()
def _calculate_modes(self):
    '''
    Assemble the FEM mass/stiffness matrices and solve the eigenproblem
    for natural frequencies (Hz) and normalised mode shapes.

    Uses 2-DOF-per-node (deflection + rotation) beam elements with a
    consistent mass matrix and a geometric (tension) stiffness term.
    '''
    # element length
    self._node_spacing = self._length / (self._node_number - 1)
    l = self._node_spacing
    # element mid-point heights, measured from the bottom (height 0).
    heights = numpy.linspace(
        self._length, 0, num=self._node_number, endpoint=True)
    heights = (heights[:-1] + heights[1:]) / 2
    # mass of displaced water per unit length: pi*D^2/4 * rho_water.
    water_density_per_length = numpy.pi * self._diameter**2 / 4 * WATER_DENSITY
    density_minus_water = (self._mass_ratio - 1) * water_density_per_length
    density = self._mass_ratio * water_density_per_length
    n_dof = self._node_number * 2
    # global mass matrix
    M = numpy.matlib.zeros((n_dof, n_dof))
    # global stiffness matrix (bending + geometric terms added below)
    K = numpy.matlib.zeros((n_dof, n_dof))
    # element bending stiffness, scaled by EI/l^3.
    ke = numpy.matrix([
        [12, 6 * l, -12, 6 * l], [6 * l, 4 * l**2, -6 * l, 2 * l**2],
        [-12, -6 * l, 12, -6 * l], [6 * l, 2 * l**2, -6 * l, 4 * l**2]
    ]) * self._bending_stiffness / l**3
    # consistent element mass matrix, scaled by rho*l/420.
    me = numpy.matrix([
        [156, 22 * l, 54, -13 * l],
        [22 * l, 4 * l**2, 13 * l, -3 * l**2],
        [54, 13 * l, 156, -22 * l],
        [-13 * l, -3 * l**2, -22 * l, 4 * l**2],
    ]) * density * l / 420
    # geometric stiffness matrix element (tension contribution)
    kge_ = numpy.matrix(
        [[36, 3 * l, -36, 3 * l], [3 * l, 4 * l**2, -3 * l, -l**2],
         [-36, -3 * l, 36, -3 * l], [3 * l, -l**2, -3 * l, 4 * l**2]])
    for i, height in enumerate(heights):
        # Per-element tension: uniform when horizontal, otherwise reduced
        # by the (optionally buoyancy-corrected) weight below the top.
        T = self._top_tension
        if self._horizontal is False:
            if self._tension_include_boyancy:
                T = self._top_tension - density_minus_water * \
                    GRAVITATIONAL_ACCELERATION * height
            else:
                T = self._top_tension - density * \
                    GRAVITATIONAL_ACCELERATION * height
        kge = kge_ * T / l / 30
        # S scatters the 4x4 element matrices into the global DOF layout.
        S = numpy.matlib.zeros((4, 2 * self._node_number))
        for j in range(4):
            S[j, 2 * i + j] = 1
        K += S.T * (ke + kge) * S
        M += S.T * me * S
    # Remove the first DOF and the second-to-last remaining DOF —
    # presumably the end deflections (pinned ends); confirm against the
    # element DOF ordering.
    self.K = K[1:, 1:]
    self.K = numpy.delete(self.K, numpy.s_[-2], 0)
    self.K = numpy.delete(self.K, numpy.s_[-2], 1)
    self.M = M[1:, 1:]
    self.M = numpy.delete(self.M, numpy.s_[-2], 0)
    self.M = numpy.delete(self.M, numpy.s_[-2], 1)
    invM = numpy.linalg.inv(self.M)
    invMK = invM * self.K
    # Eigenvalues of M^-1 K are omega^2; eigenvectors are mode shapes.
    w, shapes = numpy.linalg.eig(invMK)
    # Re-insert zero rows where the boundary DOFs were deleted.
    zero_r = numpy.zeros((1, shapes.shape[1]))
    shapes = numpy.r_[zero_r, shapes[:-1, :], zero_r, shapes[-1, :]]
    # Sort modes by ascending frequency.
    order = numpy.argsort(w)
    w = numpy.sqrt(w[order].real)
    shapes = shapes[:, order]
    # Keep only the interior deflection DOFs (every second row).
    # python slice from 0
    # so the first one must be included
    shapes = shapes[2:-3:2, :]
    shapes = shapes[:, :shapes.shape[0]]
    # Normalise each mode to unit max amplitude and zero-pad the ends.
    shapes_max = numpy.amax(numpy.abs(shapes), axis=0)
    zero_r = numpy.zeros((1, shapes.shape[1]))
    shapes = numpy.r_[zero_r, shapes, zero_r]
    self._shapes = shapes / shapes_max
    # Sign convention: first mode positive at mid-span.
    if self._shapes[self._shapes.shape[0]//2, 0] < 0:
        self._shapes *= -1
    # rad/s -> Hz
    self._natural_frequencies = w / numpy.pi / 2
@property
def shapes(self):
    '''
    Modal shape matrix indexed [node_number, mode_number]: rows follow
    the nodes along the span, columns are modes (unit max amplitude).
    '''
    return self._shapes

@property
def natural_frequencies(self):
    '''
    Natural frequencies in Hz, sorted ascending (rank-1 array).
    '''
    return self._natural_frequencies

@property
def diameter(self):
    '''
    Outer diameter of the beam.
    '''
    return self._diameter

@property
def node_number(self):
    '''
    Number of FEM nodes along the beam.
    '''
    return self._node_number

@property
def node_spacing(self):
    '''
    Distance between adjacent nodes: length / (node_number - 1).
    '''
    return self._node_spacing
def plot_modal_shapes(self, out_filenames, max_order):
    '''
    Plot modal shapes up to max_order and save the figure.

    Parameters
    ----------
    out_filenames : string list.
        A list of filenames for figure saving (the same figure is saved
        once per name, e.g. for multiple formats).
    max_order : int.
        The max order up to which the figure is plotted.
    '''
    print('plot_modal_shapes')
    matplotlib.pyplot.clf()
    matplotlib.pyplot.gcf().set_size_inches(
        style.SINGLE_COLUMN_WIDTH, style.SINGLE_COLUMN_SHORT_HEIGHT)
    matplotlib.pyplot.xlabel(r'$z\cdot L^{-1}$')
    matplotlib.pyplot.ylabel(r'$\phi^m$')
    matplotlib.pyplot.grid()
    # Normalised span coordinate, one point per node.
    z = numpy.linspace(0, 1, num=self._node_number)
    matplotlib.pyplot.plot(z, self.shapes[:, :max_order])
    matplotlib.pyplot.tight_layout()
    for out_filename in out_filenames:
        matplotlib.pyplot.savefig(out_filename) | Beam.py | import numpy
import numpy.matlib
import style
import matplotlib.pyplot
WATER_DENSITY = 1000  # kg/m^3 (fresh water; SI units assumed throughout)
GRAVITATIONAL_ACCELERATION = 9.8  # m/s^2
class Beam:
'''
Get natural frequencies and shapes of modes
'''
def __init__(self, node_number, mass_ratio, top_tension,
             length, diameter, bending_stiffness, horizontal, tension_include_boyancy):
    """Tensioned beam modelled by FEM; modes are computed on construction.

    Args:
        node_number: number of FEM nodes along the beam.
        mass_ratio: structural mass / displaced-water mass per unit length.
        top_tension: axial tension at the top end (height = length).
        length: beam length.
        diameter: outer diameter (sets the displaced-water mass).
        bending_stiffness: EI of the cross-section.
        horizontal: if True the tension is uniform along the beam;
            otherwise it decreases with depth due to self-weight.
        tension_include_boyancy: if True, buoyancy is subtracted from the
            weight when building the tension profile ("boyancy" spelling
            kept from the original API).
    """
    self._node_number = node_number
    self._mass_ratio = mass_ratio
    self._top_tension = top_tension
    self._horizontal = horizontal
    self._tension_include_boyancy = tension_include_boyancy
    self._length = length
    self._diameter = diameter
    self._bending_stiffness = bending_stiffness
    # Eagerly solve the eigenproblem so the properties are always valid.
    self._calculate_modes()
def _calculate_modes(self):
    '''
    Assemble the FEM mass/stiffness matrices and solve the eigenproblem
    for natural frequencies (Hz) and normalised mode shapes.

    Uses 2-DOF-per-node (deflection + rotation) beam elements with a
    consistent mass matrix and a geometric (tension) stiffness term.
    '''
    # element length
    self._node_spacing = self._length / (self._node_number - 1)
    l = self._node_spacing
    # element mid-point heights, measured from the bottom (height 0).
    heights = numpy.linspace(
        self._length, 0, num=self._node_number, endpoint=True)
    heights = (heights[:-1] + heights[1:]) / 2
    # mass of displaced water per unit length: pi*D^2/4 * rho_water.
    water_density_per_length = numpy.pi * self._diameter**2 / 4 * WATER_DENSITY
    density_minus_water = (self._mass_ratio - 1) * water_density_per_length
    density = self._mass_ratio * water_density_per_length
    n_dof = self._node_number * 2
    # global mass matrix
    M = numpy.matlib.zeros((n_dof, n_dof))
    # global stiffness matrix (bending + geometric terms added below)
    K = numpy.matlib.zeros((n_dof, n_dof))
    # element bending stiffness, scaled by EI/l^3.
    ke = numpy.matrix([
        [12, 6 * l, -12, 6 * l], [6 * l, 4 * l**2, -6 * l, 2 * l**2],
        [-12, -6 * l, 12, -6 * l], [6 * l, 2 * l**2, -6 * l, 4 * l**2]
    ]) * self._bending_stiffness / l**3
    # consistent element mass matrix, scaled by rho*l/420.
    me = numpy.matrix([
        [156, 22 * l, 54, -13 * l],
        [22 * l, 4 * l**2, 13 * l, -3 * l**2],
        [54, 13 * l, 156, -22 * l],
        [-13 * l, -3 * l**2, -22 * l, 4 * l**2],
    ]) * density * l / 420
    # geometric stiffness matrix element (tension contribution)
    kge_ = numpy.matrix(
        [[36, 3 * l, -36, 3 * l], [3 * l, 4 * l**2, -3 * l, -l**2],
         [-36, -3 * l, 36, -3 * l], [3 * l, -l**2, -3 * l, 4 * l**2]])
    for i, height in enumerate(heights):
        # Per-element tension: uniform when horizontal, otherwise reduced
        # by the (optionally buoyancy-corrected) weight below the top.
        T = self._top_tension
        if self._horizontal is False:
            if self._tension_include_boyancy:
                T = self._top_tension - density_minus_water * \
                    GRAVITATIONAL_ACCELERATION * height
            else:
                T = self._top_tension - density * \
                    GRAVITATIONAL_ACCELERATION * height
        kge = kge_ * T / l / 30
        # S scatters the 4x4 element matrices into the global DOF layout.
        S = numpy.matlib.zeros((4, 2 * self._node_number))
        for j in range(4):
            S[j, 2 * i + j] = 1
        K += S.T * (ke + kge) * S
        M += S.T * me * S
    # Remove the first DOF and the second-to-last remaining DOF —
    # presumably the end deflections (pinned ends); confirm against the
    # element DOF ordering.
    self.K = K[1:, 1:]
    self.K = numpy.delete(self.K, numpy.s_[-2], 0)
    self.K = numpy.delete(self.K, numpy.s_[-2], 1)
    self.M = M[1:, 1:]
    self.M = numpy.delete(self.M, numpy.s_[-2], 0)
    self.M = numpy.delete(self.M, numpy.s_[-2], 1)
    invM = numpy.linalg.inv(self.M)
    invMK = invM * self.K
    # Eigenvalues of M^-1 K are omega^2; eigenvectors are mode shapes.
    w, shapes = numpy.linalg.eig(invMK)
    # Re-insert zero rows where the boundary DOFs were deleted.
    zero_r = numpy.zeros((1, shapes.shape[1]))
    shapes = numpy.r_[zero_r, shapes[:-1, :], zero_r, shapes[-1, :]]
    # Sort modes by ascending frequency.
    order = numpy.argsort(w)
    w = numpy.sqrt(w[order].real)
    shapes = shapes[:, order]
    # Keep only the interior deflection DOFs (every second row).
    # python slice from 0
    # so the first one must be included
    shapes = shapes[2:-3:2, :]
    shapes = shapes[:, :shapes.shape[0]]
    # Normalise each mode to unit max amplitude and zero-pad the ends.
    shapes_max = numpy.amax(numpy.abs(shapes), axis=0)
    zero_r = numpy.zeros((1, shapes.shape[1]))
    shapes = numpy.r_[zero_r, shapes, zero_r]
    self._shapes = shapes / shapes_max
    # Sign convention: first mode positive at mid-span.
    if self._shapes[self._shapes.shape[0]//2, 0] < 0:
        self._shapes *= -1
    # rad/s -> Hz
    self._natural_frequencies = w / numpy.pi / 2
@property
def shapes(self):
    '''
    Modal shape matrix indexed [node_number, mode_number]: rows follow
    the nodes along the span, columns are modes (unit max amplitude).
    '''
    return self._shapes

@property
def natural_frequencies(self):
    '''
    Natural frequencies in Hz, sorted ascending (rank-1 array).
    '''
    return self._natural_frequencies

@property
def diameter(self):
    '''
    Outer diameter of the beam.
    '''
    return self._diameter

@property
def node_number(self):
    '''
    Number of FEM nodes along the beam.
    '''
    return self._node_number

@property
def node_spacing(self):
    '''
    Distance between adjacent nodes: length / (node_number - 1).
    '''
    return self._node_spacing
def plot_modal_shapes(self, out_filenames, max_order):
    '''
    Plot modal shapes up to max_order and save the figure.

    Parameters
    ----------
    out_filenames : string list.
        A list of filenames for figure saving (the same figure is saved
        once per name, e.g. for multiple formats).
    max_order : int.
        The max order up to which the figure is plotted.
    '''
    print('plot_modal_shapes')
    matplotlib.pyplot.clf()
    matplotlib.pyplot.gcf().set_size_inches(
        style.SINGLE_COLUMN_WIDTH, style.SINGLE_COLUMN_SHORT_HEIGHT)
    matplotlib.pyplot.xlabel(r'$z\cdot L^{-1}$')
    matplotlib.pyplot.ylabel(r'$\phi^m$')
    matplotlib.pyplot.grid()
    # Normalised span coordinate, one point per node.
    z = numpy.linspace(0, 1, num=self._node_number)
    matplotlib.pyplot.plot(z, self.shapes[:, :max_order])
    matplotlib.pyplot.tight_layout()
    for out_filename in out_filenames:
        matplotlib.pyplot.savefig(out_filename) | 0.811974 | 0.390127 |