hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7264187ecb771c3ebeca5e866aa88ceae28828f | 1,923 | py | Python | src/tfchain/polyfill/encoding/ipaddr.py | GlenDC/threefold-wallet-electron | 440662a793d98781eb3bbf415ba8a482abed0288 | [
"MIT"
] | null | null | null | src/tfchain/polyfill/encoding/ipaddr.py | GlenDC/threefold-wallet-electron | 440662a793d98781eb3bbf415ba8a482abed0288 | [
"MIT"
] | 201 | 2019-05-20T15:06:05.000Z | 2019-07-16T12:48:59.000Z | src/tfchain/polyfill/encoding/ipaddr.py | GlenDC/threefold-wallet-electron | 440662a793d98781eb3bbf415ba8a482abed0288 | [
"MIT"
] | 1 | 2019-12-20T21:45:39.000Z | 2019-12-20T21:45:39.000Z | from tfchain.polyfill.encoding.jsmods.ipaddrjs import api as ipaddrjs
import tfchain.polyfill.array as jsarr
class IPAddress:
    """Thin wrapper around the JavaScript ``ipaddrjs`` library (Transcrypt code).

    Accepts an address given as a string (e.g. "127.0.0.1" or "::1"), as raw
    bytes (bytes / bytearray / JS Uint8Array) or as another IPAddress.
    Supports both IPv4 and IPv6 addresses.
    """

    def __init__(self, value):
        """Parse ``value`` into an ipaddrjs address object.

        Raises ValueError when ipaddrjs rejects the input, TypeError for
        unsupported input types.
        """
        if isinstance(value, str):
            v = None
            err = None
            # Transcrypt pragma: the embedded JavaScript assigns to the
            # local variables v/err declared just above.
            __pragma__("js", "{}", """
            try {
                v = ipaddrjs.parse(value);
            } catch(e) {
                err = e;
            }
            """)
            if err != None:
                raise ValueError("invalid str value {}: {}".format(value, err))
            self._value = v
        elif isinstance(value, (bytes, bytearray)) or jsarr.is_uint8_array(value):
            v = None
            err = None
            # Same pattern as above, but decoding from a raw byte array.
            __pragma__("js", "{}", """
            try {
                v = ipaddrjs.fromByteArray(value);
            } catch(e) {
                err = e;
            }
            """)
            if err != None:
                raise ValueError("invalid raw value {}: {}".format(value, err))
            self._value = v
        elif isinstance(value, IPAddress):
            # Copy constructor: share the underlying ipaddrjs object.
            self._value = value.value
        else:
            raise TypeError("value {} of type {} is not supported as an IPAddress".format(value, type(value)))

    @property
    def value(self):
        """Underlying ipaddrjs address object."""
        return self._value

    def is_ipv4(self):
        """Return True when the wrapped address is an IPv4 address."""
        result = None
        v = self._value
        __pragma__("js", "{}", """
        result = v.constructor === ipaddrjs.IPv4;
        """)
        return result

    def is_ipv6(self):
        """Return True when the wrapped address is an IPv6 address."""
        result = None
        v = self._value
        __pragma__("js", "{}", """
        result = v.constructor === ipaddrjs.IPv6;
        """)
        return result

    def __str__(self):
        return self._value.toString()

    def str(self):
        """Explicit alias for ``str(self)`` (convenient from JS callers)."""
        return self.__str__()

    def bytes(self):
        """Return the address as a Uint8Array of raw bytes."""
        v = self._value
        __pragma__("js", "{}", """
        v = new Uint8Array(v.toByteArray());
        """)
        return v
| 27.869565 | 110 | 0.481019 | from tfchain.polyfill.encoding.jsmods.ipaddrjs import api as ipaddrjs
import tfchain.polyfill.array as jsarr
class IPAddress:
def __init__(self, value):
if isinstance(value, str):
v = None
err = None
__pragma__("js", "{}", """
try {
v = ipaddrjs.parse(value);
} catch(e) {
err = e;
}
""")
if err != None:
raise ValueError("invalid str value {}: {}".format(value, err))
self._value = v
elif isinstance(value, (bytes, bytearray)) or jsarr.is_uint8_array(value):
v = None
err = None
__pragma__("js", "{}", """
try {
v = ipaddrjs.fromByteArray(value);
} catch(e) {
err = e;
}
""")
if err != None:
raise ValueError("invalid raw value {}: {}".format(value, err))
self._value = v
elif isinstance(value, IPAddress):
self._value = value.value
else:
raise TypeError("value {} of type {} is not supported as an IPAddress".format(value, type(value)))
@property
def value(self):
return self._value
def is_ipv4(self):
result = None
v = self._value
__pragma__("js", "{}", """
result = v.constructor === ipaddrjs.IPv4;
""")
return result
def is_ipv6(self):
result = None
v = self._value
__pragma__("js", "{}", """
result = v.constructor === ipaddrjs.IPv6;
""")
return result
def __str__(self):
return self._value.toString()
def str(self):
return self.__str__()
def bytes(self):
v = self._value
__pragma__("js", "{}", """
v = new Uint8Array(v.toByteArray());
""")
return v
| true | true |
f7264223ea3b2e4a8450d6eba91beec89e57b290 | 15,290 | py | Python | tensorflow/network.py | EricPedley/FCRN-DepthPrediction | 93aaed329e9e071c6d5c5a59e77a73a09684b156 | [
"BSD-2-Clause"
] | null | null | null | tensorflow/network.py | EricPedley/FCRN-DepthPrediction | 93aaed329e9e071c6d5c5a59e77a73a09684b156 | [
"BSD-2-Clause"
] | null | null | null | tensorflow/network.py | EricPedley/FCRN-DepthPrediction | 93aaed329e9e071c6d5c5a59e77a73a09684b156 | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
import tensorflow.compat.v1 as tf
# This file is written against the TF1 graph/variable-scope API throughout.
tf.disable_v2_behavior()
# ----------------------------------------------------------------------------------
# Commonly used layers and operations based on ethereon's implementation
# https://github.com/ethereon/caffe-tensorflow
# Slight modifications may apply. FCRN-specific operations have also been appended.
# ----------------------------------------------------------------------------------
# Thanks to *Helisa Dhamo* for the model conversion and integration into TensorFlow.
# ----------------------------------------------------------------------------------
# Default spatial padding mode used by the conv/pool layer helpers below.
DEFAULT_PADDING = 'SAME'
def get_incoming_shape(incoming):
    """Return the shape of *incoming* as a Python sequence.

    For a ``tf.Tensor`` the static shape is returned as a list (may contain
    ``None`` for unknown dimensions); for arrays/lists/tuples the shape is
    returned via ``np.shape``.

    Raises an Exception for unsupported input types.
    """
    # isinstance replaces the original `type(incoming) in [np.array, ...]`
    # check: `np.array` is a function, not a type, so ndarrays (and any
    # list/tuple subclass) could never match and fell through to the error.
    if isinstance(incoming, (np.ndarray, list, tuple)):
        return np.shape(incoming)
    if isinstance(incoming, tf.Tensor):
        return incoming.get_shape().as_list()
    raise Exception("Invalid incoming layer.")
def interleave(tensors, axis):
    """Interleave equally-shaped tensors element-wise along ``axis``.

    Stacks the tensors on a new dimension inserted right after ``axis`` and
    reshapes, so the output size along ``axis`` is ``len(tensors)`` times the
    input size.  Used by ``unpool_as_conv`` to merge the four up-convolution
    feature maps (batch dimension becomes -1 in the reshape).
    """
    old_shape = get_incoming_shape(tensors[0])[1:]
    new_shape = [-1] + old_shape
    new_shape[axis] *= len(tensors)
    return tf.reshape(tf.stack(tensors, axis + 1), new_shape)
def layer(op):
    '''Decorator for composable network layers.

    Wraps a layer op so that its input is taken from the network's current
    terminal nodes, its output is registered in the layer table under a
    unique name, and the network itself is returned for call chaining.
    '''

    def layer_decorated(self, *args, **kwargs):
        # Make sure the op receives a name, auto-generating one when absent.
        name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
        # Resolve this layer's input from the pending terminal nodes.
        if not self.terminals:
            raise RuntimeError('No input variables found for layer %s.' % name)
        layer_input = self.terminals[0] if len(self.terminals) == 1 else list(self.terminals)
        # Run the wrapped op and record its output under the chosen name.
        layer_output = op(self, layer_input, *args, **kwargs)
        self.layers[name] = layer_output
        # The output becomes the input of the next chained layer.
        self.feed(layer_output)
        # Return self so calls can be chained fluently.
        return self

    return layer_decorated
class Network(object):
    """Base class for graph-building networks (ported from caffe-tensorflow).

    Subclasses implement ``setup()`` and build the TF1 graph by chaining
    ``@layer``-decorated ops through ``self.feed(...)``.  FCRN-specific
    up-projection blocks (``unpool_as_conv`` / ``up_project``) are appended
    at the bottom.
    """

    def __init__(self, inputs, batch, keep_prob, is_training, trainable = True):
        """Store configuration and immediately build the graph via setup().

        inputs: mapping of input names to tensors (seeds the layer table).
        batch: batch size.  keep_prob: dropout keep probability.
        is_training: Python bool selecting train/inference batch-norm paths.
        trainable: if true, the created variables are trainable.
        """
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        self.batch_size = batch
        self.keep_prob = keep_prob
        self.is_training = is_training
        self.setup()

    def setup(self):
        '''Construct the network. '''
        raise NotImplementedError('Must be implemented by the subclass.')

    def load(self, data_path, session, ignore_missing=False):
        '''Load network weights.
        data_path: The path to the numpy-serialized network weights
        session: The current TensorFlow session
        ignore_missing: If true, serialized weights for missing layers are ignored.
        '''
        # NOTE(review): recent NumPy versions require allow_pickle=True for
        # object arrays here — confirm against the pinned NumPy version.
        data_dict = np.load(data_path, encoding='latin1').item()
        for op_name in data_dict:
            # Variables were created under a scope named after the layer.
            with tf.variable_scope(op_name, reuse=True):
                for param_name, data in iter(data_dict[op_name].items()):
                    try:
                        var = tf.get_variable(param_name)
                        session.run(var.assign(data))
                    except ValueError:
                        if not ignore_missing:
                            raise

    def feed(self, *args):
        '''Set the input(s) for the next operation by replacing the terminal nodes.
        The arguments can be either layer names or the actual layers.
        '''
        assert len(args) != 0
        self.terminals = []
        for fed_layer in args:
            if isinstance(fed_layer, str):
                # Resolve layer names through the layer table.
                try:
                    fed_layer = self.layers[fed_layer]
                except KeyError:
                    raise KeyError('Unknown layer name fed: %s' % fed_layer)
            self.terminals.append(fed_layer)
        return self

    def get_output(self):
        '''Returns the current network output.'''
        return self.terminals[-1]

    def get_layer_output(self, name):
        """Return the output tensor of the layer registered under ``name``."""
        return self.layers[name]

    def get_unique_name(self, prefix):
        '''Returns an index-suffixed unique name for the given prefix.
        This is used for auto-generating layer names based on the type-prefix.
        '''
        ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
        return '%s_%d' % (prefix, ident)

    def make_var(self, name, shape):
        '''Creates a new TensorFlow variable.'''
        return tf.get_variable(name, shape, dtype = 'float32', trainable=self.trainable)

    def validate_padding(self, padding):
        '''Verifies that the padding is one of the supported ones.'''
        assert padding in ('SAME', 'VALID')

    @layer
    def conv(self,
             input_data,
             k_h,
             k_w,
             c_o,
             s_h,
             s_w,
             name,
             relu=True,
             padding=DEFAULT_PADDING,
             group=1,
             biased=True):
        """2-D convolution: k_h x k_w kernel, c_o output channels, s_h/s_w strides.

        'SAME' padding is emulated with an explicit symmetric tf.pad followed
        by a VALID convolution, so the result is deterministic w.r.t. padding
        placement.  ``group`` splits channels into independent convolutions.
        """
        # Verify that the padding is acceptable
        self.validate_padding(padding)
        # Get the number of channels in the input
        c_i = input_data.get_shape()[-1]

        if (padding == 'SAME'):
            input_data = tf.pad(input_data, [[0, 0], [(k_h - 1)//2, (k_h - 1)//2], [(k_w - 1)//2, (k_w - 1)//2], [0, 0]], "CONSTANT")

        # Verify that the grouping parameter is valid
        assert c_i % group == 0
        assert c_o % group == 0
        # Convolution for a given input and kernel
        convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding='VALID')
        with tf.variable_scope(name) as scope:
            kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])
            if group == 1:
                # This is the common-case. Convolve the input without any further complications.
                output = convolve(input_data, kernel)
            else:
                # NOTE(review): tf.split/tf.concat below use the pre-TF-1.0
                # argument order; under tf.compat.v1 the group > 1 path looks
                # broken — confirm whether grouped convolutions are ever used.
                # Split the input into groups and then convolve each of them independently
                input_groups = tf.split(3, group, input_data)
                kernel_groups = tf.split(3, group, kernel)
                output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
                # Concatenate the groups
                output = tf.concat(3, output_groups)
            # Add the biases
            if biased:
                biases = self.make_var('biases', [c_o])
                output = tf.nn.bias_add(output, biases)
            if relu:
                # ReLU non-linearity
                output = tf.nn.relu(output, name=scope.name)
            return output

    @layer
    def relu(self, input_data, name):
        """Element-wise ReLU layer."""
        return tf.nn.relu(input_data, name=name)

    @layer
    def max_pool(self, input_data, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
        """Max pooling with a k_h x k_w window and s_h/s_w strides."""
        self.validate_padding(padding)
        return tf.nn.max_pool(input_data,
                              ksize=[1, k_h, k_w, 1],
                              strides=[1, s_h, s_w, 1],
                              padding=padding,
                              name=name)

    @layer
    def avg_pool(self, input_data, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
        """Average pooling with a k_h x k_w window and s_h/s_w strides."""
        self.validate_padding(padding)
        return tf.nn.avg_pool(input_data,
                              ksize=[1, k_h, k_w, 1],
                              strides=[1, s_h, s_w, 1],
                              padding=padding,
                              name=name)

    @layer
    def lrn(self, input_data, radius, alpha, beta, name, bias=1.0):
        """Local response normalization (AlexNet-style)."""
        return tf.nn.local_response_normalization(input_data,
                                                  depth_radius=radius,
                                                  alpha=alpha,
                                                  beta=beta,
                                                  bias=bias,
                                                  name=name)

    @layer
    def concat(self, inputs, axis, name):
        """Concatenate the fed inputs along ``axis``.

        NOTE(review): ``concat_dim`` is the pre-TF-1.0 keyword; under
        tf.compat.v1 this would need ``axis=`` — confirm this layer is used.
        """
        return tf.concat(concat_dim=axis, values=inputs, name=name)

    @layer
    def add(self, inputs, name):
        """Element-wise sum of the fed inputs."""
        return tf.add_n(inputs, name=name)

    @layer
    def fc(self, input_data, num_out, name, relu=True):
        """Fully-connected layer with ``num_out`` outputs.

        Spatial (rank-4) inputs are flattened to 2-D before the matmul.
        """
        with tf.variable_scope(name) as scope:
            input_shape = input_data.get_shape()
            if input_shape.ndims == 4:
                # The input is spatial. Vectorize it first.
                dim = 1
                for d in input_shape[1:].as_list():
                    dim *= d
                feed_in = tf.reshape(input_data, [-1, dim])
            else:
                feed_in, dim = (input_data, input_shape[-1].value)
            weights = self.make_var('weights', shape=[dim, num_out])
            biases = self.make_var('biases', [num_out])
            # Fuse the ReLU into the matmul op when requested.
            op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
            fc = op(feed_in, weights, biases, name=scope.name)
            return fc

    @layer
    def softmax(self, input_data, name):
        """Softmax over the last dimension, squeezing 1x1 spatial dims first.

        NOTE(review): under Python 3 ``map()`` has no ``len()``/indexing, so
        the rank check below would raise TypeError for rank > 2 inputs —
        wrap in list() if this layer is exercised.
        """
        input_shape = map(lambda v: v.value, input_data.get_shape())
        if len(input_shape) > 2:
            # For certain models (like NiN), the singleton spatial dimensions
            # need to be explicitly squeezed, since they're not broadcast-able
            # in TensorFlow's NHWC ordering (unlike Caffe's NCHW).
            if input_shape[1] == 1 and input_shape[2] == 1:
                input_data = tf.squeeze(input_data, squeeze_dims=[1, 2])
            else:
                raise ValueError('Rank 2 tensor input expected for softmax!')
        return tf.nn.softmax(input_data, name)

    @layer
    def batch_normalization(self, input_data, name, scale_offset=True, relu=False):
        """Batch normalization with hand-rolled moving-average statistics.

        During training the per-batch moments are used and the population
        mean/variance are updated with exponential decay; at inference the
        stored population statistics are used.
        """
        with tf.variable_scope(name) as scope:
            shape = [input_data.get_shape()[-1]]
            # Population statistics (updated, not trained by gradients).
            pop_mean = tf.get_variable("mean", shape, initializer = tf.constant_initializer(0.0), trainable=False)
            pop_var = tf.get_variable("variance", shape, initializer = tf.constant_initializer(1.0), trainable=False)
            epsilon = 1e-4
            decay = 0.999
            if scale_offset:
                scale = tf.get_variable("scale", shape, initializer = tf.constant_initializer(1.0))
                offset = tf.get_variable("offset", shape, initializer = tf.constant_initializer(0.0))
            else:
                scale, offset = (None, None)
            if self.is_training:
                # Moments over batch and both spatial dimensions (NHWC).
                batch_mean, batch_var = tf.nn.moments(input_data, [0, 1, 2])

                train_mean = tf.assign(pop_mean,
                                       pop_mean * decay + batch_mean * (1 - decay))
                train_var = tf.assign(pop_var,
                                      pop_var * decay + batch_var * (1 - decay))
                # Force the moving-average updates to run with the forward pass.
                with tf.control_dependencies([train_mean, train_var]):
                    output = tf.nn.batch_normalization(input_data,
                        batch_mean, batch_var, offset, scale, epsilon, name = name)
            else:
                output = tf.nn.batch_normalization(input_data,
                    pop_mean, pop_var, offset, scale, epsilon, name = name)
            if relu:
                output = tf.nn.relu(output)
            return output

    @layer
    def dropout(self, input_data, keep_prob, name):
        """Dropout layer parameterized by the keep probability."""
        return tf.nn.dropout(input_data, keep_prob, name=name)

    def unpool_as_conv(self, size, input_data, id, stride = 1, ReLU = False, BN = True):
        """Fast up-convolution: four convolutions interleaved into a 2x map.

        Model upconvolutions (unpooling + convolution) as interleaving feature
        maps of four convolutions (A,B,C,D). Building block for up-projections.
        ``size[3]`` is the number of output channels; ``id`` names the layers.
        """

        # Convolution A (3x3)
        # --------------------------------------------------
        layerName = "layer%s_ConvA" % (id)
        self.feed(input_data)
        self.conv( 3, 3, size[3], stride, stride, name = layerName, padding = 'SAME', relu = False)
        outputA = self.get_output()

        # Convolution B (2x3)
        # --------------------------------------------------
        layerName = "layer%s_ConvB" % (id)
        padded_input_B = tf.pad(input_data, [[0, 0], [1, 0], [1, 1], [0, 0]], "CONSTANT")
        self.feed(padded_input_B)
        self.conv(2, 3, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)
        outputB = self.get_output()

        # Convolution C (3x2)
        # --------------------------------------------------
        layerName = "layer%s_ConvC" % (id)
        padded_input_C = tf.pad(input_data, [[0, 0], [1, 1], [1, 0], [0, 0]], "CONSTANT")
        self.feed(padded_input_C)
        self.conv(3, 2, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)
        outputC = self.get_output()

        # Convolution D (2x2)
        # --------------------------------------------------
        layerName = "layer%s_ConvD" % (id)
        padded_input_D = tf.pad(input_data, [[0, 0], [1, 0], [1, 0], [0, 0]], "CONSTANT")
        self.feed(padded_input_D)
        self.conv(2, 2, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)
        outputD = self.get_output()

        # Interleaving elements of the four feature maps
        # --------------------------------------------------
        left = interleave([outputA, outputB], axis=1)  # columns
        right = interleave([outputC, outputD], axis=1)  # columns
        Y = interleave([left, right], axis=2) # rows

        if BN:
            layerName = "layer%s_BN" % (id)
            self.feed(Y)
            self.batch_normalization(name = layerName, scale_offset = True, relu = False)
            Y = self.get_output()

        if ReLU:
            Y = tf.nn.relu(Y, name = layerName)

        return Y

    def up_project(self, size, id, stride = 1, BN = True):
        """Residual up-projection block (FCRN): doubles the spatial resolution.

        Two branches (conv path and projection path) are up-sampled via
        ``unpool_as_conv``, summed, and passed through a ReLU.
        """

        # Create residual upsampling layer (UpProjection)

        input_data = self.get_output()

        # Branch 1
        id_br1 = "%s_br1" % (id)

        # Interleaving Convs of 1st branch
        out = self.unpool_as_conv(size, input_data, id_br1, stride, ReLU=True, BN=True)

        # Convolution following the upProjection on the 1st branch
        layerName = "layer%s_Conv" % (id)
        self.feed(out)
        self.conv(size[0], size[1], size[3], stride, stride, name = layerName, relu = False)

        if BN:
            layerName = "layer%s_BN" % (id)
            self.batch_normalization(name = layerName, scale_offset=True, relu = False)

        # Output of 1st branch
        branch1_output = self.get_output()

        # Branch 2
        id_br2 = "%s_br2" % (id)
        # Interleaving convolutions and output of 2nd branch
        branch2_output = self.unpool_as_conv(size, input_data, id_br2, stride, ReLU=False)

        # sum branches
        layerName = "layer%s_Sum" % (id)
        output = tf.add_n([branch1_output, branch2_output], name = layerName)
        # ReLU
        layerName = "layer%s_ReLU" % (id)
        output = tf.nn.relu(output, name=layerName)

        self.feed(output)
        return self
| 39.205128 | 133 | 0.548136 | import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# https://github.com/ethereon/caffe-tensorflow
# Slight modifications may apply. FCRN-specific operations have also been appended.
# ----------------------------------------------------------------------------------
# Thanks to *Helisa Dhamo* for the model conversion and integration into TensorFlow.
# ----------------------------------------------------------------------------------
DEFAULT_PADDING = 'SAME'
def get_incoming_shape(incoming):
if isinstance(incoming, tf.Tensor):
return incoming.get_shape().as_list()
elif type(incoming) in [np.array, list, tuple]:
return np.shape(incoming)
else:
raise Exception("Invalid incoming layer.")
def interleave(tensors, axis):
old_shape = get_incoming_shape(tensors[0])[1:]
new_shape = [-1] + old_shape
new_shape[axis] *= len(tensors)
return tf.reshape(tf.stack(tensors, axis + 1), new_shape)
def layer(op):
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0:
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
class Network(object):
def __init__(self, inputs, batch, keep_prob, is_training, trainable = True):
# The input nodes for this network
self.inputs = inputs
# The current list of terminal nodes
self.terminals = []
# Mapping from layer names to layers
self.layers = dict(inputs)
# If true, the resulting variables are set as trainable
self.trainable = trainable
self.batch_size = batch
self.keep_prob = keep_prob
self.is_training = is_training
self.setup()
def setup(self):
raise NotImplementedError('Must be implemented by the subclass.')
def load(self, data_path, session, ignore_missing=False):
data_dict = np.load(data_path, encoding='latin1').item()
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in iter(data_dict[op_name].items()):
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, str):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self
def get_output(self):
return self.terminals[-1]
def get_layer_output(self, name):
return self.layers[name]
def get_unique_name(self, prefix):
ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
return '%s_%d' % (prefix, ident)
def make_var(self, name, shape):
return tf.get_variable(name, shape, dtype = 'float32', trainable=self.trainable)
def validate_padding(self, padding):
assert padding in ('SAME', 'VALID')
@layer
def conv(self,
input_data,
k_h,
k_w,
c_o,
s_h,
s_w,
name,
relu=True,
padding=DEFAULT_PADDING,
group=1,
biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = input_data.get_shape()[-1]
if (padding == 'SAME'):
input_data = tf.pad(input_data, [[0, 0], [(k_h - 1)//2, (k_h - 1)//2], [(k_w - 1)//2, (k_w - 1)//2], [0, 0]], "CONSTANT")
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding='VALID')
with tf.variable_scope(name) as scope:
kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])
if group == 1:
# This is the common-case. Convolve the input without any further complications.
output = convolve(input_data, kernel)
else:
# Split the input into groups and then convolve each of them independently
input_groups = tf.split(3, group, input_data)
kernel_groups = tf.split(3, group, kernel)
output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
# Concatenate the groups
output = tf.concat(3, output_groups)
# Add the biases
if biased:
biases = self.make_var('biases', [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def relu(self, input_data, name):
return tf.nn.relu(input_data, name=name)
@layer
def max_pool(self, input_data, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.nn.max_pool(input_data,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def avg_pool(self, input_data, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.nn.avg_pool(input_data,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def lrn(self, input_data, radius, alpha, beta, name, bias=1.0):
return tf.nn.local_response_normalization(input_data,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias,
name=name)
@layer
def concat(self, inputs, axis, name):
return tf.concat(concat_dim=axis, values=inputs, name=name)
@layer
def add(self, inputs, name):
return tf.add_n(inputs, name=name)
@layer
def fc(self, input_data, num_out, name, relu=True):
with tf.variable_scope(name) as scope:
input_shape = input_data.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = 1
for d in input_shape[1:].as_list():
dim *= d
feed_in = tf.reshape(input_data, [-1, dim])
else:
feed_in, dim = (input_data, input_shape[-1].value)
weights = self.make_var('weights', shape=[dim, num_out])
biases = self.make_var('biases', [num_out])
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, biases, name=scope.name)
return fc
@layer
def softmax(self, input_data, name):
input_shape = map(lambda v: v.value, input_data.get_shape())
if len(input_shape) > 2:
# For certain models (like NiN), the singleton spatial dimensions
# need to be explicitly squeezed, since they're not broadcast-able
if input_shape[1] == 1 and input_shape[2] == 1:
input_data = tf.squeeze(input_data, squeeze_dims=[1, 2])
else:
raise ValueError('Rank 2 tensor input expected for softmax!')
return tf.nn.softmax(input_data, name)
@layer
def batch_normalization(self, input_data, name, scale_offset=True, relu=False):
with tf.variable_scope(name) as scope:
shape = [input_data.get_shape()[-1]]
pop_mean = tf.get_variable("mean", shape, initializer = tf.constant_initializer(0.0), trainable=False)
pop_var = tf.get_variable("variance", shape, initializer = tf.constant_initializer(1.0), trainable=False)
epsilon = 1e-4
decay = 0.999
if scale_offset:
scale = tf.get_variable("scale", shape, initializer = tf.constant_initializer(1.0))
offset = tf.get_variable("offset", shape, initializer = tf.constant_initializer(0.0))
else:
scale, offset = (None, None)
if self.is_training:
batch_mean, batch_var = tf.nn.moments(input_data, [0, 1, 2])
train_mean = tf.assign(pop_mean,
pop_mean * decay + batch_mean * (1 - decay))
train_var = tf.assign(pop_var,
pop_var * decay + batch_var * (1 - decay))
with tf.control_dependencies([train_mean, train_var]):
output = tf.nn.batch_normalization(input_data,
batch_mean, batch_var, offset, scale, epsilon, name = name)
else:
output = tf.nn.batch_normalization(input_data,
pop_mean, pop_var, offset, scale, epsilon, name = name)
if relu:
output = tf.nn.relu(output)
return output
@layer
def dropout(self, input_data, keep_prob, name):
return tf.nn.dropout(input_data, keep_prob, name=name)
def unpool_as_conv(self, size, input_data, id, stride = 1, ReLU = False, BN = True):
layerName = "layer%s_ConvA" % (id)
self.feed(input_data)
self.conv( 3, 3, size[3], stride, stride, name = layerName, padding = 'SAME', relu = False)
outputA = self.get_output()
layerName = "layer%s_ConvB" % (id)
padded_input_B = tf.pad(input_data, [[0, 0], [1, 0], [1, 1], [0, 0]], "CONSTANT")
self.feed(padded_input_B)
self.conv(2, 3, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)
outputB = self.get_output()
layerName = "layer%s_ConvC" % (id)
padded_input_C = tf.pad(input_data, [[0, 0], [1, 1], [1, 0], [0, 0]], "CONSTANT")
self.feed(padded_input_C)
self.conv(3, 2, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)
outputC = self.get_output()
layerName = "layer%s_ConvD" % (id)
padded_input_D = tf.pad(input_data, [[0, 0], [1, 0], [1, 0], [0, 0]], "CONSTANT")
self.feed(padded_input_D)
self.conv(2, 2, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)
outputD = self.get_output()
left = interleave([outputA, outputB], axis=1)
right = interleave([outputC, outputD], axis=1)
Y = interleave([left, right], axis=2)
if BN:
layerName = "layer%s_BN" % (id)
self.feed(Y)
self.batch_normalization(name = layerName, scale_offset = True, relu = False)
Y = self.get_output()
if ReLU:
Y = tf.nn.relu(Y, name = layerName)
return Y
def up_project(self, size, id, stride = 1, BN = True):
input_data = self.get_output()
id_br1 = "%s_br1" % (id)
out = self.unpool_as_conv(size, input_data, id_br1, stride, ReLU=True, BN=True)
layerName = "layer%s_Conv" % (id)
self.feed(out)
self.conv(size[0], size[1], size[3], stride, stride, name = layerName, relu = False)
if BN:
layerName = "layer%s_BN" % (id)
self.batch_normalization(name = layerName, scale_offset=True, relu = False)
branch1_output = self.get_output()
id_br2 = "%s_br2" % (id)
branch2_output = self.unpool_as_conv(size, input_data, id_br2, stride, ReLU=False)
layerName = "layer%s_Sum" % (id)
output = tf.add_n([branch1_output, branch2_output], name = layerName)
layerName = "layer%s_ReLU" % (id)
output = tf.nn.relu(output, name=layerName)
self.feed(output)
return self
| true | true |
f726433b3c15d6223a75c1dacfab5a53d9b7791b | 350 | py | Python | app/__init__.py | ppyvras/flask_tutorial | 2c73d32c33fb80ef59bee8753500220afdd91cee | [
"MIT"
] | null | null | null | app/__init__.py | ppyvras/flask_tutorial | 2c73d32c33fb80ef59bee8753500220afdd91cee | [
"MIT"
] | null | null | null | app/__init__.py | ppyvras/flask_tutorial | 2c73d32c33fb80ef59bee8753500220afdd91cee | [
"MIT"
] | null | null | null | from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
# Create the Flask application and load its configuration object.
app = Flask(__name__)
app.config.from_object(Config)
# Database layer, schema-migration engine and login manager bound to the app.
db = SQLAlchemy(app)
migrate = Migrate(app, db)
login = LoginManager(app)
# Endpoint name Flask-Login redirects to when authentication is required.
login.login_view = 'login'

# Imported last so these modules can refer to the objects defined above
# (avoids a circular import at package load time).
from app import routes, models
| 20.588235 | 39 | 0.805714 | from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
login = LoginManager(app)
login.login_view = 'login'
from app import routes, models
| true | true |
f7264465fb955d98f2220ac0a57a91bd63fff024 | 1,331 | py | Python | xlsxwriter/test/comparison/test_cond_format06.py | eddiechapman/XlsxWriter | c636117ab30e64e4b7b824c9105595c42887c2c9 | [
"BSD-2-Clause-FreeBSD"
] | 2,766 | 2015-01-02T17:36:42.000Z | 2022-03-31T09:23:30.000Z | xlsxwriter/test/comparison/test_cond_format06.py | xiaolanmeng86/XlsxWriter | 6c3ea23a410e8216eab8f5751e5544ffb444b3da | [
"BSD-2-Clause-FreeBSD"
] | 683 | 2015-01-03T09:55:02.000Z | 2022-03-31T07:18:15.000Z | xlsxwriter/test/comparison/test_cond_format06.py | xiaolanmeng86/XlsxWriter | 6c3ea23a410e8216eab8f5751e5544ffb444b3da | [
"BSD-2-Clause-FreeBSD"
] | 636 | 2015-01-05T01:57:08.000Z | 2022-03-25T18:42:41.000Z | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # Reference workbook the generated output is compared against.
        self.set_filename('cond_format06.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with conditional formatting."""

        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()

        # Pattern fill applied by the conditional rule below.
        format1 = workbook.add_format({
            'pattern': 15,
            'fg_color': '#FF0000',
            'bg_color': '#FFFF00'
        })

        worksheet.write('A1', 10)
        worksheet.write('A2', 20)
        worksheet.write('A3', 30)
        worksheet.write('A4', 40)

        # Highlight A1 when its value is greater than 7.
        worksheet.conditional_format('A1',
                                     {'type': 'cell',
                                      'format': format1,
                                      'criteria': '>',
                                      'value': 7,
                                      })

        workbook.close()

        # Byte-level comparison of got vs. reference workbook.
        self.assertExcelEqual()
| 26.62 | 88 | 0.496619 | true | true | |
f7264528198aeeae3d454c3855027f09c988ee7a | 1,793 | py | Python | _unittests/ut_special/test_tsp_kohonen.py | mohamedelkansouli/Ensae_py | 8bc867bd2081c259c793fadfa8be5dcc7bd1400b | [
"MIT"
] | null | null | null | _unittests/ut_special/test_tsp_kohonen.py | mohamedelkansouli/Ensae_py | 8bc867bd2081c259c793fadfa8be5dcc7bd1400b | [
"MIT"
] | null | null | null | _unittests/ut_special/test_tsp_kohonen.py | mohamedelkansouli/Ensae_py | 8bc867bd2081c259c793fadfa8be5dcc7bd1400b | [
"MIT"
] | null | null | null | """
@brief test log(time=10s)
"""
import os
import sys
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, is_travis_or_appveyor
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
from src.ensae_teaching_cs.special.tsp_kohonen import pygame_simulation
from src.ensae_teaching_cs.helpers.video_helper import make_video
class TestTspKohonen(unittest.TestCase):

    def test_image_video_kohonen(self):
        """Run the Kohonen TSP pygame simulation, then build a video from the
        PNG frames it wrote to a temporary folder."""
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        temp = get_temp_folder(__file__, "temp_image_video_tsp_kohonen")
        if is_travis_or_appveyor() in ("travis",):
            # pygame.error: No available video device
            return
        # Imported lazily so the CI skip above happens before pygame loads.
        import pygame
        if is_travis_or_appveyor() == "circleci":
            # os.environ["SDL_VIDEODRIVER"] = "x11"
            flags = pygame.NOFRAME
        else:
            flags = 0
        # Far fewer iterations under a test runner to keep the test fast.
        pygame_simulation(pygame, fLOG=fLOG, folder=temp,
                          nb=200 if __name__ == "__main__" else 20,
                          size=(400, 250), flags=flags)
        files = os.listdir(temp)
        # The simulation must have produced a reasonable number of frames.
        assert len(files) > 9
        png = [os.path.join(temp, _)
               for _ in files if os.path.splitext(_)[-1] == ".png"]
        assert len(png) > 0
        out = os.path.join(temp, "tsp_kohonen.avi")
        v = make_video(png, out, size=(200, 125), format="XVID", fps=20)
        assert v is not None


if __name__ == "__main__":
    unittest.main()
| 28.015625 | 72 | 0.591746 | import os
import sys
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, is_travis_or_appveyor
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
from src.ensae_teaching_cs.special.tsp_kohonen import pygame_simulation
from src.ensae_teaching_cs.helpers.video_helper import make_video
class TestTspKohonen(unittest.TestCase):
def test_image_video_kohonen(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_image_video_tsp_kohonen")
if is_travis_or_appveyor() in ("travis",):
return
import pygame
if is_travis_or_appveyor() == "circleci":
flags = pygame.NOFRAME
else:
flags = 0
pygame_simulation(pygame, fLOG=fLOG, folder=temp,
nb=200 if __name__ == "__main__" else 20,
size=(400, 250), flags=flags)
files = os.listdir(temp)
assert len(files) > 9
png = [os.path.join(temp, _)
for _ in files if os.path.splitext(_)[-1] == ".png"]
assert len(png) > 0
out = os.path.join(temp, "tsp_kohonen.avi")
v = make_video(png, out, size=(200, 125), format="XVID", fps=20)
assert v is not None
if __name__ == "__main__":
unittest.main()
| true | true |
f7264580ec84ba5145f46f58d0d1265932dadf9b | 3,505 | py | Python | parler/tests/test_query_count.py | Yiling-J/django-parler | 23b8ae3348c05d4dded729389cc8129cd03d8c5d | [
"Apache-2.0"
] | 1 | 2020-01-25T05:23:00.000Z | 2020-01-25T05:23:00.000Z | parler/tests/test_query_count.py | Yiling-J/django-parler | 23b8ae3348c05d4dded729389cc8129cd03d8c5d | [
"Apache-2.0"
] | 3 | 2019-11-02T05:52:07.000Z | 2020-06-05T21:56:17.000Z | parler/tests/test_query_count.py | Yiling-J/django-parler | 23b8ae3348c05d4dded729389cc8129cd03d8c5d | [
"Apache-2.0"
] | 2 | 2019-06-10T21:45:05.000Z | 2019-07-10T17:16:35.000Z | import datetime as dt
from django.core.cache import cache
from django.utils import translation
from django.utils.timezone import now
from parler import appsettings
from .utils import AppTestCase, override_parler_settings
from .testapp.models import SimpleModel, DateTimeModel
class QueryCountTests(AppTestCase):
"""
Test model construction
"""
@classmethod
def setUpClass(cls):
super(QueryCountTests, cls).setUpClass()
cls.country_list = (
'Mexico',
'Monaco',
'Morocco',
'Netherlands',
'Norway',
'Poland',
'Portugal',
'Romania',
'Russia',
'South Africa',
)
for country in cls.country_list:
SimpleModel.objects.create(_current_language=cls.conf_fallback, tr_title=country)
DateTimeModel.objects.create(_current_language=cls.conf_fallback,
tr_title=country, datetime=now())
#def setUp(self):
# cache.clear()
def assertNumTranslatedQueries(self, num, qs, language_code=None):
# Use default language if available.
if language_code is None:
language_code = self.conf_fallback
# Easier to understand then a oneline lambda
# Using str(), not unicode() to be python 3 compatible.
def test_qs():
for obj in qs:
str(obj.tr_title)
# Queryset is not set to a language, the individual models
# will default to the currently active project language.
with translation.override(language_code):
self.assertNumQueries(num, test_qs)
def test_uncached_queries(self):
"""
Test that uncached queries work, albeit slowly.
"""
with override_parler_settings(PARLER_ENABLE_CACHING=False):
self.assertNumTranslatedQueries(1 + len(self.country_list), SimpleModel.objects.all())
def test_iteration_with_non_qs_methods(self):
"""
Test QuerySet methods that do not return QuerySets of models.
"""
# We have at least one object created in setUpClass.
obj = DateTimeModel.objects.all()[0]
self.assertEqual(
obj,
DateTimeModel.objects.language(self.conf_fallback).all()[0])
# Test iteration through QuerySet of non-model objects.
self.assertIsInstance(
DateTimeModel.objects.language(self.conf_fallback).dates(
'datetime', 'day')[0],
dt.date)
def test_prefetch_queries(self):
"""
Test that .prefetch_related() works
"""
with override_parler_settings(PARLER_ENABLE_CACHING=False):
self.assertNumTranslatedQueries(2, SimpleModel.objects.prefetch_related('translations'))
def test_model_cache_queries(self):
"""
Test that the ``_translations_cache`` works.
"""
cache.clear()
with override_parler_settings(PARLER_ENABLE_CACHING=False):
qs = SimpleModel.objects.all()
self.assertNumTranslatedQueries(1 + len(self.country_list), qs)
self.assertNumTranslatedQueries(0, qs) # All should be cached on the QuerySet and object now.
qs = SimpleModel.objects.prefetch_related('translations')
self.assertNumTranslatedQueries(2, qs)
self.assertNumTranslatedQueries(0, qs) # All should be cached on the QuerySet and object now.
| 34.029126 | 107 | 0.633666 | import datetime as dt
from django.core.cache import cache
from django.utils import translation
from django.utils.timezone import now
from parler import appsettings
from .utils import AppTestCase, override_parler_settings
from .testapp.models import SimpleModel, DateTimeModel
class QueryCountTests(AppTestCase):
@classmethod
def setUpClass(cls):
super(QueryCountTests, cls).setUpClass()
cls.country_list = (
'Mexico',
'Monaco',
'Morocco',
'Netherlands',
'Norway',
'Poland',
'Portugal',
'Romania',
'Russia',
'South Africa',
)
for country in cls.country_list:
SimpleModel.objects.create(_current_language=cls.conf_fallback, tr_title=country)
DateTimeModel.objects.create(_current_language=cls.conf_fallback,
tr_title=country, datetime=now())
def assertNumTranslatedQueries(self, num, qs, language_code=None):
if language_code is None:
language_code = self.conf_fallback
def test_qs():
for obj in qs:
str(obj.tr_title)
with translation.override(language_code):
self.assertNumQueries(num, test_qs)
def test_uncached_queries(self):
with override_parler_settings(PARLER_ENABLE_CACHING=False):
self.assertNumTranslatedQueries(1 + len(self.country_list), SimpleModel.objects.all())
def test_iteration_with_non_qs_methods(self):
obj = DateTimeModel.objects.all()[0]
self.assertEqual(
obj,
DateTimeModel.objects.language(self.conf_fallback).all()[0])
self.assertIsInstance(
DateTimeModel.objects.language(self.conf_fallback).dates(
'datetime', 'day')[0],
dt.date)
def test_prefetch_queries(self):
with override_parler_settings(PARLER_ENABLE_CACHING=False):
self.assertNumTranslatedQueries(2, SimpleModel.objects.prefetch_related('translations'))
def test_model_cache_queries(self):
cache.clear()
with override_parler_settings(PARLER_ENABLE_CACHING=False):
qs = SimpleModel.objects.all()
self.assertNumTranslatedQueries(1 + len(self.country_list), qs)
self.assertNumTranslatedQueries(0, qs)
qs = SimpleModel.objects.prefetch_related('translations')
self.assertNumTranslatedQueries(2, qs)
self.assertNumTranslatedQueries(0, qs)
| true | true |
f72645db2eb553529b0393f9bc851543b325fd14 | 3,880 | py | Python | webots_ros2_universal_robot/webots_ros2_universal_robot/follow_joint_trajectory_client.py | TaoYibo1866/webots_ros2 | a72c164825663cebbfd27e0649ea51d3abf9bbed | [
"Apache-2.0"
] | null | null | null | webots_ros2_universal_robot/webots_ros2_universal_robot/follow_joint_trajectory_client.py | TaoYibo1866/webots_ros2 | a72c164825663cebbfd27e0649ea51d3abf9bbed | [
"Apache-2.0"
] | 6 | 2019-08-09T08:04:37.000Z | 2019-08-14T15:05:35.000Z | webots_ros2_universal_robot/webots_ros2_universal_robot/follow_joint_trajectory_client.py | omichel/webots_ros2 | 5b59d0b1fbeff4c3f75a447bd152c10853f4691b | [
"Apache-2.0"
] | null | null | null | # Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic client for the FollowJointTrajectory action used for multi-robot demonstration."""
from action_msgs.msg import GoalStatus
from control_msgs.action import FollowJointTrajectory
from control_msgs.msg import JointTrajectoryControllerState
from trajectory_msgs.msg import JointTrajectoryPoint
from builtin_interfaces.msg import Duration
import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
class FollowJointTrajectoryClient(Node):
def __init__(self, name, prefix):
super().__init__(name)
self.__client = ActionClient(self, FollowJointTrajectory, prefix + '/follow_joint_trajectory')
self.__state_subscriber = self.create_subscription(
JointTrajectoryControllerState, prefix + '/state', self.__on_state_received, 1
)
self.__received_states_counter = 0
self.__remaining_iteration = 0
self.__current_trajectory = None
self.__get_result_future = None
self.__send_goal_future = None
def __on_goal_response_callback(self, future):
goal_handle = future.result()
if not goal_handle.accepted:
self.get_logger().info('Goal rejected by action server.')
return
self.get_logger().info('Goal accepted by action server.')
self.__get_result_future = goal_handle.get_result_async()
self.__get_result_future.add_done_callback(self.__on_get_result_callback)
def __on_get_result_callback(self, future):
status = future.result().status
if status == GoalStatus.STATUS_SUCCEEDED:
self.get_logger().info('Goal succeeded.')
else:
self.get_logger().info('Goal failed with status: {0}'.format(status))
if self.__remaining_iteration > 0:
self.send_goal(self.__current_trajectory, self.__remaining_iteration - 1)
else:
rclpy.shutdown()
def __on_state_received(self, _):
self.__received_states_counter += 1
def send_goal(self, trajectory, iteration=1):
self.get_logger().info('Waiting for action server to be ready...')
self.__client.wait_for_server()
# WORKAROUND: The `wait_for_server()` method reports the `joint_trajectory_controller` node is ready even though it
# needs a bit more time to get ready to receive commands.
while self.__received_states_counter < 1:
rclpy.spin_once(self)
self.__current_trajectory = trajectory
self.__remaining_iteration = iteration - 1
goal_message = FollowJointTrajectory.Goal()
goal_message.trajectory.joint_names = trajectory['joint_names']
for point in trajectory['points']:
trajectory_point = JointTrajectoryPoint(
positions=point['positions'],
time_from_start=Duration(
sec=point['time_from_start']['sec'],
nanosec=point['time_from_start']['nanosec']
)
)
goal_message.trajectory.points.append(trajectory_point)
self.get_logger().info('Sending goal request...')
self.__send_goal_future = self.__client.send_goal_async(
goal_message
)
self.__send_goal_future.add_done_callback(self.__on_goal_response_callback)
| 40 | 123 | 0.698711 |
from action_msgs.msg import GoalStatus
from control_msgs.action import FollowJointTrajectory
from control_msgs.msg import JointTrajectoryControllerState
from trajectory_msgs.msg import JointTrajectoryPoint
from builtin_interfaces.msg import Duration
import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
class FollowJointTrajectoryClient(Node):
def __init__(self, name, prefix):
super().__init__(name)
self.__client = ActionClient(self, FollowJointTrajectory, prefix + '/follow_joint_trajectory')
self.__state_subscriber = self.create_subscription(
JointTrajectoryControllerState, prefix + '/state', self.__on_state_received, 1
)
self.__received_states_counter = 0
self.__remaining_iteration = 0
self.__current_trajectory = None
self.__get_result_future = None
self.__send_goal_future = None
def __on_goal_response_callback(self, future):
goal_handle = future.result()
if not goal_handle.accepted:
self.get_logger().info('Goal rejected by action server.')
return
self.get_logger().info('Goal accepted by action server.')
self.__get_result_future = goal_handle.get_result_async()
self.__get_result_future.add_done_callback(self.__on_get_result_callback)
def __on_get_result_callback(self, future):
status = future.result().status
if status == GoalStatus.STATUS_SUCCEEDED:
self.get_logger().info('Goal succeeded.')
else:
self.get_logger().info('Goal failed with status: {0}'.format(status))
if self.__remaining_iteration > 0:
self.send_goal(self.__current_trajectory, self.__remaining_iteration - 1)
else:
rclpy.shutdown()
def __on_state_received(self, _):
self.__received_states_counter += 1
def send_goal(self, trajectory, iteration=1):
self.get_logger().info('Waiting for action server to be ready...')
self.__client.wait_for_server()
while self.__received_states_counter < 1:
rclpy.spin_once(self)
self.__current_trajectory = trajectory
self.__remaining_iteration = iteration - 1
goal_message = FollowJointTrajectory.Goal()
goal_message.trajectory.joint_names = trajectory['joint_names']
for point in trajectory['points']:
trajectory_point = JointTrajectoryPoint(
positions=point['positions'],
time_from_start=Duration(
sec=point['time_from_start']['sec'],
nanosec=point['time_from_start']['nanosec']
)
)
goal_message.trajectory.points.append(trajectory_point)
self.get_logger().info('Sending goal request...')
self.__send_goal_future = self.__client.send_goal_async(
goal_message
)
self.__send_goal_future.add_done_callback(self.__on_goal_response_callback)
| true | true |
f726466b10a38e592d89b680d7031e520070c599 | 3,697 | py | Python | packages/pytea/pytest/benchmarks/transformers/missing_idx/src/transformers/convert_xlnet_original_tf_checkpoint_to_pytorch.py | lego0901/pytea | 8ede650def2e68f4610ba816451d8b9e28f09f76 | [
"MIT"
] | 12 | 2021-09-13T18:31:09.000Z | 2022-03-31T12:10:28.000Z | packages/pytea/pytest/benchmarks/transformers/missing_idx/src/transformers/convert_xlnet_original_tf_checkpoint_to_pytorch.py | lego0901/pytea | 8ede650def2e68f4610ba816451d8b9e28f09f76 | [
"MIT"
] | 5 | 2021-12-01T04:34:07.000Z | 2022-01-28T08:28:18.000Z | packages/pytea/pytest/benchmarks/transformers/missing_idx/src/transformers/convert_xlnet_original_tf_checkpoint_to_pytorch.py | lego0901/pytea | 8ede650def2e68f4610ba816451d8b9e28f09f76 | [
"MIT"
] | 3 | 2022-01-18T10:56:05.000Z | 2022-01-28T01:46:43.000Z | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BERT checkpoint."""
import argparse
import os
import torch
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
# Initialise PyTorch model
config = XLNetConfig.from_json_file(bert_config_file)
finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print("Building PyTorch XLNetForSequenceClassification model from configuration: {}".format(str(config)))
config.finetuning_task = finetuning_task
config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
model = XLNetForSequenceClassification(config)
elif "squad" in finetuning_task:
config.finetuning_task = finetuning_task
model = XLNetForQuestionAnswering(config)
else:
model = XLNetLMHeadModel(config)
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
# Save pytorch-model
pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
print("Save PyTorch model to {}".format(os.path.abspath(pytorch_weights_dump_path)))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(os.path.abspath(pytorch_config_dump_path)))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 32.147826 | 117 | 0.712199 |
import argparse
import os
import torch
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
config = XLNetConfig.from_json_file(bert_config_file)
finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print("Building PyTorch XLNetForSequenceClassification model from configuration: {}".format(str(config)))
config.finetuning_task = finetuning_task
config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
model = XLNetForSequenceClassification(config)
elif "squad" in finetuning_task:
config.finetuning_task = finetuning_task
model = XLNetForQuestionAnswering(config)
else:
model = XLNetLMHeadModel(config)
load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
print("Save PyTorch model to {}".format(os.path.abspath(pytorch_weights_dump_path)))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(os.path.abspath(pytorch_config_dump_path)))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| true | true |
f72646ac93dd6fd9d9c8c4a8152f818f740a9035 | 2,689 | py | Python | tests/test_parsers.py | tos-kamiya/d2vg | 72ce1cb900a219f9d7bc3982e234a4498be52d5a | [
"BSD-2-Clause"
] | 3 | 2021-11-17T08:07:50.000Z | 2021-12-19T04:35:15.000Z | tests/test_parsers.py | tos-kamiya/d2vg | 72ce1cb900a219f9d7bc3982e234a4498be52d5a | [
"BSD-2-Clause"
] | null | null | null | tests/test_parsers.py | tos-kamiya/d2vg | 72ce1cb900a219f9d7bc3982e234a4498be52d5a | [
"BSD-2-Clause"
] | null | null | null | import unittest
from pathlib import Path
import re
import tempfile
import d2vg
class ParserTest(unittest.TestCase):
def test_text_file(self):
with tempfile.TemporaryDirectory() as tempdir:
p = Path(tempdir) / "a.txt"
content = "1st line.\n2nd line.\n"
p.write_text(content)
read_content = d2vg.parsers.read_text_file(str(p))
self.assertEqual(read_content, content)
def test_html_file(self):
with tempfile.TemporaryDirectory() as tempdir:
p = Path(tempdir) / "a.html"
content = """<!DOCTYPE html>
<html>
<body>
<p>1st paragraph.</p>
<p>2nd paragraph.</p>
</body>
</html>"""
p.write_text(content)
read_content = d2vg.parsers.html_parse(str(p))
read_content = re.sub(r"\n+", r"\n", read_content).rstrip()
self.assertEqual(read_content, "html\n1st paragraph.\n2nd paragraph.")
def test_pdf_file(self):
from borb.pdf.canvas.layout.page_layout.multi_column_layout import (
SingleColumnLayout,
)
from borb.pdf.canvas.layout.text.paragraph import Paragraph
from borb.pdf.document import Document
from borb.pdf.page.page import Page
from borb.pdf.pdf import PDF
with tempfile.TemporaryDirectory() as tempdir:
p = Path(tempdir) / "a.pdf"
pdf = Document()
page = Page()
pdf.append_page(page)
layout = SingleColumnLayout(page)
layout.add(Paragraph("1st paragraph."))
layout.add(Paragraph("2nd paragraph."))
with open(p, "wb") as pdf_file_handle:
PDF.dumps(pdf_file_handle, pdf)
read_content = d2vg.parsers.pdf_parse(str(p))
read_content = re.sub(r"\n+", r"\n", read_content).rstrip()
self.assertEqual(read_content, "1st paragraph.\n2nd paragraph.")
# !! not working !! ref: https://stackoverflow.com/questions/58186869/how-to-fix-the-bug-modulenotfounderror-no-module-named-exceptions-when-impo
# def test_docx_file(self):
# from docx import Document
# with tempfile.TemporaryDirectory() as tempdir:
# p = Path(tempdir) / 'a.docx'
# document = Document()
# document.add_paragraph("1st paragraph.")
# document.add_paragraph("1st paragraph.")
# document.save(str(p))
# read_content = d2vg.parsers.docx_parse(str(p))
# read_content = re.sub(r'\n+', r'\n', read_content).rstrip()
# self.assertEqual(read_content, '1st paragraph.\n2nd paragraph.')
if __name__ == "__main__":
unittest.main()
| 34.474359 | 149 | 0.61138 | import unittest
from pathlib import Path
import re
import tempfile
import d2vg
class ParserTest(unittest.TestCase):
def test_text_file(self):
with tempfile.TemporaryDirectory() as tempdir:
p = Path(tempdir) / "a.txt"
content = "1st line.\n2nd line.\n"
p.write_text(content)
read_content = d2vg.parsers.read_text_file(str(p))
self.assertEqual(read_content, content)
def test_html_file(self):
with tempfile.TemporaryDirectory() as tempdir:
p = Path(tempdir) / "a.html"
content = """<!DOCTYPE html>
<html>
<body>
<p>1st paragraph.</p>
<p>2nd paragraph.</p>
</body>
</html>"""
p.write_text(content)
read_content = d2vg.parsers.html_parse(str(p))
read_content = re.sub(r"\n+", r"\n", read_content).rstrip()
self.assertEqual(read_content, "html\n1st paragraph.\n2nd paragraph.")
def test_pdf_file(self):
from borb.pdf.canvas.layout.page_layout.multi_column_layout import (
SingleColumnLayout,
)
from borb.pdf.canvas.layout.text.paragraph import Paragraph
from borb.pdf.document import Document
from borb.pdf.page.page import Page
from borb.pdf.pdf import PDF
with tempfile.TemporaryDirectory() as tempdir:
p = Path(tempdir) / "a.pdf"
pdf = Document()
page = Page()
pdf.append_page(page)
layout = SingleColumnLayout(page)
layout.add(Paragraph("1st paragraph."))
layout.add(Paragraph("2nd paragraph."))
with open(p, "wb") as pdf_file_handle:
PDF.dumps(pdf_file_handle, pdf)
read_content = d2vg.parsers.pdf_parse(str(p))
read_content = re.sub(r"\n+", r"\n", read_content).rstrip()
self.assertEqual(read_content, "1st paragraph.\n2nd paragraph.")
if __name__ == "__main__":
unittest.main()
| true | true |
f72646f0022b7bc1e1b506d20a786c3a402e9a98 | 422 | py | Python | blog/migrations/0008_post_snippet.py | cs130-w21/15 | 3e0bfd3662e930e5b67416939a976029ddad6436 | [
"Apache-2.0"
] | null | null | null | blog/migrations/0008_post_snippet.py | cs130-w21/15 | 3e0bfd3662e930e5b67416939a976029ddad6436 | [
"Apache-2.0"
] | 13 | 2021-01-14T06:09:55.000Z | 2021-03-08T08:56:36.000Z | blog/migrations/0008_post_snippet.py | cs130-w21/15 | 3e0bfd3662e930e5b67416939a976029ddad6436 | [
"Apache-2.0"
] | 1 | 2021-04-07T18:20:21.000Z | 2021-04-07T18:20:21.000Z | # Generated by Django 3.1.5 on 2021-03-05 05:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0007_auto_20210305_0539'),
]
operations = [
migrations.AddField(
model_name='post',
name='snippet',
field=models.CharField(default='Click link below to see more.', max_length=200),
),
]
| 22.210526 | 92 | 0.606635 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0007_auto_20210305_0539'),
]
operations = [
migrations.AddField(
model_name='post',
name='snippet',
field=models.CharField(default='Click link below to see more.', max_length=200),
),
]
| true | true |
f7264824f603075011c4ae3509f47ec148f2cec0 | 12,224 | py | Python | tests/test_dates.py | robot2051/dto-digitalmarketplace-utils | e581be6396c12473697398b0ec9d253c564a324b | [
"MIT"
] | null | null | null | tests/test_dates.py | robot2051/dto-digitalmarketplace-utils | e581be6396c12473697398b0ec9d253c564a324b | [
"MIT"
] | null | null | null | tests/test_dates.py | robot2051/dto-digitalmarketplace-utils | e581be6396c12473697398b0ec9d253c564a324b | [
"MIT"
] | null | null | null | # coding: utf-8
import pytest
import mock
import workdays
import datetime
import dmutils.dates as dates_package
class TestPublishingDates():
def test_get_publishing_dates_formats_time(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2015, 5, 22, 20, 39, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
assert dates_package.datetime.utcnow() == datetime.datetime(2015, 5, 22, 20, 39, 39, 417900)
assert dates_package.get_publishing_dates(brief)['closing_time'] == '11:59 pm'
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_monday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 4, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 6, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 8, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 11, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_tuesday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 5, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 7, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 11, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_wednesday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 6, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 8, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 13, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_thursday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 7, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 11, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 13, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 14, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_friday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 8, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
'lotSlug': 'digital-specialists'
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 14, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_saturday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 9, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 16, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_sunday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 10, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 17, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_monday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 4, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 11, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 18, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_tuesday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 5, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 18, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 19, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_wednesday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 6, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 13, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 19, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 20, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_thursday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 7, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 14, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 20, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 21, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_friday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 8, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 21, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 22, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_saturday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 9, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 22, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 23, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_sunday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 10, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 22, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 24, 23, 59, 59)
def test_get_publishing_dates_returns_correct_dates_if_brief_is_published_with_no_requirementLength(self):
brief = {
'publishedAt': u'2016-01-04T12:00:00.00000Z',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 1, 11, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 1, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 1, 18, 23, 59, 59)
def test_get_publishing_dates_returns_correct_dates_if_brief_is_published_with_1_week_requirementsLength(self):
brief = {
'publishedAt': u'2016-01-04T12:00:00.00000Z',
'requirementsLength': '1 week'
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 1, 6, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 1, 8, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 1, 11, 23, 59, 59)
def test_get_publishing_dates_returns_correct_dates_if_brief_is_published_with_2_week_requirementsLength(self):
brief = {
'publishedAt': u'2016-01-04T12:00:00.00000Z',
'requirementsLength': '2 weeks'
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 1, 11, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 1, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 1, 18, 23, 59, 59)
def test_get_publishing_dates_returns_correct_dates_if_published_at_key_is_a_date_object(self):
brief = {
'publishedAt': datetime.datetime(2016, 1, 4, 12, 0, 0),
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 1, 11, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 1, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 1, 18, 23, 59, 59)
| 49.489879 | 115 | 0.641525 |
import pytest
import mock
import workdays
import datetime
import dmutils.dates as dates_package
class TestPublishingDates():
    """Tests for dmutils.dates.get_publishing_dates.

    A brief's question-close, answer-close and closing dates are derived from
    its publication day and its 'requirementsLength' ('1 week' or '2 weeks';
    two weeks is assumed when the key is absent).  Every boundary falls at
    23:59:59 on the expected day.

    The original file repeated the mock-and-assert boilerplate in all 18
    tests; the two private helpers below consolidate it while keeping every
    test name and every assertion intact.
    """

    def _assert_dates(self, brief, questions_close, answers_close, closing_date):
        # Shared assertion helper; each expected date is a (year, month, day)
        # tuple, expanded to 23:59:59 on that day.
        dates = dates_package.get_publishing_dates(brief)
        actual = [dates['questions_close'], dates['answers_close'], dates['closing_date']]
        expected = [datetime.datetime(y, m, d, 23, 59, 59)
                    for (y, m, d) in (questions_close, answers_close, closing_date)]
        assert actual == expected

    def _assert_dates_from_now(self, now, brief, questions_close, answers_close, closing_date):
        # Freeze dmutils.dates.datetime.utcnow() at *now*, then check the
        # derived dates for a brief that has not yet been published.
        with mock.patch('dmutils.dates.datetime') as mock_date:
            mock_date.utcnow.return_value = now
            self._assert_dates(brief, questions_close, answers_close, closing_date)

    def test_get_publishing_dates_formats_time(self):
        # Checks both that the module-level datetime is patchable and that the
        # closing time is rendered as a 12-hour string.
        with mock.patch('dmutils.dates.datetime') as mock_date:
            mock_date.utcnow.return_value = datetime.datetime(2015, 5, 22, 20, 39, 39, 417900)
            brief = {
                'requirementsLength': '1 week',
            }
            assert dates_package.datetime.utcnow() == datetime.datetime(2015, 5, 22, 20, 39, 39, 417900)
            assert dates_package.get_publishing_dates(brief)['closing_time'] == '11:59 pm'

    def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_monday(self):
        self._assert_dates_from_now(
            datetime.datetime(2016, 7, 4, 15, 21, 39, 417900),
            {'requirementsLength': '1 week'},
            (2016, 7, 6), (2016, 7, 8), (2016, 7, 11))

    def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_tuesday(self):
        self._assert_dates_from_now(
            datetime.datetime(2016, 7, 5, 15, 21, 39, 417900),
            {'requirementsLength': '1 week'},
            (2016, 7, 7), (2016, 7, 11), (2016, 7, 12))

    def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_wednesday(self):
        self._assert_dates_from_now(
            datetime.datetime(2016, 7, 6, 15, 21, 39, 417900),
            {'requirementsLength': '1 week'},
            (2016, 7, 8), (2016, 7, 12), (2016, 7, 13))

    def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_thursday(self):
        self._assert_dates_from_now(
            datetime.datetime(2016, 7, 7, 15, 21, 39, 417900),
            {'requirementsLength': '1 week'},
            (2016, 7, 11), (2016, 7, 13), (2016, 7, 14))

    def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_friday(self):
        # The only one-week case that also carries a lotSlug, preserved from
        # the original test.
        self._assert_dates_from_now(
            datetime.datetime(2016, 7, 8, 15, 21, 39, 417900),
            {'requirementsLength': '1 week', 'lotSlug': 'digital-specialists'},
            (2016, 7, 12), (2016, 7, 14), (2016, 7, 15))

    def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_saturday(self):
        self._assert_dates_from_now(
            datetime.datetime(2016, 7, 9, 15, 21, 39, 417900),
            {'requirementsLength': '1 week'},
            (2016, 7, 12), (2016, 7, 15), (2016, 7, 16))

    def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_sunday(self):
        self._assert_dates_from_now(
            datetime.datetime(2016, 7, 10, 15, 21, 39, 417900),
            {'requirementsLength': '1 week'},
            (2016, 7, 12), (2016, 7, 15), (2016, 7, 17))

    def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_monday(self):
        self._assert_dates_from_now(
            datetime.datetime(2016, 7, 4, 15, 21, 39, 417900),
            {'requirementsLength': '2 weeks'},
            (2016, 7, 11), (2016, 7, 15), (2016, 7, 18))

    def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_tuesday(self):
        self._assert_dates_from_now(
            datetime.datetime(2016, 7, 5, 15, 21, 39, 417900),
            {'requirementsLength': '2 weeks'},
            (2016, 7, 12), (2016, 7, 18), (2016, 7, 19))

    def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_wednesday(self):
        self._assert_dates_from_now(
            datetime.datetime(2016, 7, 6, 15, 21, 39, 417900),
            {'requirementsLength': '2 weeks'},
            (2016, 7, 13), (2016, 7, 19), (2016, 7, 20))

    def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_thursday(self):
        self._assert_dates_from_now(
            datetime.datetime(2016, 7, 7, 15, 21, 39, 417900),
            {'requirementsLength': '2 weeks'},
            (2016, 7, 14), (2016, 7, 20), (2016, 7, 21))

    def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_friday(self):
        self._assert_dates_from_now(
            datetime.datetime(2016, 7, 8, 15, 21, 39, 417900),
            {'requirementsLength': '2 weeks'},
            (2016, 7, 15), (2016, 7, 21), (2016, 7, 22))

    def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_saturday(self):
        self._assert_dates_from_now(
            datetime.datetime(2016, 7, 9, 15, 21, 39, 417900),
            {'requirementsLength': '2 weeks'},
            (2016, 7, 15), (2016, 7, 22), (2016, 7, 23))

    def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_sunday(self):
        self._assert_dates_from_now(
            datetime.datetime(2016, 7, 10, 15, 21, 39, 417900),
            {'requirementsLength': '2 weeks'},
            (2016, 7, 15), (2016, 7, 22), (2016, 7, 24))

    def test_get_publishing_dates_returns_correct_dates_if_brief_is_published_with_no_requirementLength(self):
        self._assert_dates(
            {'publishedAt': u'2016-01-04T12:00:00.00000Z'},
            (2016, 1, 11), (2016, 1, 15), (2016, 1, 18))

    def test_get_publishing_dates_returns_correct_dates_if_brief_is_published_with_1_week_requirementsLength(self):
        self._assert_dates(
            {'publishedAt': u'2016-01-04T12:00:00.00000Z', 'requirementsLength': '1 week'},
            (2016, 1, 6), (2016, 1, 8), (2016, 1, 11))

    def test_get_publishing_dates_returns_correct_dates_if_brief_is_published_with_2_week_requirementsLength(self):
        self._assert_dates(
            {'publishedAt': u'2016-01-04T12:00:00.00000Z', 'requirementsLength': '2 weeks'},
            (2016, 1, 11), (2016, 1, 15), (2016, 1, 18))

    def test_get_publishing_dates_returns_correct_dates_if_published_at_key_is_a_date_object(self):
        self._assert_dates(
            {'publishedAt': datetime.datetime(2016, 1, 4, 12, 0, 0)},
            (2016, 1, 11), (2016, 1, 15), (2016, 1, 18))
| true | true |
f726488d8cbb6bc2a5748013e73cd7f6e42b06b9 | 20,570 | py | Python | sagemaker-debugger/model_specific_realtime_analysis/bert_attention_head_view/entry_point/data.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 2,327 | 2020-03-01T09:47:34.000Z | 2021-11-25T12:38:42.000Z | sagemaker-debugger/model_specific_realtime_analysis/bert_attention_head_view/entry_point/data.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 209 | 2020-03-01T17:14:12.000Z | 2021-11-08T20:35:42.000Z | sagemaker-debugger/model_specific_realtime_analysis/bert_attention_head_view/entry_point/data.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 686 | 2020-03-03T17:24:51.000Z | 2021-11-25T23:39:12.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and DMLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT for QA datasets."""
import collections
import multiprocessing as mp
import time
from functools import partial
from mxnet.gluon.data import SimpleDataset
from gluonnlp.data.utils import whitespace_splitter
import numpy as np
# Public API of this module.  The original literal was corrupted: it read
# '\rocess_dataset', where '\r' is a carriage-return escape that mangled
# the intended name 'preprocess_dataset' (and would break wildcard imports).
__all__ = ['SQuADTransform', 'preprocess_dataset']
class SquadExample:
    """A single training/test example for a SQuAD question.

    For examples without an answer, the start and end position are -1.
    """

    def __init__(self,
                 qas_id,
                 question_text,
                 doc_tokens,
                 example_id,
                 orig_answer_text=None,
                 start_position=None,
                 end_position=None,
                 is_impossible=False):
        # Identity of the example within the dataset.
        self.qas_id = qas_id
        self.example_id = example_id
        # The question and the whitespace-tokenized context document.
        self.question_text = question_text
        self.doc_tokens = doc_tokens
        # Gold answer span; absent (None/False defaults) for unlabelled or
        # unanswerable questions.
        self.orig_answer_text = orig_answer_text
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible
def _worker_fn(example, transform):
"""Function for processing data in worker process."""
feature = transform(example)
return feature
def preprocess_dataset(dataset, transform, num_workers=8):
    """Use multiprocessing to perform transform for dataset.

    Parameters
    ----------
    dataset: dataset-like object
        Source dataset.
    transform: callable
        Transformer function.
    num_workers: int, default 8
        The number of multiprocessing workers to use for data preprocessing.

    Returns
    -------
    tuple of (SimpleDataset, list)
        The transformed dataset (six fields per feature) and the per-feature
        token lengths.
    """
    worker_fn = partial(_worker_fn, transform=transform)
    start = time.time()
    dataset_transform = []
    dataset_len = []
    pool = mp.Pool(num_workers)
    # Fix: the original leaked the worker pool when a transform raised, and
    # called close() without join().  try/finally always reclaims the workers.
    try:
        for data in pool.map(worker_fn, dataset):
            if data:
                for _data in data:
                    # The last element of each feature is its token length.
                    dataset_transform.append(_data[:-1])
                    dataset_len.append(_data[-1])
    finally:
        pool.close()
        pool.join()
    dataset = SimpleDataset(dataset_transform).transform(
        lambda x: (x[0], x[1], x[2], x[3], x[4], x[5]))
    end = time.time()
    print('Done! Transform dataset costs %.2f seconds.' % (end - start))
    return dataset, dataset_len
class SQuADFeature:
    """One model-ready feature produced from a (possibly split) SQuAD example.

    A long example is split into several doc spans; each span yields one
    SQuADFeature carrying the combined question/document tokens, their ids,
    segment ids and the answer positions within this feature.
    """

    def __init__(self,
                 example_id,
                 qas_id,
                 doc_tokens,
                 doc_span_index,
                 tokens,
                 token_to_orig_map,
                 token_is_max_context,
                 input_ids,
                 valid_length,
                 segment_ids,
                 start_position,
                 end_position,
                 is_impossible):
        # Which example, and which of its sliding-window spans, this is.
        self.example_id = example_id
        self.qas_id = qas_id
        self.doc_span_index = doc_span_index
        # Original whitespace tokens plus the combined token view of the span.
        self.doc_tokens = doc_tokens
        self.tokens = tokens
        self.token_to_orig_map = token_to_orig_map
        self.token_is_max_context = token_is_max_context
        # Model inputs.
        self.input_ids = input_ids
        self.valid_length = valid_length
        self.segment_ids = segment_ids
        # Answer annotation (indices into ``tokens``).
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible
class SQuADTransform:
    """Dataset Transformation for BERT-style QA.
    The transformation is processed in the following steps:
    - Convert from gluonnlp.data.SQuAD's record to SquadExample.
    - Tokenize the question_text in the example.
    - For examples where the document is too long,
      use a sliding window to split into multiple features and
      record whether each token is a maximum context.
    - Tokenize the split document chunks.
    - Combine the token of question_text with the token
      of the document and insert [CLS] and [SEP].
    - Generate the start position and end position of the answer.
    - Generate valid length.
    E.g:
    Inputs:
        question_text: 'When did BBC Japan begin broadcasting?'
        doc_tokens: ['BBC','Japan','was','a','general','entertainment','channel,',
            'which','operated','between','December','2004','and','April',
            '2006.','It','ceased','operations','after','its','Japanese',
            'distributor','folded.']
        start_position: 10
        end_position: 11
        orig_answer_text: 'December 2004'
    Processed:
        tokens: ['[CLS]','when','did','bbc','japan','begin','broadcasting','?',
            '[SEP]','bbc','japan','was','a','general','entertainment','channel',
            ',','which','operated','between','december','2004','and','april',
            '2006','.','it','ceased','operations','after','its','japanese',
            'distributor','folded','.','[SEP]']
        segment_ids: [0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,
            1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
        start_position: 20
        end_position: 21
        valid_length: 36
    Because of the sliding window approach taken to scoring documents, a single
    token can appear in multiple documents.
    So you need to record whether each token is a maximum context. E.g.
    Doc: the man went to the store and bought a gallon of milk
    Span A: the man went to the
    Span B: to the store and bought
    Span C: and bought a gallon of
    ...
    Now the word 'bought' will have two scores from spans B and C. We only
    want to consider the score with "maximum context", which we define as
    the *minimum* of its left and right context (the *sum* of left and
    right context will always be the same, of course).
    In the example the maximum context for 'bought' would be span C since
    it has 1 left context and 3 right context, while span B has 4 left context
    and 0 right context.
    Parameters
    ----------
    tokenizer : BERTTokenizer.
        Tokenizer for the sentences.
    labels : list of int.
        List of all label ids for the classification task.
    max_seq_length : int, default 384
        Maximum sequence length of the sentences.
    doc_stride : int, default 128
        When splitting up a long document into chunks,
        how much stride to take between chunks.
    max_query_length : int, default 64
        The maximum length of the query tokens.
    is_pad : bool, default True
        Whether to pad the sentences to maximum length.
    is_training : bool, default True
        Whether to run training.
    do_lookup : bool, default True
        Whether to do vocabulary lookup for convert tokens to indices.
    """
    def __init__(self,
                 tokenizer,
                 max_seq_length=384,
                 doc_stride=128,
                 max_query_length=64,
                 is_pad=True,
                 is_training=True,
                 do_lookup=True):
        self.tokenizer = tokenizer
        self.max_seq_length = max_seq_length
        self.max_query_length = max_query_length
        self.doc_stride = doc_stride
        self.is_pad = is_pad
        self.is_training = is_training
        self.do_lookup = do_lookup
    def _is_whitespace(self, c):
        # 0x202F is the narrow no-break space, which appears in some SQuAD
        # contexts and must also delimit words.
        if c == ' ' or c == '\t' or c == '\r' or c == '\n' or ord(
                c) == 0x202F:
            return True
        return False
    def _toSquadExample(self, record):
        """Convert one raw gluonnlp.data.SQuAD record into a SquadExample.

        Returns None (skipping the example) when, in training mode, the gold
        answer text cannot be recovered from the whitespace-tokenized context.
        """
        example_id = record[0]
        qas_id = record[1]
        question_text = record[2]
        paragraph_text = record[3]
        orig_answer_text = record[4][0] if record[4] else ''
        # NOTE(review): with an empty answer list this leaves answer_offset
        # as '' — fine for is_impossible/inference records, but would raise
        # below if a training record had an answer text without an offset.
        answer_offset = record[5][0] if record[5] else ''
        is_impossible = record[6] if len(record) == 7 else False
        # Whitespace-tokenize the paragraph while building a char->word map,
        # so the character-based answer offset can be projected onto words.
        doc_tokens = []
        char_to_word_offset = []
        prev_is_whitespace = True
        for c in paragraph_text:
            if self._is_whitespace(c):
                prev_is_whitespace = True
            else:
                if prev_is_whitespace:
                    doc_tokens.append(c)
                else:
                    doc_tokens[-1] += c
                prev_is_whitespace = False
            char_to_word_offset.append(len(doc_tokens) - 1)
        start_position = -1
        end_position = -1
        if self.is_training:
            if not is_impossible:
                answer_length = len(orig_answer_text)
                start_position = char_to_word_offset[answer_offset]
                end_position = char_to_word_offset[
                    answer_offset + answer_length - 1]
                # Only add answers where the text can be exactly recovered from the
                # document. If this CAN'T happen it's likely due to weird Unicode
                # stuff so we will just skip the example.
                #
                # Note that this means for training mode, every example is NOT
                # guaranteed to be preserved.
                actual_text = ' '.join(
                    doc_tokens[start_position:(end_position + 1)])
                cleaned_answer_text = ' '.join(
                    whitespace_splitter(orig_answer_text.strip()))
                if actual_text.find(cleaned_answer_text) == -1:
                    print('Could not find answer: %s vs. %s' %
                          (actual_text, cleaned_answer_text))
                    return None
            else:
                start_position = -1
                end_position = -1
                orig_answer_text = ''
        example = SquadExample(
            qas_id=qas_id,
            question_text=question_text,
            doc_tokens=doc_tokens,
            example_id=example_id,
            orig_answer_text=orig_answer_text,
            start_position=start_position,
            end_position=end_position,
            is_impossible=is_impossible)
        return example
    def _transform(self, *record):
        """Turn one record into a list of SQuADFeature (one per doc span).

        Returns None when the record is skipped by _toSquadExample.
        """
        example = self._toSquadExample(record)
        if not example:
            return None
        padding = self.tokenizer.vocab.padding_token
        if self.do_lookup:
            padding = self.tokenizer.vocab[padding]
        features = []
        # Truncate over-long questions to max_query_length tokens.
        query_tokens = self.tokenizer(example.question_text)
        if len(query_tokens) > self.max_query_length:
            query_tokens = query_tokens[0:self.max_query_length]
        # Re-tokenize each whitespace word into sub-tokens, keeping maps in
        # both directions between word indices and sub-token indices.
        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = self.tokenizer(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)
        tok_start_position = None
        tok_end_position = None
        if self.is_training and example.is_impossible:
            tok_start_position = -1
            tok_end_position = -1
        if self.is_training and not example.is_impossible:
            # Project the word-level answer span onto sub-token indices, then
            # tighten it so it matches the tokenized gold answer exactly.
            tok_start_position = orig_to_tok_index[example.start_position]
            if example.end_position < len(example.doc_tokens) - 1:
                tok_end_position = orig_to_tok_index[example.end_position +
                                                     1] - 1
            else:
                tok_end_position = len(all_doc_tokens) - 1
            (tok_start_position, tok_end_position) = _improve_answer_span(
                all_doc_tokens, tok_start_position, tok_end_position,
                self.tokenizer, example.orig_answer_text)
        # The -3 accounts for [CLS], [SEP] and [SEP]
        max_tokens_for_doc = self.max_seq_length - len(query_tokens) - 3
        # We can have documents that are longer than the maximum sequence length.
        # To deal with this we do a sliding window approach, where we take chunks
        # of the up to our max length with a stride of `doc_stride`.
        _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
            'DocSpan', ['start', 'length'])
        doc_spans = []
        start_offset = 0
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, self.doc_stride)
        for (doc_span_index, doc_span) in enumerate(doc_spans):
            # Assemble [CLS] question [SEP] doc-chunk [SEP] with segment ids
            # 0 for the question part and 1 for the document part.
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            tokens.append(self.tokenizer.vocab.cls_token)
            segment_ids.append(0)
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append(self.tokenizer.vocab.sep_token)
            segment_ids.append(0)
            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(
                    tokens)] = tok_to_orig_index[split_token_index]
                is_max_context = _check_is_max_context(
                    doc_spans, doc_span_index, split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append(self.tokenizer.vocab.sep_token)
            segment_ids.append(1)
            if self.do_lookup:
                input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            else:
                input_ids = tokens
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            valid_length = len(input_ids)
            # Zero-pad up to the sequence length.
            if self.is_pad:
                while len(input_ids) < self.max_seq_length:
                    input_ids.append(padding)
                    # NOTE(review): segment ids are padded with the padding
                    # *token id*, not 0 — mirrors the upstream behaviour;
                    # confirm downstream masking ignores padded positions.
                    segment_ids.append(padding)
                assert len(input_ids) == self.max_seq_length
                assert len(segment_ids) == self.max_seq_length
            start_position = 0
            end_position = 0
            if self.is_training and not example.is_impossible:
                # For training, if our document chunk does not contain an annotation
                # we throw it out, since there is nothing to predict.
                doc_start = doc_span.start
                doc_end = doc_span.start + doc_span.length - 1
                out_of_span = False
                if not (tok_start_position >= doc_start
                        and tok_end_position <= doc_end):
                    out_of_span = True
                if out_of_span:
                    start_position = 0
                    end_position = 0
                else:
                    # Shift span indices from document space into the combined
                    # sequence ([CLS] + query + [SEP] precede the document).
                    doc_offset = len(query_tokens) + 2
                    start_position = tok_start_position - doc_start + doc_offset
                    end_position = tok_end_position - doc_start + doc_offset
            if self.is_training and example.is_impossible:
                start_position = 0
                end_position = 0
            features.append(SQuADFeature(example_id=example.example_id,
                                         qas_id=example.qas_id,
                                         doc_tokens=example.doc_tokens,
                                         doc_span_index=doc_span_index,
                                         tokens=tokens,
                                         token_to_orig_map=token_to_orig_map,
                                         token_is_max_context=token_is_max_context,
                                         input_ids=input_ids,
                                         valid_length=valid_length,
                                         segment_ids=segment_ids,
                                         start_position=start_position,
                                         end_position=end_position,
                                         is_impossible=example.is_impossible))
        return features
    def __call__(self, record):
        """Transform one raw record into per-span feature lists.

        Each returned feature is [example_id, input_ids, segment_ids,
        valid_length, start_position, end_position, token_count]; returns
        None for skipped records.
        """
        examples = self._transform(*record)
        if not examples:
            return None
        features = []
        for _example in examples:
            feature = []
            feature.append(_example.example_id)
            feature.append(_example.input_ids)
            feature.append(_example.segment_ids)
            feature.append(_example.valid_length)
            feature.append(_example.start_position)
            feature.append(_example.end_position)
            feature.append(len(_example.input_ids))
            features.append(feature)
        return features
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electornics?
# Context: The Japanese electronics industry is the lagest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = ' '.join(tokenizer(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = ' '.join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + \
0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
| 38.958333 | 84 | 0.597083 |
import collections
import multiprocessing as mp
import time
from functools import partial
from mxnet.gluon.data import SimpleDataset
from gluonnlp.data.utils import whitespace_splitter
import numpy as np
# Public API of this module.  The original literal was corrupted: it read
# '\rocess_dataset', where '\r' is a carriage-return escape that mangled
# the intended name 'preprocess_dataset' (and would break wildcard imports).
__all__ = ['SQuADTransform', 'preprocess_dataset']
class SquadExample:
    """A single training/test example for a SQuAD question.

    For examples without an answer, the start and end position are -1.
    """

    def __init__(self,
                 qas_id,
                 question_text,
                 doc_tokens,
                 example_id,
                 orig_answer_text=None,
                 start_position=None,
                 end_position=None,
                 is_impossible=False):
        # Identity of the example within the dataset.
        self.qas_id = qas_id
        self.example_id = example_id
        # The question and the whitespace-tokenized context document.
        self.question_text = question_text
        self.doc_tokens = doc_tokens
        # Gold answer span; absent (None/False defaults) for unlabelled or
        # unanswerable questions.
        self.orig_answer_text = orig_answer_text
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible
def _worker_fn(example, transform):
feature = transform(example)
return feature
def preprocess_dataset(dataset, transform, num_workers=8):
    """Use multiprocessing to transform *dataset*.

    Parameters
    ----------
    dataset: dataset-like object
        Source dataset.
    transform: callable
        Transformer function applied to each record.
    num_workers: int, default 8
        Number of worker processes.

    Returns
    -------
    tuple of (SimpleDataset, list)
        The transformed dataset (six fields per feature) and the per-feature
        token lengths.
    """
    worker_fn = partial(_worker_fn, transform=transform)
    start = time.time()
    dataset_transform = []
    dataset_len = []
    pool = mp.Pool(num_workers)
    # Fix: the original leaked the worker pool when a transform raised, and
    # called close() without join().  try/finally always reclaims the workers.
    try:
        for data in pool.map(worker_fn, dataset):
            if data:
                for _data in data:
                    # The last element of each feature is its token length.
                    dataset_transform.append(_data[:-1])
                    dataset_len.append(_data[-1])
    finally:
        pool.close()
        pool.join()
    dataset = SimpleDataset(dataset_transform).transform(
        lambda x: (x[0], x[1], x[2], x[3], x[4], x[5]))
    end = time.time()
    print('Done! Transform dataset costs %.2f seconds.' % (end - start))
    return dataset, dataset_len
class SQuADFeature:
    """One model-ready feature: a single document span of one example."""

    def __init__(self, example_id, qas_id, doc_tokens, doc_span_index,
                 tokens, token_to_orig_map, token_is_max_context,
                 input_ids, valid_length, segment_ids, start_position,
                 end_position, is_impossible):
        # Provenance: which example and which sliding-window span.
        self.example_id = example_id
        self.qas_id = qas_id
        self.doc_tokens = doc_tokens
        self.doc_span_index = doc_span_index
        # Token view of the span plus back-mappings into ``doc_tokens``.
        self.tokens = tokens
        self.token_to_orig_map = token_to_orig_map
        self.token_is_max_context = token_is_max_context
        # Network inputs (possibly padded to a fixed length).
        self.input_ids = input_ids
        self.valid_length = valid_length
        self.segment_ids = segment_ids
        # Answer targets relative to this span (0 when absent/out of span).
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible
class SQuADTransform:
    """Convert raw SQuAD records into model-ready ``SQuADFeature`` lists.

    A record longer than ``max_seq_length`` is split into overlapping
    document spans (stride ``doc_stride``); each span becomes one feature.
    """
    def __init__(self,
                 tokenizer,
                 max_seq_length=384,
                 doc_stride=128,
                 max_query_length=64,
                 is_pad=True,
                 is_training=True,
                 do_lookup=True):
        # tokenizer: callable text -> list of sub-tokens; must expose
        # ``.vocab`` with cls/sep/padding tokens and (when do_lookup)
        # ``convert_tokens_to_ids``.
        self.tokenizer = tokenizer
        self.max_seq_length = max_seq_length
        self.max_query_length = max_query_length
        self.doc_stride = doc_stride
        # is_pad: pad every span to max_seq_length.
        self.is_pad = is_pad
        # is_training: compute start/end answer targets.
        self.is_training = is_training
        # do_lookup: emit token ids instead of token strings.
        self.do_lookup = do_lookup
    def _is_whitespace(self, c):
        """Return True for space, tab, CR, LF or U+202F (narrow no-break space)."""
        if c == ' ' or c == '\t' or c == '\r' or c == '\n' or ord(
                c) == 0x202F:
            return True
        return False
    def _toSquadExample(self, record):
        """Build a ``SquadExample`` from one raw record tuple.

        record layout: (example_id, qas_id, question, paragraph,
        answer_texts, answer_offsets[, is_impossible]).  Returns None if
        the character-offset answer cannot be located in the paragraph.
        """
        example_id = record[0]
        qas_id = record[1]
        question_text = record[2]
        paragraph_text = record[3]
        orig_answer_text = record[4][0] if record[4] else ''
        answer_offset = record[5][0] if record[5] else ''
        is_impossible = record[6] if len(record) == 7 else False
        doc_tokens = []
        char_to_word_offset = []
        prev_is_whitespace = True
        # Whitespace-split the paragraph while recording, for every
        # character, the index of the word it belongs to.
        for c in paragraph_text:
            if self._is_whitespace(c):
                prev_is_whitespace = True
            else:
                if prev_is_whitespace:
                    doc_tokens.append(c)
                else:
                    doc_tokens[-1] += c
                prev_is_whitespace = False
            char_to_word_offset.append(len(doc_tokens) - 1)
        start_position = -1
        end_position = -1
        if self.is_training:
            if not is_impossible:
                answer_length = len(orig_answer_text)
                # Map character offsets to word-level positions.
                start_position = char_to_word_offset[answer_offset]
                end_position = char_to_word_offset[
                    answer_offset + answer_length - 1]
                # Sanity check: the recovered word span must contain the
                # (whitespace-normalized) annotated answer text.
                actual_text = ' '.join(
                    doc_tokens[start_position:(end_position + 1)])
                cleaned_answer_text = ' '.join(
                    whitespace_splitter(orig_answer_text.strip()))
                if actual_text.find(cleaned_answer_text) == -1:
                    print('Could not find answer: %s vs. %s' %
                          (actual_text, cleaned_answer_text))
                    return None
            else:
                start_position = -1
                end_position = -1
                orig_answer_text = ''
        example = SquadExample(
            qas_id=qas_id,
            question_text=question_text,
            doc_tokens=doc_tokens,
            example_id=example_id,
            orig_answer_text=orig_answer_text,
            start_position=start_position,
            end_position=end_position,
            is_impossible=is_impossible)
        return example
    def _transform(self, *record):
        """Tokenize one record into a list of per-span ``SQuADFeature``.

        Returns None when ``_toSquadExample`` rejects the record.
        """
        example = self._toSquadExample(record)
        if not example:
            return None
        padding = self.tokenizer.vocab.padding_token
        if self.do_lookup:
            padding = self.tokenizer.vocab[padding]
        features = []
        # Truncate the question to max_query_length sub-tokens.
        query_tokens = self.tokenizer(example.question_text)
        if len(query_tokens) > self.max_query_length:
            query_tokens = query_tokens[0:self.max_query_length]
        # Sub-tokenize the document, keeping word<->sub-token index maps.
        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = self.tokenizer(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)
        tok_start_position = None
        tok_end_position = None
        if self.is_training and example.is_impossible:
            tok_start_position = -1
            tok_end_position = -1
        if self.is_training and not example.is_impossible:
            # Project the word-level answer span onto sub-token indices,
            # then tighten it with _improve_answer_span.
            tok_start_position = orig_to_tok_index[example.start_position]
            if example.end_position < len(example.doc_tokens) - 1:
                tok_end_position = orig_to_tok_index[example.end_position +
                                                     1] - 1
            else:
                tok_end_position = len(all_doc_tokens) - 1
            (tok_start_position, tok_end_position) = _improve_answer_span(
                all_doc_tokens, tok_start_position, tok_end_position,
                self.tokenizer, example.orig_answer_text)
        # -3 accounts for the cls token and the two sep tokens added below.
        max_tokens_for_doc = self.max_seq_length - len(query_tokens) - 3
        _DocSpan = collections.namedtuple(
            'DocSpan', ['start', 'length'])
        # Slide a window of at most max_tokens_for_doc over the document.
        doc_spans = []
        start_offset = 0
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, self.doc_stride)
        for (doc_span_index, doc_span) in enumerate(doc_spans):
            # Assemble: cls_token, query, sep_token, doc span, sep_token;
            # segment id 0 for the query part, 1 for the document part.
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            tokens.append(self.tokenizer.vocab.cls_token)
            segment_ids.append(0)
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append(self.tokenizer.vocab.sep_token)
            segment_ids.append(0)
            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(
                    tokens)] = tok_to_orig_index[split_token_index]
                is_max_context = _check_is_max_context(
                    doc_spans, doc_span_index, split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append(self.tokenizer.vocab.sep_token)
            segment_ids.append(1)
            if self.do_lookup:
                input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            else:
                input_ids = tokens
            valid_length = len(input_ids)
            if self.is_pad:
                # NOTE(review): segment_ids is padded with the *padding id*
                # rather than 0 — confirm this matches the model's
                # expectation for padded positions.
                while len(input_ids) < self.max_seq_length:
                    input_ids.append(padding)
                    segment_ids.append(padding)
                assert len(input_ids) == self.max_seq_length
                assert len(segment_ids) == self.max_seq_length
            start_position = 0
            end_position = 0
            if self.is_training and not example.is_impossible:
                doc_start = doc_span.start
                doc_end = doc_span.start + doc_span.length - 1
                out_of_span = False
                if not (tok_start_position >= doc_start
                        and tok_end_position <= doc_end):
                    out_of_span = True
                if out_of_span:
                    # Answer not inside this window: train it as "no answer".
                    start_position = 0
                    end_position = 0
                else:
                    # +2 skips the cls token and the first sep token.
                    doc_offset = len(query_tokens) + 2
                    start_position = tok_start_position - doc_start + doc_offset
                    end_position = tok_end_position - doc_start + doc_offset
            if self.is_training and example.is_impossible:
                start_position = 0
                end_position = 0
            features.append(SQuADFeature(example_id=example.example_id,
                                         qas_id=example.qas_id,
                                         doc_tokens=example.doc_tokens,
                                         doc_span_index=doc_span_index,
                                         tokens=tokens,
                                         token_to_orig_map=token_to_orig_map,
                                         token_is_max_context=token_is_max_context,
                                         input_ids=input_ids,
                                         valid_length=valid_length,
                                         segment_ids=segment_ids,
                                         start_position=start_position,
                                         end_position=end_position,
                                         is_impossible=example.is_impossible))
        return features
    def __call__(self, record):
        """Transform one record into a list of flat feature lists.

        Each entry: [example_id, input_ids, segment_ids, valid_length,
        start_position, end_position, len(input_ids)] — the trailing
        length is split off again by ``preprocess_dataset``.
        """
        examples = self._transform(*record)
        if not examples:
            return None
        features = []
        for _example in examples:
            feature = []
            feature.append(_example.example_id)
            feature.append(_example.input_ids)
            feature.append(_example.segment_ids)
            feature.append(_example.valid_length)
            feature.append(_example.start_position)
            feature.append(_example.end_position)
            feature.append(len(_example.input_ids))
            features.append(feature)
        return features
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
tok_answer_text = ' '.join(tokenizer(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = ' '.join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + \
0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
| true | true |
f7264918230895dea85690103d818973bdb62a3f | 242 | py | Python | prefect/aircraftlib/__init__.py | andersy005/brouillons-quotidien | 468ebcc3327f96a6eb3a9a26460790c4f34fdc85 | [
"MIT"
] | null | null | null | prefect/aircraftlib/__init__.py | andersy005/brouillons-quotidien | 468ebcc3327f96a6eb3a9a26460790c4f34fdc85 | [
"MIT"
] | null | null | null | prefect/aircraftlib/__init__.py | andersy005/brouillons-quotidien | 468ebcc3327f96a6eb3a9a26460790c4f34fdc85 | [
"MIT"
] | null | null | null | # flake8: noqa
from .analysis import add_airline_info, clean_vector
from .database import Database
from .openflights import fetch_reference_data
from .opensky import fetch_live_aircraft_data
from .position import Area, Position, bounding_box
| 34.571429 | 52 | 0.85124 |
from .analysis import add_airline_info, clean_vector
from .database import Database
from .openflights import fetch_reference_data
from .opensky import fetch_live_aircraft_data
from .position import Area, Position, bounding_box
| true | true |
f726495538536b17d16dcc758c9c8febfb1dc64a | 13,874 | py | Python | official/vision/beta/configs/retinanet.py | melG81/models | d9ed5232648228ad58b9d50e29d8fe3bb6aa7c4a | [
"Apache-2.0"
] | 1 | 2021-05-12T08:34:32.000Z | 2021-05-12T08:34:32.000Z | official/vision/beta/configs/retinanet.py | melG81/models | d9ed5232648228ad58b9d50e29d8fe3bb6aa7c4a | [
"Apache-2.0"
] | null | null | null | official/vision/beta/configs/retinanet.py | melG81/models | d9ed5232648228ad58b9d50e29d8fe3bb6aa7c4a | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""RetinaNet configuration definition."""
import os
from typing import List, Optional
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.beta.configs import backbones
from official.vision.beta.configs import common
from official.vision.beta.configs import decoders
# pylint: disable=missing-class-docstring
@dataclasses.dataclass
class TfExampleDecoder(hyperparams.Config):
  """Config for the plain tf.Example decoder."""
  regenerate_source_id: bool = False
@dataclasses.dataclass
class TfExampleDecoderLabelMap(hyperparams.Config):
  """Config for the tf.Example decoder that applies a label map file."""
  regenerate_source_id: bool = False
  label_map: str = ''
@dataclasses.dataclass
class DataDecoder(hyperparams.OneOfConfig):
  """One-of config selecting the data decoder; ``type`` picks the branch."""
  type: Optional[str] = 'simple_decoder'
  simple_decoder: TfExampleDecoder = TfExampleDecoder()
  label_map_decoder: TfExampleDecoderLabelMap = TfExampleDecoderLabelMap()
@dataclasses.dataclass
class Parser(hyperparams.Config):
  """Config for the example parser (augmentation and anchor matching)."""
  num_channels: int = 3
  # Anchor matcher thresholds — presumably IoU cutoffs for positive /
  # negative assignment; confirm against the parser implementation.
  match_threshold: float = 0.5
  unmatched_threshold: float = 0.5
  # Random horizontal flip and scale-jitter range for training.
  aug_rand_hflip: bool = False
  aug_scale_min: float = 1.0
  aug_scale_max: float = 1.0
  skip_crowd_during_training: bool = True
  max_num_instances: int = 100
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
  """Input config for training and evaluation splits."""
  input_path: str = ''
  global_batch_size: int = 0
  is_training: bool = False
  dtype: str = 'bfloat16'
  decoder: DataDecoder = DataDecoder()
  parser: Parser = Parser()
  shuffle_buffer_size: int = 10000
  file_type: str = 'tfrecord'
@dataclasses.dataclass
class Anchor(hyperparams.Config):
  """Anchor-generation config (scales/aspect ratios per pyramid level)."""
  num_scales: int = 3
  aspect_ratios: List[float] = dataclasses.field(
      default_factory=lambda: [0.5, 1.0, 2.0])
  anchor_size: float = 4.0
@dataclasses.dataclass
class Losses(hyperparams.Config):
  """Loss hyperparameters: focal classification + Huber box regression."""
  focal_loss_alpha: float = 0.25
  focal_loss_gamma: float = 1.5
  huber_loss_delta: float = 0.1
  # Relative weight of the box loss against the classification loss.
  box_loss_weight: int = 50
  l2_weight_decay: float = 0.0
@dataclasses.dataclass
class AttributeHead(hyperparams.Config):
  """Config for one optional attribute prediction head."""
  name: str = ''
  type: str = 'regression'
  size: int = 1
@dataclasses.dataclass
class RetinaNetHead(hyperparams.Config):
  """Config for the shared classification/box prediction head."""
  num_convs: int = 4
  num_filters: int = 256
  use_separable_conv: bool = False
  attribute_heads: Optional[List[AttributeHead]] = None
@dataclasses.dataclass
class DetectionGenerator(hyperparams.Config):
  """Post-processing config (score filtering and NMS)."""
  pre_nms_top_k: int = 5000
  pre_nms_score_threshold: float = 0.05
  nms_iou_threshold: float = 0.5
  max_num_detections: int = 100
  use_batched_nms: bool = False
@dataclasses.dataclass
class RetinaNet(hyperparams.Config):
  """Top-level RetinaNet model config (backbone, decoder, head, anchors)."""
  num_classes: int = 0
  input_size: List[int] = dataclasses.field(default_factory=list)
  # Feature-pyramid level range used for prediction.
  min_level: int = 3
  max_level: int = 7
  anchor: Anchor = Anchor()
  backbone: backbones.Backbone = backbones.Backbone(
      type='resnet', resnet=backbones.ResNet())
  decoder: decoders.Decoder = decoders.Decoder(
      type='fpn', fpn=decoders.FPN())
  head: RetinaNetHead = RetinaNetHead()
  detection_generator: DetectionGenerator = DetectionGenerator()
  norm_activation: common.NormActivation = common.NormActivation()
@dataclasses.dataclass
class RetinaNetTask(cfg.TaskConfig):
  """Task config tying model, data, losses and checkpoint init together."""
  model: RetinaNet = RetinaNet()
  train_data: DataConfig = DataConfig(is_training=True)
  validation_data: DataConfig = DataConfig(is_training=False)
  losses: Losses = Losses()
  init_checkpoint: Optional[str] = None
  init_checkpoint_modules: str = 'all'  # all or backbone
  annotation_file: Optional[str] = None
  per_category_metrics: bool = False
@exp_factory.register_config_factory('retinanet')
def retinanet() -> cfg.ExperimentConfig:
  """RetinaNet general config."""
  # Restrictions force callers to set is_training explicitly on both splits.
  return cfg.ExperimentConfig(
      task=RetinaNetTask(),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
# Root directory of the prepared COCO TFRecords and example counts used to
# derive steps-per-epoch and validation steps below.
COCO_INPUT_PATH_BASE = 'coco'
COCO_TRAIN_EXAMPLES = 118287
COCO_VAL_EXAMPLES = 5000
@exp_factory.register_config_factory('retinanet_resnetfpn_coco')
def retinanet_resnetfpn_coco() -> cfg.ExperimentConfig:
  """COCO object detection with RetinaNet."""
  # Global batch sizes; LR values below are scaled linearly by
  # train_batch_size / 256.
  train_batch_size = 256
  eval_batch_size = 8
  steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
  config = cfg.ExperimentConfig(
      runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
      task=RetinaNetTask(
          init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/resnet50_imagenet/ckpt-28080',
          init_checkpoint_modules='backbone',
          annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
                                       'instances_val2017.json'),
          model=RetinaNet(
              num_classes=91,
              input_size=[640, 640, 3],
              min_level=3,
              max_level=7),
          losses=Losses(l2_weight_decay=1e-4),
          train_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
              is_training=True,
              global_batch_size=train_batch_size,
              parser=Parser(
                  aug_rand_hflip=True, aug_scale_min=0.5, aug_scale_max=2.0)),
          validation_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
              is_training=False,
              global_batch_size=eval_batch_size)),
      trainer=cfg.TrainerConfig(
          # 72-epoch schedule; LR decays 10x at epochs 57 and 67.
          train_steps=72 * steps_per_epoch,
          validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'stepwise',
                  'stepwise': {
                      'boundaries': [
                          57 * steps_per_epoch, 67 * steps_per_epoch
                      ],
                      'values': [
                          0.32 * train_batch_size / 256.0,
                          0.032 * train_batch_size / 256.0,
                          0.0032 * train_batch_size / 256.0
                      ],
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 500,
                      'warmup_learning_rate': 0.0067
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
  return config
@exp_factory.register_config_factory('retinanet_spinenet_coco')
def retinanet_spinenet_coco() -> cfg.ExperimentConfig:
  """COCO object detection with RetinaNet using SpineNet backbone."""
  train_batch_size = 256
  eval_batch_size = 8
  steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
  input_size = 640
  config = cfg.ExperimentConfig(
      runtime=cfg.RuntimeConfig(mixed_precision_dtype='float32'),
      task=RetinaNetTask(
          annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
                                       'instances_val2017.json'),
          model=RetinaNet(
              # SpineNet emits multi-scale features directly, so the decoder
              # is the identity rather than an FPN.
              backbone=backbones.Backbone(
                  type='spinenet',
                  spinenet=backbones.SpineNet(
                      model_id='49', stochastic_depth_drop_rate=0.2)),
              decoder=decoders.Decoder(
                  type='identity', identity=decoders.Identity()),
              anchor=Anchor(anchor_size=3),
              norm_activation=common.NormActivation(
                  use_sync_bn=True, activation='swish'),
              num_classes=91,
              input_size=[input_size, input_size, 3],
              min_level=3,
              max_level=7),
          losses=Losses(l2_weight_decay=4e-5),
          train_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
              is_training=True,
              global_batch_size=train_batch_size,
              parser=Parser(
                  aug_rand_hflip=True, aug_scale_min=0.1, aug_scale_max=2.0)),
          validation_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
              is_training=False,
              global_batch_size=eval_batch_size)),
      trainer=cfg.TrainerConfig(
          # Trained from scratch: 500 epochs, LR decays at epochs 475/490.
          train_steps=500 * steps_per_epoch,
          validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'stepwise',
                  'stepwise': {
                      'boundaries': [
                          475 * steps_per_epoch, 490 * steps_per_epoch
                      ],
                      'values': [
                          0.32 * train_batch_size / 256.0,
                          0.032 * train_batch_size / 256.0,
                          0.0032 * train_batch_size / 256.0
                      ],
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 2000,
                      'warmup_learning_rate': 0.0067
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
  return config
@exp_factory.register_config_factory('retinanet_spinenet_mobile_coco')
def retinanet_spinenet_mobile_coco() -> cfg.ExperimentConfig:
  """COCO object detection with RetinaNet using Mobile SpineNet backbone."""
  train_batch_size = 256
  eval_batch_size = 8
  steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
  input_size = 384
  config = cfg.ExperimentConfig(
      runtime=cfg.RuntimeConfig(mixed_precision_dtype='float32'),
      task=RetinaNetTask(
          annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
                                       'instances_val2017.json'),
          model=RetinaNet(
              backbone=backbones.Backbone(
                  type='spinenet_mobile',
                  spinenet_mobile=backbones.SpineNetMobile(
                      model_id='49', stochastic_depth_drop_rate=0.2)),
              decoder=decoders.Decoder(
                  type='identity', identity=decoders.Identity()),
              # Lighter head (separable convs, fewer filters) for mobile.
              head=RetinaNetHead(num_filters=48, use_separable_conv=True),
              anchor=Anchor(anchor_size=3),
              norm_activation=common.NormActivation(
                  use_sync_bn=True, activation='swish'),
              num_classes=91,
              input_size=[input_size, input_size, 3],
              min_level=3,
              max_level=7),
          losses=Losses(l2_weight_decay=3e-5),
          train_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
              is_training=True,
              global_batch_size=train_batch_size,
              parser=Parser(
                  aug_rand_hflip=True, aug_scale_min=0.1, aug_scale_max=2.0)),
          validation_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
              is_training=False,
              global_batch_size=eval_batch_size)),
      trainer=cfg.TrainerConfig(
          # 600-epoch schedule; LR decays at epochs 575 and 590.
          train_steps=600 * steps_per_epoch,
          validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'stepwise',
                  'stepwise': {
                      'boundaries': [
                          575 * steps_per_epoch, 590 * steps_per_epoch
                      ],
                      'values': [
                          0.32 * train_batch_size / 256.0,
                          0.032 * train_batch_size / 256.0,
                          0.0032 * train_batch_size / 256.0
                      ],
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 2000,
                      'warmup_learning_rate': 0.0067
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
  return config
| 35.302799 | 95 | 0.605305 |
import os
from typing import List, Optional
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.beta.configs import backbones
from official.vision.beta.configs import common
from official.vision.beta.configs import decoders
@dataclasses.dataclass
class TfExampleDecoder(hyperparams.Config):
regenerate_source_id: bool = False
@dataclasses.dataclass
class TfExampleDecoderLabelMap(hyperparams.Config):
regenerate_source_id: bool = False
label_map: str = ''
@dataclasses.dataclass
class DataDecoder(hyperparams.OneOfConfig):
type: Optional[str] = 'simple_decoder'
simple_decoder: TfExampleDecoder = TfExampleDecoder()
label_map_decoder: TfExampleDecoderLabelMap = TfExampleDecoderLabelMap()
@dataclasses.dataclass
class Parser(hyperparams.Config):
num_channels: int = 3
match_threshold: float = 0.5
unmatched_threshold: float = 0.5
aug_rand_hflip: bool = False
aug_scale_min: float = 1.0
aug_scale_max: float = 1.0
skip_crowd_during_training: bool = True
max_num_instances: int = 100
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
input_path: str = ''
global_batch_size: int = 0
is_training: bool = False
dtype: str = 'bfloat16'
decoder: DataDecoder = DataDecoder()
parser: Parser = Parser()
shuffle_buffer_size: int = 10000
file_type: str = 'tfrecord'
@dataclasses.dataclass
class Anchor(hyperparams.Config):
num_scales: int = 3
aspect_ratios: List[float] = dataclasses.field(
default_factory=lambda: [0.5, 1.0, 2.0])
anchor_size: float = 4.0
@dataclasses.dataclass
class Losses(hyperparams.Config):
focal_loss_alpha: float = 0.25
focal_loss_gamma: float = 1.5
huber_loss_delta: float = 0.1
box_loss_weight: int = 50
l2_weight_decay: float = 0.0
@dataclasses.dataclass
class AttributeHead(hyperparams.Config):
name: str = ''
type: str = 'regression'
size: int = 1
@dataclasses.dataclass
class RetinaNetHead(hyperparams.Config):
num_convs: int = 4
num_filters: int = 256
use_separable_conv: bool = False
attribute_heads: Optional[List[AttributeHead]] = None
@dataclasses.dataclass
class DetectionGenerator(hyperparams.Config):
pre_nms_top_k: int = 5000
pre_nms_score_threshold: float = 0.05
nms_iou_threshold: float = 0.5
max_num_detections: int = 100
use_batched_nms: bool = False
@dataclasses.dataclass
class RetinaNet(hyperparams.Config):
num_classes: int = 0
input_size: List[int] = dataclasses.field(default_factory=list)
min_level: int = 3
max_level: int = 7
anchor: Anchor = Anchor()
backbone: backbones.Backbone = backbones.Backbone(
type='resnet', resnet=backbones.ResNet())
decoder: decoders.Decoder = decoders.Decoder(
type='fpn', fpn=decoders.FPN())
head: RetinaNetHead = RetinaNetHead()
detection_generator: DetectionGenerator = DetectionGenerator()
norm_activation: common.NormActivation = common.NormActivation()
@dataclasses.dataclass
class RetinaNetTask(cfg.TaskConfig):
model: RetinaNet = RetinaNet()
train_data: DataConfig = DataConfig(is_training=True)
validation_data: DataConfig = DataConfig(is_training=False)
losses: Losses = Losses()
init_checkpoint: Optional[str] = None
init_checkpoint_modules: str = 'all'
annotation_file: Optional[str] = None
per_category_metrics: bool = False
@exp_factory.register_config_factory('retinanet')
def retinanet() -> cfg.ExperimentConfig:
return cfg.ExperimentConfig(
task=RetinaNetTask(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
COCO_INPUT_PATH_BASE = 'coco'
COCO_TRAIN_EXAMPLES = 118287
COCO_VAL_EXAMPLES = 5000
@exp_factory.register_config_factory('retinanet_resnetfpn_coco')
def retinanet_resnetfpn_coco() -> cfg.ExperimentConfig:
train_batch_size = 256
eval_batch_size = 8
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=RetinaNetTask(
init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/resnet50_imagenet/ckpt-28080',
init_checkpoint_modules='backbone',
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=RetinaNet(
num_classes=91,
input_size=[640, 640, 3],
min_level=3,
max_level=7),
losses=Losses(l2_weight_decay=1e-4),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.5, aug_scale_max=2.0)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
train_steps=72 * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
57 * steps_per_epoch, 67 * steps_per_epoch
],
'values': [
0.32 * train_batch_size / 256.0,
0.032 * train_batch_size / 256.0,
0.0032 * train_batch_size / 256.0
],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 500,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('retinanet_spinenet_coco')
def retinanet_spinenet_coco() -> cfg.ExperimentConfig:
train_batch_size = 256
eval_batch_size = 8
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
input_size = 640
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='float32'),
task=RetinaNetTask(
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=RetinaNet(
backbone=backbones.Backbone(
type='spinenet',
spinenet=backbones.SpineNet(
model_id='49', stochastic_depth_drop_rate=0.2)),
decoder=decoders.Decoder(
type='identity', identity=decoders.Identity()),
anchor=Anchor(anchor_size=3),
norm_activation=common.NormActivation(
use_sync_bn=True, activation='swish'),
num_classes=91,
input_size=[input_size, input_size, 3],
min_level=3,
max_level=7),
losses=Losses(l2_weight_decay=4e-5),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.1, aug_scale_max=2.0)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
train_steps=500 * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
475 * steps_per_epoch, 490 * steps_per_epoch
],
'values': [
0.32 * train_batch_size / 256.0,
0.032 * train_batch_size / 256.0,
0.0032 * train_batch_size / 256.0
],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2000,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('retinanet_spinenet_mobile_coco')
def retinanet_spinenet_mobile_coco() -> cfg.ExperimentConfig:
train_batch_size = 256
eval_batch_size = 8
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
input_size = 384
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='float32'),
task=RetinaNetTask(
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=RetinaNet(
backbone=backbones.Backbone(
type='spinenet_mobile',
spinenet_mobile=backbones.SpineNetMobile(
model_id='49', stochastic_depth_drop_rate=0.2)),
decoder=decoders.Decoder(
type='identity', identity=decoders.Identity()),
head=RetinaNetHead(num_filters=48, use_separable_conv=True),
anchor=Anchor(anchor_size=3),
norm_activation=common.NormActivation(
use_sync_bn=True, activation='swish'),
num_classes=91,
input_size=[input_size, input_size, 3],
min_level=3,
max_level=7),
losses=Losses(l2_weight_decay=3e-5),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.1, aug_scale_max=2.0)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
train_steps=600 * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
575 * steps_per_epoch, 590 * steps_per_epoch
],
'values': [
0.32 * train_batch_size / 256.0,
0.032 * train_batch_size / 256.0,
0.0032 * train_batch_size / 256.0
],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2000,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
| true | true |
f726499b8d6913a5329f1b82ef19ff7b2b6b251b | 1,827 | py | Python | Arbitrage_Spot/dquant/entrypoint.py | ronaldzgithub/CryptoArbitrage | b4b7a12b7b11f3dcf950f9d2039dad4f1388530b | [
"MIT"
] | 1 | 2021-11-03T06:16:16.000Z | 2021-11-03T06:16:16.000Z | Arbitrage_Spot/dquant/entrypoint.py | benno0810/CryptoArbitrage | b4b7a12b7b11f3dcf950f9d2039dad4f1388530b | [
"MIT"
] | null | null | null | Arbitrage_Spot/dquant/entrypoint.py | benno0810/CryptoArbitrage | b4b7a12b7b11f3dcf950f9d2039dad4f1388530b | [
"MIT"
] | 2 | 2021-05-07T09:11:54.000Z | 2021-11-27T16:29:10.000Z | import argparse
import logging
from logging.handlers import RotatingFileHandler
from dquant.datafeed import Datafeed
class EntryPoint:
    """CLI driver: parses arguments, sets up logging, dispatches commands."""
    # Shared Datafeed instance created by exec_command when "feed" runs.
    datafeed = None
    def exec_command(self, args ):
        """Dispatch the parsed command; currently only "feed" is handled."""
        logging.debug('exec_command:%s' % args)
        if "feed" in args.command:
            self.datafeed = Datafeed()
            if args.markets:
                self.datafeed.init_markets(args.markets.split(","))
            # NOTE(review): calls a private method of Datafeed and blocks
            # in its loop until it returns.
            self.datafeed._run_loop()
            return
    def init_logger(self, args):
        """Configure root logging to console plus a rotating file.

        Level is DEBUG when --debug was given, INFO otherwise; the file
        handler rotates at 100 MB keeping 10 backups.
        """
        level = logging.INFO
        if args.debug:
            level = logging.DEBUG
        logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
                        level=level)
        Rthandler = RotatingFileHandler('../logs/dquant.log', maxBytes=100 * 1024 * 1024, backupCount=10)
        Rthandler.setLevel(level)
        formatter = logging.Formatter('%(asctime)-12s [%(levelname)s] %(message)s')
        Rthandler.setFormatter(formatter)
        logging.getLogger('').addHandler(Rthandler)
    def main(self):
        """Parse command-line arguments and run the requested command."""
        parser = argparse.ArgumentParser()
        parser.add_argument("-d", "--debug", help="debug verbose mode",
                            action="store_true")
        parser.add_argument("-m", "--markets", type=str,
                            help="markets, example: -mokexusd")
        parser.add_argument("-s","--strategy",type=str,
                            help="strategy, example:-smaker")
        parser.add_argument("command", nargs='*', default="watch",
                            help='verb: "feed|exec|rexec"')
        args = parser.parse_args()
        self.init_logger(args)
        self.exec_command(args)
        print('main end')
        # NOTE(review): exits with status -1 even on the success path —
        # confirm this is intentional before relying on the exit code.
        exit(-1)
def main():
    """Console entry point: construct an EntryPoint and run it."""
    EntryPoint().main()
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| 31.5 | 105 | 0.579639 | import argparse
import logging
from logging.handlers import RotatingFileHandler
from dquant.datafeed import Datafeed
class EntryPoint:
datafeed = None
def exec_command(self, args ):
logging.debug('exec_command:%s' % args)
if "feed" in args.command:
self.datafeed = Datafeed()
if args.markets:
self.datafeed.init_markets(args.markets.split(","))
self.datafeed._run_loop()
return
def init_logger(self, args):
level = logging.INFO
if args.debug:
level = logging.DEBUG
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
level=level)
Rthandler = RotatingFileHandler('../logs/dquant.log', maxBytes=100 * 1024 * 1024, backupCount=10)
Rthandler.setLevel(level)
formatter = logging.Formatter('%(asctime)-12s [%(levelname)s] %(message)s')
Rthandler.setFormatter(formatter)
logging.getLogger('').addHandler(Rthandler)
def main(self):
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", help="debug verbose mode",
action="store_true")
parser.add_argument("-m", "--markets", type=str,
help="markets, example: -mokexusd")
parser.add_argument("-s","--strategy",type=str,
help="strategy, example:-smaker")
parser.add_argument("command", nargs='*', default="watch",
help='verb: "feed|exec|rexec"')
args = parser.parse_args()
self.init_logger(args)
self.exec_command(args)
print('main end')
exit(-1)
def main():
entrypoint = EntryPoint()
entrypoint.main()
if __name__ == "__main__":
main()
| true | true |
f7264a51c8db2fd5cc7ad9b2720dd04cb72172fd | 5,693 | py | Python | noval/python/parser/utils.py | bopopescu/NovalIDE | 590c2adb69d54fa4a6c9dad5459198be057b1329 | [
"MulanPSL-1.0"
] | null | null | null | noval/python/parser/utils.py | bopopescu/NovalIDE | 590c2adb69d54fa4a6c9dad5459198be057b1329 | [
"MulanPSL-1.0"
] | null | null | null | noval/python/parser/utils.py | bopopescu/NovalIDE | 590c2adb69d54fa4a6c9dad5459198be057b1329 | [
"MulanPSL-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
import functools
DATABASE_FILE = "version"
def MakeDirs(dirname):
dirname = os.path.abspath(dirname)
dirname = dirname.replace("\\","/")
dirnames = dirname.split("/")
destdir = ""
destdir = os.path.join(dirnames[0] + "/",dirnames[1])
if not os.path.exists(destdir):
os.mkdir(destdir)
for name in dirnames[2:]:
destdir=os.path.join(destdir,name)
if not os.path.exists(destdir):
os.mkdir(destdir)
def get_relative_name(module_path,path_list = sys.path):
path = os.path.dirname(module_path)
recent_path = ''
while True:
#when route to sys path or root path,such as / or c:\\,skip the circle
if PathsContainPath(path_list,path) or os.path.dirname(path) == path:
recent_path = path
break
path = os.path.dirname(path)
path_name = module_path.replace(recent_path + os.sep,'').split('.')[0]
if os.name == 'nt':
path_name = path_name.replace(os.sep,'/')
parts = path_name.split('/')
if parts[-1] == "__init__":
relative_module_name = '.'.join(parts[0:-1])
is_package = True
else:
relative_module_name = '.'.join(parts)
is_package = False
return relative_module_name,is_package
def strcmp(str1,str2):
i = 0
while i<len(str1) and i<len(str2):
if str1[i] != str2[i]:
if str1[i] == '_':
return 1
elif str2[i] == '_':
return -1
outcome = py_cmp(str1[i],str2[i])
return outcome
i += 1
return py_cmp(len(str1),len(str2))
def CmpMember(x,y):
if strcmp(x.lower() , y.lower()) == 1:
return 1
return -1
def CmpMember2(x,y):
if x.startswith("_") and not y.startswith("_"):
return 1
elif y.startswith("_") and not x.startswith("_"):
return -1
if x.lower() > y.lower():
return 1
return -1
def CompareDatabaseVersion_(new_version,old_version):
new_verions = new_version.split(".")
old_versions = old_version.split(".")
for i,v in enumerate(new_verions):
if i >= len(old_versions):
return 1
if int(v) > int(old_versions[i]):
return 1
return 0
def IsNoneOrEmpty(value):
if value is None:
return True
elif value == "":
return True
return False
def IsPython3():
if sys.version_info[0] >= 3:
return True
return False
def IsPython2():
if sys.version_info[0] == 2:
return True
return False
def ComparePath(path1,path2):
if os.name == 'nt':
path1 = path1.replace("/",os.sep).rstrip(os.sep)
path2 = path2.replace("/",os.sep).rstrip(os.sep)
return path1.lower() == path2.lower()
return path1.rstrip(os.sep) == path2.rstrip(os.sep)
def PathsContainPath(path_list,path):
if os.name == 'nt':
for p in path_list:
if ComparePath(p,path):
return True
return False
return path in path_list
def CalcVersionValue(ver_str="0.0.0"):
"""Calculates a version value from the provided dot-formated string
1) SPECIFICATION: Version value calculation AA.BBB.CCC
- major values: < 1 (i.e 0.0.85 = 0.850)
- minor values: 1 - 999 (i.e 0.1.85 = 1.850)
- micro values: >= 1000 (i.e 1.1.85 = 1001.850)
@keyword ver_str: Version string to calculate value of
"""
ver_str = ''.join([char for char in ver_str
if char.isdigit() or char == '.'])
ver_lvl = ver_str.split(u".")
if len(ver_lvl) < 3:
return 0
major = int(ver_lvl[0]) * 1000
minor = int(ver_lvl[1])
if len(ver_lvl[2]) <= 2:
ver_lvl[2] += u'0'
micro = float(ver_lvl[2]) / 1000
return float(major) + float(minor) + micro
def CompareCommonVersion(new_version,old_version):
'''
比较通用版本号大小,如果新版本号大于旧版本号返回1,否则返回0,返回0才正常,返回1需要更新
'''
def format_version(version_str):
'''
标准化版本字符串,至少包含3个点.如果是类似x.x的版本则转换为x.x.0之类的
'''
if len(version_str.split('.')) == 2:
version_str += ".0"
return version_str
new_version = format_version(new_version)
old_version = format_version(old_version)
if CalcVersionValue(new_version) <= CalcVersionValue(old_version):
return 0
return 1
def py_sorted(iter_obj,cmp_func):
if IsPython2():
sort_obj = sorted(iter_obj, cmp=cmp_func)
elif IsPython3():
sort_obj = sorted(iter_obj, key=functools.cmp_to_key(cmp_func))
return sort_obj
def py3_cmp(l,r):
if r < l:
return 1
if l < r:
return -1
return 0
#python3没有cmp函数,自己实现一个
if IsPython2():
py_cmp = cmp
elif IsPython3():
py_cmp = py3_cmp
def LoadDatabaseVersion(database_location):
with open(os.path.join(database_location,DATABASE_FILE)) as f:
return f.read()
def SaveDatabaseVersion(database_location,new_database_version):
with open(os.path.join(database_location,DATABASE_FILE),"w") as f:
f.write(new_database_version)
def NeedRenewDatabase(database_location,new_database_version):
if not os.path.exists(os.path.join(database_location,DATABASE_FILE)):
return True
old_database_version = LoadDatabaseVersion(database_location)
if 0 == CompareCommonVersion(new_database_version,old_database_version):
return False
return True | 30.121693 | 79 | 0.585456 |
import os
import sys
import functools
DATABASE_FILE = "version"
def MakeDirs(dirname):
dirname = os.path.abspath(dirname)
dirname = dirname.replace("\\","/")
dirnames = dirname.split("/")
destdir = ""
destdir = os.path.join(dirnames[0] + "/",dirnames[1])
if not os.path.exists(destdir):
os.mkdir(destdir)
for name in dirnames[2:]:
destdir=os.path.join(destdir,name)
if not os.path.exists(destdir):
os.mkdir(destdir)
def get_relative_name(module_path,path_list = sys.path):
path = os.path.dirname(module_path)
recent_path = ''
while True:
if PathsContainPath(path_list,path) or os.path.dirname(path) == path:
recent_path = path
break
path = os.path.dirname(path)
path_name = module_path.replace(recent_path + os.sep,'').split('.')[0]
if os.name == 'nt':
path_name = path_name.replace(os.sep,'/')
parts = path_name.split('/')
if parts[-1] == "__init__":
relative_module_name = '.'.join(parts[0:-1])
is_package = True
else:
relative_module_name = '.'.join(parts)
is_package = False
return relative_module_name,is_package
def strcmp(str1,str2):
i = 0
while i<len(str1) and i<len(str2):
if str1[i] != str2[i]:
if str1[i] == '_':
return 1
elif str2[i] == '_':
return -1
outcome = py_cmp(str1[i],str2[i])
return outcome
i += 1
return py_cmp(len(str1),len(str2))
def CmpMember(x,y):
if strcmp(x.lower() , y.lower()) == 1:
return 1
return -1
def CmpMember2(x,y):
if x.startswith("_") and not y.startswith("_"):
return 1
elif y.startswith("_") and not x.startswith("_"):
return -1
if x.lower() > y.lower():
return 1
return -1
def CompareDatabaseVersion_(new_version,old_version):
new_verions = new_version.split(".")
old_versions = old_version.split(".")
for i,v in enumerate(new_verions):
if i >= len(old_versions):
return 1
if int(v) > int(old_versions[i]):
return 1
return 0
def IsNoneOrEmpty(value):
if value is None:
return True
elif value == "":
return True
return False
def IsPython3():
if sys.version_info[0] >= 3:
return True
return False
def IsPython2():
if sys.version_info[0] == 2:
return True
return False
def ComparePath(path1,path2):
if os.name == 'nt':
path1 = path1.replace("/",os.sep).rstrip(os.sep)
path2 = path2.replace("/",os.sep).rstrip(os.sep)
return path1.lower() == path2.lower()
return path1.rstrip(os.sep) == path2.rstrip(os.sep)
def PathsContainPath(path_list,path):
if os.name == 'nt':
for p in path_list:
if ComparePath(p,path):
return True
return False
return path in path_list
def CalcVersionValue(ver_str="0.0.0"):
ver_str = ''.join([char for char in ver_str
if char.isdigit() or char == '.'])
ver_lvl = ver_str.split(u".")
if len(ver_lvl) < 3:
return 0
major = int(ver_lvl[0]) * 1000
minor = int(ver_lvl[1])
if len(ver_lvl[2]) <= 2:
ver_lvl[2] += u'0'
micro = float(ver_lvl[2]) / 1000
return float(major) + float(minor) + micro
def CompareCommonVersion(new_version,old_version):
def format_version(version_str):
if len(version_str.split('.')) == 2:
version_str += ".0"
return version_str
new_version = format_version(new_version)
old_version = format_version(old_version)
if CalcVersionValue(new_version) <= CalcVersionValue(old_version):
return 0
return 1
def py_sorted(iter_obj,cmp_func):
if IsPython2():
sort_obj = sorted(iter_obj, cmp=cmp_func)
elif IsPython3():
sort_obj = sorted(iter_obj, key=functools.cmp_to_key(cmp_func))
return sort_obj
def py3_cmp(l,r):
if r < l:
return 1
if l < r:
return -1
return 0
if IsPython2():
py_cmp = cmp
elif IsPython3():
py_cmp = py3_cmp
def LoadDatabaseVersion(database_location):
with open(os.path.join(database_location,DATABASE_FILE)) as f:
return f.read()
def SaveDatabaseVersion(database_location,new_database_version):
with open(os.path.join(database_location,DATABASE_FILE),"w") as f:
f.write(new_database_version)
def NeedRenewDatabase(database_location,new_database_version):
if not os.path.exists(os.path.join(database_location,DATABASE_FILE)):
return True
old_database_version = LoadDatabaseVersion(database_location)
if 0 == CompareCommonVersion(new_database_version,old_database_version):
return False
return True | true | true |
f7264b00ab45f44826da46ff3c5c64fce9f84f82 | 219 | py | Python | nothing/nothing/doctype/customer_status/test_customer_status.py | libracore/nothing | e334c5a534eb3ec11ad8c77a467fae05f5383af5 | [
"MIT"
] | 1 | 2022-01-12T11:20:22.000Z | 2022-01-12T11:20:22.000Z | nothing/nothing/doctype/customer_status/test_customer_status.py | libracore/nothing | e334c5a534eb3ec11ad8c77a467fae05f5383af5 | [
"MIT"
] | null | null | null | nothing/nothing/doctype/customer_status/test_customer_status.py | libracore/nothing | e334c5a534eb3ec11ad8c77a467fae05f5383af5 | [
"MIT"
] | 2 | 2021-05-07T08:01:13.000Z | 2021-08-14T22:24:33.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021, libracore AG and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestCustomerStatus(unittest.TestCase):
pass
| 19.909091 | 51 | 0.767123 |
from __future__ import unicode_literals
import unittest
class TestCustomerStatus(unittest.TestCase):
pass
| true | true |
f7264b4fcfd7aafc1c81e31c2b3afdfb0672a9ba | 1,144 | py | Python | code/nn.py | arjunchandra/continuous-rl | 8f3c655c6a4b2e9d15a6b052e5466c0a75191a08 | [
"MIT"
] | 17 | 2019-03-29T18:30:36.000Z | 2021-10-17T15:38:22.000Z | code/nn.py | arjunchandra/continuous-rl | 8f3c655c6a4b2e9d15a6b052e5466c0a75191a08 | [
"MIT"
] | 1 | 2019-04-22T22:40:30.000Z | 2019-04-24T21:45:07.000Z | code/nn.py | ctallec/continuous-rl | 8f3c655c6a4b2e9d15a6b052e5466c0a75191a08 | [
"MIT"
] | 5 | 2019-04-29T16:26:18.000Z | 2020-01-23T07:17:49.000Z | """Some nn utilities."""
import torch
from abstract import ParametricFunction
def copy_buffer(net: ParametricFunction, target_net: ParametricFunction):
"""Copy all buffers from net to target_net."""
with torch.no_grad():
for target_buf, buf in zip(target_net.buffers(), net.buffers()): # type: ignore
target_buf.copy_(buf)
def soft_update(net: ParametricFunction, target_net: ParametricFunction, tau: float):
"""Soft update of the parameters of target_net with those of net.
Precisely
theta_targetnet <- tau * theta_targetnet + (1 - tau) * theta_net
"""
copy_buffer(net, target_net)
with torch.no_grad():
for target_param, param in zip(target_net.parameters(), net.parameters()):
target_param.add_(1 - tau, param - target_param)
def hard_update(net: ParametricFunction, target_net: ParametricFunction):
"""Hard update (i.e. copy) of the parameters of target_net with those of net."""
copy_buffer(net, target_net)
with torch.no_grad():
for target_param, param in zip(target_net.parameters(), net.parameters()):
target_param.copy_(param)
| 40.857143 | 87 | 0.701049 | import torch
from abstract import ParametricFunction
def copy_buffer(net: ParametricFunction, target_net: ParametricFunction):
with torch.no_grad():
for target_buf, buf in zip(target_net.buffers(), net.buffers()):
target_buf.copy_(buf)
def soft_update(net: ParametricFunction, target_net: ParametricFunction, tau: float):
copy_buffer(net, target_net)
with torch.no_grad():
for target_param, param in zip(target_net.parameters(), net.parameters()):
target_param.add_(1 - tau, param - target_param)
def hard_update(net: ParametricFunction, target_net: ParametricFunction):
copy_buffer(net, target_net)
with torch.no_grad():
for target_param, param in zip(target_net.parameters(), net.parameters()):
target_param.copy_(param)
| true | true |
f7264b724b3836bde921699ae915f09f3081112e | 598 | py | Python | driver/forms.py | Mariga123/carpool | f7330634ace2718c2347694b207b9dd49ef6538f | [
"MIT"
] | null | null | null | driver/forms.py | Mariga123/carpool | f7330634ace2718c2347694b207b9dd49ef6538f | [
"MIT"
] | null | null | null | driver/forms.py | Mariga123/carpool | f7330634ace2718c2347694b207b9dd49ef6538f | [
"MIT"
] | null | null | null | from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
from .models import *
class RegisterForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
class UserUpdateForm(forms.ModelForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['email']
class UpdateForm(forms.ModelForm):
class Meta:
model = Driver
fields = ['name', 'bio', 'avatar','contact_info','vehicle'] | 26 | 68 | 0.667224 | from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
from .models import *
class RegisterForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
class UserUpdateForm(forms.ModelForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['email']
class UpdateForm(forms.ModelForm):
class Meta:
model = Driver
fields = ['name', 'bio', 'avatar','contact_info','vehicle'] | true | true |
f7264d89247663cd466f161ae8db1fc4b69e2f6a | 946 | py | Python | cms/admin/dialog/views.py | emiquelito/django-cms-2.0 | 721d6aa91925ff46aa0de9f8ea967ca93e73741b | [
"BSD-3-Clause"
] | 1 | 2015-09-28T10:08:14.000Z | 2015-09-28T10:08:14.000Z | cms/admin/dialog/views.py | gmurewa/django-cms-2.0 | 6fab9d93ddcea301a844996f5f0db7edc4883953 | [
"BSD-3-Clause"
] | 1 | 2019-11-08T02:38:49.000Z | 2019-11-08T02:38:49.000Z | cms/admin/dialog/views.py | gmurewa/django-cms-2.0 | 6fab9d93ddcea301a844996f5f0db7edc4883953 | [
"BSD-3-Clause"
] | null | null | null | from cms.admin.dialog.forms import get_copy_dialog_form
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.admin.views.decorators import staff_member_required
from django.http import Http404, HttpResponse
from django.conf import settings
from cms.models import Page
@staff_member_required
def get_copy_dialog(request, page_id):
if not settings.CMS_PERMISSION or not settings.CMS_MODERATOR:
return HttpResponse('')
page = get_object_or_404(Page, pk=page_id)
target = get_object_or_404(Page, pk=request.REQUEST['target'])
if not page.has_change_permission(request) or \
not target.has_add_permission(request):
raise Http404
context = {
'dialog_id': 'dialog-copy',
'form': get_copy_dialog_form(request)(),
'callback': request.REQUEST['callback'],
}
return render_to_response("admin/cms/page/dialog/copy.html", context)
| 36.384615 | 73 | 0.738901 | from cms.admin.dialog.forms import get_copy_dialog_form
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.admin.views.decorators import staff_member_required
from django.http import Http404, HttpResponse
from django.conf import settings
from cms.models import Page
@staff_member_required
def get_copy_dialog(request, page_id):
if not settings.CMS_PERMISSION or not settings.CMS_MODERATOR:
return HttpResponse('')
page = get_object_or_404(Page, pk=page_id)
target = get_object_or_404(Page, pk=request.REQUEST['target'])
if not page.has_change_permission(request) or \
not target.has_add_permission(request):
raise Http404
context = {
'dialog_id': 'dialog-copy',
'form': get_copy_dialog_form(request)(),
'callback': request.REQUEST['callback'],
}
return render_to_response("admin/cms/page/dialog/copy.html", context)
| true | true |
f7264ec3c56d15f017cede4fca8175d52c1cacc4 | 2,219 | py | Python | tests/test_user_storage.py | Cerzon/gb_chat | b4f8a6bf62b0971a135fbb2083456193f7a816cb | [
"Apache-2.0"
] | null | null | null | tests/test_user_storage.py | Cerzon/gb_chat | b4f8a6bf62b0971a135fbb2083456193f7a816cb | [
"Apache-2.0"
] | null | null | null | tests/test_user_storage.py | Cerzon/gb_chat | b4f8a6bf62b0971a135fbb2083456193f7a816cb | [
"Apache-2.0"
] | null | null | null | from unittest.mock import MagicMock
import pytest
from gb_chat.db.user_history_storage import UserHistoryStorage
from gb_chat.db.user_storage import (InvalidName, InvalidPassword, UserExists,
UserNotFound, UserStorage)
from conftest import VALID_PASSWORD, VALID_USERNAME
@pytest.fixture
def user_history_storage():
return MagicMock(spec_set=UserHistoryStorage)
@pytest.fixture
def sut(session, user_history_storage):
return UserStorage(session, user_history_storage)
@pytest.mark.parametrize("username", ["", " ", "user 1", "usr"])
def test_registers_user_raises_when_username_invalid(username, sut):
with pytest.raises(InvalidName):
sut.register_user(username, VALID_PASSWORD)
@pytest.mark.parametrize("password", ["", "qwerty", "password", "passw0rd", "Passw0rd"])
def test_registers_user_raises_when_password_invalid(password, sut):
with pytest.raises(InvalidPassword):
sut.register_user(VALID_USERNAME, password)
def test_registers_user_adds_register_record(sut, user_history_storage):
sut.register_user(VALID_USERNAME, VALID_PASSWORD)
user_history_storage.add_register_record.assert_called_once()
call = user_history_storage.add_register_record.mock_calls[0]
user = call.args[0]
assert user.username == VALID_USERNAME
assert user.password == VALID_PASSWORD
@pytest.fixture
def sut_with_user(sut):
sut.register_user(VALID_USERNAME, VALID_PASSWORD)
return sut
def test_registers_user_raises_when_same_name(sut_with_user):
with pytest.raises(UserExists):
sut_with_user.register_user(VALID_USERNAME, "P@ssw0rd111")
@pytest.mark.parametrize(
"username,password", [(VALID_USERNAME, "pass"), ("user1", VALID_PASSWORD)]
)
def test_credentials_invalid(username, password, sut_with_user):
assert not sut_with_user.credentials_valid(username, password)
def test_get_user_raises_when_no_user_found(sut):
with pytest.raises(UserNotFound):
sut.get_user_by_name("aaaa")
def test_get_user_raises_when_no_user_found(sut_with_user):
user = sut_with_user.get_user_by_name(VALID_USERNAME)
assert user.username == VALID_USERNAME
assert user.password == VALID_PASSWORD
| 32.15942 | 88 | 0.775124 | from unittest.mock import MagicMock
import pytest
from gb_chat.db.user_history_storage import UserHistoryStorage
from gb_chat.db.user_storage import (InvalidName, InvalidPassword, UserExists,
UserNotFound, UserStorage)
from conftest import VALID_PASSWORD, VALID_USERNAME
@pytest.fixture
def user_history_storage():
return MagicMock(spec_set=UserHistoryStorage)
@pytest.fixture
def sut(session, user_history_storage):
return UserStorage(session, user_history_storage)
@pytest.mark.parametrize("username", ["", " ", "user 1", "usr"])
def test_registers_user_raises_when_username_invalid(username, sut):
with pytest.raises(InvalidName):
sut.register_user(username, VALID_PASSWORD)
@pytest.mark.parametrize("password", ["", "qwerty", "password", "passw0rd", "Passw0rd"])
def test_registers_user_raises_when_password_invalid(password, sut):
with pytest.raises(InvalidPassword):
sut.register_user(VALID_USERNAME, password)
def test_registers_user_adds_register_record(sut, user_history_storage):
sut.register_user(VALID_USERNAME, VALID_PASSWORD)
user_history_storage.add_register_record.assert_called_once()
call = user_history_storage.add_register_record.mock_calls[0]
user = call.args[0]
assert user.username == VALID_USERNAME
assert user.password == VALID_PASSWORD
@pytest.fixture
def sut_with_user(sut):
sut.register_user(VALID_USERNAME, VALID_PASSWORD)
return sut
def test_registers_user_raises_when_same_name(sut_with_user):
with pytest.raises(UserExists):
sut_with_user.register_user(VALID_USERNAME, "P@ssw0rd111")
@pytest.mark.parametrize(
"username,password", [(VALID_USERNAME, "pass"), ("user1", VALID_PASSWORD)]
)
def test_credentials_invalid(username, password, sut_with_user):
assert not sut_with_user.credentials_valid(username, password)
def test_get_user_raises_when_no_user_found(sut):
with pytest.raises(UserNotFound):
sut.get_user_by_name("aaaa")
def test_get_user_raises_when_no_user_found(sut_with_user):
user = sut_with_user.get_user_by_name(VALID_USERNAME)
assert user.username == VALID_USERNAME
assert user.password == VALID_PASSWORD
| true | true |
f7264fe3b301f10852827f18dc032e373e7bf3a4 | 25,966 | py | Python | tests/test_commandline.py | marcelm/cutadapt | c63043e0f43970619bb7f8c1242912c236d60545 | [
"MIT"
] | 375 | 2015-01-16T14:04:50.000Z | 2022-03-16T02:19:43.000Z | tests/test_commandline.py | marcelm/cutadapt | c63043e0f43970619bb7f8c1242912c236d60545 | [
"MIT"
] | 589 | 2015-03-05T20:06:03.000Z | 2022-03-29T22:49:56.000Z | tests/test_commandline.py | marcelm/cutadapt | c63043e0f43970619bb7f8c1242912c236d60545 | [
"MIT"
] | 150 | 2015-02-10T12:19:40.000Z | 2022-03-25T05:06:50.000Z | import subprocess
import sys
import os
from io import StringIO, BytesIO
import dnaio
import pytest
from cutadapt.__main__ import main
from utils import assert_files_equal, datapath, cutpath
# pytest.mark.timeout will not fail even if pytest-timeout is not installed
try:
import pytest_timeout as _unused
except ImportError: # pragma: no cover
raise ImportError("pytest_timeout needs to be installed")
del _unused
def test_does_not_close_stdout():
main([datapath("small.fastq")])
assert not sys.stdout.closed
def test_help():
with pytest.raises(SystemExit) as e:
main(["--help"])
assert e.value.args[0] == 0
def test_unknown_file_format(tmp_path):
path = tmp_path / "unknown_format.txt"
path.write_text("raw text")
with pytest.raises(SystemExit):
main([str(path)])
def test_cores_negative():
with pytest.raises(SystemExit) as e:
main(["--cores=-1", datapath("simple.fasta")])
assert e.value.args[0] == 2
# "cannot be negative"
def test_quiet_and_report():
with pytest.raises(SystemExit) as e:
main(["--quiet", "--report=minimal", datapath("simple.fasta")])
assert e.value.args[0] == 2
# "Options --quiet and --report cannot be used at the same time"
@pytest.mark.parametrize("args", [
("--discard-trimmed", "--discard-untrimmed"),
("--discard-trimmed", "--untrimmed-output", os.devnull),
("--discard-untrimmed", "--untrimmed-output", os.devnull),
])
def test_only_one_of_discard_trimmed_discard_untrimmed_untrimmed_output(args):
with pytest.raises(SystemExit) as e:
main(["-o", os.devnull, *args, datapath("small.fastq")])
assert e.value.args[0] == 2
def test_debug():
main(["--debug", "--", datapath("small.fastq")])
def test_debug_trace():
main(["--debug", "--debug", "-a", "ACGT", datapath("small.fastq")])
def test_example(run):
run('-N -b ADAPTER', 'example.fa', 'example.fa')
def test_compressed_fasta(run):
run("", "simple.fasta", "simple.fasta.gz")
def test_small(run):
run('-a TTAGACATATCTCCGTCG', 'small.fastq', 'small.fastq')
def test_empty(run, cores):
"""empty input"""
run("--cores {} -a TTAGACATATCTCCGTCG".format(cores), "empty.fastq", "empty.fastq")
def test_newlines(run):
"""DOS/Windows newlines"""
run('-e 0.12 -a TTAGACATATCTCCGTCG', 'dos.fastq', 'dos.fastq')
def test_lowercase(run):
"""lowercase adapter"""
run('-a ttagacatatctccgtcg', 'lowercase.fastq', 'small.fastq')
def test_rest(run, tmp_path, cores):
"""-r/--rest-file"""
rest = tmp_path / "rest.tmp"
run(['--cores', str(cores), '-b', 'ADAPTER', '-N', '-r', rest], "rest.fa", "rest.fa")
assert_files_equal(datapath('rest.txt'), rest)
def test_restfront(run, tmp_path):
path = tmp_path / "rest.txt"
run(['-g', 'ADAPTER', '-N', '-r', path], "restfront.fa", "rest.fa")
assert_files_equal(datapath('restfront.txt'), path)
def test_discard(run):
"""--discard"""
run("-b TTAGACATATCTCCGTCG --discard", "discard.fastq", "small.fastq")
def test_discard_untrimmed(run):
"""--discard-untrimmed"""
run('-b CAAGAT --discard-untrimmed', 'discard-untrimmed.fastq', 'small.fastq')
def test_extensiontxtgz(run):
"""automatic recognition of "_sequence.txt.gz" extension"""
run("-b TTAGACATATCTCCGTCG", "s_1_sequence.txt", "s_1_sequence.txt.gz")
def test_minimum_length(run):
"""-m/--minimum-length"""
stats = run("-m 5 -a TTAGACATATCTCCGTCG", "minlen.fa", "lengths.fa")
assert stats.written_bp[0] == 45
assert stats.written == 6
def test_too_short(run, tmp_path, cores):
too_short_path = tmp_path / 'tooshort.fa'
stats = run([
"--cores", str(cores),
"-m", "5",
"-a", "TTAGACATATCTCCGTCG",
"--too-short-output", too_short_path
], "minlen.fa", "lengths.fa")
assert_files_equal(datapath('tooshort.fa'), too_short_path)
assert stats.filtered["too_short"] == 5
@pytest.mark.parametrize("redirect", (False, True))
def test_too_short_statistics(redirect):
args = ["-a", "TTAGACATATCTCCGTCG", "-m", "24", "-o", os.devnull, datapath("small.fastq")]
if redirect:
args[:0] = ["--too-short-output", os.devnull]
stats = main(args)
assert stats.with_adapters[0] == 2
assert stats.written == 2
assert stats.written_bp[0] == 58
assert stats.filtered["too_short"] == 1
def test_maximum_length(run):
"""-M/--maximum-length"""
run("-M 5 -a TTAGACATATCTCCGTCG", "maxlen.fa", "lengths.fa")
def test_too_long(run, tmp_path, cores):
"""--too-long-output"""
too_long_path = tmp_path / 'toolong.fa'
stats = run([
"--cores", str(cores),
"-M", "5",
"-a", "TTAGACATATCTCCGTCG",
"--too-long-output", too_long_path
], "maxlen.fa", "lengths.fa")
assert_files_equal(datapath('toolong.fa'), too_long_path)
assert stats.filtered["too_long"] == 5
def test_length_tag(run):
"""454 data; -n and --length-tag"""
run("-n 3 -e 0.1 --length-tag length= "
"-b TGAGACACGCAACAGGGGAAAGGCAAGGCACACAGGGGATAGG "
"-b TCCATCTCATCCCTGCGTGTCCCATCTGTTCCCTCCCTGTCTCA", '454.fa', '454.fa')
@pytest.mark.parametrize("length", list(range(3, 11)))
def test_overlap_a(tmp_path, length):
"""-O/--overlap with -a"""
adapter = "catatctccg"
record = ">read\nGAGACCATTCCAATG" + adapter[:length] + '\n'
input = tmp_path / "overlap.fasta"
input.write_text(record)
if length < 7:
expected = record
else:
expected = '>read\nGAGACCATTCCAATG\n'
output = tmp_path / "overlap-trimmed.fasta"
main(["-O", "7", "-e", "0", "-a", adapter, "-o", str(output), str(input)])
assert expected == output.read_text()
def test_overlap_b(run):
"""-O/--overlap with -b"""
run("-O 10 -b TTAGACATATCTCCGTCG", "overlapb.fa", "overlapb.fa")
def test_trim_n(run):
run("--trim-n", "trim-n.fasta", "trim-n.fasta")
def test_qualtrim(run):
"""-q with low qualities"""
run("-q 10 -a XXXXXX", "lowqual.fastq", "lowqual.fastq")
def test_qualbase(run):
"""-q with low qualities, using ascii(quality+64) encoding"""
run("-q 10 --quality-base 64 -a XXXXXX", "illumina64.fastq", "illumina64.fastq")
def test_quality_trim_only(run):
"""only trim qualities, do not remove adapters"""
run("-q 10 --quality-base 64", "illumina64.fastq", "illumina64.fastq")
def test_twoadapters(run):
"""two adapters"""
run("-a AATTTCAGGAATT -a GTTCTCTAGTTCT", "twoadapters.fasta", "twoadapters.fasta")
def test_polya(run):
"""poly-A tails"""
run("-m 24 -O 10 -a AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "polya.fasta", "polya.fasta")
def test_polya_brace_notation(run):
"""poly-A tails"""
run("-m 24 -O 10 -a A{35}", "polya.fasta", "polya.fasta")
# the same as --action=none
def test_no_trim(run):
run("--no-trim --discard-untrimmed -a CCCTAGTTAAAC", 'no-trim.fastq', 'small.fastq')
def test_action_none(run):
run("--action=none --discard-untrimmed -a CCCTAGTTAAAC", 'no-trim.fastq', 'small.fastq')
# the same as --action=mask
def test_mask_adapter(run):
"""mask adapter with N (reads maintain the same length)"""
run("-b CAAG -n 3 --mask-adapter", "anywhere_repeat.fastq", "anywhere_repeat.fastq")
def test_action_mask(run):
"""mask adapter with N (reads maintain the same length)"""
run("-b CAAG -n 3 --action=mask", "anywhere_repeat.fastq", "anywhere_repeat.fastq")
def test_action_lowercase(run):
run("-b CAAG -n 3 --action=lowercase", "action_lowercase.fasta", "action_lowercase.fasta")
def test_action_retain(run):
run("-g GGTTAACC -a CAAG --action=retain", "action_retain.fasta", "action_retain.fasta")
def test_action_retain_times():
with pytest.raises(SystemExit):
main(["-a", "ACGT", "--times=2", "--action=retain", datapath("small.fastq")])
def test_gz_multiblock(run):
"""compressed gz file with multiple blocks (created by concatenating two .gz files)"""
run("-b TTAGACATATCTCCGTCG", "small.fastq", "multiblock.fastq.gz")
def test_read_wildcard(run):
"""test wildcards in reads"""
run("--match-read-wildcards -b ACGTACGT", "wildcard.fa", "wildcard.fa")
@pytest.mark.parametrize("adapter_type,expected", [
("-a", "wildcard_adapter.fa"),
("-b", "wildcard_adapter_anywhere.fa"),
])
def test_adapter_wildcard(adapter_type, expected, run, tmp_path, cores):
"""wildcards in adapter"""
wildcard_path = tmp_path / "wildcards.txt"
run([
"--cores", str(cores),
"--wildcard-file", wildcard_path,
adapter_type, "ACGTNNNACGT"
], expected, "wildcard_adapter.fa")
with open(wildcard_path) as wct:
lines = wct.readlines()
lines = [line.strip() for line in lines]
assert lines == ["AAA 1", "GGG 2", "CCC 3b", "TTT 4b"]
def test_wildcard_N(run):
    """test 'N' wildcard matching with no allowed errors"""
    run("-e 0 -a GGGGGGG --match-read-wildcards", "wildcardN.fa", "wildcardN.fa")
def test_illumina_adapter_wildcard(run):
    """adapter containing every IUPAC wildcard code"""
    run("-a VCCGAMCYUCKHRKDCUBBCNUWNSGHCGU", "illumina.fastq", "illumina.fastq.gz")
def test_adapter_front(run):
    """test adapter in front"""
    run("--front ADAPTER -N", "examplefront.fa", "example.fa")
def test_literal_N(run):
    """test matching literal 'N's"""
    run("-N -e 0.2 -a NNNNNNNNNNNNNN", "trimN3.fasta", "trimN3.fasta")
def test_literal_N2(run):
    """literal 'N's as a 5' adapter with minimum overlap 1"""
    run("-N -O 1 -g NNNNNNNNNNNNNN", "trimN5.fasta", "trimN5.fasta")
def test_literal_N_brace_notation(run):
    """test matching literal 'N's"""
    run("-N -e 0.2 -a N{14}", "trimN3.fasta", "trimN3.fasta")
def test_literal_N2_brace_notation(run):
    """N{14} brace notation as a 5' adapter"""
    run("-N -O 1 -g N{14}", "trimN5.fasta", "trimN5.fasta")
def test_anchored_front(run):
    """anchored 5' adapter (leading '^')"""
    run("-g ^FRONTADAPT -N", "anchored.fasta", "anchored.fasta")
def test_anchored_front_ellipsis_notation(run):
    """anchored 5' adapter written as 'ADAPTER...'"""
    run("-a ^FRONTADAPT... -N", "anchored.fasta", "anchored.fasta")
def test_anchored_back(run):
    """anchored 3' adapter (trailing '$')"""
    run("-a BACKADAPTER$ -N", "anchored-back.fasta", "anchored-back.fasta")
def test_anchored_back_ellipsis_notation(run):
    """anchored 3' adapter written as '...ADAPTER$'"""
    run("-a ...BACKADAPTER$ -N", "anchored-back.fasta", "anchored-back.fasta")
def test_anchored_back_no_indels(run):
    """anchored 3' adapter with indels disabled"""
    run("-a BACKADAPTER$ -N --no-indels", "anchored-back.fasta", "anchored-back.fasta")
def test_no_indels(run):
    """--no-indels with both a 3' and a 5' adapter"""
    run('-a TTAGACATAT -g GAGATTGCCA --no-indels', 'no_indels.fasta', 'no_indels.fasta')
def test_ellipsis_notation(run):
    """'...ADAPTER' ellipsis notation behaves like a regular 3' adapter"""
    run('-a ...TTAGACATAT -g GAGATTGCCA --no-indels', 'no_indels.fasta', 'no_indels.fasta')
def test_issue_46(run, tmp_path):
    """issue 46 - IndexError with --wildcard-file"""
    run("--anywhere=AACGTN --wildcard-file={}".format(
        tmp_path / "wildcards.txt"), "issue46.fasta", "issue46.fasta")
def test_strip_suffix(run):
    """--strip-suffix removes a suffix from read names"""
    run("--strip-suffix _sequence -a XXXXXXX", "stripped.fasta", "simple.fasta")
def test_info_file(run, tmp_path, cores):
    """--info-file writes one line per read describing the adapter match."""
    # The true adapter sequence in the illumina.fastq.gz data set is
    # GCCTAACTTCTTAGACTGCCTTAAGGACGT (fourth base is different from the sequence shown here)
    info_path = tmp_path / "info.txt"
    run(["--cores", str(cores), "--info-file", info_path, "-a", "adapt=GCCGAACTTCTTAGACTGCCTTAAGGACGT"],
        "illumina.fastq", "illumina.fastq.gz")
    assert_files_equal(cutpath("illumina.info.txt"), info_path, ignore_trailing_space=True)
def test_info_file_times(run, tmp_path, cores):
info_path = tmp_path / "info.txt"
run(["--cores", str(cores), "--info-file", info_path, "--times", "2", "-a", "adapt=GCCGAACTTCTTA",
"-a", "adapt2=GACTGCCTTAAGGACGT"], "illumina5.fastq", "illumina5.fastq")
assert_files_equal(cutpath('illumina5.info.txt'), info_path, ignore_trailing_space=True)
def test_info_file_fasta(run, tmp_path, cores):
info_path = tmp_path / "info.txt"
# Just make sure that it runs
run(["--cores", str(cores), "--info-file", info_path, "-a", "TTAGACATAT", "-g", "GAGATTGCCA", "--no-indels"],
"no_indels.fasta", "no_indels.fasta")
def test_info_file_revcomp(run, tmp_path):
info_path = tmp_path / "info-rc.txt"
main([
"--info-file", str(info_path),
"-a", "adapt=GAGTCG",
"--revcomp",
"--rename={header}",
"-o", str(tmp_path / "out.fasta"),
datapath("info-rc.fasta")
])
assert_files_equal(cutpath("info-rc.txt"), info_path)
def test_named_adapter(run):
run("-a MY_ADAPTER=GCCGAACTTCTTAGACTGCCTTAAGGACGT", "illumina.fastq", "illumina.fastq.gz")
def test_adapter_with_u(run):
run("-a GCCGAACUUCUUAGACUGCCUUAAGGACGU", "illumina.fastq", "illumina.fastq.gz")
def test_bzip2_input(run, cores):
run(["--cores", str(cores), "-a", "TTAGACATATCTCCGTCG"], "small.fastq", "small.fastq.bz2")
@pytest.mark.parametrize("extension", ["bz2", "xz", "gz"])
def test_compressed_output(tmp_path, cores, extension):
    """Smoke test: writing .bz2/.xz/.gz compressed output succeeds."""
    destination = str(tmp_path / ("small.fastq." + extension))
    arguments = ["--cores", str(cores)]
    arguments += ["-a", "TTAGACATATCTCCGTCG"]
    arguments += ["-o", destination, datapath("small.fastq")]
    main(arguments)
def test_bzip2_multiblock(run):
run("-b TTAGACATATCTCCGTCG", "small.fastq", "multiblock.fastq.bz2")
def test_xz(run):
run('-b TTAGACATATCTCCGTCG', 'small.fastq', 'small.fastq.xz')
def test_no_args():
with pytest.raises(SystemExit):
main([])
def test_two_fastqs():
with pytest.raises(SystemExit):
main([datapath('paired.1.fastq'), datapath('paired.2.fastq')])
def test_anchored_no_indels(run):
"""anchored 5' adapter, mismatches only (no indels)"""
run('-g ^TTAGACATAT --no-indels -e 0.1', 'anchored_no_indels.fasta', 'anchored_no_indels.fasta')
def test_anchored_no_indels_wildcard_read(run):
"""anchored 5' adapter, mismatches only (no indels), but wildcards in the read count as matches"""
run('-g ^TTAGACATAT --match-read-wildcards --no-indels -e 0.1',
'anchored_no_indels_wildcard.fasta', 'anchored_no_indels.fasta')
def test_anchored_no_indels_wildcard_adapt(run):
"""anchored 5' adapter, mismatches only (no indels), but wildcards in the adapter count as matches"""
run('-g ^TTAGACANAT --no-indels -e 0.12', 'anchored_no_indels.fasta', 'anchored_no_indels.fasta')
def test_non_iupac_characters(run):
with pytest.raises(SystemExit):
main(['-a', 'ZACGT', datapath('small.fastq')])
def test_unconditional_cut_front(run):
run('-u 5', 'unconditional-front.fastq', 'small.fastq')
def test_unconditional_cut_back(run):
run('-u -5', 'unconditional-back.fastq', 'small.fastq')
def test_unconditional_cut_both(run):
run('-u -5 -u 5', 'unconditional-both.fastq', 'small.fastq')
def test_unconditional_cut_too_many_commas():
with pytest.raises(SystemExit):
main(["-u", "5,7,8", datapath("small.fastq")])
def test_unconditional_cut_invalid_number():
with pytest.raises(SystemExit):
main(["-u", "a,b", datapath("small.fastq")])
def test_untrimmed_output(run, cores, tmp_path):
path = tmp_path / "untrimmed.fastq"
stats = run(["--cores", str(cores), "-a", "TTAGACATATCTCCGTCG", "--untrimmed-output", path],
"small.trimmed.fastq", "small.fastq")
assert_files_equal(cutpath("small.untrimmed.fastq"), path)
assert stats.with_adapters[0] == 2
assert stats.written == 2
assert stats.written_bp[0] == 46
def test_adapter_file(run):
run('-a file:' + datapath('adapter.fasta'), 'illumina.fastq', 'illumina.fastq.gz')
def test_adapter_file_5p_anchored(run):
run('-N -g file:' + datapath('prefix-adapter.fasta'), 'anchored.fasta', 'anchored.fasta')
def test_adapter_file_3p_anchored(run):
run('-N -a file:' + datapath('suffix-adapter.fasta'), 'anchored-back.fasta', 'anchored-back.fasta')
def test_adapter_file_5p_anchored_no_indels(run):
run('-N --no-indels -g file:' + datapath('prefix-adapter.fasta'), 'anchored.fasta', 'anchored.fasta')
def test_adapter_file_3p_anchored_no_indels(run):
run('-N --no-indels -a file:' + datapath('suffix-adapter.fasta'), 'anchored-back.fasta', 'anchored-back.fasta')
def test_adapter_file_empty_name(run):
run('-N -a file:' + datapath('adapter-empty-name.fasta'), 'illumina.fastq', 'illumina.fastq.gz')
@pytest.mark.parametrize("ext", ["", ".gz"])
def test_demultiplex(cores, tmp_path, ext):
    """Demultiplexing by adapter name writes one output file per adapter."""
    template = str(tmp_path / 'tmp-demulti.{name}.fasta') + ext
    main([
        '--cores', str(cores),
        '-a', 'first=AATTTCAGGAATT',
        '-a', 'second=GTTCTCTAGTTCT',
        '-o', template,
        datapath('twoadapters.fasta'),
    ])
    for adapter_name in ("first", "second", "unknown"):
        produced = template.format(name=adapter_name)
        if ext == ".gz":
            # Decompress in place so the plain-text comparison below works
            subprocess.run(["gzip", "-d", produced], check=True)
            produced = produced[:-3]
        reference = cutpath("twoadapters.{name}.fasta".format(name=adapter_name))
        assert_files_equal(reference, produced)
def test_multiple_fake_anchored_adapters(run):
run("-g ^CGTCCGAAGTAGC -g ^ATTGCCCTAG "
"-a TTCCATGCAGCATT$ -a CCAGTCCCCCC$ "
"-a GCCGAACTTCTTAGACTGCCTTAAGGACGT",
"illumina.fastq",
"illumina.fastq.gz")
def test_multiple_prefix_adapters(run):
run("-g ^GTACGGATTGTTCAGTA -g ^TATTAAGCTCATTC", "multiprefix.fasta", "multi.fasta")
def test_multiple_prefix_adapters_noindels(run):
run("--no-indels -g ^GTACGGATTGTTCAGTA -g ^TATTAAGCTCATTC", "multiprefix.fasta", "multi.fasta")
def test_multiple_suffix_adapters_noindels(run):
run("--no-indels -a CGTGATTATCTTGC$ -a CCTATTAGTGGTTGAAC$", "multisuffix.fasta", "multi.fasta")
def test_max_n(run):
    """--max-n filters reads by an absolute count or a fraction of N bases."""
    cases = [
        ('--max-n 0', 'maxn0.fasta', 4),
        ('--max-n 1', 'maxn1.fasta', 2),
        ('--max-n 2', 'maxn2.fasta', 1),
        ('--max-n 0.2', 'maxn0.2.fasta', 3),
        ('--max-n 0.4', 'maxn0.4.fasta', 2),
    ]
    for options, expected_file, n_filtered in cases:
        stats = run(options, expected_file, 'maxn.fasta')
        assert stats.filtered["too_many_n"] == n_filtered
def test_quiet_is_quiet():
    """--quiet must suppress all output on both stdout and stderr.

    sys.stdout/sys.stderr are swapped for in-memory streams (including a
    fake binary ``buffer`` attribute, which cutadapt may write to) and the
    test verifies nothing is written to either stream.
    """
    captured_standard_output = StringIO()
    captured_standard_error = StringIO()
    # Real text streams expose a binary .buffer; fake it so binary writes land here
    setattr(captured_standard_output, "buffer", BytesIO())
    setattr(captured_standard_error, "buffer", BytesIO())
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    try:
        sys.stdout = captured_standard_output
        sys.stderr = captured_standard_error
        main(['-o', os.devnull, '--quiet', datapath('small.fastq')])
    finally:
        # Always restore the real streams, even if main() raises
        sys.stdout = old_stdout
        sys.stderr = old_stderr
    assert captured_standard_output.getvalue() == ''
    assert captured_standard_error.getvalue() == ''
    assert getattr(captured_standard_output, "buffer").getvalue() == b''
    # Bug fix: the original asserted on stdout's buffer twice and never
    # checked stderr's binary buffer at all.
    assert getattr(captured_standard_error, "buffer").getvalue() == b''
def test_x_brace_notation():
main(['-o', os.devnull, '--quiet', '-a', 'X{5}', datapath('small.fastq')])
def test_nextseq(run):
run('--nextseq-trim 22', 'nextseq.fastq', 'nextseq.fastq')
def test_linked_explicitly_anchored(run):
run('-a ^AAAAAAAAAA...TTTTTTTTTT', 'linked.fasta', 'linked.fasta')
def test_linked_multiple(run):
run('-a ^AAAAAAAAAA...TTTTTTTTTT -a ^AAAAAAAAAA...GCGCGCGCGC', 'linked.fasta', 'linked.fasta')
def test_linked_both_anchored(run):
run('-a ^AAAAAAAAAA...TTTTT$', 'linked-anchored.fasta', 'linked.fasta')
def test_linked_5p_not_anchored(run):
run('-g AAAAAAAAAA...TTTTTTTTTT', 'linked-not-anchored.fasta', 'linked.fasta')
def test_linked_discard_untrimmed(run):
run('-a ^AAAAAAAAAA...TTTTTTTTTT --discard-untrimmed', 'linked-discard.fasta', 'linked.fasta')
def test_linked_discard_untrimmed_g(run):
run('-g AAAAAAAAAA...TTTTTTTTTT --discard-untrimmed', 'linked-discard-g.fasta', 'linked.fasta')
def test_linked_lowercase(run):
run('-a ^AACCGGTTTT...GGGGGGG$ -a ^AAAA...TTTT$ --times=2 --action=lowercase',
'linked-lowercase.fasta', 'linked.fasta')
def test_linked_info_file(tmp_path):
info_path = tmp_path / 'info.txt'
main(['-a linkedadapter=^AAAAAAAAAA...TTTTTTTTTT', '--info-file', str(info_path),
'-o', str(tmp_path / 'out.fasta'), datapath('linked.fasta')])
assert_files_equal(cutpath('linked-info.txt'), info_path, ignore_trailing_space=True)
def test_linked_anywhere():
with pytest.raises(SystemExit):
main(['-b', 'AAA...TTT', datapath('linked.fasta')])
def test_anywhere_anchored_5p():
with pytest.raises(SystemExit):
main(['-b', '^AAA', datapath('small.fastq')])
def test_anywhere_anchored_3p():
with pytest.raises(SystemExit):
main(['-b', 'TTT$', datapath('small.fastq')])
def test_fasta(run):
run('-a TTAGACATATCTCCGTCG', 'small.fasta', 'small.fastq')
def test_fasta_no_trim(run):
run([], 'small-no-trim.fasta', 'small.fastq')
def test_length(run):
run('--length 5', 'shortened.fastq', 'small.fastq')
def test_negative_length(run):
run('--length -5', 'shortened-negative.fastq', 'small.fastq')
@pytest.mark.timeout(0.5)
def test_issue_296(tmp_path):
    """Regression test: must not hang with --no-trim and --info-file together.

    The 0.5 s timeout fails the test if the old hang reappears.
    """
    # Hang when using both --no-trim and --info-file together
    info_path = tmp_path / 'info.txt'
    reads_path = tmp_path / 'reads.fasta'
    out_path = tmp_path / 'out.fasta'
    reads_path.write_text(">read\nCACAAA\n")
    main([
        "--info-file", str(info_path),
        "--no-trim",
        "-g", "TTTCAC",
        "-o", str(out_path),
        str(reads_path),
    ])
    # Output should be unchanged because of --no-trim
    assert_files_equal(reads_path, out_path)
def test_xadapter(run):
run('-g XTCCGAATAGA', 'xadapter.fasta', 'xadapterx.fasta')
def test_adapterx(run):
run('-a TCCGAATAGAX', 'adapterx.fasta', 'xadapterx.fasta')
def test_discard_casava(run):
stats = run('--discard-casava', 'casava.fastq', 'casava.fastq')
assert stats.filtered["casava_filtered"] == 1
def test_underscore(run):
"""File name ending in _fastq.gz (issue #275)"""
run('-b TTAGACATATCTCCGTCG', 'small.fastq', 'underscore_fastq.gz')
def test_cores_autodetect(run):
# Just make sure that it runs; functionality is not tested
run('--cores 0 -b TTAGACATATCTCCGTCG', 'small.fastq', 'underscore_fastq.gz')
def test_write_compressed_fastq(cores, tmp_path):
main(['--cores', str(cores), '-o', str(tmp_path / 'out.fastq.gz'), datapath('small.fastq')])
def test_minimal_report(run):
run('-b TTAGACATATCTCCGTCG --report=minimal', 'small.fastq', 'small.fastq')
def test_paired_separate(run):
"""test separate trimming of paired-end reads"""
run("-a TTAGACATAT", "paired-separate.1.fastq", "paired.1.fastq")
run("-a CAGTGGAGTA", "paired-separate.2.fastq", "paired.2.fastq")
def test_empty_read_with_wildcard_in_adapter(run):
run("-g CWC", "empty.fastq", "empty.fastq")
def test_print_progress_to_tty(tmp_path, mocker):
mocker.patch("cutadapt.utils.sys.stderr").isatty.return_value = True
main(["-o", str(tmp_path / "out.fastq"), datapath("small.fastq")])
def test_adapter_order(run):
run("-g ^AAACC -a CCGGG", "adapterorder-ga.fasta", "adapterorder.fasta")
run("-a CCGGG -g ^AAACC", "adapterorder-ag.fasta", "adapterorder.fasta")
def test_reverse_complement_no_rc_suffix(run, tmp_path):
    """With --rename={header}, reverse-complemented reads keep their original name."""
    trimmed = tmp_path / "out.fastq"
    arguments = ["-o", str(trimmed)]
    arguments += ["--revcomp", "--no-index"]
    arguments += ["--rename", "{header}"]
    arguments += ["-g", "^TTATTTGTCT"]
    arguments += ["-g", "^TCCGCACTGG"]
    arguments.append(datapath("revcomp.1.fastq"))
    main(arguments)
    with dnaio.open(trimmed) as reader:
        records = list(reader)
        assert len(records) == 6
        second = records[1]
        # No " rc" suffix despite the read having been reverse-complemented
        assert second.name == "read2/1"
        assert second.sequence == "ACCATCCGATATGTCTAATGTGGCCTGTTG"
def test_reverse_complement_normalized(run):
stats = run(
"--revcomp --no-index -g ^TTATTTGTCT -g ^TCCGCACTGG",
"revcomp-single-normalize.fastq",
"revcomp.1.fastq",
)
assert stats.n == 6
assert stats.reverse_complemented == 2
def test_reverse_complement_and_info_file(run, tmp_path, cores):
    """--revcomp combined with --info-file: info lines reflect rc'ed reads."""
    info_path = str(tmp_path / "info.txt")
    run(
        [
            "--revcomp",
            "--no-index",
            "-g",
            "^TTATTTGTCT",
            "-g",
            "^TCCGCACTGG",
            "--info-file",
            info_path,
        ],
        "revcomp-single-normalize.fastq",
        "revcomp.1.fastq",
    )
    with open(info_path) as f:
        lines = f.readlines()
    assert len(lines) == 6
    # First column of each tab-separated info line is the read name;
    # reverse-complemented reads carry an " rc" suffix there.
    assert lines[0].split("\t")[0] == "read1/1"
    assert lines[1].split("\t")[0] == "read2/1 rc"
def test_max_expected_errors(run, cores):
stats = run("--max-ee=0.9", "maxee.fastq", "maxee.fastq")
assert stats.filtered["too_many_expected_errors"] == 2
def test_max_expected_errors_fasta(tmp_path):
path = tmp_path / "input.fasta"
path.write_text(">read\nACGTACGT\n")
main(["--max-ee=0.001", "-o", os.devnull, str(path)])
def test_warn_if_en_dashes_used():
with pytest.raises(SystemExit):
main(["–q", "25", "-o", os.devnull, "in.fastq"])
@pytest.mark.parametrize("opt", ["-y", "--suffix"])
def test_suffix(opt, run):
"""-y/--suffix parameter"""
run([opt, ' {name}', '-e', '0', '-a', 'OnlyT=TTTTTTTT', '-a', 'OnlyG=GGGGGGGG'], "suffix.fastq", "suffix.fastq")
@pytest.mark.parametrize("opt", ["--prefix", "--suffix"])
def test_rename_cannot_be_combined_with_other_renaming_options(opt):
with pytest.raises(SystemExit):
main([opt, "something", "--rename='{id} {comment} extrainfo'", "-o", os.devnull, datapath("empty.fastq")])
def test_rename(run):
run([
"--rename={id}_{cut_suffix} {header} {adapter_name}",
"--cut=-4",
"-a", "OnlyT=TTTTTT",
"-a", "OnlyG=GGGGGG",
], "rename.fastq", "suffix.fastq")
@pytest.mark.skip("This has not been fixed")
def test_terminates_correctly_on_error_in_subprocess(tmp_path):
    """A malformed input file should make a multi-core run exit, not hang.

    Skipped: the underlying defect has not been fixed yet.
    """
    params = [
        "-j", "2",
        "-o", str(tmp_path / "out.fastq.gz"),
        datapath("format-error.fastq"),
    ]
    with pytest.raises(SystemExit):
        main(params)
| 31.359903 | 116 | 0.655973 | import subprocess
import sys
import os
from io import StringIO, BytesIO
import dnaio
import pytest
from cutadapt.__main__ import main
from utils import assert_files_equal, datapath, cutpath
try:
import pytest_timeout as _unused
except ImportError:
raise ImportError("pytest_timeout needs to be installed")
del _unused
def test_does_not_close_stdout():
main([datapath("small.fastq")])
assert not sys.stdout.closed
def test_help():
with pytest.raises(SystemExit) as e:
main(["--help"])
assert e.value.args[0] == 0
def test_unknown_file_format(tmp_path):
path = tmp_path / "unknown_format.txt"
path.write_text("raw text")
with pytest.raises(SystemExit):
main([str(path)])
def test_cores_negative():
with pytest.raises(SystemExit) as e:
main(["--cores=-1", datapath("simple.fasta")])
assert e.value.args[0] == 2
def test_quiet_and_report():
with pytest.raises(SystemExit) as e:
main(["--quiet", "--report=minimal", datapath("simple.fasta")])
assert e.value.args[0] == 2
@pytest.mark.parametrize("args", [
("--discard-trimmed", "--discard-untrimmed"),
("--discard-trimmed", "--untrimmed-output", os.devnull),
("--discard-untrimmed", "--untrimmed-output", os.devnull),
])
def test_only_one_of_discard_trimmed_discard_untrimmed_untrimmed_output(args):
with pytest.raises(SystemExit) as e:
main(["-o", os.devnull, *args, datapath("small.fastq")])
assert e.value.args[0] == 2
def test_debug():
main(["--debug", "--", datapath("small.fastq")])
def test_debug_trace():
main(["--debug", "--debug", "-a", "ACGT", datapath("small.fastq")])
def test_example(run):
run('-N -b ADAPTER', 'example.fa', 'example.fa')
def test_compressed_fasta(run):
run("", "simple.fasta", "simple.fasta.gz")
def test_small(run):
run('-a TTAGACATATCTCCGTCG', 'small.fastq', 'small.fastq')
def test_empty(run, cores):
run("--cores {} -a TTAGACATATCTCCGTCG".format(cores), "empty.fastq", "empty.fastq")
def test_newlines(run):
run('-e 0.12 -a TTAGACATATCTCCGTCG', 'dos.fastq', 'dos.fastq')
def test_lowercase(run):
run('-a ttagacatatctccgtcg', 'lowercase.fastq', 'small.fastq')
def test_rest(run, tmp_path, cores):
rest = tmp_path / "rest.tmp"
run(['--cores', str(cores), '-b', 'ADAPTER', '-N', '-r', rest], "rest.fa", "rest.fa")
assert_files_equal(datapath('rest.txt'), rest)
def test_restfront(run, tmp_path):
path = tmp_path / "rest.txt"
run(['-g', 'ADAPTER', '-N', '-r', path], "restfront.fa", "rest.fa")
assert_files_equal(datapath('restfront.txt'), path)
def test_discard(run):
run("-b TTAGACATATCTCCGTCG --discard", "discard.fastq", "small.fastq")
def test_discard_untrimmed(run):
run('-b CAAGAT --discard-untrimmed', 'discard-untrimmed.fastq', 'small.fastq')
def test_extensiontxtgz(run):
run("-b TTAGACATATCTCCGTCG", "s_1_sequence.txt", "s_1_sequence.txt.gz")
def test_minimum_length(run):
stats = run("-m 5 -a TTAGACATATCTCCGTCG", "minlen.fa", "lengths.fa")
assert stats.written_bp[0] == 45
assert stats.written == 6
def test_too_short(run, tmp_path, cores):
too_short_path = tmp_path / 'tooshort.fa'
stats = run([
"--cores", str(cores),
"-m", "5",
"-a", "TTAGACATATCTCCGTCG",
"--too-short-output", too_short_path
], "minlen.fa", "lengths.fa")
assert_files_equal(datapath('tooshort.fa'), too_short_path)
assert stats.filtered["too_short"] == 5
@pytest.mark.parametrize("redirect", (False, True))
def test_too_short_statistics(redirect):
args = ["-a", "TTAGACATATCTCCGTCG", "-m", "24", "-o", os.devnull, datapath("small.fastq")]
if redirect:
args[:0] = ["--too-short-output", os.devnull]
stats = main(args)
assert stats.with_adapters[0] == 2
assert stats.written == 2
assert stats.written_bp[0] == 58
assert stats.filtered["too_short"] == 1
def test_maximum_length(run):
run("-M 5 -a TTAGACATATCTCCGTCG", "maxlen.fa", "lengths.fa")
def test_too_long(run, tmp_path, cores):
too_long_path = tmp_path / 'toolong.fa'
stats = run([
"--cores", str(cores),
"-M", "5",
"-a", "TTAGACATATCTCCGTCG",
"--too-long-output", too_long_path
], "maxlen.fa", "lengths.fa")
assert_files_equal(datapath('toolong.fa'), too_long_path)
assert stats.filtered["too_long"] == 5
def test_length_tag(run):
run("-n 3 -e 0.1 --length-tag length= "
"-b TGAGACACGCAACAGGGGAAAGGCAAGGCACACAGGGGATAGG "
"-b TCCATCTCATCCCTGCGTGTCCCATCTGTTCCCTCCCTGTCTCA", '454.fa', '454.fa')
@pytest.mark.parametrize("length", list(range(3, 11)))
def test_overlap_a(tmp_path, length):
    """-O 7 trims the 3' adapter only when at least 7 bases overlap the read."""
    adapter = "catatctccg"
    # Read ends with the first `length` bases of the adapter
    record = ">read\nGAGACCATTCCAATG" + adapter[:length] + '\n'
    input = tmp_path / "overlap.fasta"
    input.write_text(record)
    if length < 7:
        # Overlap below the -O threshold: read must stay untouched
        expected = record
    else:
        expected = '>read\nGAGACCATTCCAATG\n'
    output = tmp_path / "overlap-trimmed.fasta"
    main(["-O", "7", "-e", "0", "-a", adapter, "-o", str(output), str(input)])
    assert expected == output.read_text()
def test_overlap_b(run):
run("-O 10 -b TTAGACATATCTCCGTCG", "overlapb.fa", "overlapb.fa")
def test_trim_n(run):
run("--trim-n", "trim-n.fasta", "trim-n.fasta")
def test_qualtrim(run):
run("-q 10 -a XXXXXX", "lowqual.fastq", "lowqual.fastq")
def test_qualbase(run):
run("-q 10 --quality-base 64 -a XXXXXX", "illumina64.fastq", "illumina64.fastq")
def test_quality_trim_only(run):
run("-q 10 --quality-base 64", "illumina64.fastq", "illumina64.fastq")
def test_twoadapters(run):
run("-a AATTTCAGGAATT -a GTTCTCTAGTTCT", "twoadapters.fasta", "twoadapters.fasta")
def test_polya(run):
run("-m 24 -O 10 -a AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "polya.fasta", "polya.fasta")
def test_polya_brace_notation(run):
run("-m 24 -O 10 -a A{35}", "polya.fasta", "polya.fasta")
def test_no_trim(run):
run("--no-trim --discard-untrimmed -a CCCTAGTTAAAC", 'no-trim.fastq', 'small.fastq')
def test_action_none(run):
run("--action=none --discard-untrimmed -a CCCTAGTTAAAC", 'no-trim.fastq', 'small.fastq')
def test_mask_adapter(run):
run("-b CAAG -n 3 --mask-adapter", "anywhere_repeat.fastq", "anywhere_repeat.fastq")
def test_action_mask(run):
run("-b CAAG -n 3 --action=mask", "anywhere_repeat.fastq", "anywhere_repeat.fastq")
def test_action_lowercase(run):
run("-b CAAG -n 3 --action=lowercase", "action_lowercase.fasta", "action_lowercase.fasta")
def test_action_retain(run):
run("-g GGTTAACC -a CAAG --action=retain", "action_retain.fasta", "action_retain.fasta")
def test_action_retain_times():
with pytest.raises(SystemExit):
main(["-a", "ACGT", "--times=2", "--action=retain", datapath("small.fastq")])
def test_gz_multiblock(run):
run("-b TTAGACATATCTCCGTCG", "small.fastq", "multiblock.fastq.gz")
def test_read_wildcard(run):
run("--match-read-wildcards -b ACGTACGT", "wildcard.fa", "wildcard.fa")
@pytest.mark.parametrize("adapter_type,expected", [
("-a", "wildcard_adapter.fa"),
("-b", "wildcard_adapter_anywhere.fa"),
])
def test_adapter_wildcard(adapter_type, expected, run, tmp_path, cores):
wildcard_path = tmp_path / "wildcards.txt"
run([
"--cores", str(cores),
"--wildcard-file", wildcard_path,
adapter_type, "ACGTNNNACGT"
], expected, "wildcard_adapter.fa")
with open(wildcard_path) as wct:
lines = wct.readlines()
lines = [line.strip() for line in lines]
assert lines == ["AAA 1", "GGG 2", "CCC 3b", "TTT 4b"]
def test_wildcard_N(run):
run("-e 0 -a GGGGGGG --match-read-wildcards", "wildcardN.fa", "wildcardN.fa")
def test_illumina_adapter_wildcard(run):
run("-a VCCGAMCYUCKHRKDCUBBCNUWNSGHCGU", "illumina.fastq", "illumina.fastq.gz")
def test_adapter_front(run):
run("--front ADAPTER -N", "examplefront.fa", "example.fa")
def test_literal_N(run):
run("-N -e 0.2 -a NNNNNNNNNNNNNN", "trimN3.fasta", "trimN3.fasta")
def test_literal_N2(run):
run("-N -O 1 -g NNNNNNNNNNNNNN", "trimN5.fasta", "trimN5.fasta")
def test_literal_N_brace_notation(run):
run("-N -e 0.2 -a N{14}", "trimN3.fasta", "trimN3.fasta")
def test_literal_N2_brace_notation(run):
run("-N -O 1 -g N{14}", "trimN5.fasta", "trimN5.fasta")
def test_anchored_front(run):
run("-g ^FRONTADAPT -N", "anchored.fasta", "anchored.fasta")
def test_anchored_front_ellipsis_notation(run):
run("-a ^FRONTADAPT... -N", "anchored.fasta", "anchored.fasta")
def test_anchored_back(run):
run("-a BACKADAPTER$ -N", "anchored-back.fasta", "anchored-back.fasta")
def test_anchored_back_ellipsis_notation(run):
run("-a ...BACKADAPTER$ -N", "anchored-back.fasta", "anchored-back.fasta")
def test_anchored_back_no_indels(run):
run("-a BACKADAPTER$ -N --no-indels", "anchored-back.fasta", "anchored-back.fasta")
def test_no_indels(run):
run('-a TTAGACATAT -g GAGATTGCCA --no-indels', 'no_indels.fasta', 'no_indels.fasta')
def test_ellipsis_notation(run):
run('-a ...TTAGACATAT -g GAGATTGCCA --no-indels', 'no_indels.fasta', 'no_indels.fasta')
def test_issue_46(run, tmp_path):
run("--anywhere=AACGTN --wildcard-file={}".format(
tmp_path / "wildcards.txt"), "issue46.fasta", "issue46.fasta")
def test_strip_suffix(run):
run("--strip-suffix _sequence -a XXXXXXX", "stripped.fasta", "simple.fasta")
def test_info_file(run, tmp_path, cores):
info_path = tmp_path / "info.txt"
run(["--cores", str(cores), "--info-file", info_path, "-a", "adapt=GCCGAACTTCTTAGACTGCCTTAAGGACGT"],
"illumina.fastq", "illumina.fastq.gz")
assert_files_equal(cutpath("illumina.info.txt"), info_path, ignore_trailing_space=True)
def test_info_file_times(run, tmp_path, cores):
info_path = tmp_path / "info.txt"
run(["--cores", str(cores), "--info-file", info_path, "--times", "2", "-a", "adapt=GCCGAACTTCTTA",
"-a", "adapt2=GACTGCCTTAAGGACGT"], "illumina5.fastq", "illumina5.fastq")
assert_files_equal(cutpath('illumina5.info.txt'), info_path, ignore_trailing_space=True)
def test_info_file_fasta(run, tmp_path, cores):
info_path = tmp_path / "info.txt"
run(["--cores", str(cores), "--info-file", info_path, "-a", "TTAGACATAT", "-g", "GAGATTGCCA", "--no-indels"],
"no_indels.fasta", "no_indels.fasta")
def test_info_file_revcomp(run, tmp_path):
info_path = tmp_path / "info-rc.txt"
main([
"--info-file", str(info_path),
"-a", "adapt=GAGTCG",
"--revcomp",
"--rename={header}",
"-o", str(tmp_path / "out.fasta"),
datapath("info-rc.fasta")
])
assert_files_equal(cutpath("info-rc.txt"), info_path)
def test_named_adapter(run):
run("-a MY_ADAPTER=GCCGAACTTCTTAGACTGCCTTAAGGACGT", "illumina.fastq", "illumina.fastq.gz")
def test_adapter_with_u(run):
run("-a GCCGAACUUCUUAGACUGCCUUAAGGACGU", "illumina.fastq", "illumina.fastq.gz")
def test_bzip2_input(run, cores):
run(["--cores", str(cores), "-a", "TTAGACATATCTCCGTCG"], "small.fastq", "small.fastq.bz2")
@pytest.mark.parametrize("extension", ["bz2", "xz", "gz"])
def test_compressed_output(tmp_path, cores, extension):
out_path = str(tmp_path / ("small.fastq." + extension))
params = [
"--cores", str(cores), "-a", "TTAGACATATCTCCGTCG", "-o", out_path, datapath("small.fastq")]
main(params)
def test_bzip2_multiblock(run):
run("-b TTAGACATATCTCCGTCG", "small.fastq", "multiblock.fastq.bz2")
def test_xz(run):
run('-b TTAGACATATCTCCGTCG', 'small.fastq', 'small.fastq.xz')
def test_no_args():
with pytest.raises(SystemExit):
main([])
def test_two_fastqs():
with pytest.raises(SystemExit):
main([datapath('paired.1.fastq'), datapath('paired.2.fastq')])
def test_anchored_no_indels(run):
run('-g ^TTAGACATAT --no-indels -e 0.1', 'anchored_no_indels.fasta', 'anchored_no_indels.fasta')
def test_anchored_no_indels_wildcard_read(run):
run('-g ^TTAGACATAT --match-read-wildcards --no-indels -e 0.1',
'anchored_no_indels_wildcard.fasta', 'anchored_no_indels.fasta')
def test_anchored_no_indels_wildcard_adapt(run):
run('-g ^TTAGACANAT --no-indels -e 0.12', 'anchored_no_indels.fasta', 'anchored_no_indels.fasta')
def test_non_iupac_characters(run):
with pytest.raises(SystemExit):
main(['-a', 'ZACGT', datapath('small.fastq')])
def test_unconditional_cut_front(run):
run('-u 5', 'unconditional-front.fastq', 'small.fastq')
def test_unconditional_cut_back(run):
run('-u -5', 'unconditional-back.fastq', 'small.fastq')
def test_unconditional_cut_both(run):
run('-u -5 -u 5', 'unconditional-both.fastq', 'small.fastq')
def test_unconditional_cut_too_many_commas():
with pytest.raises(SystemExit):
main(["-u", "5,7,8", datapath("small.fastq")])
def test_unconditional_cut_invalid_number():
with pytest.raises(SystemExit):
main(["-u", "a,b", datapath("small.fastq")])
def test_untrimmed_output(run, cores, tmp_path):
path = tmp_path / "untrimmed.fastq"
stats = run(["--cores", str(cores), "-a", "TTAGACATATCTCCGTCG", "--untrimmed-output", path],
"small.trimmed.fastq", "small.fastq")
assert_files_equal(cutpath("small.untrimmed.fastq"), path)
assert stats.with_adapters[0] == 2
assert stats.written == 2
assert stats.written_bp[0] == 46
def test_adapter_file(run):
run('-a file:' + datapath('adapter.fasta'), 'illumina.fastq', 'illumina.fastq.gz')
def test_adapter_file_5p_anchored(run):
run('-N -g file:' + datapath('prefix-adapter.fasta'), 'anchored.fasta', 'anchored.fasta')
def test_adapter_file_3p_anchored(run):
run('-N -a file:' + datapath('suffix-adapter.fasta'), 'anchored-back.fasta', 'anchored-back.fasta')
def test_adapter_file_5p_anchored_no_indels(run):
run('-N --no-indels -g file:' + datapath('prefix-adapter.fasta'), 'anchored.fasta', 'anchored.fasta')
def test_adapter_file_3p_anchored_no_indels(run):
run('-N --no-indels -a file:' + datapath('suffix-adapter.fasta'), 'anchored-back.fasta', 'anchored-back.fasta')
def test_adapter_file_empty_name(run):
run('-N -a file:' + datapath('adapter-empty-name.fasta'), 'illumina.fastq', 'illumina.fastq.gz')
@pytest.mark.parametrize("ext", ["", ".gz"])
def test_demultiplex(cores, tmp_path, ext):
multiout = str(tmp_path / 'tmp-demulti.{name}.fasta') + ext
params = [
'--cores', str(cores),
'-a', 'first=AATTTCAGGAATT',
'-a', 'second=GTTCTCTAGTTCT',
'-o', multiout,
datapath('twoadapters.fasta'),
]
main(params)
for name in ("first", "second", "unknown"):
actual = multiout.format(name=name)
if ext == ".gz":
subprocess.run(["gzip", "-d", actual], check=True)
actual = actual[:-3]
expected = cutpath("twoadapters.{name}.fasta".format(name=name))
assert_files_equal(expected, actual)
def test_multiple_fake_anchored_adapters(run):
run("-g ^CGTCCGAAGTAGC -g ^ATTGCCCTAG "
"-a TTCCATGCAGCATT$ -a CCAGTCCCCCC$ "
"-a GCCGAACTTCTTAGACTGCCTTAAGGACGT",
"illumina.fastq",
"illumina.fastq.gz")
def test_multiple_prefix_adapters(run):
run("-g ^GTACGGATTGTTCAGTA -g ^TATTAAGCTCATTC", "multiprefix.fasta", "multi.fasta")
def test_multiple_prefix_adapters_noindels(run):
run("--no-indels -g ^GTACGGATTGTTCAGTA -g ^TATTAAGCTCATTC", "multiprefix.fasta", "multi.fasta")
def test_multiple_suffix_adapters_noindels(run):
run("--no-indels -a CGTGATTATCTTGC$ -a CCTATTAGTGGTTGAAC$", "multisuffix.fasta", "multi.fasta")
def test_max_n(run):
assert run('--max-n 0', 'maxn0.fasta', 'maxn.fasta').filtered["too_many_n"] == 4
assert run('--max-n 1', 'maxn1.fasta', 'maxn.fasta').filtered["too_many_n"] == 2
assert run('--max-n 2', 'maxn2.fasta', 'maxn.fasta').filtered["too_many_n"] == 1
assert run('--max-n 0.2', 'maxn0.2.fasta', 'maxn.fasta').filtered["too_many_n"] == 3
assert run('--max-n 0.4', 'maxn0.4.fasta', 'maxn.fasta').filtered["too_many_n"] == 2
def test_quiet_is_quiet():
    """--quiet must suppress all output on both stdout and stderr.

    sys.stdout/sys.stderr are swapped for in-memory streams (including a
    fake binary ``buffer`` attribute, which cutadapt may write to) and the
    test verifies nothing is written to either stream.
    """
    captured_standard_output = StringIO()
    captured_standard_error = StringIO()
    # Real text streams expose a binary .buffer; fake it so binary writes land here
    setattr(captured_standard_output, "buffer", BytesIO())
    setattr(captured_standard_error, "buffer", BytesIO())
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    try:
        sys.stdout = captured_standard_output
        sys.stderr = captured_standard_error
        main(['-o', os.devnull, '--quiet', datapath('small.fastq')])
    finally:
        # Always restore the real streams, even if main() raises
        sys.stdout = old_stdout
        sys.stderr = old_stderr
    assert captured_standard_output.getvalue() == ''
    assert captured_standard_error.getvalue() == ''
    assert getattr(captured_standard_output, "buffer").getvalue() == b''
    # Bug fix: the original asserted on stdout's buffer twice and never
    # checked stderr's binary buffer at all.
    assert getattr(captured_standard_error, "buffer").getvalue() == b''
def test_x_brace_notation():
main(['-o', os.devnull, '--quiet', '-a', 'X{5}', datapath('small.fastq')])
def test_nextseq(run):
run('--nextseq-trim 22', 'nextseq.fastq', 'nextseq.fastq')
def test_linked_explicitly_anchored(run):
run('-a ^AAAAAAAAAA...TTTTTTTTTT', 'linked.fasta', 'linked.fasta')
def test_linked_multiple(run):
run('-a ^AAAAAAAAAA...TTTTTTTTTT -a ^AAAAAAAAAA...GCGCGCGCGC', 'linked.fasta', 'linked.fasta')
def test_linked_both_anchored(run):
run('-a ^AAAAAAAAAA...TTTTT$', 'linked-anchored.fasta', 'linked.fasta')
def test_linked_5p_not_anchored(run):
run('-g AAAAAAAAAA...TTTTTTTTTT', 'linked-not-anchored.fasta', 'linked.fasta')
def test_linked_discard_untrimmed(run):
run('-a ^AAAAAAAAAA...TTTTTTTTTT --discard-untrimmed', 'linked-discard.fasta', 'linked.fasta')
def test_linked_discard_untrimmed_g(run):
run('-g AAAAAAAAAA...TTTTTTTTTT --discard-untrimmed', 'linked-discard-g.fasta', 'linked.fasta')
def test_linked_lowercase(run):
run('-a ^AACCGGTTTT...GGGGGGG$ -a ^AAAA...TTTT$ --times=2 --action=lowercase',
'linked-lowercase.fasta', 'linked.fasta')
def test_linked_info_file(tmp_path):
info_path = tmp_path / 'info.txt'
main(['-a linkedadapter=^AAAAAAAAAA...TTTTTTTTTT', '--info-file', str(info_path),
'-o', str(tmp_path / 'out.fasta'), datapath('linked.fasta')])
assert_files_equal(cutpath('linked-info.txt'), info_path, ignore_trailing_space=True)
def test_linked_anywhere():
with pytest.raises(SystemExit):
main(['-b', 'AAA...TTT', datapath('linked.fasta')])
def test_anywhere_anchored_5p():
with pytest.raises(SystemExit):
main(['-b', '^AAA', datapath('small.fastq')])
def test_anywhere_anchored_3p():
with pytest.raises(SystemExit):
main(['-b', 'TTT$', datapath('small.fastq')])
def test_fasta(run):
run('-a TTAGACATATCTCCGTCG', 'small.fasta', 'small.fastq')
def test_fasta_no_trim(run):
run([], 'small-no-trim.fasta', 'small.fastq')
def test_length(run):
run('--length 5', 'shortened.fastq', 'small.fastq')
def test_negative_length(run):
run('--length -5', 'shortened-negative.fastq', 'small.fastq')
@pytest.mark.timeout(0.5)
def test_issue_296(tmp_path):
info_path = tmp_path / 'info.txt'
reads_path = tmp_path / 'reads.fasta'
out_path = tmp_path / 'out.fasta'
reads_path.write_text(">read\nCACAAA\n")
main([
"--info-file", str(info_path),
"--no-trim",
"-g", "TTTCAC",
"-o", str(out_path),
str(reads_path),
])
assert_files_equal(reads_path, out_path)
# --- 'X' in adapter sequences, filters and multicore options ----------------
def test_xadapter(run):
    # Leading X: adapter must not be preceded by anything (non-internal 5').
    run('-g XTCCGAATAGA', 'xadapter.fasta', 'xadapterx.fasta')
def test_adapterx(run):
    # Trailing X: adapter must not be followed by anything (non-internal 3').
    run('-a TCCGAATAGAX', 'adapterx.fasta', 'xadapterx.fasta')
def test_discard_casava(run):
    # Reads flagged as filtered in their CASAVA header are dropped and counted.
    stats = run('--discard-casava', 'casava.fastq', 'casava.fastq')
    assert stats.filtered["casava_filtered"] == 1
def test_underscore(run):
    # Input file named *_fastq.gz (no ".fastq" extension): format detection
    # must still work.
    run('-b TTAGACATATCTCCGTCG', 'small.fastq', 'underscore_fastq.gz')
def test_cores_autodetect(run):
    # --cores 0 means "autodetect number of cores".
    run('--cores 0 -b TTAGACATATCTCCGTCG', 'small.fastq', 'underscore_fastq.gz')
def test_write_compressed_fastq(cores, tmp_path):
    # `cores` fixture parametrizes single- and multi-core gzip writing.
    main(['--cores', str(cores), '-o', str(tmp_path / 'out.fastq.gz'), datapath('small.fastq')])
def test_minimal_report(run):
    run('-b TTAGACATATCTCCGTCG --report=minimal', 'small.fastq', 'small.fastq')
def test_paired_separate(run):
    # Paired-end data processed as two independent single-end runs.
    run("-a TTAGACATAT", "paired-separate.1.fastq", "paired.1.fastq")
    run("-a CAGTGGAGTA", "paired-separate.2.fastq", "paired.2.fastq")
def test_empty_read_with_wildcard_in_adapter(run):
    # IUPAC wildcard (W) in the adapter must not crash on zero-length reads.
    run("-g CWC", "empty.fastq", "empty.fastq")
def test_print_progress_to_tty(tmp_path, mocker):
    # Pretend stderr is a TTY so the interactive progress path is exercised.
    mocker.patch("cutadapt.utils.sys.stderr").isatty.return_value = True
    main(["-o", str(tmp_path / "out.fastq"), datapath("small.fastq")])
def test_adapter_order(run):
    # Result must not depend on the order -g/-a adapters are given.
    run("-g ^AAACC -a CCGGG", "adapterorder-ga.fasta", "adapterorder.fasta")
    run("-a CCGGG -g ^AAACC", "adapterorder-ag.fasta", "adapterorder.fasta")
# --- --revcomp (reverse-complement matching) --------------------------------
def test_reverse_complement_no_rc_suffix(run, tmp_path):
    # --rename "{header}" keeps the original header, so no " rc" suffix is
    # appended to reverse-complemented reads.
    out_path = tmp_path / "out.fastq"
    main([
        "-o", str(out_path),
        "--revcomp",
        "--no-index",
        "--rename", "{header}",
        "-g", "^TTATTTGTCT",
        "-g", "^TCCGCACTGG",
        datapath("revcomp.1.fastq")
    ])
    with dnaio.open(out_path) as f:
        reads = list(f)
    assert len(reads) == 6
    assert reads[1].name == "read2/1"
    assert reads[1].sequence == "ACCATCCGATATGTCTAATGTGGCCTGTTG"
def test_reverse_complement_normalized(run):
    # Default --revcomp behavior: output is normalized to forward orientation
    # and reverse-complemented reads are counted in the stats.
    stats = run(
        "--revcomp --no-index -g ^TTATTTGTCT -g ^TCCGCACTGG",
        "revcomp-single-normalize.fastq",
        "revcomp.1.fastq",
    )
    assert stats.n == 6
    assert stats.reverse_complemented == 2
def test_reverse_complement_and_info_file(run, tmp_path, cores):
    # --revcomp together with --info-file: reverse-complemented reads get an
    # " rc" suffix in the info dump's read-name column.
    info_path = str(tmp_path / "info.txt")
    run(
        [
            "--revcomp",
            "--no-index",
            "-g",
            "^TTATTTGTCT",
            "-g",
            "^TCCGCACTGG",
            "--info-file",
            info_path,
        ],
        "revcomp-single-normalize.fastq",
        "revcomp.1.fastq",
    )
    with open(info_path) as f:
        lines = f.readlines()
    assert len(lines) == 6
    assert lines[0].split("\t")[0] == "read1/1"
    assert lines[1].split("\t")[0] == "read2/1 rc"
def test_max_expected_errors(run, cores):
    # --max-ee filters reads whose summed error probability exceeds 0.9.
    stats = run("--max-ee=0.9", "maxee.fastq", "maxee.fastq")
    assert stats.filtered["too_many_expected_errors"] == 2
def test_max_expected_errors_fasta(tmp_path):
    # FASTA has no qualities; --max-ee must not crash (filter cannot apply).
    path = tmp_path / "input.fasta"
    path.write_text(">read\nACGTACGT\n")
    main(["--max-ee=0.001", "-o", os.devnull, str(path)])
def test_warn_if_en_dashes_used():
    # The first argument deliberately uses an en dash (U+2013), not "-q":
    # pasted command lines with typographic dashes must abort with a hint.
    with pytest.raises(SystemExit):
        main(["–q", "25", "-o", os.devnull, "in.fastq"])
# --- Read renaming options --------------------------------------------------
@pytest.mark.parametrize("opt", ["-y", "--suffix"])
def test_suffix(opt, run):
    # -y is the short alias of --suffix; "{name}" expands to the adapter name.
    run([opt, ' {name}', '-e', '0', '-a', 'OnlyT=TTTTTTTT', '-a', 'OnlyG=GGGGGGGG'], "suffix.fastq", "suffix.fastq")
@pytest.mark.parametrize("opt", ["--prefix", "--suffix"])
def test_rename_cannot_be_combined_with_other_renaming_options(opt):
    # --rename is mutually exclusive with --prefix/--suffix.
    with pytest.raises(SystemExit):
        main([opt, "something", "--rename='{id} {comment} extrainfo'", "-o", os.devnull, datapath("empty.fastq")])
def test_rename(run):
    # --rename template with placeholders for id, cut suffix, header, adapter.
    run([
        "--rename={id}_{cut_suffix} {header} {adapter_name}",
        "--cut=-4",
        "-a", "OnlyT=TTTTTT",
        "-a", "OnlyG=GGGGGG",
    ], "rename.fastq", "suffix.fastq")
@pytest.mark.skip("This has not been fixed")
def test_terminates_correctly_on_error_in_subprocess(tmp_path):
    # Known open issue: a parse error in a worker (-j 2) should terminate the
    # whole run cleanly instead of hanging.
    params = [
        "-j", "2",
        "-o", str(tmp_path / "out.fastq.gz"),
        datapath("format-error.fastq"),
    ]
    with pytest.raises(SystemExit):
        main(params)
| true | true |
f726506396cd55e3c14fcaaeebe3a90d09e1dcf1 | 851 | py | Python | tests/cupy_tests/core_tests/test_syncdetect.py | svlandeg/cupy | 484e007d5bf58a0445af2f6e7aa3fdfe0fcc2363 | [
"MIT"
] | 6,180 | 2016-11-01T14:22:30.000Z | 2022-03-31T08:39:20.000Z | tests/cupy_tests/core_tests/test_syncdetect.py | svlandeg/cupy | 484e007d5bf58a0445af2f6e7aa3fdfe0fcc2363 | [
"MIT"
] | 6,281 | 2016-12-22T07:42:31.000Z | 2022-03-31T19:57:02.000Z | tests/cupy_tests/core_tests/test_syncdetect.py | svlandeg/cupy | 484e007d5bf58a0445af2f6e7aa3fdfe0fcc2363 | [
"MIT"
] | 829 | 2017-02-23T05:46:12.000Z | 2022-03-27T17:40:03.000Z | import unittest
import pytest
import cupy
import cupyx
class TestSyncDetect(unittest.TestCase):
    """Checks that ``cupyx.allow_synchronize`` gates device synchronization.

    ``ndarray.get()`` copies device memory to the host, which synchronizes
    the device, so it must raise ``cupyx.DeviceSynchronized`` whenever the
    innermost ``allow_synchronize`` context forbids synchronization.
    """

    def test_disallowed(self):
        arr = cupy.array([2, 3])
        with cupyx.allow_synchronize(False):
            with pytest.raises(cupyx.DeviceSynchronized):
                arr.get()

    def test_allowed(self):
        arr = cupy.array([2, 3])
        with cupyx.allow_synchronize(True):
            arr.get()

    def test_nested_disallowed(self):
        # The innermost context decides: False inside True still forbids sync.
        arr = cupy.array([2, 3])
        with cupyx.allow_synchronize(True):
            with cupyx.allow_synchronize(False):
                with pytest.raises(cupyx.DeviceSynchronized):
                    arr.get()

    def test_nested_allowed(self):
        # The innermost context decides: True inside False permits sync.
        arr = cupy.array([2, 3])
        with cupyx.allow_synchronize(False):
            with cupyx.allow_synchronize(True):
                arr.get()
| 25.029412 | 61 | 0.591069 | import unittest
import pytest
import cupy
import cupyx
class TestSyncDetect(unittest.TestCase):
def test_disallowed(self):
a = cupy.array([2, 3])
with cupyx.allow_synchronize(False):
with pytest.raises(cupyx.DeviceSynchronized):
a.get()
def test_allowed(self):
a = cupy.array([2, 3])
with cupyx.allow_synchronize(True):
a.get()
def test_nested_disallowed(self):
a = cupy.array([2, 3])
with cupyx.allow_synchronize(True):
with cupyx.allow_synchronize(False):
with pytest.raises(cupyx.DeviceSynchronized):
a.get()
def test_nested_allowed(self):
a = cupy.array([2, 3])
with cupyx.allow_synchronize(False):
with cupyx.allow_synchronize(True):
a.get()
| true | true |
f72650f004500bff57f9152bebe02c0607cf7d24 | 567 | py | Python | BOJ14405.py | INYEONGKIM/BOJ | 5e83d77a92d18b0d20d26645c7cfe4ba3e2d25bc | [
"MIT"
] | 2 | 2019-03-05T15:42:46.000Z | 2019-07-24T15:52:36.000Z | BOJ14405.py | INYEONGKIM/BOJ | 5e83d77a92d18b0d20d26645c7cfe4ba3e2d25bc | [
"MIT"
] | null | null | null | BOJ14405.py | INYEONGKIM/BOJ | 5e83d77a92d18b0d20d26645c7cfe4ba3e2d25bc | [
"MIT"
] | null | null | null | s=input();f=True
def is_valid_cry(s):
    """Return True iff *s* can be segmented entirely into "pi", "ka", "chu".

    Replaces the original flag-variable loop that used a bare ``except`` as
    control flow (which also silently swallowed real errors). An empty
    string is valid (zero tokens), matching the original behavior.
    """
    i = 0
    while i < len(s):
        # str.startswith with a start index avoids repeated slicing.
        if s.startswith("pi", i) or s.startswith("ka", i):
            i += 2
        elif s.startswith("chu", i):
            i += 3
        else:
            return False
    return True


def main():
    """Read one line from stdin and print YES/NO (BOJ 14405 format)."""
    s = input()
    print("YES" if is_valid_cry(s) else "NO")


# Guarding the entry point makes the module importable (and testable)
# without blocking on stdin.
if __name__ == "__main__":
    main()
| 18.9 | 30 | 0.294533 | s=input();f=True
try:
while s!="":
if s[0]=="p":
t=s[:2]
if t=="pi":
s=s[2:]
else:
f=False; break
elif s[0]=="k":
t=s[:2]
if t=="ka":
s=s[2:]
else:
f=False; break
elif s[0]=="c":
t=s[:3]
if t=="chu":
s=s[3:]
else:
f=False; break
else:
f=False; break
except:
f=False
if f:
print("YES")
else:
print("NO")
| true | true |
f726518fe0feae8b103c50e15118f179206e6821 | 374 | py | Python | build/check_obstacle/catkin_generated/pkg.installspace.context.pc.py | EurobotMDX/eurobot_2020_odroid_cam | ddd9a17d53899f1c615816fd74512c112ecad188 | [
"MIT"
] | 4 | 2019-10-26T18:48:51.000Z | 2020-02-27T19:31:36.000Z | build/check_obstacle/catkin_generated/pkg.installspace.context.pc.py | EurobotMDX/eurobot_2020_odroid_cam | ddd9a17d53899f1c615816fd74512c112ecad188 | [
"MIT"
] | null | null | null | build/check_obstacle/catkin_generated/pkg.installspace.context.pc.py | EurobotMDX/eurobot_2020_odroid_cam | ddd9a17d53899f1c615816fd74512c112ecad188 | [
"MIT"
] | 1 | 2019-10-26T18:50:48.000Z | 2019-10-26T18:50:48.000Z | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "check_obstacle"
PROJECT_SPACE_DIR = "/home/ros/lidar_ws/install"
PROJECT_VERSION = "0.0.0"
| 41.555556 | 68 | 0.705882 |
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "check_obstacle"
PROJECT_SPACE_DIR = "/home/ros/lidar_ws/install"
PROJECT_VERSION = "0.0.0"
| true | true |
f7265193a85ba6ceeb41dc4f556930b01622c5a0 | 2,456 | py | Python | python/paddle/fluid/tests/unittests/ir/inference/test_identity_scale_clean_pass.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 11 | 2016-08-29T07:43:26.000Z | 2016-08-29T07:51:24.000Z | python/paddle/fluid/tests/unittests/ir/inference/test_identity_scale_clean_pass.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/ir/inference/test_identity_scale_clean_pass.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 1 | 2021-12-09T08:59:17.000Z | 2021-12-09T08:59:17.000Z | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from auto_scan_test import PassAutoScanTest
from program_config import TensorConfig, ProgramConfig, OpConfig
import paddle.inference as paddle_infer
import unittest
import hypothesis.strategies as st
class TestIdentityScaleCleanPass(PassAutoScanTest):
    """Auto-scan test for Paddle's ``identity_scale_op_clean_pass``.

    The pass removes identity ``scale`` ops (scale == 1, bias == 0), so a
    program of ``relu -> identity scale`` must be optimized down to a single
    ``relu`` op.
    """

    def sample_predictor_configs(self, program_config):
        # Run under TensorRT; after the pass only `relu` should remain in the
        # optimized program (checked against the yielded op list).
        config = self.create_trt_inference_config()
        config.enable_tensorrt_engine(
            max_batch_size=8,
            workspace_size=0,
            min_subgraph_size=0,
            precision_mode=paddle_infer.PrecisionType.Float32,
            use_static=False,
            use_calib_mode=False)
        yield config, ['relu'], (1e-5, 1e-5)

    def sample_program_config(self, draw):
        # Randomize the input shape (NCHW) and the scale op's layout flag.
        bias_after_scale = draw(st.booleans())
        n = draw(st.integers(min_value=1, max_value=4))
        c = draw(st.integers(min_value=1, max_value=20))
        h = draw(st.integers(min_value=1, max_value=20))
        w = draw(st.integers(min_value=1, max_value=20))
        relu_op = OpConfig("relu",
                           inputs={"X": ["relu_x"]},
                           outputs={"Out": ["relu_out"]})
        scale_op = OpConfig("scale",
                            inputs={"X": ["relu_out"]},
                            outputs={"Out": ["scale_out"]},
                            bias=0.,
                            scale=1.,
                            # Fix: the drawn flag used to be ignored and
                            # hard-coded to True. With bias == 0 the flag
                            # cannot change the math, so using it widens
                            # attribute coverage without changing results.
                            bias_after_scale=bias_after_scale)
        program_config = ProgramConfig(
            ops=[relu_op, scale_op],
            weights={},
            inputs={"relu_x": TensorConfig(shape=[n, c, h, w])},
            outputs=["scale_out"])
        return program_config

    def test(self):
        self.run_and_statis(max_examples=25,
                            passes=["identity_scale_op_clean_pass"])


if __name__ == "__main__":
    unittest.main()
| 37.212121 | 74 | 0.623779 |
import numpy as np
from auto_scan_test import PassAutoScanTest
from program_config import TensorConfig, ProgramConfig, OpConfig
import paddle.inference as paddle_infer
import unittest
import hypothesis.strategies as st
class TestIdentityScaleCleanPass(PassAutoScanTest):
def sample_predictor_configs(self, program_config):
config = self.create_trt_inference_config()
config.enable_tensorrt_engine(
max_batch_size=8,
workspace_size=0,
min_subgraph_size=0,
precision_mode=paddle_infer.PrecisionType.Float32,
use_static=False,
use_calib_mode=False)
yield config, ['relu'], (1e-5, 1e-5)
def sample_program_config(self, draw):
bias_after_scale = draw(st.booleans())
n = draw(st.integers(min_value=1, max_value=4))
c = draw(st.integers(min_value=1, max_value=20))
h = draw(st.integers(min_value=1, max_value=20))
w = draw(st.integers(min_value=1, max_value=20))
relu_op = OpConfig("relu",
inputs={"X": ["relu_x"]},
outputs={"Out": ["relu_out"]})
scale_op = OpConfig("scale",
inputs={"X": ["relu_out"]},
outputs={"Out": ["scale_out"]},
bias=0.,
scale=1.,
bias_after_scale=True)
program_config = ProgramConfig(
ops=[relu_op, scale_op],
weights={},
inputs={"relu_x": TensorConfig(shape=[n, c, h, w])},
outputs=["scale_out"])
return program_config
def test(self):
self.run_and_statis(max_examples=25,
passes=["identity_scale_op_clean_pass"])
if __name__ == "__main__":
unittest.main()
| true | true |
f72651d2708561bc7a3f8b8ff37df2a1572eaac2 | 15,984 | py | Python | LSTM for language modeling/Question2_Part_1_To_2.py | sotudian/Natural-Language-Processing | 61ba2ac78e440683519d2121ca2b29a17277e46b | [
"Apache-2.0"
] | null | null | null | LSTM for language modeling/Question2_Part_1_To_2.py | sotudian/Natural-Language-Processing | 61ba2ac78e440683519d2121ca2b29a17277e46b | [
"Apache-2.0"
] | null | null | null | LSTM for language modeling/Question2_Part_1_To_2.py | sotudian/Natural-Language-Processing | 61ba2ac78e440683519d2121ca2b29a17277e46b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Train the language model on texts from the file "prideAndPrejudice.txt". Before using the text to
train the language model, first sentence-segment, then tokenize, then lower-case each line of the
file (this implementation uses NLTK's tokenizers). Append a start-of-sentence token '<s>' and an
end-of-sentence token '</s>' to each sentence, and put each sentence on its own line. Use only words
that appear more than once in the corpus and assign an UNK token to the rest; sentences shorter than
5 tokens may also need padding. Train the language model and save the trained model. Finally,
generate 10 examples of text from it, each starting at the '<s>' token and ending at '</s>'.
"""
import re
import pickle
import random
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.lm.preprocessing import pad_both_ends
from collections import Counter
import math
# Functions ###########================-------------------
'''
############################################################
#### Piazza calculate Preplexity
net.cuda()
net.eval()
H = 0
TOTAL_PROBs = 1
with torch.no_grad():
for Test_Sentence in Test_1_Preprocessed_Pride_Text_Perplexity:
H += len(Test_Sentence)
# Calculate for each sentence
Total_prob_Sentence = 1
for i,word in enumerate(Test_Sentence):
if i == len(Test_Sentence)-1:
continue
else:
if i==0:
h = net.init_hidden(1)
h = tuple([each.data for each in h])
else:
h = h_new
x = np.array([[word2idx[word]]])
inputs = torch.from_numpy(x)
inputs = inputs.cuda()
out, h_new = net(inputs, h)
# get the token probabilities
p = F.softmax(out, dim=1).data
p = p.cpu()
p = p.numpy()
p = p.reshape(p.shape[1],)
Prob_next_Word = p[word2idx[Test_Sentence[i+1]]] # P(w4|w1,w2,w3)
Total_prob_Sentence = Prob_next_Word * Total_prob_Sentence
TOTAL_PROBs = TOTAL_PROBs * Total_prob_Sentence
Preplexity = (1/TOTAL_PROBs)**(1/float(H))
############################################################
'''
def NLP_PreProcessing(text_main):
    """Split raw text into padded, lower-cased token lists.

    Each sentence found by ``nltk.sent_tokenize`` becomes one list of
    lower-cased word tokens, wrapped with '<s>' / '</s>' via
    ``pad_both_ends(..., n=2)``. Returns a list of these token lists.
    """
    padded_sentences = []
    for sentence in nltk.sent_tokenize(text_main):
        tokens = word_tokenize(sentence.lower())
        padded_sentences.append(list(pad_both_ends(tokens, n=2)))
    return padded_sentences
def NLP_PreProcessing_Test(text_main):
    """Tokenize one already-segmented sentence for the test set.

    Lower-cases and word-tokenizes *text_main* as a single sentence, pads it
    with '<s>' / '</s>', and returns it wrapped in a one-element list (to
    match the shape returned by ``NLP_PreProcessing``).
    """
    tokens = word_tokenize(text_main.lower())
    return [list(pad_both_ends(tokens, n=2))]
def Equal_seq(text, seq_len):
    """Cut a token list into fixed-length windows of ``seq_len`` tokens.

    For inputs longer than ``seq_len``, returns every contiguous window of
    that length (stride 1). Shorter (or equal-length) inputs are returned as
    a single sequence, left-padded with '_PAD' tokens up to ``seq_len``.
    """
    if len(text) > seq_len:
        return [text[start:start + seq_len]
                for start in range(len(text) - seq_len + 1)]
    # Too short: one left-padded sequence (zero pads when lengths are equal).
    return [['_PAD'] * (seq_len - len(text)) + text]
def get_batches(arr_x, arr_y, batch_size):
    """Yield successive ``(x, y)`` mini-batches of ``batch_size`` rows.

    Walks both arrays in lockstep; rows past the last full stride boundary
    are dropped, so every yielded batch has exactly ``batch_size`` rows
    (required because the LSTM hidden state is sized per batch).
    """
    start = 0
    for stop in range(batch_size, arr_x.shape[0], batch_size):
        yield arr_x[start:stop, :], arr_y[start:stop, :]
        start = stop
class WordLSTM(nn.Module):
    """Word-level LSTM language model.

    Architecture: trainable embedding (dim 200) -> stacked LSTM -> dropout
    -> linear projection onto the vocabulary.

    Args:
        n_hidden: hidden units per LSTM layer.
        n_layers: number of stacked LSTM layers.
        drop_prob: dropout probability (between LSTM layers and before fc).
        lr: learning rate (stored only; the optimizer is built in ``train``).
        vocab_size: vocabulary size. Defaults to the module-level
            ``vocab_size`` global for backward compatibility with the
            original script, but can now be passed explicitly.
    """

    def __init__(self, n_hidden=256, n_layers=4, drop_prob=0.3, lr=0.001,
                 vocab_size=None):
        super().__init__()
        self.drop_prob = drop_prob
        self.n_layers = n_layers
        self.n_hidden = n_hidden
        self.lr = lr
        # Generalization: the vocabulary size used to be read from a module
        # global only; it is now an optional parameter (the default keeps
        # the old behavior).
        if vocab_size is None:
            vocab_size = globals()["vocab_size"]
        self.vocab_size = vocab_size
        self.emb_layer = nn.Embedding(vocab_size, 200)
        # stacked LSTM over the embedded tokens
        self.lstm = nn.LSTM(200, n_hidden, n_layers,
                            dropout=drop_prob, batch_first=True)
        self.dropout = nn.Dropout(drop_prob)
        # project hidden states onto vocabulary logits
        self.fc = nn.Linear(n_hidden, self.vocab_size)

    def forward(self, x, hidden):
        """Run a forward pass.

        Args:
            x: LongTensor of token indices, shape (batch, seq_len).
            hidden: (h, c) tuple from ``init_hidden`` or a previous call.

        Returns:
            (logits, hidden): logits of shape (batch * seq_len, vocab_size)
            and the new hidden state.
        """
        embedded = self.emb_layer(x)
        lstm_output, hidden = self.lstm(embedded, hidden)
        out = self.dropout(lstm_output)
        # flatten (batch, seq, hidden) -> (batch * seq, hidden) for fc
        out = out.reshape(-1, self.n_hidden)
        out = self.fc(out)
        return out, hidden

    def init_hidden(self, batch_size):
        """Return a zeroed (h, c) hidden-state tuple for ``batch_size``.

        Tensors are created on the GPU when one is available, matching the
        device used by ``train``/``predict``.
        """
        weight = next(self.parameters()).data
        if torch.cuda.is_available():
            hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
                      weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
        else:
            hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
                      weight.new(self.n_layers, batch_size, self.n_hidden).zero_())
        return hidden
def train(net, epochs, batch_size, lr, clip, print_every,XX,YY):
    # Train `net` on integer-encoded sequences with Adam + gradient clipping.
    #   net         : WordLSTM (will be moved to the GPU -- CUDA required)
    #   epochs      : number of passes over the data
    #   batch_size  : rows per mini-batch (must match init_hidden)
    #   lr          : Adam learning rate
    #   clip        : max gradient norm for clip_grad_norm_
    #   print_every : log progress every N optimizer steps
    #   XX, YY      : numpy arrays of inputs / next-word targets, shape (N, seq_len)
    # optimizer
    opt = torch.optim.Adam(net.parameters(), lr=lr)
    # loss
    criterion = nn.CrossEntropyLoss()
    # push model to GPU
    net.cuda()
    counter = 0
    net.train()
    for e in range(epochs):
        # initialize hidden state (reset once per epoch; carried across batches)
        h = net.init_hidden(batch_size)
        for x, y in get_batches(XX, YY, batch_size):
            counter+= 1
            # convert numpy arrays to PyTorch arrays
            inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
            # push tensors to GPU
            inputs, targets = inputs.cuda(), targets.cuda()
            # detach hidden states so backprop is truncated at batch boundaries
            h = tuple([each.data for each in h])
            # zero accumulated gradients
            net.zero_grad()
            # get the output from the model
            output, h = net(inputs, h)
            # calculate the loss (targets flattened to match the (batch*seq, vocab) logits)
            loss = criterion(output, targets.view(-1))
            # back-propagate error
            loss.backward()
            # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
            nn.utils.clip_grad_norm_(net.parameters(), clip)
            # update weights
            opt.step()
            if counter % print_every == 0:
                print("Epoch: {}/{}...".format(e+1, epochs),
                      "Step: {}...".format(counter))
def predict(net, tkn, h=None, word2idx_Inp = None, idx2word_Inp =None ):
    # Sample the next token given the current token `tkn` and hidden state `h`.
    # Returns (next_token_string, new_hidden_state). Requires CUDA (inputs and
    # model are pushed to the GPU). NOTE(review): `h=None` default is never
    # usable -- the tuple() below would fail; callers must pass a real state.
    # tensor inputs: shape (1, 1) -- single token, batch of one
    x = np.array([[word2idx_Inp[tkn]]])
    inputs = torch.from_numpy(x)
    # push to GPU
    inputs = inputs.cuda()
    # detach hidden state from history
    h = tuple([each.data for each in h])
    # get the output of the model
    out, h = net(inputs, h)
    # get the token probabilities
    p = F.softmax(out, dim=1).data
    p = p.cpu()
    p = p.numpy()
    p = p.reshape(p.shape[1],)
    # get indices of top 3 values
    top_n_idx = p.argsort()[-3:][::-1]
    # randomly select one of the three indices (top-k sampling with k=3,
    # uniform over the top 3 rather than proportional to probability)
    sampled_token_index = top_n_idx[random.sample([0,1,2],1)[0]]
    # return the decoded predicted token and the hidden state
    return idx2word_Inp[sampled_token_index], h
# function to generate text
def sample(net, size, prime="<s>",word2idx_Inp = None, idx2word_Inp =None ):
# push to GPU
net.cuda()
net.eval()
# batch size is 1
h = net.init_hidden(1)
toks = prime.split()
# predict next token
for t in prime.split():
token, h = predict(net, t, h,word2idx_Inp,idx2word_Inp)
toks.append(token)
# predict subsequent tokens
if size == '</s>':
while(token!='</s>'):
token, h = predict(net, toks[-1], h,word2idx_Inp,idx2word_Inp)
toks.append(token)
else:
for i in range(size-1):
token, h = predict(net, toks[-1], h,word2idx_Inp,idx2word_Inp)
toks.append(token)
return ' '.join(toks)
def Testing(net, batch_size,Test_X,Test_Y):
net.eval()
criterion = nn.CrossEntropyLoss()
# initialize hidden state
h = net.init_hidden(batch_size)
test_loss = 0.
with torch.no_grad():
for x, y in get_batches(Test_X, Test_Y, batch_size):
# convert numpy arrays to PyTorch arrays
inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
# push tensors to GPU
inputs, targets = inputs.cuda(), targets.cuda()
# detach hidden states
h = tuple([each.data for each in h])
# get the output from the model
output, h = net(inputs, h)
test_loss += criterion(output, targets.view(-1)).item()
test_loss = test_loss / ((len(Test_X) // batch_size)+1)
print('-' * 40)
print('Test loss {:5.2f} ------ Test perplexity {:8.2f}'.format(test_loss, math.exp(test_loss)))
print('-' * 40)
class WordLSTM_with_Glove(nn.Module):
def __init__(self, n_hidden=256, n_layers=4, drop_prob=0.3, lr=0.001):
super().__init__()
self.drop_prob = drop_prob
self.n_layers = n_layers
self.n_hidden = n_hidden
self.lr = lr
self.emb_layer = nn.Embedding(vocab_size_Q6,100, padding_idx=0)
self.emb_layer.weight.data.copy_(torch.from_numpy(embedding_matrix))
self.emb_layer.weight.requires_grad = False ## freeze embeddings
'''
self.emb_layer = nn.Embedding(vocab_size_Q6,100)
self.emb_layer.weight = nn.Parameter(torch.from_numpy(embedding_matrix).float())
'''
## define the LSTM
self.lstm = nn.LSTM(100, n_hidden, n_layers,
dropout=drop_prob, batch_first=True)
## define a dropout layer
self.dropout = nn.Dropout(drop_prob)
## define the fully-connected layer
self.fc = nn.Linear(n_hidden, vocab_size_Q6)
def forward(self, x, hidden):
''' Forward pass through the network.
These inputs are x, and the hidden/cell state `hidden`. '''
## pass input through embedding layer
embedded = self.emb_layer(x)
## Get the outputs and the new hidden state from the lstm
lstm_output, hidden = self.lstm(embedded, hidden)
## pass through a dropout layer
out = self.dropout(lstm_output)
#out = out.contiguous().view(-1, self.n_hidden)
out = out.reshape(-1, self.n_hidden)
## put "out" through the fully-connected layer
out = self.fc(out)
# return the final output and the hidden state
return out, hidden
def init_hidden(self, batch_size):
''' initializes hidden state '''
# Create two new tensors with sizes n_layers x batch_size x n_hidden,
# initialized to zero, for hidden state and cell state of LSTM
weight = next(self.parameters()).data
# if GPU is available
if (torch.cuda.is_available()):
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
# if GPU is not available
else:
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_())
return hidden
# Data ###########================-------------------
with open('prideAndPrejudice.txt') as f:
Pride_Text = [line.rstrip() for line in f]
# Q2.1 ###########================-------------------
# sentence segmenting + lower casing + Tokenization + Padding using function NLP_PreProcessing
Preprocessed_Pride_Text = []
for t in range(len(Pride_Text)):
Preprocessed_Pride_Text = Preprocessed_Pride_Text + NLP_PreProcessing(Pride_Text[t])
Length_of_Sequences = 5
Pride_Text_Equal_seqs_L5 = sum([Equal_seq(i,Length_of_Sequences) for i in Preprocessed_Pride_Text], [])
del t,f
# Create Vocab
words = Counter()
for i, sentence in enumerate(Preprocessed_Pride_Text):
for word in sentence:
words.update([word])
words = {k:v for k,v in words.items() if v>1} # Removing the words that only appear once
del i,sentence,word
words = sorted(words, key=words.get, reverse=True) # Sorting the words
words = ['_PAD','_UNK'] + words
word2idx = {o:i for i,o in enumerate(words)}
idx2word = {i:o for i,o in enumerate(words)}
# Looking up the mapping dictionary and assigning the index to the respective words
Pride_Text_Equal_seqs_INDICES_L5 =[]
for i, sentence in enumerate(Pride_Text_Equal_seqs_L5):
Pride_Text_Equal_seqs_INDICES_L5.append([word2idx[word] if word in word2idx else word2idx['_UNK'] for word in sentence])
del i, sentence
X = []
Y = []
for S in Pride_Text_Equal_seqs_INDICES_L5:
X.append(S[:-1])
Y.append(S[1:])
x_int_L5 = np.array(X)
y_int_L5 = np.array(Y)
vocab_size = len(word2idx)
# Train Or Load LSTM
Do_want_To_Train = 0
batch_size = 320
epochs=20
lr=0.001
if Do_want_To_Train == 1:
net1 = WordLSTM() # instantiate the model
net1.cuda() # push the model to GPU
train(net1, epochs, batch_size, lr, 1, 50,x_int_L5,y_int_L5) # train the model
torch.save(net1, 'Q2_Part_1_Network.pt')
else:
net1 = torch.load('Q2_Part_1_Network.pt')
net1.eval()
print(net1)
# Generate text
for i in range(10):
print('=======================================')
print("- Example "+str(i+1)+": ",sample(net1, size='</s>' , prime="<s>", word2idx_Inp = word2idx, idx2word_Inp =idx2word ),'\n')
del X,Y,i,S,Do_want_To_Train
print('=' * 60)
# Q2.2 ###########================-------------------
with open('test_1.txt') as f:
test_1 = [line.rstrip() for line in f]
# sentence segmenting + lower casing + Tokenization + Padding using function NLP_PreProcessing_Test
Test_1_Preprocessed_Pride_Text = []
for t in range(len(test_1)):
Test_1_Preprocessed_Pride_Text = Test_1_Preprocessed_Pride_Text + NLP_PreProcessing_Test((test_1[t])[4:-5])
Test_1_Pride_Text_Equal_seqs = sum([Equal_seq(i,Length_of_Sequences) for i in Test_1_Preprocessed_Pride_Text], [])
del t,f
# Looking up the mapping dictionary and assigning the index to the respective words
Test_1_Pride_Text_Equal_seqs_INDICES =[]
for i, sentence in enumerate(Test_1_Pride_Text_Equal_seqs):
Test_1_Pride_Text_Equal_seqs_INDICES.append([word2idx[word] if word in word2idx else word2idx['_UNK'] for word in sentence])
del i, sentence
Test_1_X = []
Test_1_Y = []
for S in Test_1_Pride_Text_Equal_seqs_INDICES:
Test_1_X.append(S[:-1])
Test_1_Y.append(S[1:])
Test_1_x_int = np.array(Test_1_X)
Test_1_y_int = np.array(Test_1_Y)
del Test_1_X,Test_1_Y,S
# Calculate Perplexity
Testing(net1, batch_size ,Test_1_x_int,Test_1_y_int)
del Pride_Text,Length_of_Sequences
| 29.820896 | 132 | 0.603478 |
import re
import pickle
import random
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.lm.preprocessing import pad_both_ends
from collections import Counter
import math
ces = [list(pad_both_ends(Tokenized_sentences, n=2))]
return Pad_Tokenized_sentences
def Equal_seq(text, seq_len):
sequences = []
if len(text) > seq_len:
for i in range(seq_len, (len(text)+1)):
seq = text[i-seq_len:i]
sequences.append(seq)
else:
sequences = [['_PAD']*(seq_len-len(text)) + text ]
return sequences
def get_batches(arr_x, arr_y, batch_size):
prv = 0
for n in range(batch_size, arr_x.shape[0], batch_size):
x = arr_x[prv:n,:]
y = arr_y[prv:n,:]
prv = n
yield x, y
class WordLSTM(nn.Module):
def __init__(self, n_hidden=256, n_layers=4, drop_prob=0.3, lr=0.001):
super().__init__()
self.drop_prob = drop_prob
self.n_layers = n_layers
self.n_hidden = n_hidden
self.lr = lr
self.emb_layer = nn.Embedding(vocab_size, 200)
m = nn.LSTM(200, n_hidden, n_layers,
dropout=drop_prob, batch_first=True)
nn.Dropout(drop_prob)
den, vocab_size)
def forward(self, x, hidden):
)
n)
tm_output)
out = out.reshape(-1, self.n_hidden)
return out, hidden
def init_hidden(self, batch_size):
weight = next(self.parameters()).data
if (torch.cuda.is_available()):
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_())
return hidden
def train(net, epochs, batch_size, lr, clip, print_every,XX,YY):
opt = torch.optim.Adam(net.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
net.cuda()
counter = 0
net.train()
for e in range(epochs):
h = net.init_hidden(batch_size)
for x, y in get_batches(XX, YY, batch_size):
counter+= 1
inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
inputs, targets = inputs.cuda(), targets.cuda()
h = tuple([each.data for each in h])
net.zero_grad()
output, h = net(inputs, h)
loss = criterion(output, targets.view(-1))
loss.backward()
nn.utils.clip_grad_norm_(net.parameters(), clip)
opt.step()
if counter % print_every == 0:
print("Epoch: {}/{}...".format(e+1, epochs),
"Step: {}...".format(counter))
def predict(net, tkn, h=None, word2idx_Inp = None, idx2word_Inp =None ):
x = np.array([[word2idx_Inp[tkn]]])
inputs = torch.from_numpy(x)
inputs = inputs.cuda()
h = tuple([each.data for each in h])
out, h = net(inputs, h)
p = F.softmax(out, dim=1).data
p = p.cpu()
p = p.numpy()
p = p.reshape(p.shape[1],)
top_n_idx = p.argsort()[-3:][::-1]
sampled_token_index = top_n_idx[random.sample([0,1,2],1)[0]]
return idx2word_Inp[sampled_token_index], h
def sample(net, size, prime="<s>",word2idx_Inp = None, idx2word_Inp =None ):
net.cuda()
net.eval()
h = net.init_hidden(1)
toks = prime.split()
for t in prime.split():
token, h = predict(net, t, h,word2idx_Inp,idx2word_Inp)
toks.append(token)
if size == '</s>':
while(token!='</s>'):
token, h = predict(net, toks[-1], h,word2idx_Inp,idx2word_Inp)
toks.append(token)
else:
for i in range(size-1):
token, h = predict(net, toks[-1], h,word2idx_Inp,idx2word_Inp)
toks.append(token)
return ' '.join(toks)
def Testing(net, batch_size,Test_X,Test_Y):
net.eval()
criterion = nn.CrossEntropyLoss()
h = net.init_hidden(batch_size)
test_loss = 0.
with torch.no_grad():
for x, y in get_batches(Test_X, Test_Y, batch_size):
inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
inputs, targets = inputs.cuda(), targets.cuda()
h = tuple([each.data for each in h])
output, h = net(inputs, h)
test_loss += criterion(output, targets.view(-1)).item()
test_loss = test_loss / ((len(Test_X) // batch_size)+1)
print('-' * 40)
print('Test loss {:5.2f} ------ Test perplexity {:8.2f}'.format(test_loss, math.exp(test_loss)))
print('-' * 40)
class WordLSTM_with_Glove(nn.Module):
def __init__(self, n_hidden=256, n_layers=4, drop_prob=0.3, lr=0.001):
super().__init__()
self.drop_prob = drop_prob
self.n_layers = n_layers
self.n_hidden = n_hidden
self.lr = lr
self.emb_layer = nn.Embedding(vocab_size_Q6,100, padding_idx=0)
self.emb_layer.weight.data.copy_(torch.from_numpy(embedding_matrix))
self.emb_layer.weight.requires_grad = False n_hidden, n_layers,
dropout=drop_prob, batch_first=True)
nn.Dropout(drop_prob)
den, vocab_size_Q6)
def forward(self, x, hidden):
)
n)
tm_output)
out = out.reshape(-1, self.n_hidden)
return out, hidden
def init_hidden(self, batch_size):
weight = next(self.parameters()).data
if (torch.cuda.is_available()):
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_())
return hidden
rd2idx[word] if word in word2idx else word2idx['_UNK'] for word in sentence])
del i, sentence
X = []
Y = []
for S in Pride_Text_Equal_seqs_INDICES_L5:
X.append(S[:-1])
Y.append(S[1:])
x_int_L5 = np.array(X)
y_int_L5 = np.array(Y)
vocab_size = len(word2idx)
Do_want_To_Train = 0
batch_size = 320
epochs=20
lr=0.001
if Do_want_To_Train == 1:
net1 = WordLSTM()
net1.cuda()
train(net1, epochs, batch_size, lr, 1, 50,x_int_L5,y_int_L5)
torch.save(net1, 'Q2_Part_1_Network.pt')
else:
net1 = torch.load('Q2_Part_1_Network.pt')
net1.eval()
print(net1)
for i in range(10):
print('=======================================')
print("- Example "+str(i+1)+": ",sample(net1, size='</s>' , prime="<s>", word2idx_Inp = word2idx, idx2word_Inp =idx2word ),'\n')
del X,Y,i,S,Do_want_To_Train
print('=' * 60)
e(Test_1_Pride_Text_Equal_seqs):
Test_1_Pride_Text_Equal_seqs_INDICES.append([word2idx[word] if word in word2idx else word2idx['_UNK'] for word in sentence])
del i, sentence
Test_1_X = []
Test_1_Y = []
for S in Test_1_Pride_Text_Equal_seqs_INDICES:
Test_1_X.append(S[:-1])
Test_1_Y.append(S[1:])
Test_1_x_int = np.array(Test_1_X)
Test_1_y_int = np.array(Test_1_Y)
del Test_1_X,Test_1_Y,S
Testing(net1, batch_size ,Test_1_x_int,Test_1_y_int)
del Pride_Text,Length_of_Sequences
| true | true |
f72654a975ae6f6f07f9c0a9f4ef3e7ceecfc94a | 488 | py | Python | AssetsApp/migrations/0010_alter_assetscategories_datetime_added.py | Kayarn-Mechatronics/Octello | 45f4f73c764ca816918c31ef3ae4889740a68802 | [
"Apache-2.0"
] | null | null | null | AssetsApp/migrations/0010_alter_assetscategories_datetime_added.py | Kayarn-Mechatronics/Octello | 45f4f73c764ca816918c31ef3ae4889740a68802 | [
"Apache-2.0"
] | null | null | null | AssetsApp/migrations/0010_alter_assetscategories_datetime_added.py | Kayarn-Mechatronics/Octello | 45f4f73c764ca816918c31ef3ae4889740a68802 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.5 on 2021-09-08 01:39
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter AssetsCategories.datetime_added.

    NOTE(review): the field default is the hard-coded generation timestamp
    (2021-09-08 01:37:53) rather than a callable such as
    django.utils.timezone.now — presumably an artifact of running
    makemigrations with a one-off default; confirm before reusing.
    """
    dependencies = [
        ('AssetsApp', '0009_alter_assetscategories_datetime_added'),
    ]
    operations = [
        migrations.AlterField(
            model_name='assetscategories',
            name='datetime_added',
            field=models.DateTimeField(default=datetime.datetime(2021, 9, 8, 1, 37, 53, 844688)),
        ),
    ]
| 24.4 | 97 | 0.647541 |
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('AssetsApp', '0009_alter_assetscategories_datetime_added'),
]
operations = [
migrations.AlterField(
model_name='assetscategories',
name='datetime_added',
field=models.DateTimeField(default=datetime.datetime(2021, 9, 8, 1, 37, 53, 844688)),
),
]
| true | true |
f72656c4b0c210ce9367e4cf253ca9a70c73fd41 | 5,884 | py | Python | Jumpscale/clients/oauth/OauthInstance.py | threefoldtech/JumpscaleX | 5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa | [
"Apache-2.0"
] | 2 | 2019-05-09T07:21:25.000Z | 2019-08-05T06:37:53.000Z | Jumpscale/clients/oauth/OauthInstance.py | threefoldtech/JumpscaleX | 5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa | [
"Apache-2.0"
] | 664 | 2018-12-19T12:43:44.000Z | 2019-08-23T04:24:42.000Z | Jumpscale/clients/oauth/OauthInstance.py | threefoldtech/jumpscale10 | 5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa | [
"Apache-2.0"
] | 7 | 2019-05-03T07:14:37.000Z | 2019-08-05T12:36:52.000Z | import urllib.request
import urllib.parse
import urllib.error
import string
import requests
import time
import random
from Jumpscale import j
JSConfigClient = j.application.JSBaseConfigClass
class OauthClient(JSConfigClient):
    """Jumpscale config client wrapping an OAuth2 provider.

    `client_instance` selects the backend: "itsyouonline"/"itsyou.online"
    maps to ItsYouOnline, anything else (default "github") to the generic
    OauthInstance.
    """
    # BCDB schema backing this config object; `secret_` is mirrored to
    # the plain attribute `self.secret` in _init below.
    _SCHEMATEXT = """
    @url = jumpscale.oauth.client
    name* = "" (S)
    addr = "" (S)
    accesstokenaddr = "" (S)
    client_id = "" (S)
    secret_ = "" (S)
    scope = "" (S)
    redirect_url = "" (S)
    user_info_url = "" (S)
    logout_url = "" (S)
    client_instance = "github" (S)
    """
    def _init(self):
        # _init is the JSBaseConfigClass initialization hook (framework
        # convention, not a typo for __init__ here).
        # NOTE(review): most of these are self-assignments of schema-backed
        # properties and look redundant — verify they are needed to
        # materialize the attributes.
        self.addr = self.addr
        self.accesstokenaddr = self.accesstokenaddr
        self.client_id = self.client_id
        self.secret = self.secret_
        self.scope = self.scope
        self.redirect_url = self.redirect_url
        self.user_info_url = self.user_info_url
        self.logout_url = self.logout_url
        self.client_instance = self.client_instance
        self._client = None
        self.instance = "main"
    @property
    def client(self):
        # lazily build and cache the concrete OAuth backend
        if self._client:
            return self._client
        if self.client_instance in ("itsyouonline", "itsyou.online"):
            self._client = ItsYouOnline(
                self.addr,
                self.accesstokenaddr,
                self.client_id,
                self.secret,
                self.scope,
                self.redirect_url,
                self.user_info_url,
                self.logout_url,
                self.instance,
            )
        else:
            # generic authorization-code flow (GitHub-style endpoints)
            self._client = OauthInstance(
                self.addr,
                self.accesstokenaddr,
                self.client_id,
                self.secret,
                self.scope,
                self.redirect_url,
                self.user_info_url,
                self.logout_url,
                self.instance,
            )
        return self._client
class AuthError(Exception):
    """Raised when an OAuth flow fails (token exchange, scope check or
    user-info fetch).

    The previous no-op ``_init`` method was a typo for ``__init__`` and was
    never invoked; Exception's own constructor already stores the message,
    so it has been removed.
    """
class UserInfo(object):
    """Normalized user identity produced by the OAuth backends.

    Bug fix: the constructor was named ``_init`` instead of ``__init__``,
    so ``UserInfo(username, emailaddress, groups)`` — as called by
    ``getUserInfo`` — raised ``TypeError: object() takes no arguments``.
    """

    def __init__(self, username, emailaddress, groups):
        self.username = username          # provider login / username
        self.emailaddress = emailaddress  # primary e-mail address
        self.groups = groups              # group names, e.g. ["user"]
class OauthInstance:
    """Minimal OAuth2 authorization-code-flow client.

    NOTE(review): the error paths below call ``self._log_error``, which is
    not defined on this class — a failing request would raise
    AttributeError before the intended AuthError. Confirm whether a logging
    mixin/base class is expected.
    """
    def __init__(
        self, addr, accesstokenaddr, client_id, secret, scope, redirect_url, user_info_url, logout_url, instance
    ):
        # `instance` is accepted but never stored (unused here)
        if not addr:
            raise RuntimeError("Failed to get oauth instance, no address provided")
        else:
            self.addr = addr
        self.client_id = client_id
        self.scope = scope
        self.redirect_url = redirect_url
        self.accesstokenaddress = accesstokenaddr
        self.secret = secret
        self.user_info_url = user_info_url
        self.logout_url = logout_url
        # random CSRF "state" token for the authorization round-trip.
        # NOTE(review): `random` is not cryptographically secure; the
        # `secrets` module would be preferable for this token.
        self.state = "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(30))
    @property
    def url(self):
        """Authorization URL to redirect the user's browser to."""
        params = {
            "client_id": self.client_id,
            "redirect_uri": self.redirect_url,
            "state": self.state,
            "response_type": "code",
        }
        if self.scope:
            params.update({"scope": self.scope})
        return "%s?%s" % (self.addr, urllib.parse.urlencode(params))
    def getAccessToken(self, code, state):
        """Exchange the authorization `code` for an access-token dict.

        Raises AuthError when the provider reports an error.
        """
        payload = {
            "code": code,
            "client_id": self.client_id,
            "client_secret": self.secret,
            "redirect_uri": self.redirect_url,
            "grant_type": "authorization_code",
            "state": state,
        }
        result = requests.post(self.accesstokenaddress, data=payload, headers={"Accept": "application/json"})
        # NOTE(review): a non-ok response whose body lacks an "error" key
        # would raise KeyError here instead of AuthError.
        if not result.ok or "error" in result.json():
            msg = result.json()["error"]
            self._log_error(msg)
            raise AuthError(msg)
        return result.json()
    def getUserInfo(self, accesstoken):
        """Fetch the user profile with the token and return a UserInfo.

        The response keys ("login", "email") presumably match GitHub's
        /user endpoint — the default backend; verify for other providers.
        """
        params = {"access_token": accesstoken["access_token"]}
        userinforesp = requests.get(self.user_info_url, params=params)
        if not userinforesp.ok:
            msg = "Failed to get user details"
            self._log_error(msg)
            raise AuthError(msg)
        userinfo = userinforesp.json()
        return UserInfo(userinfo["login"], userinfo["email"], ["user"])
class ItsYouOnline(OauthInstance):
    """ItsYou.online OAuth backend that uses JWTs instead of opaque tokens."""
    def __init__(
        self, addr, accesstokenaddr, client_id, secret, scope, redirect_url, user_info_url, logout_url, instance
    ):
        OauthInstance.__init__(
            self, addr, accesstokenaddr, client_id, secret, scope, redirect_url, user_info_url, logout_url, instance
        )
    def getAccessToken(self):
        # NOTE(review): overrides the base method with a different signature
        # (no code/state arguments) — callers must know which backend they
        # are holding.
        return j.clients.itsyouonline.jwt_get(self.client_id, self.secret)
    def getUserInfo(self, accesstoken):
        """Check scopes in the JWT and build a UserInfo with group names.

        NOTE(review): claims are read without signature verification
        (get_unverified_claims) — the JWT must come from a trusted channel.
        """
        import jose
        import jose.jwt
        jwt = accesstoken
        headers = {"Authorization": "bearer %s" % jwt}
        jwtdata = jose.jwt.get_unverified_claims(jwt)
        scopes = jwtdata["scope"]
        requestedscopes = set(self.scope.split(","))
        # every requested scope must be present in the token
        if set(jwtdata["scope"]).intersection(requestedscopes) != requestedscopes:
            msg = "Failed to get the requested scope for %s" % self.client_id
            raise AuthError(msg)
        username = jwtdata["username"]
        userinfourl = self.user_info_url.rstrip("/") + "/%s/info" % username
        userinforesp = requests.get(userinfourl, headers=headers)
        if not userinforesp.ok:
            msg = "Failed to get user details"
            raise AuthError(msg)
        # "user:memberof:<org>" scopes become group names (last dotted part)
        groups = ["user"]
        for scope in scopes:
            parts = scope.split(":")
            if len(parts) == 3 and parts[:2] == ["user", "memberof"]:
                groups.append(parts[-1].split(".")[-1])
        userinfo = userinforesp.json()
        return UserInfo(userinfo["username"], userinfo["emailaddresses"][0]["emailaddress"], groups)
| 31.978261 | 116 | 0.579708 | import urllib.request
import urllib.parse
import urllib.error
import string
import requests
import time
import random
from Jumpscale import j
JSConfigClient = j.application.JSBaseConfigClass
class OauthClient(JSConfigClient):
_SCHEMATEXT = """
@url = jumpscale.oauth.client
name* = "" (S)
addr = "" (S)
accesstokenaddr = "" (S)
client_id = "" (S)
secret_ = "" (S)
scope = "" (S)
redirect_url = "" (S)
user_info_url = "" (S)
logout_url = "" (S)
client_instance = "github" (S)
"""
def _init(self):
self.addr = self.addr
self.accesstokenaddr = self.accesstokenaddr
self.client_id = self.client_id
self.secret = self.secret_
self.scope = self.scope
self.redirect_url = self.redirect_url
self.user_info_url = self.user_info_url
self.logout_url = self.logout_url
self.client_instance = self.client_instance
self._client = None
self.instance = "main"
@property
def client(self):
if self._client:
return self._client
if self.client_instance in ("itsyouonline", "itsyou.online"):
self._client = ItsYouOnline(
self.addr,
self.accesstokenaddr,
self.client_id,
self.secret,
self.scope,
self.redirect_url,
self.user_info_url,
self.logout_url,
self.instance,
)
else:
self._client = OauthInstance(
self.addr,
self.accesstokenaddr,
self.client_id,
self.secret,
self.scope,
self.redirect_url,
self.user_info_url,
self.logout_url,
self.instance,
)
return self._client
class AuthError(Exception):
def _init(self):
pass
class UserInfo(object):
def _init(self, username, emailaddress, groups):
self.username = username
self.emailaddress = emailaddress
self.groups = groups
class OauthInstance:
def __init__(
self, addr, accesstokenaddr, client_id, secret, scope, redirect_url, user_info_url, logout_url, instance
):
if not addr:
raise RuntimeError("Failed to get oauth instance, no address provided")
else:
self.addr = addr
self.client_id = client_id
self.scope = scope
self.redirect_url = redirect_url
self.accesstokenaddress = accesstokenaddr
self.secret = secret
self.user_info_url = user_info_url
self.logout_url = logout_url
self.state = "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(30))
@property
def url(self):
params = {
"client_id": self.client_id,
"redirect_uri": self.redirect_url,
"state": self.state,
"response_type": "code",
}
if self.scope:
params.update({"scope": self.scope})
return "%s?%s" % (self.addr, urllib.parse.urlencode(params))
def getAccessToken(self, code, state):
payload = {
"code": code,
"client_id": self.client_id,
"client_secret": self.secret,
"redirect_uri": self.redirect_url,
"grant_type": "authorization_code",
"state": state,
}
result = requests.post(self.accesstokenaddress, data=payload, headers={"Accept": "application/json"})
if not result.ok or "error" in result.json():
msg = result.json()["error"]
self._log_error(msg)
raise AuthError(msg)
return result.json()
def getUserInfo(self, accesstoken):
params = {"access_token": accesstoken["access_token"]}
userinforesp = requests.get(self.user_info_url, params=params)
if not userinforesp.ok:
msg = "Failed to get user details"
self._log_error(msg)
raise AuthError(msg)
userinfo = userinforesp.json()
return UserInfo(userinfo["login"], userinfo["email"], ["user"])
class ItsYouOnline(OauthInstance):
def __init__(
self, addr, accesstokenaddr, client_id, secret, scope, redirect_url, user_info_url, logout_url, instance
):
OauthInstance.__init__(
self, addr, accesstokenaddr, client_id, secret, scope, redirect_url, user_info_url, logout_url, instance
)
def getAccessToken(self):
return j.clients.itsyouonline.jwt_get(self.client_id, self.secret)
def getUserInfo(self, accesstoken):
import jose
import jose.jwt
jwt = accesstoken
headers = {"Authorization": "bearer %s" % jwt}
jwtdata = jose.jwt.get_unverified_claims(jwt)
scopes = jwtdata["scope"]
requestedscopes = set(self.scope.split(","))
if set(jwtdata["scope"]).intersection(requestedscopes) != requestedscopes:
msg = "Failed to get the requested scope for %s" % self.client_id
raise AuthError(msg)
username = jwtdata["username"]
userinfourl = self.user_info_url.rstrip("/") + "/%s/info" % username
userinforesp = requests.get(userinfourl, headers=headers)
if not userinforesp.ok:
msg = "Failed to get user details"
raise AuthError(msg)
groups = ["user"]
for scope in scopes:
parts = scope.split(":")
if len(parts) == 3 and parts[:2] == ["user", "memberof"]:
groups.append(parts[-1].split(".")[-1])
userinfo = userinforesp.json()
return UserInfo(userinfo["username"], userinfo["emailaddresses"][0]["emailaddress"], groups)
| true | true |
f7265773512670aeae3bd3c088b90f7348f9d8d0 | 6,363 | py | Python | dump_teacher_hiddens.py | ChenRocks/Distill-BERT-Textgen | a3b0b22ce16febc4d3ffdbd8791ea3374110a892 | [
"MIT"
] | 111 | 2020-05-05T04:34:10.000Z | 2022-02-20T17:04:56.000Z | dump_teacher_hiddens.py | ChenRocks/Distill-BERT-Textgen | a3b0b22ce16febc4d3ffdbd8791ea3374110a892 | [
"MIT"
] | 5 | 2020-06-06T12:45:52.000Z | 2021-03-16T13:22:37.000Z | dump_teacher_hiddens.py | ChenRocks/Distill-BERT-Textgen | a3b0b22ce16febc4d3ffdbd8791ea3374110a892 | [
"MIT"
] | 17 | 2020-05-07T07:43:05.000Z | 2022-01-19T11:33:33.000Z | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
precompute hidden states of CMLM teacher to speedup KD training
"""
import argparse
import io
import os
import shelve
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from pytorch_pretrained_bert import BertTokenizer
from toolz.sandbox import unzip
from cmlm.model import BertForSeq2seq
from cmlm.data import convert_token_to_bert, CLS, SEP, MASK
def tensor_dumps(tensor):
    """Serialize *tensor* to raw bytes in .npy format (float16, no pickle)."""
    buf = io.BytesIO()
    array = tensor.cpu().numpy().astype(np.float16)
    np.save(buf, array, allow_pickle=False)
    return buf.getvalue()
def gather_hiddens(hiddens, masks):
    """Stack, one row per masked time step, the hidden vectors selected by
    each column of `masks`.

    hiddens: (batch, seq, dim); masks: (batch, seq). Columns with no masked
    position are skipped; every remaining column must select the same number
    of elements for the final stack to succeed.
    """
    gathered = []
    for step in range(hiddens.size(1)):
        col = masks[:, step:step + 1]
        if not col.any():
            continue
        hid = hiddens[:, step:step + 1, :]
        gathered.append(hid.masked_select(col.unsqueeze(-1).expand_as(hid)))
    return torch.stack(gathered, dim=0)
class BertSampleDataset(Dataset):
    """Dataset over a shelve corpus of {'src': ..., 'tgt': ...} examples.

    Keeps only examples that fit BERT's 512-token window (src + tgt plus
    the 3 special tokens [CLS]/[SEP]/[SEP]) and yields the masked variants
    produced by `convert_example`.
    """

    def __init__(self, corpus_path, tokenizer, num_samples=7):
        # NOTE(review): the shelve handle is opened read-only and never
        # closed; relies on process exit for cleanup.
        self.db = shelve.open(corpus_path, 'r')
        self.ids = []
        for i, ex in self.db.items():
            # +3 for the [CLS] src [SEP] tgt [SEP] specials
            if len(ex['src']) + len(ex['tgt']) + 3 <= 512:
                self.ids.append(i)
        self.toker = tokenizer
        self.num_samples = num_samples

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, i):
        # returns (example_id, input_ids, token_ids, masks)
        id_ = self.ids[i]
        example = self.db[id_]
        features = convert_example(example['src'], example['tgt'],
                                   self.toker, self.num_samples)
        return (id_, ) + features
def convert_example(src, tgt, toker, num_samples):
    """Build `num_samples` masked copies of one (src, tgt) pair.

    Target positions are partitioned round-robin across the samples so that
    every target token is masked in exactly one sample; source tokens are
    never masked.

    Returns (input_ids, token_ids, masks), each of shape
    (num_samples, len(src) + 2 + len(tgt) + 1) — the +1 is the trailing
    [SEP] appended to tgt below.
    """
    src = [convert_token_to_bert(tok) for tok in src]
    tgt = [convert_token_to_bert(tok) for tok in tgt] + [SEP]
    # build the masks (deterministic round-robin partition of tgt positions)
    tgt_len = len(tgt)
    if tgt_len <= num_samples:
        # fewer target tokens than samples: one mask per token (identity)
        masks = torch.eye(tgt_len).byte()
        num_samples = tgt_len
    else:
        # sample i masks positions i, i+num_samples, i+2*num_samples, ...
        mask_inds = [list(range(i, tgt_len, num_samples))
                     for i in range(num_samples)]
        masks = torch.zeros(num_samples, tgt_len).byte()
        for i, indices in enumerate(mask_inds):
            for j in indices:
                masks.data[i, j] = 1
    # every target position must be masked exactly once across all samples
    assert (masks.sum(dim=0) != torch.ones(tgt_len).long()).sum().item() == 0
    assert masks.sum().item() == tgt_len
    # prepend zero columns for [CLS] + src + [SEP], which are never masked
    masks = torch.cat([torch.zeros(num_samples, len(src)+2).byte(), masks],
                      dim=1)
    # make BERT inputs: replicate the ids per sample, then apply each mask
    input_ids = toker.convert_tokens_to_ids([CLS] + src + [SEP] + tgt)
    mask_id = toker.convert_tokens_to_ids([MASK])[0]
    input_ids = torch.tensor([input_ids for _ in range(num_samples)])
    input_ids.data.masked_fill_(masks, mask_id)
    # segment ids: 0 over [CLS]+src+[SEP], 1 over the target
    token_ids = torch.tensor([[0] * (len(src) + 2) + [1] * len(tgt)
                              for _ in range(num_samples)])
    return input_ids, token_ids, masks
def batch_features(features):
    """Collate (id, input_ids, token_ids, masks) tuples into one flat batch.

    Per-example sample blocks are stacked along dim 0 and right-padded with
    zeros to the longest sequence; `attn_mask` is 1 over real tokens and 0
    over padding. The per-example mask tensors are returned un-padded.

    Returns (ids, input_ids, token_ids, attn_mask, all_masks).
    """
    # Transpose the list of tuples into per-field lists. The builtin
    # zip(*...) replaces the former toolz.sandbox.unzip dependency — it
    # produces the same column tuples without a third-party import.
    ids, all_input_ids, all_token_ids, all_masks = (
        list(col) for col in zip(*features))
    batch_size = sum(input_ids.size(0) for input_ids in all_input_ids)
    max_len = max(input_ids.size(1) for input_ids in all_input_ids)
    input_ids = torch.zeros(batch_size, max_len).long()
    token_ids = torch.zeros(batch_size, max_len).long()
    attn_mask = torch.zeros(batch_size, max_len).long()
    i = 0
    for inp, tok in zip(all_input_ids, all_token_ids):
        block, len_ = inp.size()
        input_ids.data[i: i+block, :len_] = inp.data
        token_ids.data[i: i+block, :len_] = tok.data
        attn_mask.data[i: i+block, :len_].fill_(1)
        i += block
    return ids, input_ids, token_ids, attn_mask, all_masks
def process_batch(batch, bert, toker, num_samples=7):
    """Run one collated batch through BERT and gather the hidden states at
    the masked positions.

    Returns one tensor per example, as produced by `gather_hiddens`.
    NOTE(review): `toker` and `num_samples` are unused in this body; kept
    only for the call-site signature.
    """
    input_ids, token_ids, attn_mask, all_masks = batch
    input_ids = input_ids.cuda()
    token_ids = token_ids.cuda()
    attn_mask = attn_mask.cuda()
    # final encoder layer only, then the MLM transform (pre-decoder hiddens)
    hiddens, _ = bert.bert(input_ids, token_ids, attn_mask,
                           output_all_encoded_layers=False)
    hiddens = bert.cls.predictions.transform(hiddens)
    i = 0
    outputs = []
    for masks in all_masks:
        # slice this example's rows back out of the flat batch,
        # trimmed to its un-padded length
        block, len_ = masks.size()
        hids = hiddens[i:i+block, :len_, :]
        masks = masks.cuda()
        outputs.append(gather_hiddens(hids, masks))
        i += block
    return outputs
def build_db_batched(corpus, out_db, bert, toker, batch_size=8):
    """Compute teacher hidden states for every example in `corpus` and
    store them (as float16 .npy bytes) in `out_db`, keyed by example id."""
    dataset = BertSampleDataset(corpus, toker)
    loader = DataLoader(dataset, batch_size=batch_size,
                        num_workers=4, collate_fn=batch_features)
    with tqdm(desc='computing BERT features', total=len(dataset)) as pbar:
        for ids, *batch in loader:
            outputs = process_batch(batch, bert, toker)
            for id_, output in zip(ids, outputs):
                out_db[id_] = tensor_dumps(output)
            pbar.update(len(ids))
def main(opts):
    """Load the fine-tuned CMLM BERT, dump its output projection layer and
    precompute hidden states for the whole dataset into a shelve DB."""
    # load BERT
    state_dict = torch.load(opts.ckpt)
    # output vocab size comes from the checkpoint, not the base config
    vsize = state_dict['cls.predictions.decoder.weight'].size(0)
    bert = BertForSeq2seq.from_pretrained(opts.bert).eval().half().cuda()
    bert.update_output_layer_by_size(vsize)
    bert.load_state_dict(state_dict)
    toker = BertTokenizer.from_pretrained(opts.bert,
                                          do_lower_case='uncased' in opts.bert)
    # save the final projection layer so KD training can rebuild logits
    # from the stored hidden states
    linear = torch.nn.Linear(bert.config.hidden_size, bert.config.vocab_size)
    linear.weight.data = state_dict['cls.predictions.decoder.weight']
    linear.bias.data = state_dict['cls.predictions.bias']
    # NOTE(review): fails if the output directory already exists —
    # presumably intentional to avoid clobbering a previous dump
    os.makedirs(opts.output)
    torch.save(linear, f'{opts.output}/linear.pt')
    # create DB (no_grad: inference only, no autograd buffers)
    with shelve.open(f'{opts.output}/db') as out_db, \
            torch.no_grad():
        build_db_batched(opts.db, out_db, bert, toker)
if __name__ == '__main__':
    # CLI entry point: precompute CMLM teacher hidden states for KD training
    parser = argparse.ArgumentParser()
    parser.add_argument('--bert', required=True,
                        choices=['bert-base-uncased',
                                 'bert-base-multilingual-cased'],
                        help='BERT model')
    parser.add_argument('--ckpt', required=True, help='BERT checkpoint')
    parser.add_argument('--db', required=True, help='dataset to compute')
    parser.add_argument('--output', required=True, help='path to dump output')
    args = parser.parse_args()
    main(args)
| 35.949153 | 79 | 0.636964 | import argparse
import io
import os
import shelve
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from pytorch_pretrained_bert import BertTokenizer
from toolz.sandbox import unzip
from cmlm.model import BertForSeq2seq
from cmlm.data import convert_token_to_bert, CLS, SEP, MASK
def tensor_dumps(tensor):
with io.BytesIO() as writer:
np.save(writer, tensor.cpu().numpy().astype(np.float16),
allow_pickle=False)
dump = writer.getvalue()
return dump
def gather_hiddens(hiddens, masks):
outputs = []
for hid, mask in zip(hiddens.split(1, dim=1), masks.split(1, dim=1)):
if mask.sum().item() == 0:
continue
mask = mask.unsqueeze(-1).expand_as(hid)
outputs.append(hid.masked_select(mask))
output = torch.stack(outputs, dim=0)
return output
class BertSampleDataset(Dataset):
def __init__(self, corpus_path, tokenizer, num_samples=7):
self.db = shelve.open(corpus_path, 'r')
self.ids = []
for i, ex in self.db.items():
if len(ex['src']) + len(ex['tgt']) + 3 <= 512:
self.ids.append(i)
self.toker = tokenizer
self.num_samples = num_samples
def __len__(self):
return len(self.ids)
def __getitem__(self, i):
id_ = self.ids[i]
example = self.db[id_]
features = convert_example(example['src'], example['tgt'],
self.toker, self.num_samples)
return (id_, ) + features
def convert_example(src, tgt, toker, num_samples):
src = [convert_token_to_bert(tok) for tok in src]
tgt = [convert_token_to_bert(tok) for tok in tgt] + [SEP]
tgt_len = len(tgt)
if tgt_len <= num_samples:
masks = torch.eye(tgt_len).byte()
num_samples = tgt_len
else:
mask_inds = [list(range(i, tgt_len, num_samples))
for i in range(num_samples)]
masks = torch.zeros(num_samples, tgt_len).byte()
for i, indices in enumerate(mask_inds):
for j in indices:
masks.data[i, j] = 1
assert (masks.sum(dim=0) != torch.ones(tgt_len).long()).sum().item() == 0
assert masks.sum().item() == tgt_len
masks = torch.cat([torch.zeros(num_samples, len(src)+2).byte(), masks],
dim=1)
input_ids = toker.convert_tokens_to_ids([CLS] + src + [SEP] + tgt)
mask_id = toker.convert_tokens_to_ids([MASK])[0]
input_ids = torch.tensor([input_ids for _ in range(num_samples)])
input_ids.data.masked_fill_(masks, mask_id)
token_ids = torch.tensor([[0] * (len(src) + 2) + [1] * len(tgt)
for _ in range(num_samples)])
return input_ids, token_ids, masks
def batch_features(features):
ids, all_input_ids, all_token_ids, all_masks = map(list, unzip(features))
batch_size = sum(input_ids.size(0) for input_ids in all_input_ids)
max_len = max(input_ids.size(1) for input_ids in all_input_ids)
input_ids = torch.zeros(batch_size, max_len).long()
token_ids = torch.zeros(batch_size, max_len).long()
attn_mask = torch.zeros(batch_size, max_len).long()
i = 0
for inp, tok in zip(all_input_ids, all_token_ids):
block, len_ = inp.size()
input_ids.data[i: i+block, :len_] = inp.data
token_ids.data[i: i+block, :len_] = tok.data
attn_mask.data[i: i+block, :len_].fill_(1)
i += block
return ids, input_ids, token_ids, attn_mask, all_masks
def process_batch(batch, bert, toker, num_samples=7):
input_ids, token_ids, attn_mask, all_masks = batch
input_ids = input_ids.cuda()
token_ids = token_ids.cuda()
attn_mask = attn_mask.cuda()
hiddens, _ = bert.bert(input_ids, token_ids, attn_mask,
output_all_encoded_layers=False)
hiddens = bert.cls.predictions.transform(hiddens)
i = 0
outputs = []
for masks in all_masks:
block, len_ = masks.size()
hids = hiddens[i:i+block, :len_, :]
masks = masks.cuda()
outputs.append(gather_hiddens(hids, masks))
i += block
return outputs
def build_db_batched(corpus, out_db, bert, toker, batch_size=8):
dataset = BertSampleDataset(corpus, toker)
loader = DataLoader(dataset, batch_size=batch_size,
num_workers=4, collate_fn=batch_features)
with tqdm(desc='computing BERT features', total=len(dataset)) as pbar:
for ids, *batch in loader:
outputs = process_batch(batch, bert, toker)
for id_, output in zip(ids, outputs):
out_db[id_] = tensor_dumps(output)
pbar.update(len(ids))
def main(opts):
state_dict = torch.load(opts.ckpt)
vsize = state_dict['cls.predictions.decoder.weight'].size(0)
bert = BertForSeq2seq.from_pretrained(opts.bert).eval().half().cuda()
bert.update_output_layer_by_size(vsize)
bert.load_state_dict(state_dict)
toker = BertTokenizer.from_pretrained(opts.bert,
do_lower_case='uncased' in opts.bert)
linear = torch.nn.Linear(bert.config.hidden_size, bert.config.vocab_size)
linear.weight.data = state_dict['cls.predictions.decoder.weight']
linear.bias.data = state_dict['cls.predictions.bias']
os.makedirs(opts.output)
torch.save(linear, f'{opts.output}/linear.pt')
with shelve.open(f'{opts.output}/db') as out_db, \
torch.no_grad():
build_db_batched(opts.db, out_db, bert, toker)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--bert', required=True,
choices=['bert-base-uncased',
'bert-base-multilingual-cased'],
help='BERT model')
parser.add_argument('--ckpt', required=True, help='BERT checkpoint')
parser.add_argument('--db', required=True, help='dataset to compute')
parser.add_argument('--output', required=True, help='path to dump output')
args = parser.parse_args()
main(args)
| true | true |
f72658a22e37eca598653ef574062e41fecd324c | 1,652 | py | Python | test/vanilla/Expected/AcceptanceTests/NonStringEnums/nonstringenums/models/_non_string_enums_client_enums.py | tasherif-msft/autorest.python | 5b0121bcfa802aedaeda36990e8bcaa2b7e26b14 | [
"MIT"
] | null | null | null | test/vanilla/Expected/AcceptanceTests/NonStringEnums/nonstringenums/models/_non_string_enums_client_enums.py | tasherif-msft/autorest.python | 5b0121bcfa802aedaeda36990e8bcaa2b7e26b14 | [
"MIT"
] | null | null | null | test/vanilla/Expected/AcceptanceTests/NonStringEnums/nonstringenums/models/_non_string_enums_client_enums.py | tasherif-msft/autorest.python | 5b0121bcfa802aedaeda36990e8bcaa2b7e26b14 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class FloatEnum(with_metaclass(_CaseInsensitiveEnumMeta, float, Enum)):
    """List of float enums (AutoRest-generated).

    Members are looked up case-insensitively via _CaseInsensitiveEnumMeta.
    """
    TWO_HUNDRED4 = 200.4
    FOUR_HUNDRED_THREE4 = 403.4
    FOUR_HUNDRED_FIVE3 = 405.3
    FOUR_HUNDRED_SIX2 = 406.2
    FOUR_HUNDRED_TWENTY_NINE1 = 429.1
class IntEnum(with_metaclass(_CaseInsensitiveEnumMeta, int, Enum)):
    """List of integer enums (AutoRest-generated).

    Members are looked up case-insensitively via _CaseInsensitiveEnumMeta.
    """
    TWO_HUNDRED = 200
    FOUR_HUNDRED_THREE = 403
    FOUR_HUNDRED_FIVE = 405
    FOUR_HUNDRED_SIX = 406
    FOUR_HUNDRED_TWENTY_NINE = 429
| 34.416667 | 94 | 0.640436 |
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class FloatEnum(with_metaclass(_CaseInsensitiveEnumMeta, float, Enum)):
TWO_HUNDRED4 = 200.4
FOUR_HUNDRED_THREE4 = 403.4
FOUR_HUNDRED_FIVE3 = 405.3
FOUR_HUNDRED_SIX2 = 406.2
FOUR_HUNDRED_TWENTY_NINE1 = 429.1
class IntEnum(with_metaclass(_CaseInsensitiveEnumMeta, int, Enum)):
TWO_HUNDRED = 200
FOUR_HUNDRED_THREE = 403
FOUR_HUNDRED_FIVE = 405
FOUR_HUNDRED_SIX = 406
FOUR_HUNDRED_TWENTY_NINE = 429
| true | true |
f72658a883e0ec8dce8892d634b7105d4bac6c3d | 354 | py | Python | algorithm/challenges/geanytest.py | rishabhiitbhu/hackerrank | acc300851c81a29472177f15fd8b56ebebe853ea | [
"MIT"
] | null | null | null | algorithm/challenges/geanytest.py | rishabhiitbhu/hackerrank | acc300851c81a29472177f15fd8b56ebebe853ea | [
"MIT"
] | null | null | null | algorithm/challenges/geanytest.py | rishabhiitbhu/hackerrank | acc300851c81a29472177f15fd8b56ebebe853ea | [
"MIT"
] | 1 | 2020-01-30T06:47:09.000Z | 2020-01-30T06:47:09.000Z | t = int(input().strip())
for i in range(t):
n,c,m = input().strip().split(' ')
n,c,m = [int(n),int(c),int(m)]
tot_choc=n//c
wrap=tot_choc
#print(tot_choc, wrap)
while wrap >= m:
extra_choc = wrap//m
wrap-=m*extra_choc
wrap+=extra_choc
tot_choc+= extra_choc
print(tot_choc)
| 25.285714 | 39 | 0.516949 | t = int(input().strip())
for i in range(t):
n,c,m = input().strip().split(' ')
n,c,m = [int(n),int(c),int(m)]
tot_choc=n//c
wrap=tot_choc
while wrap >= m:
extra_choc = wrap//m
wrap-=m*extra_choc
wrap+=extra_choc
tot_choc+= extra_choc
print(tot_choc)
| true | true |
f72658e3bad5fe570677617b549a92f97f742c6b | 8,745 | py | Python | selfdrive/car/hyundai/carcontroller.py | janpoo6427/openpilot_xx979xx | 189764c2ad1d6610165876a0462ba0eb896ac500 | [
"MIT"
] | null | null | null | selfdrive/car/hyundai/carcontroller.py | janpoo6427/openpilot_xx979xx | 189764c2ad1d6610165876a0462ba0eb896ac500 | [
"MIT"
] | null | null | null | selfdrive/car/hyundai/carcontroller.py | janpoo6427/openpilot_xx979xx | 189764c2ad1d6610165876a0462ba0eb896ac500 | [
"MIT"
] | 2 | 2020-09-27T20:46:34.000Z | 2020-10-15T01:01:57.000Z | from cereal import car
from common.numpy_fast import clip
from selfdrive.car import apply_std_steer_torque_limits
from selfdrive.car.hyundai.hyundaican import create_lkas11, create_clu11, create_lfa_mfa, \
create_scc11, create_scc12, create_mdps12, \
create_scc13, create_scc14
from selfdrive.car.hyundai.values import Buttons, SteerLimitParams, CAR
from opendbc.can.packer import CANPacker
from selfdrive.config import Conversions as CV
VisualAlert = car.CarControl.HUDControl.VisualAlert
min_set_speed = 30 * CV.KPH_TO_MS
# Accel limits
ACCEL_HYST_GAP = 0.02  # don't change accel command for small oscillations within this value
ACCEL_MAX = 1.5  # 1.5 m/s2
ACCEL_MIN = -3.0  # 3 m/s2
ACCEL_SCALE = max(ACCEL_MAX, -ACCEL_MIN)


def accel_hysteresis(accel, accel_steady):
  """Hold accel_steady in place while accel stays inside the hysteresis band.

  Returns the (possibly updated) steady value twice: as the accel command to
  use this cycle and as the state to carry into the next one.
  """
  band_top = accel_steady + ACCEL_HYST_GAP
  band_bottom = accel_steady - ACCEL_HYST_GAP
  if accel > band_top:
    accel_steady = accel - ACCEL_HYST_GAP
  elif accel < band_bottom:
    accel_steady = accel + ACCEL_HYST_GAP
  return accel_steady, accel_steady
def process_hud_alert(enabled, fingerprint, visual_alert, left_lane,
                      right_lane, left_lane_depart, right_lane_depart, button_on):
  """Translate openpilot state into LKAS11 HUD fields.

  Returns (sys_warning, sys_state, left_lane_warning, right_lane_warning).

  Fix: the original wrote a dead local (`lane_visible = 0`) from
  `button_on` that was never read; the dead code is removed. `button_on`
  is kept so the call-site signature is unchanged, but it no longer (and
  never did) affect the output.
  """
  # steering-required alerts map onto the cluster's hands-on warning
  sys_warning = (visual_alert == VisualAlert.steerRequired)

  # SysState: 1 when no lane is detected; 3 when engaged or warning,
  # 4 when both lanes are seen but not engaged; 5/6 for left/right only
  sys_state = 1
  if left_lane and right_lane or sys_warning:  # HUD alert only displays when LKAS status is active
    sys_state = 3 if enabled or sys_warning else 4
  elif left_lane:
    sys_state = 5
  elif right_lane:
    sys_state = 6

  # Genesis models use a different lane-departure warning encoding (1 vs 2)
  left_lane_warning = 0
  right_lane_warning = 0
  genesis_cars = (CAR.HYUNDAI_GENESIS, CAR.GENESIS_G90, CAR.GENESIS_G80)
  if left_lane_depart:
    left_lane_warning = 1 if fingerprint in genesis_cars else 2
  if right_lane_depart:
    right_lane_warning = 1 if fingerprint in genesis_cars else 2
  return sys_warning, sys_state, left_lane_warning, right_lane_warning
class CarController():
  def __init__(self, dbc_name, CP, VM):
    # CP: car parameters; VM: vehicle model (unused here but part of the
    # standard CarController interface)
    self.car_fingerprint = CP.carFingerprint
    self.packer = CANPacker(dbc_name)
    self.accel_steady = 0           # hysteresis state for the accel command
    self.apply_steer_last = 0       # last torque sent, for rate limiting
    self.steer_rate_limited = False
    self.lkas11_cnt = 0             # LKAS11 rolling message counter
    self.scc12_cnt = 0              # SCC12 rolling message counter
    self.resume_cnt = 0
    self.last_resume_frame = 0
    self.last_lead_distance = 0
    self.turning_signal_timer = 0   # frames to keep steering disabled after a blinker
    self.lkas_button_on = True
    self.longcontrol = CP.openpilotLongitudinalControl
    self.scc_live = not CP.radarOffCan  # whether stock SCC is broadcasting
def update(self, enabled, CS, frame, actuators, pcm_cancel_cmd, visual_alert,
left_lane, right_lane, left_lane_depart, right_lane_depart, set_speed, lead_visible):
# *** compute control surfaces ***
# gas and brake
apply_accel = actuators.gas - actuators.brake
apply_accel, self.accel_steady = accel_hysteresis(apply_accel, self.accel_steady)
apply_accel = clip(apply_accel * ACCEL_SCALE, ACCEL_MIN, ACCEL_MAX)
# Steering Torque
new_steer = actuators.steer * SteerLimitParams.STEER_MAX
apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, SteerLimitParams)
self.steer_rate_limited = new_steer != apply_steer
# disable if steer angle reach 90 deg, otherwise mdps fault in some models
# temporarily disable steering when LKAS button off
lkas_active = enabled and abs(CS.out.steeringAngle) < 90. and self.lkas_button_on
# fix for Genesis hard fault at low speed
if CS.out.vEgo < 60 * CV.KPH_TO_MS and self.car_fingerprint == CAR.HYUNDAI_GENESIS and not CS.mdps_bus:
lkas_active = 0
# Disable steering while turning blinker on and speed below 60 kph
if CS.out.leftBlinker or CS.out.rightBlinker:
if self.car_fingerprint not in [CAR.KIA_OPTIMA, CAR.KIA_OPTIMA_H]:
self.turning_signal_timer = 100 # Disable for 1.0 Seconds after blinker turned off
elif CS.left_blinker_flash or CS.right_blinker_flash: # Optima has blinker flash signal only
self.turning_signal_timer = 100
if self.turning_indicator_alert: # set and clear by interface
lkas_active = 0
if self.turning_signal_timer > 0:
self.turning_signal_timer -= 1
if not lkas_active:
apply_steer = 0
self.apply_accel_last = apply_accel
self.apply_steer_last = apply_steer
sys_warning, sys_state, left_lane_warning, right_lane_warning =\
process_hud_alert(lkas_active, self.car_fingerprint, visual_alert,
left_lane, right_lane, left_lane_depart, right_lane_depart,
self.lkas_button_on)
clu11_speed = CS.clu11["CF_Clu_Vanz"]
enabled_speed = 38 if CS.is_set_speed_in_mph else 60
if clu11_speed > enabled_speed or not lkas_active:
enabled_speed = clu11_speed
if set_speed < min_set_speed:
set_speed = min_set_speed
set_speed *= CV.MS_TO_MPH if CS.is_set_speed_in_mph else CV.MS_TO_KPH
if frame == 0: # initialize counts from last received count signals
self.lkas11_cnt = CS.lkas11["CF_Lkas_MsgCount"]
self.scc12_cnt = CS.scc12["CR_VSM_Alive"] + 1 if not CS.no_radar else 0
#TODO: fix this
# self.prev_scc_cnt = CS.scc11["AliveCounterACC"]
# self.scc_update_frame = frame
# check if SCC is alive
# if frame % 7 == 0:
# if CS.scc11["AliveCounterACC"] == self.prev_scc_cnt:
# if frame - self.scc_update_frame > 20 and self.scc_live:
# self.scc_live = False
# else:
# self.scc_live = True
# self.prev_scc_cnt = CS.scc11["AliveCounterACC"]
# self.scc_update_frame = frame
self.prev_scc_cnt = CS.scc11["AliveCounterACC"]
self.lkas11_cnt = (self.lkas11_cnt + 1) % 0x10
self.scc12_cnt %= 0xF
can_sends = []
can_sends.append(create_lkas11(self.packer, frame, self.car_fingerprint, apply_steer, lkas_active,
CS.lkas11, sys_warning, sys_state, enabled, left_lane, right_lane,
left_lane_warning, right_lane_warning, 0))
if CS.mdps_bus or CS.scc_bus == 1: # send lkas11 bus 1 if mdps or scc is on bus 1
can_sends.append(create_lkas11(self.packer, frame, self.car_fingerprint, apply_steer, lkas_active,
CS.lkas11, sys_warning, sys_state, enabled, left_lane, right_lane,
left_lane_warning, right_lane_warning, 1))
if frame % 2 and CS.mdps_bus: # send clu11 to mdps if it is not on bus 0
can_sends.append(create_clu11(self.packer, frame, CS.mdps_bus, CS.clu11, Buttons.NONE, enabled_speed))
if pcm_cancel_cmd and self.longcontrol:
can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.CANCEL, clu11_speed))
elif CS.mdps_bus: # send mdps12 to LKAS to prevent LKAS error if no cancel cmd
can_sends.append(create_mdps12(self.packer, frame, CS.mdps12))
# send scc to car if longcontrol enabled and SCC not on bus 0 or ont live
if self.longcontrol and (CS.scc_bus or not self.scc_live) and frame % 2 == 0:
can_sends.append(create_scc12(self.packer, apply_accel, enabled, self.scc12_cnt, self.scc_live, CS.scc12))
can_sends.append(create_scc11(self.packer, frame, enabled, set_speed, lead_visible, self.scc_live, CS.scc11))
if CS.has_scc13 and frame % 20 == 0:
can_sends.append(create_scc13(self.packer, CS.scc13))
if CS.has_scc14:
can_sends.append(create_scc14(self.packer, enabled, CS.scc14))
self.scc12_cnt += 1
if CS.out.cruiseState.standstill:
# run only first time when the car stopped
if self.last_lead_distance == 0:
# get the lead distance from the Radar
self.last_lead_distance = CS.lead_distance
self.resume_cnt = 0
# when lead car starts moving, create 6 RES msgs
elif CS.lead_distance != self.last_lead_distance and (frame - self.last_resume_frame) > 5:
can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.RES_ACCEL, clu11_speed))
self.resume_cnt += 1
# interval after 6 msgs
if self.resume_cnt > 5:
self.last_resume_frame = frame
self.resume_cnt = 0
# reset lead distnce after the car starts moving
elif self.last_lead_distance != 0:
self.last_lead_distance = 0
# 20 Hz LFA MFA message
if frame % 5 == 0 and self.car_fingerprint in [CAR.SONATA, CAR.PALISADE, CAR.SONATA_H, CAR.SANTA_FE]:
can_sends.append(create_lfa_mfa(self.packer, frame, enabled))
return can_sends
| 42.658537 | 122 | 0.705432 | from cereal import car
from common.numpy_fast import clip
from selfdrive.car import apply_std_steer_torque_limits
from selfdrive.car.hyundai.hyundaican import create_lkas11, create_clu11, create_lfa_mfa, \
create_scc11, create_scc12, create_mdps12, \
create_scc13, create_scc14
from selfdrive.car.hyundai.values import Buttons, SteerLimitParams, CAR
from opendbc.can.packer import CANPacker
from selfdrive.config import Conversions as CV
VisualAlert = car.CarControl.HUDControl.VisualAlert
min_set_speed = 30 * CV.KPH_TO_MS
ACCEL_HYST_GAP = 0.02
ACCEL_MAX = 1.5 # 1.5 m/s2
ACCEL_MIN = -3.0 # 3 m/s2
ACCEL_SCALE = max(ACCEL_MAX, -ACCEL_MIN)
def accel_hysteresis(accel, accel_steady):
  """Apply a small hysteresis band to the longitudinal accel command.

  While *accel* stays within ACCEL_HYST_GAP of the previous steady value,
  the steady value is left untouched; once it escapes the band the steady
  value tracks *accel* offset by the gap.  The command returned is always
  the steady value, which suppresses small oscillations.
  """
  upper = accel_steady + ACCEL_HYST_GAP
  lower = accel_steady - ACCEL_HYST_GAP
  if accel > upper:
    accel_steady = accel - ACCEL_HYST_GAP
  elif accel < lower:
    accel_steady = accel + ACCEL_HYST_GAP
  # the command is clamped to the (possibly updated) steady value
  return accel_steady, accel_steady
def process_hud_alert(enabled, fingerprint, visual_alert, left_lane,
                      right_lane, left_lane_depart, right_lane_depart, button_on):
  """Map lane/steering state onto LKAS HUD signal values.

  Returns a tuple (sys_warning, sys_state, left_lane_warning,
  right_lane_warning) used to populate the LKAS11 HUD fields.
  """
  # steer-required visual alert maps to the "hands on wheel" HUD warning
  sys_warning = (visual_alert == VisualAlert.steerRequired)
  sys_state = 1
  # NOTE(review): the original set an unused local (`lane_visible = 0`) when
  # the LKAS button was off; it had no effect and was removed.  If the intent
  # was to blank the lane display when the button is off, that still needs
  # to be implemented.
  if left_lane and right_lane or sys_warning:
    # both lanes visible (or warning active): 3 = engaged, 4 = disengaged
    if enabled or sys_warning:
      sys_state = 3
    else:
      sys_state = 4
  elif left_lane:
    sys_state = 5
  elif right_lane:
    sys_state = 6
  # lane-departure warning value: 1 on Genesis-family cars, 2 on the rest
  left_lane_warning = 0
  right_lane_warning = 0
  if left_lane_depart:
    left_lane_warning = 1 if fingerprint in [CAR.HYUNDAI_GENESIS, CAR.GENESIS_G90, CAR.GENESIS_G80] else 2
  if right_lane_depart:
    right_lane_warning = 1 if fingerprint in [CAR.HYUNDAI_GENESIS, CAR.GENESIS_G90, CAR.GENESIS_G80] else 2
  return sys_warning, sys_state, left_lane_warning, right_lane_warning
class CarController():
  """Builds the per-frame CAN messages (LKAS11/SCC/CLU11/MDPS12) for Hyundai/Kia."""
  def __init__(self, dbc_name, CP, VM):
    self.car_fingerprint = CP.carFingerprint
    self.packer = CANPacker(dbc_name)
    # hysteresis state for the longitudinal accel command
    self.accel_steady = 0
    self.apply_steer_last = 0
    self.steer_rate_limited = False
    # rolling message counters for LKAS11 / SCC12
    self.lkas11_cnt = 0
    self.scc12_cnt = 0
    # resume-from-standstill bookkeeping
    self.resume_cnt = 0
    self.last_resume_frame = 0
    self.last_lead_distance = 0
    # frames remaining during which steering is disabled after a blinker
    self.turning_signal_timer = 0
    self.lkas_button_on = True
    self.longcontrol = CP.openpilotLongitudinalControl
    # SCC messages are produced by the car unless the radar is off-CAN
    self.scc_live = not CP.radarOffCan
  def update(self, enabled, CS, frame, actuators, pcm_cancel_cmd, visual_alert,
             left_lane, right_lane, left_lane_depart, right_lane_depart, set_speed, lead_visible):
    # longitudinal command: gas minus brake, hysteresis-filtered, scaled, clipped
    apply_accel = actuators.gas - actuators.brake
    apply_accel, self.accel_steady = accel_hysteresis(apply_accel, self.accel_steady)
    apply_accel = clip(apply_accel * ACCEL_SCALE, ACCEL_MIN, ACCEL_MAX)
    # Steering Torque
    new_steer = actuators.steer * SteerLimitParams.STEER_MAX
    apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, SteerLimitParams)
    self.steer_rate_limited = new_steer != apply_steer
    # disable if steer angle reach 90 deg, otherwise mdps fault in some models
    # temporarily disable steering when LKAS button off
    lkas_active = enabled and abs(CS.out.steeringAngle) < 90. and self.lkas_button_on
    # fix for Genesis hard fault at low speed
    if CS.out.vEgo < 60 * CV.KPH_TO_MS and self.car_fingerprint == CAR.HYUNDAI_GENESIS and not CS.mdps_bus:
      lkas_active = 0
    # Disable steering while turning blinker on and speed below 60 kph
    if CS.out.leftBlinker or CS.out.rightBlinker:
      if self.car_fingerprint not in [CAR.KIA_OPTIMA, CAR.KIA_OPTIMA_H]:
        self.turning_signal_timer = 100  # Disable for 1.0 Seconds after blinker turned off
    elif CS.left_blinker_flash or CS.right_blinker_flash:  # Optima has blinker flash signal only
      self.turning_signal_timer = 100
    if self.turning_indicator_alert:  # set and clear by interface
      lkas_active = 0
    if self.turning_signal_timer > 0:
      self.turning_signal_timer -= 1
    if not lkas_active:
      apply_steer = 0
    self.apply_accel_last = apply_accel
    self.apply_steer_last = apply_steer
    sys_warning, sys_state, left_lane_warning, right_lane_warning =\
      process_hud_alert(lkas_active, self.car_fingerprint, visual_alert,
                        left_lane, right_lane, left_lane_depart, right_lane_depart,
                        self.lkas_button_on)
    clu11_speed = CS.clu11["CF_Clu_Vanz"]
    # speed shown to the MDPS; 38 mph / 60 kph is the LKAS enable threshold
    enabled_speed = 38 if CS.is_set_speed_in_mph else 60
    if clu11_speed > enabled_speed or not lkas_active:
      enabled_speed = clu11_speed
    if set_speed < min_set_speed:
      set_speed = min_set_speed
    set_speed *= CV.MS_TO_MPH if CS.is_set_speed_in_mph else CV.MS_TO_KPH
    if frame == 0:  # initialize counts from last received count signals
      self.lkas11_cnt = CS.lkas11["CF_Lkas_MsgCount"]
      self.scc12_cnt = CS.scc12["CR_VSM_Alive"] + 1 if not CS.no_radar else 0
      self.prev_scc_cnt = CS.scc11["AliveCounterACC"]
    # advance the rolling alive counters
    self.lkas11_cnt = (self.lkas11_cnt + 1) % 0x10
    self.scc12_cnt %= 0xF
    can_sends = []
    can_sends.append(create_lkas11(self.packer, frame, self.car_fingerprint, apply_steer, lkas_active,
                                   CS.lkas11, sys_warning, sys_state, enabled, left_lane, right_lane,
                                   left_lane_warning, right_lane_warning, 0))
    if CS.mdps_bus or CS.scc_bus == 1:  # send lkas11 bus 1 if mdps or scc is on bus 1
      can_sends.append(create_lkas11(self.packer, frame, self.car_fingerprint, apply_steer, lkas_active,
                                     CS.lkas11, sys_warning, sys_state, enabled, left_lane, right_lane,
                                     left_lane_warning, right_lane_warning, 1))
    if frame % 2 and CS.mdps_bus:  # send clu11 to mdps if it is not on bus 0
      can_sends.append(create_clu11(self.packer, frame, CS.mdps_bus, CS.clu11, Buttons.NONE, enabled_speed))
    if pcm_cancel_cmd and self.longcontrol:
      can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.CANCEL, clu11_speed))
    elif CS.mdps_bus:  # send mdps12 to LKAS to prevent LKAS error if no cancel cmd
      can_sends.append(create_mdps12(self.packer, frame, CS.mdps12))
    # send scc to car if longcontrol enabled and SCC not on bus 0 or not live
    if self.longcontrol and (CS.scc_bus or not self.scc_live) and frame % 2 == 0:
      can_sends.append(create_scc12(self.packer, apply_accel, enabled, self.scc12_cnt, self.scc_live, CS.scc12))
      can_sends.append(create_scc11(self.packer, frame, enabled, set_speed, lead_visible, self.scc_live, CS.scc11))
      if CS.has_scc13 and frame % 20 == 0:
        can_sends.append(create_scc13(self.packer, CS.scc13))
      if CS.has_scc14:
        can_sends.append(create_scc14(self.packer, enabled, CS.scc14))
      self.scc12_cnt += 1
    if CS.out.cruiseState.standstill:
      # run only first time when the car stopped
      if self.last_lead_distance == 0:
        # get the lead distance from the Radar
        self.last_lead_distance = CS.lead_distance
        self.resume_cnt = 0
      # when lead car starts moving, create 6 RES msgs
      elif CS.lead_distance != self.last_lead_distance and (frame - self.last_resume_frame) > 5:
        can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.RES_ACCEL, clu11_speed))
        self.resume_cnt += 1
        # interval after 6 msgs
        if self.resume_cnt > 5:
          self.last_resume_frame = frame
          self.resume_cnt = 0
    # reset lead distance after the car starts moving
    elif self.last_lead_distance != 0:
      self.last_lead_distance = 0
    # 20 Hz LFA MFA message
    if frame % 5 == 0 and self.car_fingerprint in [CAR.SONATA, CAR.PALISADE, CAR.SONATA_H, CAR.SANTA_FE]:
      can_sends.append(create_lfa_mfa(self.packer, frame, enabled))
    return can_sends
| true | true |
f7265a0b69f2307625509d7efc09f1872f2f80c4 | 8,067 | py | Python | trading-with-python/util/trendy.py | mikimaus78/ml_monorepo | b2c2627ff0e86e27f6829170d0dac168d8e5783b | [
"BSD-3-Clause"
] | 51 | 2019-02-01T19:43:37.000Z | 2022-03-16T09:07:03.000Z | trading-with-python/util/trendy.py | mikimaus78/ml_monorepo | b2c2627ff0e86e27f6829170d0dac168d8e5783b | [
"BSD-3-Clause"
] | 2 | 2019-02-23T18:54:22.000Z | 2019-11-09T01:30:32.000Z | trading-with-python/util/trendy.py | mikimaus78/ml_monorepo | b2c2627ff0e86e27f6829170d0dac168d8e5783b | [
"BSD-3-Clause"
] | 35 | 2019-02-08T02:00:31.000Z | 2022-03-01T23:17:00.000Z | import numpy as np
from filter import movingaverage
def gentrends(x, window=1/3.0, charts=True):
    """
    Return a pandas DataFrame with support and resistance lines.

    :param x: One-dimensional data set
    :param window: How long the trendlines should be. If window < 1, then it
                   will be taken as a percentage of the size of the data
    :param charts: Boolean value saying whether to print chart to screen
    """
    import numpy as np
    # BUGFIX: pandas.io.data was removed from pandas (moved to pandas-datareader)
    # and was never the home of DataFrame; import pandas itself instead.
    import pandas as pd

    x = np.array(x)

    if window < 1:
        window = int(window * len(x))

    max1 = np.where(x == max(x))[0][0]  # index of the absolute max
    min1 = np.where(x == min(x))[0][0]  # index of the absolute min

    # secondary max: search outside a window-wide zone around the primary max
    if max1 + window > len(x):
        max2 = max(x[0:(max1 - window)])
    else:
        max2 = max(x[(max1 + window):])

    # secondary min: symmetric logic
    if min1 - window < 0:
        min2 = min(x[(min1 + window):])
    else:
        min2 = min(x[0:(min1 - window)])

    # indices of the secondary extrema
    max2 = np.where(x == max2)[0][0]
    min2 = np.where(x == min2)[0][0]

    # slopes and intercepts of the two trendlines, extended to the last point
    maxslope = (x[max1] - x[max2]) / (max1 - max2)
    minslope = (x[min1] - x[min2]) / (min1 - min2)
    a_max = x[max1] - (maxslope * max1)
    a_min = x[min1] - (minslope * min1)
    b_max = x[max1] + (maxslope * (len(x) - max1))
    b_min = x[min1] + (minslope * (len(x) - min1))
    maxline = np.linspace(a_max, b_max, len(x))  # Y values along the max line
    minline = np.linspace(a_min, b_min, len(x))  # Y values along the min line

    trends = np.transpose(np.array((x, maxline, minline)))
    trends = pd.DataFrame(trends, index=np.arange(0, len(x)),
                          columns=['Data', 'Max Line', 'Min Line'])

    if charts is True:
        from matplotlib.pyplot import plot, grid, show, figure
        figure()
        plot(trends)
        grid()
        show()

    return trends, maxslope, minslope
def segtrends(x, segments=2, charts=True, window=7):
    """
    Find per-segment support/resistance extrema of a series.

    The series is split into `segments` equal slices; the max/min of each
    slice and their x positions are returned, optionally plotted together
    with a moving-average overlay.

    :param x: One-dimensional data set
    :param segments: number of equal-length slices to scan for extrema
    :param charts: Boolean value saying whether to print chart to screen
    :param window: moving-average window for the plotted overlay
    """
    import numpy as np

    y = np.array(x)
    n = len(y)
    movy = movingaverage(y, window)

    # scan each of the `segments` equal slices for its max/min and position
    segments = int(segments)
    maxima = np.ones(segments)
    minima = np.ones(segments)
    x_maxima = np.ones(segments)
    x_minima = np.ones(segments)
    segsize = int(len(y) / segments)
    for i in range(1, segments + 1):
        ind2 = i * segsize
        ind1 = ind2 - segsize
        seg = y[ind1:ind2]
        maxima[i - 1] = max(seg)
        minima[i - 1] = min(seg)
        x_maxima[i - 1] = ind1 + (np.where(seg == maxima[i - 1])[0][0])
        x_minima[i - 1] = ind1 + (np.where(seg == minima[i - 1])[0][0])

    # NOTE: the original also computed per-segment maxline/minline arrays in a
    # loop whose plotting was commented out, so the results were never used;
    # that dead code has been removed.

    if charts:
        import matplotlib.pyplot as plt
        plt.plot(y)
        plt.grid(True)
        plt.plot(range(n), movy, 'b')
        plt.plot(x_maxima, maxima, 'g')
        plt.plot(x_minima, minima, 'r')
        plt.show()

    return x_maxima, maxima, x_minima, minima
def minitrends(x, window=20, charts=True):
    """
    Find local maxima/minima of a series and return the critical points.

    :param x: One-dimensional data set
    :param window: lag used to detect turning points. If window < 1, then it
                   will be taken as a percentage of the size of the data
    :param charts: Boolean value saying whether to print chart to screen
    :return: (xMax, yMax, xMin, yMin) arrays of critical point positions/values
    """
    import numpy as np

    y = np.array(x)

    if window < 1:  # if window is given as fraction of data length
        window = float(window)
        window = int(window * len(y))
    x = np.arange(0, len(y))

    # indices where the `window`-lagged difference changes sign.
    # BUGFIX: the original indexed the full-length arange with a shorter
    # boolean mask (x[crit]); modern numpy rejects mismatched boolean masks.
    # np.where selects the same positions that old numpy's False-padding did.
    dy = y[window:] - y[:-window]
    crit = np.where(dy[:-1] * dy[1:] < 0)[0]

    # classify each critical index as a max or a min
    maxi = ((y[crit] - y[crit + window] > 0) &
            (y[crit] - y[crit - window] > 0) * 1).astype(float)
    mini = ((y[crit] - y[crit + window] < 0) &
            (y[crit] - y[crit - window] < 0) * 1).astype(float)
    maxi[maxi == 0] = np.nan
    mini[mini == 0] = np.nan
    xmax = crit * maxi
    xmax = xmax[~np.isnan(xmax)]
    xmax = xmax.astype(int)
    xmin = crit * mini
    xmin = xmin[~np.isnan(xmin)]
    xmin = xmin.astype(int)

    # refine: keep the best max/min within each region between candidates
    yMax = np.array([])
    xMax = np.array([])
    for i in xmax:
        indx = np.where(xmax == i)[0][0] + 1
        try:
            Y = y[i:xmax[indx]]
            yMax = np.append(yMax, Y.max())
            xMax = np.append(xMax, np.where(y == yMax[-1])[0][0])
        except IndexError:  # BUGFIX: bare except narrowed; last candidate has no right neighbour
            pass
    yMin = np.array([])
    xMin = np.array([])
    for i in xmin:
        indx = np.where(xmin == i)[0][0] + 1
        try:
            Y = y[i:xmin[indx]]
            yMin = np.append(yMin, Y.min())
            xMin = np.append(xMin, np.where(y == yMin[-1])[0][0])
        except IndexError:
            pass

    # make sure the endpoints of the series are represented
    if y[-1] > yMax[-1]:
        yMax = np.append(yMax, y[-1])
        xMax = np.append(xMax, x[-1])
    if y[0] not in yMax:
        yMax = np.insert(yMax, 0, y[0])
        xMax = np.insert(xMax, 0, x[0])
    if y[-1] < yMin[-1]:
        yMin = np.append(yMin, y[-1])
        xMin = np.append(xMin, x[-1])
    if y[0] not in yMin:
        yMin = np.insert(yMin, 0, y[0])
        xMin = np.insert(xMin, 0, x[0])

    # Plot results if desired
    if charts is True:
        from matplotlib.pyplot import plot, show, grid
        plot(x, y)
        plot(xMax, yMax, '-o')
        plot(xMin, yMin, '-o')
        grid(True)
        show()

    # Return arrays of critical points
    return xMax, yMax, xMin, yMin
def iterlines(x, window=30, charts=True):
    """
    Mark breakout points of a series: +1 where a value exceeds the max of the
    preceding `window` samples, -1 where it drops below the min, 0 elsewhere.

    :param x: One-dimensional data set
    :param window: lookback length; if < 1, taken as a fraction of the data size
    :param charts: Boolean value saying whether to print chart to screen
    :return: numpy array of signals (+1.0 / -1.0 / 0.0), one per sample
    """
    import numpy as np

    data = np.array(x)
    n = len(data)
    if window < 1:
        window = int(window * n)

    sigs = np.zeros(n, dtype=float)
    for i in range(window, n):
        lookback = data[i - window:i]
        if data[i] > lookback.max():
            sigs[i] = 1
        elif data[i] < lookback.min():
            sigs[i] = -1

    if charts is True:
        from matplotlib.pyplot import plot, grid, show
        xmin = np.where(sigs == -1.0)[0]
        xmax = np.where(sigs == 1.0)[0]
        plot(data)
        plot(xmin, data[xmin], 'ro')
        plot(xmax, data[xmax], 'go')
        grid(True)
        show()

    return sigs
| 32.792683 | 79 | 0.567497 | import numpy as np
from filter import movingaverage
def gentrends(x, window=1/3.0, charts=True):
    """
    Return a pandas DataFrame with support and resistance lines.

    :param x: One-dimensional data set
    :param window: How long the trendlines should be. If window < 1, then it
                   will be taken as a percentage of the size of the data
    :param charts: Boolean value saying whether to print chart to screen
    """
    import numpy as np
    # BUGFIX: pandas.io.data was removed from pandas (moved to pandas-datareader)
    # and was never the home of DataFrame; import pandas itself instead.
    import pandas as pd

    x = np.array(x)

    if window < 1:
        window = int(window * len(x))

    max1 = np.where(x == max(x))[0][0]  # index of the absolute max
    min1 = np.where(x == min(x))[0][0]  # index of the absolute min

    # secondary max: search outside a window-wide zone around the primary max
    if max1 + window > len(x):
        max2 = max(x[0:(max1 - window)])
    else:
        max2 = max(x[(max1 + window):])

    # secondary min: symmetric logic
    if min1 - window < 0:
        min2 = min(x[(min1 + window):])
    else:
        min2 = min(x[0:(min1 - window)])

    # indices of the secondary extrema
    max2 = np.where(x == max2)[0][0]
    min2 = np.where(x == min2)[0][0]

    # slopes and intercepts of the two trendlines, extended to the last point
    maxslope = (x[max1] - x[max2]) / (max1 - max2)
    minslope = (x[min1] - x[min2]) / (min1 - min2)
    a_max = x[max1] - (maxslope * max1)
    a_min = x[min1] - (minslope * min1)
    b_max = x[max1] + (maxslope * (len(x) - max1))
    b_min = x[min1] + (minslope * (len(x) - min1))
    maxline = np.linspace(a_max, b_max, len(x))  # Y values along the max line
    minline = np.linspace(a_min, b_min, len(x))  # Y values along the min line

    trends = np.transpose(np.array((x, maxline, minline)))
    trends = pd.DataFrame(trends, index=np.arange(0, len(x)),
                          columns=['Data', 'Max Line', 'Min Line'])

    if charts is True:
        from matplotlib.pyplot import plot, grid, show, figure
        figure()
        plot(trends)
        grid()
        show()

    return trends, maxslope, minslope
def segtrends(x, segments=2, charts=True, window=7):
    """
    Find per-segment support/resistance extrema of a series.

    The series is split into `segments` equal slices; the max/min of each
    slice and their x positions are returned, optionally plotted together
    with a moving-average overlay.

    :param x: One-dimensional data set
    :param segments: number of equal-length slices to scan for extrema
    :param charts: Boolean value saying whether to print chart to screen
    :param window: moving-average window for the plotted overlay
    """
    import numpy as np

    y = np.array(x)
    n = len(y)
    movy = movingaverage(y, window)

    # scan each of the `segments` equal slices for its max/min and position
    segments = int(segments)
    maxima = np.ones(segments)
    minima = np.ones(segments)
    x_maxima = np.ones(segments)
    x_minima = np.ones(segments)
    segsize = int(len(y) / segments)
    for i in range(1, segments + 1):
        ind2 = i * segsize
        ind1 = ind2 - segsize
        seg = y[ind1:ind2]
        maxima[i - 1] = max(seg)
        minima[i - 1] = min(seg)
        x_maxima[i - 1] = ind1 + (np.where(seg == maxima[i - 1])[0][0])
        x_minima[i - 1] = ind1 + (np.where(seg == minima[i - 1])[0][0])

    # NOTE: the original also computed per-segment maxline/minline arrays in a
    # loop whose plotting was commented out, so the results were never used;
    # that dead code has been removed.

    if charts:
        import matplotlib.pyplot as plt
        plt.plot(y)
        plt.grid(True)
        plt.plot(range(n), movy, 'b')
        plt.plot(x_maxima, maxima, 'g')
        plt.plot(x_minima, minima, 'r')
        plt.show()

    return x_maxima, maxima, x_minima, minima
def minitrends(x, window=20, charts=True):
    """
    Find local maxima/minima of a series and return the critical points.

    :param x: One-dimensional data set
    :param window: lag used to detect turning points. If window < 1, then it
                   will be taken as a percentage of the size of the data
    :param charts: Boolean value saying whether to print chart to screen
    :return: (xMax, yMax, xMin, yMin) arrays of critical point positions/values
    """
    import numpy as np

    y = np.array(x)

    if window < 1:  # if window is given as fraction of data length
        window = float(window)
        window = int(window * len(y))
    x = np.arange(0, len(y))

    # indices where the `window`-lagged difference changes sign.
    # BUGFIX: the original indexed the full-length arange with a shorter
    # boolean mask (x[crit]); modern numpy rejects mismatched boolean masks.
    # np.where selects the same positions that old numpy's False-padding did.
    dy = y[window:] - y[:-window]
    crit = np.where(dy[:-1] * dy[1:] < 0)[0]

    # classify each critical index as a max or a min
    maxi = ((y[crit] - y[crit + window] > 0) &
            (y[crit] - y[crit - window] > 0) * 1).astype(float)
    mini = ((y[crit] - y[crit + window] < 0) &
            (y[crit] - y[crit - window] < 0) * 1).astype(float)
    maxi[maxi == 0] = np.nan
    mini[mini == 0] = np.nan
    xmax = crit * maxi
    xmax = xmax[~np.isnan(xmax)]
    xmax = xmax.astype(int)
    xmin = crit * mini
    xmin = xmin[~np.isnan(xmin)]
    xmin = xmin.astype(int)

    # refine: keep the best max/min within each region between candidates
    yMax = np.array([])
    xMax = np.array([])
    for i in xmax:
        indx = np.where(xmax == i)[0][0] + 1
        try:
            Y = y[i:xmax[indx]]
            yMax = np.append(yMax, Y.max())
            xMax = np.append(xMax, np.where(y == yMax[-1])[0][0])
        except IndexError:  # BUGFIX: bare except narrowed; last candidate has no right neighbour
            pass
    yMin = np.array([])
    xMin = np.array([])
    for i in xmin:
        indx = np.where(xmin == i)[0][0] + 1
        try:
            Y = y[i:xmin[indx]]
            yMin = np.append(yMin, Y.min())
            xMin = np.append(xMin, np.where(y == yMin[-1])[0][0])
        except IndexError:
            pass

    # make sure the endpoints of the series are represented
    if y[-1] > yMax[-1]:
        yMax = np.append(yMax, y[-1])
        xMax = np.append(xMax, x[-1])
    if y[0] not in yMax:
        yMax = np.insert(yMax, 0, y[0])
        xMax = np.insert(xMax, 0, x[0])
    if y[-1] < yMin[-1]:
        yMin = np.append(yMin, y[-1])
        xMin = np.append(xMin, x[-1])
    if y[0] not in yMin:
        yMin = np.insert(yMin, 0, y[0])
        xMin = np.insert(xMin, 0, x[0])

    if charts is True:
        from matplotlib.pyplot import plot, show, grid
        plot(x, y)
        plot(xMax, yMax, '-o')
        plot(xMin, yMin, '-o')
        grid(True)
        show()

    return xMax, yMax, xMin, yMin
def iterlines(x, window=30, charts=True):
    """
    Mark breakout points of a series: +1 where a value exceeds the max of the
    preceding `window` samples, -1 where it drops below the min, 0 elsewhere.

    :param x: One-dimensional data set
    :param window: lookback length; if < 1, taken as a fraction of the data size
    :param charts: Boolean value saying whether to print chart to screen
    :return: numpy array of signals (+1.0 / -1.0 / 0.0), one per sample
    """
    import numpy as np

    data = np.array(x)
    n = len(data)
    if window < 1:
        window = int(window * n)

    sigs = np.zeros(n, dtype=float)
    for i in range(window, n):
        lookback = data[i - window:i]
        if data[i] > lookback.max():
            sigs[i] = 1
        elif data[i] < lookback.min():
            sigs[i] = -1

    if charts is True:
        from matplotlib.pyplot import plot, grid, show
        xmin = np.where(sigs == -1.0)[0]
        xmax = np.where(sigs == 1.0)[0]
        plot(data)
        plot(xmin, data[xmin], 'ro')
        plot(xmax, data[xmax], 'go')
        grid(True)
        show()

    return sigs
| true | true |
f7265b61b492e4080f0306586bfd4421f042a4b1 | 9,483 | py | Python | gamestonk_terminal/stocks/insider/openinsider_view.py | jbushago/GamestonkTerminal | 73a2b419664bf62bbdc59aa8402c8cd6a913a518 | [
"MIT"
] | 1 | 2022-03-15T13:05:40.000Z | 2022-03-15T13:05:40.000Z | gamestonk_terminal/stocks/insider/openinsider_view.py | jbushago/GamestonkTerminal | 73a2b419664bf62bbdc59aa8402c8cd6a913a518 | [
"MIT"
] | null | null | null | gamestonk_terminal/stocks/insider/openinsider_view.py | jbushago/GamestonkTerminal | 73a2b419664bf62bbdc59aa8402c8cd6a913a518 | [
"MIT"
] | null | null | null | import itertools
import logging
import os
import textwrap
from typing import List
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.helper_funcs import (
export_data,
patch_pandas_text_adjustment,
print_rich_table,
)
from gamestonk_terminal.rich_config import console
from gamestonk_terminal.stocks.insider.openinsider_model import (
get_open_insider_data,
get_open_insider_link,
)
from gamestonk_terminal import rich_config
logger = logging.getLogger(__name__)
d_open_insider = {
"lcb": "latest-cluster-buys",
"lpsb": "latest-penny-stock-buys",
"lit": "latest-insider-trading",
"lip": "insider-purchases",
"blip": "latest-insider-purchases-25k",
"blop": "latest-officer-purchases-25k",
"blcp": "latest-ceo-cfo-purchases-25k",
"lis": "insider-sales",
"blis": "latest-insider-sales-100k",
"blos": "latest-officer-sales-100k",
"blcs": "latest-ceo-cfo-sales-100k",
"topt": "top-officer-purchases-of-the-day",
"toppw": "top-officer-purchases-of-the-week",
"toppm": "top-officer-purchases-of-the-month",
"tipt": "top-insider-purchases-of-the-day",
"tippw": "top-insider-purchases-of-the-week",
"tippm": "top-insider-purchases-of-the-month",
"tist": "top-insider-sales-of-the-day",
"tispw": "top-insider-sales-of-the-week",
"tispm": "top-insider-sales-of-the-month",
}
d_notes = {
"A": "A: Amended filing",
"D": "D: Derivative transaction in filing (usually option exercise)",
"E": "E: Error detected in filing",
"M": "M: Multiple transactions in filing; earliest reported transaction date & weighted average transaction price",
}
d_trade_types = {
"S - Sale": "[red]S - Sale: Sale of securities on an exchange or to another person[/red]",
"S - Sale+OE": "[yellow]S - Sale+OE: Sale of securities "
"on an exchange or to another person (after option exercise)[/yellow]",
"F - Tax": "[magenta]F - Tax: Payment of exercise price or "
"tax liability using portion of securities received from the company[/magenta]",
"P - Purchase": "[green]P - Purchase: Purchase of securities on "
"an exchange or from another person[/green]",
}
def lambda_red_highlight(values) -> List[str]:
    """Wrap every value in rich red color tags.

    Parameters
    ----------
    values : List[str]
        dataframe values to color

    Returns
    ----------
    List[str]
        colored dataframes values
    """
    return list(map("[red]{}[/red]".format, values))
def lambda_yellow_highlight(values) -> List[str]:
    """Wrap every value in rich yellow color tags.

    Parameters
    ----------
    values : List[str]
        dataframe values to color

    Returns
    ----------
    List[str]
        colored dataframes values
    """
    return list(map("[yellow]{}[/yellow]".format, values))
def lambda_magenta_highlight(values):
    """Wrap every value in rich magenta color tags.

    Parameters
    ----------
    values : List[str]
        dataframe values to color

    Returns
    ----------
    List[str]
        colored dataframes values
    """
    return list(map("[magenta]{}[/magenta]".format, values))
def lambda_green_highlight(values):
    """Wrap every value in rich green color tags.

    Parameters
    ----------
    values : List[str]
        dataframe values to color

    Returns
    ----------
    List[str]
        colored dataframes values
    """
    return list(map("[green]{}[/green]".format, values))
@log_start_end(log=logger)
def print_insider_data(type_insider: str, limit: int = 10, export: str = ""):
    """Print insider data scraped from openinsider.com.

    Parameters
    ----------
    type_insider: str
        Insider type of data (key into d_open_insider)
    limit: int
        Limit of data rows to display
    export: str
        Export data format
    """
    # BUGFIX: request had no timeout, so a hung openinsider.com connection
    # could block the terminal indefinitely
    response = requests.get(
        f"http://openinsider.com/{d_open_insider[type_insider]}", timeout=30
    )
    soup = BeautifulSoup(response.text, "html.parser")
    table = soup.find("table", {"class": "tinytable"})

    if not table:
        console.print("No insider information found", "\n")
        return

    table_rows = table.find_all("tr")

    # extract non-empty cell texts of every row
    # (loop variable renamed: the original comprehension shadowed `tr`)
    res = []
    for tr in table_rows:
        td = tr.find_all("td")
        row = [cell.text.strip() for cell in td if cell.text.strip()]
        res.append(row)

    df = pd.DataFrame(res).dropna().head(n=limit)
    columns = [
        "X",
        "Filing Date",
        "Trade Date",
        "Ticker",
        "Company Name",
        "Industry" if type_insider == "lcb" else "Insider Name",
        "Title",
        "Trade Type",
        "Price",
        "Qty",
        "Owned",
        "Diff Own",
        "Value",
    ]

    # the "X" (notes) column is only present on some screens
    if df.shape[1] == 13:
        df.columns = columns
    else:
        df.columns = columns[1:]

    # wrap long text fields so the rich table stays readable
    df["Filing Date"] = df["Filing Date"].apply(
        lambda x: "\n".join(textwrap.wrap(x, width=10)) if isinstance(x, str) else x
    )
    df["Company Name"] = df["Company Name"].apply(
        lambda x: "\n".join(textwrap.wrap(x, width=20)) if isinstance(x, str) else x
    )
    df["Title"] = df["Title"].apply(
        lambda x: "\n".join(textwrap.wrap(x, width=10)) if isinstance(x, str) else x
    )
    if type_insider == "lcb":
        df["Industry"] = df["Industry"].apply(
            lambda x: "\n".join(textwrap.wrap(x, width=20)) if isinstance(x, str) else x
        )
    else:
        df["Insider Name"] = df["Insider Name"].apply(
            lambda x: "\n".join(textwrap.wrap(x, width=20)) if isinstance(x, str) else x
        )

    print_rich_table(
        df,
        headers=[x.title() for x in df.columns],
        show_index=False,
        title="Insider Data",
    )

    export_data(export, os.path.dirname(os.path.abspath(__file__)), type_insider, df)

    # explain any note flags (amended filing, derivative, ...) present in "X"
    if df.shape[1] == 13:
        l_chars = [list(chars) for chars in df["X"].values]
        l_uchars = np.unique(list(itertools.chain(*l_chars)))

        for char in l_uchars:
            console.print(d_notes[char])
    console.print("")
@log_start_end(log=logger)
def print_insider_filter(
    preset_loaded: str,
    ticker: str,
    limit: int = 10,
    links: bool = False,
    export: str = "",
):
    """Print insider filter based on loaded preset. [Source: OpenInsider]

    Parameters
    ----------
    preset_loaded : str
        Loaded preset filter; used to build the screener URL when no ticker is given
    ticker : str
        Stock ticker; when set, overrides the preset and screens this ticker only
    limit : int
        Limit of rows of data to display
    links : bool
        Flag to show hyperlink columns instead of the data columns
    export : str
        Format to export data
    """
    # a ticker takes precedence over the preset when building the screener URL
    if ticker:
        link = f"http://openinsider.com/screener?s={ticker}"
    else:
        link = get_open_insider_link(preset_loaded)

    if not link:
        console.print("")
        return

    df_insider = get_open_insider_data(link, has_company_name=bool(not ticker))
    # unfiltered copy kept for the notes / trade-type legend printed at the end
    df_insider_orig = df_insider.copy()

    if df_insider.empty:
        console.print("No insider data found\n")
        return

    # either show only the hyperlink columns, or drop them and show the data
    if links:
        df_insider = df_insider[["Ticker Link", "Insider Link", "Filing Link"]].head(
            limit
        )
    else:
        df_insider = df_insider.drop(
            columns=["Filing Link", "Ticker Link", "Insider Link"]
        ).head(limit)

    # colorize rows per trade type when color output is enabled (not in link view)
    if rich_config.USE_COLOR and not links:
        if not df_insider[df_insider["Trade Type"] == "S - Sale"].empty:
            df_insider[df_insider["Trade Type"] == "S - Sale"] = df_insider[
                df_insider["Trade Type"] == "S - Sale"
            ].apply(lambda_red_highlight)
        if not df_insider[df_insider["Trade Type"] == "S - Sale+OE"].empty:
            df_insider[df_insider["Trade Type"] == "S - Sale+OE"] = df_insider[
                df_insider["Trade Type"] == "S - Sale+OE"
            ].apply(lambda_yellow_highlight)
        if not df_insider[df_insider["Trade Type"] == "F - Tax"].empty:
            df_insider[df_insider["Trade Type"] == "F - Tax"] = df_insider[
                df_insider["Trade Type"] == "F - Tax"
            ].apply(lambda_magenta_highlight)
        if not df_insider[df_insider["Trade Type"] == "P - Purchase"].empty:
            df_insider[df_insider["Trade Type"] == "P - Purchase"] = df_insider[
                df_insider["Trade Type"] == "P - Purchase"
            ].apply(lambda_green_highlight)

        # patch pandas' text adjustment so the color escape codes don't break widths
        patch_pandas_text_adjustment()

        pd.set_option("display.max_colwidth", 0)
        pd.set_option("display.max_rows", None)

        # needs to be done because table is too large :(
        df_insider = df_insider.drop(columns=["Filing Date", "Trade Type"])

    else:
        # needs to be done because table is too large :(
        df_insider = df_insider.drop(columns=["Filing Date"])

    console.print("")
    print_rich_table(
        df_insider,
        headers=[x.title() for x in df_insider.columns],
        title="Insider filtered",
    )

    if export:
        # export under the command name that produced the data
        if preset_loaded:
            cmd = "filter"
        if ticker:
            cmd = "lis"
        export_data(export, os.path.dirname(os.path.abspath(__file__)), cmd, df_insider)

    if not links:
        # legend: explain the note flags found in "X" and the trade types present
        l_chars = [list(chars) for chars in df_insider_orig["X"].values]
        l_uchars = np.unique(list(itertools.chain(*l_chars)))
        console.print("")
        for char in l_uchars:
            console.print(d_notes[char])

        l_tradetype = df_insider_orig["Trade Type"].values
        l_utradetype = np.unique(l_tradetype)
        console.print("")
        for tradetype in l_utradetype:
            console.print(d_trade_types[tradetype])

    console.print("")
| 29.178462 | 119 | 0.605505 | import itertools
import logging
import os
import textwrap
from typing import List
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.helper_funcs import (
export_data,
patch_pandas_text_adjustment,
print_rich_table,
)
from gamestonk_terminal.rich_config import console
from gamestonk_terminal.stocks.insider.openinsider_model import (
get_open_insider_data,
get_open_insider_link,
)
from gamestonk_terminal import rich_config
logger = logging.getLogger(__name__)
d_open_insider = {
"lcb": "latest-cluster-buys",
"lpsb": "latest-penny-stock-buys",
"lit": "latest-insider-trading",
"lip": "insider-purchases",
"blip": "latest-insider-purchases-25k",
"blop": "latest-officer-purchases-25k",
"blcp": "latest-ceo-cfo-purchases-25k",
"lis": "insider-sales",
"blis": "latest-insider-sales-100k",
"blos": "latest-officer-sales-100k",
"blcs": "latest-ceo-cfo-sales-100k",
"topt": "top-officer-purchases-of-the-day",
"toppw": "top-officer-purchases-of-the-week",
"toppm": "top-officer-purchases-of-the-month",
"tipt": "top-insider-purchases-of-the-day",
"tippw": "top-insider-purchases-of-the-week",
"tippm": "top-insider-purchases-of-the-month",
"tist": "top-insider-sales-of-the-day",
"tispw": "top-insider-sales-of-the-week",
"tispm": "top-insider-sales-of-the-month",
}
d_notes = {
"A": "A: Amended filing",
"D": "D: Derivative transaction in filing (usually option exercise)",
"E": "E: Error detected in filing",
"M": "M: Multiple transactions in filing; earliest reported transaction date & weighted average transaction price",
}
d_trade_types = {
"S - Sale": "[red]S - Sale: Sale of securities on an exchange or to another person[/red]",
"S - Sale+OE": "[yellow]S - Sale+OE: Sale of securities "
"on an exchange or to another person (after option exercise)[/yellow]",
"F - Tax": "[magenta]F - Tax: Payment of exercise price or "
"tax liability using portion of securities received from the company[/magenta]",
"P - Purchase": "[green]P - Purchase: Purchase of securities on "
"an exchange or from another person[/green]",
}
def lambda_red_highlight(values) -> List[str]:
    """Wrap every value in rich red color tags."""
    return list(map("[red]{}[/red]".format, values))
def lambda_yellow_highlight(values) -> List[str]:
    """Wrap every value in rich markup tags for yellow rendering."""
    return list(map("[yellow]{}[/yellow]".format, values))
def lambda_magenta_highlight(values) -> List[str]:
    """Wrap every value in rich markup tags for magenta rendering."""
    # Return annotation added for consistency with the red/yellow variants.
    return [f"[magenta]{val}[/magenta]" for val in values]
def lambda_green_highlight(values) -> List[str]:
    """Wrap every value in rich markup tags for green rendering."""
    # Return annotation added for consistency with the red/yellow variants.
    return [f"[green]{val}[/green]" for val in values]
@log_start_end(log=logger)
def print_insider_data(type_insider: str, limit: int = 10, export: str = ""):
    """Scrape one of the OpenInsider screener pages and pretty-print it.

    Parameters
    ----------
    type_insider : str
        Key into ``d_open_insider`` selecting which screener page to fetch
        (e.g. "lcb" -> "latest-cluster-buys").
    limit : int
        Maximum number of rows to display.
    export : str
        Export format forwarded to ``export_data`` ("" disables export).
    """
    response = requests.get(f"http://openinsider.com/{d_open_insider[type_insider]}")
    soup = BeautifulSoup(response.text, "html.parser")
    table = soup.find("table", {"class": "tinytable"})

    if not table:
        console.print("No insider information found", "\n")
        return

    rows = []
    for table_row in table.find_all("tr"):
        cells = table_row.find_all("td")
        # Keep only the non-empty cell texts of this row.
        rows.append([cell.text.strip() for cell in cells if cell.text.strip()])

    df = pd.DataFrame(rows).dropna().head(n=limit)

    columns = [
        "X",
        "Filing Date",
        "Trade Date",
        "Ticker",
        "Company Name",
        "Industry" if type_insider == "lcb" else "Insider Name",
        "Title",
        "Trade Type",
        "Price",
        "Qty",
        "Owned",
        "Diff Own",
        "Value",
    ]
    # The "X" (notes) column is only present on some screeners; drop its
    # header when the scraped table has 12 columns instead of 13.
    if df.shape[1] == 13:
        df.columns = columns
    else:
        df.columns = columns[1:]

    def _wrap(width: int):
        """Return a cell formatter that hard-wraps strings to `width` chars."""
        return lambda x: "\n".join(textwrap.wrap(x, width=width)) if isinstance(x, str) else x

    df["Filing Date"] = df["Filing Date"].apply(_wrap(10))
    df["Company Name"] = df["Company Name"].apply(_wrap(20))
    df["Title"] = df["Title"].apply(_wrap(10))
    # Column 5 is "Industry" for cluster buys ("lcb") and "Insider Name"
    # for every other screener.
    name_column = "Industry" if type_insider == "lcb" else "Insider Name"
    df[name_column] = df[name_column].apply(_wrap(20))

    print_rich_table(
        df,
        headers=[x.title() for x in df.columns],
        show_index=False,
        title="Insider Data",
    )

    export_data(export, os.path.dirname(os.path.abspath(__file__)), type_insider, df)

    if df.shape[1] == 13:
        # Explain every distinct note flag that appears in the "X" column.
        l_chars = [list(chars) for chars in df["X"].values]
        l_uchars = np.unique(list(itertools.chain(*l_chars)))
        for char in l_uchars:
            # Fall back to the raw flag if OpenInsider introduces a new one
            # (the original indexed d_notes directly and would raise KeyError).
            console.print(d_notes.get(char, char))
    console.print("")
@log_start_end(log=logger)
def print_insider_filter(
    preset_loaded: str,
    ticker: str,
    limit: int = 10,
    links: bool = False,
    export: str = "",
):
    """Fetch OpenInsider screener results for a preset or a single ticker,
    optionally colorize the rows by trade type, print the table and then the
    legend of note flags / trade types that occurred.

    Parameters
    ----------
    preset_loaded : str
        Preset name passed to ``get_open_insider_link`` (used when no ticker).
    ticker : str
        If non-empty, query the screener for this single ticker instead.
    limit : int
        Maximum number of rows to display.
    links : bool
        If True, show only the link columns and skip colorizing/legend.
    export : str
        Export format forwarded to ``export_data`` ("" disables export).
    """
    if ticker:
        link = f"http://openinsider.com/screener?s={ticker}"
    else:
        link = get_open_insider_link(preset_loaded)
    if not link:
        console.print("")
        return
    # The company-name column only exists on preset screeners, not on
    # single-ticker queries.
    df_insider = get_open_insider_data(link, has_company_name=bool(not ticker))
    # Keep an unmodified copy: the legend at the bottom is built from the
    # original "X" and "Trade Type" values, before coloring/column drops.
    df_insider_orig = df_insider.copy()
    if df_insider.empty:
        console.print("No insider data found\n")
        return
    if links:
        df_insider = df_insider[["Ticker Link", "Insider Link", "Filing Link"]].head(
            limit
        )
    else:
        df_insider = df_insider.drop(
            columns=["Filing Link", "Ticker Link", "Insider Link"]
        ).head(limit)
    if rich_config.USE_COLOR and not links:
        # For each trade type, wrap every cell of the matching rows in the
        # corresponding rich color tag (the lambda_*_highlight helpers are
        # applied column-wise over the row subset).
        if not df_insider[df_insider["Trade Type"] == "S - Sale"].empty:
            df_insider[df_insider["Trade Type"] == "S - Sale"] = df_insider[
                df_insider["Trade Type"] == "S - Sale"
            ].apply(lambda_red_highlight)
        if not df_insider[df_insider["Trade Type"] == "S - Sale+OE"].empty:
            df_insider[df_insider["Trade Type"] == "S - Sale+OE"] = df_insider[
                df_insider["Trade Type"] == "S - Sale+OE"
            ].apply(lambda_yellow_highlight)
        if not df_insider[df_insider["Trade Type"] == "F - Tax"].empty:
            df_insider[df_insider["Trade Type"] == "F - Tax"] = df_insider[
                df_insider["Trade Type"] == "F - Tax"
            ].apply(lambda_magenta_highlight)
        if not df_insider[df_insider["Trade Type"] == "P - Purchase"].empty:
            df_insider[df_insider["Trade Type"] == "P - Purchase"] = df_insider[
                df_insider["Trade Type"] == "P - Purchase"
            ].apply(lambda_green_highlight)
        patch_pandas_text_adjustment()
        pd.set_option("display.max_colwidth", 0)
        pd.set_option("display.max_rows", None)
        # The now-colorized frame is shown without these two columns.
        df_insider = df_insider.drop(columns=["Filing Date", "Trade Type"])
    else:
        df_insider = df_insider.drop(columns=["Filing Date"])
    console.print("")
    print_rich_table(
        df_insider,
        headers=[x.title() for x in df_insider.columns],
        title="Insider filtered",
    )
    if export:
        # NOTE(review): if both preset_loaded and ticker were falsy, ``cmd``
        # would be unbound here (NameError); in practice the early returns
        # above should prevent that. Also confirm "lis" is the intended tag
        # for ticker exports -- it is also the "insider-sales" screener key.
        if preset_loaded:
            cmd = "filter"
        if ticker:
            cmd = "lis"
        export_data(export, os.path.dirname(os.path.abspath(__file__)), cmd, df_insider)
    if not links:
        # Legend: explain every note flag and trade type present in the
        # unfiltered results.
        l_chars = [list(chars) for chars in df_insider_orig["X"].values]
        l_uchars = np.unique(list(itertools.chain(*l_chars)))
        console.print("")
        for char in l_uchars:
            # NOTE(review): raises KeyError if OpenInsider adds a flag not
            # present in d_notes.
            console.print(d_notes[char])
        l_tradetype = df_insider_orig["Trade Type"].values
        l_utradetype = np.unique(l_tradetype)
        console.print("")
        for tradetype in l_utradetype:
            console.print(d_trade_types[tradetype])
    console.print("")
| true | true |
f7265be4393da76c22754da75898052c8a4b8c71 | 831 | py | Python | src/cpp/qpsolver/cvxopt/examples/doc/chap8/conelp.py | Hap-Hugh/quicksel | 10eee90b759638d5c54ba19994ae8e36e90e12b8 | [
"Apache-2.0"
] | 15 | 2020-07-07T16:32:53.000Z | 2022-03-16T14:23:23.000Z | src/cpp/qpsolver/cvxopt/examples/doc/chap8/conelp.py | Hap-Hugh/quicksel | 10eee90b759638d5c54ba19994ae8e36e90e12b8 | [
"Apache-2.0"
] | 2 | 2020-09-02T15:25:39.000Z | 2020-09-24T08:37:18.000Z | src/cpp/qpsolver/cvxopt/examples/doc/chap8/conelp.py | Hap-Hugh/quicksel | 10eee90b759638d5c54ba19994ae8e36e90e12b8 | [
"Apache-2.0"
] | 6 | 2020-08-14T22:02:07.000Z | 2021-03-31T07:08:29.000Z | # The small linear cone program of section 8.1 (Linear cone programs).
from cvxopt import matrix, solvers
# Cone LP:  minimize c'x  subject to  G x + s = h,  s in K.
c = matrix([-6., -4., -5.])
# K (see `dims` below) is the product of a 2-dim nonnegative orthant, two
# second-order cones of dimension 4, and one 3x3 PSD cone stored column-major
# as 9 entries -- so G and h have 2 + 4 + 4 + 9 = 19 rows.
G = matrix([[ 16., 7., 24., -8., 8., -1., 0., -1., 0., 0., 7.,
              -5., 1., -5., 1., -7., 1., -7., -4.],
            [-14., 2., 7., -13., -18., 3., 0., 0., -1., 0., 3.,
              13., -6., 13., 12., -10., -6., -10., -28.],
            [ 5., 0., -15., 12., -6., 17., 0., 0., 0., -1., 9.,
              6., -6., 6., -7., -7., -6., -7., -11.]])
h = matrix( [ -3., 5., 12., -2., -14., -13., 10., 0., 0., 0., 68.,
              -30., -19., -30., 99., 23., -19., 23., 10.] )
dims = {'l': 2, 'q': [4, 4], 's': [3]}
sol = solvers.conelp(c, G, h, dims)
# Report the solver status, the primal solution x and the dual variable z.
print("\nStatus: " + sol['status'])
print("\nx = \n")
print(sol['x'])
print("\nz = \n")
print(sol['z'])
from cvxopt import matrix, solvers
c = matrix([-6., -4., -5.])
G = matrix([[ 16., 7., 24., -8., 8., -1., 0., -1., 0., 0., 7.,
-5., 1., -5., 1., -7., 1., -7., -4.],
[-14., 2., 7., -13., -18., 3., 0., 0., -1., 0., 3.,
13., -6., 13., 12., -10., -6., -10., -28.],
[ 5., 0., -15., 12., -6., 17., 0., 0., 0., -1., 9.,
6., -6., 6., -7., -7., -6., -7., -11.]])
h = matrix( [ -3., 5., 12., -2., -14., -13., 10., 0., 0., 0., 68.,
-30., -19., -30., 99., 23., -19., 23., 10.] )
dims = {'l': 2, 'q': [4, 4], 's': [3]}
sol = solvers.conelp(c, G, h, dims)
print("\nStatus: " + sol['status'])
print("\nx = \n")
print(sol['x'])
print("\nz = \n")
print(sol['z'])
| true | true |
f7265c3d05c95935872b8f725e78bb38be5404e8 | 9,800 | py | Python | stacker/tests/test_plan.py | DomainGroupOSS/stacker | 88b71bc5cfcbbf7957245d821434b95801230425 | [
"BSD-2-Clause"
] | 1 | 2018-07-17T11:23:47.000Z | 2018-07-17T11:23:47.000Z | stacker/tests/test_plan.py | DomainGroupOSS/stacker | 88b71bc5cfcbbf7957245d821434b95801230425 | [
"BSD-2-Clause"
] | null | null | null | stacker/tests/test_plan.py | DomainGroupOSS/stacker | 88b71bc5cfcbbf7957245d821434b95801230425 | [
"BSD-2-Clause"
] | 1 | 2020-02-29T04:49:04.000Z | 2020-02-29T04:49:04.000Z | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import range
import os
import shutil
import tempfile
import unittest
import mock
from stacker.context import Context, Config
from stacker.dag import walk
from stacker.util import stack_template_key_name
from stacker.lookups.registry import (
register_lookup_handler,
unregister_lookup_handler,
)
from stacker.plan import (
Step,
build_plan,
)
from stacker.exceptions import (
CancelExecution,
GraphError,
PlanFailed,
)
from stacker.status import (
SUBMITTED,
COMPLETE,
SKIPPED,
FAILED,
)
from stacker.stack import Stack
from .factories import generate_definition
count = 0
class TestStep(unittest.TestCase):
def setUp(self):
stack = mock.MagicMock()
stack.name = "stack"
stack.fqn = "namespace-stack"
self.step = Step(stack=stack, fn=None)
def test_status(self):
self.assertFalse(self.step.submitted)
self.assertFalse(self.step.completed)
self.step.submit()
self.assertEqual(self.step.status, SUBMITTED)
self.assertTrue(self.step.submitted)
self.assertFalse(self.step.completed)
self.step.complete()
self.assertEqual(self.step.status, COMPLETE)
self.assertNotEqual(self.step.status, SUBMITTED)
self.assertTrue(self.step.submitted)
self.assertTrue(self.step.completed)
self.assertNotEqual(self.step.status, True)
self.assertNotEqual(self.step.status, False)
self.assertNotEqual(self.step.status, 'banana')
class TestPlan(unittest.TestCase):
def setUp(self):
self.count = 0
self.config = Config({"namespace": "namespace"})
self.context = Context(config=self.config)
register_lookup_handler("noop", lambda **kwargs: "test")
def tearDown(self):
unregister_lookup_handler("noop")
def test_plan(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
plan = build_plan(description="Test", steps=[
Step(vpc, fn=None), Step(bastion, fn=None)])
self.assertEqual(plan.graph.to_dict(), {
'bastion.1': set(['vpc.1']),
'vpc.1': set([])})
def test_execute_plan(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
return COMPLETE
plan = build_plan(
description="Test", steps=[Step(vpc, fn), Step(bastion, fn)])
plan.execute(walk)
self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1'])
def test_execute_plan_filtered(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
db = Stack(
definition=generate_definition('db', 1, requires=[vpc.name]),
context=self.context)
app = Stack(
definition=generate_definition('app', 1, requires=[db.name]),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
return COMPLETE
plan = build_plan(
description="Test",
steps=[Step(vpc, fn), Step(db, fn), Step(app, fn)],
targets=['db.1'])
plan.execute(walk)
self.assertEquals(calls, [
'namespace-vpc.1', 'namespace-db.1'])
def test_execute_plan_exception(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
if stack.name == vpc_step.name:
raise ValueError('Boom')
return COMPLETE
vpc_step = Step(vpc, fn)
bastion_step = Step(bastion, fn)
plan = build_plan(description="Test", steps=[vpc_step, bastion_step])
with self.assertRaises(PlanFailed):
plan.execute(walk)
self.assertEquals(calls, ['namespace-vpc.1'])
self.assertEquals(vpc_step.status, FAILED)
def test_execute_plan_skipped(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
if stack.fqn == vpc_step.name:
return SKIPPED
return COMPLETE
vpc_step = Step(vpc, fn)
bastion_step = Step(bastion, fn)
plan = build_plan(description="Test", steps=[vpc_step, bastion_step])
plan.execute(walk)
self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1'])
def test_execute_plan_failed(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
db = Stack(
definition=generate_definition('db', 1),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
if stack.name == vpc_step.name:
return FAILED
return COMPLETE
vpc_step = Step(vpc, fn)
bastion_step = Step(bastion, fn)
db_step = Step(db, fn)
plan = build_plan(description="Test", steps=[
vpc_step, bastion_step, db_step])
with self.assertRaises(PlanFailed):
plan.execute(walk)
calls.sort()
self.assertEquals(calls, ['namespace-db.1', 'namespace-vpc.1'])
def test_execute_plan_cancelled(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
if stack.fqn == vpc_step.name:
raise CancelExecution
return COMPLETE
vpc_step = Step(vpc, fn)
bastion_step = Step(bastion, fn)
plan = build_plan(description="Test", steps=[
vpc_step, bastion_step])
plan.execute(walk)
self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1'])
def test_build_plan_missing_dependency(self):
bastion = Stack(
definition=generate_definition(
'bastion', 1, requires=['vpc.1']),
context=self.context)
with self.assertRaises(GraphError) as expected:
build_plan(description="Test", steps=[Step(bastion, None)])
message_starts = (
"Error detected when adding 'vpc.1' "
"as a dependency of 'bastion.1':"
)
message_contains = "dependent node vpc.1 does not exist"
self.assertTrue(str(expected.exception).startswith(message_starts))
self.assertTrue(message_contains in str(expected.exception))
def test_build_plan_cyclic_dependencies(self):
vpc = Stack(
definition=generate_definition(
'vpc', 1),
context=self.context)
db = Stack(
definition=generate_definition(
'db', 1, requires=['app.1']),
context=self.context)
app = Stack(
definition=generate_definition(
'app', 1, requires=['db.1']),
context=self.context)
with self.assertRaises(GraphError) as expected:
build_plan(
description="Test",
steps=[Step(vpc, None), Step(db, None), Step(app, None)])
message = ("Error detected when adding 'db.1' "
"as a dependency of 'app.1': graph is "
"not acyclic")
self.assertEqual(str(expected.exception), message)
def test_dump(self, *args):
requires = None
steps = []
for i in range(5):
overrides = {
"variables": {
"PublicSubnets": "1",
"SshKeyName": "1",
"PrivateSubnets": "1",
"Random": "${noop something}",
},
"requires": requires,
}
stack = Stack(
definition=generate_definition('vpc', i, **overrides),
context=self.context)
requires = [stack.name]
steps += [Step(stack, None)]
plan = build_plan(description="Test", steps=steps)
tmp_dir = tempfile.mkdtemp()
try:
plan.dump(tmp_dir, context=self.context)
for step in plan.steps:
template_path = os.path.join(
tmp_dir,
stack_template_key_name(step.stack.blueprint))
self.assertTrue(os.path.isfile(template_path))
finally:
shutil.rmtree(tmp_dir)
| 30.434783 | 78 | 0.580408 | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import range
import os
import shutil
import tempfile
import unittest
import mock
from stacker.context import Context, Config
from stacker.dag import walk
from stacker.util import stack_template_key_name
from stacker.lookups.registry import (
register_lookup_handler,
unregister_lookup_handler,
)
from stacker.plan import (
Step,
build_plan,
)
from stacker.exceptions import (
CancelExecution,
GraphError,
PlanFailed,
)
from stacker.status import (
SUBMITTED,
COMPLETE,
SKIPPED,
FAILED,
)
from stacker.stack import Stack
from .factories import generate_definition
count = 0
class TestStep(unittest.TestCase):
def setUp(self):
stack = mock.MagicMock()
stack.name = "stack"
stack.fqn = "namespace-stack"
self.step = Step(stack=stack, fn=None)
def test_status(self):
self.assertFalse(self.step.submitted)
self.assertFalse(self.step.completed)
self.step.submit()
self.assertEqual(self.step.status, SUBMITTED)
self.assertTrue(self.step.submitted)
self.assertFalse(self.step.completed)
self.step.complete()
self.assertEqual(self.step.status, COMPLETE)
self.assertNotEqual(self.step.status, SUBMITTED)
self.assertTrue(self.step.submitted)
self.assertTrue(self.step.completed)
self.assertNotEqual(self.step.status, True)
self.assertNotEqual(self.step.status, False)
self.assertNotEqual(self.step.status, 'banana')
class TestPlan(unittest.TestCase):
def setUp(self):
self.count = 0
self.config = Config({"namespace": "namespace"})
self.context = Context(config=self.config)
register_lookup_handler("noop", lambda **kwargs: "test")
def tearDown(self):
unregister_lookup_handler("noop")
def test_plan(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
plan = build_plan(description="Test", steps=[
Step(vpc, fn=None), Step(bastion, fn=None)])
self.assertEqual(plan.graph.to_dict(), {
'bastion.1': set(['vpc.1']),
'vpc.1': set([])})
def test_execute_plan(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
return COMPLETE
plan = build_plan(
description="Test", steps=[Step(vpc, fn), Step(bastion, fn)])
plan.execute(walk)
self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1'])
def test_execute_plan_filtered(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
db = Stack(
definition=generate_definition('db', 1, requires=[vpc.name]),
context=self.context)
app = Stack(
definition=generate_definition('app', 1, requires=[db.name]),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
return COMPLETE
plan = build_plan(
description="Test",
steps=[Step(vpc, fn), Step(db, fn), Step(app, fn)],
targets=['db.1'])
plan.execute(walk)
self.assertEquals(calls, [
'namespace-vpc.1', 'namespace-db.1'])
def test_execute_plan_exception(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
if stack.name == vpc_step.name:
raise ValueError('Boom')
return COMPLETE
vpc_step = Step(vpc, fn)
bastion_step = Step(bastion, fn)
plan = build_plan(description="Test", steps=[vpc_step, bastion_step])
with self.assertRaises(PlanFailed):
plan.execute(walk)
self.assertEquals(calls, ['namespace-vpc.1'])
self.assertEquals(vpc_step.status, FAILED)
def test_execute_plan_skipped(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
if stack.fqn == vpc_step.name:
return SKIPPED
return COMPLETE
vpc_step = Step(vpc, fn)
bastion_step = Step(bastion, fn)
plan = build_plan(description="Test", steps=[vpc_step, bastion_step])
plan.execute(walk)
self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1'])
def test_execute_plan_failed(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
db = Stack(
definition=generate_definition('db', 1),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
if stack.name == vpc_step.name:
return FAILED
return COMPLETE
vpc_step = Step(vpc, fn)
bastion_step = Step(bastion, fn)
db_step = Step(db, fn)
plan = build_plan(description="Test", steps=[
vpc_step, bastion_step, db_step])
with self.assertRaises(PlanFailed):
plan.execute(walk)
calls.sort()
self.assertEquals(calls, ['namespace-db.1', 'namespace-vpc.1'])
def test_execute_plan_cancelled(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
if stack.fqn == vpc_step.name:
raise CancelExecution
return COMPLETE
vpc_step = Step(vpc, fn)
bastion_step = Step(bastion, fn)
plan = build_plan(description="Test", steps=[
vpc_step, bastion_step])
plan.execute(walk)
self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1'])
def test_build_plan_missing_dependency(self):
bastion = Stack(
definition=generate_definition(
'bastion', 1, requires=['vpc.1']),
context=self.context)
with self.assertRaises(GraphError) as expected:
build_plan(description="Test", steps=[Step(bastion, None)])
message_starts = (
"Error detected when adding 'vpc.1' "
"as a dependency of 'bastion.1':"
)
message_contains = "dependent node vpc.1 does not exist"
self.assertTrue(str(expected.exception).startswith(message_starts))
self.assertTrue(message_contains in str(expected.exception))
def test_build_plan_cyclic_dependencies(self):
vpc = Stack(
definition=generate_definition(
'vpc', 1),
context=self.context)
db = Stack(
definition=generate_definition(
'db', 1, requires=['app.1']),
context=self.context)
app = Stack(
definition=generate_definition(
'app', 1, requires=['db.1']),
context=self.context)
with self.assertRaises(GraphError) as expected:
build_plan(
description="Test",
steps=[Step(vpc, None), Step(db, None), Step(app, None)])
message = ("Error detected when adding 'db.1' "
"as a dependency of 'app.1': graph is "
"not acyclic")
self.assertEqual(str(expected.exception), message)
def test_dump(self, *args):
requires = None
steps = []
for i in range(5):
overrides = {
"variables": {
"PublicSubnets": "1",
"SshKeyName": "1",
"PrivateSubnets": "1",
"Random": "${noop something}",
},
"requires": requires,
}
stack = Stack(
definition=generate_definition('vpc', i, **overrides),
context=self.context)
requires = [stack.name]
steps += [Step(stack, None)]
plan = build_plan(description="Test", steps=steps)
tmp_dir = tempfile.mkdtemp()
try:
plan.dump(tmp_dir, context=self.context)
for step in plan.steps:
template_path = os.path.join(
tmp_dir,
stack_template_key_name(step.stack.blueprint))
self.assertTrue(os.path.isfile(template_path))
finally:
shutil.rmtree(tmp_dir)
| true | true |
f7265c6925ef94f77153adae115c2e78a7324353 | 3,058 | py | Python | over_roasted/settings.py | xmedinavei/over_roasted_app | c5b9525c6435d5d2285d86961eb8674108f2c88c | [
"MIT"
] | null | null | null | over_roasted/settings.py | xmedinavei/over_roasted_app | c5b9525c6435d5d2285d86961eb8674108f2c88c | [
"MIT"
] | null | null | null | over_roasted/settings.py | xmedinavei/over_roasted_app | c5b9525c6435d5d2285d86961eb8674108f2c88c | [
"MIT"
] | null | null | null | from pathlib import Path
import django_heroku
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3+7ekm6aghztb!h1b@xcvvjid8$o%rb7bb3bha446)d1pk573*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
# Django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Local apps
'users',
'recipes',
# Third-party apps
'django_extensions',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'over_roasted.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR.joinpath('templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'over_roasted.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/users/login/'
LOGIN_REDIRECT_URL = '/recipes/'
LOGOUT_REDIRECT_URL = LOGIN_URL
django_heroku.settings(locals())
| 24.66129 | 91 | 0.692283 | from pathlib import Path
import django_heroku
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = '3+7ekm6aghztb!h1b@xcvvjid8$o%rb7bb3bha446)d1pk573*'
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
# Django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Local apps
'users',
'recipes',
# Third-party apps
'django_extensions',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'over_roasted.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR.joinpath('templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'over_roasted.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/users/login/'
LOGIN_REDIRECT_URL = '/recipes/'
LOGOUT_REDIRECT_URL = LOGIN_URL
django_heroku.settings(locals())
| true | true |
f7265d7477ff3fba1b5e7f80d15d88b7c11ed07e | 1,092 | py | Python | examples/finalterm-shell-integration.py | davidbrochart/python-prompt-toolkit | 8498692b31671fee7c5a426300a9df2ee290eae2 | [
"BSD-3-Clause"
] | 2 | 2020-04-12T01:23:25.000Z | 2021-05-22T13:46:00.000Z | examples/finalterm-shell-integration.py | davidbrochart/python-prompt-toolkit | 8498692b31671fee7c5a426300a9df2ee290eae2 | [
"BSD-3-Clause"
] | null | null | null | examples/finalterm-shell-integration.py | davidbrochart/python-prompt-toolkit | 8498692b31671fee7c5a426300a9df2ee290eae2 | [
"BSD-3-Clause"
] | 2 | 2016-12-30T23:57:44.000Z | 2021-05-22T13:50:21.000Z | #!/usr/bin/env python
"""
Mark the start and end of the prompt with Final term (iterm2) escape sequences.
See: https://iterm2.com/finalterm.html
"""
from __future__ import unicode_literals
from prompt_toolkit import prompt
from prompt_toolkit.token import Token
import sys
BEFORE_PROMPT = '\033]133;A\a'
AFTER_PROMPT = '\033]133;B\a'
BEFORE_OUTPUT = '\033]133;C\a'
AFTER_OUTPUT = '\033]133;D;{command_status}\a' # command_status is the command status, 0-255
def get_prompt_tokens(cli):
    """Build the prompt tokens, bracketed by the Final Term A/B marks.

    The marks are emitted as `ZeroWidthEscape` tokens: that token type must
    only carry raw output that is guaranteed not to move the cursor,
    otherwise the prompt's width accounting would break.
    """
    tokens = [(Token.ZeroWidthEscape, BEFORE_PROMPT)]
    tokens.append((Token, 'Say something: # '))
    tokens.append((Token.ZeroWidthEscape, AFTER_PROMPT))
    return tokens
if __name__ == '__main__':
    answer = prompt(get_prompt_tokens=get_prompt_tokens)
    # Bracket the command output between the Final Term C ("output starts")
    # and D ("command finished") marks; 0 reports a successful exit status.
    sys.stdout.write(BEFORE_OUTPUT)
    print('You said: %s' % answer)
    sys.stdout.write(AFTER_OUTPUT.format(command_status=0))
from __future__ import unicode_literals
from prompt_toolkit import prompt
from prompt_toolkit.token import Token
import sys
BEFORE_PROMPT = '\033]133;A\a'
AFTER_PROMPT = '\033]133;B\a'
BEFORE_OUTPUT = '\033]133;C\a'
AFTER_OUTPUT = '\033]133;D;{command_status}\a'
def get_prompt_tokens(cli):
return [
(Token.ZeroWidthEscape, BEFORE_PROMPT),
(Token, 'Say something: # '),
(Token.ZeroWidthEscape, AFTER_PROMPT),
]
if __name__ == '__main__':
answer = prompt(get_prompt_tokens=get_prompt_tokens)
sys.stdout.write(BEFORE_OUTPUT)
print('You said: %s' % answer)
sys.stdout.write(AFTER_OUTPUT.format(command_status=0))
| true | true |
f7265e08d1ad65a2a3c1ac2d5369c00df1bea063 | 2,164 | py | Python | tests/unit/utils/pyxb_utils.py | MaxTakahashi/hammr | cfe593ccfdddb7f98185e561feed6a40a866b585 | [
"Apache-2.0"
] | null | null | null | tests/unit/utils/pyxb_utils.py | MaxTakahashi/hammr | cfe593ccfdddb7f98185e561feed6a40a866b585 | [
"Apache-2.0"
] | null | null | null | tests/unit/utils/pyxb_utils.py | MaxTakahashi/hammr | cfe593ccfdddb7f98185e561feed6a40a866b585 | [
"Apache-2.0"
] | null | null | null | # Copyright 2007-2017 UShareSoft SAS, All rights reserved
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# When anonymous type are used inside uforge.xsd, anonymous type such as "CTD_ANON_238" will be created inside uforge.py.
# We can't use directly these types in unit test for mocking otherwise unit tests may failed each time the uforge.xsd
# is slightly modified.
# So to avoid that, the type to used is retrieved dynamically using the PyXB internal attributes.
# This method returns the type of the element in a list attributes
# Example of use:
# regionType = get_pyXB_anon_type_for_list(regions.regionEntities)
# region = regionType()
# regions.regionEntities.append(region)
def get_pyXB_anon_type_for_list_attrb(list_attrb):
    """Return the (possibly anonymous) PyXB type bound to *list_attrb*'s elements.

    The lookup goes through PyXB's private binding attributes so that tests
    never have to hard-code generated class names such as "CTD_ANON_238".
    """
    element_binding = getattr(list_attrb, "_PluralBinding__elementBinding")
    return getattr(element_binding, "_element__typeDefinition")
# When anonymous types are used inside uforge.xsd, anonymous types such as "CTD_ANON_238" will be created inside uforge.py.
# We can't use these types directly in unit tests for mocking, otherwise unit tests may fail each time the uforge.xsd
# is slightly modified.
# So to avoid that, the type to use is retrieved dynamically using the PyXB internal attributes.
# This method returns the type of the attribute "attrb_name" in the "attrb_holder" object.
# Example of use:
#   flavorType = get_pyXB_anon_type_for_simple_attrb(flavors, "flavor")
#   flavor = flavorType()
#   flavors.flavor.append(flavor)
def get_pyXB_anon_type_for_simple_attrb(attrb_holder, attrb_name):
return getattr(attrb_holder, "_" + type(
attrb_holder).__name__ + "__" + attrb_name)._ElementDeclaration__elementBinding._element__typeDefinition | 55.487179 | 121 | 0.77634 |
# is slightly modified.
# So to avoid that, the type to used is retrieved dynamically using the PyXB internal attributes.
# This method returns the type of the element in a list attributes
# Example of use:
# regionType = get_pyXB_anon_type_for_list(regions.regionEntities)
# region = regionType()
# regions.regionEntities.append(region)
def get_pyXB_anon_type_for_list_attrb(list_attrb):
return list_attrb._PluralBinding__elementBinding._element__typeDefinition
# When anonymous type are used inside uforge.xsd, anonymous type such as "CTD_ANON_238" will be created inside uforge.py.
# We can't use directly these types in unit test for mocking otherwise unit tests may failed each time the uforge.xsd
def get_pyXB_anon_type_for_simple_attrb(attrb_holder, attrb_name):
return getattr(attrb_holder, "_" + type(
attrb_holder).__name__ + "__" + attrb_name)._ElementDeclaration__elementBinding._element__typeDefinition | true | true |
f7265e3a53380992aefc3e1f0e5022e3b36f9db4 | 4,782 | py | Python | ADSref.py | SimonJMurphy/ADSref | c144d077d622987c42d5cf8d9d111afa04073af2 | [
"MIT"
] | 2 | 2021-01-14T10:13:32.000Z | 2021-01-31T23:51:09.000Z | ADSref.py | SimonJMurphy/ADSref | c144d077d622987c42d5cf8d9d111afa04073af2 | [
"MIT"
] | null | null | null | ADSref.py | SimonJMurphy/ADSref | c144d077d622987c42d5cf8d9d111afa04073af2 | [
"MIT"
] | null | null | null | import ads
ads.config.token = 'my token'
import numpy as np
# Filenames
## Enter the filename for first-author publications here:
first_author = "first_author.bib"
## Enter the filename for co-authored publications here:
co_author = "co_author.bib"
# Function Declarations
def extract_bibcodes(filename):
    """Parse a .bib file and return the list of refereed bibcodes.

    Each BibTeX entry opens with a line like ``@ARTICLE{bibcode,``; the
    bibcode is taken from that first line.  Entries whose first line matches
    a known non-refereed pattern (arXiv preprints, catalogs, theses,
    conference series, observing proposals, ...) are dropped.

    Args:
        filename: Path to the .bib file to parse.

    Returns:
        List of bibcode strings, in file order.
    """
    # Workaround: could not get the ADS API to accept property:refereed or
    # property=refereed when searching, so non-refereed entries are filtered
    # out here by bibcode substring instead.
    exclude = ['arXiv', 'tmp', 'yCat', 'PhDT', 'AAS', 'ASPC', 'BSRSL',
               'conf', 'EPJWC', 'IAUFM', 'IAUGA', 'IAUS', 'hst', 'iue',
               'jwst', 'spzr', 'prop']
    bibcodes = []
    # "with" guarantees the handle is closed (the original leaked it), and
    # iterating the file avoids reading it into memory all at once.
    with open(filename) as f:
        for line in f:
            if not line.startswith("@"):
                continue
            if any(x in line for x in exclude):
                continue
            # "@ARTICLE{2020MNRAS.495..333M," -> "2020MNRAS.495..333M"
            # rstrip() then rstrip(",") also copes with a missing final newline.
            bibcodes.append(line.split("{")[1].rstrip().rstrip(","))
    return bibcodes
def author_format(authors):
    """Format an author list: a single name, "A \\& B", or "First et al."."""
    if len(authors) >= 3:
        return authors[0] + ' et al.'
    if len(authors) == 2:
        return authors[0] + " \& " + authors[1]
    return authors[0]
def journal_name(bibcode):
    """Extract the LaTeX-escaped journal abbreviation from an ADS bibcode."""
    # Bibcodes look like "2020MNRAS.495..333M": after the 4-digit year, the
    # first dot-separated field is the journal abbreviation.
    abbreviation = bibcode.split(".", 1)[0][4:]
    return abbreviation.replace("&", "\&")
def adsurl(bibcode):
    """Return the ADS abstract-page URL for *bibcode*."""
    return "https://ui.adsabs.harvard.edu/abs/{}".format(bibcode)
def latex_title_greek(title):
    """Replace Greek unicode letters in *title* with LaTeX math commands."""
    # Same letter set as before (no omicron/upsilon -- they were never mapped).
    replacements = {
        "α": r"$\alpha$", "β": r"$\beta$", "γ": r"$\gamma$", "δ": r"$\delta$",
        "ε": r"$\epsilon$", "ζ": r"$\zeta$", "η": r"$\eta$", "ι": r"$\iota$",
        "θ": r"$\theta$", "κ": r"$\kappa$", "λ": r"$\lambda$", "μ": r"$\mu$",
        "ν": r"$\nu$", "ξ": r"$\xi$", "π": r"$\pi$", "ρ": r"$\rho$",
        "σ": r"$\sigma$", "τ": r"$\tau$", "φ": r"$\phi$", "χ": r"$\chi$",
        "ψ": r"$\psi$", "ω": r"$\omega$"}
    for letter, command in replacements.items():
        title = title.replace(letter, command)
    return title
def citation_formatter(cites):
    """Render a citation count for the entry footer; empty when uncited.

    Single-digit counts are padded with a LaTeX ``\\phantom{1}`` so they
    right-align with two-digit counts.
    """
    if not cites:  # None and 0 both mean "show nothing"
        return ""
    if cites < 10:
        return "Cited: \\phantom{1}" + str(cites)
    return "Cited: " + str(cites)
def latex_strings(paper_list):
    """Build one LaTeX-formatted reference line per paper.

    Entries are numbered in reverse (the first paper gets the highest
    number) and the first line is prefixed with ``\\noindent``.
    """
    total = len(paper_list)
    entries = []
    for index, paper in enumerate(paper_list):
        # Bold entry number, then the title as a hyperlink to the ADS page.
        heading = ("\\textbf{" + str(total - index) + "}. "
                   + "\\href{" + adsurl(paper.bibcode) + "}"
                   + "{" + latex_title_greek(paper.title[0]) + "}"
                   + "\\\\")
        # Author list, year, journal, volume, page, then the right-aligned
        # citation count.
        reference = (author_format(paper.author)
                     + " (" + str(paper.year) + ") "
                     + journal_name(paper.bibcode)
                     + " " + str(paper.volume) + ","
                     + " " + str(paper.page[0]) + "."
                     + ' \\hspace*{\\fill}'
                     + citation_formatter(paper.citation_count)
                     + "\\vspace{1mm}" + "\\\\")
        entries.append(heading + reference)
    entries[0] = "\\noindent " + entries[0]
    return entries
def export_latex(filename, latex_string_list):
    """Write each string in *latex_string_list* to *filename*, one per line.

    Args:
        filename: Destination path (overwritten if it already exists).
        latex_string_list: Iterable of LaTeX source lines (without newlines).

    Returns:
        The literal string "Saved." (kept because the callers may print it).
    """
    # The context manager guarantees the handle is closed even if a write
    # fails (the original leaked the handle on error).
    with open(filename, "w") as f:
        for line in latex_string_list:
            f.write(line + "\n")
    return "Saved."
# Parse bibcodes
print("Parsing bibcodes...")
bibcodes = extract_bibcodes(first_author)
co_bibcodes = extract_bibcodes(co_author)

# Search for papers and their attributes from bibcodes
print("Querying the ADS API for paper metadata... This may take a while if there are many entries...")
_FIELDS = ['bibcode', 'title', 'author', 'year', 'volume', 'page', 'citation_count']
papers = [list(ads.SearchQuery(bibcode=bibcode, fl=_FIELDS))[0] for bibcode in bibcodes]
co_papers = [list(ads.SearchQuery(bibcode=bibcode, fl=_FIELDS))[0] for bibcode in co_bibcodes]

# Remove Errata
## Because Ew. And if anyone cares about the paper content they'll discover errata when they visit the ADS pages.
# BUG FIX: the original called list.remove() while iterating the same list,
# which skips the element right after every removal, so consecutive errata
# could survive the filter.  Rebuilding the lists avoids that.
print("Dropping Errata, Corrigenda...")
papers = [p for p in papers
          if "Erratum" not in p.title[0] and "Corrigendum" not in p.title[0]]
co_papers = [p for p in co_papers
             if "Erratum" not in p.title[0] and "Corrigendum" not in p.title[0]]

# Sum citations (citation_count can be None for brand-new papers).
first_author_cites = sum(p.citation_count for p in papers
                         if p.citation_count is not None)
co_author_cites = sum(p.citation_count for p in co_papers
                      if p.citation_count is not None)

# Compile LaTeX string
print("Compiling LaTeX strings...")
output = latex_strings(papers)
co_output = latex_strings(co_papers)

# Export to LaTeX
print("Exporting to LaTeX...")
export_latex("first_author.tex", output)
export_latex("co_author.tex", co_output)

print(f"\nThere are {len(papers)} first-author papers, and {len(co_papers)} co-authored papers.")
print(f"They have a total of {first_author_cites} and {co_author_cites} citations, respectively.")
print("\n\n.tex files prepared. Now run:\n")
print("\t pdflatex publications.tex\n\n\n") | 32.97931 | 367 | 0.622961 | import ads
ads.config.token = 'my token'
import numpy as np
en(filename)
full_list = f.readlines()
bibcodes = []
exclude = ['arXiv','tmp','yCat','PhDT','AAS','ASPC','BSRSL','conf','EPJWC','IAUFM','IAUGA','IAUS','hst','iue','jwst','spzr','prop']
for line in full_list:
if line[0] == "@":
if not any(x in line for x in exclude):
bibcodes.append(line.split("{")[1].replace(",\n",""))
return bibcodes
def author_format(authors):
if len(authors) == 1:
a = authors[0]
elif len(authors) == 2:
a = authors[0] + " \& " + authors[1]
else:
a = authors[0] + ' et al.'
return a
def journal_name(bibcode):
return bibcode.split(".")[0][4:].replace("&","\&")
def adsurl(bibcode):
return 'https://ui.adsabs.harvard.edu/abs/' + bibcode
def latex_title_greek(title):
greek_dict = {"α":r"$\alpha$", "β":r"$\beta$", "γ":r"$\gamma$", "δ":r"$\delta$", "ε":r"$\epsilon$", "ζ":r"$\zeta$", "η":r"$\eta$", "ι":r"$\iota$", "θ":r"$\theta$", "κ":r"$\kappa$", "λ":r"$\lambda$", "μ":r"$\mu$", "ν":r"$\nu$", "ξ":r"$\xi$", "π":r"$\pi$", "ρ":r"$\rho$", "σ":r"$\sigma$", "τ":r"$\tau$", "φ":r"$\phi$", "χ":r"$\chi$", "ψ":r"$\psi$", "ω":r"$\omega$"}
for key in greek_dict.keys():
title = title.replace(key, greek_dict[key])
return title
def citation_formatter(cites):
if cites is None:
return ""
elif cites == 0:
return ""
else:
if cites < 10:
return f"Cited: \\phantom" + "{1}" + f"{cites}"
else:
return f"Cited: {cites}"
def latex_strings(paper_list):
output = []
n = len(paper_list)
for i,p in enumerate(paper_list):
title = p.title[0]
entry = "\\textbf{" + f"{n-i}" + "}. " + '\\' + 'href{' + adsurl(p.bibcode) + "}{" + f"{latex_title_greek(title)}" + "}" + "\\\\"
entry += author_format(p.author)
entry += f" ({p.year}) "
entry += journal_name(p.bibcode)
entry += f" {p.volume},"
entry += f" {p.page[0]}."
entry += ' \\hspace*{\\fill}' + citation_formatter(p.citation_count) + "\\vspace{1mm}" + "\\\\"
output.append(entry)
output[0] = "\\noindent " + output[0]
return output
def export_latex(filename,latex_string_list):
f = open(filename,"w")
for line in latex_string_list:
f.write(line+"\n")
f.close()
return "Saved."
# Parse bibcodes
print("Parsing bibcodes...")
bibcodes = extract_bibcodes(first_author)
co_bibcodes = extract_bibcodes(co_author)
# Search for papers and their attributes from bibcodes
print("Querying the ADS API for paper metadata... This may take a while if there are many entries...")
papers = [list(ads.SearchQuery(bibcode=bibcode, fl=['bibcode', 'title', 'author', 'year', 'volume', 'page', 'citation_count']))[0] for bibcode in bibcodes]
co_papers = [list(ads.SearchQuery(bibcode=bibcode, fl=['bibcode', 'title', 'author', 'year', 'volume', 'page', 'citation_count']))[0] for bibcode in co_bibcodes]
# Remove Errata
## Because Ew. And if anyone cares about the paper content they'll discover errata when they visit the ADS pages.
print("Dropping Errata, Corrigenda...")
for p in papers:
if "Erratum" in p.title[0] or "Corrigendum" in p.title[0]:
papers.remove(p)
for p in co_papers:
if "Erratum" in p.title[0] or "Corrigendum" in p.title[0]:
co_papers.remove(p)
first_author_cites = 0
co_author_cites = 0
for p in papers:
if p.citation_count is not None:
first_author_cites += p.citation_count
for p in co_papers:
if p.citation_count is not None:
co_author_cites += p.citation_count
print("Compiling LaTeX strings...")
output = latex_strings(papers)
co_output = latex_strings(co_papers)
print("Exporting to LaTeX...")
export_latex("first_author.tex",output)
export_latex("co_author.tex",co_output)
print(f"\nThere are {len(papers)} first-author papers, and {len(co_papers)} co-authored papers.")
print(f"They have a total of {first_author_cites} and {co_author_cites} citations, respectively.")
print("\n\n.tex files prepared. Now run:\n")
print("\t pdflatex publications.tex\n\n\n") | true | true |
f7265e57b6d0c1d7e79981da52b990daf420ccab | 7,670 | py | Python | tests/test_breadcrumbs.py | PavloKapyshin/paka.breadcrumbs | ae57d1a0d609ab39f81c4b0b44d6f7081602b079 | [
"BSD-3-Clause"
] | 1 | 2018-10-28T03:02:03.000Z | 2018-10-28T03:02:03.000Z | tests/test_breadcrumbs.py | PavloKapyshin/paka.breadcrumbs | ae57d1a0d609ab39f81c4b0b44d6f7081602b079 | [
"BSD-3-Clause"
] | null | null | null | tests/test_breadcrumbs.py | PavloKapyshin/paka.breadcrumbs | ae57d1a0d609ab39f81c4b0b44d6f7081602b079 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from paka.breadcrumbs import Bread, Crumb
class BreadcrumbsTest(unittest.TestCase):
    """Tests for Bread construction, iteration, indexing and crumb adding."""

    def setUp(self):
        self.site_name = "Some site Name"
    def test_breadcrumbs_can_be_converted_to_list(self):
        crumbs = list(Bread(self.site_name))
        self.assertGreater(len(crumbs), 0)
    def test_breadcrumbs_can_be_indexed(self):
        self.assertIsInstance(Bread(self.site_name)[0], Crumb)
    def test_default_site_crumb(self):
        # A fresh Bread holds exactly one crumb: the site root.
        crumb, = Bread(self.site_name)
        self.assertEqual(crumb.label, self.site_name)
        self.assertEqual(crumb.heading, self.site_name)
        self.assertEqual(crumb.url_path, "/")
        self.assertEqual(crumb.extra, {})
    def test_changed_site_url_path(self):
        url_path = "/some/other/"
        crumb, = Bread(self.site_name, url_path=url_path)
        self.assertEqual(crumb.url_path, url_path)
    def test_changed_site_heading(self):
        heading = "something different"
        crumb, = Bread(self.site_name, heading=heading)
        self.assertEqual(crumb.label, self.site_name)
        self.assertEqual(crumb.heading, heading)
    def test_changed_site_extra(self):
        extra = {"a": 1, "b": 2}
        crumb, = Bread(self.site_name, extra=extra)
        self.assertEqual(crumb.extra, extra)
    def test_adding_is_done_in_correct_order(self):
        bcrumbs = Bread(self.site_name)
        label, heading, url_path, extra = "Label", "Heading", "/test/", {1: 2}
        bcrumbs.add(label, heading=heading, url_path=url_path, extra=extra)
        site_crumb, test_crumb = bcrumbs
        self.assertEqual(site_crumb.label, self.site_name)
        self.assertEqual(site_crumb.heading, self.site_name)
        self.assertEqual(site_crumb.url_path, "/")
        self.assertEqual(site_crumb.extra, {})
        self.assertEqual(test_crumb.label, label)
        self.assertEqual(test_crumb.heading, heading)
        self.assertEqual(test_crumb.url_path, url_path)
        self.assertEqual(test_crumb.extra, extra)
    def test_adding_defaults(self):
        # heading defaults to the label; url_path defaults to None.
        label = "some label"
        bcrumbs = Bread(self.site_name)
        bcrumbs.add(label)
        test_crumb = bcrumbs[1]
        self.assertEqual(test_crumb.label, label)
        self.assertEqual(test_crumb.heading, label)
        self.assertIsNone(test_crumb.url_path)
        self.assertEqual(test_crumb.extra, {})
    def test_adding_crumb(self):
        expected_crumb = Crumb(
            "Crumb here", heading="H", url_path="url", extra={1: 2})
        bcrumbs = Bread(self.site_name)
        bcrumbs.add_crumb(expected_crumb)
        site_crumb, test_crumb = bcrumbs
        self.assertEqual(expected_crumb, test_crumb)
    def test_from_crumb(self):
        expected_crumb = Crumb(
            "Crumb here", heading="H", url_path="url", extra={1: 2})
        bcrumbs = Bread.from_crumb(expected_crumb)
        crumb, = bcrumbs
        self.assertEqual(expected_crumb, crumb)
    def test_from_crumbs(self):
        crumbs = (
            Crumb(self.site_name, extra={1: "one"}, url_path="/"),
            Crumb("Second", url_path="/second/"),
            Crumb("Third"))
        bcrumbs = Bread.from_crumbs(crumbs)
        for expected, actual in zip_longest(crumbs, bcrumbs):
            self.assertEqual(expected, actual)
    def test_from_empty_crumbs(self):
        with self.assertRaises(ValueError):
            Bread.from_crumbs(())
class BreadcrumbsTitleTest(unittest.TestCase):
    """Tests for Bread.get_title: separator escaping and Mako rendering."""

    def setUp(self):
        # Imported lazily so the rest of the suite runs without these deps.
        from markupsafe import Markup
        from mako.template import Template
        self.markup_class = Markup
        self.template_class = Template
        self.site_name = "Some site Name"
    def test_getting_title_with_one_crumb(self):
        bcrumbs = Bread(self.site_name)
        result = bcrumbs.get_title("←")
        self.assertEqual(result, self.site_name)
        self.assertIsInstance(result, self.markup_class)
    def test_getting_title_with_several_crumbs(self):
        bcrumbs = Bread(self.site_name)
        bcrumbs.add("Subsection", heading="something", url_path="/sub")
        bcrumbs.add("<sub-sub>", heading="Subsubsection", url_path="/sub/sub")
        bcrumbs.add("here")
        # (separator, expected title template) pairs; {} is the site name.
        cases = (
            ("sep", "here sep <sub-sub> sep Subsection sep {}"),
            ("←", "here ← <sub-sub> ← Subsection ← {}"),
            ("<", "here < <sub-sub> < Subsection < {}"),
            (
                "<",
                "here &lt; <sub-sub> "
                "&lt; Subsection &lt; {}"),
            (
                self.markup_class("<"),
                "here < <sub-sub> < Subsection < {}"))
        for sep, tmpl in cases:
            result = bcrumbs.get_title(sep)
            self.assertIsInstance(result, self.markup_class)
            self.assertEqual(result, tmpl.format(self.site_name))
    def test_template_rendering(self):
        bcrumbs = Bread(self.site_name)
        bcrumbs.add("Subsection", heading="something", url_path="/sub")
        bcrumbs.add("<sub-sub>", heading="Subsubsection", url_path="/sub/sub")
        bcrumbs.add("here")
        title = bcrumbs.get_title("<")
        expected = (
            "<title>here < <sub-sub> < Subsection "
            "< {}</title>").format(self.site_name)
        template_string = "<title>${title}</title>"
        # Rendering must give identical output with and without the "h"
        # (HTML-escape) default filter.
        self.assertEqual(
            self.template_class(
                template_string, default_filters=["h"]).render(title=title),
            expected)
        self.assertEqual(
            self.template_class(template_string).render(title=title),
            expected)
class CrumbTest(unittest.TestCase):
    """Tests for Crumb: url_path normalization and field-wise equality."""

    def test_empty_url_path_results_in_none(self):
        crumb = Crumb("label", url_path="")
        self.assertIsNone(crumb.url_path)
    def test_equality_defaults(self):
        args, kwargs = ("a", ), {}
        crumb_a = Crumb(*args, **kwargs)
        crumb_b = Crumb(*args, **kwargs)
        self.assertEqual(crumb_a, crumb_b)
    def test_equality_same_kwargs(self):
        kwargs = {
            "label": "Some label", "url_path": "/url/path",
            "heading": "Same", "extra": {0: 1}}
        crumb_a = Crumb(**kwargs)
        crumb_b = Crumb(**kwargs)
        self.assertEqual(crumb_a, crumb_b)
    def test_equality_different_label(self):
        same_kwargs = {
            "url_path": "/url/path", "heading": "Same", "extra": {1: 2}}
        crumb_a = Crumb(label="a", **same_kwargs)
        crumb_b = Crumb(label="b", **same_kwargs)
        self.assertNotEqual(crumb_a, crumb_b)
    def test_equality_different_url_path(self):
        same_kwargs = {
            "label": "Same", "heading": "Same too", "extra": {3: 4}}
        crumb_a = Crumb(url_path="a", **same_kwargs)
        crumb_b = Crumb(url_path="b", **same_kwargs)
        self.assertNotEqual(crumb_a, crumb_b)
    def test_equality_different_heading(self):
        same_kwargs = {
            "url_path": "/url/path", "label": "Same", "extra": {5: 6}}
        crumb_a = Crumb(heading="a", **same_kwargs)
        crumb_b = Crumb(heading="b", **same_kwargs)
        self.assertNotEqual(crumb_a, crumb_b)
    def test_equality_different_extra(self):
        same_kwargs = {
            "url_path": "/url/path", "heading": "Same", "label": "Same too"}
        crumb_a = Crumb(extra={"a": 1}, **same_kwargs)
        crumb_b = Crumb(extra={"b": 2}, **same_kwargs)
        self.assertNotEqual(crumb_a, crumb_b)
| 36.52381 | 78 | 0.619426 |
from __future__ import unicode_literals
import unittest
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from paka.breadcrumbs import Bread, Crumb
class BreadcrumbsTest(unittest.TestCase):
def setUp(self):
self.site_name = "Some site Name"
def test_breadcrumbs_can_be_converted_to_list(self):
crumbs = list(Bread(self.site_name))
self.assertGreater(len(crumbs), 0)
def test_breadcrumbs_can_be_indexed(self):
self.assertIsInstance(Bread(self.site_name)[0], Crumb)
def test_default_site_crumb(self):
crumb, = Bread(self.site_name)
self.assertEqual(crumb.label, self.site_name)
self.assertEqual(crumb.heading, self.site_name)
self.assertEqual(crumb.url_path, "/")
self.assertEqual(crumb.extra, {})
def test_changed_site_url_path(self):
url_path = "/some/other/"
crumb, = Bread(self.site_name, url_path=url_path)
self.assertEqual(crumb.url_path, url_path)
def test_changed_site_heading(self):
heading = "something different"
crumb, = Bread(self.site_name, heading=heading)
self.assertEqual(crumb.label, self.site_name)
self.assertEqual(crumb.heading, heading)
def test_changed_site_extra(self):
extra = {"a": 1, "b": 2}
crumb, = Bread(self.site_name, extra=extra)
self.assertEqual(crumb.extra, extra)
def test_adding_is_done_in_correct_order(self):
bcrumbs = Bread(self.site_name)
label, heading, url_path, extra = "Label", "Heading", "/test/", {1: 2}
bcrumbs.add(label, heading=heading, url_path=url_path, extra=extra)
site_crumb, test_crumb = bcrumbs
self.assertEqual(site_crumb.label, self.site_name)
self.assertEqual(site_crumb.heading, self.site_name)
self.assertEqual(site_crumb.url_path, "/")
self.assertEqual(site_crumb.extra, {})
self.assertEqual(test_crumb.label, label)
self.assertEqual(test_crumb.heading, heading)
self.assertEqual(test_crumb.url_path, url_path)
self.assertEqual(test_crumb.extra, extra)
def test_adding_defaults(self):
label = "some label"
bcrumbs = Bread(self.site_name)
bcrumbs.add(label)
test_crumb = bcrumbs[1]
self.assertEqual(test_crumb.label, label)
self.assertEqual(test_crumb.heading, label)
self.assertIsNone(test_crumb.url_path)
self.assertEqual(test_crumb.extra, {})
def test_adding_crumb(self):
expected_crumb = Crumb(
"Crumb here", heading="H", url_path="url", extra={1: 2})
bcrumbs = Bread(self.site_name)
bcrumbs.add_crumb(expected_crumb)
site_crumb, test_crumb = bcrumbs
self.assertEqual(expected_crumb, test_crumb)
def test_from_crumb(self):
expected_crumb = Crumb(
"Crumb here", heading="H", url_path="url", extra={1: 2})
bcrumbs = Bread.from_crumb(expected_crumb)
crumb, = bcrumbs
self.assertEqual(expected_crumb, crumb)
def test_from_crumbs(self):
crumbs = (
Crumb(self.site_name, extra={1: "one"}, url_path="/"),
Crumb("Second", url_path="/second/"),
Crumb("Third"))
bcrumbs = Bread.from_crumbs(crumbs)
for expected, actual in zip_longest(crumbs, bcrumbs):
self.assertEqual(expected, actual)
def test_from_empty_crumbs(self):
with self.assertRaises(ValueError):
Bread.from_crumbs(())
class BreadcrumbsTitleTest(unittest.TestCase):
def setUp(self):
from markupsafe import Markup
from mako.template import Template
self.markup_class = Markup
self.template_class = Template
self.site_name = "Some site Name"
def test_getting_title_with_one_crumb(self):
bcrumbs = Bread(self.site_name)
result = bcrumbs.get_title("←")
self.assertEqual(result, self.site_name)
self.assertIsInstance(result, self.markup_class)
def test_getting_title_with_several_crumbs(self):
bcrumbs = Bread(self.site_name)
bcrumbs.add("Subsection", heading="something", url_path="/sub")
bcrumbs.add("<sub-sub>", heading="Subsubsection", url_path="/sub/sub")
bcrumbs.add("here")
cases = (
("sep", "here sep <sub-sub> sep Subsection sep {}"),
("←", "here ← <sub-sub> ← Subsection ← {}"),
("<", "here < <sub-sub> < Subsection < {}"),
(
"<",
"here &lt; <sub-sub> "
"&lt; Subsection &lt; {}"),
(
self.markup_class("<"),
"here < <sub-sub> < Subsection < {}"))
for sep, tmpl in cases:
result = bcrumbs.get_title(sep)
self.assertIsInstance(result, self.markup_class)
self.assertEqual(result, tmpl.format(self.site_name))
def test_template_rendering(self):
bcrumbs = Bread(self.site_name)
bcrumbs.add("Subsection", heading="something", url_path="/sub")
bcrumbs.add("<sub-sub>", heading="Subsubsection", url_path="/sub/sub")
bcrumbs.add("here")
title = bcrumbs.get_title("<")
expected = (
"<title>here < <sub-sub> < Subsection "
"< {}</title>").format(self.site_name)
template_string = "<title>${title}</title>"
self.assertEqual(
self.template_class(
template_string, default_filters=["h"]).render(title=title),
expected)
self.assertEqual(
self.template_class(template_string).render(title=title),
expected)
class CrumbTest(unittest.TestCase):
def test_empty_url_path_results_in_none(self):
crumb = Crumb("label", url_path="")
self.assertIsNone(crumb.url_path)
def test_equality_defaults(self):
args, kwargs = ("a", ), {}
crumb_a = Crumb(*args, **kwargs)
crumb_b = Crumb(*args, **kwargs)
self.assertEqual(crumb_a, crumb_b)
def test_equality_same_kwargs(self):
kwargs = {
"label": "Some label", "url_path": "/url/path",
"heading": "Same", "extra": {0: 1}}
crumb_a = Crumb(**kwargs)
crumb_b = Crumb(**kwargs)
self.assertEqual(crumb_a, crumb_b)
def test_equality_different_label(self):
same_kwargs = {
"url_path": "/url/path", "heading": "Same", "extra": {1: 2}}
crumb_a = Crumb(label="a", **same_kwargs)
crumb_b = Crumb(label="b", **same_kwargs)
self.assertNotEqual(crumb_a, crumb_b)
def test_equality_different_url_path(self):
same_kwargs = {
"label": "Same", "heading": "Same too", "extra": {3: 4}}
crumb_a = Crumb(url_path="a", **same_kwargs)
crumb_b = Crumb(url_path="b", **same_kwargs)
self.assertNotEqual(crumb_a, crumb_b)
def test_equality_different_heading(self):
same_kwargs = {
"url_path": "/url/path", "label": "Same", "extra": {5: 6}}
crumb_a = Crumb(heading="a", **same_kwargs)
crumb_b = Crumb(heading="b", **same_kwargs)
self.assertNotEqual(crumb_a, crumb_b)
def test_equality_different_extra(self):
same_kwargs = {
"url_path": "/url/path", "heading": "Same", "label": "Same too"}
crumb_a = Crumb(extra={"a": 1}, **same_kwargs)
crumb_b = Crumb(extra={"b": 2}, **same_kwargs)
self.assertNotEqual(crumb_a, crumb_b)
| true | true |
f72660bfc1f52e8bce65d7defe6754157512b67a | 3,196 | py | Python | grr/server/grr_response_server/databases/mem.py | billstackpole/grr | 203a0a99990a2d4004aed84a5cd822cbda2b418c | [
"Apache-2.0"
] | 1 | 2019-03-28T07:09:41.000Z | 2019-03-28T07:09:41.000Z | grr/server/grr_response_server/databases/mem.py | gingogo/grr | 203a0a99990a2d4004aed84a5cd822cbda2b418c | [
"Apache-2.0"
] | null | null | null | grr/server/grr_response_server/databases/mem.py | gingogo/grr | 203a0a99990a2d4004aed84a5cd822cbda2b418c | [
"Apache-2.0"
] | 1 | 2018-08-30T14:50:24.000Z | 2018-08-30T14:50:24.000Z | #!/usr/bin/env python
"""An in memory database implementation used for testing."""
import sys
import threading
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_server import db
from grr_response_server.databases import mem_blobs
from grr_response_server.databases import mem_clients
from grr_response_server.databases import mem_cronjobs
from grr_response_server.databases import mem_events
from grr_response_server.databases import mem_flows
from grr_response_server.databases import mem_foreman_rules
from grr_response_server.databases import mem_paths
from grr_response_server.databases import mem_users
from grr_response_server.rdfvalues import objects as rdf_objects
# pyformat: disable
class InMemoryDB(mem_blobs.InMemoryDBBlobsMixin,
                 mem_clients.InMemoryDBClientMixin,
                 mem_cronjobs.InMemoryDBCronJobMixin,
                 mem_events.InMemoryDBEventMixin,
                 mem_flows.InMemoryDBFlowMixin,
                 mem_foreman_rules.InMemoryDBForemanRulesMixin,
                 mem_paths.InMemoryDBPathMixin,
                 mem_users.InMemoryDBUsersMixin,
                 db.Database):
  """An in memory database implementation used for testing."""
  # pyformat: enable

  def __init__(self):
    super(InMemoryDB, self).__init__()
    self._Init()
    self.lock = threading.RLock()

  def _Init(self):
    """(Re)creates every in-memory table.

    Called from __init__ and ClearTestDB.  Each attribute backs one of the
    mixin implementations.  FIX: the original assigned several attributes
    twice (message_handler_requests, message_handler_leases, events,
    cronjobs, cronjob_leases, foreman_rules); the redundant dead stores
    have been removed -- each attribute is now initialized exactly once.
    """
    self.approvals_by_username = {}
    self.blobs = {}
    self.clients = {}
    self.client_messages = {}
    self.client_message_leases = {}
    self.crash_history = {}
    self.cronjob_leases = {}
    self.cronjob_runs = {}
    self.cronjobs = {}
    self.events = []
    self.foreman_rules = []
    self.handler_stop = True
    self.handler_thread = None
    self.keywords = {}
    self.labels = {}
    self.message_handler_leases = {}
    self.message_handler_requests = {}
    self.metadatas = {}
    self.notifications_by_username = {}
    self.startup_history = {}
    self.users = {}
    # TODO(hanuszczak): Consider changing this to nested dicts for improved
    # debugging experience.
    # Maps (client_id, path_type, components) to a path record.
    self.path_records = {}
    # Maps (client_id, path_type, path_id) to a blob record.
    self.blob_records = {}

  @utils.Synchronized
  def ClearTestDB(self):
    """Resets every table to its empty state (test helper)."""
    self._Init()

  def _AllPathIDs(self):
    """Returns the set of all known (client_id, path_type, path_id) tuples."""
    result = set()
    for client_id, path_type, components in self.path_records:
      path_id = rdf_objects.PathID.FromComponents(components)
      result.add((client_id, path_type, path_id))
    return result

  def _ParseTimeRange(self, timerange):
    """Parses a timerange argument and always returns non-None timerange."""
    if timerange is None:
      timerange = (None, None)
    from_time, to_time = timerange
    # Missing bounds default to the epoch / the far future respectively.
    if not from_time:
      from_time = rdfvalue.RDFDatetime().FromSecondsSinceEpoch(0)
    if not to_time:
      to_time = rdfvalue.RDFDatetime().FromSecondsSinceEpoch(sys.maxsize)
    return (from_time, to_time)
| 31.643564 | 76 | 0.708385 |
import sys
import threading
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_server import db
from grr_response_server.databases import mem_blobs
from grr_response_server.databases import mem_clients
from grr_response_server.databases import mem_cronjobs
from grr_response_server.databases import mem_events
from grr_response_server.databases import mem_flows
from grr_response_server.databases import mem_foreman_rules
from grr_response_server.databases import mem_paths
from grr_response_server.databases import mem_users
from grr_response_server.rdfvalues import objects as rdf_objects
class InMemoryDB(mem_blobs.InMemoryDBBlobsMixin,
mem_clients.InMemoryDBClientMixin,
mem_cronjobs.InMemoryDBCronJobMixin,
mem_events.InMemoryDBEventMixin,
mem_flows.InMemoryDBFlowMixin,
mem_foreman_rules.InMemoryDBForemanRulesMixin,
mem_paths.InMemoryDBPathMixin,
mem_users.InMemoryDBUsersMixin,
db.Database):
def __init__(self):
super(InMemoryDB, self).__init__()
self._Init()
self.lock = threading.RLock()
def _Init(self):
self.approvals_by_username = {}
self.clients = {}
self.client_messages = {}
self.client_message_leases = {}
self.crash_history = {}
self.cronjob_leases = {}
self.cronjobs = {}
self.events = []
self.foreman_rules = []
self.keywords = {}
self.labels = {}
self.message_handler_leases = {}
self.message_handler_requests = {}
self.metadatas = {}
self.notifications_by_username = {}
self.startup_history = {}
self.path_records = {}
self.blob_records = {}
self.message_handler_requests = {}
self.message_handler_leases = {}
self.events = []
self.cronjobs = {}
self.cronjob_leases = {}
self.cronjob_runs = {}
self.foreman_rules = []
self.blobs = {}
self.users = {}
self.handler_thread = None
self.handler_stop = True
@utils.Synchronized
def ClearTestDB(self):
self._Init()
def _AllPathIDs(self):
result = set()
for client_id, path_type, components in self.path_records:
path_id = rdf_objects.PathID.FromComponents(components)
result.add((client_id, path_type, path_id))
return result
def _ParseTimeRange(self, timerange):
if timerange is None:
timerange = (None, None)
from_time, to_time = timerange
if not from_time:
from_time = rdfvalue.RDFDatetime().FromSecondsSinceEpoch(0)
if not to_time:
to_time = rdfvalue.RDFDatetime().FromSecondsSinceEpoch(sys.maxsize)
return (from_time, to_time)
| true | true |
f726620976060383077043d92b478dbeab78b397 | 571 | py | Python | migrations/versions/51387d8fda8d_add_default_value_to_is_invited.py | sicness9/BugHub | 2af45b0840757f7826927d4fefc0e626fef136e1 | [
"FTL"
] | null | null | null | migrations/versions/51387d8fda8d_add_default_value_to_is_invited.py | sicness9/BugHub | 2af45b0840757f7826927d4fefc0e626fef136e1 | [
"FTL"
] | null | null | null | migrations/versions/51387d8fda8d_add_default_value_to_is_invited.py | sicness9/BugHub | 2af45b0840757f7826927d4fefc0e626fef136e1 | [
"FTL"
] | null | null | null | """add default value to is_invited
Revision ID: 51387d8fda8d
Revises: 6779bebb64e6
Create Date: 2021-12-21 18:19:50.864781
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '51387d8fda8d'
down_revision = '6779bebb64e6'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| 19.689655 | 65 | 0.691769 | from alembic import op
import sqlalchemy as sa
revision = '51387d8fda8d'
down_revision = '6779bebb64e6'
branch_labels = None
depends_on = None
def upgrade():
| true | true |
f7266308ffe97be95f1df1a47115c73c0ac247a1 | 6,815 | py | Python | main.py | drkostas/COSC525-Project2 | a33c786621e6047b0a586c7c3a3b5b85cb51fd6d | [
"Apache-2.0"
] | null | null | null | main.py | drkostas/COSC525-Project2 | a33c786621e6047b0a586c7c3a3b5b85cb51fd6d | [
"Apache-2.0"
] | null | null | null | main.py | drkostas/COSC525-Project2 | a33c786621e6047b0a586c7c3a3b5b85cb51fd6d | [
"Apache-2.0"
] | null | null | null | import traceback
import argparse
import numpy as np
from src import NeuralNetwork, generateExample, getTensorExample
from typing import *
def get_args() -> argparse.Namespace:
    """Build the command-line parser and return the parsed arguments.

    Returns:
        argparse.Namespace: carries the required ``dataset`` selection.
    """
    parser = argparse.ArgumentParser(
        description='Project 2 for the Deep Learning class (COSC 525). '
                    'Involves the development of a Convolutional Neural Network.',
        add_help=False)
    # --dataset is mandatory; -h is re-added manually because add_help=False.
    required = parser.add_argument_group('Required Arguments')
    required.add_argument(
        '-d', '--dataset', required=True,
        help="The datasets to train the network on. "
             "Options: [example1, example2, example3]")
    optional = parser.add_argument_group('Optional Arguments')
    optional.add_argument(
        "-h", "--help", action="help",
        help="Show this help message and exit")
    return parser.parse_args()
def main():
    """Entry point: build, run and train the CNN for the selected example.

    Loads the example named by the ``--dataset`` CLI argument, constructs the
    matching ``NeuralNetwork`` layer-by-layer, performs one training step and
    prints the resulting weights next to the TensorFlow reference output.

    Example:
        python main.py --dataset example1
    """
    # Initializing
    args = get_args()
    # Load the configurations
    dataset_type = args.dataset
    if dataset_type in ('example1', 'example2', 'example3'):
        example_num = int(dataset_type[-1])
        inputs, targets, layers = generateExample(example_num)
        # Prints the TensorFlow/Keras reference results for comparison.
        getTensorExample(example_num)
    else:
        raise ValueError('Invalid dataset type')
    # ------- Start of Code ------- #
    # # Initialize the network # #
    netWork = NeuralNetwork(input_size=inputs.shape, loss_function="square_error",
                            learning_rate=100, input_channels=1)
    # Add layers
    for layer in layers:
        if layer['type'] == 'Conv':
            # Each kernel's per-channel weights are flattened and its bias is
            # appended, matching the layout addConvLayer expects (one row per kernel).
            weights = []
            for k_ind in range(layer['num_kernels']):
                kernels = [k_w.flatten() for k_w in layer['weights'][k_ind]]
                kernel_weights = np.concatenate((*kernels,
                                                 layer['biases'][k_ind]))
                weights.append(kernel_weights)
            weights = np.array(weights)
            netWork.addConvLayer(num_kernels=layer['num_kernels'],
                                 kernel_size=layer['kernel_size'],
                                 activation=layer['activation'],
                                 weights=weights)
        elif layer['type'] == 'Flat':
            netWork.addFlattenLayer()
        elif layer['type'] == 'MaxPool':
            netWork.addMaxPoolLayer(kernel_size=layer['kernel_size'])
        elif layer['type'] == 'Dense':
            # Dense weights and bias are concatenated into a single row vector.
            weights = np.array([np.concatenate((layer['weights'].flatten(), layer['bias']))])
            netWork.addFCLayer(num_neurons=targets.shape[0],
                               activation=layer['activation'],
                               weights=weights)
        else:
            raise ValueError(f'Invalid layer type: {layer["type"]}')
    # # Train the network # #
    # First Feed forward
    outputs = netWork.calculate(inputs=inputs)
    print("----------- Custom Model -----------")
    print(f"model output before:\n{outputs}")
    # Calculate Loss derivative
    # NOTE(review): loss_der and loss are computed but never used below —
    # presumably kept for debugging parity with the reference; confirm.
    loss_der = netWork.loss_derivative(outputs, targets)
    loss = netWork.calculate_loss(np.array([inputs]), targets)
    netWork.train(np.array([inputs]), targets)  # Train the network
    outputs = netWork.calculate(inputs=inputs)
    print(f"model output after: \n{outputs}")
    # Per-example weight dumps mirroring the TensorFlow reference printout.
    if example_num == 1:
        print('1st convolutional layer, kernel weights:')
        print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))
        print('1st convolutional layer, kernel bias:')
        print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))
        print('fully connected layer weights:')
        print(netWork.layers[2].neurons[0].weights[:-1])
        print('fully connected layer bias:')
        print(np.array([netWork.layers[2].neurons[0].weights[-1]]))
    elif example_num == 2:
        print('1st convolutional layer, 1st kernel weights:')
        print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))
        print('1st convolutional layer, 1st kernel bias:')
        print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))
        print('1st convolutional layer, 2st kernel weights:')
        print(netWork.layers[0].kernels[1][0][0].weights[:-1].reshape((3, 3)))
        print('1st convolutional layer, 2st kernel bias:')
        print(np.array([netWork.layers[0].kernels[1][0][0].weights[-1]]))
        print('2nd convolutional layer, 1st kernel weights:')
        print(netWork.layers[1].kernels[0][0][0].weights[:-1].reshape((2, 3, 3)))
        print('2nd convolutional layer, 1st kernel bias:')
        print(np.array([netWork.layers[1].kernels[0][0][0].weights[-1]]))
        print('fully connected layer weights:')
        print(netWork.layers[3].neurons[0].weights[:-1])
        print('fully connected layer bias:')
        print(np.array([netWork.layers[3].neurons[0].weights[-1]]))
    elif example_num == 3:
        print('1st convolutional layer, 1st kernel weights:')
        print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))
        print('1st convolutional layer, 1st kernel bias:')
        print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))
        print('1st convolutional layer, 2st kernel weights:')
        print(netWork.layers[0].kernels[1][0][0].weights[:-1].reshape((3, 3)))
        print('1st convolutional layer, 2st kernel bias:')
        print(np.array([netWork.layers[0].kernels[1][0][0].weights[-1]]))
        print('fully connected layer weights:')
        print(netWork.layers[3].neurons[0].weights[:-1])
        print('fully connected layer bias:')
        print(np.array([netWork.layers[3].neurons[0].weights[-1]]))
    else:
        raise ValueError(f'Invalid example number: {example_num}')
if __name__ == '__main__':
    # Script entry point: run main() and surface the full traceback before
    # propagating, so the process still exits non-zero on failure.
    try:
        main()
    except Exception as e:
        print(str(e) + '\n' + str(traceback.format_exc()))
        # Bare raise preserves the original traceback (raise e would add
        # this re-raise frame). Also removed the commented-out, superseded
        # manual layer-construction code that previously followed this block.
        raise
| 42.329193 | 101 | 0.612032 | import traceback
import argparse
import numpy as np
from src import NeuralNetwork, generateExample, getTensorExample
from typing import *
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description='Project 2 for the Deep Learning class (COSC 525). '
'Involves the development of a Convolutional Neural Network.',
add_help=False)
required_args = parser.add_argument_group('Required Arguments')
required_args.add_argument('-d', '--dataset', required=True,
help="The datasets to train the network on. "
"Options: [example1, example2, example3]")
optional_args = parser.add_argument_group('Optional Arguments')
optional_args.add_argument("-h", "--help", action="help", help="Show this help message and exit")
return parser.parse_args()
def main():
args = get_args()
dataset_type = args.dataset
if dataset_type in ('example1', 'example2', 'example3'):
example_num = int(dataset_type[-1])
inputs, targets, layers = generateExample(example_num)
getTensorExample(example_num)
else:
raise ValueError('Invalid dataset type')
ut_size=inputs.shape, loss_function="square_error",
learning_rate=100, input_channels=1)
for layer in layers:
if layer['type'] == 'Conv':
weights = []
for k_ind in range(layer['num_kernels']):
kernels = [k_w.flatten() for k_w in layer['weights'][k_ind]]
kernel_weights = np.concatenate((*kernels,
layer['biases'][k_ind]))
weights.append(kernel_weights)
weights = np.array(weights)
netWork.addConvLayer(num_kernels=layer['num_kernels'],
kernel_size=layer['kernel_size'],
activation=layer['activation'],
weights=weights)
elif layer['type'] == 'Flat':
netWork.addFlattenLayer()
elif layer['type'] == 'MaxPool':
netWork.addMaxPoolLayer(kernel_size=layer['kernel_size'])
elif layer['type'] == 'Dense':
weights = np.array([np.concatenate((layer['weights'].flatten(), layer['bias']))])
netWork.addFCLayer(num_neurons=targets.shape[0],
activation=layer['activation'],
weights=weights)
else:
raise ValueError(f'Invalid layer type: {layer["type"]}')
.calculate(inputs=inputs)
print("----------- Custom Model -----------")
print(f"model output before:\n{outputs}")
loss_der = netWork.loss_derivative(outputs, targets)
loss = netWork.calculate_loss(np.array([inputs]), targets)
netWork.train(np.array([inputs]), targets)
outputs = netWork.calculate(inputs=inputs)
print(f"model output after: \n{outputs}")
if example_num == 1:
print('1st convolutional layer, kernel weights:')
print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))
print('1st convolutional layer, kernel bias:')
print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))
print('fully connected layer weights:')
print(netWork.layers[2].neurons[0].weights[:-1])
print('fully connected layer bias:')
print(np.array([netWork.layers[2].neurons[0].weights[-1]]))
elif example_num == 2:
print('1st convolutional layer, 1st kernel weights:')
print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))
print('1st convolutional layer, 1st kernel bias:')
print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))
print('1st convolutional layer, 2st kernel weights:')
print(netWork.layers[0].kernels[1][0][0].weights[:-1].reshape((3, 3)))
print('1st convolutional layer, 2st kernel bias:')
print(np.array([netWork.layers[0].kernels[1][0][0].weights[-1]]))
print('2nd convolutional layer, 1st kernel weights:')
print(netWork.layers[1].kernels[0][0][0].weights[:-1].reshape((2, 3, 3)))
print('2nd convolutional layer, 1st kernel bias:')
print(np.array([netWork.layers[1].kernels[0][0][0].weights[-1]]))
print('fully connected layer weights:')
print(netWork.layers[3].neurons[0].weights[:-1])
print('fully connected layer bias:')
print(np.array([netWork.layers[3].neurons[0].weights[-1]]))
elif example_num == 3:
print('1st convolutional layer, 1st kernel weights:')
print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))
print('1st convolutional layer, 1st kernel bias:')
print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))
print('1st convolutional layer, 2st kernel weights:')
print(netWork.layers[0].kernels[1][0][0].weights[:-1].reshape((3, 3)))
print('1st convolutional layer, 2st kernel bias:')
print(np.array([netWork.layers[0].kernels[1][0][0].weights[-1]]))
print('fully connected layer weights:')
print(netWork.layers[3].neurons[0].weights[:-1])
print('fully connected layer bias:')
print(np.array([netWork.layers[3].neurons[0].weights[-1]]))
else:
raise ValueError(f'Invalid example number: {example_num}')
if __name__ == '__main__':
try:
main()
except Exception as e:
print(str(e) + '\n' + str(traceback.format_exc()))
raise e
| true | true |
f72665b02611c25f6224783cfd40d4362312e741 | 62,701 | py | Python | tests/job_metadata.py | fossabot/DIRBS-Core-1 | 70bf72e2e6dda6e0d7a20cf744300930d88ee70c | [
"PostgreSQL",
"Unlicense"
] | null | null | null | tests/job_metadata.py | fossabot/DIRBS-Core-1 | 70bf72e2e6dda6e0d7a20cf744300930d88ee70c | [
"PostgreSQL",
"Unlicense"
] | null | null | null | tests/job_metadata.py | fossabot/DIRBS-Core-1 | 70bf72e2e6dda6e0d7a20cf744300930d88ee70c | [
"PostgreSQL",
"Unlicense"
] | 3 | 2019-10-24T11:40:06.000Z | 2022-02-24T07:34:00.000Z | """
job_metadata api data import unit tests.
Copyright (c) 2018-2019 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
- Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
- The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment is required by displaying the trademark/log as per the
details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
- Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
- This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import json
from flask import url_for
from _fixtures import * # noqa: F403, F401
from _helpers import job_metadata_importer
import dirbs.metadata as metadata
def test_classification_json_api(flask_app, db_conn, api_version):
    """Verify the job_metadata API echoes back a stored dirbs-classify run.

    Inserts one 'dirbs-classify' job record and checks that both API
    versions return its command, run_id, subcommand, status and metadata.
    """
    classify_metadata = {
        'matched_imei_counts': {'compound_dimension': 0, 'simple_dimension': 0},
        'curr_date': None,
        'conditions': [
            {'dimensions': [{'module': 'gsma_not_found'}],
             'grace_period_days': 30,
             'sticky': False,
             'reason': 'Violated simple dimension',
             'max_allowed_matching_ratio': 0.1,
             'label': 'simple_dimension',
             'blocking': True},
            {'dimensions': [{'module': 'stolen_list'},
                            {'invert': True,
                             'parameters': {'threshold': 3.1, 'period_days': 30},
                             'module': 'duplicate_daily_avg'}],
             'grace_period_days': 0,
             'sticky': False,
             'reason': 'Violated compound dimension',
             'max_allowed_matching_ratio': 0.1,
             'label': 'compound_dimension',
             'blocking': True}]}
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success', extra_metadata=classify_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify', run_id=1, subcommand='',
                                   status='success', show_details=True))
        assert rv.status_code == 200
        job = json.loads(rv.data.decode('utf-8'))[0]
    else:  # job_metadata api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify', run_id=1, subcommand='',
                                   status='success', show_details=True))
        assert rv.status_code == 200
        payload = json.loads(rv.data.decode('utf-8'))
        # v2 wraps results and adds pagination keys.
        assert payload['_keys']['result_size'] == 1
        assert payload['_keys']['previous_key'] == ''
        assert payload['_keys']['next_key'] == ''
        job = payload['jobs'][0]
    assert job['command'] == 'dirbs-classify'
    assert job['run_id'] == 1
    assert job['subcommand'] == ''
    assert job['status'] == 'success'
    assert job['extra_metadata'] == classify_metadata
def test_prune_json_api(flask_app, db_conn, api_version):
    """Verify job_metadata lists both dirbs-prune runs (triplets, classification_state)."""
    prune_metadata = {'rows_before': 0,
                      'retention_months': 6,
                      'curr_date': None,
                      'rows_after': 0}
    # (run_id, subcommand) pairs inserted and expected back in order.
    expected = [(9, 'triplets'), (10, 'classification_state')]
    for run_id, subcommand in expected:
        job_metadata_importer(db_conn=db_conn, command='dirbs-prune', run_id=run_id,
                              subcommand=subcommand, status='success',
                              extra_metadata=prune_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-prune', status='success',
                                   show_details=True))
        assert rv.status_code == 200
        jobs = json.loads(rv.data.decode('utf-8'))
    else:  # job_metadata api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-prune', status='success',
                                   show_details=True))
        assert rv.status_code == 200
        payload = json.loads(rv.data.decode('utf-8'))
        jobs = payload['jobs']
        # v2 wraps results and adds pagination keys.
        assert payload['_keys']['result_size'] == 2
        assert payload['_keys']['previous_key'] == ''
        assert payload['_keys']['next_key'] == ''
    for job, (run_id, subcommand) in zip(jobs, expected):
        assert job['command'] == 'dirbs-prune'
        assert job['run_id'] == run_id
        assert job['subcommand'] == subcommand
        assert job['status'] == 'success'
        assert job['extra_metadata'] == prune_metadata
def test_operator_import_json_api(flask_app, db_conn, api_version):
    """Verify job_metadata reports a failed operator data import.

    Inserts a 'dirbs-import operator' job record with status 'error' and
    checks that both API versions echo back command, run_id, subcommand
    and status. Fix: removed a leftover debug print() from the v2 branch.
    """
    extra_metadata = {
        'performance_timing': {
            'init_staging_end': '2017-08-16T01:05:17.17081+00:00',
            'init_staging_start': '2017-08-16T01:05:16.817426+00:00',
            'extract_split_start': '2017-08-16T01:05:16.10788+00:00',
            'prevalidate_upload_start': '2017-08-16T01:05:17.34236+00:00',
            # NOTE(review): the stray space below is present in the original fixture.
            'analyze_staging_end': '2017-08-16T01:05: 20.807413+00:00',
            'validation_binary_checks_end': '2017-08-16T01:05:25.565519+00:00',
            'prevalidate_upload_end': '2017-08-16T01:05:20.125746+00:00',
            'analyze_staging_start': '2017-08-16T01:05:20.296765+00:00',
            'preprocess_start': '2017-08-16T01:05:16.474489+00:00',
            'extract_split_end': '2017-08-16T01:05:16.301238+00:00',
            'preprocess_end': '2017-08-16T01:05:16.645968+00:00',
            'postprocess_staging_end': '2017-08-16T01:05:24.531709+00:00',
            'validation_threshold_checks_start': '2017-08-16T01:05:25.741384+00:00',
            'validation_binary_checks_start': '2017-08-16T01:05:24.705607+00:00',
            'postprocess_staging_start': '2017-08-16T01:05:20.978153+00:00'},
        'home_threshold': 0.2,
        'cc': ['22%'],
        'clean_threshold': 0.05,
        'null_msisdn_threshold': 0.05,
        'perform_leading_zero_check': True,
        'perform_file_daterange_check': True,
        'perform_null_check': True,
        'perform_clean_check': True,
        'perform_historic_imsi_check': True,
        'perform_null_imsi_check': True,
        'perform_null_msisdn_check': True,
        'perform_historic_msisdn_check': True,
        'operator_id': 'operator1',
        'input_file': '/workspace/data/operator1_home_check_exceeded_20160701_20160731.zip',
        'batch_size': 1000000,
        'mcc_mnc_pairs': [{'mnc': '01', 'mcc': '111'}],
        'perform_historic_imei_check': True,
        'null_imsi_threshold': 0.05,
        'perform_rat_import': False,
        'perform_null_imei_check': True,
        'perform_home_check': True,
        'null_imei_threshold': 0.05,
        'region_threshold': 0.1,
        'perform_region_check': False}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='operator',
                          status='error', extra_metadata=extra_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
    else:  # api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
    assert data['command'] == 'dirbs-import'
    assert data['run_id'] == 1
    assert data['subcommand'] == 'operator'
    assert data['status'] == 'error'
def test_stolen_import_json_api(flask_app, db_conn, api_version):
    """Verify job_metadata reports a successful stolen_list import."""
    timings = {
        'init_staging_end': '2017-08-22T01:42:30.695313+00:00',
        'analyze_staging_end': '2017-08-22T01:42:34.286028+00:00',
        'validation_threshold_checks_end': '2017-08-22T01:42:36.380127+00:00',
        'analyze_staging_start': '2017-08-22T01:42:33.78045+00:00',
        'preprocess_start': '2017-08-22T01:42:30.023073+00:00',
        'copy_from_staging_end': '2017-08-22T01:42:38.553902+00:00',
        'validation_binary_checks_start': '2017-08-22T01:42:35.537445+00:00',
        'validation_threshold_checks_start': '2017-08-22T01:42:36.208775+00:00',
        'output_stats_start': '2017-08-22T01:42:38.721215+00:00',
        'validation_historical_checks_end': '2017-08-22T01:42:37.049421+00:00',
        'extract_split_end': '2017-08-22T01:42:29.855514+00:00',
        'copy_from_staging_start': '2017-08-22T01:42:37.38383+00:00',
        'extract_split_start': '2017-08-22T01:42:29.674068+00:00',
        'validation_historical_checks_start': '2017-08-22T01:42:36.547579+00:00',
        'preprocess_end': '2017-08-22T01:42:30.191182+00:00',
        'postprocess_staging_end': '2017-08-22T01:42:35.370151+00:00',
        'init_staging_start': '2017-08-22T01:42:30.358302+00:00',
        'validation_binary_checks_end': '2017-08-22T01:42:36.041237+00:00',
        'output_stats_end': '2017-08-22T01:42:39.225688+00:00',
        'prevalidate_upload_end': '2017-08-22T01:42:33.612194+00:00',
        'prevalidate_upload_start': '2017-08-22T01:42:30.862953+00:00',
        'postprocess_staging_start': '2017-08-22T01:42:34.458834+00:00'}
    stolen_metadata = {
        'output_stats': {'num_records_updated': 20,
                         'num_records': 20,
                         'num_records_inserted': 20},
        'performance_timing': timings,
        'perform_historic_check': True,
        'input_file': '/workspace/data/sample_import_list.zip',
        'batch_size': 1000000,
        'input_stats': {'num_records_valid': 20,
                        'num_records': 20,
                        'num_records_invalid': 0}}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1,
                          subcommand='stolen_list', status='success',
                          extra_metadata=stolen_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), command='dirbs-import'))
        assert rv.status_code == 200
        job = json.loads(rv.data.decode('utf-8'))[0]
    else:  # api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), command='dirbs-import'))
        assert rv.status_code == 200
        job = json.loads(rv.data.decode('utf-8'))['jobs'][0]
    assert job['command'] == 'dirbs-import'
    assert job['run_id'] == 1
    assert job['subcommand'] == 'stolen_list'
    assert job['status'] == 'success'
def test_pairing_import_json_api(flask_app, db_conn, api_version):
    """Verify job_metadata reports a failed pairing_list import including its metadata."""
    pairing_metadata = {
        'perform_duplicates_check': True,
        'perform_historic_check': True,
        'performance_timing': {
            'init_staging_end': '2017-08-22T01:41:59.925562+00:00',
            'init_staging_start': '2017-08-22T01:41:59.588253+00:00',
            'extract_split_start': '2017-08-22T01:41:58.901343+00:00',
            'prevalidate_upload_start': '2017-08-22T01:42:00.093237+00:00',
            'analyze_staging_end': '2017-08-22T01:42:03.478264+00:00',
            'prevalidate_upload_end': '2017-08-22T01:42:02.788264+00:00',
            'analyze_staging_start': '2017-08-22T01:42:02.956404+00:00',
            'preprocess_start': '2017-08-22T01:41:59.252764+00:00',
            'extract_split_end': '2017-08-22T01:41:59.08492+00:00',
            'preprocess_end': '2017-08-22T01:41:59.421052+00:00',
            'postprocess_staging_end': '2017-08-22T01:42:04.520465+00:00',
            'validation_binary_checks_start': '2017-08-22T01:42:04.68826+00:00',
            'postprocess_staging_start': '2017-08-22T01:42:03.646232+00:00'},
        'batch_size': 1000000,
        'input_file': '/workspace/data/duplicate.zip'}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1,
                          subcommand='pairing_list', status='error',
                          extra_metadata=pairing_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version)))
        assert rv.status_code == 200
        job = json.loads(rv.data.decode('utf-8'))[0]
    else:  # api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version)))
        assert rv.status_code == 200
        job = json.loads(rv.data.decode('utf-8'))['jobs'][0]
    assert job['command'] == 'dirbs-import'
    assert job['run_id'] == 1
    assert job['subcommand'] == 'pairing_list'
    assert job['status'] == 'error'
    assert job['extra_metadata'] == pairing_metadata
def test_gsma_import_json_api(flask_app, db_conn, api_version):
    """Verify job_metadata reports a successful GSMA TAC import."""
    timings = {
        'init_staging_end': '2017-08-22T01:56:25.875908+00:00',
        'analyze_staging_end': '2017-08-22T01:56:29.386537+00:00',
        'validation_threshold_checks_end': '2017-08-22T01:56:31.231756+00:00',
        'analyze_staging_start': '2017-08-22T01:56:28.886486+00:00',
        'preprocess_start': '2017-08-22T01:56:25.192466+00:00',
        'copy_from_staging_end': '2017-08-22T01:56:33.42097+00:00',
        'validation_binary_checks_start': '2017-08-22T01:56:30.725186+00:00',
        'validation_threshold_checks_start': '2017-08-22T01:56:31.063007+00:00',
        'output_stats_start': '2017-08-22T01:56:33.589227+00:00',
        'validation_historical_checks_end': '2017-08-22T01:56:31.915001+00:00',
        'extract_split_end': '2017-08-22T01:56:25.023654+00:00',
        'copy_from_staging_start': '2017-08-22T01:56:32.250857+00:00',
        'extract_split_start': '2017-08-22T01:56:24.844737+00:00',
        'validation_historical_checks_start': '2017-08-22T01:56:31.400242+00:00',
        'preprocess_end': '2017-08-22T01:56:25.368138+00:00',
        'postprocess_staging_end': '2017-08-22T01:56:30.557336+00:00',
        'init_staging_start': '2017-08-22T01:56:25.536523+00:00',
        'validation_binary_checks_end': '2017-08-22T01:56:30.895228+00:00',
        'output_stats_end': '2017-08-22T01:56:34.097277+00:00',
        'prevalidate_upload_end': '2017-08-22T01:56:28.718421+00:00',
        'prevalidate_upload_start': '2017-08-22T01:56:26.043878+00:00',
        'postprocess_staging_start': '2017-08-22T01:56:29.554878+00:00'}
    gsma_metadata = {
        'output_stats': {'num_records_updated': 4,
                         'num_records': 4,
                         'num_records_inserted': 4},
        'performance_timing': timings,
        'perform_historic_check': True,
        'input_file': '/workspace/data/duplicate_gsma.zip',
        'batch_size': 1000000,
        'input_stats': {'num_records_valid': 4,
                        'num_records': 7,
                        'num_records_invalid': 3}}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1,
                          subcommand='gsma_tac', status='success',
                          extra_metadata=gsma_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), show_details=False))
        assert rv.status_code == 200
        job = json.loads(rv.data.decode('utf-8'))[0]
    else:  # api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), show_details=False))
        assert rv.status_code == 200
        job = json.loads(rv.data.decode('utf-8'))['jobs'][0]
    assert job['command'] == 'dirbs-import'
    assert job['run_id'] == 1
    assert job['subcommand'] == 'gsma_tac'
    assert job['status'] == 'success'
def test_registration_import_json_api(flask_app, db_conn, api_version):
    """Verify job_metadata reports a failed registration_list import including its metadata."""
    registration_metadata = {
        'perform_duplicates_check': True,
        'perform_historic_check': True,
        'performance_timing': {
            'init_staging_end': '2017-08-22T01:43:21.386498+00:00',
            'init_staging_start': '2017-08-22T01:43:21.035571+00:00',
            'extract_split_start': '2017-08-22T01:43:20.35253+00:00',
            'prevalidate_upload_start': '2017-08-22T01:43:21.554073+00:00',
            'preprocess_start': '2017-08-22T01:43:20.699411+00:00',
            'extract_split_end': '2017-08-22T01:43:20.531135+00:00',
            'preprocess_end': '2017-08-22T01:43:20.867795+00:00'},
        'batch_size': 1000000,
        'input_file': '/workspace/data/sample_import_list.zip'}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1,
                          subcommand='registration_list', status='error',
                          extra_metadata=registration_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), command='dirbs-import'))
        assert rv.status_code == 200
        job = json.loads(rv.data.decode('utf-8'))[0]
    else:  # api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), command='dirbs-import'))
        assert rv.status_code == 200
        job = json.loads(rv.data.decode('utf-8'))['jobs'][0]
    assert job['command'] == 'dirbs-import'
    assert job['run_id'] == 1
    assert job['subcommand'] == 'registration_list'
    assert job['status'] == 'error'
    assert job['extra_metadata'] == registration_metadata
def test_golden_import_json_api(flask_app, db_conn, api_version):
    """Verify job_metadata reports a failed golden_list import including its metadata."""
    golden_metadata = {
        'performance_timing': {
            'init_staging_end': '2017-08-22T01:43:05.017337+00:00',
            'init_staging_start': '2017-08-22T01:43:04.681766+00:00',
            'extract_split_start': '2017-08-22T01:43:03.993331+00:00',
            'prevalidate_upload_start': '2017-08-22T01:43:05.18436+00:00',
            'preprocess_start': '2017-08-22T01:43:04.337401+00:00',
            'extract_split_end': '2017-08-22T01:43:04.17081+00:00',
            'preprocess_end': '2017-08-22T01:43:04.504815+00:00'},
        'perform_historic_check': True,
        'pre_hashed': False,
        'input_file': '/workspace/data/sample_import_list.zip',
        'batch_size': 1000000}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1,
                          subcommand='golden_list', status='error',
                          extra_metadata=golden_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), show_details=True))
        assert rv.status_code == 200
        job = json.loads(rv.data.decode('utf-8'))[0]
    else:  # api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), show_details=True))
        assert rv.status_code == 200
        job = json.loads(rv.data.decode('utf-8'))['jobs'][0]
    assert job['command'] == 'dirbs-import'
    assert job['run_id'] == 1
    assert job['subcommand'] == 'golden_list'
    assert job['status'] == 'error'
    assert job['extra_metadata'] == golden_metadata
def test_db_schema_json_api(flask_app, db_conn, api_version):
    """Verify job_metadata reports a dirbs-db upgrade run with empty extra_metadata."""
    job_metadata_importer(db_conn=db_conn, command='dirbs-db', run_id=1,
                          subcommand='upgrade', status='success')
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), show_details=True))
        assert rv.status_code == 200
        job = json.loads(rv.data.decode('utf-8'))[0]
    else:  # api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), show_details=True))
        assert rv.status_code == 200
        job = json.loads(rv.data.decode('utf-8'))['jobs'][0]
    assert job['command'] == 'dirbs-db'
    assert job['run_id'] == 1
    assert job['subcommand'] == 'upgrade'
    assert job['status'] == 'success'
    assert job['extra_metadata'] == {}
def test_list_gen_schema_json_api(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify that job_metadata returns a JSON containing list generation metadata.
    """
    # Canned dirbs-listgen extra_metadata: one blacklist file, per-operator
    # notification and exception lists, plus the blocking-conditions config
    # that produced them. Only summary fields are asserted below because the
    # request uses show_details=False.
    extra_metadata = {'blacklist':
                      {'file_size_bytes': 25,
                       'md5sum': 'd623e56b7c73d27fc7ce68e3dfc6e448',
                       'num_records': 0,
                       'filename': '/workspace/data/20170822_021142/blacklist.csv'},
                      'notification_lists':
                      [{'file_size_bytes': 37,
                        'md5sum': '3ac7b8ae8722e47e1ce4b0a01fe8b1e2',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/notifications_operator1.csv'},
                       {'file_size_bytes': 37,
                        'md5sum': '3ac7b8ae8722e47e1ce4b0a01fe8b1e2',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/notifications_operator2.csv'},
                       {'file_size_bytes': 37,
                        'md5sum': '3ac7b8ae8722e47e1ce4b0a01fe8b1e2',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/notifications_operator3.csv'},
                       {'file_size_bytes': 37,
                        'md5sum': '3ac7b8ae8722e47e1ce4b0a01fe8b1e2',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/notifications_operator4.csv'}],
                      'curr_date': None,
                      'exception_lists':
                      [{'file_size_bytes': 11,
                        'md5sum': 'b9a2f42722d13636dfb6c84e2ee765fe',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/exceptions_operator1.csv'},
                       {'file_size_bytes': 11,
                        'md5sum': 'b9a2f42722d13636dfb6c84e2ee765fe',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/exceptions_operator2.csv'},
                       {'file_size_bytes': 11,
                        'md5sum': 'b9a2f42722d13636dfb6c84e2ee765fe',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/exceptions_operator3.csv'},
                       {'file_size_bytes': 11,
                        'md5sum': 'b9a2f42722d13636dfb6c84e2ee765fe',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/exceptions_operator4.csv'}],
                      'blocking_conditions':
                      [{'dimensions':
                        [{'module': 'gsma_not_found'}],
                        'grace_period_days': 30,
                        'sticky': False,
                        'reason': 'Violated simple dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'label': 'simple_dimension',
                        'blocking': True},
                       {'dimensions':
                        [{'module': 'stolen_list'},
                         {'invert': True,
                          'parameters':
                          {'threshold': 3.1,
                           'period_days': 30},
                          'module': 'duplicate_daily_avg'}],
                        'grace_period_days': 0,
                        'sticky': False,
                        'reason': 'Violated compound dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'label': 'compound_dimension',
                        'blocking': True}]}  # noqa E127
    # Seed one successful dirbs-listgen run carrying the metadata above.
    job_metadata_importer(db_conn=db_conn, command='dirbs-listgen', run_id=1, subcommand='',
                          status='success', extra_metadata=extra_metadata)
    if api_version == 'v1':
        # v1 response is a bare JSON list of job records.
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-listgen'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
    else:  # api version 2.0
        # v2 wraps the records under a 'jobs' key.
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-listgen'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
def test_report_schema_json_api(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify that job_metadata returns a JSON containing report metadata.
    """
    extra_metadata = {'refreshed_data': True,
                      'month': 2,
                      'output_dir': '/workspace/data',
                      'year': 2016}
    job_metadata_importer(db_conn=db_conn, command='dirbs-report', run_id=1, subcommand='',
                          status='error', extra_metadata=extra_metadata)
    # Same checks for both API versions; only the endpoint name and the
    # response envelope (bare list vs. 'jobs' wrapper) differ.
    endpoint = 'job_metadata_api' if api_version == 'v1' else 'job_metadata_get_api'
    rv = flask_app.get(url_for('{0}.{1}'.format(api_version, endpoint)))
    assert rv.status_code == 200
    body = json.loads(rv.data.decode('utf-8'))
    record = body[0] if api_version == 'v1' else body['jobs'][0]
    for field, expected in (('command', 'dirbs-report'),
                            ('run_id', 1),
                            ('subcommand', ''),
                            ('status', 'error'),
                            ('extra_metadata', extra_metadata)):
        assert record[field] == expected
def test_job_metadata_bad_pos_int_params(flask_app, db_conn, api_version):
    """Test Depot ID unknown yet.
    Verify that job_metadata API returns a 400 status for not positive integer run_id or max_result,
    """
    # Each sub-case asserts both the HTTP 400 status and the exact error
    # message emitted by the API's parameter validation.
    if api_version == 'v1':
        # not numeric run_id
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id='aaa',
                                   status='success',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Bad \'run_id\':\'aaa\' argument format. Accepts only integer' in rv.data
        # not positive run_id
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=-1,
                                   status='success',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Param \'run_id\':\'-1\' must be greater than 0' in rv.data
        # not numeric max_result
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   max_results='a',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Bad \'max_results\':\'a\' argument format. Accepts only integer' in rv.data
        # not positive max_result
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   max_results=0,
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Param \'max_results\':\'0\' must be greater than 0' in rv.data
        # list of max_result (will take just the first elem of the list)
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   max_results=[1, -2],
                                   show_details=False))
        assert rv.status_code == 200
        # set max_result to 1 and check that only one record is returned
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='sub_one',
                              status='success')
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=2, subcommand='sub_two',
                              status='success')
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   run_id=1,
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   show_details=False,
                                   max_results=1))
        assert rv.status_code == 200
        assert len(json.loads(rv.data.decode('utf-8'))) == 1
    else:  # api version 2.0
        # v2 repeats only the run_id validations and the max_results
        # truncation check; NOTE(review): v2 appears to use offset/limit
        # pagination, which may be why the max_results format checks are
        # not duplicated here.
        # not numeric run_id
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id='aaa',
                                   status='success',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Bad \'run_id\':\'aaa\' argument format. Accepts only integer' in rv.data
        # not positive run_id
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=-1,
                                   status='success',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Param \'run_id\':\'-1\' must be greater than 0' in rv.data
        # set max_result to 1 and check that only one record is returned
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='sub_one',
                              status='success')
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=2, subcommand='sub_two',
                              status='success')
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   run_id=1,
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   show_details=False,
                                   max_results=1))
        assert rv.status_code == 200
        assert len(json.loads(rv.data.decode('utf-8'))['jobs']) == 1
def test_job_metadata_bad_params(flask_app, api_version):
    """Test Depot ID unknown yet.
    Verify that job_metadata API returns a 400 status for unknown status or not boolean show_details.
    """
    # Both branches perform identical checks; only the endpoint name differs.
    if api_version == 'v1':
        # unknown status
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), status='unknown'))
        assert rv.status_code == 400
        assert b'Bad \'status\':\'unknown\' argument format. ' \
               b'Accepts only one of [\'running\', \'success\', \'error\']' in rv.data
        # list of status containing an unknown status
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), status=['error', 'unknown']))
        assert rv.status_code == 400
        assert b'Bad \'status\':\'unknown\' argument format. ' \
               b'Accepts only one of [\'running\', \'success\', \'error\']' in rv.data
        # not boolean show_details
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   show_details='not_boolean'))
        assert rv.status_code == 400
        assert b'Bad \'show_details\':\'not_boolean\' argument format. ' \
               b'Accepts only one of [\'0\', \'1\', \'true\', \'false\']' in rv.data
    else:  # api version 2.0
        # unknown status
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), status='unknown'))
        assert rv.status_code == 400
        assert b'Bad \'status\':\'unknown\' argument format. ' \
               b'Accepts only one of [\'running\', \'success\', \'error\']' in rv.data
        # list of status containing an unknown status
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), status=['error', 'unknown']))
        assert rv.status_code == 400
        assert b'Bad \'status\':\'unknown\' argument format. ' \
               b'Accepts only one of [\'running\', \'success\', \'error\']' in rv.data
        # not boolean show_details
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   show_details='not_boolean'))
        assert rv.status_code == 400
        assert b'Bad \'show_details\':\'not_boolean\' argument format. ' \
               b'Accepts only one of [\'0\', \'1\', \'true\', \'false\']' in rv.data
def test_json_show_details(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify that job_metadata returns a JSON containing job metadata
    with extra information if show_details is set to true.
    """
    # Canned dirbs-classify extra_metadata with two classification conditions;
    # it is expected back verbatim when show_details=True and absent when
    # show_details=False.
    extra_metadata = {'matched_imei_counts':
                      {'compound_dimension': 0,
                       'simple_dimension': 0},  # noqa E127
                      'conditions':
                      [{'label': 'simple_dimension',
                        'blocking': True,
                        'sticky': False,
                        'reason': 'Violated simple dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'dimensions':
                        [{'module': 'gsma_not_found'}],
                        'grace_period_days': 30},
                       {'label': 'compound_dimension',
                        'blocking': True,
                        'sticky': False,
                        'reason':
                        'Violated compound dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'dimensions':
                        [{'module': 'stolen_list'},
                         {'invert': True,
                          'module': 'duplicate_daily_avg',
                          'parameters':
                          {'period_days': 30,
                           'threshold': 3.1}}],
                        'grace_period_days': 0}],
                      'curr_date': None}
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success', extra_metadata=extra_metadata)
    if api_version == 'v1':
        # Step 1 show_details=True
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-classify'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
        assert data['extra_metadata'] == extra_metadata
        # Step 2 show_details=False
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   max_results=10,
                                   show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-classify'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
        # the extra_metadata key must be omitted entirely, not just empty
        assert 'extra_metadata' not in data
    else:  # api version 2.0
        # Step 1 show_details=True
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-classify'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
        assert data['extra_metadata'] == extra_metadata
        # Step 2 show_details=False
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   max_results=10,
                                   show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-classify'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
        # the extra_metadata key must be omitted entirely, not just empty
        assert 'extra_metadata' not in data
def test_json_no_record_for_get_params(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify that job_metadata should return an empty JSON if params are well formatted
    but not stored in the job_metadata table.
    """
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success', extra_metadata={'metadata': 'metadata'})
    # The stored row has run_id=1; querying run_id=2 must match nothing.
    endpoint = 'job_metadata_api' if api_version == 'v1' else 'job_metadata_get_api'
    rv = flask_app.get(url_for('{0}.{1}'.format(api_version, endpoint),
                               command='dirbs-classify',
                               run_id=2,
                               db_user='test-user',
                               status='success',
                               max_results=10,
                               show_details=True))
    assert rv.status_code == 200
    payload = json.loads(rv.data.decode('utf-8'))
    records = payload if api_version == 'v1' else payload['jobs']
    assert records == []
def test_json_unknown_command_param(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify that job_metadata doesn't allow unknown command params.
    """
    # 'dirbs-unknown' is not a valid dirbs command; the API must reject it
    # with a 400 listing the whitelist of accepted commands.
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-unknown',
                                   run_id=2,
                                   db_user='test-user',
                                   status='success',
                                   max_results=10,
                                   show_details=True))
        assert rv.status_code == 400
        assert b'Bad \'command\':\'dirbs-unknown\' argument format. ' \
               b'Accepts only one of [\'dirbs-catalog\', \'dirbs-classify\', ' \
               b'\'dirbs-db\', \'dirbs-import\', \'dirbs-listgen\', \'dirbs-prune\', \'dirbs-report\']' in rv.data
    else:  # api version 2.0
        # v2 request omits max_results but the validation error is identical.
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-unknown',
                                   run_id=2,
                                   db_user='test-user',
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 400
        assert b'Bad \'command\':\'dirbs-unknown\' argument format. ' \
               b'Accepts only one of [\'dirbs-catalog\', \'dirbs-classify\', ' \
               b'\'dirbs-db\', \'dirbs-import\', \'dirbs-listgen\', \'dirbs-prune\', \'dirbs-report\']' in rv.data
def test_json_multiple_values_same_param(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify that job_metadata returns a JSON containing job metadata if get params
    consists of a list of values.
    """
    # Step 1 list of valid params: run_id=[1,2]; subcommand=['upgrade', 'operator']
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='sub_one',
                          status='success')
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=2, subcommand='sub_two',
                          status='success')
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   run_id=[1, 2],
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   show_details=False))
        assert rv.status_code == 200
        # both seeded rows must come back, in run_id order
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['subcommand'] == 'sub_one'
        assert data['run_id'] == 1
        data = json.loads(rv.data.decode('utf-8'))[1]
        assert data['run_id'] == 2
        assert data['subcommand'] == 'sub_two'
        # Step 2 list with invalid params: run_id=[1,-2];
        # a single bad element in the list invalidates the whole request
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=[1, -2],
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   status=['success', 'error'],
                                   max_results=10,
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Param \'run_id\':\'-2\' must be greater than 0' in rv.data
    else:  # api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   run_id=[1, 2],
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   show_details=False))
        assert rv.status_code == 200
        # both seeded rows must come back, in run_id order
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['subcommand'] == 'sub_one'
        assert data['run_id'] == 1
        data = json.loads(rv.data.decode('utf-8'))['jobs'][1]
        assert data['run_id'] == 2
        assert data['subcommand'] == 'sub_two'
        # Step 2 list with invalid params: run_id=[1,-2];
        # a single bad element in the list invalidates the whole request
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=[1, -2],
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   status=['success', 'error'],
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Param \'run_id\':\'-2\' must be greater than 0' in rv.data
def test_json_no_run_id_param(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify that if run_id is set to empty list, it will not be used to filter the results of the query.
    """
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success')
    # run_id=[] serializes to no run_id query arg, so the seeded row is
    # still returned by both API versions.
    endpoint = 'job_metadata_api' if api_version == 'v1' else 'job_metadata_get_api'
    rv = flask_app.get(url_for('{0}.{1}'.format(api_version, endpoint),
                               run_id=[],
                               show_details=False))
    assert rv.status_code == 200
    payload = json.loads(rv.data.decode('utf-8'))
    record = payload[0] if api_version == 'v1' else payload['jobs'][0]
    assert record['command'] == 'dirbs-classify'
    assert record['run_id'] == 1
    assert record['subcommand'] == ''
    assert record['status'] == 'success'
def test_default_params(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify that job_metadata returns a JSON containing all job metadata
    if no request params are given.
    """
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success')
    # Call the endpoint with no query parameters at all.
    endpoint = 'job_metadata_api' if api_version == 'v1' else 'job_metadata_get_api'
    rv = flask_app.get(url_for('{0}.{1}'.format(api_version, endpoint)))
    assert rv.status_code == 200
    payload = json.loads(rv.data.decode('utf-8'))
    record = payload[0] if api_version == 'v1' else payload['jobs'][0]
    for field, expected in (('command', 'dirbs-classify'),
                            ('run_id', 1),
                            ('subcommand', ''),
                            ('status', 'success'),
                            ('extra_metadata', {})):
        assert record[field] == expected
def test_method_delete_not_allowed(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify the job_metadata API does not support HTTP DELETE and returns HTTP 405 METHOD NOT ALLOWED.
    """
    endpoint = 'job_metadata_api' if api_version == 'v1' else 'job_metadata_get_api'
    rv = flask_app.delete(url_for('{0}.{1}'.format(api_version, endpoint)))
    assert rv.status_code == 405
    assert b'Method Not Allowed' in rv.data
def test_method_post_not_allowed(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify the job_metadata API does not support HTTP POST and returns HTTP 405 METHOD NOT ALLOWED.
    """
    # Bug fix: this test previously issued flask_app.delete() (copy-pasted
    # from test_method_delete_not_allowed), so the POST method was never
    # actually exercised. Use flask_app.post() as the docstring states.
    if api_version == 'v1':
        rv = flask_app.post(url_for('{0}.job_metadata_api'.format(api_version)))
        assert rv.status_code == 405
        assert b'Method Not Allowed' in rv.data
    else:  # api version 2.0
        rv = flask_app.post(url_for('{0}.job_metadata_get_api'.format(api_version)))
        assert rv.status_code == 405
        assert b'Method Not Allowed' in rv.data
def test_method_put_not_allowed(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify the job_metadata API does not support HTTP PUT and returns HTTP 405 METHOD NOT ALLOWED.
    """
    # Bug fix: this test previously issued flask_app.delete() (copy-pasted
    # from test_method_delete_not_allowed), so the PUT method was never
    # actually exercised. Use flask_app.put() as the docstring states.
    if api_version == 'v1':
        rv = flask_app.put(url_for('{0}.job_metadata_api'.format(api_version)))
        assert rv.status_code == 405
        assert b'Method Not Allowed' in rv.data
    else:  # api version 2.0
        rv = flask_app.put(url_for('{0}.job_metadata_get_api'.format(api_version)))
        assert rv.status_code == 405
        assert b'Method Not Allowed' in rv.data
def test_job_metadata_most_recent_successful_job_start_time(db_conn):
    """Test Depot ID not known yet.
    Verify metadata::test_job_metadata_most_recent_successful_job_start_time function.
    """
    extra_metadata = {'perform_duplicates_check': True,
                      'perform_historic_check': True,
                      'performance_timing': {}}  # noqa E127
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='pairing-list',
                          status='success', extra_metadata=extra_metadata)
    # Bug fix: the return value was previously discarded, so the test only
    # verified that the call did not raise. Since a successful
    # dirbs-import/pairing-list job was just inserted, a start time must be
    # found for it.
    start_time = metadata.most_recent_job_start_time_by_command(db_conn, 'dirbs-import',
                                                                subcommand='pairing-list',
                                                                successful_only=True)
    assert start_time is not None
def test_job_metadata_v2_pagination(flask_app, db_conn):
    """Test Depot ID not known yet.
    Verify that results returned by metadata api version 2.0 are paginated.
    """
    # insert 20 records
    # (10 iterations x 2 commands per iteration = 20 rows total)
    for i in range(10):
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=i, subcommand='',
                              status='success')
        job_metadata_importer(db_conn=db_conn, command='dirbs-prune',
                              run_id=i, subcommand='triplets', status='success')
    # test all records are fetched when no pagination params are given
    rv = flask_app.get(url_for('v2.job_metadata_get_api'))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == ''
    assert data['_keys']['next_key'] == ''
    assert len(data['jobs']) == 20
    # test pagination, start from 1st record and 5 records per page
    offset = 1
    limit = 5
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=offset, limit=limit))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == ''
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(offset + limit, limit)
    assert len(data['jobs']) == 5
    # second page: previous/next keys now both populated
    next_offset = offset + limit
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=next_offset, limit=limit))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == '?offset={0}&limit={1}'.format(next_offset - limit, limit)
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(next_offset + limit, limit)
    # third page
    next_offset = next_offset + limit
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=next_offset, limit=limit))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    # NOTE(review): previous_key is expected with limit * 2 here while
    # next_key keeps the original limit — presumably an API quirk for
    # non-aligned offsets; confirm against the v2 pagination implementation.
    assert data['_keys']['previous_key'] == '?offset={0}&limit={1}'.format(next_offset - limit, limit * 2)
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(next_offset + limit, limit)
    # pagination with sorting order ascending based on run_id
    offset = 1
    limit = 5
    order = 'Ascending'
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=offset, limit=limit, order=order))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == ''
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(offset + limit, limit)
    assert len(data['jobs']) == 5
    assert data['jobs'][0]['run_id'] <= data['jobs'][1]['run_id']
    assert data['jobs'][1]['run_id'] <= data['jobs'][2]['run_id']
    assert data['jobs'][2]['run_id'] <= data['jobs'][3]['run_id']
    assert data['jobs'][3]['run_id'] <= data['jobs'][4]['run_id']
    # order Descending
    order = 'Descending'
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=offset, limit=limit, order=order))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == ''
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(offset + limit, limit)
    assert len(data['jobs']) == 5
    assert data['jobs'][0]['run_id'] >= data['jobs'][1]['run_id']
    assert data['jobs'][1]['run_id'] >= data['jobs'][2]['run_id']
    assert data['jobs'][2]['run_id'] >= data['jobs'][3]['run_id']
    assert data['jobs'][3]['run_id'] >= data['jobs'][4]['run_id']
| 49.023456 | 120 | 0.54409 |
import json
from flask import url_for
from _fixtures import *
from _helpers import job_metadata_importer
import dirbs.metadata as metadata
def test_classification_json_api(flask_app, db_conn, api_version):
    """Verify that job_metadata returns a JSON containing dirbs-classify job metadata.

    Seeds one successful dirbs-classify run with two classification
    conditions in its extra_metadata and checks the record is returned
    verbatim by both API versions when show_details=True.
    """
    extra_metadata = {'matched_imei_counts':
                      {'compound_dimension': 0,
                       'simple_dimension': 0},
                      'curr_date': None,
                      'conditions':
                      [{'dimensions':
                        [{'module': 'gsma_not_found'}],
                        'grace_period_days': 30,
                        'sticky': False,
                        'reason': 'Violated simple dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'label': 'simple_dimension',
                        'blocking': True},
                       {'dimensions':
                        [{'module': 'stolen_list'},
                         {'invert': True,
                          'parameters':
                          {'threshold': 3.1,
                           'period_days': 30},
                          'module': 'duplicate_daily_avg'}],
                        'grace_period_days': 0,
                        'sticky': False,
                        'reason': 'Violated compound dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'label': 'compound_dimension',
                        'blocking': True}]}
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='', status='success',
                          extra_metadata=extra_metadata)
    if api_version == 'v1':
        # v1 response is a bare JSON list of job records
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   subcommand='',
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-classify'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
        assert data['extra_metadata'] == extra_metadata
    else:
        # v2 wraps records under 'jobs' and adds pagination info in '_keys'
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   subcommand='',
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))
        assert data['_keys']['result_size'] == 1
        assert data['_keys']['previous_key'] == ''
        assert data['_keys']['next_key'] == ''
        assert data['jobs'][0]['command'] == 'dirbs-classify'
        assert data['jobs'][0]['run_id'] == 1
        assert data['jobs'][0]['subcommand'] == ''
        assert data['jobs'][0]['status'] == 'success'
        assert data['jobs'][0]['extra_metadata'] == extra_metadata
def test_prune_json_api(flask_app, db_conn, api_version):
    """Verify that job_metadata returns a JSON containing dirbs-prune job metadata.

    Seeds two successful dirbs-prune runs (triplets and
    classification_state subcommands) sharing the same extra_metadata and
    checks both records come back, in run_id order, from either API version.
    """
    extra_metadata = {'rows_before': 0,
                      'retention_months': 6,
                      'curr_date': None,
                      'rows_after': 0}
    job_metadata_importer(db_conn=db_conn, command='dirbs-prune',
                          run_id=9, subcommand='triplets', status='success',
                          extra_metadata=extra_metadata)
    job_metadata_importer(db_conn=db_conn, command='dirbs-prune', run_id=10, subcommand='classification_state',
                          status='success', extra_metadata=extra_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-prune',
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))
        # first record: run_id=9 / triplets
        triplets_data = data[0]
        assert triplets_data['command'] == 'dirbs-prune'
        assert triplets_data['run_id'] == 9
        assert triplets_data['subcommand'] == 'triplets'
        assert triplets_data['status'] == 'success'
        assert triplets_data['extra_metadata'] == extra_metadata
        # second record: run_id=10 / classification_state
        class_data = data[1]
        assert class_data['command'] == 'dirbs-prune'
        assert class_data['run_id'] == 10
        assert class_data['subcommand'] == 'classification_state'
        assert class_data['status'] == 'success'
        assert class_data['extra_metadata'] == extra_metadata
    else:
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-prune',
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))
        # first record: run_id=9 / triplets
        triplets_data = data['jobs'][0]
        assert triplets_data['command'] == 'dirbs-prune'
        assert triplets_data['run_id'] == 9
        assert triplets_data['subcommand'] == 'triplets'
        assert triplets_data['status'] == 'success'
        assert triplets_data['extra_metadata'] == extra_metadata
        # second record: run_id=10 / classification_state
        class_data = data['jobs'][1]
        assert class_data['command'] == 'dirbs-prune'
        assert class_data['run_id'] == 10
        assert class_data['subcommand'] == 'classification_state'
        assert class_data['status'] == 'success'
        assert class_data['extra_metadata'] == extra_metadata
        # v2 pagination envelope: both records fit in one page
        assert data['_keys']['result_size'] == 2
        assert data['_keys']['previous_key'] == ''
        assert data['_keys']['next_key'] == ''
def test_operator_import_json_api(flask_app, db_conn, api_version):
    """Verify that job_metadata returns a JSON containing operator import job metadata.

    Seeds one errored dirbs-import/operator run with a realistic
    extra_metadata payload and checks the summary fields are returned
    (show_details=False, so extra_metadata itself is not asserted).
    """
    extra_metadata = {'performance_timing':
                      {'init_staging_end': '2017-08-16T01:05:17.17081+00:00',
                       'init_staging_start': '2017-08-16T01:05:16.817426+00:00',
                       'extract_split_start': '2017-08-16T01:05:16.10788+00:00',
                       'prevalidate_upload_start': '2017-08-16T01:05:17.34236+00:00',
                       'analyze_staging_end': '2017-08-16T01:05: 20.807413+00:00',
                       'validation_binary_checks_end': '2017-08-16T01:05:25.565519+00:00',
                       'prevalidate_upload_end': '2017-08-16T01:05:20.125746+00:00',
                       'analyze_staging_start': '2017-08-16T01:05:20.296765+00:00',
                       'preprocess_start': '2017-08-16T01:05:16.474489+00:00',
                       'extract_split_end': '2017-08-16T01:05:16.301238+00:00',
                       'preprocess_end': '2017-08-16T01:05:16.645968+00:00',
                       'postprocess_staging_end': '2017-08-16T01:05:24.531709+00:00',
                       'validation_threshold_checks_start': '2017-08-16T01:05:25.741384+00:00',
                       'validation_binary_checks_start': '2017-08-16T01:05:24.705607+00:00',
                       'postprocess_staging_start': '2017-08-16T01:05:20.978153+00:00'},
                      'home_threshold': 0.2,
                      'cc': ['22%'],
                      'clean_threshold': 0.05,
                      'null_msisdn_threshold': 0.05,
                      'perform_leading_zero_check': True,
                      'perform_file_daterange_check': True,
                      'perform_null_check': True,
                      'perform_clean_check': True,
                      'perform_historic_imsi_check': True,
                      'perform_null_imsi_check': True,
                      'perform_null_msisdn_check': True,
                      'perform_historic_msisdn_check': True,
                      'operator_id': 'operator1',
                      'input_file':
                          '/workspace/data/operator1_home_'
                          'check_exceeded_20160701_20160731.zip',
                      'batch_size': 1000000,
                      'mcc_mnc_pairs': [{'mnc': '01', 'mcc': '111'}],
                      'perform_historic_imei_check': True,
                      'null_imsi_threshold': 0.05,
                      'perform_rat_import': False,
                      'perform_null_imei_check': True,
                      'perform_home_check': True,
                      'null_imei_threshold': 0.05,
                      'region_threshold': 0.1,
                      'perform_region_check': False}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='operator',
                          status='error', extra_metadata=extra_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'operator'
        assert data['status'] == 'error'
    else:
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        # Removed stray debug print(data['command']) left over from development.
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'operator'
        assert data['status'] == 'error'
def test_stolen_import_json_api(flask_app, db_conn, api_version):
    """Verify a stolen_list import job is exposed via the metadata API."""
    # Fixture mirrors the extra_metadata recorded by a real stolen-list import.
    extra_metadata = {'output_stats':
                      {'num_records_updated': 20,
                       'num_records': 20,
                       'num_records_inserted': 20},
                      'performance_timing':
                      {'init_staging_end': '2017-08-22T01:42:30.695313+00:00',
                       'analyze_staging_end': '2017-08-22T01:42:34.286028+00:00',
                       'validation_threshold_checks_end': '2017-08-22T01:42:36.380127+00:00',
                       'analyze_staging_start': '2017-08-22T01:42:33.78045+00:00',
                       'preprocess_start': '2017-08-22T01:42:30.023073+00:00',
                       'copy_from_staging_end': '2017-08-22T01:42:38.553902+00:00',
                       'validation_binary_checks_start': '2017-08-22T01:42:35.537445+00:00',
                       'validation_threshold_checks_start': '2017-08-22T01:42:36.208775+00:00',
                       'output_stats_start': '2017-08-22T01:42:38.721215+00:00',
                       'validation_historical_checks_end': '2017-08-22T01:42:37.049421+00:00',
                       'extract_split_end': '2017-08-22T01:42:29.855514+00:00',
                       'copy_from_staging_start': '2017-08-22T01:42:37.38383+00:00',
                       'extract_split_start': '2017-08-22T01:42:29.674068+00:00',
                       'validation_historical_checks_start': '2017-08-22T01:42:36.547579+00:00',
                       'preprocess_end': '2017-08-22T01:42:30.191182+00:00',
                       'postprocess_staging_end': '2017-08-22T01:42:35.370151+00:00',
                       'init_staging_start': '2017-08-22T01:42:30.358302+00:00',
                       'validation_binary_checks_end': '2017-08-22T01:42:36.041237+00:00',
                       'output_stats_end': '2017-08-22T01:42:39.225688+00:00',
                       'prevalidate_upload_end': '2017-08-22T01:42:33.612194+00:00',
                       'prevalidate_upload_start': '2017-08-22T01:42:30.862953+00:00',
                       'postprocess_staging_start': '2017-08-22T01:42:34.458834+00:00'},
                      'perform_historic_check': True,
                      'input_file': '/workspace/data/sample_import_list.zip',
                      'batch_size': 1000000,
                      'input_stats':
                      {'num_records_valid': 20,
                       'num_records': 20,
                       'num_records_invalid': 0}}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='stolen_list',
                          status='success', extra_metadata=extra_metadata)
    # v1 and v2 expose the same record under different endpoint names/envelopes.
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)
    rv = flask_app.get(url_for(endpoint, command='dirbs-import'))
    assert rv.status_code == 200
    body = json.loads(rv.data.decode('utf-8'))
    record = body[0] if api_version == 'v1' else body['jobs'][0]
    for field, expected in [('command', 'dirbs-import'),
                            ('run_id', 1),
                            ('subcommand', 'stolen_list'),
                            ('status', 'success')]:
        assert record[field] == expected
def test_pairing_import_json_api(flask_app, db_conn, api_version):
    """Verify a failed pairing_list import job round-trips through the API."""
    # Fixture mirrors the extra_metadata recorded by an aborted pairing import.
    extra_metadata = {'perform_duplicates_check': True,
                      'perform_historic_check': True,
                      'performance_timing':
                      {'init_staging_end': '2017-08-22T01:41:59.925562+00:00',
                       'init_staging_start': '2017-08-22T01:41:59.588253+00:00',
                       'extract_split_start': '2017-08-22T01:41:58.901343+00:00',
                       'prevalidate_upload_start': '2017-08-22T01:42:00.093237+00:00',
                       'analyze_staging_end': '2017-08-22T01:42:03.478264+00:00',
                       'prevalidate_upload_end': '2017-08-22T01:42:02.788264+00:00',
                       'analyze_staging_start': '2017-08-22T01:42:02.956404+00:00',
                       'preprocess_start': '2017-08-22T01:41:59.252764+00:00',
                       'extract_split_end': '2017-08-22T01:41:59.08492+00:00',
                       'preprocess_end': '2017-08-22T01:41:59.421052+00:00',
                       'postprocess_staging_end': '2017-08-22T01:42:04.520465+00:00',
                       'validation_binary_checks_start': '2017-08-22T01:42:04.68826+00:00',
                       'postprocess_staging_start': '2017-08-22T01:42:03.646232+00:00'},
                      'batch_size': 1000000,
                      'input_file': '/workspace/data/duplicate.zip'}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='pairing_list',
                          status='error', extra_metadata=extra_metadata)
    # Both API versions serve the record; only endpoint name/envelope differ.
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)
    rv = flask_app.get(url_for(endpoint))
    assert rv.status_code == 200
    body = json.loads(rv.data.decode('utf-8'))
    record = body[0] if api_version == 'v1' else body['jobs'][0]
    for field, expected in [('command', 'dirbs-import'),
                            ('run_id', 1),
                            ('subcommand', 'pairing_list'),
                            ('status', 'error'),
                            ('extra_metadata', extra_metadata)]:
        assert record[field] == expected
def test_gsma_import_json_api(flask_app, db_conn, api_version):
    """Verify a gsma_tac import job is exposed via the metadata API."""
    # Fixture mirrors the extra_metadata recorded by a real GSMA TAC import.
    extra_metadata = {'output_stats':
                      {'num_records_updated': 4,
                       'num_records': 4,
                       'num_records_inserted': 4},
                      'performance_timing':
                      {'init_staging_end': '2017-08-22T01:56:25.875908+00:00',
                       'analyze_staging_end': '2017-08-22T01:56:29.386537+00:00',
                       'validation_threshold_checks_end': '2017-08-22T01:56:31.231756+00:00',
                       'analyze_staging_start': '2017-08-22T01:56:28.886486+00:00',
                       'preprocess_start': '2017-08-22T01:56:25.192466+00:00',
                       'copy_from_staging_end': '2017-08-22T01:56:33.42097+00:00',
                       'validation_binary_checks_start': '2017-08-22T01:56:30.725186+00:00',
                       'validation_threshold_checks_start': '2017-08-22T01:56:31.063007+00:00',
                       'output_stats_start': '2017-08-22T01:56:33.589227+00:00',
                       'validation_historical_checks_end': '2017-08-22T01:56:31.915001+00:00',
                       'extract_split_end': '2017-08-22T01:56:25.023654+00:00',
                       'copy_from_staging_start': '2017-08-22T01:56:32.250857+00:00',
                       'extract_split_start': '2017-08-22T01:56:24.844737+00:00',
                       'validation_historical_checks_start': '2017-08-22T01:56:31.400242+00:00',
                       'preprocess_end': '2017-08-22T01:56:25.368138+00:00',
                       'postprocess_staging_end': '2017-08-22T01:56:30.557336+00:00',
                       'init_staging_start': '2017-08-22T01:56:25.536523+00:00',
                       'validation_binary_checks_end': '2017-08-22T01:56:30.895228+00:00',
                       'output_stats_end': '2017-08-22T01:56:34.097277+00:00',
                       'prevalidate_upload_end': '2017-08-22T01:56:28.718421+00:00',
                       'prevalidate_upload_start': '2017-08-22T01:56:26.043878+00:00',
                       'postprocess_staging_start': '2017-08-22T01:56:29.554878+00:00'},
                      'perform_historic_check': True,
                      'input_file': '/workspace/data/duplicate_gsma.zip',
                      'batch_size': 1000000,
                      'input_stats':
                      {'num_records_valid': 4,
                       'num_records': 7,
                       'num_records_invalid': 3}}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='gsma_tac',
                          status='success', extra_metadata=extra_metadata)
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)
    rv = flask_app.get(url_for(endpoint, show_details=False))
    assert rv.status_code == 200
    body = json.loads(rv.data.decode('utf-8'))
    record = body[0] if api_version == 'v1' else body['jobs'][0]
    for field, expected in [('command', 'dirbs-import'),
                            ('run_id', 1),
                            ('subcommand', 'gsma_tac'),
                            ('status', 'success')]:
        assert record[field] == expected
def test_registration_import_json_api(flask_app, db_conn, api_version):
    """Verify a failed registration_list import job round-trips through the API."""
    # Only the early pipeline phases completed before the import errored out.
    extra_metadata = {'perform_duplicates_check': True,
                      'perform_historic_check': True,
                      'performance_timing':
                      {'init_staging_end': '2017-08-22T01:43:21.386498+00:00',
                       'init_staging_start': '2017-08-22T01:43:21.035571+00:00',
                       'extract_split_start': '2017-08-22T01:43:20.35253+00:00',
                       'prevalidate_upload_start': '2017-08-22T01:43:21.554073+00:00',
                       'preprocess_start': '2017-08-22T01:43:20.699411+00:00',
                       'extract_split_end': '2017-08-22T01:43:20.531135+00:00',
                       'preprocess_end': '2017-08-22T01:43:20.867795+00:00'},
                      'batch_size': 1000000,
                      'input_file': '/workspace/data/sample_import_list.zip'}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='registration_list',
                          status='error', extra_metadata=extra_metadata)
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)
    rv = flask_app.get(url_for(endpoint, command='dirbs-import'))
    assert rv.status_code == 200
    body = json.loads(rv.data.decode('utf-8'))
    record = body[0] if api_version == 'v1' else body['jobs'][0]
    for field, expected in [('command', 'dirbs-import'),
                            ('run_id', 1),
                            ('subcommand', 'registration_list'),
                            ('status', 'error'),
                            ('extra_metadata', extra_metadata)]:
        assert record[field] == expected
def test_golden_import_json_api(flask_app, db_conn, api_version):
    """Verify a failed golden_list import job round-trips through the API."""
    extra_metadata = {'performance_timing':
                      {'init_staging_end': '2017-08-22T01:43:05.017337+00:00',
                       'init_staging_start': '2017-08-22T01:43:04.681766+00:00',
                       'extract_split_start': '2017-08-22T01:43:03.993331+00:00',
                       'prevalidate_upload_start': '2017-08-22T01:43:05.18436+00:00',
                       'preprocess_start': '2017-08-22T01:43:04.337401+00:00',
                       'extract_split_end': '2017-08-22T01:43:04.17081+00:00',
                       'preprocess_end': '2017-08-22T01:43:04.504815+00:00'},
                      'perform_historic_check': True,
                      'pre_hashed': False,
                      'input_file': '/workspace/data/sample_import_list.zip',
                      'batch_size': 1000000}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='golden_list',
                          status='error', extra_metadata=extra_metadata)
    # show_details=True must surface the full extra_metadata payload.
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)
    rv = flask_app.get(url_for(endpoint, show_details=True))
    assert rv.status_code == 200
    body = json.loads(rv.data.decode('utf-8'))
    record = body[0] if api_version == 'v1' else body['jobs'][0]
    for field, expected in [('command', 'dirbs-import'),
                            ('run_id', 1),
                            ('subcommand', 'golden_list'),
                            ('status', 'error'),
                            ('extra_metadata', extra_metadata)]:
        assert record[field] == expected
def test_db_schema_json_api(flask_app, db_conn, api_version):
    """Verify a dirbs-db upgrade job (no extra metadata) is served correctly."""
    job_metadata_importer(db_conn=db_conn, command='dirbs-db', run_id=1, subcommand='upgrade',
                          status='success')
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)
    rv = flask_app.get(url_for(endpoint, show_details=True))
    assert rv.status_code == 200
    body = json.loads(rv.data.decode('utf-8'))
    record = body[0] if api_version == 'v1' else body['jobs'][0]
    for field, expected in [('command', 'dirbs-db'),
                            ('run_id', 1),
                            ('subcommand', 'upgrade'),
                            ('status', 'success'),
                            ('extra_metadata', {})]:
        assert record[field] == expected
def test_list_gen_schema_json_api(flask_app, db_conn, api_version):
    """Verify a dirbs-listgen job is exposed via the metadata API."""
    listgen_dir = '/workspace/data/20170822_021142'
    # All four per-operator notification/exception files share size and md5;
    # build them programmatically instead of repeating four literals each.
    notification_lists = [{'file_size_bytes': 37,
                           'md5sum': '3ac7b8ae8722e47e1ce4b0a01fe8b1e2',
                           'num_records': 0,
                           'filename': '{0}/notifications_operator{1}.csv'.format(listgen_dir, op)}
                          for op in range(1, 5)]
    exception_lists = [{'file_size_bytes': 11,
                        'md5sum': 'b9a2f42722d13636dfb6c84e2ee765fe',
                        'num_records': 0,
                        'filename': '{0}/exceptions_operator{1}.csv'.format(listgen_dir, op)}
                       for op in range(1, 5)]
    extra_metadata = {'blacklist':
                      {'file_size_bytes': 25,
                       'md5sum': 'd623e56b7c73d27fc7ce68e3dfc6e448',
                       'num_records': 0,
                       'filename': '{0}/blacklist.csv'.format(listgen_dir)},
                      'notification_lists': notification_lists,
                      'curr_date': None,
                      'exception_lists': exception_lists,
                      'blocking_conditions':
                      [{'dimensions': [{'module': 'gsma_not_found'}],
                        'grace_period_days': 30,
                        'sticky': False,
                        'reason': 'Violated simple dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'label': 'simple_dimension',
                        'blocking': True},
                       {'dimensions':
                        [{'module': 'stolen_list'},
                         {'invert': True,
                          'parameters': {'threshold': 3.1, 'period_days': 30},
                          'module': 'duplicate_daily_avg'}],
                        'grace_period_days': 0,
                        'sticky': False,
                        'reason': 'Violated compound dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'label': 'compound_dimension',
                        'blocking': True}]}
    job_metadata_importer(db_conn=db_conn, command='dirbs-listgen', run_id=1, subcommand='',
                          status='success', extra_metadata=extra_metadata)
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)
    rv = flask_app.get(url_for(endpoint, show_details=False))
    assert rv.status_code == 200
    body = json.loads(rv.data.decode('utf-8'))
    record = body[0] if api_version == 'v1' else body['jobs'][0]
    for field, expected in [('command', 'dirbs-listgen'),
                            ('run_id', 1),
                            ('subcommand', ''),
                            ('status', 'success')]:
        assert record[field] == expected
def test_report_schema_json_api(flask_app, db_conn, api_version):
    """Verify a failed dirbs-report job round-trips through the metadata API."""
    extra_metadata = {'refreshed_data': True,
                      'month': 2,
                      'output_dir': '/workspace/data',
                      'year': 2016}
    job_metadata_importer(db_conn=db_conn, command='dirbs-report', run_id=1, subcommand='',
                          status='error', extra_metadata=extra_metadata)
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)
    rv = flask_app.get(url_for(endpoint))
    assert rv.status_code == 200
    body = json.loads(rv.data.decode('utf-8'))
    record = body[0] if api_version == 'v1' else body['jobs'][0]
    for field, expected in [('command', 'dirbs-report'),
                            ('run_id', 1),
                            ('subcommand', ''),
                            ('status', 'error'),
                            ('extra_metadata', extra_metadata)]:
        assert record[field] == expected
def test_job_metadata_bad_pos_int_params(flask_app, db_conn, api_version):
    """Verify validation of positive-integer query params (run_id, max_results).

    Non-integer or non-positive values must be rejected with HTTP 400 and a
    descriptive message, while valid filters must return matching records.
    The v1 and v2 branches are deliberately asymmetric: the max_results
    format checks are exercised only against v1.
    """
    if api_version == 'v1':
        # Non-integer run_id -> 400.
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id='aaa',
                                   status='success',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Bad \'run_id\':\'aaa\' argument format. Accepts only integer' in rv.data
        # Non-positive run_id -> 400.
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=-1,
                                   status='success',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Param \'run_id\':\'-1\' must be greater than 0' in rv.data
        # Non-integer max_results -> 400.
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   max_results='a',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Bad \'max_results\':\'a\' argument format. Accepts only integer' in rv.data
        # Zero max_results -> 400 (must be strictly positive).
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   max_results=0,
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Param \'max_results\':\'0\' must be greater than 0' in rv.data
        # NOTE(review): a multi-valued max_results containing a negative member
        # is accepted (200) -- presumably only one value is consumed by the v1
        # handler; confirm against the API implementation.
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   max_results=[1, -2],
                                   show_details=False))
        assert rv.status_code == 200
        # With two stored jobs, max_results=1 must truncate the result list.
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='sub_one',
                              status='success')
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=2, subcommand='sub_two',
                              status='success')
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   run_id=1,
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   show_details=False,
                                   max_results=1))
        assert rv.status_code == 200
        assert len(json.loads(rv.data.decode('utf-8'))) == 1
    else:
        # v2: same run_id format/positivity checks against the renamed endpoint.
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id='aaa',
                                   status='success',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Bad \'run_id\':\'aaa\' argument format. Accepts only integer' in rv.data
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=-1,
                                   status='success',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Param \'run_id\':\'-1\' must be greater than 0' in rv.data
        # max_results=1 truncates the v2 'jobs' envelope the same way.
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='sub_one',
                              status='success')
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=2, subcommand='sub_two',
                              status='success')
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   run_id=1,
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   show_details=False,
                                   max_results=1))
        assert rv.status_code == 200
        assert len(json.loads(rv.data.decode('utf-8'))['jobs']) == 1
def test_job_metadata_bad_params(flask_app, api_version):
    """Malformed enum-style params (status, show_details) must yield HTTP 400."""
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)
    # Unknown status value, alone...
    rv = flask_app.get(url_for(endpoint, status='unknown'))
    assert rv.status_code == 400
    assert b'Bad \'status\':\'unknown\' argument format. ' \
           b'Accepts only one of [\'running\', \'success\', \'error\']' in rv.data
    # ...and mixed into an otherwise-valid multi-value list.
    rv = flask_app.get(url_for(endpoint, status=['error', 'unknown']))
    assert rv.status_code == 400
    assert b'Bad \'status\':\'unknown\' argument format. ' \
           b'Accepts only one of [\'running\', \'success\', \'error\']' in rv.data
    # Non-boolean show_details.
    rv = flask_app.get(url_for(endpoint, show_details='not_boolean'))
    assert rv.status_code == 400
    assert b'Bad \'show_details\':\'not_boolean\' argument format. ' \
           b'Accepts only one of [\'0\', \'1\', \'true\', \'false\']' in rv.data
def test_json_show_details(flask_app, db_conn, api_version):
    """extra_metadata must appear only when show_details is true."""
    extra_metadata = {'matched_imei_counts':
                      {'compound_dimension': 0,
                       'simple_dimension': 0},
                      'conditions':
                      [{'label': 'simple_dimension',
                        'blocking': True,
                        'sticky': False,
                        'reason': 'Violated simple dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'dimensions': [{'module': 'gsma_not_found'}],
                        'grace_period_days': 30},
                       {'label': 'compound_dimension',
                        'blocking': True,
                        'sticky': False,
                        'reason': 'Violated compound dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'dimensions':
                        [{'module': 'stolen_list'},
                         {'invert': True,
                          'module': 'duplicate_daily_avg',
                          'parameters':
                          {'period_days': 30,
                           'threshold': 3.1}}],
                        'grace_period_days': 0}],
                      'curr_date': None}
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success', extra_metadata=extra_metadata)
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)

    def _fetch_first(**params):
        # Issue the filtered GET and unwrap the first record for either API shape.
        resp = flask_app.get(url_for(endpoint, command='dirbs-classify', run_id=1,
                                     status='success', **params))
        assert resp.status_code == 200
        body = json.loads(resp.data.decode('utf-8'))
        return body[0] if api_version == 'v1' else body['jobs'][0]

    record = _fetch_first(show_details=True)
    assert record['command'] == 'dirbs-classify'
    assert record['run_id'] == 1
    assert record['subcommand'] == ''
    assert record['status'] == 'success'
    assert record['extra_metadata'] == extra_metadata

    record = _fetch_first(max_results=10, show_details=False)
    assert record['command'] == 'dirbs-classify'
    assert record['run_id'] == 1
    assert record['subcommand'] == ''
    assert record['status'] == 'success'
    assert 'extra_metadata' not in record
def test_json_no_record_for_get_params(flask_app, db_conn, api_version):
    """A filter matching no stored job must return an empty result set."""
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success', extra_metadata={'metadata': 'metadata'})
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)
    # run_id=2 was never stored, so the query yields no records.
    rv = flask_app.get(url_for(endpoint,
                               command='dirbs-classify',
                               run_id=2,
                               db_user='test-user',
                               status='success',
                               max_results=10,
                               show_details=True))
    assert rv.status_code == 200
    body = json.loads(rv.data.decode('utf-8'))
    records = body if api_version == 'v1' else body['jobs']
    assert records == []
def test_json_unknown_command_param(flask_app, db_conn, api_version):
    """An unrecognised command filter must be rejected with HTTP 400."""
    params = {'command': 'dirbs-unknown',
              'run_id': 2,
              'db_user': 'test-user',
              'status': 'success',
              'show_details': True}
    if api_version == 'v1':
        endpoint = '{0}.job_metadata_api'.format(api_version)
        params['max_results'] = 10  # only the v1 request passed max_results
    else:
        endpoint = '{0}.job_metadata_get_api'.format(api_version)
    rv = flask_app.get(url_for(endpoint, **params))
    assert rv.status_code == 400
    assert b'Bad \'command\':\'dirbs-unknown\' argument format. ' \
           b'Accepts only one of [\'dirbs-catalog\', \'dirbs-classify\', ' \
           b'\'dirbs-db\', \'dirbs-import\', \'dirbs-listgen\', \'dirbs-prune\', \'dirbs-report\']' in rv.data
def test_json_multiple_values_same_param(flask_app, db_conn, api_version):
    """Repeated query params are treated as value lists; bad members still 400."""
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='sub_one',
                          status='success')
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=2, subcommand='sub_two',
                          status='success')
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)
    # Both stored jobs match the multi-value filters and come back in order.
    rv = flask_app.get(url_for(endpoint,
                               run_id=[1, 2],
                               db_user='test-user',
                               subcommand=['sub_one', 'sub_two'],
                               show_details=False))
    assert rv.status_code == 200
    body = json.loads(rv.data.decode('utf-8'))
    records = body if api_version == 'v1' else body['jobs']
    assert records[0]['subcommand'] == 'sub_one'
    assert records[0]['run_id'] == 1
    assert records[1]['run_id'] == 2
    assert records[1]['subcommand'] == 'sub_two'
    # A negative member inside the run_id list must be rejected outright.
    bad_params = {'command': 'dirbs-classify',
                  'run_id': [1, -2],
                  'db_user': 'test-user',
                  'subcommand': ['sub_one', 'sub_two'],
                  'status': ['success', 'error'],
                  'show_details': False}
    if api_version == 'v1':
        bad_params['max_results'] = 10  # only the v1 request passed max_results
    rv = flask_app.get(url_for(endpoint, **bad_params))
    assert rv.status_code == 400
    assert b'Param \'run_id\':\'-2\' must be greater than 0' in rv.data
def test_json_no_run_id_param(flask_app, db_conn, api_version):
    """An empty run_id list acts as no filter and returns the stored job."""
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success')
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)
    rv = flask_app.get(url_for(endpoint, run_id=[], show_details=False))
    assert rv.status_code == 200
    body = json.loads(rv.data.decode('utf-8'))
    record = body[0] if api_version == 'v1' else body['jobs'][0]
    for field, expected in [('command', 'dirbs-classify'),
                            ('run_id', 1),
                            ('subcommand', ''),
                            ('status', 'success')]:
        assert record[field] == expected
def test_default_params(flask_app, db_conn, api_version):
    """With no query params, the stored job is returned with default fields."""
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success')
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)
    rv = flask_app.get(url_for(endpoint))
    assert rv.status_code == 200
    body = json.loads(rv.data.decode('utf-8'))
    record = body[0] if api_version == 'v1' else body['jobs'][0]
    for field, expected in [('command', 'dirbs-classify'),
                            ('run_id', 1),
                            ('subcommand', ''),
                            ('status', 'success'),
                            ('extra_metadata', {})]:
        assert record[field] == expected
def test_method_delete_not_allowed(flask_app, db_conn, api_version):
    """HTTP DELETE on the job metadata endpoint must return 405."""
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)
    rv = flask_app.delete(url_for(endpoint))
    assert rv.status_code == 405
    assert b'Method Not Allowed' in rv.data
def test_method_post_not_allowed(flask_app, db_conn, api_version):
    """HTTP POST on the job metadata endpoint must return 405.

    Bug fix: the original issued DELETE requests (copy-paste from the
    delete test), so the POST method was never actually exercised.
    """
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)
    rv = flask_app.post(url_for(endpoint))
    assert rv.status_code == 405
    assert b'Method Not Allowed' in rv.data
def test_method_put_not_allowed(flask_app, db_conn, api_version):
    """HTTP PUT on the job metadata endpoint must return 405.

    Bug fix: the original issued DELETE requests (copy-paste from the
    delete test), so the PUT method was never actually exercised.
    """
    endpoint = ('{0}.job_metadata_api' if api_version == 'v1'
                else '{0}.job_metadata_get_api').format(api_version)
    rv = flask_app.put(url_for(endpoint))
    assert rv.status_code == 405
    assert b'Method Not Allowed' in rv.data
def test_job_metadata_most_recent_successful_job_start_time(db_conn):
    """Verify most_recent_job_start_time_by_command finds the stored job.

    Improvement: the original discarded the return value, so the test only
    proved the call did not raise. Assert a start time is actually found
    for the successful job that was just inserted.
    """
    extra_metadata = {'perform_duplicates_check': True,
                      'perform_historic_check': True,
                      'performance_timing': {}}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='pairing-list',
                          status='success', extra_metadata=extra_metadata)
    start_time = metadata.most_recent_job_start_time_by_command(db_conn, 'dirbs-import',
                                                                subcommand='pairing-list',
                                                                successful_only=True)
    assert start_time is not None
def test_job_metadata_v2_pagination(flask_app, db_conn):
    """Verify offset/limit pagination keys and ordering on the v2 API."""
    # Seed 20 jobs: 10 classify and 10 prune runs sharing run_ids 0-9.
    for i in range(10):
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=i, subcommand='',
                              status='success')
        job_metadata_importer(db_conn=db_conn, command='dirbs-prune',
                              run_id=i, subcommand='triplets', status='success')
    # No paging params: all 20 jobs in one page, no prev/next keys.
    rv = flask_app.get(url_for('v2.job_metadata_get_api'))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == ''
    assert data['_keys']['next_key'] == ''
    assert len(data['jobs']) == 20
    # First page (offset=1, limit=5): no previous key, next key advances by limit.
    offset = 1
    limit = 5
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=offset, limit=limit))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == ''
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(offset + limit, limit)
    assert len(data['jobs']) == 5
    # Second page: both previous and next keys are populated.
    next_offset = offset + limit
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=next_offset, limit=limit))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == '?offset={0}&limit={1}'.format(next_offset - limit, limit)
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(next_offset + limit, limit)
    # Third page.
    # NOTE(review): the previous_key here advertises limit * 2 -- presumably
    # the API widens the previous window at this offset; confirm against the
    # v2 pagination handler before relying on this expectation.
    next_offset = next_offset + limit
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=next_offset, limit=limit))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == '?offset={0}&limit={1}'.format(next_offset - limit, limit * 2)
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(next_offset + limit, limit)
    # Ascending order: run_ids are non-decreasing across the page.
    offset = 1
    limit = 5
    order = 'Ascending'
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=offset, limit=limit, order=order))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == ''
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(offset + limit, limit)
    assert len(data['jobs']) == 5
    assert data['jobs'][0]['run_id'] <= data['jobs'][1]['run_id']
    assert data['jobs'][1]['run_id'] <= data['jobs'][2]['run_id']
    assert data['jobs'][2]['run_id'] <= data['jobs'][3]['run_id']
    assert data['jobs'][3]['run_id'] <= data['jobs'][4]['run_id']
    # Descending order: run_ids are non-increasing across the page.
    order = 'Descending'
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=offset, limit=limit, order=order))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == ''
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(offset + limit, limit)
    assert len(data['jobs']) == 5
    assert data['jobs'][0]['run_id'] >= data['jobs'][1]['run_id']
    assert data['jobs'][1]['run_id'] >= data['jobs'][2]['run_id']
    assert data['jobs'][2]['run_id'] >= data['jobs'][3]['run_id']
    assert data['jobs'][3]['run_id'] >= data['jobs'][4]['run_id']
| true | true |
f726669c4c59c770ca27ddaf00f3eafdc8ca9522 | 3,606 | py | Python | mocks/mock.py | pandora-auth-ros-pkg/dashboard | 0e66c47d1987e1dfdc91dcd4f876791673938158 | [
"MIT"
] | 1 | 2016-04-01T02:37:05.000Z | 2016-04-01T02:37:05.000Z | mocks/mock.py | pandora-auth-ros-pkg/dashboard | 0e66c47d1987e1dfdc91dcd4f876791673938158 | [
"MIT"
] | null | null | null | mocks/mock.py | pandora-auth-ros-pkg/dashboard | 0e66c47d1987e1dfdc91dcd4f876791673938158 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
from time import sleep
import random
import sys
import rospy
from rospy import Publisher, init_node
from sensor_msgs.msg import Range
from pandora_data_fusion_msgs.msg import VictimProbabilities
from pandora_sensor_msgs.msg import BatteryMsg, Co2Msg, Temperature
from pandora_sensor_msgs.msg import ThermalMeanMsg, ImuRPY
def random_battery(delay=1):
    """Publish random battery voltages on /sensors/battery until shutdown.

    Two supplies are reported ('PSU' and 'Motors'); each voltage is drawn
    uniformly from 18..25 and one message is published every `delay` seconds.
    """
    print('Starting random battery data...')
    pub = Publisher('/sensors/battery', BatteryMsg)
    msg = BatteryMsg()
    msg.name = ['PSU', 'Motors']
    while not rospy.is_shutdown():
        battery1 = random.randint(18, 25)
        battery2 = random.randint(18, 25)
        msg.voltage = [battery1, battery2]
        # Sleep first, then publish: one message per `delay` period.
        sleep(delay)
        pub.publish(msg)
def random_temperatures(delay=1):
    """Publish random per-core CPU temperatures on /cpu/temperature.

    Four cores are reported, each with a temperature drawn uniformly from
    30..80; one message is published every `delay` seconds until shutdown.
    """
    print('Starting random temperatures...')
    pub = Publisher('/cpu/temperature', Temperature)
    msg = Temperature()
    # Fix: the last core was mislabelled 'cpu2' (a duplicate). Four values
    # are published per message, so the names must be cpu0..cpu3.
    msg.name = ['cpu0', 'cpu1', 'cpu2', 'cpu3']
    while not rospy.is_shutdown():
        msg.temperature = [random.randint(30, 80) for _ in range(4)]
        sleep(delay)
        pub.publish(msg)
def random_co2(delay=1):
    """Publish a random CO2 percentage (0..1) on /sensors/co2.

    One message is published every `delay` seconds until rospy shuts down.
    """
    # Fix: the startup message previously said "battery" (copy-paste error
    # from random_battery).
    print('Starting random co2 data...')
    pub = Publisher('/sensors/co2', Co2Msg)
    msg = Co2Msg()
    while not rospy.is_shutdown():
        msg.header = rospy.Header()
        msg.co2_percentage = random.random()
        sleep(delay)
        pub.publish(msg)
def random_sonar(delay=1):
    """Publish random ranges (0..20) for the right and left sonar frames.

    Both frames publish on the same /sensors/range topic; the frame is
    distinguished via the message header's frame_id. There is a `delay`
    second pause between the right and left publish of each cycle.
    """
    print('Starting random sonar...')
    pub = Publisher('/sensors/range', Range)
    msg = Range()
    while not rospy.is_shutdown():
        msg.header = rospy.Header(frame_id='right_sonar_frame')
        msg.range = random.random() * 20
        pub.publish(msg)
        sleep(delay)
        msg.header = rospy.Header(frame_id='left_sonar_frame')
        msg.range = random.random() * 20
        pub.publish(msg)
def random_imu(delay=1):
    """Publish random roll/pitch/yaw values (each 0..50) on /sensors/imu_rpy.

    One message is published every `delay` seconds until rospy shuts down.
    """
    print('Starting random imu...')
    pub = Publisher('/sensors/imu_rpy', ImuRPY)
    msg = ImuRPY()
    while not rospy.is_shutdown():
        msg.roll = random.random() * 50
        msg.pitch = random.random() * 50
        msg.yaw = random.random() * 50
        pub.publish(msg)
        sleep(delay)
def random_thermal(delay=1):
    """Publish a random thermal mean (20..40) on /sensors/thermal.

    One message is published every `delay` seconds until rospy shuts down.
    """
    print('Starting random thermal data...')
    pub = Publisher('/sensors/thermal', ThermalMeanMsg)
    msg = ThermalMeanMsg()
    while not rospy.is_shutdown():
        msg.header = rospy.Header()
        msg.thermal_mean = random.randint(20, 40)
        sleep(delay)
        pub.publish(msg)
def random_signs_of_life(delay=1):
    """Publish random victim-detection probabilities on /data_fusion/signs_of_life.

    Every sensor channel (thermal, co2, sound, motion, visualVictim, hazmat)
    gets an independent uniform value in [0, 1); one message is published
    every `delay` seconds until rospy shuts down.
    """
    print('Starting random signs of life.')
    pub = Publisher('/data_fusion/signs_of_life', VictimProbabilities)
    msg = VictimProbabilities()
    while not rospy.is_shutdown():
        msg.thermal = random.random()
        msg.co2 = random.random()
        msg.sound = random.random()
        msg.motion = random.random()
        msg.visualVictim = random.random()
        msg.hazmat = random.random()
        sleep(delay)
        pub.publish(msg)
if __name__ == '__main__':
    # Dispatch table: CLI keyword -> mock publisher (replaces the if/elif chain).
    mocks = {
        'battery': random_battery,
        'co2': random_co2,
        'thermal': random_thermal,
        'temp': random_temperatures,
        'sonar': random_sonar,
        'imu': random_imu,
        'sol': random_signs_of_life,
    }
    # Validate arguments before starting the ROS node: previously a missing
    # argument raised IndexError and an unknown selection silently did nothing.
    if len(sys.argv) < 3:
        print('Usage: mock.py <{0}> <delay_seconds>'.format('|'.join(sorted(mocks))))
        sys.exit(1)
    selection = sys.argv[1]
    try:
        delay = float(sys.argv[2])
    except ValueError:
        print('delay must be a number, got {0!r}'.format(sys.argv[2]))
        sys.exit(1)
    if selection not in mocks:
        print('Unknown selection {0!r}; choose one of: {1}'.format(
            selection, ', '.join(sorted(mocks))))
        sys.exit(1)
    init_node('mock_node', anonymous=True)
    mocks[selection](delay)
| 28.848 | 70 | 0.642818 |
from __future__ import print_function
from time import sleep
import random
import sys
import rospy
from rospy import Publisher, init_node
from sensor_msgs.msg import Range
from pandora_data_fusion_msgs.msg import VictimProbabilities
from pandora_sensor_msgs.msg import BatteryMsg, Co2Msg, Temperature
from pandora_sensor_msgs.msg import ThermalMeanMsg, ImuRPY
def random_battery(delay=1):
print('Starting random battery data...')
pub = Publisher('/sensors/battery', BatteryMsg)
msg = BatteryMsg()
msg.name = ['PSU', 'Motors']
while not rospy.is_shutdown():
battery1 = random.randint(18, 25)
battery2 = random.randint(18, 25)
msg.voltage = [battery1, battery2]
sleep(delay)
pub.publish(msg)
def random_temperatures(delay=1):
    """Publish random per-core CPU temperatures on /cpu/temperature.

    Four cores are reported, each with a temperature drawn uniformly from
    30..80; one message is published every `delay` seconds until shutdown.
    """
    print('Starting random temperatures...')
    pub = Publisher('/cpu/temperature', Temperature)
    msg = Temperature()
    # Fix: the last core was mislabelled 'cpu2' (a duplicate). Four values
    # are published per message, so the names must be cpu0..cpu3.
    msg.name = ['cpu0', 'cpu1', 'cpu2', 'cpu3']
    while not rospy.is_shutdown():
        msg.temperature = [random.randint(30, 80) for _ in range(4)]
        sleep(delay)
        pub.publish(msg)
def random_co2(delay=1):
    """Publish a random CO2 percentage (0..1) on /sensors/co2.

    One message is published every `delay` seconds until rospy shuts down.
    """
    # Fix: the startup message previously said "battery" (copy-paste error
    # from random_battery).
    print('Starting random co2 data...')
    pub = Publisher('/sensors/co2', Co2Msg)
    msg = Co2Msg()
    while not rospy.is_shutdown():
        msg.header = rospy.Header()
        msg.co2_percentage = random.random()
        sleep(delay)
        pub.publish(msg)
def random_sonar(delay=1):
print('Starting random sonar...')
pub = Publisher('/sensors/range', Range)
msg = Range()
while not rospy.is_shutdown():
msg.header = rospy.Header(frame_id='right_sonar_frame')
msg.range = random.random() * 20
pub.publish(msg)
sleep(delay)
msg.header = rospy.Header(frame_id='left_sonar_frame')
msg.range = random.random() * 20
pub.publish(msg)
def random_imu(delay=1):
print('Starting random imu...')
pub = Publisher('/sensors/imu_rpy', ImuRPY)
msg = ImuRPY()
while not rospy.is_shutdown():
msg.roll = random.random() * 50
msg.pitch = random.random() * 50
msg.yaw = random.random() * 50
pub.publish(msg)
sleep(delay)
def random_thermal(delay=1):
print('Starting random thermal data...')
pub = Publisher('/sensors/thermal', ThermalMeanMsg)
msg = ThermalMeanMsg()
while not rospy.is_shutdown():
msg.header = rospy.Header()
msg.thermal_mean = random.randint(20, 40)
sleep(delay)
pub.publish(msg)
def random_signs_of_life(delay=1):
print('Starting random signs of life.')
pub = Publisher('/data_fusion/signs_of_life', VictimProbabilities)
msg = VictimProbabilities()
while not rospy.is_shutdown():
msg.thermal = random.random()
msg.co2 = random.random()
msg.sound = random.random()
msg.motion = random.random()
msg.visualVictim = random.random()
msg.hazmat = random.random()
sleep(delay)
pub.publish(msg)
if __name__ == '__main__':
init_node('mock_node', anonymous=True)
selection = sys.argv[1]
delay = float(sys.argv[2])
if selection == 'battery':
random_battery(delay)
elif selection == 'co2':
random_co2(delay)
elif selection == 'thermal':
random_thermal(delay)
elif selection == 'temp':
random_temperatures(delay)
elif selection == 'sonar':
random_sonar(delay)
elif selection == 'imu':
random_imu(delay)
elif selection == 'sol':
random_signs_of_life(delay)
| true | true |
f72666c6d7fa28865bf0cc1af0a8928f8b710444 | 674 | py | Python | src/gufo/err/failfast/always.py | gufolabs/gufo_err | d3996f355b38a3efe1fa3ecae578846ffebd7790 | [
"BSD-3-Clause"
] | null | null | null | src/gufo/err/failfast/always.py | gufolabs/gufo_err | d3996f355b38a3efe1fa3ecae578846ffebd7790 | [
"BSD-3-Clause"
] | null | null | null | src/gufo/err/failfast/always.py | gufolabs/gufo_err | d3996f355b38a3efe1fa3ecae578846ffebd7790 | [
"BSD-3-Clause"
] | null | null | null | # ---------------------------------------------------------------------
# Gufo Err: AlwaysFailFast
# ---------------------------------------------------------------------
# Copyright (C) 2022, Gufo Labs
# ---------------------------------------------------------------------
# Python modules
from typing import Type
from types import TracebackType
# Gufo Labs modules
from ..abc.failfast import BaseFailFast
class AlwaysFailFast(BaseFailFast):
    """
    Always fail-fast. Trigger fail-fast unconditionally.
    """
    def must_die(
        self,
        t: Type[BaseException],
        v: BaseException,
        tb: TracebackType,
    ) -> bool:
        """Decide whether the process must terminate — always yes.

        Args:
            t: Type of the exception being handled.
            v: The exception instance.
            tb: Traceback associated with the exception.

        Returns:
            Always ``True``: every error triggers fail-fast.
        """
        return True
| 24.962963 | 71 | 0.449555 |
from typing import Type
from types import TracebackType
from ..abc.failfast import BaseFailFast
class AlwaysFailFast(BaseFailFast):
def must_die(
self,
t: Type[BaseException],
v: BaseException,
tb: TracebackType,
) -> bool:
return True
| true | true |
f726670921d44f21aa09f17d795a742ee0c1fa0c | 8,397 | py | Python | test/bitfinex_test.py | laisee/bitfinex | 6a3e7cd412f186eca0039602d32c65938a392747 | [
"MIT"
] | null | null | null | test/bitfinex_test.py | laisee/bitfinex | 6a3e7cd412f186eca0039602d32c65938a392747 | [
"MIT"
] | null | null | null | test/bitfinex_test.py | laisee/bitfinex | 6a3e7cd412f186eca0039602d32c65938a392747 | [
"MIT"
] | null | null | null | import unittest
import mock
import requests
import httpretty
import settings
from bitfinex.client import Client, TradeClient
API_KEY = settings.API_KEY
API_SECRET = settings.API_SECRET
class BitfinexTest(unittest.TestCase):
    """Unit tests for the public (unauthenticated) Bitfinex REST client.

    The URL-building tests run purely in memory; the endpoint tests stub
    the HTTP layer with httpretty, so no network access is required.
    """
    def setUp(self):
        # Fresh unauthenticated client for every test.
        self.client = Client()
    def test_should_have_server(self):
        self.assertEqual("https://api.bitfinex.com/v1", self.client.server())
    def test_should_have_url_for_foo(self):
        expected = "https://api.bitfinex.com/v1/foo"
        self.assertEqual(expected, self.client.url_for("foo"))
    def test_should_have_url_for_path_arg(self):
        # path_arg is interpolated into the '%s' placeholder of the path.
        expected = "https://api.bitfinex.com/v1/foo/bar"
        actual = self.client.url_for('foo/%s', path_arg="bar")
        self.assertEqual(expected, actual)
    def test_should_have_url_with_parameters(self):
        # parameters are rendered as a query string.
        expected = "https://api.bitfinex.com/v1/foo?a=1&b=2"
        actual = self.client.url_for('foo', parameters={'a': 1, 'b': 2})
        self.assertEqual(expected, actual)
    def test_should_have_url_for(self):
        expected = self.client.url_for("foo")
        self.assertEqual("https://api.bitfinex.com/v1/foo", expected)
    def test_should_have_url_for_with_path_arg(self):
        # path_arg works both as keyword and as second positional argument.
        expected = "https://api.bitfinex.com/v1/foo/bar"
        path = "foo/%s"
        self.assertEqual(expected, self.client.url_for(path, path_arg='bar'))
        self.assertEqual(expected, self.client.url_for(path, 'bar'))
    def test_should_have_url_for_with_parameters(self):
        # parameters works as keyword and as third positional argument.
        expected = "https://api.bitfinex.com/v1/foo?a=1"
        self.assertEqual(expected, self.client.url_for("foo", parameters={'a': 1}))
        self.assertEqual(expected, self.client.url_for("foo", None, {'a': 1}))
    def test_should_have_url_for_with_path_arg_and_parameters(self):
        expected = "https://api.bitfinex.com/v1/foo/bar?a=1"
        path = "foo/%s"
        self.assertEqual(expected, self.client.url_for(path, path_arg='bar', parameters={'a': 1}))
        self.assertEqual(expected, self.client.url_for(path, 'bar', {'a': 1}))
    @httpretty.activate
    def test_should_have_symbols(self):
        # mock out the request
        mock_body = '["btcusd","ltcusd","ltcbtc"]'
        url = self.client.url_for('symbols')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = ["btcusd","ltcusd","ltcbtc"]
        self.assertEqual(expected, self.client.symbols())
    @httpretty.activate
    def test_should_have_ticker(self):
        # mock out the request
        mock_body = '{"mid":"562.56495","bid":"562.15","ask":"562.9799","last_price":"562.25","timestamp":"1395552658.339936691"}'
        url = self.client.url_for('ticker/%s', path_arg='btcusd')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        # The client is expected to coerce the API's string fields to floats.
        expected = {
            "mid": 562.56495,
            "bid": 562.15,
            "ask": 562.9799,
            "last_price": 562.25,
            "timestamp": 1395552658.339936691
        }
        self.assertEqual(expected, self.client.ticker('btcusd'))
    @httpretty.activate
    def test_should_have_today(self):
        # mock out the request
        mock_body = '{"low":"550.09","high":"572.2398","volume":"7305.33119836"}'
        url = self.client.url_for('today/%s', path_arg='btcusd')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = {
            "low": 550.09,
            "high": 572.2398,
            "volume": 7305.33119836
        }
        self.assertEqual(expected, self.client.today('btcusd'))
    @httpretty.activate
    def test_should_have_stats(self):
        # mock out the request
        mock_body = '[{"period":1,"volume":"7410.27250155"},{"period":7,"volume":"52251.37118006"},{"period":30,"volume":"464505.07753251"}]'
        url = self.client.url_for('stats/%s', path_arg='btcusd')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = [
            {"period": 1, "volume": 7410.27250155},
            {"period": 7, "volume": 52251.37118006},
            {"period": 30,"volume": 464505.07753251}
        ]
        self.assertEqual(expected, self.client.stats('btcusd'))
    @httpretty.activate
    def test_should_have_lendbook(self):
        # mock out the request
        mock_body = '{"bids":[{"rate":"5.475","amount":"15.03894663","period":30,"timestamp":"1395112149.0","frr":"No"},{"rate":"2.409","amount":"14.5121868","period":7,"timestamp":"1395497599.0","frr":"No"}],"asks":[{"rate":"6.351","amount":"15.5180735","period":5,"timestamp":"1395549996.0","frr":"No"},{"rate":"6.3588","amount":"626.94808249","period":30,"timestamp":"1395400654.0","frr":"Yes"}]}'
        url = self.client.url_for('lendbook/%s', 'btc')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        # "frr" strings ("No"/"Yes") are expected to come back as booleans.
        expected = {
            "bids": [
                {"rate": 5.475, "amount": 15.03894663, "period": 30, "timestamp": 1395112149.0, "frr": False},
                {"rate": 2.409, "amount": 14.5121868, "period": 7, "timestamp": 1395497599.0, "frr": False}
            ],
            "asks": [
                {"rate": 6.351, "amount": 15.5180735, "period": 5, "timestamp": 1395549996.0, "frr": False},
                {"rate": 6.3588, "amount": 626.94808249, "period": 30, "timestamp": 1395400654.0, "frr": True}
            ]
        }
        self.assertEqual(expected, self.client.lendbook('btc'))
    @httpretty.activate
    def test_should_have_lendbook_with_parameters(self):
        # mock out the request
        mock_body = '{"bids":[{"rate":"5.475","amount":"15.03894663","period":30,"timestamp":"1395112149.0","frr":"No"},{"rate":"2.409","amount":"14.5121868","period":7,"timestamp":"1395497599.0","frr":"No"}],"asks":[]}'
        parameters = {'limit_bids': 2, 'limit_asks': 0}
        url = self.client.url_for('lendbook/%s', 'btc', parameters)
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = {
            "bids": [
                {"rate": 5.475, "amount": 15.03894663, "period": 30, "timestamp": 1395112149.0, "frr": False},
                {"rate": 2.409, "amount": 14.5121868, "period": 7, "timestamp": 1395497599.0, "frr": False}
            ],
            "asks": [
            ]
        }
        self.assertEqual(expected, self.client.lendbook('btc', parameters))
    @httpretty.activate
    def test_should_have_order_book(self):
        # mock out the request
        mock_body = '{"bids":[{"price":"562.2601","amount":"0.985","timestamp":"1395567556.0"}],"asks":[{"price":"563.001","amount":"0.3","timestamp":"1395532200.0"}]}'
        url = self.client.url_for('book/%s', 'btcusd')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = {
            "bids": [
                {"price": 562.2601, "amount": 0.985, "timestamp": 1395567556.0}
            ],
            "asks": [
                {"price": 563.001, "amount": 0.3, "timestamp": 1395532200.0}
            ]
        }
        self.assertEqual(expected, self.client.order_book('btcusd'))
    @httpretty.activate
    def test_should_have_order_book_with_parameters(self):
        # mock out the request
        mock_body = '{"bids":[{"price":"562.2601","amount":"0.985","timestamp":"1395567556.0"}],"asks":[]}'
        parameters = {'limit_asks': 0}
        url = self.client.url_for('book/%s', 'btcusd', parameters)
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = {
            "bids": [
                {"price": 562.2601, "amount": 0.985, "timestamp": 1395567556.0}
            ],
            "asks": []
        }
        self.assertEqual(expected, self.client.order_book('btcusd', parameters))
class TestTradeClient(unittest.TestCase):
    """Tests for the authenticated TradeClient.

    NOTE(review): unlike BitfinexTest these tests are not stubbed with
    httpretty — they call the client with the API_KEY/API_SECRET from
    settings, which looks like it hits the live service; confirm before
    enabling in CI.
    """
    def setUp(self):
        self.tc = TradeClient(API_KEY, API_SECRET)
    def test_instantiate_tradeclient(self):
        self.assertIsInstance(self.tc, TradeClient)
    def test_get_active_orders_returns_json(self):
        ao = self.tc.active_orders()
        self.assertIsInstance(ao, list)
    def test_get_active_positions_returns_json(self):
        ap = self.tc.active_positions()
        self.assertIsInstance(ap, list)
    def test_get_full_history(self):
        # NOTE(review): this duplicates test_get_active_positions_returns_json —
        # it calls active_positions(), not a history endpoint. Looks like a
        # copy-paste leftover; confirm the intended TradeClient method.
        ap = self.tc.active_positions()
        self.assertIsInstance(ap, list)
| 37.995475 | 400 | 0.609265 | import unittest
import mock
import requests
import httpretty
import settings
from bitfinex.client import Client, TradeClient
API_KEY = settings.API_KEY
API_SECRET = settings.API_SECRET
class BitfinexTest(unittest.TestCase):
def setUp(self):
self.client = Client()
def test_should_have_server(self):
self.assertEqual("https://api.bitfinex.com/v1", self.client.server())
def test_should_have_url_for_foo(self):
expected = "https://api.bitfinex.com/v1/foo"
self.assertEqual(expected, self.client.url_for("foo"))
def test_should_have_url_for_path_arg(self):
expected = "https://api.bitfinex.com/v1/foo/bar"
actual = self.client.url_for('foo/%s', path_arg="bar")
self.assertEqual(expected, actual)
def test_should_have_url_with_parameters(self):
expected = "https://api.bitfinex.com/v1/foo?a=1&b=2"
actual = self.client.url_for('foo', parameters={'a': 1, 'b': 2})
self.assertEqual(expected, actual)
def test_should_have_url_for(self):
expected = self.client.url_for("foo")
self.assertEqual("https://api.bitfinex.com/v1/foo", expected)
def test_should_have_url_for_with_path_arg(self):
expected = "https://api.bitfinex.com/v1/foo/bar"
path = "foo/%s"
self.assertEqual(expected, self.client.url_for(path, path_arg='bar'))
self.assertEqual(expected, self.client.url_for(path, 'bar'))
def test_should_have_url_for_with_parameters(self):
expected = "https://api.bitfinex.com/v1/foo?a=1"
self.assertEqual(expected, self.client.url_for("foo", parameters={'a': 1}))
self.assertEqual(expected, self.client.url_for("foo", None, {'a': 1}))
def test_should_have_url_for_with_path_arg_and_parameters(self):
expected = "https://api.bitfinex.com/v1/foo/bar?a=1"
path = "foo/%s"
self.assertEqual(expected, self.client.url_for(path, path_arg='bar', parameters={'a': 1}))
self.assertEqual(expected, self.client.url_for(path, 'bar', {'a': 1}))
@httpretty.activate
def test_should_have_symbols(self):
mock_body = '["btcusd","ltcusd","ltcbtc"]'
url = self.client.url_for('symbols')
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = ["btcusd","ltcusd","ltcbtc"]
self.assertEqual(expected, self.client.symbols())
@httpretty.activate
def test_should_have_ticker(self):
mock_body = '{"mid":"562.56495","bid":"562.15","ask":"562.9799","last_price":"562.25","timestamp":"1395552658.339936691"}'
url = self.client.url_for('ticker/%s', path_arg='btcusd')
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = {
"mid": 562.56495,
"bid": 562.15,
"ask": 562.9799,
"last_price": 562.25,
"timestamp": 1395552658.339936691
}
self.assertEqual(expected, self.client.ticker('btcusd'))
@httpretty.activate
def test_should_have_today(self):
mock_body = '{"low":"550.09","high":"572.2398","volume":"7305.33119836"}'
url = self.client.url_for('today/%s', path_arg='btcusd')
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = {
"low": 550.09,
"high": 572.2398,
"volume": 7305.33119836
}
self.assertEqual(expected, self.client.today('btcusd'))
@httpretty.activate
def test_should_have_stats(self):
mock_body = '[{"period":1,"volume":"7410.27250155"},{"period":7,"volume":"52251.37118006"},{"period":30,"volume":"464505.07753251"}]'
url = self.client.url_for('stats/%s', path_arg='btcusd')
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = [
{"period": 1, "volume": 7410.27250155},
{"period": 7, "volume": 52251.37118006},
{"period": 30,"volume": 464505.07753251}
]
self.assertEqual(expected, self.client.stats('btcusd'))
@httpretty.activate
def test_should_have_lendbook(self):
mock_body = '{"bids":[{"rate":"5.475","amount":"15.03894663","period":30,"timestamp":"1395112149.0","frr":"No"},{"rate":"2.409","amount":"14.5121868","period":7,"timestamp":"1395497599.0","frr":"No"}],"asks":[{"rate":"6.351","amount":"15.5180735","period":5,"timestamp":"1395549996.0","frr":"No"},{"rate":"6.3588","amount":"626.94808249","period":30,"timestamp":"1395400654.0","frr":"Yes"}]}'
url = self.client.url_for('lendbook/%s', 'btc')
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = {
"bids": [
{"rate": 5.475, "amount": 15.03894663, "period": 30, "timestamp": 1395112149.0, "frr": False},
{"rate": 2.409, "amount": 14.5121868, "period": 7, "timestamp": 1395497599.0, "frr": False}
],
"asks": [
{"rate": 6.351, "amount": 15.5180735, "period": 5, "timestamp": 1395549996.0, "frr": False},
{"rate": 6.3588, "amount": 626.94808249, "period": 30, "timestamp": 1395400654.0, "frr": True}
]
}
self.assertEqual(expected, self.client.lendbook('btc'))
@httpretty.activate
def test_should_have_lendbook_with_parameters(self):
mock_body = '{"bids":[{"rate":"5.475","amount":"15.03894663","period":30,"timestamp":"1395112149.0","frr":"No"},{"rate":"2.409","amount":"14.5121868","period":7,"timestamp":"1395497599.0","frr":"No"}],"asks":[]}'
parameters = {'limit_bids': 2, 'limit_asks': 0}
url = self.client.url_for('lendbook/%s', 'btc', parameters)
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = {
"bids": [
{"rate": 5.475, "amount": 15.03894663, "period": 30, "timestamp": 1395112149.0, "frr": False},
{"rate": 2.409, "amount": 14.5121868, "period": 7, "timestamp": 1395497599.0, "frr": False}
],
"asks": [
]
}
self.assertEqual(expected, self.client.lendbook('btc', parameters))
@httpretty.activate
def test_should_have_order_book(self):
mock_body = '{"bids":[{"price":"562.2601","amount":"0.985","timestamp":"1395567556.0"}],"asks":[{"price":"563.001","amount":"0.3","timestamp":"1395532200.0"}]}'
url = self.client.url_for('book/%s', 'btcusd')
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = {
"bids": [
{"price": 562.2601, "amount": 0.985, "timestamp": 1395567556.0}
],
"asks": [
{"price": 563.001, "amount": 0.3, "timestamp": 1395532200.0}
]
}
self.assertEqual(expected, self.client.order_book('btcusd'))
@httpretty.activate
def test_should_have_order_book_with_parameters(self):
mock_body = '{"bids":[{"price":"562.2601","amount":"0.985","timestamp":"1395567556.0"}],"asks":[]}'
parameters = {'limit_asks': 0}
url = self.client.url_for('book/%s', 'btcusd', parameters)
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = {
"bids": [
{"price": 562.2601, "amount": 0.985, "timestamp": 1395567556.0}
],
"asks": []
}
self.assertEqual(expected, self.client.order_book('btcusd', parameters))
class TestTradeClient(unittest.TestCase):
def setUp(self):
self.tc = TradeClient(API_KEY, API_SECRET)
def test_instantiate_tradeclient(self):
self.assertIsInstance(self.tc, TradeClient)
def test_get_active_orders_returns_json(self):
ao = self.tc.active_orders()
self.assertIsInstance(ao, list)
def test_get_active_positions_returns_json(self):
ap = self.tc.active_positions()
self.assertIsInstance(ap, list)
def test_get_full_history(self):
ap = self.tc.active_positions()
self.assertIsInstance(ap, list)
| true | true |
f726671fe689fac6b9c56332427272898befdccc | 388 | py | Python | api/v10/urls.py | huylb314/sample-django | 5c53e05ccd62abc075e4a9942681ab845d5be2e0 | [
"MIT"
] | null | null | null | api/v10/urls.py | huylb314/sample-django | 5c53e05ccd62abc075e4a9942681ab845d5be2e0 | [
"MIT"
] | null | null | null | api/v10/urls.py | huylb314/sample-django | 5c53e05ccd62abc075e4a9942681ab845d5be2e0 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
from django.views.decorators.csrf import csrf_exempt
#GET charts?userid=0&clientid=1
#GET charts?userid=0&clientid=1&chartid=2
#DELETE charts?userid=0&clientid=1&chartid=2
#POST charts?userid=0&clientid=1&chartid=2
#POST charts?userid=0&clientid=1
# Single endpoint: every documented verb on /charts (GET/POST/DELETE, see the
# examples above) is routed to views.doTheMagic, with CSRF checks disabled
# via csrf_exempt.
urlpatterns = [
    url(r'^charts$', csrf_exempt(views.doTheMagic)),
]
| 27.714286 | 53 | 0.744845 | from django.conf.urls import url
from . import views
from django.views.decorators.csrf import csrf_exempt
urlpatterns = [
url(r'^charts$', csrf_exempt(views.doTheMagic)),
]
| true | true |
f7266798c32cdc9f7db93d85cd1ba7543a2f7525 | 4,011 | py | Python | users/views.py | mayronH/desafio-frexco | 636b0d5ef11cb663cb46c022eed69fe8fcee49e3 | [
"MIT"
] | null | null | null | users/views.py | mayronH/desafio-frexco | 636b0d5ef11cb663cb46c022eed69fe8fcee49e3 | [
"MIT"
] | null | null | null | users/views.py | mayronH/desafio-frexco | 636b0d5ef11cb663cb46c022eed69fe8fcee49e3 | [
"MIT"
] | null | null | null | import csv
import xlsxwriter
import io
from django.http import HttpResponse, JsonResponse
from django.shortcuts import redirect, render
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.utils.translation import gettext
from users.models import CustomUser
from users.forms import SignUpForm
# Create your views here.
def index(request):
    """Render the landing page with the current user in the template context."""
    context = {"user": request.user}
    return render(request, 'index.html', context)
def signup(request):
    """Handle user registration.

    On a valid POST: save the new user, generate a password when none was
    supplied, log the user in and redirect to the index. On GET (or an
    invalid POST) render the signup form.
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            user = form.save()
            # Fix: reload from the DB *before* touching the password.
            # Previously refresh_from_db() ran after set_password(), which
            # discarded the in-memory hash, so the generated password was
            # never persisted by the following save().
            user.refresh_from_db()
            password = form.cleaned_data.get('password1')
            if not password:
                # No password supplied by the form: assign a generated one.
                password = CustomUser.make_password()
            user.set_password(password)
            user.save()
            login(request, user, backend='django.contrib.auth.backends.ModelBackend')
            return redirect('index')
    else:
        form = SignUpForm()
    return render(request, 'registration/signup.html', {'form': form})
@login_required
def dashboard(request):
    """Render the dashboard listing all users, oldest signup first."""
    context = {'users': CustomUser.objects.order_by('date_joined')}
    return render(request, 'dashboard.html', context)
@login_required
def jsonUsers(request):
    """Return every user record serialized as a JSON array."""
    # safe=False lets JsonResponse emit a top-level list instead of a dict.
    return JsonResponse(list(CustomUser.objects.values()), safe=False)
@login_required
def csvUsers(request):
    """Return the user list as a downloadable CSV attachment."""
    fieldnames = ['username', 'birthdate', 'date_joined', 'last_login']
    rows = CustomUser.objects.all().values_list(*fieldnames)
    response = HttpResponse(
        content_type='text/csv',
        headers={'Content-Disposition': 'attachment; filename="users.csv"'},
    )
    writer = csv.writer(response)
    writer.writerow(fieldnames)
    writer.writerows(rows)
    return response
@login_required
def xlsxUsers(request):
    """Export the user list as a downloadable XLSX workbook.

    Builds the sheet in memory (BytesIO) with a title row, a header row and
    one row per user, then returns it as an attachment named users.xlsx.
    """
    # Fix: dates were formatted with '%d/%M/%Y' — %M is *minutes*; the
    # intended month code is %m.
    date_format = '%d/%m/%Y'
    output = io.BytesIO()
    workbook = xlsxwriter.Workbook(output)
    title = workbook.add_format({
        'bold': True,
        'font_size': 18,
        'align': 'center',
        'valign': 'vcenter',
        'text_wrap': True,
    })
    header = workbook.add_format({
        'color': 'black',
        'align': 'center',
        'valign': 'top',
        'text_wrap': True,
        'border': 1
    })
    cell = workbook.add_format({
        'color': 'black',
        'text_wrap': True,
        'top': 1,
        'bottom': 1
    })
    worksheet = workbook.add_worksheet()
    title_text = u"Usuários Cadastrados"
    worksheet.merge_range('A2:E2', title_text, title)
    # Header row (0-based row index 2 == spreadsheet row 3).
    worksheet.write(2, 0, ("N"), header)
    worksheet.write(2, 1, ("username"), header)
    worksheet.write(2, 2, ("birthdate"), header)
    worksheet.write(2, 3, ("date_joined"), header)
    worksheet.write(2, 4, ("last_login"), header)
    users = CustomUser.objects.all()
    for index, user in enumerate(users):
        row = 3 + index
        worksheet.write_number(row, 0, index + 1, cell)
        worksheet.write_string(row, 1, user.username, cell)
        worksheet.write(row, 2, user.birthdate.strftime(date_format), cell)
        worksheet.write(row, 3, user.date_joined.strftime(date_format), cell)
        if user.last_login is not None:
            worksheet.write(row, 4, user.last_login.strftime(date_format), cell)
        else:
            # Users who never logged in get the literal string "None".
            worksheet.write(row, 4, str(user.last_login), cell)
    workbook.close()
    output.seek(0)
    response = HttpResponse(
        output,
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
        headers={'Content-Disposition': 'attachment; filename="users.xlsx"'},
    )
    return response
import xlsxwriter
import io
from django.http import HttpResponse, JsonResponse
from django.shortcuts import redirect, render
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.utils.translation import gettext
from users.models import CustomUser
from users.forms import SignUpForm
def index(request):
user = request.user
return render(request, 'index.html', {
"user": user,
})
def signup(request):
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
user = form.save()
password = form.cleaned_data.get('password1')
if not password:
password = CustomUser.make_password()
user.set_password(password)
user.refresh_from_db()
user.save()
login(request, user, backend='django.contrib.auth.backends.ModelBackend')
return redirect('index')
else:
form = SignUpForm()
return render(request, 'registration/signup.html', {'form': form})
@login_required
def dashboard(request):
user_list = CustomUser.objects.order_by('date_joined')
return render(request, 'dashboard.html', {'users' : user_list})
@login_required
def jsonUsers(request):
user_list = list(CustomUser.objects.values())
return JsonResponse(user_list, safe=False)
@login_required
def csvUsers(request):
users = CustomUser.objects.all().values_list('username', 'birthdate', 'date_joined', 'last_login')
response = HttpResponse(
content_type='text/csv',
headers={'Content-Disposition': 'attachment; filename="users.csv"'},
)
writer = csv.writer(response)
writer.writerow(['username', 'birthdate', 'date_joined', 'last_login'])
for user in users:
writer.writerow(user)
return response
@login_required
def xlsxUsers(request):
    """Export the user list as a downloadable XLSX workbook.

    Builds the sheet in memory (BytesIO) with a title row, a header row and
    one row per user, then returns it as an attachment named users.xlsx.
    """
    # Fix: dates were formatted with '%d/%M/%Y' — %M is *minutes*; the
    # intended month code is %m.
    date_format = '%d/%m/%Y'
    output = io.BytesIO()
    workbook = xlsxwriter.Workbook(output)
    title = workbook.add_format({
        'bold': True,
        'font_size': 18,
        'align': 'center',
        'valign': 'vcenter',
        'text_wrap': True,
    })
    header = workbook.add_format({
        'color': 'black',
        'align': 'center',
        'valign': 'top',
        'text_wrap': True,
        'border': 1
    })
    cell = workbook.add_format({
        'color': 'black',
        'text_wrap': True,
        'top': 1,
        'bottom': 1
    })
    worksheet = workbook.add_worksheet()
    title_text = u"Usuários Cadastrados"
    worksheet.merge_range('A2:E2', title_text, title)
    # Header row (0-based row index 2 == spreadsheet row 3).
    worksheet.write(2, 0, ("N"), header)
    worksheet.write(2, 1, ("username"), header)
    worksheet.write(2, 2, ("birthdate"), header)
    worksheet.write(2, 3, ("date_joined"), header)
    worksheet.write(2, 4, ("last_login"), header)
    users = CustomUser.objects.all()
    for index, user in enumerate(users):
        row = 3 + index
        worksheet.write_number(row, 0, index + 1, cell)
        worksheet.write_string(row, 1, user.username, cell)
        worksheet.write(row, 2, user.birthdate.strftime(date_format), cell)
        worksheet.write(row, 3, user.date_joined.strftime(date_format), cell)
        if user.last_login is not None:
            worksheet.write(row, 4, user.last_login.strftime(date_format), cell)
        else:
            # Users who never logged in get the literal string "None".
            worksheet.write(row, 4, str(user.last_login), cell)
    workbook.close()
    output.seek(0)
    response = HttpResponse(
        output,
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
        headers={'Content-Disposition': 'attachment; filename="users.xlsx"'},
    )
    return response
f72667a1d24d7941c968cd6a40963f081df72e57 | 2,540 | py | Python | tests/unit/kubernetes/test_Ssh.py | owasp-sbot/OSBot-K8s | 74403194481215abb24ca82714bd23ea51b33dfd | [
"Apache-2.0"
] | null | null | null | tests/unit/kubernetes/test_Ssh.py | owasp-sbot/OSBot-K8s | 74403194481215abb24ca82714bd23ea51b33dfd | [
"Apache-2.0"
] | null | null | null | tests/unit/kubernetes/test_Ssh.py | owasp-sbot/OSBot-K8s | 74403194481215abb24ca82714bd23ea51b33dfd | [
"Apache-2.0"
] | null | null | null | import os
from unittest import TestCase
import pytest
from dotenv import load_dotenv
from pytest import skip
from osbot_utils.utils.Files import file_not_exists
from osbot_k8s.kubernetes.Ssh import Ssh
@pytest.mark.skip('needs live server')                      # todo add to test setup the creation of pods and nodes we can SSH into
class test_Ssh(TestCase):
    """Integration tests for the Ssh wrapper.

    These tests need a reachable SSH server; connection details come from
    the TEST_SSH_USER / TEST_SSH_SERVER / TEST_SSH_KEY environment
    variables (loaded from .env). The whole class is skipped until a live
    server is provisioned in the test setup.
    """
    def setUp(self) -> None:
        load_dotenv()
        # Build the connection config from the environment.
        self.ssh_config = {
            "user" : os.environ.get('TEST_SSH_USER' ),
            "server" : os.environ.get('TEST_SSH_SERVER'),
            "ssh_key" : os.environ.get('TEST_SSH_KEY' )
        }
        # Without a key file on disk none of the tests can connect.
        if file_not_exists(self.ssh_config.get('ssh_key')):
            skip('no ssh key in current test environment')
        self.ssh = Ssh(ssh_config=self.ssh_config)
        print()
    # base methods
    def test_server_in_known_hosts(self):
        result = self.ssh.server_in_known_hosts() # todo: add method to programatically add the server to the known_hosts file
        assert type(result) is bool
    def test_exec_ssh_command(self):
        # Success and failure paths: status flag plus captured stdout/stderr.
        assert self.ssh.exec_ssh_command('uname') == {'error': '', 'output': 'Linux\n', 'status': True}
        assert self.ssh.exec_ssh_command('aaaa' ) == {'error': 'bash: aaaa: command not found\n', 'output': '', 'status': False}
    def test_get_get_scp_params(self):
        source_file = 'source_file'
        target_file = 'target_file'
        ssh_params = self.ssh.get_scp_params(source_file, target_file)
        assert ssh_params == ['-i', self.ssh_config.get('ssh_key'),
                              f"{self.ssh_config.get('user')}@{self.ssh_config.get('server')}:{source_file}",
                              target_file]
    def test_get_get_ssh_params(self):
        ssh_params = self.ssh.get_ssh_params('aaa')
        assert ssh_params == ['-o StrictHostKeyChecking=no',
                              '-t', '-i', self.ssh_config.get('ssh_key'),
                              self.ssh_config.get('user') + '@' + self.ssh_config.get('server'),
                              'aaa']
    def test_exec(self):
        assert 'bin' in self.ssh.exec('cd /; ls')
    # helper methods
    def test_uname(self):
        assert self.ssh.uname() == 'Linux'
    # def create_pods(self, count):
    #     return self.ssh.exec(f'/home/ubuntu/icap-infrastructure/scripts/create_pod.sh {count}')
    #
    # def test_created_pod(self):
    #     self.create_pods(1)
    #     #assert 'bin' in self.ssh.exec('ls')
    # # helper methods: esxcli
| 35.277778 | 136 | 0.606693 | import os
from unittest import TestCase
import pytest
from dotenv import load_dotenv
from pytest import skip
from osbot_utils.utils.Files import file_not_exists
from osbot_k8s.kubernetes.Ssh import Ssh
@pytest.mark.skip('needs live server')
# NOTE(review): requires a live SSH target; the whole class is skipped until
# the test setup can provision pods / nodes that can be SSH'd into.
class test_Ssh(TestCase):
    """Integration tests for the osbot_k8s Ssh wrapper (live-server only)."""
    def setUp(self) -> None:
        # SSH connection details come from the environment (via a .env file)
        load_dotenv()
        self.ssh_config = {
            "user"      : os.environ.get('TEST_SSH_USER'  ),
            "server"    : os.environ.get('TEST_SSH_SERVER'),
            "ssh_key"   : os.environ.get('TEST_SSH_KEY'   )
        }
        # Without a local private key file the tests cannot connect at all
        if file_not_exists(self.ssh_config.get('ssh_key')):
            skip('no ssh key in current test environment')
        self.ssh = Ssh(ssh_config=self.ssh_config)
        print()
    def test_server_in_known_hosts(self):
        """server_in_known_hosts() always returns a bool."""
        result = self.ssh.server_in_known_hosts()
        assert type(result) is bool
    def test_exec_ssh_command(self):
        """A known command reports its output; an unknown one sets status False."""
        assert self.ssh.exec_ssh_command('uname') == {'error': '', 'output': 'Linux\n', 'status': True}
        assert self.ssh.exec_ssh_command('aaaa' ) == {'error': 'bash: aaaa: command not found\n', 'output': '', 'status': False}
    def test_get_get_scp_params(self):
        """get_scp_params builds the argv list for an scp download."""
        source_file = 'source_file'
        target_file = 'target_file'
        ssh_params = self.ssh.get_scp_params(source_file, target_file)
        assert ssh_params == ['-i', self.ssh_config.get('ssh_key'),
                              f"{self.ssh_config.get('user')}@{self.ssh_config.get('server')}:{source_file}",
                              target_file]
    def test_get_get_ssh_params(self):
        """get_ssh_params builds the argv list for an ssh invocation."""
        ssh_params = self.ssh.get_ssh_params('aaa')
        assert ssh_params == ['-o StrictHostKeyChecking=no',
                              '-t', '-i', self.ssh_config.get('ssh_key'),
                              self.ssh_config.get('user') + '@' + self.ssh_config.get('server'),
                              'aaa']
    def test_exec(self):
        """exec() runs a shell command and returns its stdout."""
        assert 'bin' in self.ssh.exec('cd /; ls')
    def test_uname(self):
        assert self.ssh.uname() == 'Linux'
f72667ffda4b29a371dcff42c53030225f8f2e83 | 331 | py | Python | app/main/forms.py | DevWaweru/Watchlist | e9671c61fc543668b82fd1422fad0f6483640cca | [
"MIT"
] | null | null | null | app/main/forms.py | DevWaweru/Watchlist | e9671c61fc543668b82fd1422fad0f6483640cca | [
"MIT"
] | null | null | null | app/main/forms.py | DevWaweru/Watchlist | e9671c61fc543668b82fd1422fad0f6483640cca | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField
from wtforms.validators import Required
class ReviewForm(FlaskForm):
    """WTForms form used to submit a movie review."""
    # Short title for the review; required
    title = StringField('Review Title', validators=[Required()])
    # Body text of the review; required
    review = TextAreaField('Movie review',validators=[Required()])
    # NOTE(review): wtforms' `Required` is a deprecated alias of
    # `DataRequired` — confirm the pinned wtforms version still exports it.
    submit = SubmitField('Submit')
| 36.777778 | 66 | 0.770393 | from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField
from wtforms.validators import Required
class ReviewForm(FlaskForm):
    """WTForms form used to submit a movie review."""
    # Short title for the review; required
    title = StringField('Review Title', validators=[Required()])
    # Body text of the review; required
    review = TextAreaField('Movie review',validators=[Required()])
    # NOTE(review): wtforms' `Required` is a deprecated alias of
    # `DataRequired` — confirm the pinned wtforms version still exports it.
    submit = SubmitField('Submit')
| true | true |
f72668f9f0f1bcd1855b3e06f521866410ab3bc5 | 23,269 | py | Python | InvenTree/company/models.py | inmys/InvenTree | a0d1622926ba9a13839adfe64a8fe21c073692f2 | [
"MIT"
] | null | null | null | InvenTree/company/models.py | inmys/InvenTree | a0d1622926ba9a13839adfe64a8fe21c073692f2 | [
"MIT"
] | null | null | null | InvenTree/company/models.py | inmys/InvenTree | a0d1622926ba9a13839adfe64a8fe21c073692f2 | [
"MIT"
] | null | null | null | """
Company database model definitions
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MinValueValidator
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Sum, Q, UniqueConstraint
from django.apps import apps
from django.urls import reverse
from moneyed import CURRENCIES
from markdownx.models import MarkdownxField
from stdimage.models import StdImageField
from InvenTree.helpers import getMediaUrl, getBlankImage, getBlankThumbnail
from InvenTree.fields import InvenTreeURLField
from InvenTree.status_codes import PurchaseOrderStatus
import InvenTree.validators
import common.models
import common.settings
from common.settings import currency_code_default
def rename_company_image(instance, filename):
    """Return the storage path for a Company image upload.

    The uploaded filename is discarded except for its extension; the stored
    name is derived from the company's primary key so each company has a
    single, stable image path.

    Args:
        instance: Company instance the image belongs to
        filename: name of the uploaded file (only the extension is used)

    Returns:
        Path of the form ``company_images/company_<pk>_img[.<ext>]``
    """
    _stem, sep, tail = filename.rpartition('.')
    # rpartition returns sep == '' when there is no dot at all
    extension = tail if sep else ''
    new_name = 'company_{pk}_img'.format(pk=instance.pk)
    # A trailing-dot filename yields an empty extension: keep the bare name
    if extension:
        new_name = '{}.{}'.format(new_name, extension)
    return os.path.join('company_images', new_name)
class Company(models.Model):
""" A Company object represents an external company.
It may be a supplier or a customer or a manufacturer (or a combination)
- A supplier is a company from which parts can be purchased
- A customer is a company to which parts can be sold
- A manufacturer is a company which manufactures a raw good (they may or may not be a "supplier" also)
Attributes:
name: Brief name of the company
description: Longer form description
website: URL for the company website
address: Postal address
phone: contact phone number
email: contact email address
link: Secondary URL e.g. for link to internal Wiki page
image: Company image / logo
notes: Extra notes about the company
is_customer: boolean value, is this company a customer
is_supplier: boolean value, is this company a supplier
is_manufacturer: boolean value, is this company a manufacturer
currency_code: Specifies the default currency for the company
"""
@staticmethod
def get_api_url():
return reverse('api-company-list')
class Meta:
ordering = ['name', ]
constraints = [
UniqueConstraint(fields=['name', 'email'], name='unique_name_email_pair')
]
verbose_name_plural = "Companies"
name = models.CharField(max_length=100, blank=False,
help_text=_('Company name'),
verbose_name=_('Company name'))
description = models.CharField(
max_length=500,
verbose_name=_('Company description'),
help_text=_('Description of the company'),
blank=True,
)
website = models.URLField(
blank=True,
verbose_name=_('Website'),
help_text=_('Company website URL')
)
address = models.CharField(max_length=200,
verbose_name=_('Address'),
blank=True, help_text=_('Company address'))
phone = models.CharField(max_length=50,
verbose_name=_('Phone number'),
blank=True, help_text=_('Contact phone number'))
email = models.EmailField(blank=True, null=True,
verbose_name=_('Email'), help_text=_('Contact email address'))
contact = models.CharField(max_length=100,
verbose_name=_('Contact'),
blank=True, help_text=_('Point of contact'))
link = InvenTreeURLField(blank=True, verbose_name=_('Link'), help_text=_('Link to external company information'))
image = StdImageField(
upload_to=rename_company_image,
null=True,
blank=True,
variations={'thumbnail': (128, 128)},
delete_orphans=True,
verbose_name=_('Image'),
)
notes = MarkdownxField(blank=True, verbose_name=_('Notes'))
is_customer = models.BooleanField(default=False, verbose_name=_('is customer'), help_text=_('Do you sell items to this company?'))
is_supplier = models.BooleanField(default=True, verbose_name=_('is supplier'), help_text=_('Do you purchase items from this company?'))
is_manufacturer = models.BooleanField(default=False, verbose_name=_('is manufacturer'), help_text=_('Does this company manufacture parts?'))
currency = models.CharField(
max_length=3,
verbose_name=_('Currency'),
blank=True,
default=currency_code_default,
help_text=_('Default currency used for this company'),
validators=[InvenTree.validators.validate_currency_code],
)
@property
def currency_code(self):
"""
Return the currency code associated with this company.
- If the currency code is invalid, use the default currency
- If the currency code is not specified, use the default currency
"""
code = self.currency
if code not in CURRENCIES:
code = common.settings.currency_code_default()
return code
def __str__(self):
""" Get string representation of a Company """
return "{n} - {d}".format(n=self.name, d=self.description)
def get_absolute_url(self):
""" Get the web URL for the detail view for this Company """
return reverse('company-detail', kwargs={'pk': self.id})
def get_image_url(self):
""" Return the URL of the image for this company """
if self.image:
return getMediaUrl(self.image.url)
else:
return getBlankImage()
def get_thumbnail_url(self):
""" Return the URL for the thumbnail image for this Company """
if self.image:
return getMediaUrl(self.image.thumbnail.url)
else:
return getBlankThumbnail()
@property
def manufactured_part_count(self):
""" The number of parts manufactured by this company """
return self.manufactured_parts.count()
@property
def has_manufactured_parts(self):
return self.manufactured_part_count > 0
@property
def supplied_part_count(self):
""" The number of parts supplied by this company """
return self.supplied_parts.count()
@property
def has_supplied_parts(self):
""" Return True if this company supplies any parts """
return self.supplied_part_count > 0
@property
def parts(self):
""" Return SupplierPart objects which are supplied or manufactured by this company """
return SupplierPart.objects.filter(Q(supplier=self.id) | Q(manufacturer_part__manufacturer=self.id))
@property
def part_count(self):
""" The number of parts manufactured (or supplied) by this Company """
return self.parts.count()
@property
def has_parts(self):
return self.part_count > 0
@property
def stock_items(self):
""" Return a list of all stock items supplied or manufactured by this company """
stock = apps.get_model('stock', 'StockItem')
return stock.objects.filter(Q(supplier_part__supplier=self.id) | Q(supplier_part__manufacturer_part__manufacturer=self.id)).all()
@property
def stock_count(self):
""" Return the number of stock items supplied or manufactured by this company """
return self.stock_items.count()
def outstanding_purchase_orders(self):
""" Return purchase orders which are 'outstanding' """
return self.purchase_orders.filter(status__in=PurchaseOrderStatus.OPEN)
def pending_purchase_orders(self):
""" Return purchase orders which are PENDING (not yet issued) """
return self.purchase_orders.filter(status=PurchaseOrderStatus.PENDING)
def closed_purchase_orders(self):
""" Return purchase orders which are not 'outstanding'
- Complete
- Failed / lost
- Returned
"""
return self.purchase_orders.exclude(status__in=PurchaseOrderStatus.OPEN)
def complete_purchase_orders(self):
return self.purchase_orders.filter(status=PurchaseOrderStatus.COMPLETE)
def failed_purchase_orders(self):
""" Return any purchase orders which were not successful """
return self.purchase_orders.filter(status__in=PurchaseOrderStatus.FAILED)
class Contact(models.Model):
    """ A Contact represents a person who works at a particular company.
    A Company may have zero or more associated Contact objects.
    Attributes:
        company: Company link for this contact
        name: Name of the contact
        phone: contact phone number
        email: contact email
        role: position in company
    """
    # Fix: this ForeignKey was previously declared twice in the class body;
    # the redundant second declaration has been removed (the rebinding was
    # harmless but misleading).
    company = models.ForeignKey(Company, related_name='contacts',
                                on_delete=models.CASCADE)
    name = models.CharField(max_length=100)
    phone = models.CharField(max_length=100, blank=True)
    email = models.EmailField(blank=True)
    role = models.CharField(max_length=100, blank=True)
class ManufacturerPart(models.Model):
    """ Represents a unique part as provided by a Manufacturer
    Each ManufacturerPart is identified by a MPN (Manufacturer Part Number)
    Each ManufacturerPart is also linked to a Part object.
    A Part may be available from multiple manufacturers
    Attributes:
        part: Link to the master Part
        manufacturer: Company that manufactures the ManufacturerPart
        MPN: Manufacture part number
        link: Link to external website for this manufacturer part
        description: Descriptive notes field
    """
    @staticmethod
    def get_api_url():
        """Return the API endpoint (list view) associated with this model."""
        return reverse('api-manufacturer-part-list')
    class Meta:
        # A given (part, manufacturer, MPN) combination must be unique
        unique_together = ('part', 'manufacturer', 'MPN')
    part = models.ForeignKey('part.Part', on_delete=models.CASCADE,
                             related_name='manufacturer_parts',
                             verbose_name=_('Base Part'),
                             limit_choices_to={
                                 'purchaseable': True,
                             },
                             help_text=_('Select part'),
                             )
    manufacturer = models.ForeignKey(
        Company,
        on_delete=models.CASCADE,
        null=True,
        related_name='manufactured_parts',
        limit_choices_to={
            'is_manufacturer': True
        },
        verbose_name=_('Manufacturer'),
        help_text=_('Select manufacturer'),
    )
    MPN = models.CharField(
        null=True,
        max_length=100,
        verbose_name=_('MPN'),
        help_text=_('Manufacturer Part Number')
    )
    link = InvenTreeURLField(
        blank=True, null=True,
        verbose_name=_('Link'),
        help_text=_('URL for external manufacturer part link')
    )
    description = models.CharField(
        max_length=250, blank=True, null=True,
        verbose_name=_('Description'),
        help_text=_('Manufacturer part description')
    )
    @classmethod
    def create(cls, part, manufacturer, mpn, description, link=None):
        """Return the existing ManufacturerPart matching (part, manufacturer, mpn),
        or create and save a new one with the given description / link.
        """
        try:
            # Reuse an existing entry if one already matches the unique key
            return cls.objects.get(part=part, manufacturer=manufacturer, MPN=mpn)
        except cls.DoesNotExist:
            manufacturer_part = cls(part=part, manufacturer=manufacturer, MPN=mpn,
                                    description=description, link=link)
            manufacturer_part.save()
            return manufacturer_part
    def __str__(self):
        s = ''
        if self.manufacturer:
            s += f'{self.manufacturer.name}'
            s += ' | '
        s += f'{self.MPN}'
        return s
class ManufacturerPartParameter(models.Model):
    """
    A ManufacturerPartParameter represents a key:value parameter for a ManufacturerPart.
    This is used to represent parameters / properties for a particular manufacturer part.
    Each parameter is a simple string (text) value.
    """
    @staticmethod
    def get_api_url():
        """Return the API endpoint (list view) associated with this model."""
        return reverse('api-manufacturer-part-parameter-list')
    class Meta:
        # Each parameter name may appear at most once per manufacturer part
        unique_together = ('manufacturer_part', 'name')
    # Owning manufacturer part (reverse accessor: manufacturer_part.parameters)
    manufacturer_part = models.ForeignKey(
        ManufacturerPart,
        on_delete=models.CASCADE,
        related_name='parameters',
        verbose_name=_('Manufacturer Part'),
    )
    # Parameter key, e.g. "Tolerance"
    name = models.CharField(
        max_length=500,
        blank=False,
        verbose_name=_('Name'),
        help_text=_('Parameter name')
    )
    # Parameter value as free text, e.g. "5"
    value = models.CharField(
        max_length=500,
        blank=False,
        verbose_name=_('Value'),
        help_text=_('Parameter value')
    )
    # Optional units string, e.g. "%"
    units = models.CharField(
        max_length=64,
        blank=True, null=True,
        verbose_name=_('Units'),
        help_text=_('Parameter units')
    )
class SupplierPartManager(models.Manager):
    """Custom manager for SupplierPart objects.

    Its sole purpose is to reduce database hits: the SupplierPart model
    involves a lot of foreign-key lookups, so the related models are
    always prefetched up front.
    """
    # Related lookups eagerly fetched for every queryset
    _ALWAYS_PREFETCH = (
        'part',
        'supplier',
        'manufacturer_part__manufacturer',
    )
    def get_queryset(self):
        queryset = super().get_queryset()
        return queryset.prefetch_related(*self._ALWAYS_PREFETCH)
class SupplierPart(models.Model):
""" Represents a unique part as provided by a Supplier
Each SupplierPart is identified by a SKU (Supplier Part Number)
Each SupplierPart is also linked to a Part or ManufacturerPart object.
A Part may be available from multiple suppliers
Attributes:
part: Link to the master Part (Obsolete)
source_item: The sourcing item linked to this SupplierPart instance
supplier: Company that supplies this SupplierPart object
SKU: Stock keeping unit (supplier part number)
link: Link to external website for this supplier part
description: Descriptive notes field
note: Longer form note field
base_cost: Base charge added to order independent of quantity e.g. "Reeling Fee"
multiple: Multiple that the part is provided in
lead_time: Supplier lead time
packaging: packaging that the part is supplied in, e.g. "Reel"
"""
objects = SupplierPartManager()
@staticmethod
def get_api_url():
return reverse('api-supplier-part-list')
def get_absolute_url(self):
return reverse('supplier-part-detail', kwargs={'pk': self.id})
def api_instance_filters(self):
return {
'manufacturer_part': {
'part': self.part.pk
}
}
class Meta:
unique_together = ('part', 'supplier', 'SKU')
# This model was moved from the 'Part' app
db_table = 'part_supplierpart'
def clean(self):
super().clean()
# Ensure that the linked manufacturer_part points to the same part!
if self.manufacturer_part and self.part:
if not self.manufacturer_part.part == self.part:
raise ValidationError({
'manufacturer_part': _("Linked manufacturer part must reference the same base part"),
})
def save(self, *args, **kwargs):
""" Overriding save method to connect an existing ManufacturerPart """
manufacturer_part = None
if all(key in kwargs for key in ('manufacturer', 'MPN')):
manufacturer_name = kwargs.pop('manufacturer')
MPN = kwargs.pop('MPN')
# Retrieve manufacturer part
try:
manufacturer_part = ManufacturerPart.objects.get(manufacturer__name=manufacturer_name, MPN=MPN)
except (ValueError, Company.DoesNotExist):
# ManufacturerPart does not exist
pass
if manufacturer_part:
if not self.manufacturer_part:
# Connect ManufacturerPart to SupplierPart
self.manufacturer_part = manufacturer_part
else:
raise ValidationError(f'SupplierPart {self.__str__} is already linked to {self.manufacturer_part}')
self.clean()
self.validate_unique()
super().save(*args, **kwargs)
part = models.ForeignKey('part.Part', on_delete=models.CASCADE,
related_name='supplier_parts',
verbose_name=_('Base Part'),
limit_choices_to={
'purchaseable': True,
},
help_text=_('Select part'),
)
supplier = models.ForeignKey(Company, on_delete=models.CASCADE,
related_name='supplied_parts',
limit_choices_to={'is_supplier': True},
verbose_name=_('Supplier'),
help_text=_('Select supplier'),
)
SKU = models.CharField(
max_length=100,
verbose_name=_('SKU'),
help_text=_('Supplier stock keeping unit')
)
manufacturer_part = models.ForeignKey(ManufacturerPart, on_delete=models.CASCADE,
blank=True, null=True,
related_name='supplier_parts',
verbose_name=_('Manufacturer Part'),
help_text=_('Select manufacturer part'),
)
link = InvenTreeURLField(
blank=True, null=True,
verbose_name=_('Link'),
help_text=_('URL for external supplier part link')
)
description = models.CharField(
max_length=250, blank=True, null=True,
verbose_name=_('Description'),
help_text=_('Supplier part description')
)
note = models.CharField(
max_length=100, blank=True, null=True,
verbose_name=_('Note'),
help_text=_('Notes')
)
base_cost = models.DecimalField(max_digits=10, decimal_places=3, default=0, validators=[MinValueValidator(0)], verbose_name=_('base cost'), help_text=_('Minimum charge (e.g. stocking fee)'))
packaging = models.CharField(max_length=50, blank=True, null=True, verbose_name=_('Packaging'), help_text=_('Part packaging'))
multiple = models.PositiveIntegerField(default=1, validators=[MinValueValidator(1)], verbose_name=_('multiple'), help_text=_('Order multiple'))
# TODO - Reimplement lead-time as a charfield with special validation (pattern matching).
# lead_time = models.DurationField(blank=True, null=True)
@property
def manufacturer_string(self):
""" Format a MPN string for this SupplierPart.
Concatenates manufacture name and part number.
"""
items = []
if self.manufacturer_part:
if self.manufacturer_part.manufacturer:
items.append(self.manufacturer_part.manufacturer.name)
if self.manufacturer_part.MPN:
items.append(self.manufacturer_part.MPN)
return ' | '.join(items)
@property
def has_price_breaks(self):
return self.price_breaks.count() > 0
@property
def price_breaks(self):
""" Return the associated price breaks in the correct order """
return self.pricebreaks.order_by('quantity').all()
@property
def unit_pricing(self):
return self.get_price(1)
def add_price_break(self, quantity, price):
"""
Create a new price break for this part
args:
quantity - Numerical quantity
price - Must be a Money object
"""
# Check if a price break at that quantity already exists...
if self.price_breaks.filter(quantity=quantity, part=self.pk).exists():
return
SupplierPriceBreak.objects.create(
part=self,
quantity=quantity,
price=price
)
get_price = common.models.get_price
def open_orders(self):
""" Return a database query for PO line items for this SupplierPart,
limited to purchase orders that are open / outstanding.
"""
return self.purchase_order_line_items.prefetch_related('order').filter(order__status__in=PurchaseOrderStatus.OPEN)
def on_order(self):
""" Return the total quantity of items currently on order.
Subtract partially received stock as appropriate
"""
totals = self.open_orders().aggregate(Sum('quantity'), Sum('received'))
# Quantity on order
q = totals.get('quantity__sum', 0)
# Quantity received
r = totals.get('received__sum', 0)
if q is None or r is None:
return 0
else:
return max(q - r, 0)
def purchase_orders(self):
""" Returns a list of purchase orders relating to this supplier part """
return [line.order for line in self.purchase_order_line_items.all().prefetch_related('order')]
@property
def pretty_name(self):
return str(self)
def __str__(self):
s = ''
if self.part.IPN:
s += f'{self.part.IPN}'
s += ' | '
s += f'{self.supplier.name} | {self.SKU}'
if self.manufacturer_string:
s = s + ' | ' + self.manufacturer_string
return s
class SupplierPriceBreak(common.models.PriceBreak):
    """ Represents a quantity price break for a SupplierPart.
    - Suppliers can offer discounts at larger quantities
    - SupplierPart(s) may have zero-or-more associated SupplierPriceBreak(s)
    Attributes:
        part: Link to a SupplierPart object that this price break applies to
        updated: Automatic DateTime field that shows last time the price break was updated
        quantity: Quantity required for price break
        cost: Cost at specified quantity
        currency: Reference to the currency of this pricebreak (leave empty for base currency)
    """
    @staticmethod
    def get_api_url():
        """Return the API endpoint (list view) associated with this model."""
        return reverse('api-part-supplier-price-list')
    # SupplierPart this price break belongs to (reverse accessor: part.pricebreaks)
    part = models.ForeignKey(SupplierPart, on_delete=models.CASCADE, related_name='pricebreaks', verbose_name=_('Part'),)
    # Timestamp automatically refreshed on every save (auto_now)
    updated = models.DateTimeField(auto_now=True, null=True, verbose_name=_('last updated'))
    class Meta:
        # Only one price break per (part, quantity) pair
        unique_together = ("part", "quantity")
        # This model was moved from the 'Part' app
        db_table = 'part_supplierpricebreak'
    def __str__(self):
        return f'{self.part.SKU} - {self.price} @ {self.quantity}'
| 32.408078 | 194 | 0.628089 |
from __future__ import unicode_literals
import os
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MinValueValidator
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Sum, Q, UniqueConstraint
from django.apps import apps
from django.urls import reverse
from moneyed import CURRENCIES
from markdownx.models import MarkdownxField
from stdimage.models import StdImageField
from InvenTree.helpers import getMediaUrl, getBlankImage, getBlankThumbnail
from InvenTree.fields import InvenTreeURLField
from InvenTree.status_codes import PurchaseOrderStatus
import InvenTree.validators
import common.models
import common.settings
from common.settings import currency_code_default
def rename_company_image(instance, filename):
    """Return the storage path for a Company image upload.

    Args:
        instance: Company instance the image belongs to
        filename: name of the uploaded file (only the extension is used)

    Returns:
        Path of the form ``company_images/company_<pk>_img[.<ext>]``
    """
    base = 'company_images'
    # Keep the original extension, if any
    if filename.count('.') > 0:
        ext = filename.split('.')[-1]
    else:
        ext = ''
    # Name derives from the company's primary key, giving a stable path
    fn = 'company_{pk}_img'.format(pk=instance.pk)
    if ext:
        fn += '.' + ext
    return os.path.join(base, fn)
class Company(models.Model):
@staticmethod
def get_api_url():
return reverse('api-company-list')
class Meta:
ordering = ['name', ]
constraints = [
UniqueConstraint(fields=['name', 'email'], name='unique_name_email_pair')
]
verbose_name_plural = "Companies"
name = models.CharField(max_length=100, blank=False,
help_text=_('Company name'),
verbose_name=_('Company name'))
description = models.CharField(
max_length=500,
verbose_name=_('Company description'),
help_text=_('Description of the company'),
blank=True,
)
website = models.URLField(
blank=True,
verbose_name=_('Website'),
help_text=_('Company website URL')
)
address = models.CharField(max_length=200,
verbose_name=_('Address'),
blank=True, help_text=_('Company address'))
phone = models.CharField(max_length=50,
verbose_name=_('Phone number'),
blank=True, help_text=_('Contact phone number'))
email = models.EmailField(blank=True, null=True,
verbose_name=_('Email'), help_text=_('Contact email address'))
contact = models.CharField(max_length=100,
verbose_name=_('Contact'),
blank=True, help_text=_('Point of contact'))
link = InvenTreeURLField(blank=True, verbose_name=_('Link'), help_text=_('Link to external company information'))
image = StdImageField(
upload_to=rename_company_image,
null=True,
blank=True,
variations={'thumbnail': (128, 128)},
delete_orphans=True,
verbose_name=_('Image'),
)
notes = MarkdownxField(blank=True, verbose_name=_('Notes'))
is_customer = models.BooleanField(default=False, verbose_name=_('is customer'), help_text=_('Do you sell items to this company?'))
is_supplier = models.BooleanField(default=True, verbose_name=_('is supplier'), help_text=_('Do you purchase items from this company?'))
is_manufacturer = models.BooleanField(default=False, verbose_name=_('is manufacturer'), help_text=_('Does this company manufacture parts?'))
currency = models.CharField(
max_length=3,
verbose_name=_('Currency'),
blank=True,
default=currency_code_default,
help_text=_('Default currency used for this company'),
validators=[InvenTree.validators.validate_currency_code],
)
@property
def currency_code(self):
code = self.currency
if code not in CURRENCIES:
code = common.settings.currency_code_default()
return code
def __str__(self):
return "{n} - {d}".format(n=self.name, d=self.description)
def get_absolute_url(self):
return reverse('company-detail', kwargs={'pk': self.id})
def get_image_url(self):
if self.image:
return getMediaUrl(self.image.url)
else:
return getBlankImage()
def get_thumbnail_url(self):
if self.image:
return getMediaUrl(self.image.thumbnail.url)
else:
return getBlankThumbnail()
@property
def manufactured_part_count(self):
return self.manufactured_parts.count()
@property
def has_manufactured_parts(self):
return self.manufactured_part_count > 0
@property
def supplied_part_count(self):
return self.supplied_parts.count()
@property
def has_supplied_parts(self):
return self.supplied_part_count > 0
@property
def parts(self):
return SupplierPart.objects.filter(Q(supplier=self.id) | Q(manufacturer_part__manufacturer=self.id))
@property
def part_count(self):
return self.parts.count()
@property
def has_parts(self):
return self.part_count > 0
@property
def stock_items(self):
stock = apps.get_model('stock', 'StockItem')
return stock.objects.filter(Q(supplier_part__supplier=self.id) | Q(supplier_part__manufacturer_part__manufacturer=self.id)).all()
@property
def stock_count(self):
return self.stock_items.count()
def outstanding_purchase_orders(self):
return self.purchase_orders.filter(status__in=PurchaseOrderStatus.OPEN)
def pending_purchase_orders(self):
return self.purchase_orders.filter(status=PurchaseOrderStatus.PENDING)
def closed_purchase_orders(self):
return self.purchase_orders.exclude(status__in=PurchaseOrderStatus.OPEN)
def complete_purchase_orders(self):
return self.purchase_orders.filter(status=PurchaseOrderStatus.COMPLETE)
def failed_purchase_orders(self):
return self.purchase_orders.filter(status__in=PurchaseOrderStatus.FAILED)
class Contact(models.Model):
    """A person who works at a particular Company.

    A Company may have zero or more associated Contact objects.
    """
    # Fix: this ForeignKey was previously declared twice in the class body;
    # the redundant second declaration has been removed (the rebinding was
    # harmless but misleading).
    company = models.ForeignKey(Company, related_name='contacts',
                                on_delete=models.CASCADE)
    name = models.CharField(max_length=100)
    phone = models.CharField(max_length=100, blank=True)
    email = models.EmailField(blank=True)
    role = models.CharField(max_length=100, blank=True)
class ManufacturerPart(models.Model):
@staticmethod
def get_api_url():
return reverse('api-manufacturer-part-list')
class Meta:
unique_together = ('part', 'manufacturer', 'MPN')
part = models.ForeignKey('part.Part', on_delete=models.CASCADE,
related_name='manufacturer_parts',
verbose_name=_('Base Part'),
limit_choices_to={
'purchaseable': True,
},
help_text=_('Select part'),
)
manufacturer = models.ForeignKey(
Company,
on_delete=models.CASCADE,
null=True,
related_name='manufactured_parts',
limit_choices_to={
'is_manufacturer': True
},
verbose_name=_('Manufacturer'),
help_text=_('Select manufacturer'),
)
MPN = models.CharField(
null=True,
max_length=100,
verbose_name=_('MPN'),
help_text=_('Manufacturer Part Number')
)
link = InvenTreeURLField(
blank=True, null=True,
verbose_name=_('Link'),
help_text=_('URL for external manufacturer part link')
)
description = models.CharField(
max_length=250, blank=True, null=True,
verbose_name=_('Description'),
help_text=_('Manufacturer part description')
)
@classmethod
def create(cls, part, manufacturer, mpn, description, link=None):
manufacturer_part = None
try:
manufacturer_part = ManufacturerPart.objects.get(part=part, manufacturer=manufacturer, MPN=mpn)
except ManufacturerPart.DoesNotExist:
pass
if not manufacturer_part:
manufacturer_part = ManufacturerPart(part=part, manufacturer=manufacturer, MPN=mpn, description=description, link=link)
manufacturer_part.save()
return manufacturer_part
def __str__(self):
s = ''
if self.manufacturer:
s += f'{self.manufacturer.name}'
s += ' | '
s += f'{self.MPN}'
return s
class ManufacturerPartParameter(models.Model):
@staticmethod
def get_api_url():
return reverse('api-manufacturer-part-parameter-list')
class Meta:
unique_together = ('manufacturer_part', 'name')
manufacturer_part = models.ForeignKey(
ManufacturerPart,
on_delete=models.CASCADE,
related_name='parameters',
verbose_name=_('Manufacturer Part'),
)
name = models.CharField(
max_length=500,
blank=False,
verbose_name=_('Name'),
help_text=_('Parameter name')
)
value = models.CharField(
max_length=500,
blank=False,
verbose_name=_('Value'),
help_text=_('Parameter value')
)
units = models.CharField(
max_length=64,
blank=True, null=True,
verbose_name=_('Units'),
help_text=_('Parameter units')
)
class SupplierPartManager(models.Manager):
def get_queryset(self):
return super().get_queryset().prefetch_related(
'part',
'supplier',
'manufacturer_part__manufacturer',
)
class SupplierPart(models.Model):
objects = SupplierPartManager()
@staticmethod
def get_api_url():
return reverse('api-supplier-part-list')
def get_absolute_url(self):
return reverse('supplier-part-detail', kwargs={'pk': self.id})
def api_instance_filters(self):
return {
'manufacturer_part': {
'part': self.part.pk
}
}
class Meta:
unique_together = ('part', 'supplier', 'SKU')
db_table = 'part_supplierpart'
def clean(self):
super().clean()
if self.manufacturer_part and self.part:
if not self.manufacturer_part.part == self.part:
raise ValidationError({
'manufacturer_part': _("Linked manufacturer part must reference the same base part"),
})
def save(self, *args, **kwargs):
manufacturer_part = None
if all(key in kwargs for key in ('manufacturer', 'MPN')):
manufacturer_name = kwargs.pop('manufacturer')
MPN = kwargs.pop('MPN')
try:
manufacturer_part = ManufacturerPart.objects.get(manufacturer__name=manufacturer_name, MPN=MPN)
except (ValueError, Company.DoesNotExist):
pass
if manufacturer_part:
if not self.manufacturer_part:
self.manufacturer_part = manufacturer_part
else:
raise ValidationError(f'SupplierPart {self.__str__} is already linked to {self.manufacturer_part}')
self.clean()
self.validate_unique()
super().save(*args, **kwargs)
part = models.ForeignKey('part.Part', on_delete=models.CASCADE,
related_name='supplier_parts',
verbose_name=_('Base Part'),
limit_choices_to={
'purchaseable': True,
},
help_text=_('Select part'),
)
supplier = models.ForeignKey(Company, on_delete=models.CASCADE,
related_name='supplied_parts',
limit_choices_to={'is_supplier': True},
verbose_name=_('Supplier'),
help_text=_('Select supplier'),
)
SKU = models.CharField(
max_length=100,
verbose_name=_('SKU'),
help_text=_('Supplier stock keeping unit')
)
manufacturer_part = models.ForeignKey(ManufacturerPart, on_delete=models.CASCADE,
blank=True, null=True,
related_name='supplier_parts',
verbose_name=_('Manufacturer Part'),
help_text=_('Select manufacturer part'),
)
link = InvenTreeURLField(
blank=True, null=True,
verbose_name=_('Link'),
help_text=_('URL for external supplier part link')
)
description = models.CharField(
max_length=250, blank=True, null=True,
verbose_name=_('Description'),
help_text=_('Supplier part description')
)
note = models.CharField(
max_length=100, blank=True, null=True,
verbose_name=_('Note'),
help_text=_('Notes')
)
base_cost = models.DecimalField(max_digits=10, decimal_places=3, default=0, validators=[MinValueValidator(0)], verbose_name=_('base cost'), help_text=_('Minimum charge (e.g. stocking fee)'))
packaging = models.CharField(max_length=50, blank=True, null=True, verbose_name=_('Packaging'), help_text=_('Part packaging'))
multiple = models.PositiveIntegerField(default=1, validators=[MinValueValidator(1)], verbose_name=_('multiple'), help_text=_('Order multiple'))
@property
def manufacturer_string(self):
items = []
if self.manufacturer_part:
if self.manufacturer_part.manufacturer:
items.append(self.manufacturer_part.manufacturer.name)
if self.manufacturer_part.MPN:
items.append(self.manufacturer_part.MPN)
return ' | '.join(items)
@property
def has_price_breaks(self):
return self.price_breaks.count() > 0
@property
def price_breaks(self):
return self.pricebreaks.order_by('quantity').all()
@property
def unit_pricing(self):
return self.get_price(1)
def add_price_break(self, quantity, price):
if self.price_breaks.filter(quantity=quantity, part=self.pk).exists():
return
SupplierPriceBreak.objects.create(
part=self,
quantity=quantity,
price=price
)
get_price = common.models.get_price
def open_orders(self):
return self.purchase_order_line_items.prefetch_related('order').filter(order__status__in=PurchaseOrderStatus.OPEN)
def on_order(self):
totals = self.open_orders().aggregate(Sum('quantity'), Sum('received'))
q = totals.get('quantity__sum', 0)
r = totals.get('received__sum', 0)
if q is None or r is None:
return 0
else:
return max(q - r, 0)
def purchase_orders(self):
return [line.order for line in self.purchase_order_line_items.all().prefetch_related('order')]
@property
def pretty_name(self):
return str(self)
def __str__(self):
s = ''
if self.part.IPN:
s += f'{self.part.IPN}'
s += ' | '
s += f'{self.supplier.name} | {self.SKU}'
if self.manufacturer_string:
s = s + ' | ' + self.manufacturer_string
return s
class SupplierPriceBreak(common.models.PriceBreak):
    """Quantity price break offered by a supplier for a SupplierPart.

    The `quantity` and `price` fields are presumably inherited from
    common.models.PriceBreak (base class not visible here -- confirm).
    """
    @staticmethod
    def get_api_url():
        # REST API list endpoint for supplier price breaks.
        return reverse('api-part-supplier-price-list')
    # The supplier part this price break applies to.
    part = models.ForeignKey(SupplierPart, on_delete=models.CASCADE, related_name='pricebreaks', verbose_name=_('Part'),)
    # Timestamp refreshed automatically on every save.
    updated = models.DateTimeField(auto_now=True, null=True, verbose_name=_('last updated'))
    class Meta:
        # At most one price break per (part, quantity) pair.
        unique_together = ("part", "quantity")
        db_table = 'part_supplierpricebreak'
    def __str__(self):
        return f'{self.part.SKU} - {self.price} @ {self.quantity}'
| true | true |
f7266b0a144bf41133781daf4df6f25ffb28d3a9 | 463 | py | Python | data/scripts/templates/object/tangible/furniture/jedi/shared_frn_all_table_dark_01.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/furniture/jedi/shared_frn_all_table_dark_01.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/tangible/furniture/jedi/shared_frn_all_table_dark_01.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/furniture/jedi/shared_frn_all_table_dark_01.iff"
result.attribute_template_id = 6
result.stfName("frn_n","frn_all_jedi_table")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 27.235294 | 84 | 0.736501 | true | true | |
f7266b31a38ebc7a5721d50d8c4e8bcd245cd5d2 | 292 | py | Python | tests/urls.py | pkeeper/pdf-crawler-test | 7f8b9ca135fae7301f7902ada3669cf82726c4e0 | [
"MIT"
] | null | null | null | tests/urls.py | pkeeper/pdf-crawler-test | 7f8b9ca135fae7301f7902ada3669cf82726c4e0 | [
"MIT"
] | null | null | null | tests/urls.py | pkeeper/pdf-crawler-test | 7f8b9ca135fae7301f7902ada3669cf82726c4e0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.conf.urls import url, include
from pdf_crawler_test.urls import urlpatterns as pdf_crawler_test_urls
# Mount the app's URLconf at the site root, namespaced so reverse()
# lookups can use 'pdf_crawler_test:<name>'.
urlpatterns = [
    url(r'^', include(pdf_crawler_test_urls, namespace='pdf_crawler_test')),
]
| 26.545455 | 76 | 0.777397 |
from __future__ import unicode_literals, absolute_import
from django.conf.urls import url, include
from pdf_crawler_test.urls import urlpatterns as pdf_crawler_test_urls
urlpatterns = [
url(r'^', include(pdf_crawler_test_urls, namespace='pdf_crawler_test')),
]
| true | true |
f7266dca016862413492651e6ea69b794172bb4d | 1,926 | py | Python | src/ksl.py | adnan007d/scraping-jobs-for-alex | 393c2f703d939cdf5944faa59863336070b611f3 | [
"MIT"
] | null | null | null | src/ksl.py | adnan007d/scraping-jobs-for-alex | 393c2f703d939cdf5944faa59863336070b611f3 | [
"MIT"
] | null | null | null | src/ksl.py | adnan007d/scraping-jobs-for-alex | 393c2f703d939cdf5944faa59863336070b611f3 | [
"MIT"
] | null | null | null | import time
import json
import requests
import urllib3
from random import randint
from bs4 import BeautifulSoup
from threading import Thread
urllib3.disable_warnings()
# Search-results page listing jobs posted within the last 7 days.
BASE_URL = "https://jobs.ksl.com/search/posted/last-7-days"
# Desktop browser User-Agent so the site serves its normal HTML pages.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"
}
# Scraped jobs keyed by job URL; written to by multiple worker threads.
JOBS = {}
def getJobDescriptions(url, headers):
    """Fetch a single job page and store its og:description text in JOBS[url].

    Runs in a worker thread; assumes JOBS[url] was populated by
    getJobListings() before this is called.
    """
    # NOTE(review): verify=False disables TLS certificate verification --
    # tolerable for a throwaway scraper, but worth confirming.
    data = requests.get(url=url, headers=headers, verify=False, timeout=20)
    # Read the body *before* releasing the connection; accessing .text
    # after close() relies on the response having been pre-loaded.
    html = data.text
    data.close()
    soup = BeautifulSoup(html, "html.parser")
    # Bug fix: the original passed "html.parser" as find_all()'s third
    # positional argument, which is actually `recursive` -- drop it.
    descriptionTag = soup.find_all("meta", {"property": "og:description"})
    description = descriptionTag[0]["content"]
    JOBS[url]["description"] = description
def writeToFile():
    """Serialize the accumulated JOBS mapping to sample.json (overwrites)."""
    global JOBS
    with open("sample.json", "w") as fh:
        fh.write(json.dumps(JOBS))
def getJobListings(url, headers):
    """Scrape one results page, fetch each job's description concurrently,
    persist progress to disk, then follow pagination recursively.

    Results accumulate in the module-level JOBS dict, keyed by job URL.
    """
    dataX = requests.get(url=url, headers=headers, verify=False, timeout=20)
    soup = BeautifulSoup(dataX.text, "html.parser")
    dataX.close()

    # The structured job data is embedded as JSON-LD in the first
    # application/ld+json script tag.
    script = soup.find_all('script', {'type': 'application/ld+json'})
    content = script[0].contents[0]
    jobsArray = json.loads(content)["itemListElement"]

    threads = []
    for job in jobsArray:
        JOBS[job["url"]] = {
            "name": job["title"],
            "employer": job["hiringOrganization"]["name"],
            "url": job["url"],
        }
        t = Thread(target=getJobDescriptions, args=(job["url"], headers))
        threads.append(t)

    for t in threads:
        t.start()

    # Wait for every description fetch before persisting this page's batch.
    for t in threads:
        t.join()

    print(f"Number of jobs Scraped {len(JOBS)}")
    writeToFile()

    next_page = soup.find("a", {"class": "next link"})
    if next_page is not None:
        # Bug fix: the recursive call previously passed the global HEADERS
        # instead of this call's `headers` argument.
        getJobListings(next_page.get("href"), headers)
getJobListings(BASE_URL, HEADERS)
| 26.027027 | 141 | 0.648494 | import time
import json
import requests
import urllib3
from random import randint
from bs4 import BeautifulSoup
from threading import Thread
urllib3.disable_warnings()
BASE_URL = "https://jobs.ksl.com/search/posted/last-7-days"
HEADERS = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"
}
JOBS = {}
def getJobDescriptions(url, headers):
data = requests.get(url=url, headers=headers, verify=False, timeout=20)
data.close()
soup = BeautifulSoup(data.text, "html.parser")
descriptionTag = soup.find_all(
"meta", {"property": "og:description"}, "html.parser"
)
description = descriptionTag[0]["content"]
JOBS[url]["description"] = description
def writeToFile():
global JOBS
with open("sample.json", "w") as outfile:
json.dump(JOBS, outfile)
def getJobListings(url, headers):
dataX = requests.get(url=url, headers=headers, verify=False, timeout=20)
soup = BeautifulSoup(dataX.text, "html.parser")
dataX.close()
script = soup.find_all('script', {'type': 'application/ld+json'})
content = script[0].contents[0]
jobsArray = json.loads(content)["itemListElement"]
threads = []
for job in jobsArray:
JOBS[job["url"]] = {
"name": job["title"],
"employer": job["hiringOrganization"]["name"],
"url": job["url"],
}
t = Thread(target=getJobDescriptions, args=(job["url"], headers))
threads.append(t)
for i in threads:
i.start()
for i in threads:
i.join()
print(f"Number of jobs Scraped {len(JOBS)}")
writeToFile()
next_page = soup.find("a", {"class": "next link"})
if next_page is not None:
getJobListings(next_page.get("href"), HEADERS)
getJobListings(BASE_URL, HEADERS)
| true | true |
f7266f67638e9576f4b43d525e963cfc0fa2a7b5 | 536 | py | Python | sdk/deviceupdate/azure-mgmt-deviceupdate/azure/mgmt/deviceupdate/aio/__init__.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/deviceupdate/azure-mgmt-deviceupdate/azure/mgmt/deviceupdate/aio/__init__.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/deviceupdate/azure-mgmt-deviceupdate/azure/mgmt/deviceupdate/aio/__init__.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._device_update import DeviceUpdate
__all__ = ['DeviceUpdate']
| 48.727273 | 94 | 0.561567 |
from ._device_update import DeviceUpdate
__all__ = ['DeviceUpdate']
| true | true |
f726701582b7fe85721853e639a19adb9c7fe1fe | 7,883 | py | Python | test/functional/rpc_users.py | FcoinFup/litecoin | f60e79f2bf373dafd258264ae197cee44ab4a314 | [
"MIT"
] | null | null | null | test/functional/rpc_users.py | FcoinFup/litecoin | f60e79f2bf373dafd258264ae197cee44ab4a314 | [
"MIT"
] | null | null | null | test/functional/rpc_users.py | FcoinFup/litecoin | f60e79f2bf373dafd258264ae197cee44ab4a314 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
get_datadir_path,
str_to_b64str,
)
import os
import http.client
import urllib.parse
import subprocess
from random import SystemRandom
import string
import configparser
import sys
class HTTPBasicsTest(BitcoinTestFramework):
    """Exercise every supported RPC credential mechanism: three rpcauth
    entries on node 0 and the legacy rpcuser/rpcpassword pair on node 1,
    with both valid and invalid logins."""

    def set_test_params(self):
        self.num_nodes = 2

    def setup_chain(self):
        """Append rpcauth / rpcuser credentials to each node's config file
        before the nodes are initialized."""
        super().setup_chain()
        # Static rpcauth entries (generated offline with share/rpcauth).
        rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
        rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
        rpcuser = "rpcuser=rpcuser💻"
        rpcpassword = "rpcpassword=rpcpassword🔑"

        # Third rpcauth entry: random username, generated at run time by
        # the share/rpcauth tool referenced from the framework config.
        self.user = ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
        config = configparser.ConfigParser()
        # Close the config file deterministically (was a bare open()).
        with open(self.options.configfile) as config_file:
            config.read_file(config_file)
        gen_rpcauth = config['environment']['RPCAUTH']
        p = subprocess.Popen([sys.executable, gen_rpcauth, self.user], stdout=subprocess.PIPE, universal_newlines=True)
        lines = p.stdout.read().splitlines()
        rpcauth3 = lines[1]
        self.password = lines[3]

        with open(os.path.join(get_datadir_path(self.options.tmpdir, 0), "deliverycoin.conf"), 'a', encoding='utf8') as f:
            f.write(rpcauth + "\n")
            f.write(rpcauth2 + "\n")
            f.write(rpcauth3 + "\n")
        with open(os.path.join(get_datadir_path(self.options.tmpdir, 1), "deliverycoin.conf"), 'a', encoding='utf8') as f:
            f.write(rpcuser + "\n")
            f.write(rpcpassword + "\n")

    def _check_auth(self, url, authpair, expected_status):
        """POST a getbestblockhash request using HTTP basic auth `authpair`
        and assert the node answers with `expected_status`."""
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        assert_equal(resp.status, expected_status)
        conn.close()

    def run_test(self):
        ##################################################
        # Check correctness of the rpcauth config option #
        ##################################################
        url = urllib.parse.urlparse(self.nodes[0].url)

        # Old-style authpair taken straight from the node URL
        authpair = url.username + ':' + url.password
        # Passwords for the authpairs generated via the share/rpcauth tool
        password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
        # Second authpair with different username
        password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="

        self.log.info('Correct...')
        self._check_auth(url, authpair, 200)

        # Use new authpair to confirm both work
        self.log.info('Correct...')
        self._check_auth(url, "rt:" + password, 200)

        # Wrong login name with rt's password
        self.log.info('Wrong...')
        self._check_auth(url, "rtwrong:" + password, 401)

        # Wrong password for rt
        self.log.info('Wrong...')
        self._check_auth(url, "rt:" + password + "wrong", 401)

        # Correct for rt2
        self.log.info('Correct...')
        self._check_auth(url, "rt2:" + password2, 200)

        # Wrong password for rt2
        self.log.info('Wrong...')
        self._check_auth(url, "rt2:" + password2 + "wrong", 401)

        # Correct for the randomly generated user
        self.log.info('Correct...')
        self._check_auth(url, self.user + ":" + self.password, 200)

        # Wrong password for the randomly generated user
        self.log.info('Wrong...')
        self._check_auth(url, self.user + ":" + self.password + "Wrong", 401)

        ###############################################################
        # Check correctness of the rpcuser/rpcpassword config options #
        ###############################################################
        url = urllib.parse.urlparse(self.nodes[1].url)

        # rpcuser and rpcpassword authpair
        self.log.info('Correct...')
        self._check_auth(url, "rpcuser💻:rpcpassword🔑", 200)

        # Wrong login name with rpcuser's password
        self._check_auth(url, "rpcuserwrong:rpcpassword", 401)

        # Wrong password for rpcuser
        self.log.info('Wrong...')
        self._check_auth(url, "rpcuser:rpcpasswordwrong", 401)
if __name__ == '__main__':
    # PEP 8 fix: no space before call parentheses (was `HTTPBasicsTest ().main ()`).
    HTTPBasicsTest().main()
| 38.082126 | 129 | 0.614994 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
get_datadir_path,
str_to_b64str,
)
import os
import http.client
import urllib.parse
import subprocess
from random import SystemRandom
import string
import configparser
import sys
class HTTPBasicsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_chain(self):
super().setup_chain()
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
rpcuser = "rpcuser=rpcuser💻"
rpcpassword = "rpcpassword=rpcpassword🔑"
self.user = ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
gen_rpcauth = config['environment']['RPCAUTH']
p = subprocess.Popen([sys.executable, gen_rpcauth, self.user], stdout=subprocess.PIPE, universal_newlines=True)
lines = p.stdout.read().splitlines()
rpcauth3 = lines[1]
self.password = lines[3]
with open(os.path.join(get_datadir_path(self.options.tmpdir, 0), "deliverycoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
f.write(rpcauth3+"\n")
with open(os.path.join(get_datadir_path(self.options.tmpdir, 1), "deliverycoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcuser+"\n")
f.write(rpcpassword+"\n")
def run_test(self):
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Correct for randomly generated user
self.log.info('Correct...')
authpairnew = self.user+":"+self.password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong password for randomly generated user
self.log.info('Wrong...')
authpairnew = self.user+":"+self.password+"Wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
###############################################################
# Check correctness of the rpcuser/rpcpassword config options #
###############################################################
url = urllib.parse.urlparse(self.nodes[1].url)
# rpcuser and rpcpassword authpair
self.log.info('Correct...')
rpcuserauthpair = "rpcuser💻:rpcpassword🔑"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rpcuser's password
rpcuserauthpair = "rpcuserwrong:rpcpassword"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
self.log.info('Wrong...')
rpcuserauthpair = "rpcuser:rpcpasswordwrong"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| true | true |
f7267055d2c097c704fe63ba2a7aa2a991fa61d0 | 6,510 | py | Python | touchdown/tests/test_aws_iam_server_certificate.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
] | 14 | 2015-01-05T18:18:04.000Z | 2022-02-07T19:35:12.000Z | touchdown/tests/test_aws_iam_server_certificate.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
] | 106 | 2015-01-06T00:17:13.000Z | 2019-09-07T00:35:32.000Z | touchdown/tests/test_aws_iam_server_certificate.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
] | 5 | 2015-01-30T10:18:24.000Z | 2022-02-07T19:35:13.000Z | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import errors
from touchdown.tests.aws import StubberTestCase
from touchdown.tests.stubs.aws import ServerCertificateStubber
class TestCreateServerCertificate(StubberTestCase):
    """Apply-goal tests for the aws server_certificate resource: creation,
    idempotency, and client-side validation of cert / key / chain inputs."""
    def test_create_server_certificate(self):
        goal = self.create_goal("apply")
        with open(ServerCertificateStubber.cert_file) as cert_file, open(
            ServerCertificateStubber.key_file
        ) as key_file, open(ServerCertificateStubber.chain_file) as chain_file:
            server_certificate = self.fixtures.enter_context(
                ServerCertificateStubber(
                    goal.get_service(
                        self.aws.add_server_certificate(
                            name="my-test-server-certificate",
                            certificate_body=cert_file.read(),
                            private_key=key_file.read(),
                            certificate_chain=chain_file.read(),
                        ),
                        "apply",
                    )
                )
            )
            # first list is to find things to delete
            server_certificate.add_list_server_certificate_empty_response()
            # second is to find if there is an existing matching cert
            server_certificate.add_list_server_certificate_empty_response()
            server_certificate.add_upload_server_certificate()
            # CreateAction needs to look up cert again as create response has no info
            server_certificate.add_list_server_certificate_one_response()
            server_certificate.add_get_server_certificate()
            # refresh resource metadata
            server_certificate.add_list_server_certificate_one_response()
            server_certificate.add_get_server_certificate()
            # sanity check / PostCreation
            server_certificate.add_list_server_certificate_one_response()
            server_certificate.add_get_server_certificate()
            goal.execute()
    def test_create_server_certificate_idempotent(self):
        # When the certificate already exists and matches, the plan is empty.
        goal = self.create_goal("apply")
        with open(ServerCertificateStubber.cert_file) as cert_file, open(
            ServerCertificateStubber.key_file
        ) as key_file:
            server_certificate = self.fixtures.enter_context(
                ServerCertificateStubber(
                    goal.get_service(
                        self.aws.add_server_certificate(
                            name="my-test-server-certificate",
                            certificate_body=cert_file.read(),
                            private_key=key_file.read(),
                        ),
                        "apply",
                    )
                )
            )
            server_certificate.add_list_server_certificate_one_response()
            server_certificate.add_get_server_certificate()
            server_certificate.add_list_server_certificate_one_response()
            server_certificate.add_get_server_certificate()
            self.assertEqual(len(list(goal.plan())), 0)
            self.assertEqual(len(goal.get_changes(server_certificate.resource)), 0)
    def test_create_server_certificate_wrong_chain(self):
        # Body and chain are deliberately swapped, so the key check fails
        # before any AWS call is made.
        with open(ServerCertificateStubber.cert_file) as cert_file, open(
            ServerCertificateStubber.key_file
        ) as key_file, open(ServerCertificateStubber.chain_file) as chain_file:
            with self.assertRaises(errors.Error) as cm:
                self.aws.add_server_certificate(
                    name="my-test-server-certificate",
                    certificate_body=chain_file.read(), # to trigger error
                    private_key=key_file.read(),
                    certificate_chain=cert_file.read(), # to trigger error
                )
            self.assertIn("Certificate does not match private_key", str(cm.exception))
    def test_create_server_certificate_bad_chain(self):
        # Valid cert/key, but the chain file is malformed.
        with open(ServerCertificateStubber.cert_file) as cert_file, open(
            ServerCertificateStubber.key_file
        ) as key_file, open(ServerCertificateStubber.bad_chain_file) as bad_chain_file:
            with self.assertRaises(errors.Error) as cm:
                self.aws.add_server_certificate(
                    name="my-test-server-certificate",
                    certificate_body=cert_file.read(), # valid body
                    private_key=key_file.read(),
                    certificate_chain=bad_chain_file.read(), # malformed chain triggers error
                )
            self.assertIn("Invalid chain for", str(cm.exception))
class TestDestroyServerCertificate(StubberTestCase):
    """Destroy-goal tests: deleting an existing certificate, and a no-op
    plan when the certificate is already gone."""
    def test_destroy_server_certificate(self):
        goal = self.create_goal("destroy")
        server_certificate = self.fixtures.enter_context(
            ServerCertificateStubber(
                goal.get_service(
                    self.aws.add_server_certificate(name="my-test-server-certificate"),
                    "destroy",
                )
            )
        )
        # Two list/get rounds find the certificate (plan + refresh,
        # presumably -- mirrors the create test), then it is deleted.
        server_certificate.add_list_server_certificate_one_response()
        server_certificate.add_get_server_certificate()
        server_certificate.add_list_server_certificate_one_response()
        server_certificate.add_get_server_certificate()
        server_certificate.add_delete_server_certificate()
        goal.execute()
    def test_destroy_server_certificate_idempotent(self):
        # Certificate absent -> no delete planned.
        goal = self.create_goal("destroy")
        server_certificate = self.fixtures.enter_context(
            ServerCertificateStubber(
                goal.get_service(
                    self.aws.add_server_certificate(name="my-test-server-certificate"),
                    "destroy",
                )
            )
        )
        server_certificate.add_list_server_certificate_empty_response()
        server_certificate.add_list_server_certificate_empty_response()
        self.assertEqual(len(list(goal.plan())), 0)
        self.assertEqual(len(goal.get_changes(server_certificate.resource)), 0)
| 43.986486 | 87 | 0.650691 |
from touchdown.core import errors
from touchdown.tests.aws import StubberTestCase
from touchdown.tests.stubs.aws import ServerCertificateStubber
class TestCreateServerCertificate(StubberTestCase):
def test_create_server_certificate(self):
goal = self.create_goal("apply")
with open(ServerCertificateStubber.cert_file) as cert_file, open(
ServerCertificateStubber.key_file
) as key_file, open(ServerCertificateStubber.chain_file) as chain_file:
server_certificate = self.fixtures.enter_context(
ServerCertificateStubber(
goal.get_service(
self.aws.add_server_certificate(
name="my-test-server-certificate",
certificate_body=cert_file.read(),
private_key=key_file.read(),
certificate_chain=chain_file.read(),
),
"apply",
)
)
)
server_certificate.add_list_server_certificate_empty_response()
server_certificate.add_list_server_certificate_empty_response()
server_certificate.add_upload_server_certificate()
server_certificate.add_list_server_certificate_one_response()
server_certificate.add_get_server_certificate()
server_certificate.add_list_server_certificate_one_response()
server_certificate.add_get_server_certificate()
server_certificate.add_list_server_certificate_one_response()
server_certificate.add_get_server_certificate()
goal.execute()
def test_create_server_certificate_idempotent(self):
goal = self.create_goal("apply")
with open(ServerCertificateStubber.cert_file) as cert_file, open(
ServerCertificateStubber.key_file
) as key_file:
server_certificate = self.fixtures.enter_context(
ServerCertificateStubber(
goal.get_service(
self.aws.add_server_certificate(
name="my-test-server-certificate",
certificate_body=cert_file.read(),
private_key=key_file.read(),
),
"apply",
)
)
)
server_certificate.add_list_server_certificate_one_response()
server_certificate.add_get_server_certificate()
server_certificate.add_list_server_certificate_one_response()
server_certificate.add_get_server_certificate()
self.assertEqual(len(list(goal.plan())), 0)
self.assertEqual(len(goal.get_changes(server_certificate.resource)), 0)
def test_create_server_certificate_wrong_chain(self):
with open(ServerCertificateStubber.cert_file) as cert_file, open(
ServerCertificateStubber.key_file
) as key_file, open(ServerCertificateStubber.chain_file) as chain_file:
with self.assertRaises(errors.Error) as cm:
self.aws.add_server_certificate(
name="my-test-server-certificate",
certificate_body=chain_file.read(),
private_key=key_file.read(),
certificate_chain=cert_file.read(),
)
self.assertIn("Certificate does not match private_key", str(cm.exception))
def test_create_server_certificate_bad_chain(self):
with open(ServerCertificateStubber.cert_file) as cert_file, open(
ServerCertificateStubber.key_file
) as key_file, open(ServerCertificateStubber.bad_chain_file) as bad_chain_file:
with self.assertRaises(errors.Error) as cm:
self.aws.add_server_certificate(
name="my-test-server-certificate",
certificate_body=cert_file.read(),
private_key=key_file.read(),
certificate_chain=bad_chain_file.read(),
)
self.assertIn("Invalid chain for", str(cm.exception))
class TestDestroyServerCertificate(StubberTestCase):
def test_destroy_server_certificate(self):
goal = self.create_goal("destroy")
server_certificate = self.fixtures.enter_context(
ServerCertificateStubber(
goal.get_service(
self.aws.add_server_certificate(name="my-test-server-certificate"),
"destroy",
)
)
)
server_certificate.add_list_server_certificate_one_response()
server_certificate.add_get_server_certificate()
server_certificate.add_list_server_certificate_one_response()
server_certificate.add_get_server_certificate()
server_certificate.add_delete_server_certificate()
goal.execute()
def test_destroy_server_certificate_idempotent(self):
goal = self.create_goal("destroy")
server_certificate = self.fixtures.enter_context(
ServerCertificateStubber(
goal.get_service(
self.aws.add_server_certificate(name="my-test-server-certificate"),
"destroy",
)
)
)
server_certificate.add_list_server_certificate_empty_response()
server_certificate.add_list_server_certificate_empty_response()
self.assertEqual(len(list(goal.plan())), 0)
self.assertEqual(len(goal.get_changes(server_certificate.resource)), 0)
| true | true |
f7267151612dbfc56f3defd079b3c1b033ab0459 | 2,573 | py | Python | task/models.py | Redhead95/taskful_api | 2b28a7f6ecaeb1c77d3fd3bea9ae5922667b1044 | [
"MIT"
] | null | null | null | task/models.py | Redhead95/taskful_api | 2b28a7f6ecaeb1c77d3fd3bea9ae5922667b1044 | [
"MIT"
] | null | null | null | task/models.py | Redhead95/taskful_api | 2b28a7f6ecaeb1c77d3fd3bea9ae5922667b1044 | [
"MIT"
] | null | null | null | import os
import uuid
from django.db import models
from django.utils.deconstruct import deconstructible
# Two-character status codes persisted in TaskList.status / Task.status.
NOT_COMPLETE = 'NC'
COMPLETE = 'C'
TASK_STATUS_CHOICES = [
    (NOT_COMPLETE, 'Not Complete'),
    (COMPLETE, 'Complete'),
]
@deconstructible
class GenerateAttachmentFilePath(object):
    """Callable ``upload_to`` generator for Attachment files.

    Stores each attachment under
    ``task/<task id>/attachments/<attachment id>.<original extension>``.
    Wrapped in @deconstructible so Django migrations can serialize it.
    """

    def __init__(self):
        pass

    def __call__(self, instance, filename):
        # Keep the uploaded file's extension but rename it to the
        # attachment's own id.
        extension = filename.split('.')[-1]
        directory = f'task/{instance.task.id}/attachments/'
        stored_name = f'{instance.id}.{extension}'
        return os.path.join(directory, stored_name)


attachment_file_path = GenerateAttachmentFilePath()
class TaskList(models.Model):
    """A named collection of tasks belonging to a house."""
    name = models.CharField(max_length=120)
    description = models.TextField(blank=True, null=True)
    # Two-character status code; see TASK_STATUS_CHOICES.
    status = models.CharField(max_length=2, choices=TASK_STATUS_CHOICES, default=NOT_COMPLETE)
    house = models.ForeignKey('house.House', on_delete=models.CASCADE, related_name='lists')
    # Nulled (not cascaded) if the creating profile is deleted.
    created_by = models.ForeignKey('user.Profile', blank=True, null=True, on_delete=models.SET_NULL, related_name='lists')
    completed_at = models.DateTimeField(blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        return f'{self.id} | {self.name}'
class Task(models.Model):
    """A single to-do item inside a TaskList."""

    name = models.CharField(max_length=120)
    description = models.TextField(blank=True, null=True)
    status = models.CharField(max_length=2, choices=TASK_STATUS_CHOICES, default=NOT_COMPLETE)
    # NOTE(review): default=1 assumes a seed TaskList with pk 1 always
    # exists — confirm a fixture/migration guarantees it.
    task_list = models.ForeignKey('task.TaskList', default=1, on_delete=models.CASCADE, related_name='tasks')
    # Both profile links survive profile deletion via SET_NULL.
    created_by = models.ForeignKey('user.Profile', blank=True, null=True, on_delete=models.SET_NULL, related_name='created_tasks')
    completed_by = models.ForeignKey('user.Profile', blank=True, null=True, on_delete=models.SET_NULL, related_name='completed_tasks')
    completed_at = models.DateTimeField(blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return f'{self.id} | {self.name}'
class Attachment(models.Model):
    """A file attached to a Task, stored under a UUID-based path."""

    # UUID primary key; also used as the stored filename by
    # attachment_file_path, so the upload path is stable and unique.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    task = models.ForeignKey('task.Task', on_delete=models.CASCADE, related_name='attachments')
    file_path = models.FileField(upload_to=attachment_file_path)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return f'{self.id} | {self.task}'
| 38.402985 | 134 | 0.733385 | import os
import uuid
from django.db import models
from django.utils.deconstruct import deconstructible
NOT_COMPLETE = 'NC'
COMPLETE = 'C'
TASK_STATUS_CHOICES = [
(NOT_COMPLETE, 'Not Complete'),
(COMPLETE, 'Complete'),
]
@deconstructible
class GenerateAttachmentFilePath(object):
def __init__(self):
pass
def __call__(self, instance, filename):
ext = filename.split('.')[-1]
path = f'task/{instance.task.id}/attachments/'
name = f'{instance.id}.{ext}'
return os.path.join(path, name)
attachment_file_path = GenerateAttachmentFilePath()
class TaskList(models.Model):
name = models.CharField(max_length=120)
description = models.TextField(blank=True, null=True)
status = models.CharField(max_length=2, choices=TASK_STATUS_CHOICES, default=NOT_COMPLETE)
house = models.ForeignKey('house.House', on_delete=models.CASCADE, related_name='lists')
created_by = models.ForeignKey('user.Profile', blank=True, null=True, on_delete=models.SET_NULL, related_name='lists')
completed_at = models.DateTimeField(blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return f'{self.id} | {self.name}'
class Task(models.Model):
name = models.CharField(max_length=120)
description = models.TextField(blank=True, null=True)
status = models.CharField(max_length=2, choices=TASK_STATUS_CHOICES, default=NOT_COMPLETE)
task_list = models.ForeignKey('task.TaskList', default=1, on_delete=models.CASCADE, related_name='tasks')
created_by = models.ForeignKey('user.Profile', blank=True, null=True, on_delete=models.SET_NULL, related_name='created_tasks')
completed_by = models.ForeignKey('user.Profile', blank=True, null=True, on_delete=models.SET_NULL, related_name='completed_tasks')
completed_at = models.DateTimeField(blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return f'{self.id} | {self.name}'
class Attachment(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
task = models.ForeignKey('task.Task', on_delete=models.CASCADE, related_name='attachments')
file_path = models.FileField(upload_to=attachment_file_path)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return f'{self.id} | {self.task}'
| true | true |
f72671733725c0c68f0d949630bf8d7a44e4778a | 14,483 | py | Python | shadowsocks/udprelay.py | whayne1103/shadowsocks | d5196497f7b0445d355c05dffa5c6a3e12b1d79b | [
"Apache-2.0"
] | 13 | 2019-09-22T06:34:18.000Z | 2021-07-17T06:19:39.000Z | shadowsocks/udprelay.py | whayne1103/shadowsocks | d5196497f7b0445d355c05dffa5c6a3e12b1d79b | [
"Apache-2.0"
] | 3 | 2020-02-10T03:23:08.000Z | 2020-06-06T10:02:36.000Z | shadowsocks/udprelay.py | whayne1103/shadowsocks | d5196497f7b0445d355c05dffa5c6a3e12b1d79b | [
"Apache-2.0"
] | 22 | 2015-08-23T00:44:46.000Z | 2020-03-16T08:33:13.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# SOCKS5 UDP Request
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# SOCKS5 UDP Response
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# shadowsocks UDP Request (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Response (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Request and Response (after encrypted)
# +-------+--------------+
# | IV | PAYLOAD |
# +-------+--------------+
# | Fixed | Variable |
# +-------+--------------+
# HOW TO NAME THINGS
# ------------------
# `dest` means destination server, which is from DST fields in the SOCKS5
# request
# `local` means local server of shadowsocks
# `remote` means remote server of shadowsocks
# `client` means UDP clients that connects to other servers
# `server` means the UDP server that handles user requests
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import logging
import struct
import errno
import random
from shadowsocks import cryptor, eventloop, lru_cache, common, shell
from shadowsocks.common import parse_header, pack_addr, onetimeauth_verify, \
onetimeauth_gen, ONETIMEAUTH_BYTES, ADDRTYPE_AUTH
BUF_SIZE = 65536
def client_key(source_addr, server_af):
    """Build the cache key for a client's upstream socket.

    Keyed on the client's (host, port) plus the *server's* address
    family — note this is the server af, not the destination af.
    """
    host, port = source_addr[0], source_addr[1]
    return '{0}:{1}:{2:d}'.format(host, port, server_af)
class UDPRelay(object):
    """Relays shadowsocks UDP datagrams on a single event loop.

    In local mode, datagrams from SOCKS5/tunnel clients are encrypted and
    forwarded to the configured server; in server mode, datagrams are
    decrypted, their shadowsocks address header parsed, and the payload
    relayed to the destination.  One upstream socket is kept per
    (client address, server address family) pair in an LRU cache.
    """

    def __init__(self, config, dns_resolver, is_local, stat_callback=None):
        """Bind the listening UDP socket and initialize per-relay state."""
        self._config = config
        if is_local:
            self._listen_addr = config['local_address']
            self._listen_port = config['local_port']
            self._remote_addr = config['server']
            self._remote_port = config['server_port']
        else:
            self._listen_addr = config['server']
            self._listen_port = config['server_port']
            self._remote_addr = None
            self._remote_port = None
        self.tunnel_remote = config.get('tunnel_remote', "8.8.8.8")
        self.tunnel_remote_port = config.get('tunnel_remote_port', 53)
        self.tunnel_port = config.get('tunnel_port', 53)
        self._is_tunnel = False
        self._dns_resolver = dns_resolver
        self._password = common.to_bytes(config['password'])
        self._method = config['method']
        self._timeout = config['timeout']
        self._ota_enable = config.get('one_time_auth', False)
        self._ota_enable_session = self._ota_enable
        self._is_local = is_local
        # Upstream client sockets; closed via _close_client on expiry.
        self._cache = lru_cache.LRUCache(timeout=config['timeout'],
                                         close_callback=self._close_client)
        self._client_fd_to_server_addr = \
            lru_cache.LRUCache(timeout=config['timeout'])
        self._dns_cache = lru_cache.LRUCache(timeout=300)
        self._eventloop = None
        self._closed = False
        self._sockets = set()
        self._forbidden_iplist = config.get('forbidden_ip')
        self._crypto_path = config['crypto_path']

        addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
                                   socket.SOCK_DGRAM, socket.SOL_UDP)
        if len(addrs) == 0:
            raise Exception("UDP can't get addrinfo for %s:%d" %
                            (self._listen_addr, self._listen_port))
        af, socktype, proto, canonname, sa = addrs[0]
        server_socket = socket.socket(af, socktype, proto)
        server_socket.bind((self._listen_addr, self._listen_port))
        server_socket.setblocking(False)
        self._server_socket = server_socket
        self._stat_callback = stat_callback

    def _get_a_server(self):
        """Pick a (server, port) pair; lists are sampled at random."""
        server = self._config['server']
        server_port = self._config['server_port']
        if type(server_port) == list:
            server_port = random.choice(server_port)
        if type(server) == list:
            server = random.choice(server)
        logging.debug('chosen server: %s:%d', server, server_port)
        return server, server_port

    def _close_client(self, client):
        """LRU eviction callback: close and deregister a client socket."""
        if hasattr(client, 'close'):
            self._sockets.remove(client.fileno())
            self._eventloop.remove(client)
            client.close()
        else:
            # just an address
            pass

    def _handle_server(self):
        """Handle a datagram arriving on the listening socket.

        Local mode: strip the SOCKS5/tunnel header, optionally append the
        OTA tag, encrypt and forward.  Server mode: decrypt, verify OTA if
        required, parse the address header and forward the payload.
        """
        server = self._server_socket
        data, r_addr = server.recvfrom(BUF_SIZE)
        key = None
        iv = None
        if not data:
            logging.debug('UDP handle_server: data is empty')
        # NOTE(review): no early return on empty data here — an empty
        # datagram falls through to the parsing below; confirm intended.
        if self._stat_callback:
            self._stat_callback(self._listen_port, len(data))
        if self._is_local:
            if self._is_tunnel:
                # add ss header to data
                tunnel_remote = self.tunnel_remote
                tunnel_remote_port = self.tunnel_remote_port
                data = common.add_header(tunnel_remote,
                                         tunnel_remote_port, data)
            else:
                # SOCKS5 UDP: byte 2 is FRAG; fragmentation unsupported.
                frag = common.ord(data[2])
                if frag != 0:
                    logging.warn('UDP drop a message since frag is not 0')
                    return
                else:
                    data = data[3:]
        else:
            # decrypt data
            try:
                data, key, iv = cryptor.decrypt_all(self._password,
                                                    self._method,
                                                    data, self._crypto_path)
            except Exception:
                logging.debug('UDP handle_server: decrypt data failed')
                return
            if not data:
                logging.debug('UDP handle_server: data is empty after decrypt')
                return
        header_result = parse_header(data)
        if header_result is None:
            return
        addrtype, dest_addr, dest_port, header_length = header_result
        logging.info("udp data to %s:%d from %s:%d"
                     % (dest_addr, dest_port, r_addr[0], r_addr[1]))
        if self._is_local:
            server_addr, server_port = self._get_a_server()
        else:
            server_addr, server_port = dest_addr, dest_port
            # spec https://shadowsocks.org/en/spec/one-time-auth.html
            self._ota_enable_session = addrtype & ADDRTYPE_AUTH
            if self._ota_enable and not self._ota_enable_session:
                logging.warn('client one time auth is required')
                return
            if self._ota_enable_session:
                if len(data) < header_length + ONETIMEAUTH_BYTES:
                    logging.warn('UDP one time auth header is too short')
                    return
                # Tag is the trailing ONETIMEAUTH_BYTES, keyed by iv+key.
                _hash = data[-ONETIMEAUTH_BYTES:]
                data = data[: -ONETIMEAUTH_BYTES]
                _key = iv + key
                if onetimeauth_verify(_hash, data, _key) is False:
                    logging.warn('UDP one time auth fail')
                    return
        addrs = self._dns_cache.get(server_addr, None)
        if addrs is None:
            addrs = socket.getaddrinfo(server_addr, server_port, 0,
                                       socket.SOCK_DGRAM, socket.SOL_UDP)
            if not addrs:
                # drop
                return
            else:
                self._dns_cache[server_addr] = addrs

        af, socktype, proto, canonname, sa = addrs[0]
        key = client_key(r_addr, af)
        client = self._cache.get(key, None)
        if not client:
            # TODO async getaddrinfo
            if self._forbidden_iplist:
                if common.to_str(sa[0]) in self._forbidden_iplist:
                    logging.debug('IP %s is in forbidden list, drop' %
                                  common.to_str(sa[0]))
                    # drop
                    return
            client = socket.socket(af, socktype, proto)
            client.setblocking(False)
            self._cache[key] = client
            self._client_fd_to_server_addr[client.fileno()] = r_addr
            self._sockets.add(client.fileno())
            self._eventloop.add(client, eventloop.POLL_IN, self)

        if self._is_local:
            key, iv, m = cryptor.gen_key_iv(self._password, self._method)
            # spec https://shadowsocks.org/en/spec/one-time-auth.html
            if self._ota_enable_session:
                data = self._ota_chunk_data_gen(key, iv, data)
            try:
                data = cryptor.encrypt_all_m(key, iv, m, self._method, data,
                                             self._crypto_path)
            except Exception:
                logging.debug("UDP handle_server: encrypt data failed")
                return
            if not data:
                return
        else:
            # Server side: strip the address header before relaying.
            data = data[header_length:]
        if not data:
            return
        try:
            client.sendto(data, (server_addr, server_port))
        except IOError as e:
            err = eventloop.errno_from_exception(e)
            if err in (errno.EINPROGRESS, errno.EAGAIN):
                pass
            else:
                shell.print_exception(e)

    def _handle_client(self, sock):
        """Handle a reply on an upstream client socket and send it back.

        Server mode: prepend the source address header and encrypt.
        Local mode: decrypt and re-add the SOCKS5 reply prefix (or strip
        the ss header entirely in tunnel mode).
        """
        data, r_addr = sock.recvfrom(BUF_SIZE)
        if not data:
            logging.debug('UDP handle_client: data is empty')
            return
        if self._stat_callback:
            self._stat_callback(self._listen_port, len(data))
        if not self._is_local:
            addrlen = len(r_addr[0])
            if addrlen > 255:
                # drop
                return
            data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
            try:
                response = cryptor.encrypt_all(self._password,
                                               self._method, data,
                                               self._crypto_path)
            except Exception:
                logging.debug("UDP handle_client: encrypt data failed")
                return
            if not response:
                return
        else:
            try:
                data, key, iv = cryptor.decrypt_all(self._password,
                                                    self._method, data,
                                                    self._crypto_path)
            except Exception:
                logging.debug('UDP handle_client: decrypt data failed')
                return
            if not data:
                return
            header_result = parse_header(data)
            if header_result is None:
                return
            addrtype, dest_addr, dest_port, header_length = header_result
            if self._is_tunnel:
                # remove ss header
                response = data[header_length:]
            else:
                # SOCKS5 UDP reply prefix: RSV(2) + FRAG(1) all zero.
                response = b'\x00\x00\x00' + data
        client_addr = self._client_fd_to_server_addr.get(sock.fileno())
        if client_addr:
            logging.debug("send udp response to %s:%d"
                          % (client_addr[0], client_addr[1]))
            self._server_socket.sendto(response, client_addr)
        else:
            # this packet is from somewhere else we know
            # simply drop that packet
            pass

    def _ota_chunk_data_gen(self, key, iv, data):
        """Set the OTA flag in the address type byte and append the tag."""
        data = common.chr(common.ord(data[0]) | ADDRTYPE_AUTH) + data[1:]
        key = iv + key
        return data + onetimeauth_gen(data, key)

    def add_to_loop(self, loop):
        """Register the listening socket and periodic sweep with *loop*."""
        if self._eventloop:
            raise Exception('already add to loop')
        if self._closed:
            raise Exception('already closed')
        self._eventloop = loop

        server_socket = self._server_socket
        self._eventloop.add(server_socket,
                            eventloop.POLL_IN | eventloop.POLL_ERR, self)
        loop.add_periodic(self.handle_periodic)

    def handle_event(self, sock, fd, event):
        """Event-loop callback: dispatch to server or client handler."""
        if sock == self._server_socket:
            if event & eventloop.POLL_ERR:
                logging.error('UDP server_socket err')
            self._handle_server()
        elif sock and (fd in self._sockets):
            if event & eventloop.POLL_ERR:
                logging.error('UDP client_socket err')
            self._handle_client(sock)

    def handle_periodic(self):
        """Periodic tick: finish shutdown if closed, then sweep caches."""
        if self._closed:
            if self._server_socket:
                self._server_socket.close()
                self._server_socket = None
                for sock in self._sockets:
                    sock.close()
                logging.info('closed UDP port %d', self._listen_port)
        self._cache.sweep()
        self._client_fd_to_server_addr.sweep()
        self._dns_cache.sweep()

    def close(self, next_tick=False):
        """Close the relay; if *next_tick* is False, tear down immediately,
        otherwise let handle_periodic finish the cleanup."""
        logging.debug('UDP close')
        self._closed = True
        if not next_tick:
            if self._eventloop:
                self._eventloop.remove_periodic(self.handle_periodic)
                self._eventloop.remove(self._server_socket)
            self._server_socket.close()
            for client in list(self._cache.values()):
                client.close()
| 39.571038 | 79 | 0.538908 |
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import logging
import struct
import errno
import random
from shadowsocks import cryptor, eventloop, lru_cache, common, shell
from shadowsocks.common import parse_header, pack_addr, onetimeauth_verify, \
onetimeauth_gen, ONETIMEAUTH_BYTES, ADDRTYPE_AUTH
BUF_SIZE = 65536
def client_key(source_addr, server_af):
return '%s:%s:%d' % (source_addr[0], source_addr[1], server_af)
class UDPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
self._config = config
if is_local:
self._listen_addr = config['local_address']
self._listen_port = config['local_port']
self._remote_addr = config['server']
self._remote_port = config['server_port']
else:
self._listen_addr = config['server']
self._listen_port = config['server_port']
self._remote_addr = None
self._remote_port = None
self.tunnel_remote = config.get('tunnel_remote', "8.8.8.8")
self.tunnel_remote_port = config.get('tunnel_remote_port', 53)
self.tunnel_port = config.get('tunnel_port', 53)
self._is_tunnel = False
self._dns_resolver = dns_resolver
self._password = common.to_bytes(config['password'])
self._method = config['method']
self._timeout = config['timeout']
self._ota_enable = config.get('one_time_auth', False)
self._ota_enable_session = self._ota_enable
self._is_local = is_local
self._cache = lru_cache.LRUCache(timeout=config['timeout'],
close_callback=self._close_client)
self._client_fd_to_server_addr = \
lru_cache.LRUCache(timeout=config['timeout'])
self._dns_cache = lru_cache.LRUCache(timeout=300)
self._eventloop = None
self._closed = False
self._sockets = set()
self._forbidden_iplist = config.get('forbidden_ip')
self._crypto_path = config['crypto_path']
addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if len(addrs) == 0:
raise Exception("UDP can't get addrinfo for %s:%d" %
(self._listen_addr, self._listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.bind((self._listen_addr, self._listen_port))
server_socket.setblocking(False)
self._server_socket = server_socket
self._stat_callback = stat_callback
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _close_client(self, client):
if hasattr(client, 'close'):
self._sockets.remove(client.fileno())
self._eventloop.remove(client)
client.close()
else:
# just an address
pass
def _handle_server(self):
server = self._server_socket
data, r_addr = server.recvfrom(BUF_SIZE)
key = None
iv = None
if not data:
logging.debug('UDP handle_server: data is empty')
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if self._is_local:
if self._is_tunnel:
# add ss header to data
tunnel_remote = self.tunnel_remote
tunnel_remote_port = self.tunnel_remote_port
data = common.add_header(tunnel_remote,
tunnel_remote_port, data)
else:
frag = common.ord(data[2])
if frag != 0:
logging.warn('UDP drop a message since frag is not 0')
return
else:
data = data[3:]
else:
# decrypt data
try:
data, key, iv = cryptor.decrypt_all(self._password,
self._method,
data, self._crypto_path)
except Exception:
logging.debug('UDP handle_server: decrypt data failed')
return
if not data:
logging.debug('UDP handle_server: data is empty after decrypt')
return
header_result = parse_header(data)
if header_result is None:
return
addrtype, dest_addr, dest_port, header_length = header_result
logging.info("udp data to %s:%d from %s:%d"
% (dest_addr, dest_port, r_addr[0], r_addr[1]))
if self._is_local:
server_addr, server_port = self._get_a_server()
else:
server_addr, server_port = dest_addr, dest_port
# spec https://shadowsocks.org/en/spec/one-time-auth.html
self._ota_enable_session = addrtype & ADDRTYPE_AUTH
if self._ota_enable and not self._ota_enable_session:
logging.warn('client one time auth is required')
return
if self._ota_enable_session:
if len(data) < header_length + ONETIMEAUTH_BYTES:
logging.warn('UDP one time auth header is too short')
return
_hash = data[-ONETIMEAUTH_BYTES:]
data = data[: -ONETIMEAUTH_BYTES]
_key = iv + key
if onetimeauth_verify(_hash, data, _key) is False:
logging.warn('UDP one time auth fail')
return
addrs = self._dns_cache.get(server_addr, None)
if addrs is None:
addrs = socket.getaddrinfo(server_addr, server_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if not addrs:
# drop
return
else:
self._dns_cache[server_addr] = addrs
af, socktype, proto, canonname, sa = addrs[0]
key = client_key(r_addr, af)
client = self._cache.get(key, None)
if not client:
# TODO async getaddrinfo
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
logging.debug('IP %s is in forbidden list, drop' %
common.to_str(sa[0]))
# drop
return
client = socket.socket(af, socktype, proto)
client.setblocking(False)
self._cache[key] = client
self._client_fd_to_server_addr[client.fileno()] = r_addr
self._sockets.add(client.fileno())
self._eventloop.add(client, eventloop.POLL_IN, self)
if self._is_local:
key, iv, m = cryptor.gen_key_iv(self._password, self._method)
# spec https://shadowsocks.org/en/spec/one-time-auth.html
if self._ota_enable_session:
data = self._ota_chunk_data_gen(key, iv, data)
try:
data = cryptor.encrypt_all_m(key, iv, m, self._method, data,
self._crypto_path)
except Exception:
logging.debug("UDP handle_server: encrypt data failed")
return
if not data:
return
else:
data = data[header_length:]
if not data:
return
try:
client.sendto(data, (server_addr, server_port))
except IOError as e:
err = eventloop.errno_from_exception(e)
if err in (errno.EINPROGRESS, errno.EAGAIN):
pass
else:
shell.print_exception(e)
def _handle_client(self, sock):
data, r_addr = sock.recvfrom(BUF_SIZE)
if not data:
logging.debug('UDP handle_client: data is empty')
return
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if not self._is_local:
addrlen = len(r_addr[0])
if addrlen > 255:
# drop
return
data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
try:
response = cryptor.encrypt_all(self._password,
self._method, data,
self._crypto_path)
except Exception:
logging.debug("UDP handle_client: encrypt data failed")
return
if not response:
return
else:
try:
data, key, iv = cryptor.decrypt_all(self._password,
self._method, data,
self._crypto_path)
except Exception:
logging.debug('UDP handle_client: decrypt data failed')
return
if not data:
return
header_result = parse_header(data)
if header_result is None:
return
addrtype, dest_addr, dest_port, header_length = header_result
if self._is_tunnel:
# remove ss header
response = data[header_length:]
else:
response = b'\x00\x00\x00' + data
client_addr = self._client_fd_to_server_addr.get(sock.fileno())
if client_addr:
logging.debug("send udp response to %s:%d"
% (client_addr[0], client_addr[1]))
self._server_socket.sendto(response, client_addr)
else:
# this packet is from somewhere else we know
# simply drop that packet
pass
def _ota_chunk_data_gen(self, key, iv, data):
data = common.chr(common.ord(data[0]) | ADDRTYPE_AUTH) + data[1:]
key = iv + key
return data + onetimeauth_gen(data, key)
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already add to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
server_socket = self._server_socket
self._eventloop.add(server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
loop.add_periodic(self.handle_periodic)
def handle_event(self, sock, fd, event):
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
logging.error('UDP server_socket err')
self._handle_server()
elif sock and (fd in self._sockets):
if event & eventloop.POLL_ERR:
logging.error('UDP client_socket err')
self._handle_client(sock)
def handle_periodic(self):
if self._closed:
if self._server_socket:
self._server_socket.close()
self._server_socket = None
for sock in self._sockets:
sock.close()
logging.info('closed UDP port %d', self._listen_port)
self._cache.sweep()
self._client_fd_to_server_addr.sweep()
self._dns_cache.sweep()
def close(self, next_tick=False):
logging.debug('UDP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for client in list(self._cache.values()):
client.close()
| true | true |
f72671937db542e223720da043ede8cedbc9e9b9 | 15,617 | py | Python | frigate/events.py | gpete/frigate | e6594c6d98fc18e05df29e66924039cd53a67dd8 | [
"MIT"
] | null | null | null | frigate/events.py | gpete/frigate | e6594c6d98fc18e05df29e66924039cd53a67dd8 | [
"MIT"
] | null | null | null | frigate/events.py | gpete/frigate | e6594c6d98fc18e05df29e66924039cd53a67dd8 | [
"MIT"
] | null | null | null | import datetime
import json
import logging
import os
import queue
import subprocess as sp
import threading
import time
from collections import defaultdict
from pathlib import Path
import psutil
import shutil
from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
from frigate.models import Event
from peewee import fn
logger = logging.getLogger(__name__)
class EventProcessor(threading.Thread):
    """Consumes object-tracking events from a queue, assembles event
    clips from cached ffmpeg recording segments, and persists finished
    events to the database."""

    def __init__(
        self, config, camera_processes, event_queue, event_processed_queue, stop_event
    ):
        threading.Thread.__init__(self)
        self.name = "event_processor"
        self.config = config
        self.camera_processes = camera_processes
        # filename -> {path, camera, start_time, duration} for cache segments
        self.cached_clips = {}
        self.event_queue = event_queue
        self.event_processed_queue = event_processed_queue
        self.events_in_process = {}
        self.stop_event = stop_event

    def should_create_clip(self, camera, event_data):
        """Return False for false positives or events that never entered
        one of the camera's required zones; True otherwise."""
        if event_data["false_positive"]:
            return False

        # if there are required zones and there is no overlap
        required_zones = self.config.cameras[camera].clips.required_zones
        if len(required_zones) > 0 and not set(event_data["entered_zones"]) & set(
            required_zones
        ):
            logger.debug(
                f"Not creating clip for {event_data['id']} because it did not enter required zones"
            )
            return False

        return True

    def refresh_cache(self):
        """Index new recording segments in CACHE_DIR and expire segments
        that no in-progress event can still need.

        Segments still open by an ffmpeg process are skipped.  Duration is
        probed with ffprobe; unreadable files are deleted.  If the cache
        volume is >90% full a proactive oldest-first purge runs.
        """
        cached_files = os.listdir(CACHE_DIR)

        files_in_use = []
        for process in psutil.process_iter():
            try:
                if process.name() != "ffmpeg":
                    continue
                flist = process.open_files()
                if flist:
                    for nt in flist:
                        if nt.path.startswith(CACHE_DIR):
                            files_in_use.append(nt.path.split("/")[-1])
            # NOTE(review): bare except — presumably guards against
            # psutil.NoSuchProcess/AccessDenied races while iterating;
            # consider narrowing to psutil.Error.
            except:
                continue

        for f in cached_files:
            if f in files_in_use or f in self.cached_clips:
                continue
            # cache filenames are "<camera>-<YYYYmmddHHMMSS>.<ext>"
            basename = os.path.splitext(f)[0]
            camera, date = basename.rsplit("-", maxsplit=1)
            start_time = datetime.datetime.strptime(date, "%Y%m%d%H%M%S")

            ffprobe_cmd = [
                "ffprobe",
                "-v",
                "error",
                "-show_entries",
                "format=duration",
                "-of",
                "default=noprint_wrappers=1:nokey=1",
                f"{os.path.join(CACHE_DIR, f)}",
            ]
            p = sp.run(ffprobe_cmd, capture_output=True)
            if p.returncode == 0:
                duration = float(p.stdout.decode().strip())
            else:
                logger.info(f"bad file: {f}")
                os.remove(os.path.join(CACHE_DIR, f))
                continue

            self.cached_clips[f] = {
                "path": f,
                "camera": camera,
                "start_time": start_time.timestamp(),
                "duration": duration,
            }

        if len(self.events_in_process) > 0:
            earliest_event = min(
                self.events_in_process.values(), key=lambda x: x["start_time"]
            )["start_time"]
        else:
            earliest_event = datetime.datetime.now().timestamp()

        # if the earliest event is more tha max seconds ago, cap it
        max_seconds = self.config.clips.max_seconds
        earliest_event = max(
            earliest_event,
            datetime.datetime.now().timestamp() - self.config.clips.max_seconds,
        )

        # drop segments that ended more than 90s before the earliest event
        for f, data in list(self.cached_clips.items()):
            if earliest_event - 90 > data["start_time"] + data["duration"]:
                del self.cached_clips[f]
                logger.debug(f"Cleaning up cached file {f}")
                os.remove(os.path.join(CACHE_DIR, f))

        # if we are still using more than 90% of the cache, proactively cleanup
        cache_usage = shutil.disk_usage("/tmp/cache")
        if (
            cache_usage.used / cache_usage.total > 0.9
            and cache_usage.free < 200000000
            and len(self.cached_clips) > 0
        ):
            logger.warning("More than 90% of the cache is used.")
            logger.warning(
                "Consider increasing space available at /tmp/cache or reducing max_seconds in your clips config."
            )
            logger.warning("Proactively cleaning up the cache...")
            while cache_usage.used / cache_usage.total > 0.9:
                oldest_clip = min(
                    self.cached_clips.values(), key=lambda x: x["start_time"]
                )
                del self.cached_clips[oldest_clip["path"]]
                os.remove(os.path.join(CACHE_DIR, oldest_clip["path"]))
                cache_usage = shutil.disk_usage("/tmp/cache")

    def create_clip(self, camera, event_data, pre_capture, post_capture):
        """Concatenate cached segments covering the event (plus pre/post
        capture padding) into CLIPS_DIR using ffmpeg's concat demuxer.

        Waits up to ~25s (5 retries) for the trailing segment to appear.
        Returns True on success, False if coverage never arrived or
        ffmpeg failed.
        """
        # get all clips from the camera with the event sorted
        sorted_clips = sorted(
            [c for c in self.cached_clips.values() if c["camera"] == camera],
            key=lambda i: i["start_time"],
        )

        # if there are no clips in the cache or we are still waiting on a needed file check every 5 seconds
        wait_count = 0
        while (
            len(sorted_clips) == 0
            or sorted_clips[-1]["start_time"] + sorted_clips[-1]["duration"]
            < event_data["end_time"] + post_capture
        ):
            if wait_count > 4:
                logger.warning(
                    f"Unable to create clip for {camera} and event {event_data['id']}. There were no cache files for this event."
                )
                return False
            logger.debug(f"No cache clips for {camera}. Waiting...")
            time.sleep(5)
            self.refresh_cache()
            # get all clips from the camera with the event sorted
            sorted_clips = sorted(
                [c for c in self.cached_clips.values() if c["camera"] == camera],
                key=lambda i: i["start_time"],
            )
            wait_count += 1

        playlist_start = event_data["start_time"] - pre_capture
        playlist_end = event_data["end_time"] + post_capture
        playlist_lines = []
        for clip in sorted_clips:
            # clip ends before playlist start time, skip
            if clip["start_time"] + clip["duration"] < playlist_start:
                continue
            # clip starts after playlist ends, finish
            if clip["start_time"] > playlist_end:
                break
            playlist_lines.append(f"file '{os.path.join(CACHE_DIR,clip['path'])}'")
            # if this is the starting clip, add an inpoint
            if clip["start_time"] < playlist_start:
                playlist_lines.append(
                    f"inpoint {int(playlist_start-clip['start_time'])}"
                )
            # if this is the ending clip, add an outpoint
            if clip["start_time"] + clip["duration"] > playlist_end:
                playlist_lines.append(
                    f"outpoint {int(playlist_end-clip['start_time'])}"
                )

        clip_name = f"{camera}-{event_data['id']}"
        ffmpeg_cmd = [
            "ffmpeg",
            "-y",
            "-protocol_whitelist",
            "pipe,file",
            "-f",
            "concat",
            "-safe",
            "0",
            "-i",
            "-",
            "-c",
            "copy",
            "-movflags",
            "+faststart",
            f"{os.path.join(CLIPS_DIR, clip_name)}.mp4",
        ]

        # playlist is fed to ffmpeg on stdin (hence "-i -")
        p = sp.run(
            ffmpeg_cmd,
            input="\n".join(playlist_lines),
            encoding="ascii",
            capture_output=True,
        )
        if p.returncode != 0:
            logger.error(p.stderr)
            return False
        return True

    def run(self):
        """Main loop: pull (type, camera, data) tuples off event_queue,
        track in-progress events, and on 'end' create the clip (if
        eligible) and persist the Event row."""
        while not self.stop_event.is_set():
            try:
                event_type, camera, event_data = self.event_queue.get(timeout=10)
            except queue.Empty:
                # idle tick: keep the segment cache fresh
                if not self.stop_event.is_set():
                    self.refresh_cache()
                continue

            logger.debug(f"Event received: {event_type} {camera} {event_data['id']}")
            self.refresh_cache()

            if event_type == "start":
                self.events_in_process[event_data["id"]] = event_data

            if event_type == "end":
                clips_config = self.config.cameras[camera].clips

                clip_created = False
                if self.should_create_clip(camera, event_data):
                    if clips_config.enabled and (
                        clips_config.objects is None
                        or event_data["label"] in clips_config.objects
                    ):
                        clip_created = self.create_clip(
                            camera,
                            event_data,
                            clips_config.pre_capture,
                            clips_config.post_capture,
                        )

                # only persist events that produced some media
                if clip_created or event_data["has_snapshot"]:
                    Event.create(
                        id=event_data["id"],
                        label=event_data["label"],
                        camera=camera,
                        start_time=event_data["start_time"],
                        end_time=event_data["end_time"],
                        top_score=event_data["top_score"],
                        false_positive=event_data["false_positive"],
                        zones=list(event_data["entered_zones"]),
                        thumbnail=event_data["thumbnail"],
                        has_clip=clip_created,
                        has_snapshot=event_data["has_snapshot"],
                    )
                del self.events_in_process[event_data["id"]]
                self.event_processed_queue.put((event_data["id"], camera))

        logger.info(f"Exiting event processor...")
class EventCleanup(threading.Thread):
    """Background thread that expires old event media and prunes the database.

    Every 5 minutes it removes clips/snapshots whose retention window has
    elapsed, de-duplicates near-identical events, and deletes database rows
    that no longer reference any media.
    """

    def __init__(self, config: FrigateConfig, stop_event):
        threading.Thread.__init__(self)
        self.name = "event_cleanup"
        self.config = config
        self.stop_event = stop_event
        # cameras currently present in the config; events from any other
        # camera are expired with the global retention settings instead
        self.camera_keys = list(self.config.cameras.keys())

    def _expire_media(self, camera_clause, retain_config, file_extension, update_params):
        """Expire media for events matching the peewee *camera_clause*.

        For each distinct label, events older than that label's retention
        window have their media file removed from disk and the matching
        has_clip/has_snapshot flag cleared via *update_params*.
        """
        distinct_labels = (
            Event.select(Event.label).where(camera_clause).distinct()
        )
        # loop over object types in db
        for row in distinct_labels:
            # expiration time for this label (fall back to the default)
            expire_days = retain_config.objects.get(row.label, retain_config.default)
            expire_after = (
                datetime.datetime.now() - datetime.timedelta(days=expire_days)
            ).timestamp()
            conditions = (
                camera_clause,
                Event.start_time < expire_after,
                Event.label == row.label,
            )
            # delete the media from disk
            for event in Event.select().where(*conditions):
                media_name = f"{event.camera}-{event.id}"
                media_path = Path(
                    f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
                )
                media_path.unlink(missing_ok=True)
            # clear the media attribute on the expired db entries
            Event.update(update_params).where(*conditions).execute()

    def expire(self, media_type):
        """Expire aged media: 'clips' expires clip files, any other value
        expires snapshots (mirrors the call sites in run())."""
        if media_type == 'clips':
            global_retain = self.config.clips.retain
            file_extension = "mp4"
            update_params = {"has_clip": False}
        else:
            global_retain = self.config.snapshots.retain
            file_extension = "jpg"
            update_params = {"has_snapshot": False}
        # events from cameras no longer listed in the config fall back to
        # the global retention settings
        self._expire_media(
            Event.camera.not_in(self.camera_keys),
            global_retain,
            file_extension,
            update_params,
        )
        # events from configured cameras use the per-camera settings
        for name, camera in self.config.cameras.items():
            if media_type == 'clips':
                retain_config = camera.clips.retain
            else:
                retain_config = camera.snapshots.retain
            self._expire_media(
                Event.camera == name,
                retain_config,
                file_extension,
                update_params,
            )

    def purge_duplicates(self):
        """Remove duplicate events (same label/camera in a 5-second bucket).

        The copy with the longest duration is kept; the other copies lose
        their media files and their database rows.
        """
        duplicate_query = """with grouped_events as (
          select id,
            label,
            camera,
            has_snapshot,
            has_clip,
            row_number() over (
              partition by label, camera, round(start_time/5,0)*5
              order by end_time-start_time desc
            ) as copy_number
          from event
        )

        select distinct id, camera, has_snapshot, has_clip from grouped_events
        where copy_number > 1;"""
        duplicate_events = Event.raw(duplicate_query)
        for event in duplicate_events:
            logger.debug(f"Removing duplicate: {event.id}")
            media_name = f"{event.camera}-{event.id}"
            if event.has_snapshot:
                media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
                media_path.unlink(missing_ok=True)
            if event.has_clip:
                media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
                media_path.unlink(missing_ok=True)
        (
            Event.delete()
            .where(Event.id << [event.id for event in duplicate_events])
            .execute()
        )

    def run(self):
        """Run the cleanup cycle every 5 minutes until stop_event is set."""
        # only expire events every 5 minutes
        while not self.stop_event.wait(300):
            self.expire("clips")
            self.expire("snapshots")
            self.purge_duplicates()
            # drop events from db where has_clip and has_snapshot are false
            delete_query = Event.delete().where(
                Event.has_clip == False, Event.has_snapshot == False
            )
            delete_query.execute()
        logger.info(f"Exiting event cleanup...")
| 37.90534 | 129 | 0.539284 | import datetime
import json
import logging
import os
import queue
import subprocess as sp
import threading
import time
from collections import defaultdict
from pathlib import Path
import psutil
import shutil
from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
from frigate.models import Event
from peewee import fn
logger = logging.getLogger(__name__)
class EventProcessor(threading.Thread):
def __init__(
self, config, camera_processes, event_queue, event_processed_queue, stop_event
):
threading.Thread.__init__(self)
self.name = "event_processor"
self.config = config
self.camera_processes = camera_processes
self.cached_clips = {}
self.event_queue = event_queue
self.event_processed_queue = event_processed_queue
self.events_in_process = {}
self.stop_event = stop_event
def should_create_clip(self, camera, event_data):
if event_data["false_positive"]:
return False
required_zones = self.config.cameras[camera].clips.required_zones
if len(required_zones) > 0 and not set(event_data["entered_zones"]) & set(
required_zones
):
logger.debug(
f"Not creating clip for {event_data['id']} because it did not enter required zones"
)
return False
return True
def refresh_cache(self):
cached_files = os.listdir(CACHE_DIR)
files_in_use = []
for process in psutil.process_iter():
try:
if process.name() != "ffmpeg":
continue
flist = process.open_files()
if flist:
for nt in flist:
if nt.path.startswith(CACHE_DIR):
files_in_use.append(nt.path.split("/")[-1])
except:
continue
for f in cached_files:
if f in files_in_use or f in self.cached_clips:
continue
basename = os.path.splitext(f)[0]
camera, date = basename.rsplit("-", maxsplit=1)
start_time = datetime.datetime.strptime(date, "%Y%m%d%H%M%S")
ffprobe_cmd = [
"ffprobe",
"-v",
"error",
"-show_entries",
"format=duration",
"-of",
"default=noprint_wrappers=1:nokey=1",
f"{os.path.join(CACHE_DIR, f)}",
]
p = sp.run(ffprobe_cmd, capture_output=True)
if p.returncode == 0:
duration = float(p.stdout.decode().strip())
else:
logger.info(f"bad file: {f}")
os.remove(os.path.join(CACHE_DIR, f))
continue
self.cached_clips[f] = {
"path": f,
"camera": camera,
"start_time": start_time.timestamp(),
"duration": duration,
}
if len(self.events_in_process) > 0:
earliest_event = min(
self.events_in_process.values(), key=lambda x: x["start_time"]
)["start_time"]
else:
earliest_event = datetime.datetime.now().timestamp()
max_seconds = self.config.clips.max_seconds
earliest_event = max(
earliest_event,
datetime.datetime.now().timestamp() - self.config.clips.max_seconds,
)
for f, data in list(self.cached_clips.items()):
if earliest_event - 90 > data["start_time"] + data["duration"]:
del self.cached_clips[f]
logger.debug(f"Cleaning up cached file {f}")
os.remove(os.path.join(CACHE_DIR, f))
cache_usage = shutil.disk_usage("/tmp/cache")
if (
cache_usage.used / cache_usage.total > 0.9
and cache_usage.free < 200000000
and len(self.cached_clips) > 0
):
logger.warning("More than 90% of the cache is used.")
logger.warning(
"Consider increasing space available at /tmp/cache or reducing max_seconds in your clips config."
)
logger.warning("Proactively cleaning up the cache...")
while cache_usage.used / cache_usage.total > 0.9:
oldest_clip = min(
self.cached_clips.values(), key=lambda x: x["start_time"]
)
del self.cached_clips[oldest_clip["path"]]
os.remove(os.path.join(CACHE_DIR, oldest_clip["path"]))
cache_usage = shutil.disk_usage("/tmp/cache")
def create_clip(self, camera, event_data, pre_capture, post_capture):
sorted_clips = sorted(
[c for c in self.cached_clips.values() if c["camera"] == camera],
key=lambda i: i["start_time"],
)
wait_count = 0
while (
len(sorted_clips) == 0
or sorted_clips[-1]["start_time"] + sorted_clips[-1]["duration"]
< event_data["end_time"] + post_capture
):
if wait_count > 4:
logger.warning(
f"Unable to create clip for {camera} and event {event_data['id']}. There were no cache files for this event."
)
return False
logger.debug(f"No cache clips for {camera}. Waiting...")
time.sleep(5)
self.refresh_cache()
sorted_clips = sorted(
[c for c in self.cached_clips.values() if c["camera"] == camera],
key=lambda i: i["start_time"],
)
wait_count += 1
playlist_start = event_data["start_time"] - pre_capture
playlist_end = event_data["end_time"] + post_capture
playlist_lines = []
for clip in sorted_clips:
if clip["start_time"] + clip["duration"] < playlist_start:
continue
if clip["start_time"] > playlist_end:
break
playlist_lines.append(f"file '{os.path.join(CACHE_DIR,clip['path'])}'")
if clip["start_time"] < playlist_start:
playlist_lines.append(
f"inpoint {int(playlist_start-clip['start_time'])}"
)
if clip["start_time"] + clip["duration"] > playlist_end:
playlist_lines.append(
f"outpoint {int(playlist_end-clip['start_time'])}"
)
clip_name = f"{camera}-{event_data['id']}"
ffmpeg_cmd = [
"ffmpeg",
"-y",
"-protocol_whitelist",
"pipe,file",
"-f",
"concat",
"-safe",
"0",
"-i",
"-",
"-c",
"copy",
"-movflags",
"+faststart",
f"{os.path.join(CLIPS_DIR, clip_name)}.mp4",
]
p = sp.run(
ffmpeg_cmd,
input="\n".join(playlist_lines),
encoding="ascii",
capture_output=True,
)
if p.returncode != 0:
logger.error(p.stderr)
return False
return True
def run(self):
while not self.stop_event.is_set():
try:
event_type, camera, event_data = self.event_queue.get(timeout=10)
except queue.Empty:
if not self.stop_event.is_set():
self.refresh_cache()
continue
logger.debug(f"Event received: {event_type} {camera} {event_data['id']}")
self.refresh_cache()
if event_type == "start":
self.events_in_process[event_data["id"]] = event_data
if event_type == "end":
clips_config = self.config.cameras[camera].clips
clip_created = False
if self.should_create_clip(camera, event_data):
if clips_config.enabled and (
clips_config.objects is None
or event_data["label"] in clips_config.objects
):
clip_created = self.create_clip(
camera,
event_data,
clips_config.pre_capture,
clips_config.post_capture,
)
if clip_created or event_data["has_snapshot"]:
Event.create(
id=event_data["id"],
label=event_data["label"],
camera=camera,
start_time=event_data["start_time"],
end_time=event_data["end_time"],
top_score=event_data["top_score"],
false_positive=event_data["false_positive"],
zones=list(event_data["entered_zones"]),
thumbnail=event_data["thumbnail"],
has_clip=clip_created,
has_snapshot=event_data["has_snapshot"],
)
del self.events_in_process[event_data["id"]]
self.event_processed_queue.put((event_data["id"], camera))
logger.info(f"Exiting event processor...")
class EventCleanup(threading.Thread):
def __init__(self, config: FrigateConfig, stop_event):
threading.Thread.__init__(self)
self.name = "event_cleanup"
self.config = config
self.stop_event = stop_event
self.camera_keys = list(self.config.cameras.keys())
def expire(self, media_type):
elf.config.clips.retain
file_extension = "mp4"
update_params = {"has_clip": False}
else:
retain_config = self.config.snapshots.retain
file_extension = "jpg"
update_params = {"has_snapshot": False}
distinct_labels = (
Event.select(Event.label)
.where(Event.camera.not_in(self.camera_keys))
.distinct()
)
for l in distinct_labels:
expire_days = retain_config.objects.get(l.label, retain_config.default)
expire_after = (
datetime.datetime.now() - datetime.timedelta(days=expire_days)
).timestamp()
expired_events = Event.select().where(
Event.camera.not_in(self.camera_keys),
Event.start_time < expire_after,
Event.label == l.label,
)
for event in expired_events:
media_name = f"{event.camera}-{event.id}"
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
media_path.unlink(missing_ok=True)
update_query = Event.update(update_params).where(
Event.camera.not_in(self.camera_keys),
Event.start_time < expire_after,
Event.label == l.label,
)
update_query.execute()
):
if media_type == 'clips':
retain_config = camera.clips.retain
else:
retain_config = camera.snapshots.retain
distinct_labels = (
Event.select(Event.label).where(Event.camera == name).distinct()
)
for l in distinct_labels:
expire_days = retain_config.objects.get(l.label, retain_config.default)
expire_after = (
datetime.datetime.now() - datetime.timedelta(days=expire_days)
).timestamp()
expired_events = Event.select().where(
Event.camera == name,
Event.start_time < expire_after,
Event.label == l.label,
)
for event in expired_events:
media_name = f"{event.camera}-{event.id}"
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
media_path.unlink(missing_ok=True)
update_query = Event.update(update_params).where(
Event.camera == name,
Event.start_time < expire_after,
Event.label == l.label,
)
update_query.execute()
def purge_duplicates(self):
duplicate_query = """with grouped_events as (
select id,
label,
camera,
has_snapshot,
has_clip,
row_number() over (
partition by label, camera, round(start_time/5,0)*5
order by end_time-start_time desc
) as copy_number
from event
)
select distinct id, camera, has_snapshot, has_clip from grouped_events
where copy_number > 1;"""
duplicate_events = Event.raw(duplicate_query)
for event in duplicate_events:
logger.debug(f"Removing duplicate: {event.id}")
media_name = f"{event.camera}-{event.id}"
if event.has_snapshot:
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
media_path.unlink(missing_ok=True)
if event.has_clip:
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
media_path.unlink(missing_ok=True)
(
Event.delete()
.where(Event.id << [event.id for event in duplicate_events])
.execute()
)
def run(self):
while not self.stop_event.wait(300):
self.expire("clips")
self.expire("snapshots")
self.purge_duplicates()
delete_query = Event.delete().where(
Event.has_clip == False, Event.has_snapshot == False
)
delete_query.execute()
logger.info(f"Exiting event cleanup...")
| true | true |
f726743c2af55562de0a399b95ee66d6a3d5ea4c | 900 | py | Python | WebSpider/threads.py | bianQ/similarweb | 3df31af1267a285d0bc6adf720409ceb43eb56cb | [
"MIT"
] | null | null | null | WebSpider/threads.py | bianQ/similarweb | 3df31af1267a285d0bc6adf720409ceb43eb56cb | [
"MIT"
] | null | null | null | WebSpider/threads.py | bianQ/similarweb | 3df31af1267a285d0bc6adf720409ceb43eb56cb | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@author: Alan
@time: 2021/05/18
"""
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
import traceback
class MultiThread(ThreadPoolExecutor):
    """Thread pool that surfaces worker exceptions instead of losing them.

    Futures returned by :meth:`execute` get a done-callback that prints the
    traceback of any exception raised by the worker, so failures are visible
    even when the caller never inspects the future.
    """

    def __init__(self, max_workers=None, thread_name_prefix=''):
        super().__init__(max_workers, thread_name_prefix)

    def thread_log(self, worker):
        """Done-callback: return the worker's result, logging any failure.

        Returns None when the worker raised; the traceback is printed so the
        error is not silently swallowed.
        """
        try:
            return worker.result()
        # explicit BaseException keeps the original bare-except semantics
        # (also covers cancellation) without the lint-hostile bare clause
        except BaseException:
            traceback.print_exc()

    def execute(self, fn, *args, **kwargs):
        """Submit *fn* to the pool and attach the exception-logging callback."""
        future = self.submit(fn, *args, **kwargs)
        future.add_done_callback(self.thread_log)
        return future

    @staticmethod
    def execute_after_done(fn, workers, *args, **kwargs):
        """Wait (up to 24h) for all futures in *workers*, then run *fn*."""
        wait(workers, timeout=86400, return_when=ALL_COMPLETED)
        return fn(*args, **kwargs)
| 26.470588 | 70 | 0.628889 |
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
import traceback
class MultiThread(ThreadPoolExecutor):
    """Thread pool whose futures log worker exceptions via a done-callback."""
    def __init__(self, max_workers=None, thread_name_prefix=''):
        super().__init__(max_workers, thread_name_prefix)
    def thread_log(self, worker):
        """Done-callback: return the worker's result; print a traceback if it raised."""
        try:
            result = worker.result()
            return result
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt
        except:
            traceback.print_exc()
    def execute(self, fn, *args, **kwargs):
        """Submit fn to the pool and attach the logging callback to its future."""
        thread = self.submit(fn, *args, **kwargs)
        thread.add_done_callback(self.thread_log)
        return thread
    @staticmethod
    def execute_after_done(fn, workers, *args, **kwargs):
        """Wait (up to 24h) for all futures in workers, then call fn."""
        wait(workers, timeout=86400, return_when=ALL_COMPLETED)
        return fn(*args, **kwargs)
| true | true |
f7267621fdc306ffafe3ecda31e41278dccdb8f2 | 2,478 | py | Python | tests/test_metadata_get.py | iris-edu-int/datacite | 4a3aa5b9bb156cee616848cc7c8d929ad76fa3cc | [
"BSD-3-Clause"
] | null | null | null | tests/test_metadata_get.py | iris-edu-int/datacite | 4a3aa5b9bb156cee616848cc7c8d929ad76fa3cc | [
"BSD-3-Clause"
] | null | null | null | tests/test_metadata_get.py | iris-edu-int/datacite | 4a3aa5b9bb156cee616848cc7c8d929ad76fa3cc | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of DataCite.
#
# Copyright (C) 2015, 2016 CERN.
#
# DataCite is free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
"""Tests for /metadata GET."""
from __future__ import absolute_import, print_function
import pytest
import responses
from helpers import APIURL, get_client
from datacite.errors import DataCiteForbiddenError, DataCiteGoneError, \
DataCiteNotFoundError, DataCiteServerError, DataCiteUnauthorizedError
@responses.activate
def test_metadata_get_200():
    """A successful GET returns the stored metadata document verbatim."""
    doc = "<resource></resource>"
    url = "{0}metadata/10.1234/1".format(APIURL)
    responses.add(
        responses.GET,
        url,
        body=doc,
        status=200,
        content_type="application/xml",
    )
    client = get_client()
    assert client.metadata_get("10.1234/1") == doc
@responses.activate
def test_metadata_get_401():
    """HTTP 401 is mapped to DataCiteUnauthorizedError."""
    url = "{0}metadata/10.1234/1".format(APIURL)
    responses.add(responses.GET, url, body="Unauthorized", status=401)
    client = get_client()
    with pytest.raises(DataCiteUnauthorizedError):
        client.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_403():
    """HTTP 403 is mapped to DataCiteForbiddenError."""
    url = "{0}metadata/10.1234/1".format(APIURL)
    responses.add(responses.GET, url, body="Forbidden", status=403)
    client = get_client()
    with pytest.raises(DataCiteForbiddenError):
        client.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_404():
    """HTTP 404 is mapped to DataCiteNotFoundError."""
    url = "{0}metadata/10.1234/1".format(APIURL)
    responses.add(responses.GET, url, body="Not Found", status=404)
    client = get_client()
    with pytest.raises(DataCiteNotFoundError):
        client.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_410():
    """HTTP 410 is mapped to DataCiteGoneError."""
    url = "{0}metadata/10.1234/1".format(APIURL)
    responses.add(responses.GET, url, body="Gone", status=410)
    client = get_client()
    with pytest.raises(DataCiteGoneError):
        client.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_500():
    """HTTP 500 is mapped to DataCiteServerError."""
    url = "{0}metadata/10.1234/1".format(APIURL)
    responses.add(responses.GET, url, body="Internal Server Error", status=500)
    client = get_client()
    with pytest.raises(DataCiteServerError):
        client.metadata_get("10.1234/1")
| 22.125 | 73 | 0.633172 |
from __future__ import absolute_import, print_function
import pytest
import responses
from helpers import APIURL, get_client
from datacite.errors import DataCiteForbiddenError, DataCiteGoneError, \
DataCiteNotFoundError, DataCiteServerError, DataCiteUnauthorizedError
@responses.activate
def test_metadata_get_200():
doc = "<resource></resource>"
responses.add(
responses.GET,
"{0}metadata/10.1234/1".format(APIURL),
body=doc,
status=200,
content_type="application/xml",
)
d = get_client()
assert doc == d.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_401():
responses.add(
responses.GET,
"{0}metadata/10.1234/1".format(APIURL),
body="Unauthorized",
status=401,
)
d = get_client()
with pytest.raises(DataCiteUnauthorizedError):
d.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_403():
responses.add(
responses.GET,
"{0}metadata/10.1234/1".format(APIURL),
body="Forbidden",
status=403,
)
d = get_client()
with pytest.raises(DataCiteForbiddenError):
d.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_404():
responses.add(
responses.GET,
"{0}metadata/10.1234/1".format(APIURL),
body="Not Found",
status=404,
)
d = get_client()
with pytest.raises(DataCiteNotFoundError):
d.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_410():
responses.add(
responses.GET,
"{0}metadata/10.1234/1".format(APIURL),
body="Gone",
status=410,
)
d = get_client()
with pytest.raises(DataCiteGoneError):
d.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_500():
responses.add(
responses.GET,
"{0}metadata/10.1234/1".format(APIURL),
body="Internal Server Error",
status=500,
)
d = get_client()
with pytest.raises(DataCiteServerError):
d.metadata_get("10.1234/1")
| true | true |
f72676b4fc76edae6f6177c02163528b28ab531e | 7,484 | py | Python | app/states/load.py | jchrisfarris/antiope-scorecards | 82a1e228f4bd23f756c1dec8c0582fcde98de564 | [
"Apache-2.0"
] | 1 | 2020-09-23T21:40:16.000Z | 2020-09-23T21:40:16.000Z | app/states/load.py | jchrisfarris/antiope-scorecards | 82a1e228f4bd23f756c1dec8c0582fcde98de564 | [
"Apache-2.0"
] | null | null | null | app/states/load.py | jchrisfarris/antiope-scorecards | 82a1e228f4bd23f756c1dec8c0582fcde98de564 | [
"Apache-2.0"
] | 3 | 2020-07-11T19:18:12.000Z | 2021-08-14T17:43:06.000Z | import json
import os
import boto3
import yaml
from lib.dynamodb import accounts_table, requirements_table, user_table, config_table
from lib.lambda_decorator.decorator import states_decorator
client_s3 = boto3.client('s3')
user_bucket = os.getenv('USER_BUCKET')
account_bucket = os.getenv('ACCOUNT_BUCKET')
requirements_bucket = os.getenv('REQUIREMENTS_BUCKET')
@states_decorator
def load_handler(event, context):
    """Import the user, account, and requirements files from S3.

    Returns a summary used by later steps of the scan state machine:
    account ids, payer account ids, requirement ids sourced via s3Import,
    and the cloudsploit settings map.

    Expected input event format: {}
    """
    account_records = load_accounts()
    load_user()
    requirement_data = load_requirements()
    account_ids = {record['accountId'] for record in account_records}
    payer_ids = {
        record.get('payer_id')
        for record in account_records
        if record.get('payer_id')
    }
    s3_requirement_ids = {
        requirement_id
        for requirement_id, requirement in requirement_data['requirements'].items()
        if requirement.get('source') == 's3Import'
    }
    return {
        'accountIds': list(account_ids),
        'payerIds': list(payer_ids),
        's3RequirementIds': list(s3_requirement_ids),
        'cloudsploitSettingsMap': requirement_data['cloudsploitSettingsMap'],
    }
def load_accounts():
    """Synchronize the accounts table with the account list stored in S3.

    Accounts missing from S3 are deleted; new or changed accounts are
    written. Returns the (normalized) account list loaded from S3.
    """
    response = client_s3.get_object(Bucket=account_bucket, Key=os.getenv('ACCOUNT_FILE_PATH'))
    incoming_list = json.loads(response['Body'].read())['accounts']
    for record in incoming_list:
        accounts_table.normalize_account_record(record)
    incoming = {record['accountId']: record for record in incoming_list}
    existing = {record['accountId']: record for record in accounts_table.scan_all()}
    # accounts in dynamo but no longer in S3 get removed
    stale_ids = [aid for aid in existing if aid not in incoming]
    # changed accounts first (dynamo scan order), then brand new ones
    records_to_write = [
        incoming[aid]
        for aid in existing
        if aid in incoming and existing[aid] != incoming[aid]
    ]
    records_to_write += [
        record for aid, record in incoming.items() if aid not in existing
    ]
    with accounts_table.batch_writer() as batch:
        for aid in stale_ids:
            batch.delete_item(Key={'accountId': aid})
        for record in records_to_write:
            batch.put_item(Item=record)
    return incoming_list
def load_user():
    """Sync the users table with the user list stored in S3.

    Admin status is managed outside the S3 file, so it is preserved for
    existing users: an updated user keeps their isAdmin flag, and an admin
    removed from S3 is kept as a minimal {email, isAdmin} record instead of
    being deleted. Returns the raw user list loaded from S3.
    """
    user_emails_to_delete = []
    users_to_add = []
    s3_response = client_s3.get_object(Bucket=user_bucket, Key=os.getenv('USER_FILE_PATH'))
    user_list_from_s3 = json.loads(s3_response['Body'].read())
    # index the S3 users by lower-cased email (normalized in place)
    users_from_s3 = {}
    for user in user_list_from_s3:
        user['email'] = user['email'].lower()
        users_from_s3[user['email']] = user
    ddb_data = user_table.scan_all()
    users_from_ddb = {user['email']: user for user in ddb_data}
    for user_email, existing_user in users_from_ddb.items():
        if user_email in users_from_s3:
            if existing_user != users_from_s3[user_email]:
                if existing_user.get('isAdmin', False):
                    # update incoming user, carrying over the admin flag
                    users_to_add.append(dict(
                        users_from_s3[user_email],
                        **{
                            'isAdmin': existing_user.get('isAdmin'),
                        }))
                else:
                    users_to_add.append(users_from_s3[user_email])
        else:
            if existing_user.get('isAdmin', False):
                # admin no longer in S3: keep a minimal record, don't delete
                users_to_add.append({
                    'email': existing_user.get('email'),
                    'isAdmin': existing_user.get('isAdmin'),
                })
            else:
                user_emails_to_delete.append(user_email)
    # brand new users that only exist in S3
    for user_email in users_from_s3:
        if user_email not in users_from_ddb:
            users_to_add.append(users_from_s3[user_email])
    with user_table.batch_writer() as batch:
        for user_email in user_emails_to_delete:
            batch.delete_item(Key={'email': user_email})
        for user in users_to_add:
            batch.put_item(Item=user)
    return user_list_from_s3
def load_requirements():
    """Load the requirements YAML from S3 and refresh the derived config.

    Denormalizes each requirement (embedding its id and severity weight),
    syncs the requirements table, and stores the auxiliary sections in the
    config table. Returns the requirements plus the cloudsploit settings map.
    """
    response = client_s3.get_object(
        Bucket=requirements_bucket, Key=os.getenv('REQUIREMENTS_FILE_PATH')
    )
    parsed = yaml.safe_load(response['Body'].read())
    # pull the sections out in the same order the original code accessed them
    cloudsploit_settings = parsed['cloudsploitSettings']
    severity_weights = parsed['severityWeightings']
    exclusion_types = parsed['exclusionTypes']
    version = parsed['version']
    severity_colors = parsed['severityColors']
    remediations = parsed['remediations']
    requirements = parsed['database']
    # denormalize: embed the id and severity weight in each requirement record
    for requirement_id, requirement in requirements.items():
        requirement['requirementId'] = requirement_id
        requirement['weight'] = severity_weights[requirement['severity']]
    update_requirements(requirements)
    update_exclusion_types(exclusion_types)
    update_version(version)
    update_severity_colors(severity_colors)
    update_severity_weights(severity_weights)
    update_remediations(remediations)
    return {
        'requirements': requirements,
        'cloudsploitSettingsMap': cloudsploit_settings,
    }
def update_requirements(requirements):
    """Synchronize the requirements table with *requirements* (id -> record)."""
    stored = {
        record['requirementId']: record
        for record in requirements_table.scan_all()
    }
    # requirements in dynamo but no longer in the file get removed
    obsolete_ids = [rid for rid in stored if rid not in requirements]
    # changed requirements first (dynamo scan order), then brand new ones
    records_to_write = [
        requirements[rid]
        for rid in stored
        if rid in requirements and stored[rid] != requirements[rid]
    ]
    records_to_write += [
        record for rid, record in requirements.items() if rid not in stored
    ]
    with requirements_table.batch_writer() as batch:
        for rid in obsolete_ids:
            batch.delete_item(Key={'requirementId': rid})
        for record in records_to_write:
            batch.put_item(Item=record)
def update_version(version):
    """Store the requirements file version in the config table."""
    config_table.set_config(config_table.VERSION, version)
def update_exclusion_types(exclusions):
    """Store the exclusion type definitions in the config table."""
    config_table.set_config(config_table.EXCLUSIONS, exclusions)
def update_severity_colors(severity_colors):
    """Store the severity -> display color map in the config table."""
    config_table.set_config(config_table.SEVERITYCOLORS, severity_colors)
def update_severity_weights(severity_weight_map):
    """Store the severity -> score weight map in the config table."""
    config_table.set_config(config_table.SEVERITYWEIGHTS, severity_weight_map)
def update_remediations(remediations):
    """Store the remediation definitions in the config table."""
    config_table.set_config(config_table.REMEDIATIONS, remediations)
| 36.686275 | 126 | 0.704436 | import json
import os
import boto3
import yaml
from lib.dynamodb import accounts_table, requirements_table, user_table, config_table
from lib.lambda_decorator.decorator import states_decorator
client_s3 = boto3.client('s3')
user_bucket = os.getenv('USER_BUCKET')
account_bucket = os.getenv('ACCOUNT_BUCKET')
requirements_bucket = os.getenv('REQUIREMENTS_BUCKET')
@states_decorator
def load_handler(event, context):
accounts = load_accounts()
load_user()
requirements = load_requirements()
return {
'accountIds': list({a['accountId'] for a in accounts}),
'payerIds': list({a.get('payer_id') for a in accounts if a.get('payer_id')}),
's3RequirementIds': list({r_id for r_id, r in requirements['requirements'].items() if r.get('source') == 's3Import'}),
'cloudsploitSettingsMap': requirements['cloudsploitSettingsMap']
}
def load_accounts():
account_ids_to_delete = []
accounts_to_add = []
s3_response = client_s3.get_object(Bucket=account_bucket, Key=os.getenv('ACCOUNT_FILE_PATH'))
account_list_from_s3 = json.loads(s3_response['Body'].read())['accounts']
for account in account_list_from_s3:
accounts_table.normalize_account_record(account)
accounts_from_s3 = {account['accountId']: account for account in account_list_from_s3}
ddb_data = accounts_table.scan_all()
accounts_from_ddb = {account['accountId']: account for account in ddb_data}
for account_id in accounts_from_ddb:
if account_id in accounts_from_s3:
if accounts_from_ddb[account_id] != accounts_from_s3[account_id]:
accounts_to_add.append(accounts_from_s3[account_id])
else:
account_ids_to_delete.append(account_id)
for account_id in accounts_from_s3:
if account_id not in accounts_from_ddb:
accounts_to_add.append(accounts_from_s3[account_id])
with accounts_table.batch_writer() as batch:
for account_id in account_ids_to_delete:
batch.delete_item(Key={'accountId': account_id})
for account in accounts_to_add:
batch.put_item(Item=account)
return account_list_from_s3
def load_user():
user_emails_to_delete = []
users_to_add = []
s3_response = client_s3.get_object(Bucket=user_bucket, Key=os.getenv('USER_FILE_PATH'))
user_list_from_s3 = json.loads(s3_response['Body'].read())
users_from_s3 = {}
for user in user_list_from_s3:
user['email'] = user['email'].lower()
users_from_s3[user['email']] = user
ddb_data = user_table.scan_all()
users_from_ddb = {user['email']: user for user in ddb_data}
for user_email, existing_user in users_from_ddb.items():
if user_email in users_from_s3:
if existing_user != users_from_s3[user_email]:
if existing_user.get('isAdmin', False):
users_to_add.append(dict(
users_from_s3[user_email],
**{
'isAdmin': existing_user.get('isAdmin'),
}))
else:
users_to_add.append(users_from_s3[user_email])
else:
if existing_user.get('isAdmin', False):
users_to_add.append({
'email': existing_user.get('email'),
'isAdmin': existing_user.get('isAdmin'),
})
else:
user_emails_to_delete.append(user_email)
for user_email in users_from_s3:
if user_email not in users_from_ddb:
users_to_add.append(users_from_s3[user_email])
with user_table.batch_writer() as batch:
for user_email in user_emails_to_delete:
batch.delete_item(Key={'email': user_email})
for user in users_to_add:
batch.put_item(Item=user)
return user_list_from_s3
def load_requirements():
s3_response = client_s3.get_object(Bucket=requirements_bucket, Key=os.getenv('REQUIREMENTS_FILE_PATH'))
requirements_file = yaml.safe_load(s3_response['Body'].read())
cloudsploit_settings_map = requirements_file['cloudsploitSettings']
severity_weight_map = requirements_file['severityWeightings']
exclusion_types = requirements_file['exclusionTypes']
version = requirements_file['version']
severity_colors = requirements_file['severityColors']
remediations = requirements_file['remediations']
requirements = requirements_file['database']
for requirement_id, requirement in requirements.items():
requirement['requirementId'] = requirement_id
requirement['weight'] = severity_weight_map[requirement['severity']]
update_requirements(requirements)
update_exclusion_types(exclusion_types)
update_version(version)
update_severity_colors(severity_colors)
update_severity_weights(severity_weight_map)
update_remediations(remediations)
return {
'requirements': requirements,
'cloudsploitSettingsMap': cloudsploit_settings_map,
}
def update_requirements(requirements):
requirement_ids_to_delete = []
reqs_to_add = []
ddb_data = requirements_table.scan_all()
requirements_from_ddb = {requirement['requirementId']: requirement for requirement in ddb_data}
for requirement_id in requirements_from_ddb:
if requirement_id in requirements:
if requirements_from_ddb[requirement_id] != requirements[requirement_id]:
reqs_to_add.append(requirements[requirement_id])
else:
requirement_ids_to_delete.append(requirement_id)
for requirement_id in requirements:
if requirement_id not in requirements_from_ddb:
reqs_to_add.append(requirements[requirement_id])
with requirements_table.batch_writer() as batch:
for requirement_id in requirement_ids_to_delete:
batch.delete_item(Key={'requirementId': requirement_id})
for requirement in reqs_to_add:
batch.put_item(Item=requirement)
def update_version(version):
config_table.set_config(config_table.VERSION, version)
def update_exclusion_types(exclusions):
config_table.set_config(config_table.EXCLUSIONS, exclusions)
def update_severity_colors(severity_colors):
config_table.set_config(config_table.SEVERITYCOLORS, severity_colors)
def update_severity_weights(severity_weight_map):
config_table.set_config(config_table.SEVERITYWEIGHTS, severity_weight_map)
def update_remediations(remediations):
config_table.set_config(config_table.REMEDIATIONS, remediations)
| true | true |
f72676d4da3438923201ee65422d31a01243108f | 25,485 | py | Python | pytext/trainers/trainer.py | suo/pytext | 400c80b4c040de12028970a85ce0af864931e0f4 | [
"BSD-3-Clause"
] | null | null | null | pytext/trainers/trainer.py | suo/pytext | 400c80b4c040de12028970a85ce0af864931e0f4 | [
"BSD-3-Clause"
] | null | null | null | pytext/trainers/trainer.py | suo/pytext | 400c80b4c040de12028970a85ce0af864931e0f4 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import time
from contextlib import ExitStack as contextlib_ExitStack
from typing import Any, Iterable, List, Optional, Tuple
import torch
from pytext.common.constants import BatchContext, Stage
from pytext.config import PyTextConfig
from pytext.config.component import (
Component,
ComponentType,
create_optimizer,
create_scheduler,
create_sparsifier,
)
from pytext.config.pytext_config import ConfigBase
from pytext.data.data_handler import BatchIterator
from pytext.metric_reporters import MetricReporter
from pytext.models.distributed_model import DistributedModel
from pytext.models.model import Model
from pytext.optimizer import Adam, Optimizer, learning_rates
from pytext.optimizer.scheduler import Scheduler
from pytext.optimizer.sparsifier import Sparsifier
from pytext.task.serialize import save
from pytext.trainers.training_state import TrainingState
from pytext.utils import cuda, precision, timing
class TrainerBase(Component):
__COMPONENT_TYPE__ = ComponentType.TRAINER
def cycle(iterator: Iterable[Any]) -> Iterable[Any]:
"""Like itertools.cycle, but will call iter on the original iterable instead.
This limits it to not be able to run on say raw generators, but also doesn't
store a copy of the iterable in memory for repetition."""
while True:
yield from iterator
def maybe_accumulate_gradients(exit_stack, model, index, sample_size):
# index == sample_size - 1 represents the last backward pass
if (
cuda.DISTRIBUTED_WORLD_SIZE > 1
and hasattr(model, "no_sync")
and index < sample_size - 1
):
"""
Whenever *samples* contains more than one mini-batch (e.g sample_size > 1),
we want to accumulate gradients locally and only call all-reduce in the
last backwards pass.
"""
exit_stack.enter_context(model.no_sync())
if precision._FP16_ENABLED and index < sample_size - 1:
"""
Whenever *samples* contains more than one mini-batch (e.g sample_size > 1),
we want to accumulate gradients in FP16 parameters (e.g delay unscale)
and only unscale to FP32 parameters after the last backward pass.
"""
exit_stack.enter_context(precision.delay_unscale())
class Trainer(TrainerBase):
"""
Base Trainer class that provide ways to
1 Train model, compute metrics against eval set and use the metrics for
model selection.
2 Test trained model, compute and publish metrics against a blind test set.
Attributes:
epochs (int): Training epochs
early_stop_after (int): Stop after how many epochs when the eval metric
is not improving
max_clip_norm (Optional[float]): Clip gradient norm if set
report_train_metrics (bool): Whether metrics on training data should be
computed and reported.
target_time_limit_seconds (float): Target time limit for training in seconds. If
the expected time to train another epoch exceeds this limit, stop training.
"""
class Config(ConfigBase):
#: Training epochs
epochs: int = 10
#: Stop after how many epochs when the eval metric is not improving
early_stop_after: int = 0
#: Clip gradient norm if set
max_clip_norm: Optional[float] = None
#: Whether metrics on training data should be computed and reported.
report_train_metrics: bool = True
#: Target time limit for training, default (None) to no time limit.
target_time_limit_seconds: Optional[int] = None
#: Whether to do evaluation and model selection based on it.
do_eval: bool = True
#: Number of samples for logging training progress.
num_samples_to_log_progress: int = 1000
#: Number of forward & backward per batch before update gradients, the
#: actual_batch_size = batch_size x num_accumulated_batches
num_accumulated_batches: int = 1
#: Define epoch as a fixed number of batches. Subsequent epochs will continue
#: to iterate through the data, cycling through it when they reach the end.
#: If not set, use exactly one pass through the dataset as one epoch.
#: This configuration only affects the train epochs, test and eval
#: will always test their entire datasets.
num_batches_per_epoch: Optional[int] = None
#: config for optimizer, used in parameter update
optimizer: Optimizer.Config = Adam.Config()
scheduler: Optional[Scheduler.Config] = None
sparsifier: Optional[Sparsifier.Config] = None
def __init__(self, config: Config, model: torch.nn.Module):
if config.early_stop_after > 0:
assert config.do_eval, "can't do early stopping when not running evalution"
optimizer: torch.optim.Optimizer = create_optimizer(config.optimizer, model)
self.scheduler: torch.optim.lr_scheduler = (
create_scheduler(config.scheduler, optimizer)
if config.scheduler
else Scheduler()
)
self.sparsifier: Sparsifier = (
create_sparsifier(config.sparsifier) if config.sparsifier else Sparsifier()
)
model, self.optimizer = precision.initialize(model, optimizer)
self.config = config
@classmethod
def from_config(cls, config: Config, model: torch.nn.Module, *args, **kwargs):
return cls(config, model)
@timing.time("Trainer.test")
def test(self, test_iter, model, metric_reporter: MetricReporter):
state = TrainingState(stage=Stage.TEST, model=model, epoch=1)
if cuda.CUDA_ENABLED:
state.model.cuda()
state.model.eval()
with torch.no_grad():
return self.run_epoch(state, test_iter, metric_reporter)
@timing.time("pre-training")
def set_up_training(self, state: TrainingState, training_data: BatchIterator):
if cuda.CUDA_ENABLED:
state.model.cuda()
state.scheduler.prepare(training_data, self.config.epochs)
if cuda.DISTRIBUTED_WORLD_SIZE > 1:
device_id = torch.cuda.current_device()
state.model = DistributedModel(
module=state.model,
device_ids=[device_id],
output_device=device_id,
broadcast_buffers=False,
find_unused_parameters=state.model.find_unused_parameters,
)
state.start_time = time.time()
if self.config.num_batches_per_epoch:
# Set the training_data iterator to cycle, so it will never run out,
# but rather after reaching the end will loop back to the beginning.
training_data = cycle(training_data)
return training_data
@timing.time("zero gradients")
def zero_grads(self, state):
if state.stage != Stage.TRAIN:
return
state.optimizer.zero_grad()
@timing.time("backprop")
def backprop(self, state, loss):
if state.stage != Stage.TRAIN:
return
with timing.time("loss.backward"):
precision.backward(state.optimizer, loss)
@timing.time("optimizer")
def optimizer_step(self, state):
if state.stage != Stage.TRAIN:
return
state.scheduler.step_batch()
if self.config.max_clip_norm is not None:
grad_norm = precision.clip_grad_norm(
state.model, state.optimizer, self.config.max_clip_norm
)
else:
grad_norm = None
with timing.time("optimizer.step"):
state.optimizer.step()
state.step_counter += 1
# grad_norm could be used to check grads sync in distributed training
return grad_norm
@timing.time("sparsifier")
def sparsification_step(self, state):
# sparsification only if sparifier is used
if not self.config.sparsifier:
return
if state.stage != Stage.TRAIN:
return
if state.sparsifier.sparsification_condition(state):
state.sparsifier.sparsify(state)
if state.rank == 0:
current_sparsity = state.sparsifier.get_current_sparsity(state.model)
print(f"sparsity in the model: {current_sparsity}")
def continue_training(self, state: TrainingState) -> bool:
# Are we done?
if state.epoch >= self.config.epochs:
return False
# Check whether the model has improved recently enough
# Only do this if we're bothering to evaluate the model
if self.config.do_eval and state.epochs_since_last_improvement >= (
self.config.early_stop_after or float("inf")
):
print(
f"Worker {state.rank}: Eval metric hasn't changed for "
+ f"{state.epochs_since_last_improvement} epochs. Stopping now."
)
return False
# Check whether we think the next epoch will put us over the configured
# time limit.
epochs_run = state.epoch + 1
time_elapsed = time.time() - state.start_time
mean_epoch_time = time_elapsed / epochs_run
expected_next_epoch_time = time_elapsed + mean_epoch_time
target_time_limit = (
float("inf")
if self.config.target_time_limit_seconds is None
else self.config.target_time_limit_seconds
)
if expected_next_epoch_time > target_time_limit:
print(
f"Worker {state.rank}: Stopping training after {epochs_run} epochs "
f"and {int(time_elapsed)} seconds, due to the target max training "
f"time of {self.config.target_time_limit_seconds} seconds."
)
return False
return True
def update_best_model(
self, state: TrainingState, train_config: PyTextConfig, eval_metric
):
# This should be updated by all workers so they agree on when to stop training
# when `early_stop_after` is specified.
state.epochs_since_last_improvement = 0
state.best_model_metric = eval_metric
print(f"Found a better model!")
# Only one worker should save checkpoints
if state.rank != 0:
return
model_state = state.model.state_dict()
# save to cpu to avoid multiple model copies in gpu memory
if cuda.CUDA_ENABLED:
for key, parameter in model_state.items():
model_state[key] = parameter.cpu()
state.best_model_state = model_state
@timing.time("save checkpoint")
def save_checkpoint(self, state: TrainingState, train_config: PyTextConfig) -> str:
# Only one worker should save checkpoints
if state.rank != 0:
return
if train_config.save_module_checkpoints or train_config.save_all_checkpoints:
# saves per-epoch sub-modules when save_all_checkpoints or
# save_module_checkpoints is enabled
state.model.save_modules(
base_path=train_config.modules_save_dir, suffix=f"-ep{state.epoch}"
)
if state.epochs_since_last_improvement == 0:
# state.epochs_since_last_improvement == 0 means found a better
# model in current epoch, thus update best model's sub-modules
state.model.save_modules(base_path=train_config.modules_save_dir)
# next to add new config and implementation of frequency on checkpointing
if train_config.save_all_checkpoints:
return save(
config=train_config,
model=state.model,
meta=None,
tensorizers=None,
training_state=state,
identifier=str(state.epoch),
)
def load_best_model(self, state: TrainingState):
if cuda.CUDA_ENABLED:
# Move current model to CPU to avoid multiple models in GPU memory
state.model.cpu()
state.model.load_state_dict(
{k: v.cuda() for k, v in state.best_model_state.items()}
)
# Move model back to GPU
state.model.cuda()
else:
state.model.load_state_dict(state.best_model_state)
def train(
self,
training_data: BatchIterator,
eval_data: BatchIterator,
model: Model,
metric_reporter: MetricReporter,
train_config: PyTextConfig,
rank: int = 0,
) -> Tuple[torch.nn.Module, Any]:
"""
Train and eval a model, the model states will be modified.
Args:
train_iter (BatchIterator): batch iterator of training data
eval_iter (BatchIterator): batch iterator of evaluation data
model (Model): model to be trained
metric_reporter (MetricReporter): compute metric based on training
output and report results to console, file.. etc
train_config (PyTextConfig): training config
training_result (Optional): only meaningful for Hogwild training. default
is None
rank (int): only used in distributed training, the rank of the current
training thread, evaluation will only be done in rank 0
Returns:
model, best_metric: the trained model together with the best metric
"""
state = TrainingState(
model=model,
optimizer=self.optimizer,
scheduler=self.scheduler,
sparsifier=self.sparsifier,
rank=rank,
)
return self.train_from_state(
state, training_data, eval_data, metric_reporter, train_config
)
@timing.time("Trainer.train_from_state")
def train_from_state(
self,
state: TrainingState,
training_data: BatchIterator,
eval_data: BatchIterator,
metric_reporter: MetricReporter,
train_config: PyTextConfig,
) -> Tuple[torch.nn.Module, Any]:
"""
Train and eval a model from a given training state will be modified.
This function iterates epochs specified in config, and for each epoch do:
1. Train model using training data, aggregate and report training results
2. Adjust learning rate if scheduler is specified
3. Evaluate model using evaluation data
4. Calculate metrics based on evaluation results and select best model
Args:
training_state (TrainingState): contrains stateful information to be
able to restore a training job
train_iter (BatchIterator): batch iterator of training data
eval_iter (BatchIterator): batch iterator of evaluation data
model (Model): model to be trained
metric_reporter (MetricReporter): compute metric based on training
output and report results to console, file.. etc
train_config (PyTextConfig): training config
Returns:
model, best_metric: the trained model together with the best metric
"""
training_data = self.set_up_training(state, training_data)
model = state.model
rank = state.rank
trainable_params = sum(
p.numel() for p in state.model.parameters() if p.requires_grad
)
print(f"Num trainable parameters: {trainable_params}")
while self.continue_training(state):
state.epoch += 1
state.epochs_since_last_improvement += 1
lrs = learning_rates(state.optimizer)
print(f"\nWorker {state.rank} starting epoch {state.epoch}")
print(f"Learning rate(s): {', '.join(map(str, lrs))}")
with timing.time("train epoch"):
state.stage = Stage.TRAIN
state.model.train()
print(f"start training epoch {state.epoch}")
epoch_data = training_data
if self.config.num_batches_per_epoch:
# We want to limit the number of batches in the epoch;
# equivalent to epoch_data[:num_batches_per_epoch] for iterators.
# In this case we set the training data iterator to cycle earlier
# in the training process, so when it reaches the end it will
# loop back to the beginning.
epoch_data = itertools.islice(
epoch_data, self.config.num_batches_per_epoch
)
self.run_epoch(state, epoch_data, metric_reporter)
if not self.config.do_eval:
continue
with timing.time("eval epoch"):
state.stage = Stage.EVAL
model.eval(Stage.EVAL)
print(f"start evaluating epoch {state.epoch}")
with torch.no_grad():
eval_metric = self.run_epoch(state, eval_data, metric_reporter)
# Step the learning rate scheduler(s)
assert eval_metric is not None
state.scheduler.step_epoch(
metrics=metric_reporter.get_model_select_metric(eval_metric),
epoch=state.epoch,
)
# Did we train a better model?
better_model = metric_reporter.compare_metric(
eval_metric, state.best_model_metric
)
if better_model:
self.update_best_model(state, train_config, eval_metric)
if better_model or train_config.save_all_checkpoints:
self.save_checkpoint(state, train_config)
if self.optimizer.finalize():
state.stage = Stage.EVAL
model.eval(Stage.EVAL)
print(f"start evaluating finalized state")
with torch.no_grad():
eval_metric = self.run_epoch(state, eval_data, metric_reporter)
better_model = metric_reporter.compare_metric(
eval_metric, state.best_model_metric
)
if better_model:
self.update_best_model(state, train_config, eval_metric)
if better_model or train_config.save_all_checkpoints:
self.save_checkpoint(state, train_config)
# Only bother loading the best model for master worker
if rank == 0 and state.best_model_state is not None:
self.load_best_model(state)
return state.model, state.best_model_metric
@timing.report_snapshot
def run_epoch(
self, state: TrainingState, data: BatchIterator, metric_reporter: MetricReporter
):
# This method is due for some refactoring, pushing it off because it interacts
# with the metric reporter too much. Much of the logic here either changes in
# the NewTaskTrainer or should change with a better metric reporter design.
report_metric = state.stage != Stage.TRAIN or self.config.report_train_metrics
model = state.model
samples = []
"""
Sometimes, a batch of inputs is too large to fit into GPU, which has to
be split into several micro-batches. However, to improve efficiency,
it would be helpful to only apply params/gradients sync at original batch
boundaries instead of micro-batch boundaries.
num_accumulated_batches specified the number of accumulating gradients
locally before sync gradients, total training_batch_size =
train_batch_size x num_accumulated_batches and it will improve the system
performance by reduce the total network transfer bytes.
"""
for sample in enumerate(data):
samples.append(sample)
if (
state.stage != Stage.TRAIN
or len(samples) == self.config.num_accumulated_batches
):
self.run_step(samples, state, metric_reporter, report_metric)
samples = []
if samples:
self.run_step(samples, state, metric_reporter, report_metric)
samples = []
metrics = None
if report_metric:
with timing.time("report metrics"):
metrics = metric_reporter.report_metric(
model, state.stage, state.epoch, print_to_channels=(state.rank == 0)
)
else:
metric_reporter._reset()
return metrics
@timing.time("run_step")
def run_step(
self,
samples: List[Any],
state: TrainingState,
metric_reporter: MetricReporter,
report_metric: bool,
):
sample_size = len(samples)
assert sample_size <= self.config.num_accumulated_batches
model = state.model
self.zero_grads(state)
for idx, (batch_id, (inputs, targets, context)) in enumerate(samples):
with contextlib_ExitStack() as exit_stack:
maybe_accumulate_gradients(exit_stack, model, idx, sample_size)
# pass context to model to use in forward call if needed
model.contextualize(context)
with timing.time("model.forward"):
logits = model(*inputs)
with timing.time("compute loss"):
loss = precision.maybe_float(
model.get_loss(logits, targets, context)
)
if BatchContext.IGNORE_LOSS in context:
loss *= 0
elif sample_size > 1:
# gradients averaged per batch and accumulated across samples.
# divide sample_size to let gradients averaged per example
loss = loss / sample_size
self.backprop(state, loss)
if report_metric:
with timing.time("get pred"):
preds, scores = model.get_pred(
logits, targets, context, state.stage, *inputs
)
with timing.time("add metrics"):
metric_reporter.add_batch_stats(
batch_id, preds, targets, scores, loss.item(), inputs, **context
)
if batch_id % self.config.num_samples_to_log_progress == 0:
print(
f"Running batch {batch_id} for epoch {state.epoch} in {state.stage} stage",
flush=True,
)
# update gradients after len(samples) forward & backward
self.optimizer_step(state)
self.sparsification_step(state)
class TaskTrainer(Trainer):
__EXPANSIBLE__ = True
class Config(Trainer.Config):
"""Make mypy happy"""
@timing.time("run_step")
def run_step(
self,
samples: List[Any],
state: TrainingState,
metric_reporter: MetricReporter,
report_metric: bool,
):
"""Our run_step is a bit different, because we're wrapping the model forward
call with model.train_batch, which arranges tensors and gets loss, etc.
Whenever "samples" contains more than one mini-batch (sample_size > 1),
we want to accumulate gradients locally and only call all-reduce in the
last backwards pass.
"""
sample_size = len(samples)
assert sample_size <= self.config.num_accumulated_batches
model = state.model
self.zero_grads(state)
for idx, (batch_id, (raw_batch, batch)) in enumerate(samples):
with contextlib_ExitStack() as exit_stack:
# enter ddp no_sync context and fp16 delay_scale context if needed
maybe_accumulate_gradients(exit_stack, model, idx, sample_size)
with timing.time("model.train_batch"):
loss, metric_data = model.train_batch(model, batch, state)
if sample_size > 1:
# gradients averaged per batch and accumulated across samples.
# divide sample_size to let gradients averaged per example
loss = loss / sample_size
self.backprop(state, loss)
if report_metric:
with timing.time("add metrics"):
metric_reporter.add_batch_stats(
batch_id,
*metric_data,
# TODO merge this step into add_batch_stats once all data
# migration is done
**metric_reporter.batch_context(raw_batch, batch),
)
if batch_id % self.config.num_samples_to_log_progress == 0:
metric_reporter.report_realtime_metric(state.stage)
# update gradients after #len(samples) forward & backward
self.optimizer_step(state)
self.sparsification_step(state)
def _prepare_scheduler(self, training_batches, scheduler=None):
"""Batch based schedulers require knowing the number of batches in
the data. We're not supporting that yet with the Data api, need to figure out
how to expose this info or restructure batch-based schedulers to not need it."""
if scheduler.batch_based_schedulers:
raise Exception("New tasks don't yet support batch-based scheduling")
return scheduler
| 41.371753 | 95 | 0.626368 |
import itertools
import time
from contextlib import ExitStack as contextlib_ExitStack
from typing import Any, Iterable, List, Optional, Tuple
import torch
from pytext.common.constants import BatchContext, Stage
from pytext.config import PyTextConfig
from pytext.config.component import (
Component,
ComponentType,
create_optimizer,
create_scheduler,
create_sparsifier,
)
from pytext.config.pytext_config import ConfigBase
from pytext.data.data_handler import BatchIterator
from pytext.metric_reporters import MetricReporter
from pytext.models.distributed_model import DistributedModel
from pytext.models.model import Model
from pytext.optimizer import Adam, Optimizer, learning_rates
from pytext.optimizer.scheduler import Scheduler
from pytext.optimizer.sparsifier import Sparsifier
from pytext.task.serialize import save
from pytext.trainers.training_state import TrainingState
from pytext.utils import cuda, precision, timing
class TrainerBase(Component):
__COMPONENT_TYPE__ = ComponentType.TRAINER
def cycle(iterator: Iterable[Any]) -> Iterable[Any]:
while True:
yield from iterator
def maybe_accumulate_gradients(exit_stack, model, index, sample_size):
if (
cuda.DISTRIBUTED_WORLD_SIZE > 1
and hasattr(model, "no_sync")
and index < sample_size - 1
):
exit_stack.enter_context(model.no_sync())
if precision._FP16_ENABLED and index < sample_size - 1:
exit_stack.enter_context(precision.delay_unscale())
class Trainer(TrainerBase):
class Config(ConfigBase):
epochs: int = 10
early_stop_after: int = 0
max_clip_norm: Optional[float] = None
report_train_metrics: bool = True
target_time_limit_seconds: Optional[int] = None
do_eval: bool = True
num_samples_to_log_progress: int = 1000
num_accumulated_batches: int = 1
num_batches_per_epoch: Optional[int] = None
optimizer: Optimizer.Config = Adam.Config()
scheduler: Optional[Scheduler.Config] = None
sparsifier: Optional[Sparsifier.Config] = None
def __init__(self, config: Config, model: torch.nn.Module):
if config.early_stop_after > 0:
assert config.do_eval, "can't do early stopping when not running evalution"
optimizer: torch.optim.Optimizer = create_optimizer(config.optimizer, model)
self.scheduler: torch.optim.lr_scheduler = (
create_scheduler(config.scheduler, optimizer)
if config.scheduler
else Scheduler()
)
self.sparsifier: Sparsifier = (
create_sparsifier(config.sparsifier) if config.sparsifier else Sparsifier()
)
model, self.optimizer = precision.initialize(model, optimizer)
self.config = config
@classmethod
def from_config(cls, config: Config, model: torch.nn.Module, *args, **kwargs):
return cls(config, model)
@timing.time("Trainer.test")
def test(self, test_iter, model, metric_reporter: MetricReporter):
state = TrainingState(stage=Stage.TEST, model=model, epoch=1)
if cuda.CUDA_ENABLED:
state.model.cuda()
state.model.eval()
with torch.no_grad():
return self.run_epoch(state, test_iter, metric_reporter)
@timing.time("pre-training")
def set_up_training(self, state: TrainingState, training_data: BatchIterator):
if cuda.CUDA_ENABLED:
state.model.cuda()
state.scheduler.prepare(training_data, self.config.epochs)
if cuda.DISTRIBUTED_WORLD_SIZE > 1:
device_id = torch.cuda.current_device()
state.model = DistributedModel(
module=state.model,
device_ids=[device_id],
output_device=device_id,
broadcast_buffers=False,
find_unused_parameters=state.model.find_unused_parameters,
)
state.start_time = time.time()
if self.config.num_batches_per_epoch:
# Set the training_data iterator to cycle, so it will never run out,
# but rather after reaching the end will loop back to the beginning.
training_data = cycle(training_data)
return training_data
@timing.time("zero gradients")
def zero_grads(self, state):
if state.stage != Stage.TRAIN:
return
state.optimizer.zero_grad()
@timing.time("backprop")
def backprop(self, state, loss):
if state.stage != Stage.TRAIN:
return
with timing.time("loss.backward"):
precision.backward(state.optimizer, loss)
@timing.time("optimizer")
def optimizer_step(self, state):
if state.stage != Stage.TRAIN:
return
state.scheduler.step_batch()
if self.config.max_clip_norm is not None:
grad_norm = precision.clip_grad_norm(
state.model, state.optimizer, self.config.max_clip_norm
)
else:
grad_norm = None
with timing.time("optimizer.step"):
state.optimizer.step()
state.step_counter += 1
# grad_norm could be used to check grads sync in distributed training
return grad_norm
@timing.time("sparsifier")
def sparsification_step(self, state):
# sparsification only if sparifier is used
if not self.config.sparsifier:
return
if state.stage != Stage.TRAIN:
return
if state.sparsifier.sparsification_condition(state):
state.sparsifier.sparsify(state)
if state.rank == 0:
current_sparsity = state.sparsifier.get_current_sparsity(state.model)
print(f"sparsity in the model: {current_sparsity}")
def continue_training(self, state: TrainingState) -> bool:
# Are we done?
if state.epoch >= self.config.epochs:
return False
# Check whether the model has improved recently enough
# Only do this if we're bothering to evaluate the model
if self.config.do_eval and state.epochs_since_last_improvement >= (
self.config.early_stop_after or float("inf")
):
print(
f"Worker {state.rank}: Eval metric hasn't changed for "
+ f"{state.epochs_since_last_improvement} epochs. Stopping now."
)
return False
# Check whether we think the next epoch will put us over the configured
# time limit.
epochs_run = state.epoch + 1
time_elapsed = time.time() - state.start_time
mean_epoch_time = time_elapsed / epochs_run
expected_next_epoch_time = time_elapsed + mean_epoch_time
target_time_limit = (
float("inf")
if self.config.target_time_limit_seconds is None
else self.config.target_time_limit_seconds
)
if expected_next_epoch_time > target_time_limit:
print(
f"Worker {state.rank}: Stopping training after {epochs_run} epochs "
f"and {int(time_elapsed)} seconds, due to the target max training "
f"time of {self.config.target_time_limit_seconds} seconds."
)
return False
return True
def update_best_model(
self, state: TrainingState, train_config: PyTextConfig, eval_metric
):
# This should be updated by all workers so they agree on when to stop training
# when `early_stop_after` is specified.
state.epochs_since_last_improvement = 0
state.best_model_metric = eval_metric
print(f"Found a better model!")
# Only one worker should save checkpoints
if state.rank != 0:
return
model_state = state.model.state_dict()
# save to cpu to avoid multiple model copies in gpu memory
if cuda.CUDA_ENABLED:
for key, parameter in model_state.items():
model_state[key] = parameter.cpu()
state.best_model_state = model_state
@timing.time("save checkpoint")
def save_checkpoint(self, state: TrainingState, train_config: PyTextConfig) -> str:
# Only one worker should save checkpoints
if state.rank != 0:
return
if train_config.save_module_checkpoints or train_config.save_all_checkpoints:
# saves per-epoch sub-modules when save_all_checkpoints or
# save_module_checkpoints is enabled
state.model.save_modules(
base_path=train_config.modules_save_dir, suffix=f"-ep{state.epoch}"
)
if state.epochs_since_last_improvement == 0:
# state.epochs_since_last_improvement == 0 means found a better
# model in current epoch, thus update best model's sub-modules
state.model.save_modules(base_path=train_config.modules_save_dir)
if train_config.save_all_checkpoints:
return save(
config=train_config,
model=state.model,
meta=None,
tensorizers=None,
training_state=state,
identifier=str(state.epoch),
)
def load_best_model(self, state: TrainingState):
if cuda.CUDA_ENABLED:
state.model.cpu()
state.model.load_state_dict(
{k: v.cuda() for k, v in state.best_model_state.items()}
)
state.model.cuda()
else:
state.model.load_state_dict(state.best_model_state)
def train(
self,
training_data: BatchIterator,
eval_data: BatchIterator,
model: Model,
metric_reporter: MetricReporter,
train_config: PyTextConfig,
rank: int = 0,
) -> Tuple[torch.nn.Module, Any]:
state = TrainingState(
model=model,
optimizer=self.optimizer,
scheduler=self.scheduler,
sparsifier=self.sparsifier,
rank=rank,
)
return self.train_from_state(
state, training_data, eval_data, metric_reporter, train_config
)
@timing.time("Trainer.train_from_state")
def train_from_state(
self,
state: TrainingState,
training_data: BatchIterator,
eval_data: BatchIterator,
metric_reporter: MetricReporter,
train_config: PyTextConfig,
) -> Tuple[torch.nn.Module, Any]:
training_data = self.set_up_training(state, training_data)
model = state.model
rank = state.rank
trainable_params = sum(
p.numel() for p in state.model.parameters() if p.requires_grad
)
print(f"Num trainable parameters: {trainable_params}")
while self.continue_training(state):
state.epoch += 1
state.epochs_since_last_improvement += 1
lrs = learning_rates(state.optimizer)
print(f"\nWorker {state.rank} starting epoch {state.epoch}")
print(f"Learning rate(s): {', '.join(map(str, lrs))}")
with timing.time("train epoch"):
state.stage = Stage.TRAIN
state.model.train()
print(f"start training epoch {state.epoch}")
epoch_data = training_data
if self.config.num_batches_per_epoch:
epoch_data = itertools.islice(
epoch_data, self.config.num_batches_per_epoch
)
self.run_epoch(state, epoch_data, metric_reporter)
if not self.config.do_eval:
continue
with timing.time("eval epoch"):
state.stage = Stage.EVAL
model.eval(Stage.EVAL)
print(f"start evaluating epoch {state.epoch}")
with torch.no_grad():
eval_metric = self.run_epoch(state, eval_data, metric_reporter)
assert eval_metric is not None
state.scheduler.step_epoch(
metrics=metric_reporter.get_model_select_metric(eval_metric),
epoch=state.epoch,
)
better_model = metric_reporter.compare_metric(
eval_metric, state.best_model_metric
)
if better_model:
self.update_best_model(state, train_config, eval_metric)
if better_model or train_config.save_all_checkpoints:
self.save_checkpoint(state, train_config)
if self.optimizer.finalize():
state.stage = Stage.EVAL
model.eval(Stage.EVAL)
print(f"start evaluating finalized state")
with torch.no_grad():
eval_metric = self.run_epoch(state, eval_data, metric_reporter)
better_model = metric_reporter.compare_metric(
eval_metric, state.best_model_metric
)
if better_model:
self.update_best_model(state, train_config, eval_metric)
if better_model or train_config.save_all_checkpoints:
self.save_checkpoint(state, train_config)
if rank == 0 and state.best_model_state is not None:
self.load_best_model(state)
return state.model, state.best_model_metric
@timing.report_snapshot
def run_epoch(
self, state: TrainingState, data: BatchIterator, metric_reporter: MetricReporter
):
report_metric = state.stage != Stage.TRAIN or self.config.report_train_metrics
model = state.model
samples = []
for sample in enumerate(data):
samples.append(sample)
if (
state.stage != Stage.TRAIN
or len(samples) == self.config.num_accumulated_batches
):
self.run_step(samples, state, metric_reporter, report_metric)
samples = []
if samples:
self.run_step(samples, state, metric_reporter, report_metric)
samples = []
metrics = None
if report_metric:
with timing.time("report metrics"):
metrics = metric_reporter.report_metric(
model, state.stage, state.epoch, print_to_channels=(state.rank == 0)
)
else:
metric_reporter._reset()
return metrics
    @timing.time("run_step")
    def run_step(
        self,
        samples: List[Any],
        state: TrainingState,
        metric_reporter: MetricReporter,
        report_metric: bool,
    ):
        """Run forward/backward over one gradient-accumulation group.

        Each entry of ``samples`` is ``(batch_id, (inputs, targets, context))``.
        Gradients are zeroed once, accumulated across the group, and applied
        with a single optimizer step at the end.
        """
        sample_size = len(samples)
        assert sample_size <= self.config.num_accumulated_batches
        model = state.model
        self.zero_grads(state)
        for idx, (batch_id, (inputs, targets, context)) in enumerate(samples):
            with contextlib_ExitStack() as exit_stack:
                # Suppress distributed gradient sync until the last batch of
                # the group (see maybe_accumulate_gradients).
                maybe_accumulate_gradients(exit_stack, model, idx, sample_size)
                model.contextualize(context)
                with timing.time("model.forward"):
                    logits = model(*inputs)
                with timing.time("compute loss"):
                    loss = precision.maybe_float(
                        model.get_loss(logits, targets, context)
                    )
                    if BatchContext.IGNORE_LOSS in context:
                        # Zero the loss (not skip it) so backward still runs
                        # and distributed workers stay in sync.
                        loss *= 0
                    elif sample_size > 1:
                        # Average so the accumulated gradient matches one
                        # large batch.
                        loss = loss / sample_size
                self.backprop(state, loss)
            if report_metric:
                with timing.time("get pred"):
                    preds, scores = model.get_pred(
                        logits, targets, context, state.stage, *inputs
                    )
                with timing.time("add metrics"):
                    metric_reporter.add_batch_stats(
                        batch_id, preds, targets, scores, loss.item(), inputs, **context
                    )
                if batch_id % self.config.num_samples_to_log_progress == 0:
                    print(
                        f"Running batch {batch_id} for epoch {state.epoch} in {state.stage} stage",
                        flush=True,
                    )
        self.optimizer_step(state)
        self.sparsification_step(state)
class TaskTrainer(Trainer):
    """Trainer for tasks whose model implements its own batch logic.

    Instead of the generic forward/get_loss/get_pred split in
    ``Trainer.run_step``, the model exposes ``train_batch(model, batch, state)``
    returning the loss plus the positional metric data to report.
    """

    __EXPANSIBLE__ = True

    class Config(Trainer.Config):
        """Inherits every field of ``Trainer.Config`` unchanged."""
        # NOTE: this docstring also serves as the class body; the original
        # source had an empty body here, which is a syntax error.

    @timing.time("run_step")
    def run_step(
        self,
        samples: List[Any],
        state: TrainingState,
        metric_reporter: MetricReporter,
        report_metric: bool,
    ):
        """Run forward/backward over one gradient-accumulation group.

        Each entry of ``samples`` is ``(batch_id, (raw_batch, batch))``.
        """
        sample_size = len(samples)
        assert sample_size <= self.config.num_accumulated_batches
        model = state.model
        self.zero_grads(state)
        for idx, (batch_id, (raw_batch, batch)) in enumerate(samples):
            with contextlib_ExitStack() as exit_stack:
                # Suppress distributed gradient sync until the last batch of
                # the group.
                maybe_accumulate_gradients(exit_stack, model, idx, sample_size)
                with timing.time("model.train_batch"):
                    loss, metric_data = model.train_batch(model, batch, state)
                    if sample_size > 1:
                        # Average so the accumulated gradient matches one
                        # large batch.
                        loss = loss / sample_size
                self.backprop(state, loss)
            if report_metric:
                with timing.time("add metrics"):
                    metric_reporter.add_batch_stats(
                        batch_id,
                        *metric_data,
                        **metric_reporter.batch_context(raw_batch, batch),
                    )
                if batch_id % self.config.num_samples_to_log_progress == 0:
                    metric_reporter.report_realtime_metric(state.stage)
        # Restored: this line was corrupted to "te)" in the source. The
        # optimizer step applies the gradients accumulated above.
        self.optimizer_step(state)
        self.sparsification_step(state)

    def _prepare_scheduler(self, training_batches, scheduler=None):
        """Validate the scheduler; batch-based schedulers are unsupported here.

        NOTE(review): the ``scheduler=None`` default would raise
        AttributeError on the attribute access below — callers appear to
        always pass a scheduler; confirm before relying on the default.
        """
        if scheduler.batch_based_schedulers:
            raise Exception("New tasks don't yet support batch-based scheduling")
        return scheduler
| true | true |
f7267815389b318d6a8e4cfe05c3808d59d955a5 | 6,214 | py | Python | fairseq/criterions/masked_adlm.py | a1600012888/fairseq | dbd2cd08fc396f919d2e737513095fcb966896c0 | [
"MIT"
] | null | null | null | fairseq/criterions/masked_adlm.py | a1600012888/fairseq | dbd2cd08fc396f919d2e737513095fcb966896c0 | [
"MIT"
] | null | null | null | fairseq/criterions/masked_adlm.py | a1600012888/fairseq | dbd2cd08fc396f919d2e737513095fcb966896c0 | [
"MIT"
] | 1 | 2020-04-01T03:31:00.000Z | 2020-04-01T03:31:00.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion('masked_adlm')
class MaskedAdLmLoss(FairseqCriterion):
    """
    Masked language model (MLM) loss with per-token adaptive margins.

    One learnable margin per vocabulary entry is kept in a registered buffer
    (saved with the model, never seen by the optimizer) and updated manually
    inside forward() with its own step size (--margin_lr) and an L1/L2
    penalty (--margin-norm, weighted by --margin_lambda). The margin of the
    target token is subtracted from its logit before the softmax.
    """
    def __init__(self, args, task):
        super(MaskedAdLmLoss, self).__init__(args, task)
        self.vocab = self.task.source_dictionary
        print(len(self.vocab.count))
        # One margin scalar per vocabulary entry, shape [V, 1]. Registered as
        # a buffer so it is checkpointed but excluded from optimization; it is
        # updated by hand in forward() under torch.no_grad().
        self.register_buffer('margins', torch.zeros((len(self.vocab.count), 1)))
        self.margins.requires_grad = False
        self.margin_lambda = args.margin_lambda  # weight of the margin penalty term
        self.margin_lr = args.margin_lr          # step size of the manual margin update
        self.margin_norm = args.margin_norm      # 'l1' or 'l2' penalty on margins
    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        super(MaskedAdLmLoss,
              MaskedAdLmLoss).add_args(parser)
        parser.add_argument('--margin_lambda', default=0.5, type=float, metavar='D',
                            help='weight for the adaptive margin loss')
        parser.add_argument('--margin_lr', default=0.0001, type=float, metavar='D',
                            help='weight for the adaptive margin loss')
        parser.add_argument('--margin-norm', default='l1', type=str,
                            help='Type of margin norm in the loss')
    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        # Only masked positions contribute to the MLM loss.
        masked_tokens = sample['target'].ne(self.padding_idx)
        sample_size = masked_tokens.int().sum().item()
        # (Rare case) When all tokens are masked, the model results in empty
        # tensor and gives CUDA error.
        if sample_size == 0:
            masked_tokens = None
        logits = model(**sample['net_input'], masked_tokens=masked_tokens)[0]
        targets = model.get_targets(sample, [logits])
        if sample_size != 0:
            targets = targets[masked_tokens]
        # targets shape: [x] (x = number of masked positions)
        # logits shape:  [x, V]
        one_hot = F.one_hot(targets, len(self.vocab.count)) # [x, V]
        # Look up each target's margin, then place it only at the target
        # column so it is subtracted from the target logit alone.
        m = F.embedding(targets, self.margins) # [x, 1]
        margin = m * one_hot # [x, V]
        logits_minus_margin = logits - margin
        log_softmax = F.log_softmax(
            logits_minus_margin.view(-1, logits.size(-1)),
            dim=-1,
            dtype=torch.float32,
        ) # [x, V]
        adm_loss = F.nll_loss(
            log_softmax,
            targets.view(-1),
            reduction='sum',
            ignore_index=self.padding_idx,
        )
        # Manual margin update (outside autograd): the analytic gradient of
        # the margin-adjusted cross entropy w.r.t. the margin is p(target)-1.
        with torch.no_grad():
            margin_log_grad = torch.gather(log_softmax.detach(), dim=-1,
                                           index=targets.unsqueeze(-1)) # [x, 1]
            margin_grad_cross = torch.exp(margin_log_grad) - \
                                torch.ones_like(margin_log_grad)
            if self.margin_norm == 'l1':
                margin_grad = margin_grad_cross - torch.ones_like(m) * self.margin_lambda
            else:
                # l2 norm
                margin_grad = margin_grad_cross - m * self.margin_lambda * 2.0
            margin_update = -1.0 * margin_grad * self.margin_lr
            # NOTE(review): .half() assumes the margins buffer is fp16 at this
            # point (fp16 training) — confirm for fp32 runs.
            self.margins.scatter_add_(0, targets.unsqueeze(-1), margin_update.half())
        # for logging below! margin_norm; normal loss
        margin_norm = torch.mean(self.margins) * sample['nsentences']# used for log!
        normal_loss = F.nll_loss(
            F.log_softmax(
                logits.view(-1, logits.size(-1)),
                dim=-1,
                dtype=torch.float32,
            ),
            targets.view(-1),
            reduction='sum',
            ignore_index=self.padding_idx,
        )
        logging_output = {
            'loss': utils.item(normal_loss.data) if reduce else normal_loss.data,
            'margin_n':utils.item(margin_norm.data) if reduce else margin_norm.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['nsentences'],
            'sample_size': sample_size,
            'admloss': utils.item(adm_loss.data) if reduce else adm_loss.data,
        }
        return adm_loss, sample_size, logging_output
    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        admloss_sum = sum(log.get('admloss', 0) for log in logging_outputs)
        margin_n = sum(log.get('margin_n', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
        # Losses are reported in bits (divide by log 2); ppl derived from loss.
        metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
        metrics.log_scalar('admloss', admloss_sum / sample_size / math.log(2), sample_size, round=3)
        metrics.log_scalar('margin_norm', margin_n / nsentences, 32, round=3)
        metrics.log_derived('ppl', lambda meters: round(2**meters['loss'].avg, 3))
    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improves distributed training speed.
        """
        return True
| 37.660606 | 100 | 0.60251 |
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion('masked_adlm')
class MaskedAdLmLoss(FairseqCriterion):
def __init__(self, args, task):
super(MaskedAdLmLoss, self).__init__(args, task)
self.vocab = self.task.source_dictionary
print(len(self.vocab.count))
self.register_buffer('margins', torch.zeros((len(self.vocab.count), 1)))
self.margins.requires_grad = False
self.margin_lambda = args.margin_lambda
self.margin_lr = args.margin_lr
self.margin_norm = args.margin_norm
@staticmethod
def add_args(parser):
super(MaskedAdLmLoss,
MaskedAdLmLoss).add_args(parser)
parser.add_argument('--margin_lambda', default=0.5, type=float, metavar='D',
help='weight for the adaptive margin loss')
parser.add_argument('--margin_lr', default=0.0001, type=float, metavar='D',
help='weight for the adaptive margin loss')
parser.add_argument('--margin-norm', default='l1', type=str,
help='Type of margin norm in the loss')
def forward(self, model, sample, reduce=True):
masked_tokens = sample['target'].ne(self.padding_idx)
sample_size = masked_tokens.int().sum().item()
if sample_size == 0:
masked_tokens = None
logits = model(**sample['net_input'], masked_tokens=masked_tokens)[0]
targets = model.get_targets(sample, [logits])
if sample_size != 0:
targets = targets[masked_tokens]
one_hot = F.one_hot(targets, len(self.vocab.count))
m = F.embedding(targets, self.margins)
margin = m * one_hot
logits_minus_margin = logits - margin
log_softmax = F.log_softmax(
logits_minus_margin.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
)
adm_loss = F.nll_loss(
log_softmax,
targets.view(-1),
reduction='sum',
ignore_index=self.padding_idx,
)
with torch.no_grad():
margin_log_grad = torch.gather(log_softmax.detach(), dim=-1,
index=targets.unsqueeze(-1))
margin_grad_cross = torch.exp(margin_log_grad) - \
torch.ones_like(margin_log_grad)
if self.margin_norm == 'l1':
margin_grad = margin_grad_cross - torch.ones_like(m) * self.margin_lambda
else:
margin_grad = margin_grad_cross - m * self.margin_lambda * 2.0
margin_update = -1.0 * margin_grad * self.margin_lr
self.margins.scatter_add_(0, targets.unsqueeze(-1), margin_update.half())
margin_norm = torch.mean(self.margins) * sample['nsentences']
normal_loss = F.nll_loss(
F.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
),
targets.view(-1),
reduction='sum',
ignore_index=self.padding_idx,
)
logging_output = {
'loss': utils.item(normal_loss.data) if reduce else normal_loss.data,
'margin_n':utils.item(margin_norm.data) if reduce else margin_norm.data,
'ntokens': sample['ntokens'],
'nsentences': sample['nsentences'],
'sample_size': sample_size,
'admloss': utils.item(adm_loss.data) if reduce else adm_loss.data,
}
return adm_loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
admloss_sum = sum(log.get('admloss', 0) for log in logging_outputs)
margin_n = sum(log.get('margin_n', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
metrics.log_scalar('admloss', admloss_sum / sample_size / math.log(2), sample_size, round=3)
metrics.log_scalar('margin_norm', margin_n / nsentences, 32, round=3)
metrics.log_derived('ppl', lambda meters: round(2**meters['loss'].avg, 3))
@staticmethod
def logging_outputs_can_be_summed() -> bool:
return True
| true | true |
f726783468013df7d862175c11e8671b1275bee8 | 1,929 | py | Python | campy/cameras/emu.py | Wolfffff/campy | 988e02fea70c1a14221ad3a9c350afa52cc9bcf5 | [
"MIT"
] | null | null | null | campy/cameras/emu.py | Wolfffff/campy | 988e02fea70c1a14221ad3a9c350afa52cc9bcf5 | [
"MIT"
] | null | null | null | campy/cameras/emu.py | Wolfffff/campy | 988e02fea70c1a14221ad3a9c350afa52cc9bcf5 | [
"MIT"
] | 1 | 2021-09-02T20:01:25.000Z | 2021-09-02T20:01:25.000Z | """
"""
from campy.cameras import unicam
import os
import time
import logging
import sys
import numpy as np
from collections import deque
import csv
import imageio
def LoadSystem(params):
    # Emulation backend: the "system" handle is just the camera make string.
    return params["cameraMake"]
def GetDeviceList(system):
    # No device discovery in emulation; echo the system handle back.
    return system
def LoadDevice(cam_params):
    # The configured device entry stands in for a hardware handle.
    return cam_params["device"]
def GetSerialNumber(device):
    # No hardware serial number available; reuse the device handle.
    return device
def GetModelName(camera):
    # Fixed model name for the emulated camera backend.
    return "Emulated_Camera"
def OpenCamera(cam_params, device):
    """Open a video reader on the prerecorded file used for camera emulation.

    Also copies the recording's frame size into cam_params
    ("frameWidth"/"frameHeight"). Returns (reader, cam_params).
    """
    # Drop the 3-character prefix from the configured video file name.
    video_file = cam_params["videoFilename"][3:]
    video_path = os.path.join(
        cam_params["videoFolder"], cam_params["cameraName"], video_file
    )
    camera = imageio.get_reader(video_path)

    # Mirror the recording's frame size into the camera parameters.
    width, height = camera.get_meta_data()["size"]
    cam_params["frameWidth"] = width
    cam_params["frameHeight"] = height

    print("Opened {} emulation.".format(cam_params["cameraName"]))
    return camera, cam_params
def LoadSettings(cam_params, camera):
    # Emulated camera: nothing to configure, parameters pass through.
    return cam_params
def StartGrabbing(camera):
    # Reading from a prerecorded file always "starts" successfully.
    return True
def GrabFrame(camera, frameNumber):
    # Random access into the prerecorded video by frame index.
    return camera.get_data(frameNumber)
def GetImageArray(grabResult):
    # The grab result already is the image array.
    return grabResult
def GetTimeStamp(grabResult):
    # No hardware timestamp in emulation; use a host-side clock.
    return time.perf_counter()
def DisplayImage(cam_params, dispQueue, grabResult):
    """Downsample the grabbed frame and push it onto the display queue.

    The frame is subsampled by taking every Nth row/column, where N is
    cam_params["displayDownsample"]; all color channels are kept.
    """
    step = cam_params["displayDownsample"]
    dispQueue.append(grabResult[::step, ::step, :])
def ReleaseFrame(grabResult):
    # Drop the local reference so the frame can be garbage collected.
    del grabResult
def CloseCamera(cam_params, camera):
    print("Closing {}... Please wait.".format(cam_params["cameraName"]))
    # Close camera after acquisition stops
    del camera
def CloseSystem(system, device_list):
    # Nothing to shut down in emulation; just release the handles.
    del system
    del device_list
| 18.028037 | 85 | 0.718507 |
from campy.cameras import unicam
import os
import time
import logging
import sys
import numpy as np
from collections import deque
import csv
import imageio
def LoadSystem(params):
return params["cameraMake"]
def GetDeviceList(system):
return system
def LoadDevice(cam_params):
return cam_params["device"]
def GetSerialNumber(device):
return device
def GetModelName(camera):
return "Emulated_Camera"
def OpenCamera(cam_params, device):
videoFileName = cam_params["videoFilename"][3 : len(cam_params["videoFilename"])]
full_file_name = os.path.join(
cam_params["videoFolder"], cam_params["cameraName"], videoFileName
)
camera = imageio.get_reader(full_file_name)
frame_size = camera.get_meta_data()["size"]
cam_params["frameWidth"] = frame_size[0]
cam_params["frameHeight"] = frame_size[1]
print("Opened {} emulation.".format(cam_params["cameraName"]))
return camera, cam_params
def LoadSettings(cam_params, camera):
return cam_params
def StartGrabbing(camera):
return True
def GrabFrame(camera, frameNumber):
return camera.get_data(frameNumber)
def GetImageArray(grabResult):
return grabResult
def GetTimeStamp(grabResult):
return time.perf_counter()
def DisplayImage(cam_params, dispQueue, grabResult):
img = grabResult[
:: cam_params["displayDownsample"], :: cam_params["displayDownsample"], :
]
dispQueue.append(img)
def ReleaseFrame(grabResult):
del grabResult
def CloseCamera(cam_params, camera):
print("Closing {}... Please wait.".format(cam_params["cameraName"]))
del camera
def CloseSystem(system, device_list):
del system
del device_list
| true | true |
f72678c8ed7735a73d4972ede696b84b944d05d8 | 5,881 | py | Python | src/odm_report_shot_coverage/models/reconstruction.py | terra-submersa/opensfm-camera-coverage | a9ad2bff799a5d0d07d7900fc7d1bf10bc489632 | [
"CNRI-Python"
] | null | null | null | src/odm_report_shot_coverage/models/reconstruction.py | terra-submersa/opensfm-camera-coverage | a9ad2bff799a5d0d07d7900fc7d1bf10bc489632 | [
"CNRI-Python"
] | null | null | null | src/odm_report_shot_coverage/models/reconstruction.py | terra-submersa/opensfm-camera-coverage | a9ad2bff799a5d0d07d7900fc7d1bf10bc489632 | [
"CNRI-Python"
] | null | null | null | import json
import logging
import geojson
import numpy as np
from tqdm import tqdm
from scipy import stats
from odm_report_shot_coverage.models.camera import Camera, json_parse_camera
from odm_report_shot_coverage.models.shot import Shot, shot_boundaries_from_points, Boundaries
from odm_report_shot_coverage.models.wavefront_25d import Wavefront25D, parse_wavefront_25d_obj
class Reconstruction:
    """Cameras, shots and the 2.5D mesh of a single ODM reconstruction."""

    def __init__(self):
        # Bug fix: these used to be mutable CLASS-level attributes, so every
        # Reconstruction instance shared the same dict/list (and `mesh` was
        # bound to the Wavefront25D class itself). Initialize per instance.
        self.cameras: 'dict[str, Camera]' = {}
        self._shots: 'list[Shot]' = []
        self.mesh: 'Wavefront25D' = None
        self.orthophoto_boundaries: 'Boundaries' = None

    @property
    def shots(self) -> 'list[Shot]':
        """Shots sorted by image name (sorted in place on each access)."""
        self._shots.sort(key=lambda s: s.image_name)
        return self._shots

    def add_camera(self, name: str, camera: 'Camera'):
        self.cameras[name] = camera

    def add_shot(self, shot: 'Shot'):
        self._shots.append(shot)

    def to_json(self) -> dict:
        """JSON-serializable summary (cameras, shots, mesh/orthophoto bounds)."""
        return {
            'cameras': {n: c.to_json() for n, c in self.cameras.items()},
            'shots': [s.to_json() for s in self.shots],
            'boundaries': self.mesh.boundaries.to_json(),
            'orthophotoBoundaries': self.orthophoto_boundaries.to_json(),
        }

    def compute_shot_boundaries(self):
        """
        From shots and points, fill the shot_boundaries
        :rtype: None
        """
        for shot in tqdm(self.shots, desc='Computing shot boundaries'):
            # Keep only mesh points whose projection falls inside the frame.
            points = [
                point for point in self.mesh.points
                if shot.camera.in_frame(shot.camera_pixel(point))
            ]
            shot.boundaries = shot_boundaries_from_points(points)

    def find_camera_by_width_height(self, width: int, height: int) -> 'Camera':
        """Return the unique camera with the given frame size, or raise."""
        cs = [c for c in self.cameras.values() if c.width == width and c.height == height]
        if len(cs) != 1:
            raise Exception('Not exactly one camera found with size %s x %s' % (width, height))
        return cs[0]
class ReconstructionCollection:
    """Ordered, indexable collection of Reconstruction objects."""

    def __init__(self):
        # Bug fix: this used to be a mutable CLASS-level list shared between
        # every ReconstructionCollection instance.
        self.reconstructions: 'list[Reconstruction]' = []

    def append(self, reconstruction: 'Reconstruction'):
        self.reconstructions.append(reconstruction)

    def __getitem__(self, i: int):
        return self.reconstructions[i]

    def __len__(self):
        return len(self.reconstructions)
def lin_reg(pairs: 'list[(float, float)]') -> (float, float, float, float):
    """Least-squares linear regression over a list of (x, y) pairs.

    Returns the scipy.stats.linregress result (slope, intercept, rvalue, ...).
    """
    xs, ys = zip(*pairs)
    return stats.linregress(xs, ys)
def _parse_point_cloud_boundaries(path: str) -> Boundaries:
    """Read the native point-cloud bounding box from ODM's stats report."""
    with open('%s/odm_report/stats.json' % path, 'r') as fd:
        report = json.load(fd)
    box = report['point_cloud_statistics']['stats']['bbox']['native']['bbox']
    return Boundaries(
        x_min=box['minx'],
        x_max=box['maxx'],
        y_min=box['miny'],
        y_max=box['maxy'],
        z_min=box['minz'],
        z_max=box['maxz'],
    )
def _parse_camera_shotgeojson(path: str, reconstruction: Reconstruction, native_to_25d_coordinates):
    """Load cameras and shots from ODM output files into `reconstruction`.

    native_to_25d_coordinates is the (fx, fy, fz) triple of axis-translation
    functions produced by _native_to_model_25d_coordinates; shot positions
    are mapped through it into the 2.5D model frame.
    """
    with open('%s/cameras.json' % path, 'r') as fd:
        cameras_json = json.load(fd)
        for n, j in cameras_json.items():
            camera = json_parse_camera(n, j)
            reconstruction.add_camera(n, camera)
    (tr_x, tr_y, tr_z) = native_to_25d_coordinates
    with open('%s/odm_report/shots.geojson' % path, 'r') as fd:
        shots_geojson = geojson.load(fd)
        for feat in shots_geojson['features']:
            shot = Shot()
            props = feat['properties']
            # Cameras are matched to shots by image dimensions only.
            shot.camera = reconstruction.find_camera_by_width_height(props['width'], props['height'])
            shot.image_name = props['filename']
            translation = props['translation']
            # Map the shot position into the 2.5D model frame.
            shot.translation = (tr_x(translation[0]), tr_y(translation[1]), tr_z(translation[2]))
            shot.rotation = props['rotation']
            reconstruction.add_shot(shot)
def _native_to_model_25d_coordinates(native_boundaries: Boundaries, model_25d_boundaries: Boundaries):
width_25d = model_25d_boundaries.x_max - model_25d_boundaries.x_min
height_25d = model_25d_boundaries.y_max - model_25d_boundaries.y_min
elevation_25d = model_25d_boundaries.y_max - model_25d_boundaries.y_min
width_native = native_boundaries.x_max - native_boundaries.x_min
height_native = native_boundaries.y_max - native_boundaries.y_min
elevation_native = native_boundaries.y_max - native_boundaries.y_min
width_ratio = np.abs(1 - width_native / width_25d)
height_ratio = np.abs(1 - height_native / height_25d)
elevation_ratio = np.abs(1 - elevation_native / elevation_25d)
logging.info(
'native/25d model boundaries discrepancies width=%.2f%% height=%.2f%% elevation=%.2f%%' % (
width_ratio * 100, height_ratio * 100, elevation_ratio * 100))
return (
lambda x: (x - (native_boundaries.x_max + native_boundaries.x_min) / 2) + (
model_25d_boundaries.x_max + model_25d_boundaries.x_min) / 2,
lambda y: (y - (native_boundaries.y_max + native_boundaries.y_min) / 2) + (
model_25d_boundaries.y_max + model_25d_boundaries.y_min) / 2,
lambda z: (z - (native_boundaries.z_max + native_boundaries.z_min) / 2) + (
model_25d_boundaries.z_max + model_25d_boundaries.z_min) / 2
)
def parse_reconstruction(path: str) -> Reconstruction:
    """Build a Reconstruction from an ODM project directory.

    Reads the textured 2.5D mesh, the native point-cloud bounding box and
    the camera/shot descriptions, mapping shot positions into the mesh frame.
    """
    reconstruction = Reconstruction()
    wf = parse_wavefront_25d_obj('%s/odm_texturing_25d/odm_textured_model_geo.obj' % path)
    reconstruction.mesh = wf
    # The orthophoto is assumed to cover the same extent as the 2.5D mesh.
    reconstruction.orthophoto_boundaries = wf.boundaries
    native_boundaries = _parse_point_cloud_boundaries(path)
    _parse_camera_shotgeojson(path, reconstruction,
                              _native_to_model_25d_coordinates(native_boundaries, wf.boundaries))
    return reconstruction
| 38.94702 | 102 | 0.660602 | import json
import logging
import geojson
import numpy as np
from tqdm import tqdm
from scipy import stats
from odm_report_shot_coverage.models.camera import Camera, json_parse_camera
from odm_report_shot_coverage.models.shot import Shot, shot_boundaries_from_points, Boundaries
from odm_report_shot_coverage.models.wavefront_25d import Wavefront25D, parse_wavefront_25d_obj
class Reconstruction:
cameras: 'dict[str, Camera]' = {}
_shots: 'list[Shot]' = []
mesh = Wavefront25D
orthophoto_boundaries: Boundaries
@property
def shots(self) -> 'list[Shot]':
self._shots.sort(key=lambda s: s.image_name)
return self._shots
def add_camera(self, name: str, camera: Camera):
self.cameras[name] = camera
def add_shot(self, shot: Shot):
self._shots.append(shot)
def to_json(self) -> dict:
return {
'cameras': {n: c.to_json() for n, c in self.cameras.items()},
'shots': [s.to_json() for s in self.shots],
'boundaries': self.mesh.boundaries.to_json(),
'orthophotoBoundaries': self.orthophoto_boundaries.to_json(),
}
def compute_shot_boundaries(self):
for shot in tqdm(self.shots, desc='Computing shot boundaries'):
points = []
for i, point in enumerate(self.mesh.points):
pixel = shot.camera_pixel(point)
if shot.camera.in_frame(pixel):
points.append(point)
shot.boundaries = shot_boundaries_from_points(points)
def find_camera_by_width_height(self, width: int, height: int) -> Camera:
cs = [c for c in self.cameras.values() if c.width == width and c.height == height]
if len(cs) != 1:
raise Exception('Not exactly one camera found with size %s x %s' % (width, height))
return cs[0]
class ReconstructionCollection:
reconstructions: 'list[Reconstruction]' = []
def append(self, reconstruction: Reconstruction):
self.reconstructions.append(reconstruction)
def __getitem__(self, i: int):
return self.reconstructions[i]
def __len__(self):
return len(self.reconstructions)
def lin_reg(pairs: 'list[(float, float)]') -> (float, float, float, float):
x = [p[0] for p in pairs]
y = [p[1] for p in pairs]
return stats.linregress(x, y)
def _parse_point_cloud_boundaries(path: str) -> Boundaries:
with open('%s/odm_report/stats.json' % path, 'r') as fd:
stats_json = json.load(fd)
bbox = stats_json['point_cloud_statistics']['stats']['bbox']['native']['bbox']
return Boundaries(
x_min=bbox['minx'],
x_max=bbox['maxx'],
y_min=bbox['miny'],
y_max=bbox['maxy'],
z_min=bbox['minz'],
z_max=bbox['maxz'],
)
def _parse_camera_shotgeojson(path: str, reconstruction: Reconstruction, native_to_25d_coordinates):
with open('%s/cameras.json' % path, 'r') as fd:
cameras_json = json.load(fd)
for n, j in cameras_json.items():
camera = json_parse_camera(n, j)
reconstruction.add_camera(n, camera)
(tr_x, tr_y, tr_z) = native_to_25d_coordinates
with open('%s/odm_report/shots.geojson' % path, 'r') as fd:
shots_geojson = geojson.load(fd)
for feat in shots_geojson['features']:
shot = Shot()
props = feat['properties']
shot.camera = reconstruction.find_camera_by_width_height(props['width'], props['height'])
shot.image_name = props['filename']
translation = props['translation']
shot.translation = (tr_x(translation[0]), tr_y(translation[1]), tr_z(translation[2]))
shot.rotation = props['rotation']
reconstruction.add_shot(shot)
def _native_to_model_25d_coordinates(native_boundaries: Boundaries, model_25d_boundaries: Boundaries):
width_25d = model_25d_boundaries.x_max - model_25d_boundaries.x_min
height_25d = model_25d_boundaries.y_max - model_25d_boundaries.y_min
elevation_25d = model_25d_boundaries.y_max - model_25d_boundaries.y_min
width_native = native_boundaries.x_max - native_boundaries.x_min
height_native = native_boundaries.y_max - native_boundaries.y_min
elevation_native = native_boundaries.y_max - native_boundaries.y_min
width_ratio = np.abs(1 - width_native / width_25d)
height_ratio = np.abs(1 - height_native / height_25d)
elevation_ratio = np.abs(1 - elevation_native / elevation_25d)
logging.info(
'native/25d model boundaries discrepancies width=%.2f%% height=%.2f%% elevation=%.2f%%' % (
width_ratio * 100, height_ratio * 100, elevation_ratio * 100))
return (
lambda x: (x - (native_boundaries.x_max + native_boundaries.x_min) / 2) + (
model_25d_boundaries.x_max + model_25d_boundaries.x_min) / 2,
lambda y: (y - (native_boundaries.y_max + native_boundaries.y_min) / 2) + (
model_25d_boundaries.y_max + model_25d_boundaries.y_min) / 2,
lambda z: (z - (native_boundaries.z_max + native_boundaries.z_min) / 2) + (
model_25d_boundaries.z_max + model_25d_boundaries.z_min) / 2
)
def parse_reconstruction(path: str) -> Reconstruction:
reconstruction = Reconstruction()
wf = parse_wavefront_25d_obj('%s/odm_texturing_25d/odm_textured_model_geo.obj' % path)
reconstruction.mesh = wf
reconstruction.orthophoto_boundaries = wf.boundaries
native_boundaries = _parse_point_cloud_boundaries(path)
_parse_camera_shotgeojson(path, reconstruction,
_native_to_model_25d_coordinates(native_boundaries, wf.boundaries))
return reconstruction
| true | true |
f72678e2fc3806ded2ce42f11e552a9f29cbd3ae | 3,757 | py | Python | demo.py | SeongSuKim95/ReID-Baseline-swin | f30db86eb2690c20c4fbb0189eb52b57358705df | [
"MIT"
] | null | null | null | demo.py | SeongSuKim95/ReID-Baseline-swin | f30db86eb2690c20c4fbb0189eb52b57358705df | [
"MIT"
] | null | null | null | demo.py | SeongSuKim95/ReID-Baseline-swin | f30db86eb2690c20c4fbb0189eb52b57358705df | [
"MIT"
] | 1 | 2022-03-18T19:09:47.000Z | 2022-03-18T19:09:47.000Z | import argparse
import scipy.io
import torch
import numpy as np
import os
from torchvision import datasets
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
#######################################################################
# Evaluate
# Command-line options for the retrieval demo.
parser = argparse.ArgumentParser(description='Demo')
parser.add_argument('--query_index', default=777, type=int, help='test_image_index')
parser.add_argument('--test_dir',default='/mnt/hdd_data/Dataset/market1501_ss/pytorch',type=str, help='./test_data')
opts = parser.parse_args()
data_dir = opts.test_dir
# ImageFolder here is used only to recover image file paths for display;
# no transforms are applied.
image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ) for x in ['gallery','query']}
#####################################################################
#Show result
def imshow(path, title=None):
    """Read the image at *path* and display it on the current axes."""
    image = plt.imread(path)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    # Brief pause so interactive figures get a chance to redraw.
    plt.pause(0.001)
######################################################################
# Load features/labels/camera ids previously extracted and saved (MATLAB
# .mat format) by the test script.
result = scipy.io.loadmat('pytorch_result.mat')
query_feature = torch.FloatTensor(result['query_f'])
query_cam = result['query_cam'][0]
query_label = result['query_label'][0]
gallery_feature = torch.FloatTensor(result['gallery_f'])
gallery_cam = result['gallery_cam'][0]
gallery_label = result['gallery_label'][0]
# Optional multi-query features, if the file exists.
multi = os.path.isfile('multi_query.mat')
if multi:
    m_result = scipy.io.loadmat('multi_query.mat')
    mquery_feature = torch.FloatTensor(m_result['mquery_f'])
    mquery_cam = m_result['mquery_cam'][0]
    mquery_label = m_result['mquery_label'][0]
    mquery_feature = mquery_feature.cuda()
# Similarity is computed on GPU.
query_feature = query_feature.cuda()
gallery_feature = gallery_feature.cuda()
#######################################################################
# sort the images
def sort_img(qf, ql, qc, gf, gl, gc):
    """Rank gallery images by dot-product similarity to one query feature.

    Args:
        qf: query feature vector (torch tensor, on the same device as gf).
        ql, qc: query label and camera id.
        gf: gallery feature matrix, one row per image (torch tensor).
        gl, gc: gallery labels and camera ids (numpy arrays).

    Returns:
        Gallery indices sorted from most to least similar, with "junk"
        entries removed: distractors (label -1) and images of the query
        identity taken by the query's own camera.
    """
    query = qf.view(-1, 1)
    score = torch.mm(gf, query).squeeze(1).cpu().numpy()
    # argsort is ascending; reverse for descending similarity.
    index = np.argsort(score)[::-1]
    query_index = np.argwhere(gl == ql)
    camera_index = np.argwhere(gc == qc)
    junk_index1 = np.argwhere(gl == -1)  # distractor images
    junk_index2 = np.intersect1d(query_index, camera_index)  # same id, same camera
    junk_index = np.append(junk_index2, junk_index1)
    # np.isin replaces np.in1d, which is deprecated and removed in NumPy 2.0.
    mask = np.isin(index, junk_index, invert=True)
    index = index[mask]
    return index
# Rank the gallery for the requested query image.
i = opts.query_index
index = sort_img(query_feature[i],query_label[i],query_cam[i],gallery_feature,gallery_label,gallery_cam)
########################################################################
# Visualize the rank result
query_path, _ = image_datasets['query'].imgs[i]
query_label = query_label[i]
print(query_path)
print('Top 10 images are as follow:')
try: # Visualize Ranking Result
    # Graphical User Interface is needed
    fig = plt.figure(figsize=(16,4))
    ax = plt.subplot(1,11,1)
    ax.axis('off')
    imshow(query_path,'query')
    # Rank-10 gallery results: green title = correct identity, red = wrong.
    for i in range(10):
        ax = plt.subplot(1,11,i+2)
        ax.axis('off')
        img_path, _ = image_datasets['gallery'].imgs[index[i]]
        label = gallery_label[index[i]]
        imshow(img_path)
        if label == query_label:
            ax.set_title('%d'%(i+1), color='green')
        else:
            ax.set_title('%d'%(i+1), color='red')
        print(img_path)
except RuntimeError:
    # Headless fallback: just print the ranked file paths.
    # NOTE(review): `fig` is assigned inside the try block; if plt.figure
    # itself raised, the savefig below would hit a NameError — confirm.
    for i in range(10):
        img_path = image_datasets.imgs[index[i]]
        print(img_path[0])
    print('If you want to see the visualization of the ranking result, graphical user interface is needed.')
fig.savefig("show.png")
| 33.846847 | 116 | 0.625233 | import argparse
import scipy.io
import torch
import numpy as np
import os
from torchvision import datasets
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
| true | true |
f72678eeeaa3ab01da38b3b2d287901a2acbed5a | 512 | py | Python | week_2/tests/lab8_p2.py | brown-ccv/workshop-python-2020 | e522527a077f68c4a0b11da9eb615a9f57d21b6d | [
"MIT"
] | 2 | 2020-11-20T07:43:40.000Z | 2021-05-14T14:40:41.000Z | week_2/tests/lab8_p2.py | brown-ccv/workshop-python-2020 | e522527a077f68c4a0b11da9eb615a9f57d21b6d | [
"MIT"
] | null | null | null | week_2/tests/lab8_p2.py | brown-ccv/workshop-python-2020 | e522527a077f68c4a0b11da9eb615a9f57d21b6d | [
"MIT"
] | 2 | 2020-06-18T20:35:40.000Z | 2020-09-27T02:54:31.000Z | test = {
'name': '8.2',
'suites': [
{
'cases': [
{
'code': r"""
>>> # It looks like one of your variables is not named
>>> # correctly. Maybe there's a typo?
>>> 'avg_flt' in vars()
True
"""
},
{
'code': r"""
>>> # It looks like your function returns the wrong result.
>>> # Check your function.
>>> avg_flt == 2.0
True
"""
},
]
}
]
} | 20.48 | 69 | 0.363281 | test = {
'name': '8.2',
'suites': [
{
'cases': [
{
'code': r"""
>>> # It looks like one of your variables is not named
>>> # correctly. Maybe there's a typo?
>>> 'avg_flt' in vars()
True
"""
},
{
'code': r"""
>>> # It looks like your function returns the wrong result.
>>> # Check your function.
>>> avg_flt == 2.0
True
"""
},
]
}
]
} | true | true |
f726790477f564ff202bf60ffff1010f2a6df250 | 3,687 | py | Python | rlkit/envs/point_robot.py | erinaldi/MetaRL | 6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871 | [
"MIT"
] | 381 | 2019-03-19T22:55:14.000Z | 2022-03-26T18:56:17.000Z | rlkit/envs/point_robot.py | erinaldi/MetaRL | 6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871 | [
"MIT"
] | 27 | 2019-04-30T04:04:51.000Z | 2022-03-03T18:20:11.000Z | rlkit/envs/point_robot.py | erinaldi/MetaRL | 6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871 | [
"MIT"
] | 107 | 2019-03-22T06:09:24.000Z | 2022-01-30T13:34:38.000Z | import numpy as np
from gym import spaces
from gym import Env
from . import register_env
@register_env('point-robot')
class PointEnv(Env):
    """
    point robot on a 2-D plane with position control
    tasks (aka goals) are positions on the plane

    - tasks sampled from unit square
    - reward is negative L2 distance to the goal
    """
    def __init__(self, randomize_tasks=False, n_tasks=2):
        # randomize_tasks: sample goals uniformly from the unit square
        # n_tasks: number of goals (tasks) to create
        if randomize_tasks:
            np.random.seed(1337)  # fixed seed so every run gets the same task set
            goals = [[np.random.uniform(-1., 1.), np.random.uniform(-1., 1.)] for _ in range(n_tasks)]
        else:
            # some hand-coded goals for debugging
            goals = [np.array([10, -10]),
                     np.array([10, 10]),
                     np.array([-10, 10]),
                     np.array([-10, -10]),
                     np.array([0, 0]),
                     np.array([7, 2]),
                     np.array([0, 4]),
                     np.array([-6, 9])
                     ]
            goals = [g / 10. for g in goals]  # scale hand-coded goals into the unit square
        self.goals = goals
        self.reset_task(0)
        # Observation: the agent's 2-D position; action: bounded displacement.
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(2,))
        self.action_space = spaces.Box(low=-0.1, high=0.1, shape=(2,))
    def reset_task(self, idx):
        ''' reset goal AND reset the agent '''
        self._goal = self.goals[idx]
        self.reset()
    def get_all_task_idx(self):
        # Task identifiers are simply indices into self.goals.
        return range(len(self.goals))
    def reset_model(self):
        # reset to a random location on the unit square
        self._state = np.random.uniform(-1., 1., size=(2,))
        return self._get_obs()
    def reset(self):
        return self.reset_model()
    def _get_obs(self):
        # Return a copy so callers cannot mutate the internal state.
        return np.copy(self._state)
    def step(self, action):
        # Position control: the action is added to the current position.
        self._state = self._state + action
        x, y = self._state
        x -= self._goal[0]
        y -= self._goal[1]
        reward = - (x ** 2 + y ** 2) ** 0.5  # negative L2 distance to goal
        done = False  # episodes never terminate from within the env
        ob = self._get_obs()
        return ob, reward, done, dict()
    def viewer_setup(self):
        print('no viewer')
        pass
    def render(self):
        print('current state:', self._state)
@register_env('sparse-point-robot')
class SparsePointEnv(PointEnv):
    '''
     - tasks sampled from unit half-circle
     - reward is L2 distance given only within goal radius

     NOTE that `step()` returns the dense reward because this is used during meta-training
     the algorithm should call `sparsify_rewards()` to get the sparse rewards
     '''
    def __init__(self, randomize_tasks=False, n_tasks=2, goal_radius=0.2):
        super().__init__(randomize_tasks, n_tasks)
        self.goal_radius = goal_radius
        if randomize_tasks:
            np.random.seed(1337)
            # Goals evenly spaced on the unit upper half-circle, then shuffled.
            radius = 1.0
            angles = np.linspace(0, np.pi, num=n_tasks)
            xs = radius * np.cos(angles)
            ys = radius * np.sin(angles)
            goals = np.stack([xs, ys], axis=1)
            np.random.shuffle(goals)
            goals = goals.tolist()
        # NOTE(review): when randomize_tasks is False, `goals` is unbound here
        # and this raises NameError (the parent's goals are overwritten only in
        # the randomized branch) — confirm intended usage is randomize_tasks=True.
        self.goals = goals
        self.reset_task(0)
    def sparsify_rewards(self, r):
        ''' zero out rewards when outside the goal radius '''
        mask = (r >= -self.goal_radius).astype(np.float32)
        r = r * mask
        return r
    def reset_model(self):
        # Unlike the parent, always start at the origin.
        self._state = np.array([0, 0])
        return self._get_obs()
    def step(self, action):
        # Returns the DENSE reward (see class docstring); the sparse reward is
        # exposed via the info dict under 'sparse_reward'.
        ob, reward, done, d = super().step(action)
        sparse_reward = self.sparsify_rewards(reward)
        # make sparse rewards positive
        if reward >= -self.goal_radius:
            sparse_reward += 1
        d.update({'sparse_reward': sparse_reward})
        return ob, reward, done, d
| 29.97561 | 102 | 0.55872 | import numpy as np
from gym import spaces
from gym import Env
from . import register_env
@register_env('point-robot')
class PointEnv(Env):
def __init__(self, randomize_tasks=False, n_tasks=2):
if randomize_tasks:
np.random.seed(1337)
goals = [[np.random.uniform(-1., 1.), np.random.uniform(-1., 1.)] for _ in range(n_tasks)]
else:
goals = [np.array([10, -10]),
np.array([10, 10]),
np.array([-10, 10]),
np.array([-10, -10]),
np.array([0, 0]),
np.array([7, 2]),
np.array([0, 4]),
np.array([-6, 9])
]
goals = [g / 10. for g in goals]
self.goals = goals
self.reset_task(0)
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(2,))
self.action_space = spaces.Box(low=-0.1, high=0.1, shape=(2,))
def reset_task(self, idx):
self._goal = self.goals[idx]
self.reset()
def get_all_task_idx(self):
return range(len(self.goals))
def reset_model(self):
self._state = np.random.uniform(-1., 1., size=(2,))
return self._get_obs()
def reset(self):
return self.reset_model()
def _get_obs(self):
return np.copy(self._state)
def step(self, action):
self._state = self._state + action
x, y = self._state
x -= self._goal[0]
y -= self._goal[1]
reward = - (x ** 2 + y ** 2) ** 0.5
done = False
ob = self._get_obs()
return ob, reward, done, dict()
def viewer_setup(self):
print('no viewer')
pass
def render(self):
print('current state:', self._state)
@register_env('sparse-point-robot')
class SparsePointEnv(PointEnv):
def __init__(self, randomize_tasks=False, n_tasks=2, goal_radius=0.2):
super().__init__(randomize_tasks, n_tasks)
self.goal_radius = goal_radius
if randomize_tasks:
np.random.seed(1337)
radius = 1.0
angles = np.linspace(0, np.pi, num=n_tasks)
xs = radius * np.cos(angles)
ys = radius * np.sin(angles)
goals = np.stack([xs, ys], axis=1)
np.random.shuffle(goals)
goals = goals.tolist()
self.goals = goals
self.reset_task(0)
def sparsify_rewards(self, r):
mask = (r >= -self.goal_radius).astype(np.float32)
r = r * mask
return r
def reset_model(self):
self._state = np.array([0, 0])
return self._get_obs()
def step(self, action):
ob, reward, done, d = super().step(action)
sparse_reward = self.sparsify_rewards(reward)
if reward >= -self.goal_radius:
sparse_reward += 1
d.update({'sparse_reward': sparse_reward})
return ob, reward, done, d
| true | true |
f726796f43413d696b184ac1361a1c85145f966b | 217 | py | Python | vispy/gloo/gl/ext.py | mssurajkaiga/vispy-experiments | 0f3a19e0f4ac46608da792cbd36ebe59b036bce7 | [
"BSD-3-Clause"
] | 1 | 2017-06-12T16:24:11.000Z | 2017-06-12T16:24:11.000Z | vispy/gloo/gl/ext.py | mssurajkaiga/vispy-experiments | 0f3a19e0f4ac46608da792cbd36ebe59b036bce7 | [
"BSD-3-Clause"
] | null | null | null | vispy/gloo/gl/ext.py | mssurajkaiga/vispy-experiments | 0f3a19e0f4ac46608da792cbd36ebe59b036bce7 | [
"BSD-3-Clause"
] | null | null | null | """ Namespace for functions and constants corresponding to
OpenGL ES 2.0 extensions.
"""
from __future__ import division
from ._constants_ext import * # noqa
# Filled with functions when vispy.gloo.gl is imported
| 21.7 | 58 | 0.774194 |
from __future__ import division
from ._constants_ext import *
| true | true |
f72679f63807e54ba5c5c8c8ffd1b689e7012f08 | 297 | py | Python | lab/logger/destinations/__init__.py | gear/lab | ad1c5838acbcc98abb5d5d93d5c7a6c2b74bdfa2 | [
"MIT"
] | null | null | null | lab/logger/destinations/__init__.py | gear/lab | ad1c5838acbcc98abb5d5d93d5c7a6c2b74bdfa2 | [
"MIT"
] | null | null | null | lab/logger/destinations/__init__.py | gear/lab | ad1c5838acbcc98abb5d5d93d5c7a6c2b74bdfa2 | [
"MIT"
] | null | null | null | from typing import List, Union, Tuple
from lab.logger.colors import StyleCode
class Destination:
    """Abstract sink for logger output; concrete destinations must implement both methods."""
    def log(self, parts: List[Union[str, Tuple[str, StyleCode]]], *,
            is_new_line=True):
        # parts: plain strings or (text, style) pairs; is_new_line indicates
        # whether this write starts a fresh output line.
        raise NotImplementedError()
    def new_line(self):
        # Advance the destination to a new output line.
        raise NotImplementedError()
| 22.846154 | 68 | 0.680135 | from typing import List, Union, Tuple
from lab.logger.colors import StyleCode
class Destination:
def log(self, parts: List[Union[str, Tuple[str, StyleCode]]], *,
is_new_line=True):
raise NotImplementedError()
def new_line(self):
raise NotImplementedError()
| true | true |
f7267a892bdf070ee3c7619d0af334682563f6a0 | 3,432 | py | Python | simulation_3.py | ballcarsen/Networks_3_Forwarding | af211612a097db15a1605311c5c537e8bd279b12 | [
"Apache-2.0"
] | null | null | null | simulation_3.py | ballcarsen/Networks_3_Forwarding | af211612a097db15a1605311c5c537e8bd279b12 | [
"Apache-2.0"
] | null | null | null | simulation_3.py | ballcarsen/Networks_3_Forwarding | af211612a097db15a1605311c5c537e8bd279b12 | [
"Apache-2.0"
] | null | null | null | '''
Created on Oct 12, 2016
@author: mwittie
'''
import network_3
import link_3
import threading
from time import sleep
##configuration parameters
router_queue_size = 0 # 0 means unlimited
simulation_time = 1 # give the network sufficient time to transfer all packets before quitting
if __name__ == '__main__':
    # Per-router forwarding tables: destination-host address -> outgoing interface.
    routing_dict = {'router_a': {"1" : 0, "2": 1}, 'router_d': {"3" : 0, "4": 1}}
    object_L = [] # keeps track of objects, so we can kill their threads
    # create network nodes
    host_1 = network_3.Host(1)
    object_L.append(host_1)
    host_2 = network_3.Host(2)
    object_L.append(host_2)
    host_3 = network_3.Host(3)
    object_L.append(host_3)
    host_4 = network_3.Host(4)
    object_L.append(host_4)
    router_a = network_3.Router(routing_dict,name='router_a', intf_count=2, max_queue_size=router_queue_size)
    object_L.append(router_a)
    router_b = network_3.Router(routing_dict, name='router_b', intf_count=1, max_queue_size=router_queue_size)
    object_L.append(router_b)
    router_c = network_3.Router(routing_dict, name='router_c', intf_count=1, max_queue_size=router_queue_size)
    object_L.append(router_c)
    router_d = network_3.Router(routing_dict, name='router_d', intf_count=2, max_queue_size=router_queue_size)
    object_L.append(router_d)
    # create a Link Layer to keep track of links between network nodes
    link_layer = link_3.LinkLayer()
    object_L.append(link_layer)
    # add all the links
    # link parameters: from_node, from_intf_num, to_node, to_intf_num, mtu
    link_layer.add_link(link_3.Link(host_1, 0, router_a, 0, 50))
    # NOTE(review): host_2 also attaches to router_a interface 0 (same as
    # host_1) even though routing_dict maps "2" to interface 1 — confirm
    # whether the intended to_intf here is 1.
    link_layer.add_link(link_3.Link(host_2, 0, router_a, 0, 50))
    link_layer.add_link(link_3.Link(router_a, 0,router_b, 0, 50))
    link_layer.add_link(link_3.Link(router_a, 1, router_c, 0, 50))
    link_layer.add_link(link_3.Link(router_b, 0, router_d, 0, 50))
    link_layer.add_link(link_3.Link(router_c, 0, router_d, 0, 50))
    link_layer.add_link(link_3.Link(router_d, 0, host_3, 0, 50))
    link_layer.add_link(link_3.Link(router_d, 1, host_4, 0, 50))
    # start all the objects: one thread per host/router plus the link layer
    thread_L = []
    thread_L.append(threading.Thread(name=host_1.__str__(), target=host_1.run))
    thread_L.append(threading.Thread(name=host_2.__str__(), target=host_2.run))
    thread_L.append(threading.Thread(name=host_3.__str__(), target=host_3.run))
    thread_L.append(threading.Thread(name=host_4.__str__(), target=host_4.run))
    thread_L.append(threading.Thread(name=router_a.__str__(), target=router_a.run))
    thread_L.append(threading.Thread(name=router_b.__str__(), target=router_b.run))
    thread_L.append(threading.Thread(name=router_c.__str__(), target=router_c.run))
    thread_L.append(threading.Thread(name=router_d.__str__(), target=router_d.run))
    thread_L.append(threading.Thread(name="Network", target=link_layer.run))
    for t in thread_L:
        t.start()
    # create some send events (udt_send args: src addr, dst addr, payload)
    for i in range(1):
        host_1.udt_send(1,4,"from 1 to 4")
        host_2.udt_send(2,4, "from 2 to 4")
        host_2.udt_send(2,3, "from 2 to 3")
        host_1.udt_send(1,3, "from 1 to 3")
    # give the network sufficient time to transfer all packets before quitting
    sleep(simulation_time)
    # join all threads: setting .stop signals each run() loop to exit
    for o in object_L:
        o.stop = True
    for t in thread_L:
        t.join()
    print("All simulation threads joined")
# writes to host periodically | 32.377358 | 110 | 0.708333 | import network_3
import link_3
import threading
from time import sleep
simulation_time = 1
if __name__ == '__main__':
routing_dict = {'router_a': {"1" : 0, "2": 1}, 'router_d': {"3" : 0, "4": 1}}
object_L = []
host_1 = network_3.Host(1)
object_L.append(host_1)
host_2 = network_3.Host(2)
object_L.append(host_2)
host_3 = network_3.Host(3)
object_L.append(host_3)
host_4 = network_3.Host(4)
object_L.append(host_4)
router_a = network_3.Router(routing_dict,name='router_a', intf_count=2, max_queue_size=router_queue_size)
object_L.append(router_a)
router_b = network_3.Router(routing_dict, name='router_b', intf_count=1, max_queue_size=router_queue_size)
object_L.append(router_b)
router_c = network_3.Router(routing_dict, name='router_c', intf_count=1, max_queue_size=router_queue_size)
object_L.append(router_c)
router_d = network_3.Router(routing_dict, name='router_d', intf_count=2, max_queue_size=router_queue_size)
object_L.append(router_d)
link_layer = link_3.LinkLayer()
object_L.append(link_layer)
link_layer.add_link(link_3.Link(host_1, 0, router_a, 0, 50))
link_layer.add_link(link_3.Link(host_2, 0, router_a, 0, 50))
link_layer.add_link(link_3.Link(router_a, 0,router_b, 0, 50))
link_layer.add_link(link_3.Link(router_a, 1, router_c, 0, 50))
link_layer.add_link(link_3.Link(router_b, 0, router_d, 0, 50))
link_layer.add_link(link_3.Link(router_c, 0, router_d, 0, 50))
link_layer.add_link(link_3.Link(router_d, 0, host_3, 0, 50))
link_layer.add_link(link_3.Link(router_d, 1, host_4, 0, 50))
thread_L = []
thread_L.append(threading.Thread(name=host_1.__str__(), target=host_1.run))
thread_L.append(threading.Thread(name=host_2.__str__(), target=host_2.run))
thread_L.append(threading.Thread(name=host_3.__str__(), target=host_3.run))
thread_L.append(threading.Thread(name=host_4.__str__(), target=host_4.run))
thread_L.append(threading.Thread(name=router_a.__str__(), target=router_a.run))
thread_L.append(threading.Thread(name=router_b.__str__(), target=router_b.run))
thread_L.append(threading.Thread(name=router_c.__str__(), target=router_c.run))
thread_L.append(threading.Thread(name=router_d.__str__(), target=router_d.run))
thread_L.append(threading.Thread(name="Network", target=link_layer.run))
for t in thread_L:
t.start()
for i in range(1):
host_1.udt_send(1,4,"from 1 to 4")
host_2.udt_send(2,4, "from 2 to 4")
host_2.udt_send(2,3, "from 2 to 3")
host_1.udt_send(1,3, "from 1 to 3")
sleep(simulation_time)
for o in object_L:
o.stop = True
for t in thread_L:
t.join()
print("All simulation threads joined")
| true | true |
f7267bf3fd0336a906e4c44012962c01fabd366c | 799 | py | Python | Hackerrank_codes/arithmetic_operators.py | Vyshnavmt94/HackerRankTasks | 634c71ccf0bea7585498bcd7d63e34d0334b4678 | [
"MIT"
] | null | null | null | Hackerrank_codes/arithmetic_operators.py | Vyshnavmt94/HackerRankTasks | 634c71ccf0bea7585498bcd7d63e34d0334b4678 | [
"MIT"
] | null | null | null | Hackerrank_codes/arithmetic_operators.py | Vyshnavmt94/HackerRankTasks | 634c71ccf0bea7585498bcd7d63e34d0334b4678 | [
"MIT"
] | null | null | null | """
Task
The provided code stub reads two integers from STDIN, and . Add code to print three lines where:
The first line contains the sum of the two numbers.
The second line contains the difference of the two numbers (first - second).
The third line contains the product of the two numbers.
Example
Print the following:
8
-2
15
Input Format
The first line contains the first integer, .
The second line contains the second integer, .
Constraints
Output Format
Print the three lines as explained above.
Sample Input 0
3
2
Sample Output 0
5
1
6
Explanation 0
"""
def check(n):
    """Return True if n is within the accepted range [1, 10**10], else False.

    The original returned an implicit None for out-of-range values; an
    explicit bool is clearer and stays falsy for truthiness-based callers,
    so existing `if check(a) and check(b):` usage is unaffected.
    """
    return 1 <= n <= 10**10
if __name__ == '__main__':
    # Read two integers, one per line, from stdin.
    a = int(input())
    b = int(input())
    # Only produce output when both values pass the range check.
    if check(a) and check(b):
        print(a+b)
        print(a-b)
        print(a*b)
def check(n):
if n>=1 and n<=10**10:
return True
if __name__ == '__main__':
a = int(input())
b = int(input())
if check(a) and check(b):
print(a+b)
print(a-b)
print(a*b) | true | true |
f7267c622825114ec43fa387fd5603751837e983 | 3,783 | py | Python | allure-pytest/test/acceptance/status/base_teardown_status_test.py | ammarnajjar/allure-python | 975aaf94d75428330c07976c1cdfc364b9a3cafa | [
"Apache-2.0"
] | 5 | 2018-02-12T11:40:38.000Z | 2018-06-10T20:29:00.000Z | allure-pytest/test/acceptance/status/base_teardown_status_test.py | ammarnajjar/allure-python | 975aaf94d75428330c07976c1cdfc364b9a3cafa | [
"Apache-2.0"
] | null | null | null | allure-pytest/test/acceptance/status/base_teardown_status_test.py | ammarnajjar/allure-python | 975aaf94d75428330c07976c1cdfc364b9a3cafa | [
"Apache-2.0"
] | 1 | 2020-01-25T03:54:39.000Z | 2020-01-25T03:54:39.000Z | from hamcrest import assert_that
from allure_commons_test.report import has_test_case
from allure_commons_test.result import with_status
from allure_commons_test.result import has_status_details
from allure_commons_test.result import with_message_contains
from allure_commons_test.result import with_trace_contains
from allure_commons_test.container import has_container
from allure_commons_test.container import has_after
# The docstring below is not documentation: the executed_docstring_source
# fixture extracts and runs it as a pytest module, so its text must not change.
def test_failed_finalizer_fixture(executed_docstring_source):
    """
    >>> import pytest
    >>> @pytest.fixture
    ... def failed_finalizer_fixture(request):
    ...     def fixture_finalizer():
    ...         assert False
    ...     request.addfinalizer(fixture_finalizer)
    ...
    ... def test_failed_finalizer_fixture_example(failed_finalizer_fixture):
    ...     pass
    """
    # A failing finalizer should mark both the test case and its "after"
    # container entry as failed, with the AssertionError message and trace.
    assert_that(executed_docstring_source.allure_report,
                has_test_case("test_failed_finalizer_fixture_example",
                              with_status("failed"),
                              has_status_details(with_message_contains("AssertionError"),
                                                 with_trace_contains("def fixture_finalizer():")
                                                 ),
                              has_container(executed_docstring_source.allure_report,
                                            has_after("{fixture}::{finalizer}".format(
                                                fixture="failed_finalizer_fixture",
                                                finalizer="fixture_finalizer"),
                                                      with_status("failed"),
                                                      has_status_details(with_message_contains("AssertionError"),
                                                                         with_trace_contains("fixture_finalizer")
                                                                         ),
                                                      ),
                                            )
                              )
                )
# The docstring below is not documentation: the executed_docstring_source
# fixture extracts and runs it as a pytest module, so its text must not change.
def test_pytest_failed_finalizer_fixture(executed_docstring_source):
    """
    >>> import pytest
    >>> @pytest.fixture
    ... def pytest_failed_finalizer_fixture(request):
    ...     def fixture_finalizer():
    ...         pytest.fail()
    ...     request.addfinalizer(fixture_finalizer)

    >>> def test_pytest_failed_finalizer_fixture_example(pytest_failed_finalizer_fixture):
    ...     pass
    """
    # Same as the AssertionError case above, but the finalizer fails via
    # pytest.fail(), so the expected message is "Failed: <Failed instance>".
    assert_that(executed_docstring_source.allure_report,
                has_test_case("test_pytest_failed_finalizer_fixture_example",
                              with_status("failed"),
                              has_status_details(with_message_contains("Failed: <Failed instance>"),
                                                 with_trace_contains("def fixture_finalizer():")
                                                 ),
                              has_container(executed_docstring_source.allure_report,
                                            has_after("{fixture}::{finalizer}".format(
                                                fixture="pytest_failed_finalizer_fixture",
                                                finalizer="fixture_finalizer"),
                                                      with_status("failed"),
                                                      has_status_details(with_message_contains("Failed: <Failed instance>"),
                                                                         with_trace_contains("fixture_finalizer")
                                                                         ),
                                                      ),
                                            )
                              )
                )
| 49.12987 | 118 | 0.487708 | from hamcrest import assert_that
from allure_commons_test.report import has_test_case
from allure_commons_test.result import with_status
from allure_commons_test.result import has_status_details
from allure_commons_test.result import with_message_contains
from allure_commons_test.result import with_trace_contains
from allure_commons_test.container import has_container
from allure_commons_test.container import has_after
def test_failed_finalizer_fixture(executed_docstring_source):
assert_that(executed_docstring_source.allure_report,
has_test_case("test_failed_finalizer_fixture_example",
with_status("failed"),
has_status_details(with_message_contains("AssertionError"),
with_trace_contains("def fixture_finalizer():")
),
has_container(executed_docstring_source.allure_report,
has_after("{fixture}::{finalizer}".format(
fixture="failed_finalizer_fixture",
finalizer="fixture_finalizer"),
with_status("failed"),
has_status_details(with_message_contains("AssertionError"),
with_trace_contains("fixture_finalizer")
),
),
)
)
)
def test_pytest_failed_finalizer_fixture(executed_docstring_source):
assert_that(executed_docstring_source.allure_report,
has_test_case("test_pytest_failed_finalizer_fixture_example",
with_status("failed"),
has_status_details(with_message_contains("Failed: <Failed instance>"),
with_trace_contains("def fixture_finalizer():")
),
has_container(executed_docstring_source.allure_report,
has_after("{fixture}::{finalizer}".format(
fixture="pytest_failed_finalizer_fixture",
finalizer="fixture_finalizer"),
with_status("failed"),
has_status_details(with_message_contains("Failed: <Failed instance>"),
with_trace_contains("fixture_finalizer")
),
),
)
)
)
| true | true |
f7267d5166a3b5c62a2fa38a85120010bbb91685 | 264 | py | Python | backend/authentication/admin.py | rajc1729/django-nextjs-realtime | 4d551f544729cc71a02878ad198dab665840987a | [
"MIT"
] | null | null | null | backend/authentication/admin.py | rajc1729/django-nextjs-realtime | 4d551f544729cc71a02878ad198dab665840987a | [
"MIT"
] | null | null | null | backend/authentication/admin.py | rajc1729/django-nextjs-realtime | 4d551f544729cc71a02878ad198dab665840987a | [
"MIT"
] | null | null | null | from django.contrib import admin
from authentication.models import CustomUser
# Register your models here.
class CustomUserAdmin(admin.ModelAdmin):
    """Admin configuration for CustomUser."""
    # Columns shown in the admin change-list view.
    list_display = ['username','first_name','last_name','email']
# Register the model with its custom admin class.
admin.site.register(CustomUser, CustomUserAdmin)
from authentication.models import CustomUser
class CustomUserAdmin(admin.ModelAdmin):
list_display = ['username','first_name','last_name','email']
admin.site.register(CustomUser, CustomUserAdmin) | true | true |
f7267f0cc12c13e4532cd65a56c9b3d01c13894f | 14,738 | py | Python | selfdrive/locationd/locationd.py | sejongjoa/openpilot_083 | ff8277b6b51241af6b9ba37dcf55dcb6bfcc9d18 | [
"MIT"
] | 20 | 2020-12-04T12:20:57.000Z | 2022-03-31T00:40:15.000Z | selfdrive/locationd/locationd.py | wangyueguo/- | 301500dff6bd53e64257898cac939b24f56befac | [
"MIT"
] | 6 | 2020-03-06T18:13:55.000Z | 2020-07-20T05:10:20.000Z | selfdrive/locationd/locationd.py | wangyueguo/- | 301500dff6bd53e64257898cac939b24f56befac | [
"MIT"
] | 35 | 2021-03-18T23:28:11.000Z | 2021-06-24T17:36:22.000Z | #!/usr/bin/env python3
import json
import numpy as np
import sympy as sp
import cereal.messaging as messaging
from cereal import log
from common.params import Params
import common.transformations.coordinates as coord
from common.transformations.orientation import ecef_euler_from_ned, \
euler_from_quat, \
ned_euler_from_ecef, \
quat_from_euler, euler_from_rot, \
rot_from_quat, rot_from_euler
from rednose.helpers import KalmanError
from selfdrive.locationd.models.live_kf import LiveKalman, States, ObservationKind
from selfdrive.locationd.models.constants import GENERATED_DIR
from selfdrive.swaglog import cloudlog
#from datetime import datetime
#from laika.gps_time import GPSTime
from sympy.utilities.lambdify import lambdify
from rednose.helpers.sympy_helpers import euler_rotate
# Shorthand for the capnp sensor-source enum used in handle_sensors.
SensorSource = log.SensorEventData.SensorSource
VISION_DECIMATION = 2    # process every 2nd cameraOdometry message
SENSOR_DECIMATION = 10   # process every 10th IMU/speed sample
POSENET_STD_HIST = 40    # length of the rolling posenet translation-std history
def to_float(arr):
  """Convert the first three elements of *arr* into a list of Python floats."""
  return [float(arr[i]) for i in range(3)]
def get_H():
  """Build a numeric function evaluating the Jacobian of the local-velocity
  observation model.

  Symbolically rotates an ECEF velocity (vx, vy, vz) into the device frame
  via the (roll, pitch, yaw) Euler rotation, takes the Jacobian with respect
  to all six inputs, and lambdifies it. The returned callable takes
  (roll, pitch, yaw, vx, vy, vz) and returns the 3x6 Jacobian matrix.
  """
  # this returns a function to eval the jacobian
  # of the observation function of the local vel
  roll = sp.Symbol('roll')
  pitch = sp.Symbol('pitch')
  yaw = sp.Symbol('yaw')
  vx = sp.Symbol('vx')
  vy = sp.Symbol('vy')
  vz = sp.Symbol('vz')
  # h = R(roll, pitch, yaw)^T * v: ECEF velocity expressed in the device frame
  h = euler_rotate(roll, pitch, yaw).T*(sp.Matrix([vx, vy, vz]))
  H = h.jacobian(sp.Matrix([roll, pitch, yaw, vx, vy, vz]))
  H_f = lambdify([roll, pitch, yaw, vx, vy, vz], H)
  return H_f
class Localizer():
  """Fuses GPS, IMU, camera odometry, speed and calibration into a live
  Kalman-filtered pose/velocity estimate (published as liveLocationKalman)."""
  def __init__(self, disabled_logs=None, dog=None):
    if disabled_logs is None:
      disabled_logs = []
    self.kf = LiveKalman(GENERATED_DIR)
    self.reset_kalman()
    self.max_age = .1  # seconds
    self.disabled_logs = disabled_logs
    # Calibration: rotation between device frame and calibrated (road) frame.
    self.calib = np.zeros(3)
    self.device_from_calib = np.eye(3)
    self.calib_from_device = np.eye(3)
    self.calibrated = False
    # Numeric Jacobian of the local-velocity observation (see get_H).
    self.H = get_H()
    self.posenet_invalid_count = 0
    self.posenet_speed = 0
    self.car_speed = 0
    # Rolling history of posenet translation stds, initialized high.
    self.posenet_stds = 10*np.ones((POSENET_STD_HIST))
    # ECEF<->NED converter anchored at the filter's current position.
    self.converter = coord.LocalCoord.from_ecef(self.kf.x[States.ECEF_POS])
    self.unix_timestamp_millis = 0
    self.last_gps_fix = 0
    self.device_fell = False
  @staticmethod
  def msg_from_state(converter, calib_from_device, H, predicted_state, predicted_cov, calibrated):
    """Project the filter state/covariance into every frame the
    liveLocationKalman message reports (ECEF, NED, device, calibrated)."""
    predicted_std = np.sqrt(np.diagonal(predicted_cov))
    fix_ecef = predicted_state[States.ECEF_POS]
    fix_ecef_std = predicted_std[States.ECEF_POS_ERR]
    vel_ecef = predicted_state[States.ECEF_VELOCITY]
    vel_ecef_std = predicted_std[States.ECEF_VELOCITY_ERR]
    fix_pos_geo = coord.ecef2geodetic(fix_ecef)
    #fix_pos_geo_std = np.abs(coord.ecef2geodetic(fix_ecef + fix_ecef_std) - fix_pos_geo)
    orientation_ecef = euler_from_quat(predicted_state[States.ECEF_ORIENTATION])
    orientation_ecef_std = predicted_std[States.ECEF_ORIENTATION_ERR]
    device_from_ecef = rot_from_quat(predicted_state[States.ECEF_ORIENTATION]).T
    calibrated_orientation_ecef = euler_from_rot(calib_from_device.dot(device_from_ecef))
    # Rotate acceleration / angular velocity (and their covariances) into the
    # calibrated frame: std = sqrt(diag(R * Cov * R^T)).
    acc_calib = calib_from_device.dot(predicted_state[States.ACCELERATION])
    acc_calib_std = np.sqrt(np.diagonal(calib_from_device.dot(
      predicted_cov[States.ACCELERATION_ERR, States.ACCELERATION_ERR]).dot(
      calib_from_device.T)))
    ang_vel_calib = calib_from_device.dot(predicted_state[States.ANGULAR_VELOCITY])
    ang_vel_calib_std = np.sqrt(np.diagonal(calib_from_device.dot(
      predicted_cov[States.ANGULAR_VELOCITY_ERR, States.ANGULAR_VELOCITY_ERR]).dot(
      calib_from_device.T)))
    vel_device = device_from_ecef.dot(vel_ecef)
    device_from_ecef_eul = euler_from_quat(predicted_state[States.ECEF_ORIENTATION]).T
    # Device-frame velocity covariance via the observation Jacobian H over
    # the (orientation error, velocity error) sub-covariance.
    idxs = list(range(States.ECEF_ORIENTATION_ERR.start, States.ECEF_ORIENTATION_ERR.stop)) + \
           list(range(States.ECEF_VELOCITY_ERR.start, States.ECEF_VELOCITY_ERR.stop))
    condensed_cov = predicted_cov[idxs][:, idxs]
    HH = H(*list(np.concatenate([device_from_ecef_eul, vel_ecef])))
    vel_device_cov = HH.dot(condensed_cov).dot(HH.T)
    vel_device_std = np.sqrt(np.diagonal(vel_device_cov))
    vel_calib = calib_from_device.dot(vel_device)
    vel_calib_std = np.sqrt(np.diagonal(calib_from_device.dot(
      vel_device_cov).dot(calib_from_device.T)))
    orientation_ned = ned_euler_from_ecef(fix_ecef, orientation_ecef)
    #orientation_ned_std = ned_euler_from_ecef(fix_ecef, orientation_ecef + orientation_ecef_std) - orientation_ned
    ned_vel = converter.ecef2ned(fix_ecef + vel_ecef) - converter.ecef2ned(fix_ecef)
    #ned_vel_std = self.converter.ecef2ned(fix_ecef + vel_ecef + vel_ecef_std) - self.converter.ecef2ned(fix_ecef + vel_ecef)
    fix = messaging.log.LiveLocationKalman.new_message()
    # write measurements to msg
    measurements = [
      # measurement field, value, std, valid
      (fix.positionGeodetic, fix_pos_geo, np.nan*np.zeros(3), True),
      (fix.positionECEF, fix_ecef, fix_ecef_std, True),
      (fix.velocityECEF, vel_ecef, vel_ecef_std, True),
      (fix.velocityNED, ned_vel, np.nan*np.zeros(3), True),
      (fix.velocityDevice, vel_device, vel_device_std, True),
      (fix.accelerationDevice, predicted_state[States.ACCELERATION], predicted_std[States.ACCELERATION_ERR], True),
      (fix.orientationECEF, orientation_ecef, orientation_ecef_std, True),
      (fix.calibratedOrientationECEF, calibrated_orientation_ecef, np.nan*np.zeros(3), calibrated),
      (fix.orientationNED, orientation_ned, np.nan*np.zeros(3), True),
      (fix.angularVelocityDevice, predicted_state[States.ANGULAR_VELOCITY], predicted_std[States.ANGULAR_VELOCITY_ERR], True),
      (fix.velocityCalibrated, vel_calib, vel_calib_std, calibrated),
      (fix.angularVelocityCalibrated, ang_vel_calib, ang_vel_calib_std, calibrated),
      (fix.accelerationCalibrated, acc_calib, acc_calib_std, calibrated),
    ]
    for field, value, std, valid in measurements:
      # TODO: can we write the lists faster?
      field.value = to_float(value)
      field.std = to_float(std)
      field.valid = valid
    return fix
  def liveLocationMsg(self):
    """Build the outgoing liveLocationKalman message from the current state,
    plus derived health flags (posenetOK, deviceStable, status)."""
    fix = self.msg_from_state(self.converter, self.calib_from_device, self.H, self.kf.x, self.kf.P, self.calibrated)
    # experimentally found these values, no false positives in 20k minutes of driving
    old_mean, new_mean = np.mean(self.posenet_stds[:POSENET_STD_HIST//2]), np.mean(self.posenet_stds[POSENET_STD_HIST//2:])
    std_spike = new_mean/old_mean > 4 and new_mean > 7
    # Posenet is flagged bad only when the std spikes while actually moving.
    fix.posenetOK = not (std_spike and self.car_speed > 5)
    fix.deviceStable = not self.device_fell
    self.device_fell = False  # one-shot flag, cleared after reporting
    #fix.gpsWeek = self.time.week
    #fix.gpsTimeOfWeek = self.time.tow
    fix.unixTimestampMillis = self.unix_timestamp_millis
    # Status gates on position-uncertainty (<50 m) and calibration.
    if np.linalg.norm(fix.positionECEF.std) < 50 and self.calibrated:
      fix.status = 'valid'
    elif np.linalg.norm(fix.positionECEF.std) < 50:
      fix.status = 'uncalibrated'
    else:
      fix.status = 'uninitialized'
    return fix
  def update_kalman(self, time, kind, meas, R=None):
    """Feed one observation into the filter; on filter failure, reset it."""
    try:
      self.kf.predict_and_observe(time, kind, meas, R)
    except KalmanError:
      cloudlog.error("Error in predict and observe, kalman reset")
      self.reset_kalman()
  def handle_gps(self, current_time, log):
    """Process a gpsLocationExternal message: rebase the local converter,
    sanity-check the filter against GPS, and observe position/velocity."""
    # ignore the message if the fix is invalid
    if log.flags % 2 == 0:
      return
    self.last_gps_fix = current_time
    self.converter = coord.LocalCoord.from_geodetic([log.latitude, log.longitude, log.altitude])
    ecef_pos = self.converter.ned2ecef([0, 0, 0])
    ecef_vel = self.converter.ned2ecef(np.array(log.vNED)) - ecef_pos
    # Measurement noise from the reported GPS accuracies.
    ecef_pos_R = np.diag([(3*log.verticalAccuracy)**2]*3)
    ecef_vel_R = np.diag([(log.speedAccuracy)**2]*3)
    #self.time = GPSTime.from_datetime(datetime.utcfromtimestamp(log.timestamp*1e-3))
    self.unix_timestamp_millis = log.timestamp
    gps_est_error = np.sqrt((self.kf.x[0] - ecef_pos[0])**2 +
                            (self.kf.x[1] - ecef_pos[1])**2 +
                            (self.kf.x[2] - ecef_pos[2])**2)
    orientation_ecef = euler_from_quat(self.kf.x[States.ECEF_ORIENTATION])
    orientation_ned = ned_euler_from_ecef(ecef_pos, orientation_ecef)
    orientation_ned_gps = np.array([0, 0, np.radians(log.bearingDeg)])
    # Wrap the yaw difference into (-pi, pi].
    orientation_error = np.mod(orientation_ned - orientation_ned_gps - np.pi, 2*np.pi) - np.pi
    initial_pose_ecef_quat = quat_from_euler(ecef_euler_from_ned(ecef_pos, orientation_ned_gps))
    # Reset the filter when it disagrees too much with GPS (orientation while
    # moving, or >50 m position error).
    if np.linalg.norm(ecef_vel) > 5 and np.linalg.norm(orientation_error) > 1:
      cloudlog.error("Locationd vs ubloxLocation orientation difference too large, kalman reset")
      self.reset_kalman(init_pos=ecef_pos, init_orient=initial_pose_ecef_quat)
      self.update_kalman(current_time, ObservationKind.ECEF_ORIENTATION_FROM_GPS, initial_pose_ecef_quat)
    elif gps_est_error > 50:
      cloudlog.error("Locationd vs ubloxLocation position difference too large, kalman reset")
      self.reset_kalman(init_pos=ecef_pos, init_orient=initial_pose_ecef_quat)
    self.update_kalman(current_time, ObservationKind.ECEF_POS, ecef_pos, R=ecef_pos_R)
    self.update_kalman(current_time, ObservationKind.ECEF_VEL, ecef_vel, R=ecef_vel_R)
  def handle_car_state(self, current_time, log):
    """Observe decimated wheel speed; a standstill also pins rotation to zero."""
    self.speed_counter += 1
    if self.speed_counter % SENSOR_DECIMATION == 0:
      self.update_kalman(current_time, ObservationKind.ODOMETRIC_SPEED, [log.vEgo])
      self.car_speed = abs(log.vEgo)
      if log.vEgo == 0:
        self.update_kalman(current_time, ObservationKind.NO_ROT, [0, 0, 0])
  def handle_cam_odo(self, current_time, log):
    """Observe decimated camera-odometry rotation and translation (rotated
    from the calibrated frame into the device frame); track posenet stds."""
    self.cam_counter += 1
    if self.cam_counter % VISION_DECIMATION == 0:
      rot_device = self.device_from_calib.dot(log.rot)
      rot_device_std = self.device_from_calib.dot(log.rotStd)
      self.update_kalman(current_time,
                         ObservationKind.CAMERA_ODO_ROTATION,
                         np.concatenate([rot_device, 10*rot_device_std]))
      trans_device = self.device_from_calib.dot(log.trans)
      trans_device_std = self.device_from_calib.dot(log.transStd)
      self.posenet_speed = np.linalg.norm(trans_device)
      # Shift the rolling std history and append the newest value.
      self.posenet_stds[:-1] = self.posenet_stds[1:]
      self.posenet_stds[-1] = trans_device_std[0]
      self.update_kalman(current_time,
                         ObservationKind.CAMERA_ODO_TRANSLATION,
                         np.concatenate([trans_device, 10*trans_device_std]))
  def handle_sensors(self, current_time, log):
    """Process a batch of sensorEvents: decimated gyro and accelerometer
    observations, plus fall detection from large accelerations."""
    # TODO does not yet account for double sensor readings in the log
    for sensor_reading in log:
      sensor_time = 1e-9 * sensor_reading.timestamp
      # TODO: handle messages from two IMUs at the same time
      if sensor_reading.source == SensorSource.lsm6ds3:
        continue
      # Gyro Uncalibrated (sensor 5, type 16); axes remapped/negated into
      # the filter's expected frame.
      if sensor_reading.sensor == 5 and sensor_reading.type == 16:
        self.gyro_counter += 1
        if self.gyro_counter % SENSOR_DECIMATION == 0:
          v = sensor_reading.gyroUncalibrated.v
          self.update_kalman(sensor_time, ObservationKind.PHONE_GYRO, [-v[2], -v[1], -v[0]])
      # Accelerometer (sensor 1, type 1)
      if sensor_reading.sensor == 1 and sensor_reading.type == 1:
        # check if device fell, estimate 10 for g
        # 40m/s**2 is a good filter for falling detection, no false positives in 20k minutes of driving
        self.device_fell = self.device_fell or (np.linalg.norm(np.array(sensor_reading.acceleration.v) - np.array([10, 0, 0])) > 40)
        self.acc_counter += 1
        if self.acc_counter % SENSOR_DECIMATION == 0:
          v = sensor_reading.acceleration.v
          self.update_kalman(sensor_time, ObservationKind.PHONE_ACCEL, [-v[2], -v[1], -v[0]])
  def handle_live_calib(self, current_time, log):
    """Update the device<->calibrated rotation from a liveCalibration message."""
    if len(log.rpyCalib):
      self.calib = log.rpyCalib
      self.device_from_calib = rot_from_euler(self.calib)
      self.calib_from_device = self.device_from_calib.T
      self.calibrated = log.calStatus == 1
  def reset_kalman(self, current_time=None, init_orient=None, init_pos=None):
    """Re-initialize the filter state (optionally seeding orientation and
    position) and zero all decimation counters."""
    self.filter_time = current_time
    init_x = LiveKalman.initial_x.copy()
    # too nonlinear to init on completely wrong
    if init_orient is not None:
      init_x[3:7] = init_orient  # quaternion occupies state slots 3..6
    if init_pos is not None:
      init_x[:3] = init_pos
    self.kf.init_state(init_x, covs=np.diag(LiveKalman.initial_P_diag), filter_time=current_time)
    self.observation_buffer = []
    self.gyro_counter = 0
    self.acc_counter = 0
    self.speed_counter = 0
    self.cam_counter = 0
def locationd_thread(sm, pm, disabled_logs=None):
    """Main loop: route incoming messages to the Localizer, publish liveLocationKalman.

    Args:
        sm: SubMaster, or None to construct one over the default sockets.
        pm: PubMaster, or None to construct one publishing liveLocationKalman.
        disabled_logs: optional list of log types the Localizer should ignore.
    """
    if disabled_logs is None:
        disabled_logs = []
    if sm is None:
        socks = ['gpsLocationExternal', 'sensorEvents', 'cameraOdometry', 'liveCalibration', 'carState']
        # GPS can legitimately be missing (e.g. no fix), so it is not required alive.
        sm = messaging.SubMaster(socks, ignore_alive=['gpsLocationExternal'])
    if pm is None:
        pm = messaging.PubMaster(['liveLocationKalman'])
    params = Params()
    localizer = Localizer(disabled_logs=disabled_logs)
    while True:
        sm.update()
        # Dispatch every fresh, valid message to the matching handler.
        for sock, updated in sm.updated.items():
            if updated and sm.valid[sock]:
                t = sm.logMonoTime[sock] * 1e-9  # mono time is in nanoseconds
                if sock == "sensorEvents":
                    localizer.handle_sensors(t, sm[sock])
                elif sock == "gpsLocationExternal":
                    localizer.handle_gps(t, sm[sock])
                elif sock == "carState":
                    localizer.handle_car_state(t, sm[sock])
                elif sock == "cameraOdometry":
                    localizer.handle_cam_odo(t, sm[sock])
                elif sock == "liveCalibration":
                    localizer.handle_live_calib(t, sm[sock])
        # Output is published at cameraOdometry rate.
        if sm.updated['cameraOdometry']:
            t = sm.logMonoTime['cameraOdometry']
            msg = messaging.new_message('liveLocationKalman')
            msg.logMonoTime = t
            msg.liveLocationKalman = localizer.liveLocationMsg()
            msg.liveLocationKalman.inputsOK = sm.all_alive_and_valid()
            msg.liveLocationKalman.sensorsOK = sm.alive['sensorEvents'] and sm.valid['sensorEvents']
            gps_age = (t / 1e9) - localizer.last_gps_fix
            msg.liveLocationKalman.gpsOK = gps_age < 1.0  # fix seen within the last second
            pm.send('liveLocationKalman', msg)
            if sm.frame % 1200 == 0 and msg.liveLocationKalman.gpsOK:  # once a minute
                location = {
                    'latitude': msg.liveLocationKalman.positionGeodetic.value[0],
                    'longitude': msg.liveLocationKalman.positionGeodetic.value[1],
                    'altitude': msg.liveLocationKalman.positionGeodetic.value[2],
                }
                # NOTE(review): presumably used to seed position on next boot — confirm.
                params.put("LastGPSPosition", json.dumps(location))
def main(sm=None, pm=None):
    """Entry point; sm/pm overrides allow injection for testing and replay."""
    locationd_thread(sm, pm)


if __name__ == "__main__":
    import os
    # Keep numeric libraries single-threaded; locationd runs one loop.
    os.environ["OMP_NUM_THREADS"] = "1"
    main()
| 41.988604 | 132 | 0.70688 |
import json
import numpy as np
import sympy as sp
import cereal.messaging as messaging
from cereal import log
from common.params import Params
import common.transformations.coordinates as coord
from common.transformations.orientation import ecef_euler_from_ned, \
euler_from_quat, \
ned_euler_from_ecef, \
quat_from_euler, euler_from_rot, \
rot_from_quat, rot_from_euler
from rednose.helpers import KalmanError
from selfdrive.locationd.models.live_kf import LiveKalman, States, ObservationKind
from selfdrive.locationd.models.constants import GENERATED_DIR
from selfdrive.swaglog import cloudlog
from sympy.utilities.lambdify import lambdify
from rednose.helpers.sympy_helpers import euler_rotate
SensorSource = log.SensorEventData.SensorSource
VISION_DECIMATION = 2
SENSOR_DECIMATION = 10
POSENET_STD_HIST = 40
def to_float(arr):
    """Return *arr* as a list of builtin Python floats.

    Generalized from a fixed three-element unpack to any iterable of
    numeric values; for the 3-vectors used throughout this module the
    result is unchanged. Builtin floats are needed because numpy scalars
    are not accepted by the messaging layer's field setters.
    """
    return [float(v) for v in arr]
def get_H():
    """Build a fast numeric Jacobian for device-frame velocity.

    Symbolically rotates the velocity [vx, vy, vz] by the euler angles
    (roll, pitch, yaw) via euler_rotate(...).T, then differentiates with
    respect to all six inputs. Returns a lambdified function
    H(roll, pitch, yaw, vx, vy, vz) -> 3x6 Jacobian, used by
    msg_from_state to propagate orientation/velocity covariance into a
    device-frame velocity standard deviation.
    """
    roll = sp.Symbol('roll')
    pitch = sp.Symbol('pitch')
    yaw = sp.Symbol('yaw')
    vx = sp.Symbol('vx')
    vy = sp.Symbol('vy')
    vz = sp.Symbol('vz')
    h = euler_rotate(roll, pitch, yaw).T*(sp.Matrix([vx, vy, vz]))
    H = h.jacobian(sp.Matrix([roll, pitch, yaw, vx, vy, vz]))
    H_f = lambdify([roll, pitch, yaw, vx, vy, vz], H)
    return H_f
class Localizer():
def __init__(self, disabled_logs=None, dog=None):
if disabled_logs is None:
disabled_logs = []
self.kf = LiveKalman(GENERATED_DIR)
self.reset_kalman()
self.max_age = .1
self.disabled_logs = disabled_logs
self.calib = np.zeros(3)
self.device_from_calib = np.eye(3)
self.calib_from_device = np.eye(3)
self.calibrated = False
self.H = get_H()
self.posenet_invalid_count = 0
self.posenet_speed = 0
self.car_speed = 0
self.posenet_stds = 10*np.ones((POSENET_STD_HIST))
self.converter = coord.LocalCoord.from_ecef(self.kf.x[States.ECEF_POS])
self.unix_timestamp_millis = 0
self.last_gps_fix = 0
self.device_fell = False
@staticmethod
def msg_from_state(converter, calib_from_device, H, predicted_state, predicted_cov, calibrated):
predicted_std = np.sqrt(np.diagonal(predicted_cov))
fix_ecef = predicted_state[States.ECEF_POS]
fix_ecef_std = predicted_std[States.ECEF_POS_ERR]
vel_ecef = predicted_state[States.ECEF_VELOCITY]
vel_ecef_std = predicted_std[States.ECEF_VELOCITY_ERR]
fix_pos_geo = coord.ecef2geodetic(fix_ecef)
orientation_ecef = euler_from_quat(predicted_state[States.ECEF_ORIENTATION])
orientation_ecef_std = predicted_std[States.ECEF_ORIENTATION_ERR]
device_from_ecef = rot_from_quat(predicted_state[States.ECEF_ORIENTATION]).T
calibrated_orientation_ecef = euler_from_rot(calib_from_device.dot(device_from_ecef))
acc_calib = calib_from_device.dot(predicted_state[States.ACCELERATION])
acc_calib_std = np.sqrt(np.diagonal(calib_from_device.dot(
predicted_cov[States.ACCELERATION_ERR, States.ACCELERATION_ERR]).dot(
calib_from_device.T)))
ang_vel_calib = calib_from_device.dot(predicted_state[States.ANGULAR_VELOCITY])
ang_vel_calib_std = np.sqrt(np.diagonal(calib_from_device.dot(
predicted_cov[States.ANGULAR_VELOCITY_ERR, States.ANGULAR_VELOCITY_ERR]).dot(
calib_from_device.T)))
vel_device = device_from_ecef.dot(vel_ecef)
device_from_ecef_eul = euler_from_quat(predicted_state[States.ECEF_ORIENTATION]).T
idxs = list(range(States.ECEF_ORIENTATION_ERR.start, States.ECEF_ORIENTATION_ERR.stop)) + \
list(range(States.ECEF_VELOCITY_ERR.start, States.ECEF_VELOCITY_ERR.stop))
condensed_cov = predicted_cov[idxs][:, idxs]
HH = H(*list(np.concatenate([device_from_ecef_eul, vel_ecef])))
vel_device_cov = HH.dot(condensed_cov).dot(HH.T)
vel_device_std = np.sqrt(np.diagonal(vel_device_cov))
vel_calib = calib_from_device.dot(vel_device)
vel_calib_std = np.sqrt(np.diagonal(calib_from_device.dot(
vel_device_cov).dot(calib_from_device.T)))
orientation_ned = ned_euler_from_ecef(fix_ecef, orientation_ecef)
ned_vel = converter.ecef2ned(fix_ecef + vel_ecef) - converter.ecef2ned(fix_ecef)
fix = messaging.log.LiveLocationKalman.new_message()
measurements = [
(fix.positionGeodetic, fix_pos_geo, np.nan*np.zeros(3), True),
(fix.positionECEF, fix_ecef, fix_ecef_std, True),
(fix.velocityECEF, vel_ecef, vel_ecef_std, True),
(fix.velocityNED, ned_vel, np.nan*np.zeros(3), True),
(fix.velocityDevice, vel_device, vel_device_std, True),
(fix.accelerationDevice, predicted_state[States.ACCELERATION], predicted_std[States.ACCELERATION_ERR], True),
(fix.orientationECEF, orientation_ecef, orientation_ecef_std, True),
(fix.calibratedOrientationECEF, calibrated_orientation_ecef, np.nan*np.zeros(3), calibrated),
(fix.orientationNED, orientation_ned, np.nan*np.zeros(3), True),
(fix.angularVelocityDevice, predicted_state[States.ANGULAR_VELOCITY], predicted_std[States.ANGULAR_VELOCITY_ERR], True),
(fix.velocityCalibrated, vel_calib, vel_calib_std, calibrated),
(fix.angularVelocityCalibrated, ang_vel_calib, ang_vel_calib_std, calibrated),
(fix.accelerationCalibrated, acc_calib, acc_calib_std, calibrated),
]
for field, value, std, valid in measurements:
field.value = to_float(value)
field.std = to_float(std)
field.valid = valid
return fix
def liveLocationMsg(self):
fix = self.msg_from_state(self.converter, self.calib_from_device, self.H, self.kf.x, self.kf.P, self.calibrated)
old_mean, new_mean = np.mean(self.posenet_stds[:POSENET_STD_HIST//2]), np.mean(self.posenet_stds[POSENET_STD_HIST//2:])
std_spike = new_mean/old_mean > 4 and new_mean > 7
fix.posenetOK = not (std_spike and self.car_speed > 5)
fix.deviceStable = not self.device_fell
self.device_fell = False
fix.unixTimestampMillis = self.unix_timestamp_millis
if np.linalg.norm(fix.positionECEF.std) < 50 and self.calibrated:
fix.status = 'valid'
elif np.linalg.norm(fix.positionECEF.std) < 50:
fix.status = 'uncalibrated'
else:
fix.status = 'uninitialized'
return fix
def update_kalman(self, time, kind, meas, R=None):
    """Apply one observation to the filter; on failure reset the filter.

    Args:
        time: observation time in seconds.
        kind: an ObservationKind value.
        meas: measurement vector for that observation kind.
        R: optional measurement covariance override.
    """
    try:
        self.kf.predict_and_observe(time, kind, meas, R)
    except KalmanError:
        # A failed update leaves the filter unusable; start from scratch.
        cloudlog.error("Error in predict and observe, kalman reset")
        self.reset_kalman()
def handle_gps(self, current_time, log):
if log.flags % 2 == 0:
return
self.last_gps_fix = current_time
self.converter = coord.LocalCoord.from_geodetic([log.latitude, log.longitude, log.altitude])
ecef_pos = self.converter.ned2ecef([0, 0, 0])
ecef_vel = self.converter.ned2ecef(np.array(log.vNED)) - ecef_pos
ecef_pos_R = np.diag([(3*log.verticalAccuracy)**2]*3)
ecef_vel_R = np.diag([(log.speedAccuracy)**2]*3)
self.unix_timestamp_millis = log.timestamp
gps_est_error = np.sqrt((self.kf.x[0] - ecef_pos[0])**2 +
(self.kf.x[1] - ecef_pos[1])**2 +
(self.kf.x[2] - ecef_pos[2])**2)
orientation_ecef = euler_from_quat(self.kf.x[States.ECEF_ORIENTATION])
orientation_ned = ned_euler_from_ecef(ecef_pos, orientation_ecef)
orientation_ned_gps = np.array([0, 0, np.radians(log.bearingDeg)])
orientation_error = np.mod(orientation_ned - orientation_ned_gps - np.pi, 2*np.pi) - np.pi
initial_pose_ecef_quat = quat_from_euler(ecef_euler_from_ned(ecef_pos, orientation_ned_gps))
if np.linalg.norm(ecef_vel) > 5 and np.linalg.norm(orientation_error) > 1:
cloudlog.error("Locationd vs ubloxLocation orientation difference too large, kalman reset")
self.reset_kalman(init_pos=ecef_pos, init_orient=initial_pose_ecef_quat)
self.update_kalman(current_time, ObservationKind.ECEF_ORIENTATION_FROM_GPS, initial_pose_ecef_quat)
elif gps_est_error > 50:
cloudlog.error("Locationd vs ubloxLocation position difference too large, kalman reset")
self.reset_kalman(init_pos=ecef_pos, init_orient=initial_pose_ecef_quat)
self.update_kalman(current_time, ObservationKind.ECEF_POS, ecef_pos, R=ecef_pos_R)
self.update_kalman(current_time, ObservationKind.ECEF_VEL, ecef_vel, R=ecef_vel_R)
def handle_car_state(self, current_time, log):
    """Ingest CAN vehicle speed as a decimated odometric-speed observation."""
    self.speed_counter += 1
    if self.speed_counter % SENSOR_DECIMATION == 0:
        self.update_kalman(current_time, ObservationKind.ODOMETRIC_SPEED, [log.vEgo])
        self.car_speed = abs(log.vEgo)
        if log.vEgo == 0:
            # Standstill: pin angular velocity to zero to bound gyro drift.
            self.update_kalman(current_time, ObservationKind.NO_ROT, [0, 0, 0])
def handle_cam_odo(self, current_time, log):
self.cam_counter += 1
if self.cam_counter % VISION_DECIMATION == 0:
rot_device = self.device_from_calib.dot(log.rot)
rot_device_std = self.device_from_calib.dot(log.rotStd)
self.update_kalman(current_time,
ObservationKind.CAMERA_ODO_ROTATION,
np.concatenate([rot_device, 10*rot_device_std]))
trans_device = self.device_from_calib.dot(log.trans)
trans_device_std = self.device_from_calib.dot(log.transStd)
self.posenet_speed = np.linalg.norm(trans_device)
self.posenet_stds[:-1] = self.posenet_stds[1:]
self.posenet_stds[-1] = trans_device_std[0]
self.update_kalman(current_time,
ObservationKind.CAMERA_ODO_TRANSLATION,
np.concatenate([trans_device, 10*trans_device_std]))
def handle_sensors(self, current_time, log):
for sensor_reading in log:
sensor_time = 1e-9 * sensor_reading.timestamp
if sensor_reading.source == SensorSource.lsm6ds3:
continue
if sensor_reading.sensor == 5 and sensor_reading.type == 16:
self.gyro_counter += 1
if self.gyro_counter % SENSOR_DECIMATION == 0:
v = sensor_reading.gyroUncalibrated.v
self.update_kalman(sensor_time, ObservationKind.PHONE_GYRO, [-v[2], -v[1], -v[0]])
if sensor_reading.sensor == 1 and sensor_reading.type == 1:
self.device_fell = self.device_fell or (np.linalg.norm(np.array(sensor_reading.acceleration.v) - np.array([10, 0, 0])) > 40)
self.acc_counter += 1
if self.acc_counter % SENSOR_DECIMATION == 0:
v = sensor_reading.acceleration.v
self.update_kalman(sensor_time, ObservationKind.PHONE_ACCEL, [-v[2], -v[1], -v[0]])
def handle_live_calib(self, current_time, log):
if len(log.rpyCalib):
self.calib = log.rpyCalib
self.device_from_calib = rot_from_euler(self.calib)
self.calib_from_device = self.device_from_calib.T
self.calibrated = log.calStatus == 1
def reset_kalman(self, current_time=None, init_orient=None, init_pos=None):
self.filter_time = current_time
init_x = LiveKalman.initial_x.copy()
if init_orient is not None:
init_x[3:7] = init_orient
if init_pos is not None:
init_x[:3] = init_pos
self.kf.init_state(init_x, covs=np.diag(LiveKalman.initial_P_diag), filter_time=current_time)
self.observation_buffer = []
self.gyro_counter = 0
self.acc_counter = 0
self.speed_counter = 0
self.cam_counter = 0
def locationd_thread(sm, pm, disabled_logs=None):
if disabled_logs is None:
disabled_logs = []
if sm is None:
socks = ['gpsLocationExternal', 'sensorEvents', 'cameraOdometry', 'liveCalibration', 'carState']
sm = messaging.SubMaster(socks, ignore_alive=['gpsLocationExternal'])
if pm is None:
pm = messaging.PubMaster(['liveLocationKalman'])
params = Params()
localizer = Localizer(disabled_logs=disabled_logs)
while True:
sm.update()
for sock, updated in sm.updated.items():
if updated and sm.valid[sock]:
t = sm.logMonoTime[sock] * 1e-9
if sock == "sensorEvents":
localizer.handle_sensors(t, sm[sock])
elif sock == "gpsLocationExternal":
localizer.handle_gps(t, sm[sock])
elif sock == "carState":
localizer.handle_car_state(t, sm[sock])
elif sock == "cameraOdometry":
localizer.handle_cam_odo(t, sm[sock])
elif sock == "liveCalibration":
localizer.handle_live_calib(t, sm[sock])
if sm.updated['cameraOdometry']:
t = sm.logMonoTime['cameraOdometry']
msg = messaging.new_message('liveLocationKalman')
msg.logMonoTime = t
msg.liveLocationKalman = localizer.liveLocationMsg()
msg.liveLocationKalman.inputsOK = sm.all_alive_and_valid()
msg.liveLocationKalman.sensorsOK = sm.alive['sensorEvents'] and sm.valid['sensorEvents']
gps_age = (t / 1e9) - localizer.last_gps_fix
msg.liveLocationKalman.gpsOK = gps_age < 1.0
pm.send('liveLocationKalman', msg)
if sm.frame % 1200 == 0 and msg.liveLocationKalman.gpsOK:
location = {
'latitude': msg.liveLocationKalman.positionGeodetic.value[0],
'longitude': msg.liveLocationKalman.positionGeodetic.value[1],
'altitude': msg.liveLocationKalman.positionGeodetic.value[2],
}
params.put("LastGPSPosition", json.dumps(location))
def main(sm=None, pm=None):
locationd_thread(sm, pm)
if __name__ == "__main__":
import os
os.environ["OMP_NUM_THREADS"] = "1"
main()
| true | true |
f7267fe4a54f406a59df97aff7b19b741d435024 | 3,773 | py | Python | OpenStack.py | minidfx/Cloud-Python- | c9e4741c4c4f7de77f439e2786cca7f03f70cad9 | [
"MIT"
] | null | null | null | OpenStack.py | minidfx/Cloud-Python- | c9e4741c4c4f7de77f439e2786cca7f03f70cad9 | [
"MIT"
] | null | null | null | OpenStack.py | minidfx/Cloud-Python- | c9e4741c4c4f7de77f439e2786cca7f03f70cad9 | [
"MIT"
] | null | null | null | # Getting started with OpenStack using libcloud
# http://developer.openstack.org/firstapp-libcloud/getting_started.html
from libcloud.compute.ssh import *
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from Cloud import Cloud
from settings import *
# noinspection PyPep8Naming
class OpenStack(Cloud):
    """Cloud implementation backed by libcloud's OpenStack driver (SwitchEngines).

    create() boots three cooperating instances (MongoDB, RESTServer,
    RESTClient), starts the REST apps over SSH, and destroy() tears
    everything created here back down.
    """

    def __init__(self):
        super().__init__()
        openstack = get_driver(Provider.OPENSTACK)
        # Credentials and endpoints come from settings (star-imported above).
        self.driver = openstack(user,
                                password,
                                ex_tenant_name = tenant_name,
                                ex_force_auth_url = auth_url,
                                ex_force_auth_version = '2.0_password',
                                ex_force_service_region = service_region)
        # Floating IPs allocated by this object, released in destroy().
        self.activeIps = []

    def create(self):
        """Provision all three nodes and start the services on them."""
        print('Retrieving infrastructure information from SwitchEngines ...')
        images = self.driver.list_images()
        sizes = self.driver.list_sizes()
        security_groups = self.driver.ex_list_security_groups()
        networks = self.driver.ex_list_networks()
        print('Done.')
        # These names must exist in the tenant; an IndexError is raised otherwise.
        security_group = [s for s in security_groups if s.name == 'anywhere'][0]
        network = [s for s in networks if s.name == 'My network'][0]
        size = [s for s in sizes if s.name == 'c1.micro'][0]
        # noinspection PyPep8Naming
        mongoDbIp = self.__run_instance('MongoDB', size, images, security_group, network)
        restServerIP = self.__run_instance('RESTServer', size, images, security_group, network)
        restClientIP = self.__run_instance('RESTClient', size, images, security_group, network)
        self.__additionalOperations(restServerIP, restClientIP, mongoDbIp)

    @staticmethod
    def __additionalOperations(restServerIP, restClientIP, mongoDbIp):
        """Start the REST server and client over SSH, pointing both at MongoDB."""
        clientSSH = ShellOutSSHClient(restServerIP, username = 'ubuntu')
        clientSSH.connect()
        try:
            clientSSH.run('python /home/ubuntu/Downloads/pyserver.py %s &' % mongoDbIp)
        finally:
            clientSSH.close()
        clientSSH = ShellOutSSHClient(restClientIP, username = 'ubuntu')
        clientSSH.connect()
        try:
            clientSSH.run('python /home/ubuntu/Downloads/pyclient.py %s &' % mongoDbIp)
        finally:
            clientSSH.close()

    def __run_instance(self, instancename, size, images, security_group, network):
        """Boot one node from the image named *instancename* and attach a floating IP.

        Returns the node's private IP. The node and the floating IP are
        recorded for later cleanup by destroy().
        """
        print('Creating a new node ...')
        image = [s for s in images if s.name == instancename][0]
        node = self.driver.create_node(name = instancename,
                                       size = size,
                                       image = image,
                                       ex_security_groups = [security_group],
                                       ex_keyname = 'switch-engine',
                                       networks = [network])
        print('Done.')
        print("Waiting for %s ..." % instancename)
        self.driver.wait_until_running([node])
        # NOTE(review): activeNodes appears to be initialized by the Cloud base class — confirm.
        self.activeNodes.append(node)
        nodes = self.driver.list_nodes()
        instanceNode = [s for s in nodes if s.name == instancename][0]
        privateIp = instanceNode.private_ips[0]
        print('Instance ready.')
        print('Attaching a Public IP ...')
        ip = self.driver.ex_create_floating_ip()
        self.activeIps.append(ip)
        self.driver.ex_attach_floating_ip_to_node(node, ip)
        print('Done.')
        return privateIp

    def destroy(self):
        """Destroy every created node and release every allocated floating IP."""
        print('Destroying the instance on SwitchEngines ...')
        for node in self.activeNodes:
            node.destroy()
        for ip in self.activeIps:
            self.driver.ex_delete_floating_ip(ip)
        print('Done.')
| 35.59434 | 95 | 0.602703 |
from libcloud.compute.ssh import *
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from Cloud import Cloud
from settings import *
class OpenStack(Cloud):
def __init__(self):
super().__init__()
openstack = get_driver(Provider.OPENSTACK)
self.driver = openstack(user,
password,
ex_tenant_name = tenant_name,
ex_force_auth_url = auth_url,
ex_force_auth_version = '2.0_password',
ex_force_service_region = service_region)
self.activeIps = []
def create(self):
print('Retrieving infrastructure information from SwitchEngines ...')
images = self.driver.list_images()
sizes = self.driver.list_sizes()
security_groups = self.driver.ex_list_security_groups()
networks = self.driver.ex_list_networks()
print('Done.')
security_group = [s for s in security_groups if s.name == 'anywhere'][0]
network = [s for s in networks if s.name == 'My network'][0]
size = [s for s in sizes if s.name == 'c1.micro'][0]
mongoDbIp = self.__run_instance('MongoDB', size, images, security_group, network)
restServerIP = self.__run_instance('RESTServer', size, images, security_group, network)
restClientIP = self.__run_instance('RESTClient', size, images, security_group, network)
self.__additionalOperations(restServerIP, restClientIP, mongoDbIp)
@staticmethod
def __additionalOperations(restServerIP, restClientIP, mongoDbIp):
clientSSH = ShellOutSSHClient(restServerIP, username = 'ubuntu')
clientSSH.connect()
try:
clientSSH.run('python /home/ubuntu/Downloads/pyserver.py %s &' % mongoDbIp)
finally:
clientSSH.close()
clientSSH = ShellOutSSHClient(restClientIP, username = 'ubuntu')
clientSSH.connect()
try:
clientSSH.run('python /home/ubuntu/Downloads/pyclient.py %s &' % mongoDbIp)
finally:
clientSSH.close()
def __run_instance(self, instancename, size, images, security_group, network):
print('Creating a new node ...')
image = [s for s in images if s.name == instancename][0]
node = self.driver.create_node(name = instancename,
size = size,
image = image,
ex_security_groups = [security_group],
ex_keyname = 'switch-engine',
networks = [network])
print('Done.')
print("Waiting for %s ..." % instancename)
self.driver.wait_until_running([node])
self.activeNodes.append(node)
nodes = self.driver.list_nodes()
instanceNode = [s for s in nodes if s.name == instancename][0]
privateIp = instanceNode.private_ips[0]
print('Instance ready.')
print('Attaching a Public IP ...')
ip = self.driver.ex_create_floating_ip()
self.activeIps.append(ip)
self.driver.ex_attach_floating_ip_to_node(node, ip)
print('Done.')
return privateIp
def destroy(self):
print('Destroying the instance on SwitchEngines ...')
for node in self.activeNodes:
node.destroy()
for ip in self.activeIps:
self.driver.ex_delete_floating_ip(ip)
print('Done.')
| true | true |
f726800b4ff9c7c04844b758f410a70599a4f3f6 | 967 | py | Python | mergeforms/from cms10/test_wordTemplate.py | mbronstein/ssa412 | 32de4e44f16cf2044788428da2a3bab271ebcb9a | [
"MIT"
] | null | null | null | mergeforms/from cms10/test_wordTemplate.py | mbronstein/ssa412 | 32de4e44f16cf2044788428da2a3bab271ebcb9a | [
"MIT"
] | null | null | null | mergeforms/from cms10/test_wordTemplate.py | mbronstein/ssa412 | 32de4e44f16cf2044788428da2a3bab271ebcb9a | [
"MIT"
] | null | null | null | from unittest import TestCase, skip
import os
from docxtpl import DocxTemplate
class TestDocxTemplate(TestCase):
    """Render docx templates from the fixtures directory and save the output.

    The duplicated render/save sequence of the two tests is factored into
    a single private helper; test method names and behavior are unchanged.
    """

    def setUp(self):
        self.APP_DIR = os.path.abspath(os.path.dirname(__file__))  # This directory
        self.FIXTURE_DIR = os.path.join(os.path.dirname(self.APP_DIR), 'fixtures')
        self.FORM_DIR = os.path.join(self.APP_DIR, 'static', 'formlib')

    def _render_fixture(self, template_name, output_name, context):
        """Render FIXTURE_DIR/template_name with *context*, save to FIXTURE_DIR/output_name."""
        doc = DocxTemplate(os.path.join(self.FIXTURE_DIR, template_name))
        doc.render(context)
        doc.save(os.path.join(self.FIXTURE_DIR, output_name))

    def test_render1(self):
        self._render_fixture('test_docx.docx', 'generated_doc.docx',
                             {'claimant_full_name': "Mary Smith", 'claimant_ssn': '111-22-1234'})

    def test_render2(self):
        self._render_fixture('do-letter-form.docx', 'generated_doc2.docx',
                             {'claimant_full_name': "Mary Smith", 'claimant_ssn': '111-22-1234'})
| 34.535714 | 84 | 0.679421 | from unittest import TestCase, skip
import os
from docxtpl import DocxTemplate
class TestDocxTemplate(TestCase):
def setUp(self):
self.APP_DIR = os.path.abspath(os.path.dirname(__file__))
self.FIXTURE_DIR = os.path.join(os.path.dirname(self.APP_DIR), 'fixtures')
self.FORM_DIR = os.path.join(self.APP_DIR, 'static', 'formlib')
def test_render1(self):
doc = DocxTemplate(os.path.join( self.FIXTURE_DIR, 'test_docx.docx'))
context = {'claimant_full_name':"Mary Smith", 'claimant_ssn': '111-22-1234'}
doc.render(context)
doc.save(os.path.join(self.FIXTURE_DIR, 'generated_doc.docx'))
def test_render2(self):
doc = DocxTemplate(os.path.join( self.FIXTURE_DIR,'do-letter-form.docx'))
context = {'claimant_full_name':"Mary Smith", 'claimant_ssn': '111-22-1234'}
doc.render(context)
doc.save(os.path.join(self.FIXTURE_DIR, 'generated_doc2.docx'))
| true | true |
f726812fa813373e14802dbfe2defbcd7a440e6d | 4,637 | py | Python | apps/permissions.py | Github-shipchain/transmission | 867971cdc366ccfd6ef632f39652633c269e7969 | [
"Apache-2.0"
] | 1 | 2019-12-15T13:44:29.000Z | 2019-12-15T13:44:29.000Z | apps/permissions.py | Github-shipchain/transmission | 867971cdc366ccfd6ef632f39652633c269e7969 | [
"Apache-2.0"
] | null | null | null | apps/permissions.py | Github-shipchain/transmission | 867971cdc366ccfd6ef632f39652633c269e7969 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2019 ShipChain, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db.models import Q
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import permissions, status
from shipchain_common.authentication import get_jwt_from_request
from apps.shipments.models import Shipment, PermissionLink
PROFILES_WALLET_URL = f'{settings.PROFILES_URL}/api/v1/wallet'
def get_user(request):
    """Return (user_id, organization_id) for an authenticated request, else (None, None)."""
    user = request.user
    if not user.is_authenticated:
        return None, None
    return user.id, user.token.get('organization_id', None)
def shipment_owner_access_filter(request):
    """Q filter matching shipments owned by the requesting user or their organization."""
    user_id, organization_id = get_user(request)
    user_q = Q(shipment__owner_id=user_id)
    if organization_id:
        return Q(shipment__owner_id=organization_id) | user_q
    return user_q
def owner_access_filter(request):
    """Q filter matching objects owned by the requesting user or their organization."""
    user_id, organization_id = get_user(request)
    user_q = Q(owner_id=user_id)
    return (Q(owner_id=organization_id) | user_q) if organization_id else user_q
def get_owner_id(request):
    """Effective owner id: the organization when present, otherwise the user."""
    user_id, organization_id = get_user(request)
    return organization_id or user_id
def has_owner_access(request, obj):
    """True when *obj* is owned by the requester's organization or by the user themselves."""
    user_id, organization_id = get_user(request)
    return (organization_id and obj.owner_id == organization_id) or obj.owner_id == user_id
def is_carrier(request, shipment):
    """
    Custom permission for carrier shipment access
    """
    # NOTE(review): a 200 from Profiles for the shipment's active carrier wallet is
    # treated as proof the requester controls that wallet — confirm with Profiles API.
    response = settings.REQUESTS_SESSION.get(f'{PROFILES_WALLET_URL}/{shipment.carrier_wallet_id}/?is_active',
                                             headers={'Authorization': f'JWT {get_jwt_from_request(request)}'})
    # Carriers may only read and update, never create or delete.
    return response.status_code == status.HTTP_200_OK and request.method in ('GET', 'PATCH')
def is_moderator(request, shipment):
    """
    Custom permission for moderator shipment access
    """
    if shipment.moderator_wallet_id:
        response = settings.REQUESTS_SESSION.get(f'{PROFILES_WALLET_URL}/{shipment.moderator_wallet_id}/?is_active',
                                                 headers={'Authorization': f'JWT {get_jwt_from_request(request)}'})
        # Moderators may only read and update, never create or delete.
        return response.status_code == status.HTTP_200_OK and request.method in ('GET', 'PATCH')
    # A shipment without a moderator wallet has no moderator.
    return False
def is_shipper(request, shipment):
    """
    Custom permission for shipper shipment access
    """
    # NOTE(review): mirrors is_carrier — wallet ownership is checked via Profiles
    # using the caller's forwarded JWT; confirm semantics match.
    response = settings.REQUESTS_SESSION.get(f'{PROFILES_WALLET_URL}/{shipment.shipper_wallet_id}/?is_active',
                                             headers={'Authorization': f'JWT {get_jwt_from_request(request)}'})
    # Shippers may only read and update, never create or delete.
    return response.status_code == status.HTTP_200_OK and request.method in ('GET', 'PATCH')
def shipment_exists(shipment_id):
    """Return the Shipment with this primary key, or False when it does not exist.

    Used to check whether a shipment_id included in a nested route exists.
    """
    try:
        return Shipment.objects.get(pk=shipment_id)
    except ObjectDoesNotExist:
        return False
def check_permission_link(request, shipment_obj):
    """Validate an optional ?permission_link query param for read-only shipment access.

    Falls back to the full owner/party access check when no link is supplied
    or the supplied link has expired.
    """
    permission_link_id = request.query_params.get('permission_link', None)
    if permission_link_id:
        try:
            permission_obj = PermissionLink.objects.get(pk=permission_link_id)
        except ObjectDoesNotExist:
            # Unknown link id: deny outright, with no fallback to owner access.
            return False
        if not permission_obj.is_valid:
            # Expired link: behave as if no link was supplied.
            return check_has_shipment_owner_access(request, shipment_obj)
        # A valid link grants GET only, and only on the shipment it was made for.
        return shipment_obj.pk == permission_obj.shipment.pk and request.method == 'GET'
    return check_has_shipment_owner_access(request, shipment_obj)
def check_has_shipment_owner_access(request, obj):
    """Authenticated and one of: owner, shipper, carrier or moderator of the shipment."""
    if not request.user.is_authenticated:
        return False
    # Short-circuits in the same order as before: owner check is local,
    # the wallet checks each hit the Profiles service.
    return (has_owner_access(request, obj)
            or is_shipper(request, obj)
            or is_carrier(request, obj)
            or is_moderator(request, obj))
class IsOwner(permissions.BasePermission):
    """
    Custom permission to only allow owners of an object to edit it
    """

    def has_object_permission(self, request, view, obj):
        # Permissions are only allowed to the owner of the shipment
        # (user or organization ownership, via has_owner_access).
        return has_owner_access(request, obj)
| 35.669231 | 116 | 0.723528 |
from django.db.models import Q
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import permissions, status
from shipchain_common.authentication import get_jwt_from_request
from apps.shipments.models import Shipment, PermissionLink
PROFILES_WALLET_URL = f'{settings.PROFILES_URL}/api/v1/wallet'
def get_user(request):
if request.user.is_authenticated:
return request.user.id, request.user.token.get('organization_id', None)
return None, None
def shipment_owner_access_filter(request):
user_id, organization_id = get_user(request)
return Q(shipment__owner_id=organization_id) | Q(shipment__owner_id=user_id) if organization_id else \
Q(shipment__owner_id=user_id)
def owner_access_filter(request):
user_id, organization_id = get_user(request)
return Q(owner_id=organization_id) | Q(owner_id=user_id) if organization_id else Q(owner_id=user_id)
def get_owner_id(request):
user_id, organization_id = get_user(request)
return organization_id if organization_id else user_id
def has_owner_access(request, obj):
user_id, organization_id = get_user(request)
return (organization_id and obj.owner_id == organization_id) or obj.owner_id == user_id
def is_carrier(request, shipment):
response = settings.REQUESTS_SESSION.get(f'{PROFILES_WALLET_URL}/{shipment.carrier_wallet_id}/?is_active',
headers={'Authorization': f'JWT {get_jwt_from_request(request)}'})
return response.status_code == status.HTTP_200_OK and request.method in ('GET', 'PATCH')
def is_moderator(request, shipment):
if shipment.moderator_wallet_id:
response = settings.REQUESTS_SESSION.get(f'{PROFILES_WALLET_URL}/{shipment.moderator_wallet_id}/?is_active',
headers={'Authorization': f'JWT {get_jwt_from_request(request)}'})
return response.status_code == status.HTTP_200_OK and request.method in ('GET', 'PATCH')
return False
def is_shipper(request, shipment):
response = settings.REQUESTS_SESSION.get(f'{PROFILES_WALLET_URL}/{shipment.shipper_wallet_id}/?is_active',
headers={'Authorization': f'JWT {get_jwt_from_request(request)}'})
return response.status_code == status.HTTP_200_OK and request.method in ('GET', 'PATCH')
def shipment_exists(shipment_id):
try:
shipment_obj = Shipment.objects.get(pk=shipment_id)
except ObjectDoesNotExist:
return False
return shipment_obj
def check_permission_link(request, shipment_obj):
permission_link_id = request.query_params.get('permission_link', None)
if permission_link_id:
try:
permission_obj = PermissionLink.objects.get(pk=permission_link_id)
except ObjectDoesNotExist:
return False
if not permission_obj.is_valid:
return check_has_shipment_owner_access(request, shipment_obj)
return shipment_obj.pk == permission_obj.shipment.pk and request.method == 'GET'
return check_has_shipment_owner_access(request, shipment_obj)
def check_has_shipment_owner_access(request, obj):
return request.user.is_authenticated and (has_owner_access(request, obj) or is_shipper(request, obj) or
is_carrier(request, obj) or is_moderator(request, obj))
class IsOwner(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return has_owner_access(request, obj)
| true | true |
f7268157ef2a0bd8d69f9adfb748756be3029bea | 5,766 | py | Python | pgoapi/protos/pogoprotos/networking/requests/messages/get_asset_digest_message_pb2.py | linherest/pgoapi | e3bdce71b06c099663e9796c8df166883059edd9 | [
"MIT"
] | 14 | 2017-03-28T16:32:24.000Z | 2021-03-13T23:03:57.000Z | pgoapi/protos/pogoprotos/networking/requests/messages/get_asset_digest_message_pb2.py | linherest/pgoapi | e3bdce71b06c099663e9796c8df166883059edd9 | [
"MIT"
] | 8 | 2017-03-01T07:56:09.000Z | 2017-08-15T07:37:12.000Z | pgoapi/protos/pogoprotos/networking/requests/messages/get_asset_digest_message_pb2.py | linherest/pgoapi | e3bdce71b06c099663e9796c8df166883059edd9 | [
"MIT"
] | 15 | 2017-02-24T01:30:23.000Z | 2021-06-27T08:46:43.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/messages/get_asset_digest_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.enums import platform_pb2 as pogoprotos_dot_enums_dot_platform__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/messages/get_asset_digest_message.proto',
package='pogoprotos.networking.requests.messages',
syntax='proto3',
serialized_pb=_b('\nFpogoprotos/networking/requests/messages/get_asset_digest_message.proto\x12\'pogoprotos.networking.requests.messages\x1a\x1fpogoprotos/enums/platform.proto\"\xdc\x01\n\x15GetAssetDigestMessage\x12,\n\x08platform\x18\x01 \x01(\x0e\x32\x1a.pogoprotos.enums.Platform\x12\x1b\n\x13\x64\x65vice_manufacturer\x18\x02 \x01(\t\x12\x14\n\x0c\x64\x65vice_model\x18\x03 \x01(\t\x12\x0e\n\x06locale\x18\x04 \x01(\t\x12\x13\n\x0b\x61pp_version\x18\x05 \x01(\r\x12\x10\n\x08paginate\x18\x06 \x01(\x08\x12\x13\n\x0bpage_offset\x18\x07 \x01(\x05\x12\x16\n\x0epage_timestamp\x18\x08 \x01(\x04\x62\x06proto3')
,
dependencies=[pogoprotos_dot_enums_dot_platform__pb2.DESCRIPTOR,])
_GETASSETDIGESTMESSAGE = _descriptor.Descriptor(
name='GetAssetDigestMessage',
full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='platform', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.platform', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_manufacturer', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.device_manufacturer', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_model', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.device_model', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='locale', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.locale', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='app_version', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.app_version', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='paginate', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.paginate', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_offset', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.page_offset', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_timestamp', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.page_timestamp', index=7,
number=8, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=149,
serialized_end=369,
)
_GETASSETDIGESTMESSAGE.fields_by_name['platform'].enum_type = pogoprotos_dot_enums_dot_platform__pb2._PLATFORM
DESCRIPTOR.message_types_by_name['GetAssetDigestMessage'] = _GETASSETDIGESTMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetAssetDigestMessage = _reflection.GeneratedProtocolMessageType('GetAssetDigestMessage', (_message.Message,), dict(
DESCRIPTOR = _GETASSETDIGESTMESSAGE,
__module__ = 'pogoprotos.networking.requests.messages.get_asset_digest_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.GetAssetDigestMessage)
))
_sym_db.RegisterMessage(GetAssetDigestMessage)
# @@protoc_insertion_point(module_scope)
| 47.262295 | 613 | 0.772112 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
_sym_db = _symbol_database.Default()
from pogoprotos.enums import platform_pb2 as pogoprotos_dot_enums_dot_platform__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/messages/get_asset_digest_message.proto',
package='pogoprotos.networking.requests.messages',
syntax='proto3',
serialized_pb=_b('\nFpogoprotos/networking/requests/messages/get_asset_digest_message.proto\x12\'pogoprotos.networking.requests.messages\x1a\x1fpogoprotos/enums/platform.proto\"\xdc\x01\n\x15GetAssetDigestMessage\x12,\n\x08platform\x18\x01 \x01(\x0e\x32\x1a.pogoprotos.enums.Platform\x12\x1b\n\x13\x64\x65vice_manufacturer\x18\x02 \x01(\t\x12\x14\n\x0c\x64\x65vice_model\x18\x03 \x01(\t\x12\x0e\n\x06locale\x18\x04 \x01(\t\x12\x13\n\x0b\x61pp_version\x18\x05 \x01(\r\x12\x10\n\x08paginate\x18\x06 \x01(\x08\x12\x13\n\x0bpage_offset\x18\x07 \x01(\x05\x12\x16\n\x0epage_timestamp\x18\x08 \x01(\x04\x62\x06proto3')
,
dependencies=[pogoprotos_dot_enums_dot_platform__pb2.DESCRIPTOR,])
_GETASSETDIGESTMESSAGE = _descriptor.Descriptor(
name='GetAssetDigestMessage',
full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='platform', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.platform', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_manufacturer', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.device_manufacturer', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_model', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.device_model', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='locale', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.locale', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='app_version', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.app_version', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='paginate', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.paginate', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_offset', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.page_offset', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_timestamp', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.page_timestamp', index=7,
number=8, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=149,
serialized_end=369,
)
_GETASSETDIGESTMESSAGE.fields_by_name['platform'].enum_type = pogoprotos_dot_enums_dot_platform__pb2._PLATFORM
DESCRIPTOR.message_types_by_name['GetAssetDigestMessage'] = _GETASSETDIGESTMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetAssetDigestMessage = _reflection.GeneratedProtocolMessageType('GetAssetDigestMessage', (_message.Message,), dict(
DESCRIPTOR = _GETASSETDIGESTMESSAGE,
__module__ = 'pogoprotos.networking.requests.messages.get_asset_digest_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.GetAssetDigestMessage)
))
_sym_db.RegisterMessage(GetAssetDigestMessage)
# @@protoc_insertion_point(module_scope)
| true | true |
f726822728d9822a12dc779ca59f2edd95742b2b | 812 | py | Python | vendimaenv/lib/python2.7/site-packages/funcy/py2.py | soru13/vendimia | ebcf85473855e6f990b1a49574ac669fdd4d443e | [
"MIT"
] | null | null | null | vendimaenv/lib/python2.7/site-packages/funcy/py2.py | soru13/vendimia | ebcf85473855e6f990b1a49574ac669fdd4d443e | [
"MIT"
] | 5 | 2020-02-11T23:26:24.000Z | 2022-01-13T00:39:54.000Z | vendimaenv/lib/python2.7/site-packages/funcy/py2.py | soru13/vendimia | ebcf85473855e6f990b1a49574ac669fdd4d443e | [
"MIT"
] | null | null | null | import sys
from .calc import *
from .colls import *
from .tree import *
from .decorators import *
from .funcolls import *
from .funcs import *
from .seqs import *
from .types import *
from .strings import *
from .flow import *
from .objects import *
from .debug import *
from .primitives import *
# Setup __all__
# Re-export the union of the submodules' public names.  `cat` is available here
# via the star imports above, and each listed submodule defines __all__.
modules = ('calc', 'colls', 'tree', 'decorators', 'funcolls', 'funcs', 'seqs', 'types',
           'strings', 'flow', 'objects', 'debug', 'primitives')
__all__ = cat(sys.modules['funcy.' + m].__all__ for m in modules)

# Python 2 style zip() for Python 3
from .cross import PY3
if PY3:
    # Shadow the builtin with an eager, list-returning zip() so callers get
    # Python 2 semantics (a list) instead of Python 3's lazy iterator.
    _zip = zip
    def zip(*seqs):
        """List zip() version."""
        return list(_zip(*seqs))
    __all__ += ['zip']  # HACK: using this instead of .append() to not trigger PyCharm
else:
    # On Python 2 the builtin already returns a list; alias it so the name is
    # always defined at module level on both versions.
    zip = zip
| 23.882353 | 87 | 0.64532 | import sys
from .calc import *
from .colls import *
from .tree import *
from .decorators import *
from .funcolls import *
from .funcs import *
from .seqs import *
from .types import *
from .strings import *
from .flow import *
from .objects import *
from .debug import *
from .primitives import *
modules = ('calc', 'colls', 'tree', 'decorators', 'funcolls', 'funcs', 'seqs', 'types',
'strings', 'flow', 'objects', 'debug', 'primitives')
__all__ = cat(sys.modules['funcy.' + m].__all__ for m in modules)
from .cross import PY3
if PY3:
_zip = zip
def zip(*seqs):
return list(_zip(*seqs))
__all__ += ['zip']
else:
zip = zip
| true | true |
f726841a3369bd4d21c176bdb301ddb10b209714 | 160 | py | Python | scenarios/api_key_delete/executable.py | timgates42/balanced-python | 1df86b45c36a97ec2e214480c6806c4df3c79860 | [
"MIT"
] | 12 | 2015-04-12T06:18:33.000Z | 2021-03-03T23:54:19.000Z | scenarios/api_key_delete/executable.py | timgates42/balanced-python | 1df86b45c36a97ec2e214480c6806c4df3c79860 | [
"MIT"
] | 1 | 2021-11-24T20:10:19.000Z | 2021-11-24T20:10:19.000Z | scenarios/api_key_delete/executable.py | timgates42/balanced-python | 1df86b45c36a97ec2e214480c6806c4df3c79860 | [
"MIT"
] | 14 | 2015-03-23T17:52:06.000Z | 2021-11-24T11:04:15.000Z | import balanced
balanced.configure('ak-test-2eKlj1ZDfAcZSARMf3NMhBHywDej0avSY')
key = balanced.APIKey.fetch('/api_keys/AK3DQGzROuoRYulKXMQdHBxX')
key.delete() | 26.666667 | 65 | 0.83125 | import balanced
balanced.configure('ak-test-2eKlj1ZDfAcZSARMf3NMhBHywDej0avSY')
key = balanced.APIKey.fetch('/api_keys/AK3DQGzROuoRYulKXMQdHBxX')
key.delete() | true | true |
f726841edd23cffe106d88311ba375ae4ca2b996 | 7,722 | py | Python | cornac/models/hft/recom_hft.py | redhat6/cornac | 856cf0f546a0dc6b46f407128d89ef2534994c60 | [
"Apache-2.0"
] | null | null | null | cornac/models/hft/recom_hft.py | redhat6/cornac | 856cf0f546a0dc6b46f407128d89ef2534994c60 | [
"Apache-2.0"
] | null | null | null | cornac/models/hft/recom_hft.py | redhat6/cornac | 856cf0f546a0dc6b46f407128d89ef2534994c60 | [
"Apache-2.0"
] | 1 | 2020-03-19T13:58:33.000Z | 2020-03-19T13:58:33.000Z | # Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from ..recommender import Recommender
from ...exception import ScoreException
class HFT(Recommender):
    """Hidden Factors and Hidden Topics

    Parameters
    ----------
    name: string, default: 'HFT'
        The name of the recommender model.

    k: int, optional, default: 10
        The dimension of the latent factors.

    max_iter: int, optional, default: 50
        Maximum number of iterations for EM.

    grad_iter: int, optional, default: 50
        Maximum number of iterations for L-BFGS.

    lambda_text: float, default: 0.1
        Weight of corpus likelihood in objective function.

    l2_reg: float, default: 0.001
        Regularization for user item latent factors.

    vocab_size: int, optional, default: 8000
        Size of vocabulary for review text.

    init_params: dictionary, optional, default: None
        List of initial parameters, e.g., init_params = {'alpha': alpha, 'beta_u': beta_u,
        'beta_i': beta_i, 'gamma_u': gamma_u, 'gamma_i': gamma_i}.
        The legacy key 'gamma_v' is also accepted as an alias for 'gamma_i'.

        alpha: float
            Model offset, optional initialization via init_params.

        beta_u: ndarray. shape (n_user, 1)
            User biases, optional initialization via init_params.

        beta_i: ndarray. shape (n_item, 1)
            Item biases, optional initialization via init_params.

        gamma_u: ndarray, shape (n_users,k)
            The user latent factors, optional initialization via init_params.

        gamma_i: ndarray, shape (n_items,k)
            The item latent factors, optional initialization via init_params.

    trainable: boolean, optional, default: True
        When False, the model will not be re-trained, and input of pre-trained parameters are required.

    verbose: boolean, optional, default: True
        When True, some running logs are displayed.

    seed: int, optional, default: None
        Random seed for weight initialization.

    References
    ----------
    Julian McAuley, Jure Leskovec. "Hidden Factors and Hidden Topics: Understanding Rating Dimensions with Review Text"
    RecSys '13 Proceedings of the 7th ACM conference on Recommender systems Pages 165-172
    """

    def __init__(self, name='HFT', k=10, max_iter=50, grad_iter=50,
                 lambda_text=0.1, l2_reg=0.001, vocab_size=8000,
                 init_params=None, trainable=True, verbose=True, seed=None):
        super().__init__(name=name, trainable=trainable, verbose=verbose)
        self.k = k
        self.lambda_text = lambda_text
        self.l2_reg = l2_reg
        self.grad_iter = grad_iter

        self.name = name
        self.max_iter = max_iter
        self.verbose = verbose
        # Normalize to a dict so .get() with defaults works in fit().
        self.init_params = {} if not init_params else init_params
        self.seed = seed
        self.vocab_size = vocab_size

    def fit(self, train_set, val_set=None):
        """Fit the model to observations.

        Parameters
        ----------
        train_set: :obj:`cornac.data.Dataset`, required
            User-Item preference data as well as additional modalities.

        val_set: :obj:`cornac.data.Dataset`, optional, default: None
            User-Item preference data for model selection purposes (e.g., early stopping).

        Returns
        -------
        self : object
        """
        Recommender.fit(self, train_set, val_set)

        from ...utils.init_utils import normal

        self.n_item = self.train_set.num_items
        self.n_user = self.train_set.num_users

        # Fall back to small random normal init for any parameter not supplied.
        self.alpha = self.init_params.get('alpha', train_set.global_mean)
        self.beta_u = self.init_params.get('beta_u', normal(self.n_user, std=0.01, random_state=self.seed))
        self.beta_i = self.init_params.get('beta_i', normal(self.n_item, std=0.01, random_state=self.seed))
        self.gamma_u = self.init_params.get('gamma_u', normal((self.n_user, self.k), std=0.01, random_state=self.seed))
        # Item factors: prefer 'gamma_i'; accept the legacy documented key 'gamma_v'.
        self.gamma_i = self.init_params.get(
            'gamma_i',
            self.init_params.get('gamma_v',
                                 normal((self.n_item, self.k), std=0.01, random_state=self.seed)))

        if self.trainable:
            self._fit_hft()

        return self

    @staticmethod
    def _build_data(csr_mat):
        """Split a CSR matrix into per-row (indices, values) lists.

        Returns a pair (index_list, rating_list) where entry r holds the column
        indices and the corresponding values of row r.
        """
        index_list = []
        rating_list = []
        for i in range(csr_mat.shape[0]):
            # indptr[i]:indptr[i+1] delimits row i's slice of indices/data.
            j, k = csr_mat.indptr[i], csr_mat.indptr[i + 1]
            index_list.append(csr_mat.indices[j:k])
            rating_list.append(csr_mat.data[j:k])
        return index_list, rating_list

    def _fit_hft(self):
        """Run EM training of the underlying HFT model and store the learned parameters."""
        from .hft import Model
        from tqdm import trange

        # document data
        bow_mat = self.train_set.item_text.batch_bow(np.arange(self.n_item), keep_sparse=True)
        documents, _ = self._build_data(bow_mat)  # bag of word feature
        # Rating data
        user_data = self._build_data(self.train_set.matrix)
        item_data = self._build_data(self.train_set.matrix.T.tocsr())

        model = Model(n_user=self.n_user, n_item=self.n_item, alpha=self.alpha, beta_u=self.beta_u, beta_i=self.beta_i,
                      gamma_u=self.gamma_u, gamma_i=self.gamma_i, n_vocab=self.vocab_size, k=self.k,
                      lambda_text=self.lambda_text, l2_reg=self.l2_reg, grad_iter=self.grad_iter)

        model.init_count(docs=documents)

        # training: alternate topic assignment (E-step) and parameter updates (M-step)
        loop = trange(self.max_iter, disable=not self.verbose)
        for _ in loop:
            model.assign_word_topics(docs=documents)
            loss = model.update_params(rating_data=(user_data, item_data))
            loop.set_postfix(loss=loss)

        self.alpha, self.beta_u, self.beta_i, self.gamma_u, self.gamma_i = model.get_parameter()

        if self.verbose:
            print('Learning completed!')

    def score(self, user_idx, item_idx=None):
        """Predict the scores/ratings of a user for an item.

        Parameters
        ----------
        user_idx: int, required
            The index of the user for whom to perform score prediction.

        item_idx: int, optional, default: None
            The index of the item for that to perform score prediction.
            If None, scores for all known items will be returned.

        Returns
        -------
        res : A scalar or a Numpy array
            Relative scores that the user gives to the item or to all known items

        Raises
        ------
        ScoreException
            If the user (or the requested item) is unknown to the training set.
        """
        if item_idx is None:
            if self.train_set.is_unk_user(user_idx):
                raise ScoreException("Can't make score prediction for (user_id=%d)" % user_idx)

            # Vectorized prediction over all items: alpha + b_u + b_i + <gamma_i, gamma_u>.
            known_item_scores = self.alpha + self.beta_u[user_idx] + self.beta_i + self.gamma_i.dot(
                self.gamma_u[user_idx, :])
            return known_item_scores
        else:
            if self.train_set.is_unk_user(user_idx) or self.train_set.is_unk_item(item_idx):
                raise ScoreException("Can't make score prediction for (user_id=%d, item_id=%d)" % (user_idx, item_idx))
            user_pred = self.alpha + self.beta_u[user_idx] + self.beta_i[item_idx] + self.gamma_i[item_idx, :].dot(
                self.gamma_u[user_idx, :])
            return user_pred
| 38.41791 | 119 | 0.642191 |
import numpy as np
from ..recommender import Recommender
from ...exception import ScoreException
class HFT(Recommender):
def __init__(self, name='HFT', k=10, max_iter=50, grad_iter=50,
lambda_text=0.1, l2_reg=0.001, vocab_size=8000,
init_params=None, trainable=True, verbose=True, seed=None):
super().__init__(name=name, trainable=trainable, verbose=verbose)
self.k = k
self.lambda_text = lambda_text
self.l2_reg = l2_reg
self.grad_iter = grad_iter
self.name = name
self.max_iter = max_iter
self.verbose = verbose
self.init_params = {} if not init_params else init_params
self.seed = seed
self.vocab_size = vocab_size
def fit(self, train_set, val_set=None):
Recommender.fit(self, train_set, val_set)
from ...utils.init_utils import normal
self.n_item = self.train_set.num_items
self.n_user = self.train_set.num_users
self.alpha = self.init_params.get('alpha', train_set.global_mean)
self.beta_u = self.init_params.get('beta_u', normal(self.n_user, std=0.01, random_state=self.seed))
self.beta_i = self.init_params.get('beta_i', normal(self.n_item, std=0.01, random_state=self.seed))
self.gamma_u = self.init_params.get('gamma_u', normal((self.n_user, self.k), std=0.01, random_state=self.seed))
self.gamma_i = self.init_params.get('gamma_i', normal((self.n_item, self.k), std=0.01, random_state=self.seed))
if self.trainable:
self._fit_hft()
return self
@staticmethod
def _build_data(csr_mat):
index_list = []
rating_list = []
for i in range(csr_mat.shape[0]):
j, k = csr_mat.indptr[i], csr_mat.indptr[i + 1]
index_list.append(csr_mat.indices[j:k])
rating_list.append(csr_mat.data[j:k])
return index_list, rating_list
def _fit_hft(self):
from .hft import Model
from tqdm import trange
bow_mat = self.train_set.item_text.batch_bow(np.arange(self.n_item), keep_sparse=True)
documents, _ = self._build_data(bow_mat)
user_data = self._build_data(self.train_set.matrix)
item_data = self._build_data(self.train_set.matrix.T.tocsr())
model = Model(n_user=self.n_user, n_item=self.n_item, alpha=self.alpha, beta_u=self.beta_u, beta_i=self.beta_i,
gamma_u=self.gamma_u, gamma_i=self.gamma_i, n_vocab=self.vocab_size, k=self.k,
lambda_text=self.lambda_text, l2_reg=self.l2_reg, grad_iter=self.grad_iter)
model.init_count(docs=documents)
loop = trange(self.max_iter, disable=not self.verbose)
for _ in loop:
model.assign_word_topics(docs=documents)
loss = model.update_params(rating_data=(user_data, item_data))
loop.set_postfix(loss=loss)
self.alpha, self.beta_u, self.beta_i, self.gamma_u, self.gamma_i = model.get_parameter()
if self.verbose:
print('Learning completed!')
def score(self, user_idx, item_idx=None):
if item_idx is None:
if self.train_set.is_unk_user(user_idx):
raise ScoreException("Can't make score prediction for (user_id=%d)" % user_idx)
known_item_scores = self.alpha + self.beta_u[user_idx] + self.beta_i + self.gamma_i.dot(
self.gamma_u[user_idx, :])
return known_item_scores
else:
if self.train_set.is_unk_user(user_idx) or self.train_set.is_unk_item(item_idx):
raise ScoreException("Can't make score prediction for (user_id=%d, item_id=%d)" % (user_idx, item_idx))
user_pred = self.alpha + self.beta_u[user_idx] + self.beta_i[item_idx] + self.gamma_i[item_idx, :].dot(
self.gamma_u[user_idx, :])
return user_pred
| true | true |
f726847a942ebeed78eb3cd894dacd72c3644eb4 | 101 | py | Python | main.py | marcacohen/mynewsfeed.io | b1debfab02c5dd6d618acd798792c906a4bb4c47 | [
"Apache-2.0"
] | 1 | 2020-11-10T16:07:24.000Z | 2020-11-10T16:07:24.000Z | main.py | marcacohen/mynewsfeed.io | b1debfab02c5dd6d618acd798792c906a4bb4c47 | [
"Apache-2.0"
] | null | null | null | main.py | marcacohen/mynewsfeed.io | b1debfab02c5dd6d618acd798792c906a4bb4c47 | [
"Apache-2.0"
] | null | null | null | from web import app
app.run(host='0.0.0.0',port=8080, use_reloader=True, debug=True, threaded=True)
| 25.25 | 79 | 0.742574 | from web import app
app.run(host='0.0.0.0',port=8080, use_reloader=True, debug=True, threaded=True)
| true | true |
f726861fe715da3c897ba6b113c2784dd07089b9 | 572 | py | Python | planutils/settings.py | AI-Planning/planning-utils | 2ab7015cfe52505c7972ed9e7066bdd3c769153f | [
"MIT"
] | 1 | 2020-04-18T15:30:58.000Z | 2020-04-18T15:30:58.000Z | planutils/settings.py | AI-Planning/planning-utils | 2ab7015cfe52505c7972ed9e7066bdd3c769153f | [
"MIT"
] | 3 | 2020-04-21T17:09:06.000Z | 2020-04-28T15:50:07.000Z | planutils/settings.py | AI-Planning/planning-utils | 2ab7015cfe52505c7972ed9e7066bdd3c769153f | [
"MIT"
] | null | null | null |
import json, os
from planutils import manifest_converter
# This should eventually be changed once the prefix is customizable
PLANUTILS_PREFIX = os.path.join(os.path.expanduser('~'), '.planutils')  # per-user planutils data directory
SETTINGS_FILE = os.path.join(PLANUTILS_PREFIX, 'settings.json')  # JSON file holding persisted settings
PAAS_SERVER = 'http://45.113.232.43:5001'  # planning-as-a-service endpoint (hard-coded IP)
PAAS_SERVER_LIMIT = 100  # NOTE(review): presumably a cap on PaaS usage/requests -- confirm where it is enforced
def load():
    """Read the planutils settings from SETTINGS_FILE and return them as a dict."""
    with open(SETTINGS_FILE, 'r') as settings_file:
        # json.load parses straight from the file handle.
        return json.load(settings_file)
def save(s):
    """Persist the settings dict ``s`` to SETTINGS_FILE, then refresh the manifest."""
    serialized = json.dumps(s)
    with open(SETTINGS_FILE, 'w') as settings_file:
        settings_file.write(serialized)
    # Keep the generated manifest in sync with the settings just written.
    manifest_converter.generate_manifest()
| 24.869565 | 70 | 0.713287 |
import json, os
from planutils import manifest_converter
PLANUTILS_PREFIX = os.path.join(os.path.expanduser('~'), '.planutils')
SETTINGS_FILE = os.path.join(PLANUTILS_PREFIX, 'settings.json')
PAAS_SERVER = 'http://45.113.232.43:5001'
PAAS_SERVER_LIMIT = 100
def load():
with open(SETTINGS_FILE, 'r') as f:
settings = json.loads(f.read())
return settings
def save(s):
with open(SETTINGS_FILE, 'w') as f:
f.write(json.dumps(s))
manifest_converter.generate_manifest()
| true | true |
f726862b7ddae2271ecd69d2a01c433b3d758f10 | 2,349 | py | Python | tools/mo/unit_tests/mo/load/loader_test.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 1,127 | 2018-10-15T14:36:58.000Z | 2020-04-20T09:29:44.000Z | tools/mo/unit_tests/mo/load/loader_test.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 439 | 2018-10-20T04:40:35.000Z | 2020-04-19T05:56:25.000Z | tools/mo/unit_tests/mo/load/loader_test.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 414 | 2018-10-17T05:53:46.000Z | 2020-04-16T17:29:53.000Z | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from openvino.tools.mo.load.tf.loader import graph_or_sub_graph_has_nhwc_ops
from unit_tests.utils.graph import build_graph, result, regular_op, const, connect_front
class TFLoaderTest(unittest.TestCase):
    """Tests for graph_or_sub_graph_has_nhwc_ops over plain graphs and Loop sub-graphs."""
    @staticmethod
    def build_conv_graph():
        """Front-stage graph with one NHWC Conv2D node -- should be detected as NHWC."""
        nodes = {
            **const('weights', np.random.randn(1, 1, 1, 1)),
            **regular_op('input', {'op': 'Parameter'}),
            **regular_op('conv', {'op': 'Conv2D', 'layout': 'NHWC'}),
            **result('result'),
        }
        edges = [*connect_front('input', '0:conv'),
                 *connect_front('weights', '1:conv'),
                 *connect_front('conv:0', 'result'),
                 ]
        graph = build_graph(nodes, edges)
        graph.stage = 'front'
        return graph
    @staticmethod
    def build_parameter_result_graph():
        """Minimal Parameter -> Result graph with no layout-sensitive ops."""
        nodes = {
            **regular_op('input', {'op': 'Parameter'}),
            **result('result'),
        }
        edges = [*connect_front('input', '0:result'),
                 ]
        graph = build_graph(nodes, edges)
        graph.stage = 'front'
        return graph
    @staticmethod
    def build_loop_graph(body_graph):
        """Wrap ``body_graph`` as the body sub-graph of a fake Loop node."""
        # create fake Loop operation
        nodes = {
            **regular_op('input', {'op': 'Parameter'}),
            **regular_op('loop', {'op': 'Loop', 'body': body_graph, 'sub_graphs': ['body']}),
            **result('result'),
        }
        edges = [*connect_front('input', '0:loop'),
                 *connect_front('loop:0', 'result'),
                 ]
        graph = build_graph(nodes, edges)
        graph.stage = 'front'
        return graph
    def test_convolution_main_graph(self):
        # NHWC op directly in the main graph must be detected.
        self.assertTrue(graph_or_sub_graph_has_nhwc_ops(self.build_conv_graph()))
    def test_convolution_loop_body_graph(self):
        # NHWC op hidden inside a Loop body must also be detected.
        self.assertTrue(graph_or_sub_graph_has_nhwc_ops(self.build_loop_graph(self.build_conv_graph())))
    def test_no_convolution_main_graph(self):
        self.assertFalse(graph_or_sub_graph_has_nhwc_ops(self.build_parameter_result_graph()))
    def test_no_convolution_main_and_sub_graph(self):
        # Neither the main graph nor the Loop body contains an NHWC op.
        self.assertFalse(graph_or_sub_graph_has_nhwc_ops(self.build_loop_graph(self.build_parameter_result_graph())))
| 34.544118 | 117 | 0.613027 |
import unittest
import numpy as np
from openvino.tools.mo.load.tf.loader import graph_or_sub_graph_has_nhwc_ops
from unit_tests.utils.graph import build_graph, result, regular_op, const, connect_front
class TFLoaderTest(unittest.TestCase):
@staticmethod
def build_conv_graph():
nodes = {
**const('weights', np.random.randn(1, 1, 1, 1)),
**regular_op('input', {'op': 'Parameter'}),
**regular_op('conv', {'op': 'Conv2D', 'layout': 'NHWC'}),
**result('result'),
}
edges = [*connect_front('input', '0:conv'),
*connect_front('weights', '1:conv'),
*connect_front('conv:0', 'result'),
]
graph = build_graph(nodes, edges)
graph.stage = 'front'
return graph
@staticmethod
def build_parameter_result_graph():
nodes = {
**regular_op('input', {'op': 'Parameter'}),
**result('result'),
}
edges = [*connect_front('input', '0:result'),
]
graph = build_graph(nodes, edges)
graph.stage = 'front'
return graph
@staticmethod
def build_loop_graph(body_graph):
nodes = {
**regular_op('input', {'op': 'Parameter'}),
**regular_op('loop', {'op': 'Loop', 'body': body_graph, 'sub_graphs': ['body']}),
**result('result'),
}
edges = [*connect_front('input', '0:loop'),
*connect_front('loop:0', 'result'),
]
graph = build_graph(nodes, edges)
graph.stage = 'front'
return graph
def test_convolution_main_graph(self):
self.assertTrue(graph_or_sub_graph_has_nhwc_ops(self.build_conv_graph()))
def test_convolution_loop_body_graph(self):
self.assertTrue(graph_or_sub_graph_has_nhwc_ops(self.build_loop_graph(self.build_conv_graph())))
def test_no_convolution_main_graph(self):
self.assertFalse(graph_or_sub_graph_has_nhwc_ops(self.build_parameter_result_graph()))
def test_no_convolution_main_and_sub_graph(self):
self.assertFalse(graph_or_sub_graph_has_nhwc_ops(self.build_loop_graph(self.build_parameter_result_graph())))
| true | true |
f726869b16a3c31ae1abced42c8498b5f9a71273 | 390 | py | Python | shorty/wsgi.py | alazaro/shorty | 2a75c2f1351b3ada20c551159f8f22b04284ead1 | [
"MIT"
] | null | null | null | shorty/wsgi.py | alazaro/shorty | 2a75c2f1351b3ada20c551159f8f22b04284ead1 | [
"MIT"
] | 1 | 2021-06-10T19:02:19.000Z | 2021-06-10T19:02:19.000Z | shorty/wsgi.py | alazaro/shorty | 2a75c2f1351b3ada20c551159f8f22b04284ead1 | [
"MIT"
] | null | null | null | """
WSGI config for shorty project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "shorty.settings")
application = get_wsgi_application()
| 22.941176 | 78 | 0.784615 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "shorty.settings")
application = get_wsgi_application()
| true | true |
f72686bfb1c06e1c972ddd0550910b6f66064e90 | 1,361 | py | Python | other/password_generator.py | Pratiyush27/Python | be48a876c7746611099974e572ea82691a7cbb20 | [
"MIT"
] | 12 | 2020-02-11T22:18:10.000Z | 2021-06-23T02:56:07.000Z | other/password_generator.py | Pratiyush27/Python | be48a876c7746611099974e572ea82691a7cbb20 | [
"MIT"
] | 1 | 2019-09-26T08:03:36.000Z | 2019-09-26T08:03:36.000Z | other/password_generator.py | Pratiyush27/Python | be48a876c7746611099974e572ea82691a7cbb20 | [
"MIT"
] | 18 | 2020-02-09T13:00:11.000Z | 2021-03-11T08:47:36.000Z | """Password generator allows you to generate a random password of length N."""
from random import choice
from string import ascii_letters, digits, punctuation
def password_generator(length=8):
    """Generate a random password of ``length`` characters drawn from letters,
    digits and punctuation, using a cryptographically secure RNG.

    A non-positive ``length`` yields an empty string.

    >>> len(password_generator())
    8
    >>> len(password_generator(length=16))
    16
    >>> len(password_generator(257))
    257
    >>> len(password_generator(length=0))
    0
    >>> len(password_generator(-1))
    0
    """
    # secrets.choice is the right tool for passwords; the module-level
    # random.choice is predictable and unsuitable for security purposes.
    from secrets import choice as secure_choice
    chars = ascii_letters + digits + punctuation
    return "".join(secure_choice(chars) for _ in range(length))
# ALTERNATIVE METHODS
# ctbi= characters that must be in password
# i= how many letters or characters the password length will be
def alternative_password_generator(ctbi, i):
    """Unimplemented alternative generator.

    ctbi: characters that must be present in the password.
    i: intended password length.
    """
    # Intended design: compose the password from random_number,
    # random_letters and random_characters below.
    pass  # Put your code here...
def random_number(ctbi, i):
    """Unimplemented helper: contribute random digits to the password."""
    pass  # Put your code here...
def random_letters(ctbi, i):
    """Unimplemented helper: contribute random letters to the password."""
    pass  # Put your code here...
def random_characters(ctbi, i):
    """Unimplemented helper: contribute random punctuation to the password."""
    pass  # Put your code here...
def main():
    """Prompt the user for a password length and print a generated password.

    Raises ValueError if the input is not an integer.
    """
    length = int(input("Please indicate the max length of your password: ").strip())
    print("Password generated:", password_generator(length))
    # Fixed user-facing typo: "passsword" -> "password".
    print("[If you are thinking of using this password, You better save it.]")
# Run interactively only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 26.173077 | 84 | 0.68626 | from random import choice
from string import ascii_letters, digits, punctuation
def password_generator(length=8):
chars = tuple(ascii_letters) + tuple(digits) + tuple(punctuation)
return "".join(choice(chars) for x in range(length))
def alternative_password_generator(ctbi, i):
pass
def random_number(ctbi, i):
pass
def random_letters(ctbi, i):
pass
def random_characters(ctbi, i):
pass
def main():
length = int(input("Please indicate the max length of your password: ").strip())
print("Password generated:", password_generator(length))
print("[If you are thinking of using this passsword, You better save it.]")
if __name__ == "__main__":
main()
| true | true |
f726874106f79a0d94063c9a43eae7fc697a4f84 | 2,283 | py | Python | mindware/components/feature_engineering/transformations/rescaler/quantile_transformer.py | jhj0411jhj/soln-ml | 002ec06bf139b14bc059e0f0438501b31d9ed16a | [
"MIT"
] | 27 | 2021-07-19T09:03:34.000Z | 2022-03-31T06:19:23.000Z | mindware/components/feature_engineering/transformations/rescaler/quantile_transformer.py | jhj0411jhj/soln-ml | 002ec06bf139b14bc059e0f0438501b31d9ed16a | [
"MIT"
] | 4 | 2021-07-15T12:17:10.000Z | 2022-01-26T17:16:58.000Z | mindware/components/feature_engineering/transformations/rescaler/quantile_transformer.py | jhj0411jhj/soln-ml | 002ec06bf139b14bc059e0f0438501b31d9ed16a | [
"MIT"
] | 17 | 2020-05-12T20:24:50.000Z | 2021-07-11T03:31:38.000Z | from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformIntegerHyperparameter, \
CategoricalHyperparameter
from mindware.components.feature_engineering.transformations.base_transformer import *
class QuantileTransformation(Transformer):
    """Feature rescaler wrapping sklearn-style QuantileTransformer.

    Maps numerical/discrete columns onto a uniform or normal output
    distribution via their empirical quantiles.
    """
    # NOTE(review): `type` appears to be the framework's transformer-type id
    # (value 5) -- confirm against the Transformer base class.
    type = 5
    def __init__(self, n_quantiles=1000, output_distribution='uniform', random_state=1):
        super().__init__("quantile_transformer")
        self.input_type = [NUMERICAL, DISCRETE]
        self.compound_mode = 'in_place'
        self.output_type = NUMERICAL
        # Parameters forwarded verbatim to QuantileTransformer in operate().
        self.output_distribution = output_distribution
        self.n_quantiles = n_quantiles
        self.random_state = random_state
    @ease_trans
    def operate(self, input_datanode, target_fields=None):
        """Fit (once) and apply the quantile transform to target_fields."""
        from mindware.components.feature_engineering.transformations.utils import QuantileTransformer
        X, y = input_datanode.data
        X_new = X[:, target_fields]
        # Lazily fit on first use; later calls reuse the fitted model so
        # train/test columns get the same mapping.
        if not self.model:
            self.model = QuantileTransformer(output_distribution=self.output_distribution,
                                             n_quantiles=self.n_quantiles, copy=False,
                                             random_state=self.random_state)
            self.model.fit(X_new)
        _X = self.model.transform(X_new)
        return _X
    @staticmethod
    def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):
        """Return the tuning space for the given optimizer ('smac' or 'tpe').

        Returns None for any other optimizer value (implicit fall-through).
        """
        if optimizer == 'smac':
            cs = ConfigurationSpace()
            # TODO parametrize like the Random Forest as n_quantiles = n_features^param
            n_quantiles = UniformIntegerHyperparameter(
                'n_quantiles', lower=10, upper=2000, default_value=1000
            )
            output_distribution = CategoricalHyperparameter(
                'output_distribution', ['uniform', 'normal'], default_value="uniform"
            )
            cs.add_hyperparameters([n_quantiles, output_distribution])
            return cs
        elif optimizer == 'tpe':
            from hyperopt import hp
            # hp.randint(..., 1990) + 10 mirrors the SMAC range [10, 2000).
            space = {'n_quantiles': hp.randint('quantile_n_quantiles', 1990) + 10,
                     'output_distribution': hp.choice('quantile_output_distribution', ['uniform', 'normal'])}
            return space
| 40.767857 | 109 | 0.65922 | from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformIntegerHyperparameter, \
CategoricalHyperparameter
from mindware.components.feature_engineering.transformations.base_transformer import *
class QuantileTransformation(Transformer):
type = 5
def __init__(self, n_quantiles=1000, output_distribution='uniform', random_state=1):
super().__init__("quantile_transformer")
self.input_type = [NUMERICAL, DISCRETE]
self.compound_mode = 'in_place'
self.output_type = NUMERICAL
self.output_distribution = output_distribution
self.n_quantiles = n_quantiles
self.random_state = random_state
@ease_trans
def operate(self, input_datanode, target_fields=None):
from mindware.components.feature_engineering.transformations.utils import QuantileTransformer
X, y = input_datanode.data
X_new = X[:, target_fields]
if not self.model:
self.model = QuantileTransformer(output_distribution=self.output_distribution,
n_quantiles=self.n_quantiles, copy=False,
random_state=self.random_state)
self.model.fit(X_new)
_X = self.model.transform(X_new)
return _X
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):
if optimizer == 'smac':
cs = ConfigurationSpace()
n_quantiles = UniformIntegerHyperparameter(
'n_quantiles', lower=10, upper=2000, default_value=1000
)
output_distribution = CategoricalHyperparameter(
'output_distribution', ['uniform', 'normal'], default_value="uniform"
)
cs.add_hyperparameters([n_quantiles, output_distribution])
return cs
elif optimizer == 'tpe':
from hyperopt import hp
space = {'n_quantiles': hp.randint('quantile_n_quantiles', 1990) + 10,
'output_distribution': hp.choice('quantile_output_distribution', ['uniform', 'normal'])}
return space
| true | true |
f726877422782de259695a0ce16eb5bc2f697d80 | 360 | py | Python | resource_path.py | UAlbanyArchives/EADMachine-2.0 | 18e155f76374b295c287d32c6e54ef8ecabe29d2 | [
"MIT"
] | 5 | 2016-01-25T15:27:12.000Z | 2021-08-17T22:31:48.000Z | source/GUI/resource_path.py | gwiedeman/eadmachine | f6c0c0f92fc20ab6dcf4962fda827b7adb4749d4 | [
"Unlicense"
] | null | null | null | source/GUI/resource_path.py | gwiedeman/eadmachine | f6c0c0f92fc20ab6dcf4962fda827b7adb4749d4 | [
"Unlicense"
] | null | null | null | import os
import sys
def resource_path(relative_path):
    """Return the absolute path to a bundled resource.

    Works both in development (resolved against the current working
    directory) and inside a frozen PyInstaller build, where the bundle
    is unpacked to a temp folder recorded in ``sys._MEIPASS``.
    """
    try:
        # PyInstaller creates a temp folder and stores its path in _MEIPASS.
        base_path = sys._MEIPASS
    except AttributeError:
        # Not frozen. Narrowed from `except Exception` so unrelated
        # failures are no longer silently swallowed.
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)
import sys
def resource_path(relative_path):
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path) | true | true |
f72687e80bced94fac9dfc6fc885ba3a3a16f55e | 1,067 | py | Python | plugins/threatstack/icon_threatstack/actions/get_rule/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/threatstack/icon_threatstack/actions/get_rule/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/threatstack/icon_threatstack/actions/get_rule/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | import insightconnect_plugin_runtime
from .schema import GetRuleInput, GetRuleOutput, Input, Output, Component
# Custom imports below
from insightconnect_plugin_runtime.helper import clean
from threatstack.errors import ThreatStackAPIError, ThreatStackClientError, APIRateLimitError
from insightconnect_plugin_runtime.exceptions import PluginException
class GetRule(insightconnect_plugin_runtime.Action):
    """InsightConnect action: fetch one Threat Stack rule by ruleset/rule id."""
    def __init__(self):
        super(self.__class__, self).__init__(
            name="get_rule",
            description=Component.DESCRIPTION,
            input=GetRuleInput(),
            output=GetRuleOutput(),
        )
    # NOTE(review): mutable default `params={}` follows the SDK's action
    # signature and is safe here because params is only read, never mutated.
    def run(self, params={}):
        """Look up the rule and return it as {Output.RULE: rule}.

        Raises PluginException when the Threat Stack client reports an
        API, client, or rate-limit error.
        """
        rule_id, ruleset_id = params.get(Input.RULE_ID), params.get(Input.RULESET_ID)
        try:
            # clean() strips null/empty fields from the API response.
            rule = clean(self.connection.client.rulesets.rules(ruleset_id=ruleset_id, rule_id=rule_id))
        except (ThreatStackAPIError, ThreatStackClientError, APIRateLimitError) as e:
            raise PluginException(cause="An error occurred!", assistance=e)
        return {Output.RULE: rule}
| 38.107143 | 103 | 0.730084 | import insightconnect_plugin_runtime
from .schema import GetRuleInput, GetRuleOutput, Input, Output, Component
from insightconnect_plugin_runtime.helper import clean
from threatstack.errors import ThreatStackAPIError, ThreatStackClientError, APIRateLimitError
from insightconnect_plugin_runtime.exceptions import PluginException
class GetRule(insightconnect_plugin_runtime.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="get_rule",
description=Component.DESCRIPTION,
input=GetRuleInput(),
output=GetRuleOutput(),
)
def run(self, params={}):
rule_id, ruleset_id = params.get(Input.RULE_ID), params.get(Input.RULESET_ID)
try:
rule = clean(self.connection.client.rulesets.rules(ruleset_id=ruleset_id, rule_id=rule_id))
except (ThreatStackAPIError, ThreatStackClientError, APIRateLimitError) as e:
raise PluginException(cause="An error occurred!", assistance=e)
return {Output.RULE: rule}
| true | true |
f72688f4329669467a3225780b13734675bc50e3 | 383 | py | Python | geonames/exceptions.py | flyingdice/geonames-sqlite | acf51d9af723d46815c43509ce22712ce910a61e | [
"Apache-2.0"
] | null | null | null | geonames/exceptions.py | flyingdice/geonames-sqlite | acf51d9af723d46815c43509ce22712ce910a61e | [
"Apache-2.0"
] | null | null | null | geonames/exceptions.py | flyingdice/geonames-sqlite | acf51d9af723d46815c43509ce22712ce910a61e | [
"Apache-2.0"
] | null | null | null | """
geonames/exceptions
~~~~~~~~~~~~~~~~~~~
"""
from . import base
def ignore_foreign_key_constraint(db, options, record: base.T, exception: Exception) -> bool:
    """Return True when *exception* reports a failed FOREIGN KEY constraint."""
    message = str(exception)
    return 'FOREIGN KEY constraint failed' in message
def ignore_unique_key_constraint(db, options, record: base.T, exception: Exception) -> bool:
    """Return True when *exception* reports a failed UNIQUE constraint."""
    message = str(exception)
    return 'UNIQUE constraint failed' in message
| 27.357143 | 93 | 0.697128 | from . import base
def ignore_foreign_key_constraint(db, options, record: base.T, exception: Exception) -> bool:
return 'FOREIGN KEY constraint failed' in str(exception)
def ignore_unique_key_constraint(db, options, record: base.T, exception: Exception) -> bool:
return 'UNIQUE constraint failed' in str(exception)
| true | true |
f7268900a2f97b94ecc6e2a71419685ab31bc7d7 | 131,714 | py | Python | rpython/rlib/parsing/pypackrat.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | rpython/rlib/parsing/pypackrat.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | rpython/rlib/parsing/pypackrat.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null |
from rpython.rlib.parsing.tree import Nonterminal, Symbol
from rpython.rlib.parsing.makepackrat import PackratParser, BacktrackException, Status
class Parser(object):
    def NAME(self):
        """Public entry point: match the NAME token and return its result."""
        return self._NAME().result
    def _NAME(self):
        """Memoized matcher for NAME (packrat cache keyed by position)."""
        _key = self._pos
        _status = self._dict_NAME.get(_key, None)
        if _status is None:
            # First visit at this position: create a fresh cache entry.
            _status = self._dict_NAME[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: restore the position reached by the earlier match.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: fail fast with the recorded error.
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1074651696()
            assert _status.status != _status.LEFTRECURSION
            # Record the success in the cache before returning.
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Record the failure so re-parsing this position fails fast.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def SPACE(self):
        """Public entry point: match a single space and return its result."""
        return self._SPACE().result
    def _SPACE(self):
        """Memoized matcher for a literal ' ' (packrat cache keyed by position)."""
        _key = self._pos
        _status = self._dict_SPACE.get(_key, None)
        if _status is None:
            # First visit at this position: create a fresh cache entry.
            _status = self._dict_SPACE[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: restore the position reached by the earlier match.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: fail fast with the recorded error.
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self.__chars__(' ')
            assert _status.status != _status.LEFTRECURSION
            # Record the success in the cache before returning.
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Record the failure so re-parsing this position fails fast.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def COMMENT(self):
        """Public entry point: match a COMMENT token and return its result."""
        return self._COMMENT().result
    def _COMMENT(self):
        """Memoized matcher for COMMENT (packrat cache keyed by position)."""
        _key = self._pos
        _status = self._dict_COMMENT.get(_key, None)
        if _status is None:
            # First visit at this position: create a fresh cache entry.
            _status = self._dict_COMMENT[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: restore the position reached by the earlier match.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: fail fast with the recorded error.
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex528667127()
            assert _status.status != _status.LEFTRECURSION
            # Record the success in the cache before returning.
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Record the failure so re-parsing this position fails fast.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def IGNORE(self):
        """Public entry point: match ignorable input and return its result."""
        return self._IGNORE().result
    def _IGNORE(self):
        """Memoized matcher for IGNORE (packrat cache keyed by position)."""
        _key = self._pos
        _status = self._dict_IGNORE.get(_key, None)
        if _status is None:
            # First visit at this position: create a fresh cache entry.
            _status = self._dict_IGNORE[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: restore the position reached by the earlier match.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: fail fast with the recorded error.
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1979538501()
            assert _status.status != _status.LEFTRECURSION
            # Record the success in the cache before returning.
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Record the failure so re-parsing this position fails fast.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def newline(self):
        """Public entry point: match a newline/comment and return its result."""
        return self._newline().result
def _newline(self):
_key = self._pos
_status = self._dict_newline.get(_key, None)
if _status is None:
_status = self._dict_newline[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
elif (_statusstatus == _status.INPROGRESS or
_statusstatus == _status.LEFTRECURSION):
_status.status = _status.LEFTRECURSION
if _status.result is not None:
self._pos = _status.pos
return _status
else:
raise BacktrackException(None)
elif _statusstatus == _status.SOMESOLUTIONS:
_status.status = _status.INPROGRESS
_startingpos = self._pos
try:
_result = None
_error = None
while 1:
_choice0 = self._pos
try:
_call_status = self._COMMENT()
_result = _call_status.result
_error = _call_status.error
break
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice1 = self._pos
try:
_result = self._regex299149370()
break
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
raise BacktrackException(_error)
_result = self._regex299149370()
break
if _status.status == _status.LEFTRECURSION:
if _status.result is not None:
if _status.pos >= self._pos:
_status.status = _status.NORMAL
self._pos = _status.pos
return _status
_status.pos = self._pos
_status.status = _status.SOMESOLUTIONS
_status.result = _result
_status.error = _error
self._pos = _startingpos
return self._newline()
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
    def REGEX(self):
        """Public entry point: match a REGEX token and return its result."""
        return self._REGEX().result
    def _REGEX(self):
        """Memoized matcher for REGEX; wraps the match in Symbol('REGEX', ...)."""
        _key = self._pos
        _status = self._dict_REGEX.get(_key, None)
        if _status is None:
            # First visit at this position: create a fresh cache entry.
            _status = self._dict_REGEX[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: restore the position reached by the earlier match.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: fail fast with the recorded error.
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1006631623()
            r = _result
            # Wrap the raw match in an AST symbol node.
            _result = (Symbol('REGEX', r, None))
            assert _status.status != _status.LEFTRECURSION
            # Record the success in the cache before returning.
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Record the failure so re-parsing this position fails fast.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def QUOTE(self):
        """Public entry point: match a QUOTE token and return its result."""
        return self._QUOTE().result
    def _QUOTE(self):
        """Memoized matcher for QUOTE; wraps the match in Symbol('QUOTE', ...)."""
        _key = self._pos
        _status = self._dict_QUOTE.get(_key, None)
        if _status is None:
            # First visit at this position: create a fresh cache entry.
            _status = self._dict_QUOTE[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: restore the position reached by the earlier match.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: fail fast with the recorded error.
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1124192327()
            r = _result
            # Wrap the raw match in an AST symbol node.
            _result = (Symbol('QUOTE', r, None))
            assert _status.status != _status.LEFTRECURSION
            # Record the success in the cache before returning.
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Record the failure so re-parsing this position fails fast.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def PYTHONCODE(self):
        """Public entry point: match a PYTHONCODE token and return its result."""
        return self._PYTHONCODE().result
    def _PYTHONCODE(self):
        """Memoized matcher for PYTHONCODE; wraps it in Symbol('PYTHONCODE', ...)."""
        _key = self._pos
        _status = self._dict_PYTHONCODE.get(_key, None)
        if _status is None:
            # First visit at this position: create a fresh cache entry.
            _status = self._dict_PYTHONCODE[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: restore the position reached by the earlier match.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: fail fast with the recorded error.
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex291086639()
            r = _result
            # Wrap the raw match in an AST symbol node.
            _result = (Symbol('PYTHONCODE', r, None))
            assert _status.status != _status.LEFTRECURSION
            # Record the success in the cache before returning.
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Record the failure so re-parsing this position fails fast.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def EOF(self):
        """Public entry point: match end-of-input and return its result."""
        return self._EOF().result
    def _EOF(self):
        """Memoized end-of-input matcher with left-recursion bookkeeping.

        Succeeds only when a negative lookahead on any character fails,
        i.e. the input is exhausted.
        """
        _key = self._pos
        _status = self._dict_EOF.get(_key, None)
        if _status is None:
            # First visit at this position: create a fresh cache entry.
            _status = self._dict_EOF[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: restore the position reached by the earlier match.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: fail fast with the recorded error.
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Re-entered while still parsing: mark left recursion and
                # return the best partial result seen so far, if any.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # Negative lookahead: succeed only if __any__() fails to match.
            _choice0 = self._pos
            _stored_result1 = _result
            try:
                _result = self.__any__()
            except BacktrackException:
                self._pos = _choice0
                _result = _stored_result1
            else:
                raise BacktrackException(None)
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until no more input is consumed.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._EOF()
            # Record the success in the cache before returning.
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Record the failure so re-parsing this position fails fast.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def file(self):
        """Public entry point: parse a whole grammar file and return its result."""
        return self._file().result
def _file(self):
_key = self._pos
_status = self._dict_file.get(_key, None)
if _status is None:
_status = self._dict_file[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
elif (_statusstatus == _status.INPROGRESS or
_statusstatus == _status.LEFTRECURSION):
_status.status = _status.LEFTRECURSION
if _status.result is not None:
self._pos = _status.pos
return _status
else:
raise BacktrackException(None)
elif _statusstatus == _status.SOMESOLUTIONS:
_status.status = _status.INPROGRESS
_startingpos = self._pos
try:
_result = None
_error = None
_all0 = []
while 1:
_choice1 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = _call_status.error
_all0.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
_result = _all0
_call_status = self._list()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_before_discard2 = _result
_call_status = self._EOF()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_result = _before_discard2
if _status.status == _status.LEFTRECURSION:
if _status.result is not None:
if _status.pos >= self._pos:
_status.status = _status.NORMAL
self._pos = _status.pos
return _status
_status.pos = self._pos
_status.status = _status.SOMESOLUTIONS
_status.result = _result
_status.error = _error
self._pos = _startingpos
return self._file()
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
    def list(self):
        """Public entry point: parse a list of productions and return its result."""
        return self._list().result
def _list(self):
_key = self._pos
_status = self._dict_list.get(_key, None)
if _status is None:
_status = self._dict_list[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
elif (_statusstatus == _status.INPROGRESS or
_statusstatus == _status.LEFTRECURSION):
_status.status = _status.LEFTRECURSION
if _status.result is not None:
self._pos = _status.pos
return _status
else:
raise BacktrackException(None)
elif _statusstatus == _status.SOMESOLUTIONS:
_status.status = _status.INPROGRESS
_startingpos = self._pos
try:
_result = None
_error = None
_all0 = []
_call_status = self._production()
_result = _call_status.result
_error = _call_status.error
_all0.append(_result)
while 1:
_choice1 = self._pos
try:
_call_status = self._production()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all0.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
_result = _all0
content = _result
_result = (Nonterminal('list', content))
if _status.status == _status.LEFTRECURSION:
if _status.result is not None:
if _status.pos >= self._pos:
_status.status = _status.NORMAL
self._pos = _status.pos
return _status
_status.pos = self._pos
_status.status = _status.SOMESOLUTIONS
_status.result = _result
_status.error = _error
self._pos = _startingpos
return self._list()
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
    def production(self):
        """Public entry point: parse one grammar production and return its result."""
        return self._production().result
def _production(self):
_key = self._pos
_status = self._dict_production.get(_key, None)
if _status is None:
_status = self._dict_production[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
elif (_statusstatus == _status.INPROGRESS or
_statusstatus == _status.LEFTRECURSION):
_status.status = _status.LEFTRECURSION
if _status.result is not None:
self._pos = _status.pos
return _status
else:
raise BacktrackException(None)
elif _statusstatus == _status.SOMESOLUTIONS:
_status.status = _status.INPROGRESS
_startingpos = self._pos
try:
_result = None
_error = None
_call_status = self._NAME()
_result = _call_status.result
_error = _call_status.error
name = _result
_all0 = []
while 1:
_choice1 = self._pos
try:
_call_status = self._SPACE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all0.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
_result = _all0
_call_status = self._productionargs()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
args = _result
_result = self.__chars__(':')
_all2 = []
while 1:
_choice3 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all2.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice3
break
_result = _all2
_call_status = self._or_()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
what = _result
_all4 = []
while 1:
_choice5 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all4.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
break
_result = _all4
_result = self.__chars__(';')
_all6 = []
while 1:
_choice7 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all6.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
break
_result = _all6
_result = (Nonterminal('production', [name, args, what]))
if _status.status == _status.LEFTRECURSION:
if _status.result is not None:
if _status.pos >= self._pos:
_status.status = _status.NORMAL
self._pos = _status.pos
return _status
_status.pos = self._pos
_status.status = _status.SOMESOLUTIONS
_status.result = _result
_status.error = _error
self._pos = _startingpos
return self._production()
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
    def productionargs(self):
        """Public entry point: parse a production's argument list and return it."""
        return self._productionargs().result
def _productionargs(self):
_key = self._pos
_status = self._dict_productionargs.get(_key, None)
if _status is None:
_status = self._dict_productionargs[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
elif (_statusstatus == _status.INPROGRESS or
_statusstatus == _status.LEFTRECURSION):
_status.status = _status.LEFTRECURSION
if _status.result is not None:
self._pos = _status.pos
return _status
else:
raise BacktrackException(None)
elif _statusstatus == _status.SOMESOLUTIONS:
_status.status = _status.INPROGRESS
_startingpos = self._pos
try:
_result = None
_error = None
while 1:
_choice0 = self._pos
try:
_result = self.__chars__('(')
_all1 = []
while 1:
_choice2 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = _call_status.error
_all1.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice2
break
_result = _all1
_all3 = []
while 1:
_choice4 = self._pos
try:
_call_status = self._NAME()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_before_discard5 = _result
_all6 = []
while 1:
_choice7 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all6.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
break
_result = _all6
_result = self.__chars__(',')
_all8 = []
while 1:
_choice9 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all8.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice9
break
_result = _all8
_result = _before_discard5
_all3.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
_result = _all3
args = _result
_call_status = self._NAME()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
arg = _result
_all10 = []
while 1:
_choice11 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all10.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice11
break
_result = _all10
_result = self.__chars__(')')
_all12 = []
while 1:
_choice13 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all12.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice13
break
_result = _all12
_result = (Nonterminal('productionargs', args + [arg]))
break
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice14 = self._pos
try:
_result = (Nonterminal('productionargs', []))
break
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice14
raise BacktrackException(_error)
_result = (Nonterminal('productionargs', []))
break
if _status.status == _status.LEFTRECURSION:
if _status.result is not None:
if _status.pos >= self._pos:
_status.status = _status.NORMAL
self._pos = _status.pos
return _status
_status.pos = self._pos
_status.status = _status.SOMESOLUTIONS
_status.result = _result
_status.error = _error
self._pos = _startingpos
return self._productionargs()
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
    def or_(self):
        """Public entry point: parse an alternatives ('|') rule and return it."""
        return self._or_().result
    def _or_(self):
        """Packrat rule ``or_``: ``commands`` alternatives separated by ``|``.

        First alternative matches one or more ``commands '|' IGNORE*`` groups
        followed by a final ``commands`` and builds ``Nonterminal('or', ...)``;
        otherwise it falls back to a single ``commands``.  Results are memoized
        per input position in a Status object (with left-recursion recovery);
        failure raises BacktrackException.  Appears machine-generated -- do not
        hand-edit the control flow.
        """
        # Memoization lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_or_.get(_key, None)
        if _status is None:
            _status = self._dict_or_[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                _choice0 = self._pos
                try:
                    _all1 = []
                    _call_status = self._commands()
                    _result = _call_status.result
                    _error = _call_status.error
                    _before_discard2 = _result
                    _result = self.__chars__('|')
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = _before_discard2
                    _all1.append(_result)
                    while 1:
                        _choice5 = self._pos
                        try:
                            _call_status = self._commands()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard6 = _result
                            _result = self.__chars__('|')
                            _all7 = []
                            while 1:
                                _choice8 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all7.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice8
                                    break
                            _result = _all7
                            _result = _before_discard6
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice5
                            break
                    _result = _all1
                    l = _result
                    _call_status = self._commands()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    last = _result
                    _result = (Nonterminal('or', l + [last]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    _choice9 = self._pos
                    try:
                        _call_status = self._commands()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice9
                        raise BacktrackException(_error)
                    _call_status = self._commands()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
            # Left-recursion fixpoint: re-run from the start while the match grows.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._or_()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def commands(self):
return self._commands().result
    def _commands(self):
        """Packrat rule ``commands``: newline-separated ``command`` sequence.

        First alternative: ``command newline (command newline)+`` building
        ``Nonterminal('commands', [cmd] + cmds)``; fallback: a single
        ``command``.  Memoized per position with left-recursion recovery;
        raises BacktrackException on failure.  Appears machine-generated.
        """
        # Memoization lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_commands.get(_key, None)
        if _status is None:
            _status = self._dict_commands[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                _choice0 = self._pos
                try:
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = _call_status.error
                    cmd = _result
                    _call_status = self._newline()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all1 = []
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _before_discard2 = _result
                    _call_status = self._newline()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _result = _before_discard2
                    _all1.append(_result)
                    while 1:
                        _choice3 = self._pos
                        try:
                            _call_status = self._command()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard4 = _result
                            _call_status = self._newline()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _result = _before_discard4
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice3
                            break
                    _result = _all1
                    cmds = _result
                    _result = (Nonterminal('commands', [cmd] + cmds))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    _choice5 = self._pos
                    try:
                        _call_status = self._command()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice5
                        raise BacktrackException(_error)
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
            # Left-recursion fixpoint: re-run from the start while the match grows.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._commands()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def command(self):
return self._command().result
    def _command(self):
        """Packrat rule ``command``: delegates directly to ``simplecommand``.

        Memoized per position with left-recursion recovery; raises
        BacktrackException on failure.  Appears machine-generated.
        """
        # Memoization lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_command.get(_key, None)
        if _status is None:
            _status = self._dict_command[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._simplecommand()
            _result = _call_status.result
            _error = _call_status.error
            # Left-recursion fixpoint: re-run from the start while the match grows.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._command()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def simplecommand(self):
return self._simplecommand().result
    def _simplecommand(self):
        """Packrat rule ``simplecommand``: ordered choice of sub-rules.

        Tries, in order: ``return_`` | ``if_`` | ``named_command`` |
        ``repetition`` | ``choose`` | ``negation``; the first that succeeds
        wins.  Memoized per position with left-recursion recovery; raises
        BacktrackException if all alternatives fail.  Appears machine-generated.
        """
        # Memoization lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_simplecommand.get(_key, None)
        if _status is None:
            _status = self._dict_simplecommand[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                _choice0 = self._pos
                try:
                    _call_status = self._return_()
                    _result = _call_status.result
                    _error = _call_status.error
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    _choice1 = self._pos
                    try:
                        _call_status = self._if_()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice1
                        _choice2 = self._pos
                        try:
                            _call_status = self._named_command()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            break
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            _choice3 = self._pos
                            try:
                                _call_status = self._repetition()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                break
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice3
                                _choice4 = self._pos
                                try:
                                    _call_status = self._choose()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    break
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice4
                                    _choice5 = self._pos
                                    try:
                                        _call_status = self._negation()
                                        _result = _call_status.result
                                        _error = self._combine_errors(_error, _call_status.error)
                                        break
                                    except BacktrackException as _exc:
                                        _error = self._combine_errors(_error, _exc.error)
                                        self._pos = _choice5
                                        raise BacktrackException(_error)
                _call_status = self._negation()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            # Left-recursion fixpoint: re-run from the start while the match grows.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._simplecommand()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def return_(self):
return self._return_().result
    def _return_(self):
        """Packrat rule ``return_``: ``'return' SPACE* PYTHONCODE IGNORE*``.

        Builds ``Nonterminal('return', [code])`` from the PYTHONCODE token.
        Memoized per position with left-recursion recovery; raises
        BacktrackException on failure.  Appears machine-generated.
        """
        # Memoization lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_return_.get(_key, None)
        if _status is None:
            _status = self._dict_return_[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self.__chars__('return')
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = _call_status.error
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _call_status = self._PYTHONCODE()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            code = _result
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            _result = (Nonterminal('return', [code]))
            # Left-recursion fixpoint: re-run from the start while the match grows.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._return_()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def if_(self):
return self._if_().result
    def _if_(self):
        """Packrat rule ``if_``: conditional command forms.

        First alternative: ``'do' newline command SPACE* 'if' SPACE*
        PYTHONCODE IGNORE*`` -> ``Nonterminal('if', [cmd, condition])``;
        fallback: ``'if' SPACE* PYTHONCODE IGNORE*`` ->
        ``Nonterminal('if', [condition])``.  Memoized per position with
        left-recursion recovery; raises BacktrackException on failure.
        Appears machine-generated.
        """
        # Memoization lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_if_.get(_key, None)
        if _status is None:
            _status = self._dict_if_[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                _choice0 = self._pos
                try:
                    _result = self.__chars__('do')
                    _call_status = self._newline()
                    _result = _call_status.result
                    _error = _call_status.error
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    cmd = _result
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _result = self.__chars__('if')
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _call_status = self._PYTHONCODE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    condition = _result
                    _all5 = []
                    while 1:
                        _choice6 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all5.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice6
                            break
                    _result = _all5
                    _result = (Nonterminal('if', [cmd, condition]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    _choice7 = self._pos
                    try:
                        _result = self.__chars__('if')
                        _all8 = []
                        while 1:
                            _choice9 = self._pos
                            try:
                                _call_status = self._SPACE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all8.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice9
                                break
                        _result = _all8
                        _call_status = self._PYTHONCODE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        condition = _result
                        _all10 = []
                        while 1:
                            _choice11 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all10.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice11
                                break
                        _result = _all10
                        _result = (Nonterminal('if', [condition]))
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice7
                        raise BacktrackException(_error)
                _result = self.__chars__('if')
                _all12 = []
                while 1:
                    _choice13 = self._pos
                    try:
                        _call_status = self._SPACE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all12.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice13
                        break
                _result = _all12
                _call_status = self._PYTHONCODE()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                condition = _result
                _all14 = []
                while 1:
                    _choice15 = self._pos
                    try:
                        _call_status = self._IGNORE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all14.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice15
                        break
                _result = _all14
                _result = (Nonterminal('if', [condition]))
                break
            # Left-recursion fixpoint: re-run from the start while the match grows.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._if_()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def choose(self):
return self._choose().result
    def _choose(self):
        """Packrat rule ``choose``: ``'choose' NAME 'in' PYTHONCODE commands``.

        Matches ``'choose' SPACE* NAME SPACE* 'in' SPACE* PYTHONCODE IGNORE*
        commands`` and builds ``Nonterminal('choose', [name, expr, cmds])``.
        Memoized per position with left-recursion recovery; raises
        BacktrackException on failure.  Appears machine-generated.
        """
        # Memoization lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_choose.get(_key, None)
        if _status is None:
            _status = self._dict_choose[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self.__chars__('choose')
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = _call_status.error
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _call_status = self._NAME()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            name = _result
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            _result = self.__chars__('in')
            _all4 = []
            while 1:
                _choice5 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all4.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                    break
            _result = _all4
            _call_status = self._PYTHONCODE()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            expr = _result
            _all6 = []
            while 1:
                _choice7 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all6.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice7
                    break
            _result = _all6
            _call_status = self._commands()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            cmds = _result
            _result = (Nonterminal('choose', [name, expr, cmds]))
            # Left-recursion fixpoint: re-run from the start while the match grows.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._choose()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def commandchain(self):
return self._commandchain().result
    def _commandchain(self):
        """Packrat rule ``commandchain``: one or more ``simplecommand``s.

        Collects consecutive ``simplecommand`` results (at least one) and
        builds ``Nonterminal('commands', result)``.  Memoized per position
        with left-recursion recovery; raises BacktrackException on failure.
        Appears machine-generated.
        """
        # Memoization lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_commandchain.get(_key, None)
        if _status is None:
            _status = self._dict_commandchain[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _all0 = []
            _call_status = self._simplecommand()
            _result = _call_status.result
            _error = _call_status.error
            _all0.append(_result)
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._simplecommand()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            result = _result
            _result = (Nonterminal('commands', result))
            # Left-recursion fixpoint: re-run from the start while the match grows.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._commandchain()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def named_command(self):
return self._named_command().result
    def _named_command(self):
        """Packrat rule ``named_command``: ``NAME SPACE* '=' SPACE* command``.

        Binds a command to a name, building
        ``Nonterminal('named_command', [name, cmd])``.  Memoized per position
        with left-recursion recovery; raises BacktrackException on failure.
        Appears machine-generated.
        """
        # Memoization lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_named_command.get(_key, None)
        if _status is None:
            _status = self._dict_named_command[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._NAME()
            _result = _call_status.result
            _error = _call_status.error
            name = _result
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _result = self.__chars__('=')
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            _call_status = self._command()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            cmd = _result
            _result = (Nonterminal('named_command', [name, cmd]))
            # Left-recursion fixpoint: re-run from the start while the match grows.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._named_command()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def repetition(self):
return self._repetition().result
    def _repetition(self):
        """Packrat rule ``repetition``: ``enclosed`` followed by a quantifier.

        First alternative: ``enclosed SPACE* '?' IGNORE*`` ->
        ``Nonterminal('maybe', [what])``; fallback: ``enclosed SPACE*
        ('*' | '+') IGNORE*`` -> ``Nonterminal('repetition', [repetition,
        what])``.  Memoized per position with left-recursion recovery; raises
        BacktrackException on failure.  Appears machine-generated.
        """
        # Memoization lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_repetition.get(_key, None)
        if _status is None:
            _status = self._dict_repetition[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                _choice0 = self._pos
                try:
                    _call_status = self._enclosed()
                    _result = _call_status.result
                    _error = _call_status.error
                    what = _result
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _result = self.__chars__('?')
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = (Nonterminal('maybe', [what]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    _choice5 = self._pos
                    try:
                        _call_status = self._enclosed()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        what = _result
                        _all6 = []
                        while 1:
                            _choice7 = self._pos
                            try:
                                _call_status = self._SPACE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all6.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice7
                                break
                        _result = _all6
                        while 1:
                            _choice8 = self._pos
                            try:
                                _result = self.__chars__('*')
                                break
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice8
                                _choice9 = self._pos
                                try:
                                    _result = self.__chars__('+')
                                    break
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice9
                                    raise BacktrackException(_error)
                            _result = self.__chars__('+')
                            break
                        repetition = _result
                        _all10 = []
                        while 1:
                            _choice11 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all10.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice11
                                break
                        _result = _all10
                        _result = (Nonterminal('repetition', [repetition, what]))
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice5
                        raise BacktrackException(_error)
                _call_status = self._enclosed()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                what = _result
                _all12 = []
                while 1:
                    _choice13 = self._pos
                    try:
                        _call_status = self._SPACE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all12.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice13
                        break
                _result = _all12
                while 1:
                    _choice14 = self._pos
                    try:
                        _result = self.__chars__('*')
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice14
                        _choice15 = self._pos
                        try:
                            _result = self.__chars__('+')
                            break
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice15
                            raise BacktrackException(_error)
                    _result = self.__chars__('+')
                    break
                repetition = _result
                _all16 = []
                while 1:
                    _choice17 = self._pos
                    try:
                        _call_status = self._IGNORE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all16.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice17
                        break
                _result = _all16
                _result = (Nonterminal('repetition', [repetition, what]))
                break
            # Left-recursion fixpoint: re-run from the start while the match grows.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._repetition()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def negation(self):
return self._negation().result
    def _negation(self):
        """Memoized rule `negation`: `'!' SPACE* negation` | `enclosed`.

        Auto-generated packrat-parser code.  Returns the memo Status holding
        result/error for the current input position; raises BacktrackException
        on failure.  The LEFTRECURSION/SOMESOLUTIONS handling re-runs the rule
        from the same start position while the matched span keeps growing.
        """
        # Memo-table lookup keyed on the current input position.
        _key = self._pos
        _status = self._dict_negation.get(_key, None)
        if _status is None:
            _status = self._dict_negation[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cached success: restore the saved position, reuse result.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Re-entered while already parsing at this position: the rule
                # is left-recursing; hand back the best result found so far.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # Alternative 1: '!' SPACE* negation  ->  Nonterminal('negation')
                _choice0 = self._pos
                try:
                    _result = self.__chars__('!')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _call_status = self._negation()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    what = _result
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = (Nonterminal('negation', [what]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    # Alternative 2: plain `enclosed`.
                    _choice5 = self._pos
                    try:
                        _call_status = self._enclosed()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice5
                        raise BacktrackException(_error)
                    # Unreachable duplicate emitted by the generator.
                    _call_status = self._enclosed()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
            if _status.status == _status.LEFTRECURSION:
                # Grow-the-seed: keep re-parsing while the match extends.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._negation()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure so re-parsing at this position fails fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def enclosed(self):
return self._enclosed().result
    def _enclosed(self):
        """Memoized rule `enclosed`:
        `'<' IGNORE* primary IGNORE* '>' IGNORE*` -> Nonterminal('exclusive')
        | `'[' IGNORE* or_ IGNORE* ']' IGNORE*`   -> Nonterminal('ignore')
        | `'(' IGNORE* or_ ')' IGNORE*`           -> the inner result
        | `primary`.

        Auto-generated packrat-parser code; returns the memo Status for the
        current position or raises BacktrackException.
        """
        _key = self._pos
        _status = self._dict_enclosed.get(_key, None)
        if _status is None:
            _status = self._dict_enclosed[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Left-recursive re-entry: return the best result so far.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # Alternative 1: '<' ... '>'  (exclusive)
                _choice0 = self._pos
                try:
                    _result = self.__chars__('<')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _call_status = self._primary()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    what = _result
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = self.__chars__('>')
                    _all5 = []
                    while 1:
                        _choice6 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all5.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice6
                            break
                    _result = _all5
                    _result = (Nonterminal('exclusive', [what]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    # Alternative 2: '[' ... ']'  (ignore)
                    _choice7 = self._pos
                    try:
                        _result = self.__chars__('[')
                        _all8 = []
                        while 1:
                            _choice9 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all8.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice9
                                break
                        _result = _all8
                        _call_status = self._or_()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        what = _result
                        _all10 = []
                        while 1:
                            _choice11 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all10.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice11
                                break
                        _result = _all10
                        _result = self.__chars__(']')
                        _all12 = []
                        while 1:
                            _choice13 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all12.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice13
                                break
                        _result = _all12
                        _result = (Nonterminal('ignore', [what]))
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice7
                        # Alternative 3: '(' or_ ')'  (grouping, returns inner result)
                        _choice14 = self._pos
                        try:
                            _before_discard15 = _result
                            _result = self.__chars__('(')
                            _all16 = []
                            while 1:
                                _choice17 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all16.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice17
                                    break
                            _result = _all16
                            _result = _before_discard15
                            _call_status = self._or_()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard18 = _result
                            _result = self.__chars__(')')
                            _all19 = []
                            while 1:
                                _choice20 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all19.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice20
                                    break
                            _result = _all19
                            _result = _before_discard18
                            break
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice14
                            # Alternative 4: bare `primary`.
                            _choice21 = self._pos
                            try:
                                _call_status = self._primary()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                break
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice21
                                raise BacktrackException(_error)
                            # Unreachable duplicate emitted by the generator.
                            _call_status = self._primary()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            break
            if _status.status == _status.LEFTRECURSION:
                # Grow-the-seed: keep re-parsing while the match extends.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._enclosed()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure for this position.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def primary(self):
return self._primary().result
    def _primary(self):
        """Memoized rule `primary`: `call` | `REGEX IGNORE*` | `QUOTE IGNORE*`.

        Auto-generated packrat-parser code; returns the memo Status for the
        current position or raises BacktrackException.
        """
        _key = self._pos
        _status = self._dict_primary.get(_key, None)
        if _status is None:
            _status = self._dict_primary[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Left-recursive re-entry: return the best result so far.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # Alternative 1: rule invocation `call`.
                _choice0 = self._pos
                try:
                    _call_status = self._call()
                    _result = _call_status.result
                    _error = _call_status.error
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    # Alternative 2: REGEX token, then skip IGNORE tokens.
                    _choice1 = self._pos
                    try:
                        _call_status = self._REGEX()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _before_discard2 = _result
                        _all3 = []
                        while 1:
                            _choice4 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all3.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice4
                                break
                        _result = _all3
                        _result = _before_discard2
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice1
                        # Alternative 3: QUOTE token, then skip IGNORE tokens.
                        _choice5 = self._pos
                        try:
                            _call_status = self._QUOTE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard6 = _result
                            _all7 = []
                            while 1:
                                _choice8 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all7.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice8
                                    break
                            _result = _all7
                            _result = _before_discard6
                            break
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice5
                            raise BacktrackException(_error)
                        # Unreachable duplicate emitted by the generator.
                        _call_status = self._QUOTE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _before_discard9 = _result
                        _all10 = []
                        while 1:
                            _choice11 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all10.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice11
                                break
                        _result = _all10
                        _result = _before_discard9
                        break
            if _status.status == _status.LEFTRECURSION:
                # Grow-the-seed: keep re-parsing while the match extends.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._primary()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure for this position.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def call(self):
return self._call().result
    def _call(self):
        """Memoized rule `call`: `NAME arguments IGNORE*`.

        Builds Nonterminal("call", [name, args]).  Auto-generated
        packrat-parser code; returns the memo Status for the current position
        or raises BacktrackException.
        """
        _key = self._pos
        _status = self._dict_call.get(_key, None)
        if _status is None:
            _status = self._dict_call[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Left-recursive re-entry: return the best result so far.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._NAME()
            _result = _call_status.result
            _error = _call_status.error
            x = _result
            _call_status = self._arguments()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            args = _result
            # Skip trailing IGNORE tokens.
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _result = (Nonterminal("call", [x, args]))
            if _status.status == _status.LEFTRECURSION:
                # Grow-the-seed: keep re-parsing while the match extends.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._call()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure for this position.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def arguments(self):
return self._arguments().result
    def _arguments(self):
        """Memoized rule `arguments`:
        `'(' IGNORE* (PYTHONCODE IGNORE* ',' IGNORE*)* PYTHONCODE ')' IGNORE*`
        -> Nonterminal("args", [...]) | empty -> Nonterminal("args", []).

        Auto-generated packrat-parser code; returns the memo Status for the
        current position or raises BacktrackException.
        """
        _key = self._pos
        _status = self._dict_arguments.get(_key, None)
        if _status is None:
            _status = self._dict_arguments[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Left-recursive re-entry: return the best result so far.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # Alternative 1: parenthesized, comma-separated PYTHONCODE list.
                _choice0 = self._pos
                try:
                    _result = self.__chars__('(')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    # Zero or more "PYTHONCODE ,"-prefixed arguments.
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._PYTHONCODE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard5 = _result
                            _all6 = []
                            while 1:
                                _choice7 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all6.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice7
                                    break
                            _result = _all6
                            _result = self.__chars__(',')
                            _all8 = []
                            while 1:
                                _choice9 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all8.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice9
                                    break
                            _result = _all8
                            _result = _before_discard5
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    args = _result
                    # The final (or only) argument has no trailing comma.
                    _call_status = self._PYTHONCODE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    last = _result
                    _result = self.__chars__(')')
                    _all10 = []
                    while 1:
                        _choice11 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all10.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice11
                            break
                    _result = _all10
                    _result = (Nonterminal("args", args + [last]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    # Alternative 2: no argument list at all.
                    _choice12 = self._pos
                    try:
                        _result = (Nonterminal("args", []))
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice12
                        raise BacktrackException(_error)
                    # Unreachable duplicate emitted by the generator.
                    _result = (Nonterminal("args", []))
                    break
            if _status.status == _status.LEFTRECURSION:
                # Grow-the-seed: keep re-parsing while the match extends.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._arguments()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure for this position.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def __init__(self, inputstream):
        """Create a parser over *inputstream* (an indexable char sequence).

        One memoization dictionary per grammar rule, keyed by input
        position; parsing starts at position 0.
        """
        self._dict_NAME = {}
        self._dict_SPACE = {}
        self._dict_COMMENT = {}
        self._dict_IGNORE = {}
        self._dict_newline = {}
        self._dict_REGEX = {}
        self._dict_QUOTE = {}
        self._dict_PYTHONCODE = {}
        self._dict_EOF = {}
        self._dict_file = {}
        self._dict_list = {}
        self._dict_production = {}
        self._dict_productionargs = {}
        self._dict_or_ = {}
        self._dict_commands = {}
        self._dict_command = {}
        self._dict_simplecommand = {}
        self._dict_return_ = {}
        self._dict_if_ = {}
        self._dict_choose = {}
        self._dict_commandchain = {}
        self._dict_named_command = {}
        self._dict_repetition = {}
        self._dict_negation = {}
        self._dict_enclosed = {}
        self._dict_primary = {}
        self._dict_call = {}
        self._dict_arguments = {}
        # Current input position and the stream being parsed.
        self._pos = 0
        self._inputstream = inputstream
    def _regex299149370(self):
        """Run DFA 299149370 (blank/newline runs; used by `newline`).

        Like all `_regex*` drivers below: save the position, run the DFA via
        `_Runner`; on failure restore the position and raise
        BacktrackException, otherwise consume and return the matched slice.
        """
        _choice13 = self._pos
        _runner = self._Runner(self._inputstream, self._pos)
        _i = _runner.recognize_299149370(self._pos)
        if _runner.last_matched_state == -1:
            self._pos = _choice13
            raise BacktrackException
        _upto = _runner.last_matched_index + 1
        _pos = self._pos
        assert _pos >= 0
        assert _upto >= 0
        _result = self._inputstream[_pos: _upto]
        self._pos = _upto
        return _result
    def _regex1006631623(self):
        """Run DFA 1006631623 (backquote-delimited literal; used by REGEX)."""
        _choice14 = self._pos
        _runner = self._Runner(self._inputstream, self._pos)
        _i = _runner.recognize_1006631623(self._pos)
        if _runner.last_matched_state == -1:
            self._pos = _choice14
            raise BacktrackException
        _upto = _runner.last_matched_index + 1
        _pos = self._pos
        assert _pos >= 0
        assert _upto >= 0
        _result = self._inputstream[_pos: _upto]
        self._pos = _upto
        return _result
    def _regex528667127(self):
        """Run DFA 528667127 ('#' comment to end of line; used by COMMENT)."""
        _choice15 = self._pos
        _runner = self._Runner(self._inputstream, self._pos)
        _i = _runner.recognize_528667127(self._pos)
        if _runner.last_matched_state == -1:
            self._pos = _choice15
            raise BacktrackException
        _upto = _runner.last_matched_index + 1
        _pos = self._pos
        assert _pos >= 0
        assert _upto >= 0
        _result = self._inputstream[_pos: _upto]
        self._pos = _upto
        return _result
    def _regex291086639(self):
        """Run DFA 291086639 (brace-delimited `{...}` chunk)."""
        _choice16 = self._pos
        _runner = self._Runner(self._inputstream, self._pos)
        _i = _runner.recognize_291086639(self._pos)
        if _runner.last_matched_state == -1:
            self._pos = _choice16
            raise BacktrackException
        _upto = _runner.last_matched_index + 1
        _pos = self._pos
        assert _pos >= 0
        assert _upto >= 0
        _result = self._inputstream[_pos: _upto]
        self._pos = _upto
        return _result
    def _regex1074651696(self):
        """Run DFA 1074651696 (identifier; used by NAME)."""
        _choice17 = self._pos
        _runner = self._Runner(self._inputstream, self._pos)
        _i = _runner.recognize_1074651696(self._pos)
        if _runner.last_matched_state == -1:
            self._pos = _choice17
            raise BacktrackException
        _upto = _runner.last_matched_index + 1
        _pos = self._pos
        assert _pos >= 0
        assert _upto >= 0
        _result = self._inputstream[_pos: _upto]
        self._pos = _upto
        return _result
    def _regex1124192327(self):
        """Run DFA 1124192327 (single-quoted literal; used by QUOTE)."""
        _choice18 = self._pos
        _runner = self._Runner(self._inputstream, self._pos)
        _i = _runner.recognize_1124192327(self._pos)
        if _runner.last_matched_state == -1:
            self._pos = _choice18
            raise BacktrackException
        _upto = _runner.last_matched_index + 1
        _pos = self._pos
        assert _pos >= 0
        assert _upto >= 0
        _result = self._inputstream[_pos: _upto]
        self._pos = _upto
        return _result
    def _regex1979538501(self):
        """Run DFA 1979538501 (whitespace or '#' comment; used by IGNORE)."""
        _choice19 = self._pos
        _runner = self._Runner(self._inputstream, self._pos)
        _i = _runner.recognize_1979538501(self._pos)
        if _runner.last_matched_state == -1:
            self._pos = _choice19
            raise BacktrackException
        _upto = _runner.last_matched_index + 1
        _pos = self._pos
        assert _pos >= 0
        assert _upto >= 0
        _result = self._inputstream[_pos: _upto]
        self._pos = _upto
        return _result
    class _Runner(object):
        """Generated DFA executor for the token regexes.

        Each `recognize_*` method walks `runner.text` from index `i`,
        recording the last accepting state/index on `runner`.  A
        non-negative return value means the scan consumed up to the end of
        input in an accepting state; `~i` marks where it stopped otherwise.
        Callers only inspect `last_matched_state` / `last_matched_index`.
        """
        def __init__(self, text, pos):
            self.text = text
            self.pos = pos
            self.last_matched_state = -1
            self.last_matched_index = -1
            self.state = -1
        def recognize_299149370(runner, i):
            """DFA for blank/newline runs (a match must end after a newline)."""
            #auto-generated code, don't edit
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    runner.last_matched_index = i - 1
                    runner.last_matched_state = state
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 0
                        return i
                    if char == '\n':
                        state = 1
                    elif char == ' ':
                        state = 2
                    else:
                        break
                if state == 1:
                    runner.last_matched_index = i - 1
                    runner.last_matched_state = state
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 1
                        return i
                    if char == '\n':
                        state = 1
                        continue
                    elif char == ' ':
                        state = 1
                        continue
                    else:
                        break
                if state == 2:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 2
                        return ~i
                    if char == '\n':
                        state = 1
                        continue
                    elif char == ' ':
                        state = 2
                        continue
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        def recognize_1006631623(runner, i):
            """DFA for backquote-delimited literals with backslash escapes."""
            #auto-generated code, don't edit
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 0
                        return ~i
                    if char == '`':
                        state = 3
                    else:
                        break
                if state == 2:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 2
                        return ~i
                    if '\x00' <= char <= '\xff':
                        state = 3
                    else:
                        break
                if state == 3:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 3
                        return ~i
                    if char == '`':
                        state = 1
                    elif char == '\\':
                        state = 2
                        continue
                    elif ']' <= char <= '_':
                        state = 3
                        continue
                    elif '\x00' <= char <= '[':
                        state = 3
                        continue
                    elif 'a' <= char <= '\xff':
                        state = 3
                        continue
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        def recognize_528667127(runner, i):
            """DFA for COMMENT tokens: blanks, then '#' up to a newline."""
            #auto-generated code, don't edit
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 0
                        return ~i
                    if char == ' ':
                        state = 0
                        continue
                    elif char == '#':
                        state = 2
                    else:
                        break
                if state == 1:
                    runner.last_matched_index = i - 1
                    runner.last_matched_state = state
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 1
                        return i
                    if char == ' ':
                        state = 0
                        continue
                    elif char == '#':
                        state = 2
                    else:
                        break
                if state == 2:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 2
                        return ~i
                    if char == '\n':
                        state = 1
                        continue
                    elif '\x00' <= char <= '\t':
                        state = 2
                        continue
                    elif '\x0b' <= char <= '\xff':
                        state = 2
                        continue
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        def recognize_291086639(runner, i):
            """DFA for brace-delimited chunks: '{', non-newline body, '}'."""
            #auto-generated code, don't edit
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 0
                        return ~i
                    if char == '{':
                        state = 2
                    else:
                        break
                if state == 2:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 2
                        return ~i
                    if char == '}':
                        state = 1
                    elif '\x00' <= char <= '\t':
                        state = 2
                        continue
                    elif '\x0b' <= char <= '|':
                        state = 2
                        continue
                    elif '~' <= char <= '\xff':
                        state = 2
                        continue
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        def recognize_1074651696(runner, i):
            """DFA for identifiers: [A-Za-z_][A-Za-z0-9_]*."""
            #auto-generated code, don't edit
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 0
                        return ~i
                    if char == '_':
                        state = 1
                    elif 'A' <= char <= 'Z':
                        state = 1
                    elif 'a' <= char <= 'z':
                        state = 1
                    else:
                        break
                if state == 1:
                    runner.last_matched_index = i - 1
                    runner.last_matched_state = state
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 1
                        return i
                    if char == '_':
                        state = 1
                        continue
                    elif '0' <= char <= '9':
                        state = 1
                        continue
                    elif 'A' <= char <= 'Z':
                        state = 1
                        continue
                    elif 'a' <= char <= 'z':
                        state = 1
                        continue
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        def recognize_1124192327(runner, i):
            """DFA for single-quoted literals: "'", body, "'" (no escapes)."""
            #auto-generated code, don't edit
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 0
                        return ~i
                    if char == "'":
                        state = 1
                    else:
                        break
                if state == 1:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 1
                        return ~i
                    if '\x00' <= char <= '&':
                        state = 1
                        continue
                    elif '(' <= char <= '\xff':
                        state = 1
                        continue
                    elif char == "'":
                        state = 2
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        def recognize_1979538501(runner, i):
            """DFA for ignorable tokens: space/tab/newline or '#'-comment."""
            #auto-generated code, don't edit
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 0
                        return ~i
                    if char == '#':
                        state = 1
                    elif char == ' ':
                        state = 2
                    elif char == '\t':
                        state = 2
                    elif char == '\n':
                        state = 2
                    else:
                        break
                if state == 1:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 1
                        return ~i
                    if '\x00' <= char <= '\t':
                        state = 1
                        continue
                    elif '\x0b' <= char <= '\xff':
                        state = 1
                        continue
                    elif char == '\n':
                        state = 2
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
class PyPackratSyntaxParser(PackratParser):
    """Public parser entry point; delegates setup to ``init_parser``.

    Its rule methods are grafted on from ``Parser`` by the module-level
    loop that follows this class definition.
    """
    def __init__(self, stream):
        self.init_parser(stream)
# Graft every generated method of Parser onto PyPackratSyntaxParser,
# skipping dunder bookkeeping attributes and anything the subclass
# defines itself.  (`initthere` is computed but never used here.)
forbidden = dict.fromkeys(("__weakref__ __doc__ "
                           "__dict__ __module__").split())
initthere = "__init__" in PyPackratSyntaxParser.__dict__
for key, value in Parser.__dict__.items():
    if key not in PyPackratSyntaxParser.__dict__ and key not in forbidden:
        setattr(PyPackratSyntaxParser, key, value)
# NOTE(review): `im_func` exists only on Python 2 unbound methods; under
# Python 3 this line would raise AttributeError (plain `Parser.__init__`
# would be needed there).  Presumably this module targets Python 2 /
# RPython -- confirm before porting.
PyPackratSyntaxParser.init_parser = Parser.__init__.im_func
| 42.014035 | 93 | 0.466374 |
from rpython.rlib.parsing.tree import Nonterminal, Symbol
from rpython.rlib.parsing.makepackrat import PackratParser, BacktrackException, Status
class Parser(object):
def NAME(self):
return self._NAME().result
    def _NAME(self):
        """Memoized token rule NAME (identifier via DFA 1074651696).

        Returns the memo Status for the current position; on failure caches
        the error and raises BacktrackException.
        """
        _key = self._pos
        _status = self._dict_NAME.get(_key, None)
        if _status is None:
            _status = self._dict_NAME[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cached success: restore position, reuse result.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1074651696()
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure for this position.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def SPACE(self):
return self._SPACE().result
    def _SPACE(self):
        """Memoized token rule SPACE: exactly one ' ' character.

        Returns the memo Status for the current position; on failure caches
        the error and raises BacktrackException.
        """
        _key = self._pos
        _status = self._dict_SPACE.get(_key, None)
        if _status is None:
            _status = self._dict_SPACE[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cached success: restore position, reuse result.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self.__chars__(' ')
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure for this position.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def COMMENT(self):
return self._COMMENT().result
    def _COMMENT(self):
        """Memoized token rule COMMENT ('#' to end of line; DFA 528667127).

        Returns the memo Status for the current position; on failure caches
        the error and raises BacktrackException.
        """
        _key = self._pos
        _status = self._dict_COMMENT.get(_key, None)
        if _status is None:
            _status = self._dict_COMMENT[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cached success: restore position, reuse result.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex528667127()
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure for this position.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def IGNORE(self):
return self._IGNORE().result
    def _IGNORE(self):
        """Memoized token rule IGNORE (whitespace or comment; DFA 1979538501).

        Returns the memo Status for the current position; on failure caches
        the error and raises BacktrackException.
        """
        _key = self._pos
        _status = self._dict_IGNORE.get(_key, None)
        if _status is None:
            _status = self._dict_IGNORE[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cached success: restore position, reuse result.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1979538501()
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure for this position.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def newline(self):
return self._newline().result
    def _newline(self):
        """Memoized rule `newline`: COMMENT | blank/newline run (DFA 299149370).

        Auto-generated packrat-parser code; returns the memo Status for the
        current position or raises BacktrackException.  The
        LEFTRECURSION/SOMESOLUTIONS handling re-runs the rule from the same
        start position while the matched span keeps growing.
        """
        _key = self._pos
        _status = self._dict_newline.get(_key, None)
        if _status is None:
            _status = self._dict_newline[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Left-recursive re-entry: return the best result so far.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # Alternative 1: a COMMENT token.
                _choice0 = self._pos
                try:
                    _call_status = self._COMMENT()
                    _result = _call_status.result
                    _error = _call_status.error
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    # Alternative 2: a run of blanks/newlines.
                    _choice1 = self._pos
                    try:
                        _result = self._regex299149370()
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice1
                        raise BacktrackException(_error)
                    # Unreachable duplicate emitted by the generator.
                    _result = self._regex299149370()
                    break
            if _status.status == _status.LEFTRECURSION:
                # Grow-the-seed: keep re-parsing while the match extends.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._newline()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure for this position.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def REGEX(self):
return self._REGEX().result
def _REGEX(self):
_key = self._pos
_status = self._dict_REGEX.get(_key, None)
if _status is None:
_status = self._dict_REGEX[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
_startingpos = self._pos
try:
_result = None
_error = None
_result = self._regex1006631623()
r = _result
_result = (Symbol('REGEX', r, None))
assert _status.status != _status.LEFTRECURSION
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def QUOTE(self):
return self._QUOTE().result
def _QUOTE(self):
_key = self._pos
_status = self._dict_QUOTE.get(_key, None)
if _status is None:
_status = self._dict_QUOTE[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
_startingpos = self._pos
try:
_result = None
_error = None
_result = self._regex1124192327()
r = _result
_result = (Symbol('QUOTE', r, None))
assert _status.status != _status.LEFTRECURSION
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def PYTHONCODE(self):
return self._PYTHONCODE().result
def _PYTHONCODE(self):
_key = self._pos
_status = self._dict_PYTHONCODE.get(_key, None)
if _status is None:
_status = self._dict_PYTHONCODE[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
_startingpos = self._pos
try:
_result = None
_error = None
_result = self._regex291086639()
r = _result
_result = (Symbol('PYTHONCODE', r, None))
assert _status.status != _status.LEFTRECURSION
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def EOF(self):
return self._EOF().result
    def _EOF(self):
        """Packrat matcher for rule ``EOF``: negative lookahead ``!__any__``.

        Generated code: succeeds (without consuming input) only when no
        character can be read, i.e. at end of input.  Memoized per position
        with the standard left-recursion protocol.
        """
        _key = self._pos
        _status = self._dict_EOF.get(_key, None)
        if _status is None:
            _status = self._dict_EOF[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # negative lookahead: fail if any character can still be read
            _choice0 = self._pos
            _stored_result1 = _result
            try:
                _result = self.__any__()
            except BacktrackException:
                self._pos = _choice0
                _result = _stored_result1
            else:
                raise BacktrackException(None)
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._EOF()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def file(self):
return self._file().result
    def _file(self):
        """Packrat matcher for rule ``file``: ``IGNORE* list EOF``.

        Generated code: skips leading ignorable input, parses the grammar's
        production ``list`` and requires end of input; the result is the
        ``list`` node (the EOF match is discarded).  Memoized per position.
        """
        _key = self._pos
        _status = self._dict_file.get(_key, None)
        if _status is None:
            _status = self._dict_file[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # IGNORE*
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = _call_status.error
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _call_status = self._list()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            _before_discard2 = _result
            # require EOF but keep the list node as the result
            _call_status = self._EOF()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            _result = _before_discard2
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._file()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def list(self):
return self._list().result
    def _list(self):
        """Packrat matcher for rule ``list``: ``production+``.

        Generated code: matches one or more productions and wraps them in
        ``Nonterminal('list', content)``.  Memoized per position.
        """
        _key = self._pos
        _status = self._dict_list.get(_key, None)
        if _status is None:
            _status = self._dict_list[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # production+ : one mandatory match, then as many as possible
            _all0 = []
            _call_status = self._production()
            _result = _call_status.result
            _error = _call_status.error
            _all0.append(_result)
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._production()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            content = _result
            _result = (Nonterminal('list', content))
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._list()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def production(self):
return self._production().result
    def _production(self):
        """Packrat matcher for rule ``production``.

        Grammar: ``NAME SPACE* productionargs ':' IGNORE* or_ IGNORE* ';'
        IGNORE*`` producing ``Nonterminal('production', [name, args, what])``.
        Generated code, memoized per position.
        """
        _key = self._pos
        _status = self._dict_production.get(_key, None)
        if _status is None:
            _status = self._dict_production[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._NAME()
            _result = _call_status.result
            _error = _call_status.error
            name = _result
            # SPACE*
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _call_status = self._productionargs()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            args = _result
            _result = self.__chars__(':')
            # IGNORE*
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            _call_status = self._or_()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            what = _result
            # IGNORE*
            _all4 = []
            while 1:
                _choice5 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all4.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                    break
            _result = _all4
            _result = self.__chars__(';')
            # IGNORE*
            _all6 = []
            while 1:
                _choice7 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all6.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice7
                    break
            _result = _all6
            _result = (Nonterminal('production', [name, args, what]))
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._production()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def productionargs(self):
return self._productionargs().result
    def _productionargs(self):
        """Packrat matcher for rule ``productionargs``.

        Grammar: ``'(' IGNORE* (NAME IGNORE* ',' IGNORE*)* NAME IGNORE* ')'
        IGNORE*`` producing ``Nonterminal('productionargs', args + [arg])``;
        the empty alternative yields ``Nonterminal('productionargs', [])``.
        Generated code, memoized per position.
        """
        _key = self._pos
        _status = self._dict_productionargs.get(_key, None)
        if _status is None:
            _status = self._dict_productionargs[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # first alternative: a parenthesised argument list
                _choice0 = self._pos
                try:
                    _result = self.__chars__('(')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    # (NAME IGNORE* ',' IGNORE*)*  -- all but the last arg
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._NAME()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard5 = _result
                            _all6 = []
                            while 1:
                                _choice7 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all6.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice7
                                    break
                            _result = _all6
                            _result = self.__chars__(',')
                            _all8 = []
                            while 1:
                                _choice9 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all8.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice9
                                    break
                            _result = _all8
                            _result = _before_discard5
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    args = _result
                    # the final (comma-less) argument name
                    _call_status = self._NAME()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    arg = _result
                    _all10 = []
                    while 1:
                        _choice11 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all10.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice11
                            break
                    _result = _all10
                    _result = self.__chars__(')')
                    _all12 = []
                    while 1:
                        _choice13 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all12.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice13
                            break
                    _result = _all12
                    _result = (Nonterminal('productionargs', args + [arg]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # second alternative: no argument list at all
                _choice14 = self._pos
                try:
                    _result = (Nonterminal('productionargs', []))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice14
                raise BacktrackException(_error)
                _result = (Nonterminal('productionargs', []))
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._productionargs()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def or_(self):
return self._or_().result
    def _or_(self):
        """Packrat matcher for rule ``or_``.

        Grammar: ``(commands '|' IGNORE*)+ commands`` producing
        ``Nonterminal('or', l + [last])``, or a single ``commands`` as a
        fallback.  Generated code, memoized per position.
        """
        _key = self._pos
        _status = self._dict_or_.get(_key, None)
        if _status is None:
            _status = self._dict_or_[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # first alternative: one or more '|'-separated commands
                _choice0 = self._pos
                try:
                    _all1 = []
                    _call_status = self._commands()
                    _result = _call_status.result
                    _error = _call_status.error
                    _before_discard2 = _result
                    _result = self.__chars__('|')
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = _before_discard2
                    _all1.append(_result)
                    while 1:
                        _choice5 = self._pos
                        try:
                            _call_status = self._commands()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard6 = _result
                            _result = self.__chars__('|')
                            _all7 = []
                            while 1:
                                _choice8 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all7.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice8
                                    break
                            _result = _all7
                            _result = _before_discard6
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice5
                            break
                    _result = _all1
                    l = _result
                    # trailing commands after the last '|'
                    _call_status = self._commands()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    last = _result
                    _result = (Nonterminal('or', l + [last]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # second alternative: a single commands node
                _choice9 = self._pos
                try:
                    _call_status = self._commands()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice9
                raise BacktrackException(_error)
                _call_status = self._commands()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._or_()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def commands(self):
return self._commands().result
    def _commands(self):
        """Packrat matcher for rule ``commands``.

        Grammar: ``command newline (command newline)+`` producing
        ``Nonterminal('commands', [cmd] + cmds)``, or a single ``command``
        as a fallback.  Generated code, memoized per position.
        """
        _key = self._pos
        _status = self._dict_commands.get(_key, None)
        if _status is None:
            _status = self._dict_commands[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # first alternative: at least two newline-terminated commands
                _choice0 = self._pos
                try:
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = _call_status.error
                    cmd = _result
                    _call_status = self._newline()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all1 = []
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _before_discard2 = _result
                    _call_status = self._newline()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _result = _before_discard2
                    _all1.append(_result)
                    while 1:
                        _choice3 = self._pos
                        try:
                            _call_status = self._command()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard4 = _result
                            _call_status = self._newline()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _result = _before_discard4
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice3
                            break
                    _result = _all1
                    cmds = _result
                    _result = (Nonterminal('commands', [cmd] + cmds))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # second alternative: a single command
                _choice5 = self._pos
                try:
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                raise BacktrackException(_error)
                _call_status = self._command()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._commands()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def command(self):
return self._command().result
    def _command(self):
        """Packrat matcher for rule ``command``: delegates to ``simplecommand``.

        Generated code, memoized per position with the standard
        left-recursion protocol.
        """
        _key = self._pos
        _status = self._dict_command.get(_key, None)
        if _status is None:
            _status = self._dict_command[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._simplecommand()
            _result = _call_status.result
            _error = _call_status.error
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._command()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def simplecommand(self):
return self._simplecommand().result
    def _simplecommand(self):
        """Packrat matcher for rule ``simplecommand``.

        Grammar (ordered choice): ``return_ | if_ | named_command |
        repetition | choose | negation``.  Generated code, memoized per
        position.
        """
        _key = self._pos
        _status = self._dict_simplecommand.get(_key, None)
        if _status is None:
            _status = self._dict_simplecommand[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # ordered choice: each alternative resets _pos on failure
                _choice0 = self._pos
                try:
                    _call_status = self._return_()
                    _result = _call_status.result
                    _error = _call_status.error
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                _choice1 = self._pos
                try:
                    _call_status = self._if_()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                _choice2 = self._pos
                try:
                    _call_status = self._named_command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice2
                _choice3 = self._pos
                try:
                    _call_status = self._repetition()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                _choice4 = self._pos
                try:
                    _call_status = self._choose()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice4
                _choice5 = self._pos
                try:
                    _call_status = self._negation()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                raise BacktrackException(_error)
                _call_status = self._negation()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._simplecommand()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def return_(self):
return self._return_().result
    def _return_(self):
        """Packrat matcher for rule ``return_``.

        Grammar: ``'return' SPACE* PYTHONCODE IGNORE*`` producing
        ``Nonterminal('return', [code])``.  Generated code, memoized per
        position.
        """
        _key = self._pos
        _status = self._dict_return_.get(_key, None)
        if _status is None:
            _status = self._dict_return_[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self.__chars__('return')
            # SPACE*
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = _call_status.error
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _call_status = self._PYTHONCODE()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            code = _result
            # IGNORE*
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            _result = (Nonterminal('return', [code]))
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._return_()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def if_(self):
return self._if_().result
    def _if_(self):
        """Packrat matcher for rule ``if_``.

        Grammar (ordered choice):
        ``'do' newline command SPACE* 'if' SPACE* PYTHONCODE IGNORE*``
        producing ``Nonterminal('if', [cmd, condition])``, or
        ``'if' SPACE* PYTHONCODE IGNORE*`` producing
        ``Nonterminal('if', [condition])``.  Generated code, memoized per
        position.
        """
        _key = self._pos
        _status = self._dict_if_.get(_key, None)
        if _status is None:
            _status = self._dict_if_[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # first alternative: 'do' <command> 'if' <condition>
                _choice0 = self._pos
                try:
                    _result = self.__chars__('do')
                    _call_status = self._newline()
                    _result = _call_status.result
                    _error = _call_status.error
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    cmd = _result
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _result = self.__chars__('if')
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _call_status = self._PYTHONCODE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    condition = _result
                    _all5 = []
                    while 1:
                        _choice6 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all5.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice6
                            break
                    _result = _all5
                    _result = (Nonterminal('if', [cmd, condition]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # second alternative: a bare 'if' <condition>
                _choice7 = self._pos
                try:
                    _result = self.__chars__('if')
                    _all8 = []
                    while 1:
                        _choice9 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all8.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice9
                            break
                    _result = _all8
                    _call_status = self._PYTHONCODE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    condition = _result
                    _all10 = []
                    while 1:
                        _choice11 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all10.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice11
                            break
                    _result = _all10
                    _result = (Nonterminal('if', [condition]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice7
                raise BacktrackException(_error)
                _result = self.__chars__('if')
                _all12 = []
                while 1:
                    _choice13 = self._pos
                    try:
                        _call_status = self._SPACE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all12.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice13
                        break
                _result = _all12
                _call_status = self._PYTHONCODE()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                condition = _result
                _all14 = []
                while 1:
                    _choice15 = self._pos
                    try:
                        _call_status = self._IGNORE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all14.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice15
                        break
                _result = _all14
                _result = (Nonterminal('if', [condition]))
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._if_()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def choose(self):
return self._choose().result
    def _choose(self):
        """Memoizing matcher for the ``choose`` production.

        Grammar (as evidenced by the calls below):
            'choose' SPACE* NAME SPACE* 'in' SPACE* PYTHONCODE IGNORE* commands
        On success returns a Status whose result is
        ``Nonterminal('choose', [name, expr, cmds])``; on failure raises
        BacktrackException.  Results (and failures) are memoized per input
        position in ``self._dict_choose``.
        """
        # Packrat memoization: look up (or create) the Status for this position.
        _key = self._pos
        _status = self._dict_choose.get(_key, None)
        if _status is None:
            _status = self._dict_choose[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: jump to the memoized end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self.__chars__('choose')
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = _call_status.error
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _call_status = self._NAME()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            name = _result
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            _result = self.__chars__('in')
            _all4 = []
            while 1:
                _choice5 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all4.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                    break
            _result = _all4
            _call_status = self._PYTHONCODE()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            expr = _result
            _all6 = []
            while 1:
                _choice7 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all6.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice7
                    break
            _result = _all6
            _call_status = self._commands()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            cmds = _result
            _result = (Nonterminal('choose', [name, expr, cmds]))
            if _status.status == _status.LEFTRECURSION:
                # Left recursion detected on the way: store the partial result
                # and re-run from the starting position until the matched span
                # stops growing.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._choose()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Parse failed: memoize the failure and propagate.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def commandchain(self):
return self._commandchain().result
    def _commandchain(self):
        """Memoizing matcher for the ``commandchain`` production.

        Matches one or more ``simplecommand`` in sequence and wraps them
        as ``Nonterminal('commands', result)``.  Raises BacktrackException
        on failure; results are memoized per position in
        ``self._dict_commandchain``.
        """
        # Packrat memoization: look up (or create) the Status for this position.
        _key = self._pos
        _status = self._dict_commandchain.get(_key, None)
        if _status is None:
            _status = self._dict_commandchain[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: jump to the memoized end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # '+' repetition: the first simplecommand is mandatory, the rest
            # are accumulated until one fails to match.
            _all0 = []
            _call_status = self._simplecommand()
            _result = _call_status.result
            _error = _call_status.error
            _all0.append(_result)
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._simplecommand()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            result = _result
            _result = (Nonterminal('commands', result))
            if _status.status == _status.LEFTRECURSION:
                # Left recursion: grow the stored result until the match
                # stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._commandchain()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Parse failed: memoize the failure and propagate.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def named_command(self):
return self._named_command().result
    def _named_command(self):
        """Memoizing matcher for the ``named_command`` production.

        Grammar: ``NAME SPACE* '=' SPACE* command`` producing
        ``Nonterminal('named_command', [name, cmd])``.  Raises
        BacktrackException on failure; memoized per position in
        ``self._dict_named_command``.
        """
        # Packrat memoization: look up (or create) the Status for this position.
        _key = self._pos
        _status = self._dict_named_command.get(_key, None)
        if _status is None:
            _status = self._dict_named_command[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: jump to the memoized end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._NAME()
            _result = _call_status.result
            _error = _call_status.error
            name = _result
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _result = self.__chars__('=')
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            _call_status = self._command()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            cmd = _result
            _result = (Nonterminal('named_command', [name, cmd]))
            if _status.status == _status.LEFTRECURSION:
                # Left recursion: grow the stored result until the match
                # stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._named_command()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Parse failed: memoize the failure and propagate.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def repetition(self):
return self._repetition().result
    def _repetition(self):
        """Memoizing matcher for the ``repetition`` production.

        Two ordered alternatives:
          * ``enclosed SPACE* '?' IGNORE*``        -> Nonterminal('maybe', [what])
          * ``enclosed SPACE* ('*'|'+') IGNORE*``  -> Nonterminal('repetition',
                                                       [repetition, what])
        Raises BacktrackException when neither matches; memoized per
        position in ``self._dict_repetition``.  NOTE: the generator emits
        the last alternative's body a second time after the final
        ``raise`` — that copy is unreachable dead code, kept verbatim.
        """
        # Packrat memoization: look up (or create) the Status for this position.
        _key = self._pos
        _status = self._dict_repetition.get(_key, None)
        if _status is None:
            _status = self._dict_repetition[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: jump to the memoized end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # First alternative: enclosed SPACE* '?' IGNORE*
                _choice0 = self._pos
                try:
                    _call_status = self._enclosed()
                    _result = _call_status.result
                    _error = _call_status.error
                    what = _result
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _result = self.__chars__('?')
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = (Nonterminal('maybe', [what]))
                    break
                except BacktrackException as _exc:
                    # First alternative failed: rewind and try '*' / '+'.
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    _choice5 = self._pos
                    try:
                        _call_status = self._enclosed()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        what = _result
                        _all6 = []
                        while 1:
                            _choice7 = self._pos
                            try:
                                _call_status = self._SPACE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all6.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice7
                                break
                        _result = _all6
                        while 1:
                            _choice8 = self._pos
                            try:
                                _result = self.__chars__('*')
                                break
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice8
                                _choice9 = self._pos
                                try:
                                    _result = self.__chars__('+')
                                    break
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice9
                                    raise BacktrackException(_error)
                                _result = self.__chars__('+')
                                break
                        repetition = _result
                        _all10 = []
                        while 1:
                            _choice11 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all10.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice11
                                break
                        _result = _all10
                        _result = (Nonterminal('repetition', [repetition, what]))
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice5
                        raise BacktrackException(_error)
                # Generator-emitted duplicate of the last alternative:
                # unreachable (the try above always breaks or raises).
                _call_status = self._enclosed()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                what = _result
                _all12 = []
                while 1:
                    _choice13 = self._pos
                    try:
                        _call_status = self._SPACE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all12.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice13
                        break
                _result = _all12
                while 1:
                    _choice14 = self._pos
                    try:
                        _result = self.__chars__('*')
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice14
                        _choice15 = self._pos
                        try:
                            _result = self.__chars__('+')
                            break
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice15
                            raise BacktrackException(_error)
                        _result = self.__chars__('+')
                        break
                repetition = _result
                _all16 = []
                while 1:
                    _choice17 = self._pos
                    try:
                        _call_status = self._IGNORE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all16.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice17
                        break
                _result = _all16
                _result = (Nonterminal('repetition', [repetition, what]))
                break
            if _status.status == _status.LEFTRECURSION:
                # Left recursion: grow the stored result until the match
                # stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._repetition()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Parse failed: memoize the failure and propagate.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def negation(self):
return self._negation().result
    def _negation(self):
        """Memoizing matcher for the ``negation`` production.

        Two ordered alternatives:
          * ``'!' SPACE* negation IGNORE*`` -> Nonterminal('negation', [what])
          * ``enclosed``                    -> result of enclosed, unchanged
        Note the recursive call to ``self._negation()`` for nested ``!``.
        Raises BacktrackException when neither matches; memoized per
        position in ``self._dict_negation``.  The duplicated ``enclosed``
        body after the final ``raise`` is generator-emitted dead code.
        """
        # Packrat memoization: look up (or create) the Status for this position.
        _key = self._pos
        _status = self._dict_negation.get(_key, None)
        if _status is None:
            _status = self._dict_negation[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: jump to the memoized end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # First alternative: '!' SPACE* negation IGNORE*
                _choice0 = self._pos
                try:
                    _result = self.__chars__('!')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _call_status = self._negation()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    what = _result
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = (Nonterminal('negation', [what]))
                    break
                except BacktrackException as _exc:
                    # Fall back to a plain enclosed term.
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    _choice5 = self._pos
                    try:
                        _call_status = self._enclosed()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice5
                        raise BacktrackException(_error)
                # Generator-emitted duplicate: unreachable.
                _call_status = self._enclosed()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                # Left recursion: grow the stored result until the match
                # stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._negation()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Parse failed: memoize the failure and propagate.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def enclosed(self):
return self._enclosed().result
    def _enclosed(self):
        """Memoizing matcher for the ``enclosed`` production.

        Four ordered alternatives:
          * ``'<' IGNORE* primary IGNORE* '>' IGNORE*`` -> Nonterminal('exclusive', [what])
          * ``'[' IGNORE* or_ IGNORE* ']' IGNORE*``     -> Nonterminal('ignore', [what])
          * ``'(' IGNORE* or_ ')' IGNORE*``             -> the or_ result, unchanged
          * ``primary``                                 -> the primary result, unchanged
        Each failed alternative rewinds ``self._pos`` and nests the next
        attempt in its except block.  Raises BacktrackException if all
        fail; memoized per position in ``self._dict_enclosed``.  The
        duplicated ``primary`` body after the final ``raise`` is
        generator-emitted dead code.
        """
        # Packrat memoization: look up (or create) the Status for this position.
        _key = self._pos
        _status = self._dict_enclosed.get(_key, None)
        if _status is None:
            _status = self._dict_enclosed[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: jump to the memoized end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # Alternative 1: '<' primary '>' (exclusive)
                _choice0 = self._pos
                try:
                    _result = self.__chars__('<')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _call_status = self._primary()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    what = _result
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = self.__chars__('>')
                    _all5 = []
                    while 1:
                        _choice6 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all5.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice6
                            break
                    _result = _all5
                    _result = (Nonterminal('exclusive', [what]))
                    break
                except BacktrackException as _exc:
                    # Alternative 2: '[' or_ ']' (ignore)
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    _choice7 = self._pos
                    try:
                        _result = self.__chars__('[')
                        _all8 = []
                        while 1:
                            _choice9 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all8.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice9
                                break
                        _result = _all8
                        _call_status = self._or_()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        what = _result
                        _all10 = []
                        while 1:
                            _choice11 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all10.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice11
                                break
                        _result = _all10
                        _result = self.__chars__(']')
                        _all12 = []
                        while 1:
                            _choice13 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all12.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice13
                                break
                        _result = _all12
                        _result = (Nonterminal('ignore', [what]))
                        break
                    except BacktrackException as _exc:
                        # Alternative 3: parenthesized or_
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice7
                        _choice14 = self._pos
                        try:
                            _before_discard15 = _result
                            _result = self.__chars__('(')
                            _all16 = []
                            while 1:
                                _choice17 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all16.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice17
                                    break
                            _result = _all16
                            _result = _before_discard15
                            _call_status = self._or_()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard18 = _result
                            _result = self.__chars__(')')
                            _all19 = []
                            while 1:
                                _choice20 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all19.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice20
                                    break
                            _result = _all19
                            _result = _before_discard18
                            break
                        except BacktrackException as _exc:
                            # Alternative 4: bare primary
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice14
                            _choice21 = self._pos
                            try:
                                _call_status = self._primary()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                break
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice21
                                raise BacktrackException(_error)
                # Generator-emitted duplicate: unreachable.
                _call_status = self._primary()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                # Left recursion: grow the stored result until the match
                # stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._enclosed()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Parse failed: memoize the failure and propagate.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def primary(self):
return self._primary().result
    def _primary(self):
        """Memoizing matcher for the ``primary`` production.

        Three ordered alternatives, each returning the sub-result unchanged:
          * ``call``
          * ``REGEX IGNORE*``
          * ``QUOTE IGNORE*``
        Raises BacktrackException if all fail; memoized per position in
        ``self._dict_primary``.  The duplicated QUOTE body after the final
        ``raise`` is generator-emitted dead code.
        """
        # Packrat memoization: look up (or create) the Status for this position.
        _key = self._pos
        _status = self._dict_primary.get(_key, None)
        if _status is None:
            _status = self._dict_primary[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: jump to the memoized end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # Alternative 1: call
                _choice0 = self._pos
                try:
                    _call_status = self._call()
                    _result = _call_status.result
                    _error = _call_status.error
                    break
                except BacktrackException as _exc:
                    # Alternative 2: REGEX IGNORE*
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    _choice1 = self._pos
                    try:
                        _call_status = self._REGEX()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _before_discard2 = _result
                        _all3 = []
                        while 1:
                            _choice4 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all3.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice4
                                break
                        _result = _all3
                        _result = _before_discard2
                        break
                    except BacktrackException as _exc:
                        # Alternative 3: QUOTE IGNORE*
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice1
                        _choice5 = self._pos
                        try:
                            _call_status = self._QUOTE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard6 = _result
                            _all7 = []
                            while 1:
                                _choice8 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all7.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice8
                                    break
                            _result = _all7
                            _result = _before_discard6
                            break
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice5
                            raise BacktrackException(_error)
                # Generator-emitted duplicate: unreachable.
                _call_status = self._QUOTE()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                _before_discard9 = _result
                _all10 = []
                while 1:
                    _choice11 = self._pos
                    try:
                        _call_status = self._IGNORE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all10.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice11
                        break
                _result = _all10
                _result = _before_discard9
                break
            if _status.status == _status.LEFTRECURSION:
                # Left recursion: grow the stored result until the match
                # stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._primary()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Parse failed: memoize the failure and propagate.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def call(self):
return self._call().result
    def _call(self):
        """Memoizing matcher for the ``call`` production.

        Grammar: ``NAME arguments IGNORE*`` producing
        ``Nonterminal("call", [x, args])``.  Raises BacktrackException on
        failure; memoized per position in ``self._dict_call``.
        """
        # Packrat memoization: look up (or create) the Status for this position.
        _key = self._pos
        _status = self._dict_call.get(_key, None)
        if _status is None:
            _status = self._dict_call[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: jump to the memoized end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._NAME()
            _result = _call_status.result
            _error = _call_status.error
            x = _result
            _call_status = self._arguments()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            args = _result
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _result = (Nonterminal("call", [x, args]))
            if _status.status == _status.LEFTRECURSION:
                # Left recursion: grow the stored result until the match
                # stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._call()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Parse failed: memoize the failure and propagate.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def arguments(self):
return self._arguments().result
    def _arguments(self):
        """Memoizing matcher for the ``arguments`` production.

        Two ordered alternatives:
          * ``'(' IGNORE* (PYTHONCODE IGNORE* ',' IGNORE*)* PYTHONCODE ')' IGNORE*``
            -> Nonterminal("args", args + [last])
          * empty -> Nonterminal("args", [])  (always succeeds)
        Memoized per position in ``self._dict_arguments``.  The duplicated
        empty-args body after the final ``raise`` is generator-emitted
        dead code.
        """
        # Packrat memoization: look up (or create) the Status for this position.
        _key = self._pos
        _status = self._dict_arguments.get(_key, None)
        if _status is None:
            _status = self._dict_arguments[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: jump to the memoized end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # Alternative 1: parenthesized comma-separated PYTHONCODE list.
                _choice0 = self._pos
                try:
                    _result = self.__chars__('(')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _all3 = []
                    while 1:
                        # Zero or more "PYTHONCODE ',' " items before the last one.
                        _choice4 = self._pos
                        try:
                            _call_status = self._PYTHONCODE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard5 = _result
                            _all6 = []
                            while 1:
                                _choice7 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all6.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice7
                                    break
                            _result = _all6
                            _result = self.__chars__(',')
                            _all8 = []
                            while 1:
                                _choice9 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all8.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice9
                                    break
                            _result = _all8
                            _result = _before_discard5
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    args = _result
                    _call_status = self._PYTHONCODE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    last = _result
                    _result = self.__chars__(')')
                    _all10 = []
                    while 1:
                        _choice11 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all10.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice11
                            break
                    _result = _all10
                    _result = (Nonterminal("args", args + [last]))
                    break
                except BacktrackException as _exc:
                    # Alternative 2: no argument list present -> empty args node.
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    _choice12 = self._pos
                    try:
                        _result = (Nonterminal("args", []))
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice12
                        raise BacktrackException(_error)
                # Generator-emitted duplicate: unreachable.
                _result = (Nonterminal("args", []))
                break
            if _status.status == _status.LEFTRECURSION:
                # Left recursion: grow the stored result until the match
                # stops lengthening.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._arguments()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Parse failed: memoize the failure and propagate.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def __init__(self, inputstream):
        """Create a parser over *inputstream*, starting at position 0.

        One memoization dict is kept per grammar production; each maps an
        input position to the Status recorded by the corresponding
        ``_production()`` matcher (packrat caching).
        """
        # Per-production packrat caches: position -> Status.
        self._dict_NAME = {}
        self._dict_SPACE = {}
        self._dict_COMMENT = {}
        self._dict_IGNORE = {}
        self._dict_newline = {}
        self._dict_REGEX = {}
        self._dict_QUOTE = {}
        self._dict_PYTHONCODE = {}
        self._dict_EOF = {}
        self._dict_file = {}
        self._dict_list = {}
        self._dict_production = {}
        self._dict_productionargs = {}
        self._dict_or_ = {}
        self._dict_commands = {}
        self._dict_command = {}
        self._dict_simplecommand = {}
        self._dict_return_ = {}
        self._dict_if_ = {}
        self._dict_choose = {}
        self._dict_commandchain = {}
        self._dict_named_command = {}
        self._dict_repetition = {}
        self._dict_negation = {}
        self._dict_enclosed = {}
        self._dict_primary = {}
        self._dict_call = {}
        self._dict_arguments = {}
        # Current scan position and the text being parsed.
        self._pos = 0
        self._inputstream = inputstream
def _regex299149370(self):
_choice13 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_299149370(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice13
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
def _regex1006631623(self):
_choice14 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_1006631623(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice14
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
def _regex528667127(self):
_choice15 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_528667127(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice15
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
def _regex291086639(self):
_choice16 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_291086639(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice16
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
def _regex1074651696(self):
_choice17 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_1074651696(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice17
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
def _regex1124192327(self):
_choice18 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_1124192327(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice18
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
def _regex1979538501(self):
_choice19 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_1979538501(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice19
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
class _Runner(object):
def __init__(self, text, pos):
self.text = text
self.pos = pos
self.last_matched_state = -1
self.last_matched_index = -1
self.state = -1
def recognize_299149370(runner, i):
assert i >= 0
input = runner.text
state = 0
while 1:
if state == 0:
runner.last_matched_index = i - 1
runner.last_matched_state = state
try:
char = input[i]
i += 1
except IndexError:
runner.state = 0
return i
if char == '\n':
state = 1
elif char == ' ':
state = 2
else:
break
if state == 1:
runner.last_matched_index = i - 1
runner.last_matched_state = state
try:
char = input[i]
i += 1
except IndexError:
runner.state = 1
return i
if char == '\n':
state = 1
continue
elif char == ' ':
state = 1
continue
else:
break
if state == 2:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 2
return ~i
if char == '\n':
state = 1
continue
elif char == ' ':
state = 2
continue
else:
break
runner.last_matched_state = state
runner.last_matched_index = i - 1
runner.state = state
if i == len(input):
return i
else:
return ~i
break
runner.state = state
return ~i
def recognize_1006631623(runner, i):
#auto-generated code, don't edit
assert i >= 0
input = runner.text
state = 0
while 1:
if state == 0:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 0
return ~i
if char == '`':
state = 3
else:
break
if state == 2:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 2
return ~i
if '\x00' <= char <= '\xff':
state = 3
else:
break
if state == 3:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 3
return ~i
if char == '`':
state = 1
elif char == '\\':
state = 2
continue
elif ']' <= char <= '_':
state = 3
continue
elif '\x00' <= char <= '[':
state = 3
continue
elif 'a' <= char <= '\xff':
state = 3
continue
else:
break
runner.last_matched_state = state
runner.last_matched_index = i - 1
runner.state = state
if i == len(input):
return i
else:
return ~i
break
runner.state = state
return ~i
def recognize_528667127(runner, i):
assert i >= 0
input = runner.text
state = 0
while 1:
if state == 0:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 0
return ~i
if char == ' ':
state = 0
continue
elif char == '
state = 2
else:
break
if state == 1:
runner.last_matched_index = i - 1
runner.last_matched_state = state
try:
char = input[i]
i += 1
except IndexError:
runner.state = 1
return i
if char == ' ':
state = 0
continue
elif char == '
state = 2
else:
break
if state == 2:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 2
return ~i
if char == '\n':
state = 1
continue
elif '\x00' <= char <= '\t':
state = 2
continue
elif '\x0b' <= char <= '\xff':
state = 2
continue
else:
break
runner.last_matched_state = state
runner.last_matched_index = i - 1
runner.state = state
if i == len(input):
return i
else:
return ~i
break
runner.state = state
return ~i
def recognize_291086639(runner, i):
#auto-generated code, don't edit
assert i >= 0
input = runner.text
state = 0
while 1:
if state == 0:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 0
return ~i
if char == '{':
state = 2
else:
break
if state == 2:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 2
return ~i
if char == '}':
state = 1
elif '\x00' <= char <= '\t':
state = 2
continue
elif '\x0b' <= char <= '|':
state = 2
continue
elif '~' <= char <= '\xff':
state = 2
continue
else:
break
runner.last_matched_state = state
runner.last_matched_index = i - 1
runner.state = state
if i == len(input):
return i
else:
return ~i
break
runner.state = state
return ~i
def recognize_1074651696(runner, i):
assert i >= 0
input = runner.text
state = 0
while 1:
if state == 0:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 0
return ~i
if char == '_':
state = 1
elif 'A' <= char <= 'Z':
state = 1
elif 'a' <= char <= 'z':
state = 1
else:
break
if state == 1:
runner.last_matched_index = i - 1
runner.last_matched_state = state
try:
char = input[i]
i += 1
except IndexError:
runner.state = 1
return i
if char == '_':
state = 1
continue
elif '0' <= char <= '9':
state = 1
continue
elif 'A' <= char <= 'Z':
state = 1
continue
elif 'a' <= char <= 'z':
state = 1
continue
else:
break
runner.last_matched_state = state
runner.last_matched_index = i - 1
runner.state = state
if i == len(input):
return i
else:
return ~i
break
runner.state = state
return ~i
def recognize_1124192327(runner, i):
#auto-generated code, don't edit
assert i >= 0
input = runner.text
state = 0
while 1:
if state == 0:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 0
return ~i
if char == "'":
state = 1
else:
break
if state == 1:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 1
return ~i
if '\x00' <= char <= '&':
state = 1
continue
elif '(' <= char <= '\xff':
state = 1
continue
elif char == "'":
state = 2
else:
break
runner.last_matched_state = state
runner.last_matched_index = i - 1
runner.state = state
if i == len(input):
return i
else:
return ~i
break
runner.state = state
return ~i
def recognize_1979538501(runner, i):
assert i >= 0
input = runner.text
state = 0
while 1:
if state == 0:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 0
return ~i
if char == '
state = 1
elif char == ' ':
state = 2
elif char == '\t':
state = 2
elif char == '\n':
state = 2
else:
break
if state == 1:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 1
return ~i
if '\x00' <= char <= '\t':
state = 1
continue
elif '\x0b' <= char <= '\xff':
state = 1
continue
elif char == '\n':
state = 2
else:
break
runner.last_matched_state = state
runner.last_matched_index = i - 1
runner.state = state
if i == len(input):
return i
else:
return ~i
break
runner.state = state
return ~i
class PyPackratSyntaxParser(PackratParser):
def __init__(self, stream):
self.init_parser(stream)
forbidden = dict.fromkeys(("__weakref__ __doc__ "
"__dict__ __module__").split())
initthere = "__init__" in PyPackratSyntaxParser.__dict__
for key, value in Parser.__dict__.items():
if key not in PyPackratSyntaxParser.__dict__ and key not in forbidden:
setattr(PyPackratSyntaxParser, key, value)
PyPackratSyntaxParser.init_parser = Parser.__init__.im_func
| true | true |
f7268b17e5afdf9edaac16ec22aa1865bf00ab9e | 6,337 | py | Python | RecoTracker/ConversionSeedGenerators/python/PhotonConversionTrajectorySeedProducerFromSingleLeg_cfi.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | RecoTracker/ConversionSeedGenerators/python/PhotonConversionTrajectorySeedProducerFromSingleLeg_cfi.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 26 | 2018-10-30T12:47:58.000Z | 2022-03-29T08:39:00.000Z | RecoTracker/ConversionSeedGenerators/python/PhotonConversionTrajectorySeedProducerFromSingleLeg_cfi.py | p2l1pfp/cmssw | 9bda22bf33ecf18dd19a3af2b3a8cbdb1de556a9 | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | import FWCore.ParameterSet.Config as cms
from RecoTracker.TkSeedGenerator.SeedGeneratorFromRegionHitsEDProducer_cfi import seedGeneratorFromRegionHitsEDProducer
CommonClusterCheckPSet = seedGeneratorFromRegionHitsEDProducer.ClusterCheckPSet
photonConvTrajSeedFromSingleLeg = cms.EDProducer("PhotonConversionTrajectorySeedProducerFromSingleLeg",
TrackRefitter = cms.InputTag('TrackRefitter',''),
primaryVerticesTag = cms.InputTag("offlinePrimaryVertices"),
beamSpotInputTag = cms.InputTag("offlineBeamSpot"),
newSeedCandidates = cms.string("convSeedCandidates"),
xcheckSeedCandidates = cms.string("xcheckSeedCandidates"),
vtxMinDoF = cms.double(4),
maxDZSigmas = cms.double(10.),
maxNumSelVtx = cms.uint32(2),
applyTkVtxConstraint = cms.bool(True),
DoxcheckSeedCandidates = cms.bool(False),
OrderedHitsFactoryPSet = cms.PSet(
maxHitPairsPerTrackAndGenerator = cms.uint32(10),
maxElement = cms.uint32(40000),
SeedingLayers = cms.InputTag('convLayerPairs')
),
SeedComparitorPSet = cms.PSet(
ComponentName = cms.string('none')
),
ClusterCheckPSet = CommonClusterCheckPSet,
RegionFactoryPSet = cms.PSet(
RegionPSet = cms.PSet( precise = cms.bool(True),
beamSpot = cms.InputTag("offlineBeamSpot"),
originRadius = cms.double(3.0),
ptMin = cms.double(0.2),
originHalfLength = cms.double(12.0)
),
ComponentName = cms.string('GlobalRegionProducerFromBeamSpot')
),
SeedCreatorPSet = cms.PSet(
ComponentName = cms.string('SeedForPhotonConversion1Leg'),
SeedMomentumForBOFF = cms.double(5.0),
propagator = cms.string('PropagatorWithMaterial'),
TTRHBuilder = cms.string('WithTrackAngle')
)
)
from Configuration.Eras.Modifier_trackingLowPU_cff import trackingLowPU
trackingLowPU.toModify(photonConvTrajSeedFromSingleLeg,
OrderedHitsFactoryPSet = dict(maxElement = 10000),
ClusterCheckPSet = dict(
MaxNumberOfCosmicClusters = 150000,
MaxNumberOfPixelClusters = 20000,
cut = "strip < 150000 && pixel < 20000 && (strip < 20000 + 7* pixel)"
)
)
from Configuration.Eras.Modifier_trackingPhase2PU140_cff import trackingPhase2PU140
trackingPhase2PU140.toModify(photonConvTrajSeedFromSingleLeg,
ClusterCheckPSet = dict(
MaxNumberOfCosmicClusters = 1000000,
MaxNumberOfPixelClusters = 100000,
cut = None
),
OrderedHitsFactoryPSet = dict(maxElement = 100000),
RegionFactoryPSet = dict(RegionPSet = dict(ptMin = 0.3)),
)
from Configuration.Eras.Modifier_peripheralPbPb_cff import peripheralPbPb
peripheralPbPb.toModify(photonConvTrajSeedFromSingleLeg,
ClusterCheckPSet = dict(cut = "strip < 400000 && pixel < 40000 && (strip < 60000 + 7.0*pixel) && (pixel < 8000 + 0.14*strip)")
)
from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017
from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018
(pp_on_XeXe_2017 | pp_on_AA_2018 ).toModify(photonConvTrajSeedFromSingleLeg,
ClusterCheckPSet = dict(MaxNumberOfPixelClusters = 100000,
cut = "strip < 1000000 && pixel < 100000 && (strip < 50000 + 10*pixel) && (pixel < 5000 + strip/2.)"
),
OrderedHitsFactoryPSet = dict(maxElement = 100000)
)
from RecoTracker.TkTrackingRegions.globalTrackingRegionWithVertices_cff import globalTrackingRegionWithVertices as _globalTrackingRegionWithVertices
(pp_on_XeXe_2017 | pp_on_AA_2018 ).toModify(photonConvTrajSeedFromSingleLeg,
RegionFactoryPSet = dict(ComponentName = 'GlobalTrackingRegionWithVerticesProducer',
RegionPSet = _globalTrackingRegionWithVertices.RegionPSet.clone(
originRadius = 0,
originRScaling4BigEvts = True,
minOriginR = 0,
scalingStartNPix = 0,
scalingEndNPix = 1#essentially turn off immediately
),
)
)
| 70.411111 | 153 | 0.453369 | import FWCore.ParameterSet.Config as cms
from RecoTracker.TkSeedGenerator.SeedGeneratorFromRegionHitsEDProducer_cfi import seedGeneratorFromRegionHitsEDProducer
CommonClusterCheckPSet = seedGeneratorFromRegionHitsEDProducer.ClusterCheckPSet
photonConvTrajSeedFromSingleLeg = cms.EDProducer("PhotonConversionTrajectorySeedProducerFromSingleLeg",
TrackRefitter = cms.InputTag('TrackRefitter',''),
primaryVerticesTag = cms.InputTag("offlinePrimaryVertices"),
beamSpotInputTag = cms.InputTag("offlineBeamSpot"),
newSeedCandidates = cms.string("convSeedCandidates"),
xcheckSeedCandidates = cms.string("xcheckSeedCandidates"),
vtxMinDoF = cms.double(4),
maxDZSigmas = cms.double(10.),
maxNumSelVtx = cms.uint32(2),
applyTkVtxConstraint = cms.bool(True),
DoxcheckSeedCandidates = cms.bool(False),
OrderedHitsFactoryPSet = cms.PSet(
maxHitPairsPerTrackAndGenerator = cms.uint32(10),
maxElement = cms.uint32(40000),
SeedingLayers = cms.InputTag('convLayerPairs')
),
SeedComparitorPSet = cms.PSet(
ComponentName = cms.string('none')
),
ClusterCheckPSet = CommonClusterCheckPSet,
RegionFactoryPSet = cms.PSet(
RegionPSet = cms.PSet( precise = cms.bool(True),
beamSpot = cms.InputTag("offlineBeamSpot"),
originRadius = cms.double(3.0),
ptMin = cms.double(0.2),
originHalfLength = cms.double(12.0)
),
ComponentName = cms.string('GlobalRegionProducerFromBeamSpot')
),
SeedCreatorPSet = cms.PSet(
ComponentName = cms.string('SeedForPhotonConversion1Leg'),
SeedMomentumForBOFF = cms.double(5.0),
propagator = cms.string('PropagatorWithMaterial'),
TTRHBuilder = cms.string('WithTrackAngle')
)
)
from Configuration.Eras.Modifier_trackingLowPU_cff import trackingLowPU
trackingLowPU.toModify(photonConvTrajSeedFromSingleLeg,
OrderedHitsFactoryPSet = dict(maxElement = 10000),
ClusterCheckPSet = dict(
MaxNumberOfCosmicClusters = 150000,
MaxNumberOfPixelClusters = 20000,
cut = "strip < 150000 && pixel < 20000 && (strip < 20000 + 7* pixel)"
)
)
from Configuration.Eras.Modifier_trackingPhase2PU140_cff import trackingPhase2PU140
trackingPhase2PU140.toModify(photonConvTrajSeedFromSingleLeg,
ClusterCheckPSet = dict(
MaxNumberOfCosmicClusters = 1000000,
MaxNumberOfPixelClusters = 100000,
cut = None
),
OrderedHitsFactoryPSet = dict(maxElement = 100000),
RegionFactoryPSet = dict(RegionPSet = dict(ptMin = 0.3)),
)
from Configuration.Eras.Modifier_peripheralPbPb_cff import peripheralPbPb
peripheralPbPb.toModify(photonConvTrajSeedFromSingleLeg,
ClusterCheckPSet = dict(cut = "strip < 400000 && pixel < 40000 && (strip < 60000 + 7.0*pixel) && (pixel < 8000 + 0.14*strip)")
)
from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017
from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018
(pp_on_XeXe_2017 | pp_on_AA_2018 ).toModify(photonConvTrajSeedFromSingleLeg,
ClusterCheckPSet = dict(MaxNumberOfPixelClusters = 100000,
cut = "strip < 1000000 && pixel < 100000 && (strip < 50000 + 10*pixel) && (pixel < 5000 + strip/2.)"
),
OrderedHitsFactoryPSet = dict(maxElement = 100000)
)
from RecoTracker.TkTrackingRegions.globalTrackingRegionWithVertices_cff import globalTrackingRegionWithVertices as _globalTrackingRegionWithVertices
(pp_on_XeXe_2017 | pp_on_AA_2018 ).toModify(photonConvTrajSeedFromSingleLeg,
RegionFactoryPSet = dict(ComponentName = 'GlobalTrackingRegionWithVerticesProducer',
RegionPSet = _globalTrackingRegionWithVertices.RegionPSet.clone(
originRadius = 0,
originRScaling4BigEvts = True,
minOriginR = 0,
scalingStartNPix = 0,
scalingEndNPix = 1
),
)
)
| true | true |
f7268b2e949d82a8bd564b36aff123357d5bc3a1 | 9,689 | py | Python | client_server_test/NEWGUI.py | hades208002/mdp-project | c242a8d00412cc3772d298986977f6acc47002ee | [
"MIT"
] | null | null | null | client_server_test/NEWGUI.py | hades208002/mdp-project | c242a8d00412cc3772d298986977f6acc47002ee | [
"MIT"
] | null | null | null | client_server_test/NEWGUI.py | hades208002/mdp-project | c242a8d00412cc3772d298986977f6acc47002ee | [
"MIT"
] | null | null | null | from tkinter import *
from tkinter import ttk
import tkinter.filedialog as fd
import pandas as pd
from LocalModelCommunication import LocalModelCommunication
from APP import APP
class GUI(object):
def __init__(self):
# overall
self.tabControl = None
self.tab_step1 = None
self.tab_step2 = None
self.tab_step3 = None
self.tab_step4 = None
self.dataframe = None
self.img_wait = PhotoImage(file='test.GIF')
# 1 step
self.fname = None
self.data = None
self.features = None
self.import_lable = None
self.import_label_text = StringVar()
self.import_label_text.set(' ')
# 2 step
self.required = ['RR', 'QTm_old', 'sbjBeatConsidered', 'numRRaveraged', 'QR', 'QTn', 'QRS', 'IPG',
'PQ', 'PCpos', 'PCneg', 'patsex', 'AFclass', 'Age']
self.required_ordered = []
i = 0
for item in self.required:
self.required_ordered.append(str(i) + ': ' + item)
i = i + 1
self.leftbox = StringVar()
self.rightbox = StringVar()
self.rrightbox = StringVar()
self.list_left = None
self.list_right = None
self.list_rright = None
# 3 step
self.model_label = None
self.model_label_text = StringVar()
self.model_label_text.set('Waiting for model training...')
self.img_gif = PhotoImage(file='img.GIF')
# 4 step
self.connect_label = None
self.connect_label_text = StringVar()
self.connect_label_text.set('Waiting for central server response...')
# 5 step
# help functions
def add_tab(self, tabControl, tab_name):
tab = ttk.Frame(tabControl) # Create a tab
tabControl.add(tab, text=tab_name)
return tab
# Callback functions
## step 1
def get_csv(self): # open file system
self.fname = fd.askopenfilename(filetypes=[(".csv file", ".csv")])
self.data = pd.read_csv(self.fname, delimiter=',')
self.features = self.data.columns
self.import_label_text.set('Import data from: ' + self.fname + '\n' + str(self.features))
self.import_lable.pack(side=TOP)
def go_next_step2(self):
self.tab_step2 = self.add_tab(self.tabControl, "Step 2: Match Features")
self.tab_match(self.tab_step2)
self.tabControl.select(self.tab_step2)
self.tabControl.forget(self.tab_step1)
## step 2
def move_to_right(self):
self.list_right.insert(END,
str(self.list_right.size()) + ': ' + self.list_left.get(self.list_left.curselection()))
self.list_left.delete(self.list_left.curselection())
def move_to_left(self):
content = self.list_right.get(self.list_right.curselection())
contents = content.split(': ')
self.list_left.insert(END, contents[1])
self.list_right.delete(self.list_right.curselection())
def add_nan(self):
self.list_right.insert(END, str(self.list_right.size()) + ': ' + 'NAN')
def go_next_step3(self):
# prepare dataframe for localmodel
columns = []
contents = self.rightbox.get()
contents = contents.replace('(', '')
contents = contents.replace(')', '')
contents = contents.replace("'", '')
item_list = contents.split(', ')
for item in item_list:
content = item.split(': ')[1]
if content != 'NAN':
columns.append(content)
self.dataframe = self.data[columns]
print(self.dataframe.head(2))
self.tab_step3 = self.add_tab(self.tabControl, "Step 3: Train Model")
# render tab3
self.tab_model(self.tab_step3)
self.tabControl.select(self.tab_step3)
self.tabControl.forget(self.tab_step2)
def go_back_step1(self):
self.tab_step1 = self.add_tab(self.tabControl, "Step 1: Import Data")
# render tab1
self.tab_import(self.tab_step1, self.tabControl)
self.tabControl.select(self.tab_step1)
self.tabControl.forget(self.tab_step2)
## step 3
def go_next_step4(self):
self.tab_step4 = self.add_tab(self.tabControl, "Step 4: Connect to Central Server")
# render tab4
self.tab_connect(self.tab_step4)
self.tabControl.select(self.tab_step4)
self.tabControl.forget(self.tab_step3)
def go_back_step2(self):
self.tab_step2 = self.add_tab(self.tabControl, "Step 2: Match Features")
# render tab2
self.tab_match(self.tab_step2)
self.tabControl.select(self.tab_step2)
self.tabControl.forget(self.tab_step3)
## step 4
def go_next_step5(self):
self.tab_step5 = self.add_tab(self.tabControl, "Step 5: Wait for Prediction Call")
# render tab5
self.tab_wait(self.tab_step5)
self.tabControl.select(self.tab_step5)
self.tabControl.forget(self.tab_step4)
def go_back_step3(self):
self.tab_step3 = self.add_tab(self.tabControl, "Step 3: Train Model")
# render tab3
self.tab_model(self.tab_step3)
self.tabControl.select(self.tab_step3)
self.tabControl.forget(self.tab_step4)
## step 5
# frames
def tab_import(self, root, tabControl):
"""
Load local data (csv file)
"""
self.tabControl = tabControl
self.tab_step1 = root
frame = Frame(root)
frame.pack(side=TOP)
Button(frame, text='Import Data', command=self.get_csv, width=16).pack(side=TOP)
label_frame = ttk.LabelFrame(frame, text='Press Button to Import Data')
label_frame.pack(side=TOP)
self.import_lable = ttk.Label(label_frame, textvariable=self.import_label_text)
self.import_lable.pack(side=TOP)
frame = Frame(root)
frame.pack(side=BOTTOM)
Button(frame, text='Next>>', command=self.go_next_step2, width=16).pack(side=TOP)
def tab_match(self, root):
"""
Feature matching
"""
self.leftbox.set(sorted(self.features))
self.rightbox.set('')
self.rrightbox.set(self.required_ordered)
frame = Frame(root)
frame.pack(side=BOTTOM)
Button(frame, text='Next>>', command=self.go_next_step3, width=16).pack(side=RIGHT)
Button(frame, text='<<Back', command=self.go_back_step1, width=16).pack(side=LEFT)
frame = Frame(root)
frame.pack(side=LEFT)
column_head = ttk.Label(frame, text='Local Features')
column_head.pack(side=TOP)
self.list_left = Listbox(frame, listvariable=self.leftbox, width=25, height=20)
self.list_left.pack(side=LEFT)
scrollbar = Scrollbar(frame, orient="vertical")
scrollbar.config(command=self.list_left.yview)
scrollbar.pack(side="right", fill="y")
frame = Frame(root)
frame.pack(side=LEFT)
Button(frame, text='->', command=self.move_to_right, width=7).pack(side=TOP)
Button(frame, text='<-', command=self.move_to_left, width=7).pack(side=TOP)
Button(frame, text='NAN', command=self.add_nan, width=7).pack(side=TOP)
frame = Frame(root)
frame.pack(side=LEFT)
column_head = ttk.Label(frame, text='Matched Features')
column_head.pack(side=TOP)
self.list_right = Listbox(frame, listvariable=self.rightbox,height=20, width=25)
self.list_right.pack(side=LEFT)
scrollbar = Scrollbar(frame, orient="vertical")
scrollbar.config(command=self.list_right.yview)
scrollbar.pack(side="right", fill="y")
frame = Frame(root)
frame.pack(side=RIGHT)
column_head = ttk.Label(frame, text='Required Features')
column_head.pack(side=TOP)
self.list_rright = Listbox(frame, listvariable=self.rrightbox,height=20, width=25)
self.list_rright.pack(side=LEFT)
scrollbar = Scrollbar(frame, orient="vertical")
scrollbar.config(command=self.list_rright.yview)
scrollbar.pack(side="right", fill="y")
def tab_model(self, root):
"""
Call localmodel.init() and localmodel.train()
Display model accuracy
"""
frame = Frame(root)
frame.pack(side=TOP)
self.label_frame = ttk.LabelFrame(frame)
self.label_frame.pack(side=TOP)
self.model_label = ttk.Label(self.label_frame, textvariable=self.model_label_text)
self.model_label.pack(side=TOP)
self.label_img = ttk.Label(self.label_frame, image=self.img_wait)
self.label_img.pack()
frame = Frame(root)
frame.pack(side=BOTTOM)
Button(frame, text='Next>>', command=self.go_next_step4, width=16).pack(side=RIGHT)
Button(frame, text='<<Back', command=self.go_back_step2, width=16).pack(side=LEFT)
print ("MODEL TRAINED -> ")
self.loca = LocalModelCommunication(data= self.dataframe)
training_result = self.loca.chooseModel_with_crossValidation_and_train()
print (training_result)
self.trainingdone()
def trainingdone(self):
self.label_img.config(image=self.img_gif)
self.label_img.pack()
def tab_connect(self, root):
"""
Connect to center server
"""
frame = Frame(root)
frame.pack(side=TOP)
label_frame = ttk.LabelFrame(frame)
label_frame.pack(side=TOP)
self.connect_label = ttk.Label(label_frame, textvariable=self.connect_label_text)
self.connect_label.pack(side=TOP)
label_img = ttk.Label(label_frame, image=self.img_wait)
label_img.pack()
frame = Frame(root)
frame.pack(side=BOTTOM)
Button(frame, text='Next>>', command=self.go_next_step5, width=16).pack(side=RIGHT)
Button(frame, text='<<Back', command=self.go_back_step3, width=16).pack(side=LEFT)
## cannot get fast responce! -> get false even if we are connected :]
if self.loca.connectToCentral() == False :
print ("not connected")
else :
print ("connected")
'''
self.root = Tk()
self.root.geometry("700x500")
self.root.title("Doctor Application")
self.root.resizable(width=False, height=False)
self.app = APP(root)
self.root.mainloop()
'''
def tab_wait(self, root):
"""
Call localmodel.predict()
:return:
"""
frame = Frame(root)
frame.pack(side=TOP)
label_frame = ttk.LabelFrame(frame)
label_frame.pack(side=TOP)
label = ttk.Label(label_frame, text='TODO')
label.pack(side=TOP)
if __name__ == '__main__':
root = Tk()
root.geometry("700x500")
root.title("Modeling Tool GUI")
root.resizable(width=False, height=False)
tabControl = ttk.Notebook(root)
tab_step1 = ttk.Frame(tabControl)
tabControl.add(tab_step1, text="Step 1: Import Data")
tabControl.pack(expand=1, fill="both") # Pack to make visible
gui = GUI()
gui.tab_import(tab_step1, tabControl)
root.mainloop()
| 31.254839 | 100 | 0.716999 | from tkinter import *
from tkinter import ttk
import tkinter.filedialog as fd
import pandas as pd
from LocalModelCommunication import LocalModelCommunication
from APP import APP
class GUI(object):
def __init__(self):
self.tabControl = None
self.tab_step1 = None
self.tab_step2 = None
self.tab_step3 = None
self.tab_step4 = None
self.dataframe = None
self.img_wait = PhotoImage(file='test.GIF')
self.fname = None
self.data = None
self.features = None
self.import_lable = None
self.import_label_text = StringVar()
self.import_label_text.set(' ')
self.required = ['RR', 'QTm_old', 'sbjBeatConsidered', 'numRRaveraged', 'QR', 'QTn', 'QRS', 'IPG',
'PQ', 'PCpos', 'PCneg', 'patsex', 'AFclass', 'Age']
self.required_ordered = []
i = 0
for item in self.required:
self.required_ordered.append(str(i) + ': ' + item)
i = i + 1
self.leftbox = StringVar()
self.rightbox = StringVar()
self.rrightbox = StringVar()
self.list_left = None
self.list_right = None
self.list_rright = None
self.model_label = None
self.model_label_text = StringVar()
self.model_label_text.set('Waiting for model training...')
self.img_gif = PhotoImage(file='img.GIF')
self.connect_label = None
self.connect_label_text = StringVar()
self.connect_label_text.set('Waiting for central server response...')
def add_tab(self, tabControl, tab_name):
tab = ttk.Frame(tabControl)
tabControl.add(tab, text=tab_name)
return tab
t_csv(self):
self.fname = fd.askopenfilename(filetypes=[(".csv file", ".csv")])
self.data = pd.read_csv(self.fname, delimiter=',')
self.features = self.data.columns
self.import_label_text.set('Import data from: ' + self.fname + '\n' + str(self.features))
self.import_lable.pack(side=TOP)
def go_next_step2(self):
self.tab_step2 = self.add_tab(self.tabControl, "Step 2: Match Features")
self.tab_match(self.tab_step2)
self.tabControl.select(self.tab_step2)
self.tabControl.forget(self.tab_step1)
ve_to_right(self):
self.list_right.insert(END,
str(self.list_right.size()) + ': ' + self.list_left.get(self.list_left.curselection()))
self.list_left.delete(self.list_left.curselection())
def move_to_left(self):
content = self.list_right.get(self.list_right.curselection())
contents = content.split(': ')
self.list_left.insert(END, contents[1])
self.list_right.delete(self.list_right.curselection())
def add_nan(self):
self.list_right.insert(END, str(self.list_right.size()) + ': ' + 'NAN')
def go_next_step3(self):
columns = []
contents = self.rightbox.get()
contents = contents.replace('(', '')
contents = contents.replace(')', '')
contents = contents.replace("'", '')
item_list = contents.split(', ')
for item in item_list:
content = item.split(': ')[1]
if content != 'NAN':
columns.append(content)
self.dataframe = self.data[columns]
print(self.dataframe.head(2))
self.tab_step3 = self.add_tab(self.tabControl, "Step 3: Train Model")
# render tab3
self.tab_model(self.tab_step3)
self.tabControl.select(self.tab_step3)
self.tabControl.forget(self.tab_step2)
def go_back_step1(self):
self.tab_step1 = self.add_tab(self.tabControl, "Step 1: Import Data")
# render tab1
self.tab_import(self.tab_step1, self.tabControl)
self.tabControl.select(self.tab_step1)
self.tabControl.forget(self.tab_step2)
## step 3
def go_next_step4(self):
self.tab_step4 = self.add_tab(self.tabControl, "Step 4: Connect to Central Server")
# render tab4
self.tab_connect(self.tab_step4)
self.tabControl.select(self.tab_step4)
self.tabControl.forget(self.tab_step3)
def go_back_step2(self):
self.tab_step2 = self.add_tab(self.tabControl, "Step 2: Match Features")
# render tab2
self.tab_match(self.tab_step2)
self.tabControl.select(self.tab_step2)
self.tabControl.forget(self.tab_step3)
## step 4
def go_next_step5(self):
self.tab_step5 = self.add_tab(self.tabControl, "Step 5: Wait for Prediction Call")
# render tab5
self.tab_wait(self.tab_step5)
self.tabControl.select(self.tab_step5)
self.tabControl.forget(self.tab_step4)
def go_back_step3(self):
self.tab_step3 = self.add_tab(self.tabControl, "Step 3: Train Model")
# render tab3
self.tab_model(self.tab_step3)
self.tabControl.select(self.tab_step3)
self.tabControl.forget(self.tab_step4)
## step 5
# frames
def tab_import(self, root, tabControl):
self.tabControl = tabControl
self.tab_step1 = root
frame = Frame(root)
frame.pack(side=TOP)
Button(frame, text='Import Data', command=self.get_csv, width=16).pack(side=TOP)
label_frame = ttk.LabelFrame(frame, text='Press Button to Import Data')
label_frame.pack(side=TOP)
self.import_lable = ttk.Label(label_frame, textvariable=self.import_label_text)
self.import_lable.pack(side=TOP)
frame = Frame(root)
frame.pack(side=BOTTOM)
Button(frame, text='Next>>', command=self.go_next_step2, width=16).pack(side=TOP)
def tab_match(self, root):
self.leftbox.set(sorted(self.features))
self.rightbox.set('')
self.rrightbox.set(self.required_ordered)
frame = Frame(root)
frame.pack(side=BOTTOM)
Button(frame, text='Next>>', command=self.go_next_step3, width=16).pack(side=RIGHT)
Button(frame, text='<<Back', command=self.go_back_step1, width=16).pack(side=LEFT)
frame = Frame(root)
frame.pack(side=LEFT)
column_head = ttk.Label(frame, text='Local Features')
column_head.pack(side=TOP)
self.list_left = Listbox(frame, listvariable=self.leftbox, width=25, height=20)
self.list_left.pack(side=LEFT)
scrollbar = Scrollbar(frame, orient="vertical")
scrollbar.config(command=self.list_left.yview)
scrollbar.pack(side="right", fill="y")
frame = Frame(root)
frame.pack(side=LEFT)
Button(frame, text='->', command=self.move_to_right, width=7).pack(side=TOP)
Button(frame, text='<-', command=self.move_to_left, width=7).pack(side=TOP)
Button(frame, text='NAN', command=self.add_nan, width=7).pack(side=TOP)
frame = Frame(root)
frame.pack(side=LEFT)
column_head = ttk.Label(frame, text='Matched Features')
column_head.pack(side=TOP)
self.list_right = Listbox(frame, listvariable=self.rightbox,height=20, width=25)
self.list_right.pack(side=LEFT)
scrollbar = Scrollbar(frame, orient="vertical")
scrollbar.config(command=self.list_right.yview)
scrollbar.pack(side="right", fill="y")
frame = Frame(root)
frame.pack(side=RIGHT)
column_head = ttk.Label(frame, text='Required Features')
column_head.pack(side=TOP)
self.list_rright = Listbox(frame, listvariable=self.rrightbox,height=20, width=25)
self.list_rright.pack(side=LEFT)
scrollbar = Scrollbar(frame, orient="vertical")
scrollbar.config(command=self.list_rright.yview)
scrollbar.pack(side="right", fill="y")
def tab_model(self, root):
frame = Frame(root)
frame.pack(side=TOP)
self.label_frame = ttk.LabelFrame(frame)
self.label_frame.pack(side=TOP)
self.model_label = ttk.Label(self.label_frame, textvariable=self.model_label_text)
self.model_label.pack(side=TOP)
self.label_img = ttk.Label(self.label_frame, image=self.img_wait)
self.label_img.pack()
frame = Frame(root)
frame.pack(side=BOTTOM)
Button(frame, text='Next>>', command=self.go_next_step4, width=16).pack(side=RIGHT)
Button(frame, text='<<Back', command=self.go_back_step2, width=16).pack(side=LEFT)
print ("MODEL TRAINED -> ")
self.loca = LocalModelCommunication(data= self.dataframe)
training_result = self.loca.chooseModel_with_crossValidation_and_train()
print (training_result)
self.trainingdone()
def trainingdone(self):
    """Swap the "busy" image for the "done" image once training finishes."""
    self.label_img.configure(image=self.img_gif)
    self.label_img.pack()
def tab_connect(self, root):
    """Build the connection tab and try to reach the central server once."""
    # Status display: label text plus a "waiting" image.
    status_area = Frame(root)
    status_area.pack(side=TOP)
    status_box = ttk.LabelFrame(status_area)
    status_box.pack(side=TOP)
    self.connect_label = ttk.Label(status_box, textvariable=self.connect_label_text)
    self.connect_label.pack(side=TOP)
    wait_img = ttk.Label(status_box, image=self.img_wait)
    wait_img.pack()

    # Wizard navigation buttons.
    nav_area = Frame(root)
    nav_area.pack(side=BOTTOM)
    Button(nav_area, text='Next>>', command=self.go_next_step5, width=16).pack(side=RIGHT)
    Button(nav_area, text='<<Back', command=self.go_back_step3, width=16).pack(side=LEFT)

    # NOTE(review): the original author observed connectToCentral() responding
    # slowly and returning False even when connected — confirm its timeout
    # behaviour before relying on this check.
    if self.loca.connectToCentral() == False :
        print ("not connected")
    else :
        print ("connected")
def tab_wait(self, root):
    """Placeholder tab shown while waiting; real content not implemented yet."""
    outer = Frame(root)
    outer.pack(side=TOP)
    holder = ttk.LabelFrame(outer)
    holder.pack(side=TOP)
    # Single placeholder label until the waiting screen is designed.
    ttk.Label(holder, text='TODO').pack(side=TOP)
if __name__ == '__main__':
    # Application entry point: fixed-size window hosting a notebook of
    # wizard steps; only step 1 is added here, later steps are created by
    # the GUI callbacks as the user advances.
    root = Tk()
    root.geometry("700x500")
    root.title("Modeling Tool GUI")
    root.resizable(width=False, height=False)

    tabControl = ttk.Notebook(root)
    tab_step1 = ttk.Frame(tabControl)
    tabControl.add(tab_step1, text="Step 1: Import Data")
    tabControl.pack(expand=1, fill="both")  # pack so the notebook is visible

    gui = GUI()
    gui.tab_import(tab_step1, tabControl)
    root.mainloop()
| true | true |
f7268c06513bb9fa62c71a99d8cdf748c117880d | 14,879 | py | Python | cifar_cnn_three_conv.py | sidneyp/bidirectional | d3d1dbb727e5a25b4980646f1eb500245072f079 | [
"BSD-3-Clause"
] | 8 | 2018-05-22T10:02:51.000Z | 2022-01-11T03:02:51.000Z | cifar_cnn_three_conv.py | sidneyp/bidirectional | d3d1dbb727e5a25b4980646f1eb500245072f079 | [
"BSD-3-Clause"
] | null | null | null | cifar_cnn_three_conv.py | sidneyp/bidirectional | d3d1dbb727e5a25b4980646f1eb500245072f079 | [
"BSD-3-Clause"
] | 2 | 2021-01-07T19:39:19.000Z | 2021-11-14T09:06:35.000Z | import tensorflow as tf
import keras
from keras.datasets import cifar10
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
import sys
import csv
import utils_csv
import utils_tf as utils
from cleverhans.utils_tf import model_train, model_eval
from cleverhans.attacks import FastGradientMethod
from cleverhans.model import Model
print("Tensorflow version " + tf.__version__)
config_num = int(sys.argv[1]) if len(sys.argv) > 1 else 1 # Choose type of learning technique according to config_dict
config_dict = {0: "backprop", 1: "biprop", 2: "halfbiprop", 3: "nobias_backprop", 4: "nobias_biprop", 5: "nobias_halfbiprop"}
num_classes = 10
model_name = sys.argv[0].replace(".py", "") + "_" + config_dict[config_num]
print("Model name: " + model_name)
# load data
# https://github.com/BIGBALLON/cifar-10-cnn/blob/master/1_Lecun_Network/LeNet_keras.py
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# for reproducibility
np.random.seed(0)
tf.set_random_seed(0)
sess = tf.InteractiveSession()
# three convolutional layers with their channel counts, and a
# fully connected layer (tha last layer has 10 softmax neurons)
K = 4 # first convolutional layer output depth
L = 8 # second convolutional layer output depth
M = 12 # third convolutional layer
N = 200 # fully connected layer
with tf.name_scope("input"):
# input X & output GX_: 28x28 grayscale images, the first dimension (None) will index the images in the mini-batch
X = tf.placeholder(tf.float32, [None, 32, 32, 3])
X_noisy = tf.placeholder(tf.float32, [None, 32, 32, 3])
X_adv = tf.placeholder(tf.float32, [None, 32, 32, 3])
GX_ = tf.placeholder(tf.float32, [None, 32, 32, 3])
# output Y_ & input GY: labels for classification and generation
Y_ = tf.placeholder(tf.float32, [None, num_classes])
GY = tf.placeholder(tf.float32, [None, num_classes])
# variable learning rate
lr = tf.placeholder(tf.float32)
# variable batch size
BS = tf.placeholder(tf.int32)
input_test_sum = tf.summary.image("input", X, num_classes)
input_noisy_sum = tf.summary.image("input-noisy", X_noisy, num_classes)
input_adv_sum = tf.summary.image("input-adv", X_adv, num_classes)
with tf.name_scope("classifier-generator"):
C_W1 = utils.weight_variable([5, 5, 3, K], stddev=0.1, name="C_W1")
C_W2 = utils.weight_variable([5, 5, K, L], stddev=0.1, name="C_W2")
C_W3 = utils.weight_variable([4, 4, L, M], stddev=0.1, name="C_W3")
C_W4 = utils.weight_variable([8 * 8 * M, N], stddev=0.1, name="C_W4")
C_W5 = utils.weight_variable([N, num_classes], stddev=0.1, name="C_W5")
def classifier(x, reuse=None):
    """Three-conv + one-FC classifier over 32x32x3 inputs.

    Weights C_W1..C_W5 come from the enclosing module (shared with the
    generator); only the biases C_B* are created in this variable scope.

    Returns:
        (Ysoftmax, Ysigmoid, Ylogits) tensors over num_classes outputs.
    """
    with tf.variable_scope("classifier", reuse=reuse) as scope_c:
        C_B1 = utils.bias_variable([K], name="C_B1")
        C_B2 = utils.bias_variable([L], name="C_B2")
        C_B3 = utils.bias_variable([M], name="C_B3")
        C_B4 = utils.bias_variable([N], name="C_B4")
        C_B5 = utils.bias_variable([num_classes], name="C_B5")

        # 32x32 -> 32x32 (stride 1)
        conv1 = tf.nn.relu(tf.nn.conv2d(x, C_W1, strides=[1, 1, 1, 1], padding='SAME') + C_B1)
        # 32x32 -> 16x16 (stride 2)
        conv2 = tf.nn.relu(tf.nn.conv2d(conv1, C_W2, strides=[1, 2, 2, 1], padding='SAME') + C_B2)
        # 16x16 -> 8x8 (stride 2)
        conv3 = tf.nn.relu(tf.nn.conv2d(conv2, C_W3, strides=[1, 2, 2, 1], padding='SAME') + C_B3)

        # Flatten for the fully connected head.
        flat = tf.reshape(conv3, shape=[-1, 8 * 8 * M])
        fc = tf.nn.relu(tf.matmul(flat, C_W4) + C_B4)
        Ylogits = tf.matmul(fc, C_W5) + C_B5
        return tf.nn.softmax(Ylogits), tf.nn.sigmoid(Ylogits), Ylogits
class ClassifierModel(Model):
    """Cleverhans wrapper so attack code can query the classifier's logits."""

    def get_logits(self, x):
        # Reuse the already-created classifier variables for the attack graph.
        _softmax, _sigmoid, logits = classifier(x, reuse=True)
        return logits
# The generator runs the classifier weights "backwards": label -> image.
def generator(y, bs, reuse=None):
    """Deconvolutional generator that reuses the classifier weights C_W*.

    Only the biases G_B* belong to the generator scope; the weight matrices
    are the classifier's, applied transposed (FC) or via conv2d_transpose.

    Args:
        y: one-hot class labels, shape [bs, num_classes].
        bs: batch size (needed for conv2d_transpose output shapes).
        reuse: variable-scope reuse flag.

    Returns:
        (GXsigmoid, GXlogits): generated 32x32x3 images and their logits.
    """
    with tf.variable_scope("generator", reuse=reuse) as scope_g:
        G_B1 = utils.bias_variable([3], name="G_B1")
        G_B2 = utils.bias_variable([K], name="G_B2")
        G_B3 = utils.bias_variable([L], name="G_B3")
        G_B4 = utils.bias_variable([M*8*8], name="G_B4")
        G_B5 = utils.bias_variable([N], name="G_B5")

        # Mirror the FC head with transposed weight matrices.
        GH4 = tf.nn.relu(tf.matmul(y, tf.transpose(C_W5)) + G_B5)
        GH3 = tf.nn.relu(tf.matmul(GH4, tf.transpose(C_W4)) + G_B4)
        GHH3 = tf.reshape(GH3, shape=[-1, 8, 8, M])

        # Mirror the conv stack: 8x8 -> 16x16 -> 32x32 -> 32x32.
        GH2 = tf.nn.relu(tf.nn.conv2d_transpose(GHH3, C_W3, output_shape=[bs, 16, 16, L], strides=[1, 2, 2, 1]) + G_B3)
        GH1 = tf.nn.relu(tf.nn.conv2d_transpose(GH2, C_W2, output_shape=[bs, 32, 32, K], strides=[1, 2, 2, 1]) + G_B2)
        GXlogits = tf.nn.conv2d_transpose(GH1, C_W1, output_shape=[bs, 32, 32, 3], strides=[1, 1, 1, 1]) + G_B1
        return tf.nn.sigmoid(GXlogits), GXlogits
def plot_generator(samples, img_shape=(32, 32, 3)):
    """Plot a batch of images on a tight grid and return the figure.

    Args:
        samples: iterable of images; each is reshaped to ``img_shape``
            before display, so flattened samples are accepted too.
        img_shape: (height, width, channels) of each sample. Defaults to
            CIFAR-10's 32x32 RGB, so existing callers are unaffected; the
            previously hard-coded reshape constant is now a parameter.

    Returns:
        The matplotlib figure; the caller is responsible for saving and
        closing it.
    """
    # Grid layout follows the module-level num_classes: 2x5 for 10 classes,
    # otherwise a 10x10 grid.
    if num_classes == 10:
        fig = plt.figure(figsize=(5, 2))
        gs = gridspec.GridSpec(2, 5)
    else:
        fig = plt.figure(figsize=(10, 10))
        gs = gridspec.GridSpec(10, 10)
    gs.update(wspace=0.05, hspace=0.05)

    for i, sample in enumerate(samples):
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(sample.reshape(img_shape))
    return fig
# Instantiate the generator (fresh + reused/test copies) and the three
# classifier views (clean / noisy / adversarial inputs share the weights).
GXsigmoid, GXlogits = generator(GY, BS)
GXsigmoid_test, GXlogits_test = generator(GY, BS, reuse=True)
Ysoftmax, Ysigmoid, Ylogits = classifier(X)
model_classifier = ClassifierModel()
Ysoftmax_noisy, Ysigmoid_noisy, Ylogits_noisy = classifier(X_noisy, reuse=True)
Ysoftmax_adv, Ysigmoid_adv, Ylogits_adv = classifier(X_adv, reuse=True)

with tf.name_scope("loss"):
    # Classification loss on real images; pixel-wise reconstruction loss
    # for the generator against the real batch images GX_.
    c_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_))
    g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=GXlogits, labels=GX_))

g_loss_sum = tf.summary.scalar("g_loss", g_loss)
c_loss_sum = tf.summary.scalar("c_loss", c_loss)

# Accuracy on clean, noisy and adversarial inputs, between 0 (worst) and 1.
with tf.name_scope("accuracy"):
    with tf.name_scope("correct_prediction"):
        correct_prediction = tf.equal(tf.argmax(Ysoftmax, 1), tf.argmax(Y_, 1))
    with tf.name_scope("accuracy"):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    with tf.name_scope("correct_prediction_noisy"):
        correct_prediction_noisy = tf.equal(tf.argmax(Ysoftmax_noisy, 1), tf.argmax(Y_, 1))
    with tf.name_scope("accuracy_noisy"):
        accuracy_noisy = tf.reduce_mean(tf.cast(correct_prediction_noisy, tf.float32))
    with tf.name_scope("correct_prediction_adv"):
        correct_prediction_adv = tf.equal(tf.argmax(Ysoftmax_adv, 1), tf.argmax(Y_, 1))
    with tf.name_scope("accuracy_adv"):
        accuracy_adv = tf.reduce_mean(tf.cast(correct_prediction_adv, tf.float32))

accuracy_sum = tf.summary.scalar("accuracy", accuracy)
accuracy_noisy_sum = tf.summary.scalar("accuracy_noisy", accuracy_noisy)
accuracy_adv_sum = tf.summary.scalar("accuracy_adv", accuracy_adv)

# Peak output activations — a proxy for model (over)confidence per input type.
with tf.name_scope("max_output"):
    with tf.name_scope("max_output_test"):
        max_output_sigmoid_test = tf.reduce_max(Ysigmoid)
        max_output_softmax_test = tf.reduce_max(Ysoftmax)
    with tf.name_scope("max_output_noise"):
        max_output_sigmoid_noise = tf.reduce_max(Ysigmoid_noisy)
        max_output_softmax_noise = tf.reduce_max(Ysoftmax_noisy)
    with tf.name_scope("max_output_adv"):
        max_output_sigmoid_adv = tf.reduce_max(Ysigmoid_adv)
        max_output_softmax_adv = tf.reduce_max(Ysoftmax_adv)

max_output_sigmoid_test_sum = tf.summary.scalar("max_output_sigmoid_test", max_output_sigmoid_test)
max_output_softmax_test_sum = tf.summary.scalar("max_output_softmax_test", max_output_softmax_test)
max_output_sigmoid_noise_sum = tf.summary.scalar("max_output_sigmoid_noise", max_output_sigmoid_noise)
max_output_softmax_noise_sum = tf.summary.scalar("max_output_softmax_noise", max_output_softmax_noise)
max_output_sigmoid_adv_sum = tf.summary.scalar("max_output_sigmoid_adv", max_output_sigmoid_adv)
max_output_softmax_adv_sum = tf.summary.scalar("max_output_softmax_adv", max_output_softmax_adv)

utils.show_all_variables()

# Variable selection: "nobias" configurations (3-5) train weights only.
t_vars = tf.trainable_variables()
c_vars = [var for var in t_vars if 'C_' in var.name]\
    if config_num < 3 else [var for var in t_vars if 'C_W' in var.name]
g_vars = [var for var in t_vars if 'C_W' in var.name or 'G_' in var.name]\
    if config_num < 3 else c_vars

# Both optimizers share the same placeholder-fed learning rate.
learning_rate_dis = lr
learning_rate_gen = lr
with tf.name_scope("train"):
    c_train = tf.train.AdamOptimizer(learning_rate_dis).minimize(c_loss, var_list=c_vars)
    g_train = tf.train.AdamOptimizer(learning_rate_gen).minimize(g_loss, var_list=g_vars)

# Merged summary ops, grouped by what is being evaluated.
g_sum = tf.summary.merge([g_loss_sum])
c_sum = tf.summary.merge([input_test_sum, accuracy_sum, c_loss_sum, max_output_sigmoid_test_sum, max_output_softmax_test_sum])
noise_sum = tf.summary.merge([max_output_sigmoid_noise_sum, max_output_softmax_noise_sum])
noisy_sum = tf.summary.merge([input_noisy_sum, accuracy_noisy_sum])
adv_sum = tf.summary.merge([input_adv_sum, accuracy_adv_sum, max_output_sigmoid_adv_sum, max_output_softmax_adv_sum])

# Output folders for generated images, CSV metrics and TensorBoard logs.
folder_out = 'out/' + model_name + '/'
if not os.path.exists(folder_out):
    os.makedirs(folder_out)
folder_csv = 'csv/' + model_name + '/'
if not os.path.exists(folder_csv):
    os.makedirs(folder_csv)
folder_logs = 'logs/' + model_name
# BUG FIX: this check previously tested folder_csv (a copy-paste slip), so
# the logs directory was never created here.
if not os.path.exists(folder_logs):
    os.makedirs(folder_logs)

writer = tf.summary.FileWriter(folder_logs, sess.graph)

batch_size = 100
num_train_images = x_train.shape[0]
num_batches = num_train_images // batch_size
all_classes = np.eye(num_classes)  # one one-hot label per class, for generation
counter = 0
fgsm_params = {'eps': 0.03,
               'clip_min': 0.,
               'clip_max': 1.}

# Fixed noise reused across evaluations: pure noise for confidence checks,
# and test images perturbed by 10% of that noise for robustness checks.
random_noise = np.random.random_sample(x_test.shape)
test_image_with_noise = np.clip(x_test + 0.1*random_noise, 0., 1.)

accuracy_list = []
sigmoid_list = []
softmax_list = []

tf.global_variables_initializer().run()
# Main training loop: 50001 mini-batch steps; every 500 steps (and at the
# final step) the model is fully evaluated on clean, noisy and adversarial
# inputs and sample images are written to disk.
for i in range(50001):
    # Reshuffle the training set at the start of each epoch.
    if i % num_batches == 0:
        idx_train = np.arange(x_train.shape[0])
        np.random.shuffle(idx_train)
        x_train, y_train = x_train[idx_train], y_train[idx_train]
    idx = i % num_batches
    batch_X = x_train[idx*batch_size:(idx+1)*batch_size]
    batch_Y = y_train[idx*batch_size:(idx+1)*batch_size]

    # Exponential learning-rate decay: 0.003 -> 0.0001 with time constant 2000.
    max_learning_rate = 0.003
    min_learning_rate = 0.0001
    decay_speed = 2000.0
    learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * np.exp(-i/decay_speed)

    if i % 500 == 0 or i == 50000:
        counter += 1
        # Save one generated image per class (generator fed all one-hot labels).
        samples = sess.run(GXsigmoid_test, feed_dict={GY: all_classes, BS: num_classes})
        fig = plot_generator(samples)
        plt.savefig(folder_out+"gen_"+str(i).zfill(6)+'.png', bbox_inches='tight')
        plt.close(fig)

        # Craft FGSM adversarial examples for the whole test set and save a few.
        attack_fgsm = FastGradientMethod(model_classifier, sess=sess)
        adv_x_np = attack_fgsm.generate_np(x_test, **fgsm_params)
        fig = plot_generator(adv_x_np[:num_classes])
        plt.savefig(folder_out+"adv_"+str(i).zfill(6)+'.png', bbox_inches='tight')
        plt.close(fig)

        # Clean-test metrics and generator loss on the current batch.
        accu_test, c_loss_test, sigmoid_test, softmax_test, sum_c = sess.run([accuracy, c_loss, max_output_sigmoid_test, max_output_softmax_test, c_sum], {X: x_test, Y_: y_test})
        writer.add_summary(sum_c, i)
        g_loss_test, sum_g = sess.run([g_loss, g_sum], {GY: batch_Y, GX_: batch_X, BS: batch_size})
        writer.add_summary(sum_g, i)
        print(str(i) + ": epoch " + str(i*batch_size//x_train.shape[0]+1)\
            + " - test loss class: " + str(c_loss_test) + " test loss gen: " + str(g_loss_test))
        print("Real test images - Sigmoid: " + str(sigmoid_test) + "\tSoftmax: " + str(softmax_test) + "\taccuracy: "+ str(accu_test))

        # Confidence on pure noise, and accuracy on noise-perturbed test images.
        sigmoid_random, softmax_random, sum_random = sess.run([max_output_sigmoid_noise, max_output_softmax_noise, noise_sum], {X_noisy: random_noise})
        writer.add_summary(sum_random, i)
        accu_random, sum_noisy = sess.run([accuracy_noisy, noisy_sum], {X_noisy: test_image_with_noise, Y_: y_test})
        writer.add_summary(sum_noisy, i)
        print("Random noise images - Sigmoid: " + str(sigmoid_random) + "\tSoftmax: " + str(softmax_random) + "\taccuracy: "+ str(accu_random))

        # Metrics on the adversarial examples generated above.
        accu_adv, sigmoid_adv, softmax_adv, sum_adv = sess.run([accuracy_adv, max_output_sigmoid_adv, max_output_softmax_adv, adv_sum], {X_adv: adv_x_np, Y_: y_test})
        writer.add_summary(sum_adv, i)
        print("Adversarial examples - Sigmoid: " + str(sigmoid_adv) + "\tSoftmax: " + str(softmax_adv) + "\taccuracy: "+ str(accu_adv))
        print()

        # Accumulate rows for the CSV metric files written after training.
        accuracy_list.append([i, accu_test, accu_random, accu_adv, counter])
        sigmoid_list.append([i, sigmoid_test, sigmoid_random, sigmoid_adv, counter])
        softmax_list.append([i, softmax_test, softmax_random, softmax_adv, counter])

    # Always train the classifier; train the generator only for the biprop
    # configurations (1, 4) and for the first half of "halfbiprop" (2, 5).
    sess.run(c_train, {X: batch_X, Y_: batch_Y, lr: learning_rate})
    if config_num == 1 or (config_num == 2 and i < 25000) or\
       config_num == 4 or (config_num == 5 and i < 25000):
        sess.run(g_train, {GY: batch_Y, GX_: batch_X, lr: learning_rate, BS: batch_size})

writer.close()
def _save_rows(path, rows):
    """Write one metric table (list of rows) to *path* as CSV."""
    with open(path, "w") as output:
        csv.writer(output, lineterminator='\n').writerows(rows)

# Persist the collected metrics (previously three copy-pasted with-blocks;
# also no longer shadows the TensorBoard FileWriter variable `writer`).
_save_rows(folder_csv+"accuracy.csv", accuracy_list)
_save_rows(folder_csv+"sigmoid.csv", sigmoid_list)
_save_rows(folder_csv+"softmax.csv", softmax_list)

# Reload the CSVs and write a summary of the best values.
accu_data = utils_csv.get_data_csv_file(folder_csv+"accuracy.csv")
sigmoid_data = utils_csv.get_data_csv_file(folder_csv+"sigmoid.csv")
softmax_data = utils_csv.get_data_csv_file(folder_csv+"softmax.csv")
utils_csv.print_best(accu_data, sigmoid_data, softmax_data, folder_csv+"summary.txt")
| 43.00289 | 178 | 0.699308 | import tensorflow as tf
import keras
from keras.datasets import cifar10
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
import sys
import csv
import utils_csv
import utils_tf as utils
from cleverhans.utils_tf import model_train, model_eval
from cleverhans.attacks import FastGradientMethod
from cleverhans.model import Model
print("Tensorflow version " + tf.__version__)
config_num = int(sys.argv[1]) if len(sys.argv) > 1 else 1
config_dict = {0: "backprop", 1: "biprop", 2: "halfbiprop", 3: "nobias_backprop", 4: "nobias_biprop", 5: "nobias_halfbiprop"}
num_classes = 10
model_name = sys.argv[0].replace(".py", "") + "_" + config_dict[config_num]
print("Model name: " + model_name)
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
np.random.seed(0)
tf.set_random_seed(0)
sess = tf.InteractiveSession()
K = 4
L = 8
M = 12
N = 200
with tf.name_scope("input"):
X = tf.placeholder(tf.float32, [None, 32, 32, 3])
X_noisy = tf.placeholder(tf.float32, [None, 32, 32, 3])
X_adv = tf.placeholder(tf.float32, [None, 32, 32, 3])
GX_ = tf.placeholder(tf.float32, [None, 32, 32, 3])
Y_ = tf.placeholder(tf.float32, [None, num_classes])
GY = tf.placeholder(tf.float32, [None, num_classes])
lr = tf.placeholder(tf.float32)
BS = tf.placeholder(tf.int32)
input_test_sum = tf.summary.image("input", X, num_classes)
input_noisy_sum = tf.summary.image("input-noisy", X_noisy, num_classes)
input_adv_sum = tf.summary.image("input-adv", X_adv, num_classes)
with tf.name_scope("classifier-generator"):
C_W1 = utils.weight_variable([5, 5, 3, K], stddev=0.1, name="C_W1")
C_W2 = utils.weight_variable([5, 5, K, L], stddev=0.1, name="C_W2")
C_W3 = utils.weight_variable([4, 4, L, M], stddev=0.1, name="C_W3")
C_W4 = utils.weight_variable([8 * 8 * M, N], stddev=0.1, name="C_W4")
C_W5 = utils.weight_variable([N, num_classes], stddev=0.1, name="C_W5")
def classifier(x, reuse=None):
with tf.variable_scope("classifier", reuse=reuse) as scope_c:
C_B1 = utils.bias_variable([K], name="C_B1")
C_B2 = utils.bias_variable([L], name="C_B2")
C_B3 = utils.bias_variable([M], name="C_B3")
C_B4 = utils.bias_variable([N], name="C_B4")
C_B5 = utils.bias_variable([num_classes], name="C_B5")
stride = 1
H1 = tf.nn.relu(tf.nn.conv2d(x, C_W1, strides=[1, stride, stride, 1], padding='SAME') + C_B1)
stride = 2
H2 = tf.nn.relu(tf.nn.conv2d(H1, C_W2, strides=[1, stride, stride, 1], padding='SAME') + C_B2)
stride = 2
H3 = tf.nn.relu(tf.nn.conv2d(H2, C_W3, strides=[1, stride, stride, 1], padding='SAME') + C_B3)
HH3 = tf.reshape(H3, shape=[-1, 8 * 8 * M])
H4 = tf.nn.relu(tf.matmul(HH3, C_W4) + C_B4)
Ylogits = tf.matmul(H4, C_W5) + C_B5
Ysigmoid = tf.nn.sigmoid(Ylogits)
Ysoftmax = tf.nn.softmax(Ylogits)
return Ysoftmax, Ysigmoid, Ylogits
class ClassifierModel(Model):
def get_logits(self, x):
Ysoftmax, Ysigmoid, Ylogits = classifier(x, reuse=True)
return Ylogits
def generator(y, bs, reuse=None):
with tf.variable_scope("generator", reuse=reuse) as scope_g:
G_B1 = utils.bias_variable([3], name="G_B1")
G_B2 = utils.bias_variable([K], name="G_B2")
G_B3 = utils.bias_variable([L], name="G_B3")
G_B4 = utils.bias_variable([M*8*8], name="G_B4")
G_B5 = utils.bias_variable([N], name="G_B5")
GH4 = tf.nn.relu(tf.matmul(y, tf.transpose(C_W5)) + G_B5)
GH3 = tf.nn.relu(tf.matmul(GH4, tf.transpose(C_W4)) + G_B4)
GHH3 = tf.reshape(GH3, shape=[-1, 8, 8, M])
stride = 2
GH2 = tf.nn.relu(tf.nn.conv2d_transpose(GHH3, C_W3, output_shape=[bs, 16, 16, L], strides=[1, stride, stride, 1]) + G_B3)
stride = 2
GH1 = tf.nn.relu(tf.nn.conv2d_transpose(GH2, C_W2, output_shape=[bs, 32, 32, K], strides=[1, stride, stride, 1]) + G_B2)
stride = 1
GXlogits = tf.nn.conv2d_transpose(GH1, C_W1, output_shape=[bs, 32, 32, 3], strides=[1, stride, stride, 1]) + G_B1
GXsigmoid = tf.nn.sigmoid(GXlogits)
return GXsigmoid, GXlogits
def plot_generator(samples):
if num_classes == 10:
fig = plt.figure(figsize=(5, 2))
gs = gridspec.GridSpec(2, 5)
else:
fig = plt.figure(figsize=(10, 10))
gs = gridspec.GridSpec(10, 10)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape((32,32,3)))
return fig
GXsigmoid, GXlogits = generator(GY, BS)
GXsigmoid_test, GXlogits_test = generator(GY, BS, reuse=True)
Ysoftmax, Ysigmoid, Ylogits = classifier(X)
model_classifier = ClassifierModel()
Ysoftmax_noisy, Ysigmoid_noisy, Ylogits_noisy = classifier(X_noisy, reuse=True)
Ysoftmax_adv, Ysigmoid_adv, Ylogits_adv = classifier(X_adv, reuse=True)
with tf.name_scope("loss"):
c_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_))
g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=GXlogits, labels=GX_))
g_loss_sum = tf.summary.scalar("g_loss", g_loss)
c_loss_sum = tf.summary.scalar("c_loss", c_loss)
with tf.name_scope("accuracy"):
with tf.name_scope("correct_prediction"):
correct_prediction = tf.equal(tf.argmax(Ysoftmax, 1), tf.argmax(Y_, 1))
with tf.name_scope("accuracy"):
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.name_scope("correct_prediction_noisy"):
correct_prediction_noisy = tf.equal(tf.argmax(Ysoftmax_noisy, 1), tf.argmax(Y_, 1))
with tf.name_scope("accuracy_noisy"):
accuracy_noisy = tf.reduce_mean(tf.cast(correct_prediction_noisy, tf.float32))
with tf.name_scope("correct_prediction_adv"):
correct_prediction_adv = tf.equal(tf.argmax(Ysoftmax_adv, 1), tf.argmax(Y_, 1))
with tf.name_scope("accuracy_adv"):
accuracy_adv = tf.reduce_mean(tf.cast(correct_prediction_adv, tf.float32))
accuracy_sum = tf.summary.scalar("accuracy", accuracy)
accuracy_noisy_sum = tf.summary.scalar("accuracy_noisy", accuracy_noisy)
accuracy_adv_sum = tf.summary.scalar("accuracy_adv", accuracy_adv)
with tf.name_scope("max_output"):
with tf.name_scope("max_output_test"):
max_output_sigmoid_test = tf.reduce_max(Ysigmoid)
max_output_softmax_test = tf.reduce_max(Ysoftmax)
with tf.name_scope("max_output_noise"):
max_output_sigmoid_noise = tf.reduce_max(Ysigmoid_noisy)
max_output_softmax_noise = tf.reduce_max(Ysoftmax_noisy)
with tf.name_scope("max_output_adv"):
max_output_sigmoid_adv = tf.reduce_max(Ysigmoid_adv)
max_output_softmax_adv = tf.reduce_max(Ysoftmax_adv)
max_output_sigmoid_test_sum = tf.summary.scalar("max_output_sigmoid_test", max_output_sigmoid_test)
max_output_softmax_test_sum = tf.summary.scalar("max_output_softmax_test", max_output_softmax_test)
max_output_sigmoid_noise_sum = tf.summary.scalar("max_output_sigmoid_noise", max_output_sigmoid_noise)
max_output_softmax_noise_sum = tf.summary.scalar("max_output_softmax_noise", max_output_softmax_noise)
max_output_sigmoid_adv_sum = tf.summary.scalar("max_output_sigmoid_adv", max_output_sigmoid_adv)
max_output_softmax_adv_sum = tf.summary.scalar("max_output_softmax_adv", max_output_softmax_adv)
utils.show_all_variables()
t_vars = tf.trainable_variables()
c_vars = [var for var in t_vars if 'C_' in var.name]\
if config_num < 3 else [var for var in t_vars if 'C_W' in var.name]
g_vars = [var for var in t_vars if 'C_W' in var.name or 'G_' in var.name]\
if config_num < 3 else c_vars
learning_rate_dis = lr
learning_rate_gen = lr
with tf.name_scope("train"):
c_train = tf.train.AdamOptimizer(learning_rate_dis).minimize(c_loss, var_list=c_vars)
g_train = tf.train.AdamOptimizer(learning_rate_gen).minimize(g_loss, var_list=g_vars)
g_sum = tf.summary.merge([g_loss_sum])
c_sum = tf.summary.merge([input_test_sum, accuracy_sum, c_loss_sum, max_output_sigmoid_test_sum, max_output_softmax_test_sum])
noise_sum = tf.summary.merge([max_output_sigmoid_noise_sum, max_output_softmax_noise_sum])
noisy_sum = tf.summary.merge([input_noisy_sum, accuracy_noisy_sum])
adv_sum = tf.summary.merge([input_adv_sum, accuracy_adv_sum, max_output_sigmoid_adv_sum, max_output_softmax_adv_sum])
folder_out = 'out/' + model_name + '/'
if not os.path.exists(folder_out):
os.makedirs(folder_out)
folder_csv = 'csv/' + model_name + '/'
if not os.path.exists(folder_csv):
os.makedirs(folder_csv)
folder_logs = 'logs/' + model_name
if not os.path.exists(folder_csv):
os.makedirs(folder_logs)
writer = tf.summary.FileWriter(folder_logs, sess.graph)
batch_size = 100
num_train_images = x_train.shape[0]
num_batches = num_train_images // batch_size
all_classes = np.eye(num_classes)
counter = 0
fgsm_params = {'eps': 0.03,
'clip_min': 0.,
'clip_max': 1.}
random_noise = np.random.random_sample(x_test.shape)
test_image_with_noise = np.clip(x_test + 0.1*random_noise, 0., 1.)
accuracy_list = []
sigmoid_list = []
softmax_list = []
tf.global_variables_initializer().run()
for i in range(50001):
if i % num_batches == 0:
idx_train = np.arange(x_train.shape[0])
np.random.shuffle(idx_train)
x_train, y_train = x_train[idx_train], y_train[idx_train]
idx = i % num_batches
batch_X = x_train[idx*batch_size:(idx+1)*batch_size]
batch_Y = y_train[idx*batch_size:(idx+1)*batch_size]
max_learning_rate = 0.003
min_learning_rate = 0.0001
decay_speed = 2000.0
learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * np.exp(-i/decay_speed)
if i % 500 == 0 or i == 50000:
counter += 1
samples = sess.run(GXsigmoid_test, feed_dict={GY: all_classes, BS: num_classes})
fig = plot_generator(samples)
plt.savefig(folder_out+"gen_"+str(i).zfill(6)+'.png', bbox_inches='tight')
plt.close(fig)
attack_fgsm = FastGradientMethod(model_classifier, sess=sess)
adv_x_np = attack_fgsm.generate_np(x_test, **fgsm_params)
fig = plot_generator(adv_x_np[:num_classes])
plt.savefig(folder_out+"adv_"+str(i).zfill(6)+'.png', bbox_inches='tight')
plt.close(fig)
accu_test, c_loss_test, sigmoid_test, softmax_test, sum_c = sess.run([accuracy, c_loss, max_output_sigmoid_test, max_output_softmax_test, c_sum], {X: x_test, Y_: y_test})
writer.add_summary(sum_c, i)
g_loss_test, sum_g = sess.run([g_loss, g_sum], {GY: batch_Y, GX_: batch_X, BS: batch_size})
writer.add_summary(sum_g, i)
print(str(i) + ": epoch " + str(i*batch_size//x_train.shape[0]+1)\
+ " - test loss class: " + str(c_loss_test) + " test loss gen: " + str(g_loss_test))
print("Real test images - Sigmoid: " + str(sigmoid_test) + "\tSoftmax: " + str(softmax_test) + "\taccuracy: "+ str(accu_test))
sigmoid_random, softmax_random, sum_random = sess.run([max_output_sigmoid_noise, max_output_softmax_noise, noise_sum], {X_noisy: random_noise})
writer.add_summary(sum_random, i)
accu_random, sum_noisy = sess.run([accuracy_noisy, noisy_sum], {X_noisy: test_image_with_noise, Y_: y_test})
writer.add_summary(sum_noisy, i)
print("Random noise images - Sigmoid: " + str(sigmoid_random) + "\tSoftmax: " + str(softmax_random) + "\taccuracy: "+ str(accu_random))
accu_adv, sigmoid_adv, softmax_adv, sum_adv = sess.run([accuracy_adv, max_output_sigmoid_adv, max_output_softmax_adv, adv_sum], {X_adv: adv_x_np, Y_: y_test})
writer.add_summary(sum_adv, i)
print("Adversarial examples - Sigmoid: " + str(sigmoid_adv) + "\tSoftmax: " + str(softmax_adv) + "\taccuracy: "+ str(accu_adv))
print()
accuracy_list.append([i, accu_test, accu_random, accu_adv, counter])
sigmoid_list.append([i, sigmoid_test, sigmoid_random, sigmoid_adv, counter])
softmax_list.append([i, softmax_test, softmax_random, softmax_adv, counter])
sess.run(c_train, {X: batch_X, Y_: batch_Y, lr: learning_rate})
if config_num == 1 or (config_num == 2 and i < 25000) or\
config_num == 4 or (config_num == 5 and i < 25000):
sess.run(g_train, {GY: batch_Y, GX_: batch_X, lr: learning_rate, BS: batch_size})
writer.close()
with open(folder_csv+"accuracy.csv", "w") as output:
writer = csv.writer(output, lineterminator='\n')
writer.writerows(accuracy_list)
with open(folder_csv+"sigmoid.csv", "w") as output:
writer = csv.writer(output, lineterminator='\n')
writer.writerows(sigmoid_list)
with open(folder_csv+"softmax.csv", "w") as output:
writer = csv.writer(output, lineterminator='\n')
writer.writerows(softmax_list)
accu_data = utils_csv.get_data_csv_file(folder_csv+"accuracy.csv")
sigmoid_data = utils_csv.get_data_csv_file(folder_csv+"sigmoid.csv")
softmax_data = utils_csv.get_data_csv_file(folder_csv+"softmax.csv")
utils_csv.print_best(accu_data, sigmoid_data, softmax_data, folder_csv+"summary.txt")
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.