max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
shiyanlou_cs866/boston_price.py | tongxindao/shiyanlou | 0 | 6618951 | # -*- coding: utf-8 -*
from sklearn import datasets
from sklearn.svm import LinearSVR
from matplotlib import pyplot as plt
from sklearn.cross_validation import cross_val_predict
boston = datasets.load_boston()
feature = boston.data
target = boston.target
model = LinearSVR()
predictions = cross_val_predict(model, feature, target, cv=10)
plt.scatter(target, predictions)
plt.plot([target.min(), target.max()], [target.min(), target.max()], 'k--', lw=4)
plt.xlabel("true_target")
plt.ylabel("prediction")
plt.show()
| # -*- coding: utf-8 -*
from sklearn import datasets
from sklearn.svm import LinearSVR
from matplotlib import pyplot as plt
from sklearn.cross_validation import cross_val_predict
boston = datasets.load_boston()
feature = boston.data
target = boston.target
model = LinearSVR()
predictions = cross_val_predict(model, feature, target, cv=10)
plt.scatter(target, predictions)
plt.plot([target.min(), target.max()], [target.min(), target.max()], 'k--', lw=4)
plt.xlabel("true_target")
plt.ylabel("prediction")
plt.show()
| en | 0.732069 | # -*- coding: utf-8 -* | 3.171345 | 3 |
project-euler/utils/triangle.py | pietrodll/coding-challenges | 0 | 6618952 | """This module contains functions to calculate paths on a triangle on numbers, mostly
useful for problems 18 and 67"""
from math import sqrt
def line_from_index(i):
return int((sqrt(8*i + 1) - 1) / 2)
def left_child(i):
return line_from_index(i) + i + 1
def right_child(i):
return left_child(i) + 1
def right_parent(i):
line = line_from_index(i)
p = i - line
if line_from_index(p) != (line - 1):
return None
return p
def left_parent(i):
line = line_from_index(i)
p = i - line - 1
if p < 0 or line_from_index(p) != (line - 1):
return None
return p
def max_paths(L):
n = len(L)
M = [0] * n
M[0] = L[0]
# fill the triangle at the two extremities
l = left_child(0)
r = right_child(0)
while r < n and l < n:
M[l] = L[l] + M[right_parent(l)]
M[r] = L[r] + M[left_parent(r)]
l = left_child(l)
r = right_child(r)
# fill the rest of the triangle
for i in range(n):
l = left_parent(i)
r = right_parent(i)
if l is not None and r is not None:
M[i] = L[i] + max(M[l], M[r])
return M
def max_path_to_bottom(L):
M = max_paths(L)
last_line = line_from_index(len(L) - 1)
first_of_last_line = last_line * (last_line + 1) // 2
m = M[first_of_last_line]
for i in range(first_of_last_line, len(L)):
if M[i] > m:
m = M[i]
return m
| """This module contains functions to calculate paths on a triangle on numbers, mostly
useful for problems 18 and 67"""
from math import sqrt
def line_from_index(i):
return int((sqrt(8*i + 1) - 1) / 2)
def left_child(i):
return line_from_index(i) + i + 1
def right_child(i):
return left_child(i) + 1
def right_parent(i):
line = line_from_index(i)
p = i - line
if line_from_index(p) != (line - 1):
return None
return p
def left_parent(i):
line = line_from_index(i)
p = i - line - 1
if p < 0 or line_from_index(p) != (line - 1):
return None
return p
def max_paths(L):
n = len(L)
M = [0] * n
M[0] = L[0]
# fill the triangle at the two extremities
l = left_child(0)
r = right_child(0)
while r < n and l < n:
M[l] = L[l] + M[right_parent(l)]
M[r] = L[r] + M[left_parent(r)]
l = left_child(l)
r = right_child(r)
# fill the rest of the triangle
for i in range(n):
l = left_parent(i)
r = right_parent(i)
if l is not None and r is not None:
M[i] = L[i] + max(M[l], M[r])
return M
def max_path_to_bottom(L):
M = max_paths(L)
last_line = line_from_index(len(L) - 1)
first_of_last_line = last_line * (last_line + 1) // 2
m = M[first_of_last_line]
for i in range(first_of_last_line, len(L)):
if M[i] > m:
m = M[i]
return m
| en | 0.795981 | This module contains functions to calculate paths on a triangle on numbers, mostly useful for problems 18 and 67 # fill the triangle at the two extremities # fill the rest of the triangle | 3.881488 | 4 |
python/0001_multiples_of_3_and_5.py | chrisneave/project-euler | 0 | 6618953 | <reponame>chrisneave/project-euler<gh_stars>0
max = 1000
sum = 0
for n in range(1, max):
if n % 3 == 0 or n % 5 == 0:
sum += n
print(sum)
| max = 1000
sum = 0
for n in range(1, max):
if n % 3 == 0 or n % 5 == 0:
sum += n
print(sum) | none | 1 | 3.7449 | 4 | |
twodlearn/convnet.py | danmar3/twodlearn | 0 | 6618954 | <gh_stars>0
from __future__ import division
from __future__ import print_function
import typing
import collections
import tensorflow as tf
from . import core
from .core import exceptions
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
"""Determines output length of a convolution given input length.
Args:
input_length (int): integer.
filter_size (int): integer.
padding (str): one of "same", "valid", "full", "causal"
stride (int): integer.
dilation (int): dilation rate, integer.
Returns:
int: the output length.
"""
if input_length is None:
return None
assert padding in {'same', 'valid', 'full', 'causal'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if padding in ['same', 'causal']:
output_length = input_length
elif padding == 'valid':
output_length = input_length - dilated_filter_size + 1
elif padding == 'full':
output_length = input_length + dilated_filter_size - 1
return (output_length + stride - 1) // stride
@core.create_init_docstring
class Conv2DLayer(core.Layer):
@core.InputArgument
def kernel_size(self, value):
'''Size of the convolution kernels. Must be a tuple/list of two
elements (height, width)
'''
if value is None:
raise core.exceptions.ArgumentNotProvided(self, 'kernel_size')
if isinstance(value, collections.Iterable):
assert len(value) == 2, 'kernel_size must have a length of 2'
if isinstance(value, int):
value = [value, value]
return value
@core.InputArgument
def strides(self, value):
'''Convolution strides. Default is (1, 1).'''
if value is None:
value = (1, 1)
if isinstance(value, collections.Iterable):
assert len(value) == 2, 'strides must have a length of 2'
return value
@core.InputArgument
def input_shape(self, value):
'''Input tensor shape: (n_samples, n_rows, n_cols, n_channels).'''
if value is None:
raise core.exceptions.ArgumentNotProvided(self)
if len(value) != 4:
raise ValueError('input_shape must specify four values: '
'(n_samples, n_rows, n_cols, n_channels)')
if not isinstance(value, tf.TensorShape):
value = tf.TensorShape(value)
return value
@core.InputArgument
def filters(self, value):
'''Number of filters (int), equal to the number of output maps.'''
if value is None:
raise core.exceptions.ArgumentNotProvided(self)
if not isinstance(value, int):
raise TypeError('filters must be an integer')
return value
@core.ParameterInit(lazzy=True)
def kernel(self, initializer=None, trainable=True, **kargs):
core.assert_initialized(
self, 'kernel', ['kernel_size', 'input_shape'])
if initializer is None:
initializer = tf.keras.initializers.glorot_uniform()
return self.add_weight(
name='kernel',
initializer=initializer,
shape=[self.kernel_size[0], self.kernel_size[1],
self.input_shape[-1].value, self.filters],
trainable=trainable,
**kargs)
def _tdl_check_kwargs(self, kwargs):
if ('bias' in kwargs and 'use_bias' in kwargs):
raise ValueError('bias and use_bias cannot be specified at the '
'same time')
return
@core.ParameterInit(lazzy=True)
def bias(self, initializer=None, trainable=True, use_bias=True, **kargs):
core.assert_initialized(self, 'bias', ['filters'])
core.assert_initialized_if_available(self, 'bias', ['use_bias'])
if core.is_property_initialized(self, 'use_bias'):
use_bias = (use_bias and self.use_bias)
if use_bias is False:
return None
if initializer is None:
initializer = tf.keras.initializers.zeros()
return self.add_weight(
name='bias',
initializer=initializer,
shape=[self.filters],
trainable=trainable,
**kargs)
@core.InputArgument
def use_bias(self, value: typing.Union[bool, None]):
core.assert_initialized_if_available(
self, 'use_bias', ['bias', 'filters'])
if value is None:
if core.is_property_initialized(self, 'bias'):
value = self.bias is not None
else:
value = True
assert isinstance(value, bool), 'use_bias should be bool'
if value is True:
if core.is_property_initialized(self, 'bias'):
assert self.bias is not None, \
'use_bias is True, but bias was set to None'
if value is False:
if core.is_property_initialized(self, 'bias'):
assert self.bias is None, \
'use_bias is False, but bias was not set to None'
return value
@core.InputArgument
def padding(self, value):
"""Padding for the convolution. It could be either 'valid' or 'same'.
Default is 'valid'.
"""
if value is None:
value = 'valid'
assert value in ('valid', 'same'),\
'padding should be either same or valid'
return value
@core.InputArgument
def dilation_rate(self, value):
'''Defaults to (1, 1).'''
if value is None:
value = (1, 1)
if isinstance(value, int):
value = (value, value)
if not (isinstance(value, collections.Iterable) and len(value) == 2):
raise ValueError('dilation_rate must be an iterable of length 2')
value = tuple((v if isinstance(v, int) else int(v))
for v in value)
return value
def compute_output_shape(self, input_shape=None):
if input_shape is None:
core.assert_initialized(
self, 'copute_output_shape',
['input_shape', 'kernel_size', 'padding', 'strides',
'dilation_rate'])
input_shape = self.input_shape
input_shape = tf.TensorShape(input_shape).as_list()
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tf.TensorShape([input_shape[0]] + new_space + [self.filters])
def call(self, inputs, *args, **kargs):
inputs = tf.convert_to_tensor(inputs)
if not core.is_property_initialized(self, 'input_shape'):
self.input_shape = inputs.shape
core.assert_initialized(
self, 'call', ['kernel', 'bias', 'strides', 'padding'])
conv = tf.nn.conv2d(
inputs, self.kernel,
strides=[1, self.strides[0], self.strides[1], 1],
padding=self.padding.upper(),
dilations=[1, self.dilation_rate[0], self.dilation_rate[1], 1])
if self.bias is not None:
conv = conv + self.bias
return conv
@core.create_init_docstring
class Conv2DTranspose(Conv2DLayer):
@core.ParameterInit(lazzy=True)
def kernel(self, initializer=None, trainable=True, **kargs):
core.assert_initialized(
self, 'kernel', ['kernel_size', 'input_shape'])
if initializer is None:
initializer = tf.keras.initializers.glorot_uniform()
return self.add_weight(
name='kernel',
initializer=initializer,
shape=[self.kernel_size[0], self.kernel_size[1],
self.filters, self.input_shape[-1].value],
trainable=trainable,
**kargs)
@core.InputArgument
def output_padding(self, value):
if isinstance(value, (list, tuple)):
assert len(value) == 2, 'kernel_size must have a length of 2'
else:
value = (value, value)
return value
@staticmethod
def transpose_output_lenght(
input_length, filter_size, padding,
output_padding=None, stride=0, dilation=1):
assert padding in {'same', 'valid', 'full'}
if input_length is None:
return None
# Get the dilated kernel size
filter_size = filter_size + (filter_size - 1) * (dilation - 1)
# Infer length if output padding is None, else compute the exact length
if output_padding is None:
if padding == 'valid':
length = input_length * stride + max(filter_size - stride, 0)
elif padding == 'full':
length = input_length * stride - (stride + filter_size - 2)
elif padding == 'same':
length = input_length * stride
else:
if padding == 'same':
pad = filter_size // 2
elif padding == 'valid':
pad = 0
elif padding == 'full':
pad = filter_size - 1
length = ((input_length - 1) * stride + filter_size - 2 * pad +
output_padding)
return length
def _compute_output_shape(self, input_shape):
# bypass eager iterable error
batch, height, width, depth = [input_shape[i] for i in range(4)]
if self.output_padding is None:
output_padding = (None, None)
else:
output_padding = self.output_padding
new_h = self.transpose_output_lenght(
height, self.kernel_size[0], padding=self.padding,
output_padding=output_padding[0], stride=self.strides[0],
dilation=self.dilation_rate[0])
new_w = self.transpose_output_lenght(
width, self.kernel_size[1], padding=self.padding,
output_padding=output_padding[1], stride=self.strides[1],
dilation=self.dilation_rate[1])
return (batch, new_h, new_w, self.filters)
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape)
assert input_shape.ndims == 4, 'provided shape is not four dimensional'
input_shape = input_shape.as_list()
return tf.TensorShape(self._compute_output_shape(input_shape))
def call(self, inputs, *args, **kargs):
inputs = tf.convert_to_tensor(inputs)
if not core.is_property_initialized(self, 'input_shape'):
self.input_shape = inputs.shape
core.assert_initialized(
self, 'call', ['kernel', 'bias', 'strides', 'padding'])
output_shape = self._compute_output_shape(tf.shape(inputs))
output_shape = tf.stack(output_shape)
conv = tf.keras.backend.conv2d_transpose(
inputs,
self.kernel, # tf.transpose(self.kernel, perm=[0, 1, 3, 2]),
output_shape,
strides=tuple(self.strides),
padding=self.padding,
dilation_rate=self.dilation_rate
)
if self.bias is not None:
conv = conv + self.bias
# output fix shape
if inputs.shape[1:].is_fully_defined():
conv.set_shape(self._compute_output_shape(inputs.shape))
return conv
@core.create_init_docstring
class Conv1x1Proj(core.Layer):
@core.InputArgument
def units(self, value: int):
'''Number of output units (int).'''
if value is None:
raise core.exceptions.ArgumentNotProvided(self)
if not isinstance(value, int):
raise TypeError('units must be an integer')
return value
@core.ParameterInit(lazzy=True)
def kernel(self, initializer=None, trainable=True, **kargs):
core.assert_initialized(
self, 'kernel', ['units', 'input_shape'])
if initializer is None:
initializer = tf.keras.initializers.glorot_uniform()
return self.add_weight(
name='kernel',
initializer=initializer,
shape=[self.input_shape[-1].value, self.units],
trainable=trainable,
**kargs)
@core.ParameterInit(lazzy=True)
def bias(self, initializer=None, trainable=True, use_bias=True, **kargs):
core.assert_initialized(self, 'bias', ['units', 'use_bias'])
if (use_bias and self.use_bias) is False:
return None
if initializer is None:
initializer = tf.keras.initializers.zeros()
return self.add_weight(
name='bias',
initializer=initializer,
shape=[self.units],
trainable=trainable,
**kargs)
@core.InputArgument
def use_bias(self, value: typing.Union[bool, None]):
core.assert_initialized_if_available(self, 'use_bias', ['bias'])
if value is None:
if core.is_property_initialized(self, 'bias'):
value = self.bias is not None
else:
value = True
assert isinstance(value, bool), 'use_bias should be bool'
if value is True:
if core.is_property_initialized(self, 'bias'):
assert self.bias is not None, \
'use_bias is True, but bias was set to None'
if value is False:
if core.is_property_initialized(self, 'bias'):
assert self.bias is None, \
'use_bias is False, but bias was not set to None'
return value
@core.InputArgument
def activation(self, value):
return value
@core.Submodel
def _linop(self, _):
core.assert_initialized(self, '_linop', ['kernel'])
return tf.linalg.LinearOperatorFullMatrix(self.kernel)
def compute_output_shape(self, input_shape=None):
input_shape = tf.TensorShape(input_shape)
output_shape = input_shape[:-1]
return output_shape.concatenate(self.units)
def call(self, inputs):
output = self._linop.matvec(inputs, adjoint=True)
if self.bias is not None:
output = output + self.bias
if self.activation is not None:
output = self.activation(output)
return output
def get_transpose(self, use_bias=None, activation=None, trainable=True):
core.assert_initialized(
self, 'get_transpose', ['kernel', 'bias', 'activation'])
kargs = dict()
if use_bias is False or self.bias is None:
kargs['bias'] = None
transpose = type(self)(
units=self.kernel.shape[0].value,
kernel=tf.transpose(self.kernel),
activation=activation,
**kargs)
if trainable:
transpose.add_weight(self.kernel)
return transpose
| from __future__ import division
from __future__ import print_function
import typing
import collections
import tensorflow as tf
from . import core
from .core import exceptions
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
"""Determines output length of a convolution given input length.
Args:
input_length (int): integer.
filter_size (int): integer.
padding (str): one of "same", "valid", "full", "causal"
stride (int): integer.
dilation (int): dilation rate, integer.
Returns:
int: the output length.
"""
if input_length is None:
return None
assert padding in {'same', 'valid', 'full', 'causal'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if padding in ['same', 'causal']:
output_length = input_length
elif padding == 'valid':
output_length = input_length - dilated_filter_size + 1
elif padding == 'full':
output_length = input_length + dilated_filter_size - 1
return (output_length + stride - 1) // stride
@core.create_init_docstring
class Conv2DLayer(core.Layer):
@core.InputArgument
def kernel_size(self, value):
'''Size of the convolution kernels. Must be a tuple/list of two
elements (height, width)
'''
if value is None:
raise core.exceptions.ArgumentNotProvided(self, 'kernel_size')
if isinstance(value, collections.Iterable):
assert len(value) == 2, 'kernel_size must have a length of 2'
if isinstance(value, int):
value = [value, value]
return value
@core.InputArgument
def strides(self, value):
'''Convolution strides. Default is (1, 1).'''
if value is None:
value = (1, 1)
if isinstance(value, collections.Iterable):
assert len(value) == 2, 'strides must have a length of 2'
return value
@core.InputArgument
def input_shape(self, value):
'''Input tensor shape: (n_samples, n_rows, n_cols, n_channels).'''
if value is None:
raise core.exceptions.ArgumentNotProvided(self)
if len(value) != 4:
raise ValueError('input_shape must specify four values: '
'(n_samples, n_rows, n_cols, n_channels)')
if not isinstance(value, tf.TensorShape):
value = tf.TensorShape(value)
return value
@core.InputArgument
def filters(self, value):
'''Number of filters (int), equal to the number of output maps.'''
if value is None:
raise core.exceptions.ArgumentNotProvided(self)
if not isinstance(value, int):
raise TypeError('filters must be an integer')
return value
@core.ParameterInit(lazzy=True)
def kernel(self, initializer=None, trainable=True, **kargs):
core.assert_initialized(
self, 'kernel', ['kernel_size', 'input_shape'])
if initializer is None:
initializer = tf.keras.initializers.glorot_uniform()
return self.add_weight(
name='kernel',
initializer=initializer,
shape=[self.kernel_size[0], self.kernel_size[1],
self.input_shape[-1].value, self.filters],
trainable=trainable,
**kargs)
def _tdl_check_kwargs(self, kwargs):
if ('bias' in kwargs and 'use_bias' in kwargs):
raise ValueError('bias and use_bias cannot be specified at the '
'same time')
return
@core.ParameterInit(lazzy=True)
def bias(self, initializer=None, trainable=True, use_bias=True, **kargs):
core.assert_initialized(self, 'bias', ['filters'])
core.assert_initialized_if_available(self, 'bias', ['use_bias'])
if core.is_property_initialized(self, 'use_bias'):
use_bias = (use_bias and self.use_bias)
if use_bias is False:
return None
if initializer is None:
initializer = tf.keras.initializers.zeros()
return self.add_weight(
name='bias',
initializer=initializer,
shape=[self.filters],
trainable=trainable,
**kargs)
@core.InputArgument
def use_bias(self, value: typing.Union[bool, None]):
core.assert_initialized_if_available(
self, 'use_bias', ['bias', 'filters'])
if value is None:
if core.is_property_initialized(self, 'bias'):
value = self.bias is not None
else:
value = True
assert isinstance(value, bool), 'use_bias should be bool'
if value is True:
if core.is_property_initialized(self, 'bias'):
assert self.bias is not None, \
'use_bias is True, but bias was set to None'
if value is False:
if core.is_property_initialized(self, 'bias'):
assert self.bias is None, \
'use_bias is False, but bias was not set to None'
return value
@core.InputArgument
def padding(self, value):
"""Padding for the convolution. It could be either 'valid' or 'same'.
Default is 'valid'.
"""
if value is None:
value = 'valid'
assert value in ('valid', 'same'),\
'padding should be either same or valid'
return value
@core.InputArgument
def dilation_rate(self, value):
'''Defaults to (1, 1).'''
if value is None:
value = (1, 1)
if isinstance(value, int):
value = (value, value)
if not (isinstance(value, collections.Iterable) and len(value) == 2):
raise ValueError('dilation_rate must be an iterable of length 2')
value = tuple((v if isinstance(v, int) else int(v))
for v in value)
return value
def compute_output_shape(self, input_shape=None):
if input_shape is None:
core.assert_initialized(
self, 'copute_output_shape',
['input_shape', 'kernel_size', 'padding', 'strides',
'dilation_rate'])
input_shape = self.input_shape
input_shape = tf.TensorShape(input_shape).as_list()
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tf.TensorShape([input_shape[0]] + new_space + [self.filters])
def call(self, inputs, *args, **kargs):
inputs = tf.convert_to_tensor(inputs)
if not core.is_property_initialized(self, 'input_shape'):
self.input_shape = inputs.shape
core.assert_initialized(
self, 'call', ['kernel', 'bias', 'strides', 'padding'])
conv = tf.nn.conv2d(
inputs, self.kernel,
strides=[1, self.strides[0], self.strides[1], 1],
padding=self.padding.upper(),
dilations=[1, self.dilation_rate[0], self.dilation_rate[1], 1])
if self.bias is not None:
conv = conv + self.bias
return conv
@core.create_init_docstring
class Conv2DTranspose(Conv2DLayer):
@core.ParameterInit(lazzy=True)
def kernel(self, initializer=None, trainable=True, **kargs):
core.assert_initialized(
self, 'kernel', ['kernel_size', 'input_shape'])
if initializer is None:
initializer = tf.keras.initializers.glorot_uniform()
return self.add_weight(
name='kernel',
initializer=initializer,
shape=[self.kernel_size[0], self.kernel_size[1],
self.filters, self.input_shape[-1].value],
trainable=trainable,
**kargs)
@core.InputArgument
def output_padding(self, value):
if isinstance(value, (list, tuple)):
assert len(value) == 2, 'kernel_size must have a length of 2'
else:
value = (value, value)
return value
@staticmethod
def transpose_output_lenght(
input_length, filter_size, padding,
output_padding=None, stride=0, dilation=1):
assert padding in {'same', 'valid', 'full'}
if input_length is None:
return None
# Get the dilated kernel size
filter_size = filter_size + (filter_size - 1) * (dilation - 1)
# Infer length if output padding is None, else compute the exact length
if output_padding is None:
if padding == 'valid':
length = input_length * stride + max(filter_size - stride, 0)
elif padding == 'full':
length = input_length * stride - (stride + filter_size - 2)
elif padding == 'same':
length = input_length * stride
else:
if padding == 'same':
pad = filter_size // 2
elif padding == 'valid':
pad = 0
elif padding == 'full':
pad = filter_size - 1
length = ((input_length - 1) * stride + filter_size - 2 * pad +
output_padding)
return length
def _compute_output_shape(self, input_shape):
# bypass eager iterable error
batch, height, width, depth = [input_shape[i] for i in range(4)]
if self.output_padding is None:
output_padding = (None, None)
else:
output_padding = self.output_padding
new_h = self.transpose_output_lenght(
height, self.kernel_size[0], padding=self.padding,
output_padding=output_padding[0], stride=self.strides[0],
dilation=self.dilation_rate[0])
new_w = self.transpose_output_lenght(
width, self.kernel_size[1], padding=self.padding,
output_padding=output_padding[1], stride=self.strides[1],
dilation=self.dilation_rate[1])
return (batch, new_h, new_w, self.filters)
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape)
assert input_shape.ndims == 4, 'provided shape is not four dimensional'
input_shape = input_shape.as_list()
return tf.TensorShape(self._compute_output_shape(input_shape))
def call(self, inputs, *args, **kargs):
inputs = tf.convert_to_tensor(inputs)
if not core.is_property_initialized(self, 'input_shape'):
self.input_shape = inputs.shape
core.assert_initialized(
self, 'call', ['kernel', 'bias', 'strides', 'padding'])
output_shape = self._compute_output_shape(tf.shape(inputs))
output_shape = tf.stack(output_shape)
conv = tf.keras.backend.conv2d_transpose(
inputs,
self.kernel, # tf.transpose(self.kernel, perm=[0, 1, 3, 2]),
output_shape,
strides=tuple(self.strides),
padding=self.padding,
dilation_rate=self.dilation_rate
)
if self.bias is not None:
conv = conv + self.bias
# output fix shape
if inputs.shape[1:].is_fully_defined():
conv.set_shape(self._compute_output_shape(inputs.shape))
return conv
@core.create_init_docstring
class Conv1x1Proj(core.Layer):
@core.InputArgument
def units(self, value: int):
'''Number of output units (int).'''
if value is None:
raise core.exceptions.ArgumentNotProvided(self)
if not isinstance(value, int):
raise TypeError('units must be an integer')
return value
@core.ParameterInit(lazzy=True)
def kernel(self, initializer=None, trainable=True, **kargs):
core.assert_initialized(
self, 'kernel', ['units', 'input_shape'])
if initializer is None:
initializer = tf.keras.initializers.glorot_uniform()
return self.add_weight(
name='kernel',
initializer=initializer,
shape=[self.input_shape[-1].value, self.units],
trainable=trainable,
**kargs)
@core.ParameterInit(lazzy=True)
def bias(self, initializer=None, trainable=True, use_bias=True, **kargs):
core.assert_initialized(self, 'bias', ['units', 'use_bias'])
if (use_bias and self.use_bias) is False:
return None
if initializer is None:
initializer = tf.keras.initializers.zeros()
return self.add_weight(
name='bias',
initializer=initializer,
shape=[self.units],
trainable=trainable,
**kargs)
@core.InputArgument
def use_bias(self, value: typing.Union[bool, None]):
core.assert_initialized_if_available(self, 'use_bias', ['bias'])
if value is None:
if core.is_property_initialized(self, 'bias'):
value = self.bias is not None
else:
value = True
assert isinstance(value, bool), 'use_bias should be bool'
if value is True:
if core.is_property_initialized(self, 'bias'):
assert self.bias is not None, \
'use_bias is True, but bias was set to None'
if value is False:
if core.is_property_initialized(self, 'bias'):
assert self.bias is None, \
'use_bias is False, but bias was not set to None'
return value
@core.InputArgument
def activation(self, value):
return value
@core.Submodel
def _linop(self, _):
core.assert_initialized(self, '_linop', ['kernel'])
return tf.linalg.LinearOperatorFullMatrix(self.kernel)
def compute_output_shape(self, input_shape=None):
input_shape = tf.TensorShape(input_shape)
output_shape = input_shape[:-1]
return output_shape.concatenate(self.units)
def call(self, inputs):
output = self._linop.matvec(inputs, adjoint=True)
if self.bias is not None:
output = output + self.bias
if self.activation is not None:
output = self.activation(output)
return output
def get_transpose(self, use_bias=None, activation=None, trainable=True):
core.assert_initialized(
self, 'get_transpose', ['kernel', 'bias', 'activation'])
kargs = dict()
if use_bias is False or self.bias is None:
kargs['bias'] = None
transpose = type(self)(
units=self.kernel.shape[0].value,
kernel=tf.transpose(self.kernel),
activation=activation,
**kargs)
if trainable:
transpose.add_weight(self.kernel)
return transpose | en | 0.563273 | Determines output length of a convolution given input length.
Args:
input_length (int): integer.
filter_size (int): integer.
padding (str): one of "same", "valid", "full", "causal"
stride (int): integer.
dilation (int): dilation rate, integer.
Returns:
int: the output length. Size of the convolution kernels. Must be a tuple/list of two
elements (height, width) Convolution strides. Default is (1, 1). Input tensor shape: (n_samples, n_rows, n_cols, n_channels). Number of filters (int), equal to the number of output maps. Padding for the convolution. It could be either 'valid' or 'same'.
Default is 'valid'. Defaults to (1, 1). # Get the dilated kernel size # Infer length if output padding is None, else compute the exact length # bypass eager iterable error # tf.transpose(self.kernel, perm=[0, 1, 3, 2]), # output fix shape Number of output units (int). | 2.650937 | 3 |
diventi/feedbacks/migrations/0020_auto_20181016_0827.py | flavoi/diven | 2 | 6618955 | <reponame>flavoi/diven
# Generated by Django 2.0.8 on 2018-10-16 06:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('feedbacks', '0019_auto_20181016_0824'),
]
operations = [
migrations.RemoveField(
model_name='survey',
name='image',
),
migrations.RemoveField(
model_name='survey',
name='label',
),
migrations.RemoveField(
model_name='survey',
name='label_en',
),
migrations.RemoveField(
model_name='survey',
name='label_it',
),
]
| # Generated by Django 2.0.8 on 2018-10-16 06:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('feedbacks', '0019_auto_20181016_0824'),
]
operations = [
migrations.RemoveField(
model_name='survey',
name='image',
),
migrations.RemoveField(
model_name='survey',
name='label',
),
migrations.RemoveField(
model_name='survey',
name='label_en',
),
migrations.RemoveField(
model_name='survey',
name='label_it',
),
] | en | 0.812313 | # Generated by Django 2.0.8 on 2018-10-16 06:27 | 1.403685 | 1 |
D1C1/run.py | RFC1928/AOC2020 | 0 | 6618956 | # Find the product of any 2 numbers in the input that sum up to 2020
f = open("input.txt","r")
numlist = []
for x in f:
numlist.append(x.replace('\n',''))
for x in numlist:
for y in numlist:
if (int(x)+int(y)==2020):
print(" X:"+x+"; Y:"+y+"; X+Y="+str(int(x)+int(y))+"; X*Y="+str(int(x)*int(y))) | # Find the product of any 2 numbers in the input that sum up to 2020
f = open("input.txt","r")
numlist = []
for x in f:
numlist.append(x.replace('\n',''))
for x in numlist:
for y in numlist:
if (int(x)+int(y)==2020):
print(" X:"+x+"; Y:"+y+"; X+Y="+str(int(x)+int(y))+"; X*Y="+str(int(x)*int(y))) | en | 0.861603 | # Find the product of any 2 numbers in the input that sum up to 2020 | 3.673937 | 4 |
tests/test_plotting.py | mramospe/hepspt | 0 | 6618957 | <filename>tests/test_plotting.py
'''
Test functions for the "plotting" module.
'''
__author__ = ['<NAME>']
__email__ = ['<EMAIL>']
import hep_spt
import matplotlib
import numpy as np
import os
import pytest
def test_available_styles():
'''
Test for the function "available_styles".
'''
styles = {'default', 'singleplot', 'multiplot'}
assert len(set(hep_spt.available_styles()) - styles) == 0
def test_corr_hist2d():
'''
Test for the "corr_hist2d" function.
'''
matrix = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
hep_spt.corr_hist2d(matrix, ['a', 'b', 'c'])
def test_line_style():
'''
Test for the "line_style" function.
'''
for name in hep_spt.plotting.LINE_STYLES:
matplotlib.lines.Line2D([], [], ls=hep_spt.line_style(name))
with pytest.raises(KeyError):
hep_spt.line_style('unknown style')
def test_modified_format():
'''
Test for the "modified_format" function.
'''
prev = matplotlib.rcParams['font.size']
with hep_spt.modified_format({'font.size': 10}):
assert matplotlib.rcParams['font.size'] == 10
assert matplotlib.rcParams['font.size'] == prev
def test_opt_fig_div():
'''
Test for the "opt_fig_div" function.
'''
assert hep_spt.opt_fig_div(4) == (2, 2)
assert hep_spt.opt_fig_div(9) == (3, 3)
assert hep_spt.opt_fig_div(5) == (2, 3)
assert hep_spt.opt_fig_div(6) == (2, 3)
def test_path_to_styles():
    '''
    Test for the function "path_to_styles".
    '''
    path = hep_spt.path_to_styles()
    # Strip the ".mplstyle" extension from every file in the styles folder.
    names = {f[:f.find('.mplstyle')] for f in os.listdir(path)}
    assert len(names - set(hep_spt.available_styles())) == 0
def test_samples_cycler():
    '''
    Test for the function "samples_cycler".
    '''
    cfg = {
        'K': 'k',
        'W': 'w',
        'R': 'r',
        'Y': 'y',
        'G': 'g',
        'C': 'c',
        'B': 'b',
        'M': 'm',
    }
    # Construct a cycler and check that each entry keeps the style
    # associated with its label.
    # BUG FIX: the original comparison lacked the "assert" keyword, so the
    # check was a no-op expression and never tested anything.
    cyc = hep_spt.samples_cycler(cfg.keys(), ls=cfg.values())
    for c in cyc:
        assert c['ls'] == cfg[c['label']]
    # Check that a warning is displayed when the number of samples is
    # greater than the number of styles. The check is done considering
    # that the number of samples is a multiple and non-multiple of the
    # number of styles.
    with pytest.warns(RuntimeWarning):
        ls = list(sorted(cfg.values())[:5])
        cyc = hep_spt.samples_cycler(cfg.keys(), ls=ls)
    assert len(cyc) == len(cfg)
    cyc_ls = list(c['ls'] for c in cyc)
    assert ls + ls[:3] == cyc_ls
    with pytest.warns(RuntimeWarning):
        ls = list(sorted(cfg.values())[:4])
        cyc = hep_spt.samples_cycler(cfg.keys(), ls=ls)
    assert len(cyc) == len(cfg)
    cyc_ls = list(c['ls'] for c in cyc)
    assert 2*ls == cyc_ls
def test_set_style():
    '''
    Test for the "set_style" function.
    '''
    # Every available style must load without raising.
    for style in hep_spt.available_styles():
        hep_spt.set_style(style)
def test_text_in_rectangles():
    '''
    Test the "text_in_rectangles" function.
    '''
    # Four points at the corners of the unit square.
    # (the unused "weights" array of the original version was removed)
    smp = np.array([
        np.array([0., 0., 1., 1.]),
        np.array([0., 1., 0., 1.])
    ]).T
    nbins = 2
    bins = hep_spt.adbin_hist(smp, nbins)
    recs, conts = hep_spt.adbin_hist2d_rectangles(bins, smp)
    # Drawing the per-rectangle text must not raise.
    hep_spt.text_in_rectangles(recs, map(str, conts))
| <filename>tests/test_plotting.py
'''
Test functions for the "plotting" module.
'''
__author__ = ['<NAME>']
__email__ = ['<EMAIL>']
import hep_spt
import matplotlib
import numpy as np
import os
import pytest
def test_available_styles():
    '''
    Test for the function "available_styles".
    '''
    known = {'default', 'singleplot', 'multiplot'}
    found = set(hep_spt.available_styles())
    assert len(found - known) == 0
def test_corr_hist2d():
    '''
    Test for the "corr_hist2d" function.
    '''
    # A 3x3 identity correlation matrix must be accepted.
    corr = np.identity(3)
    hep_spt.corr_hist2d(corr, ['a', 'b', 'c'])
def test_line_style():
    '''
    Test for the "line_style" function.
    '''
    # Every registered style must be accepted by matplotlib.
    for style_name in hep_spt.plotting.LINE_STYLES:
        matplotlib.lines.Line2D([], [], ls=hep_spt.line_style(style_name))
    # Unknown style names must raise a KeyError.
    with pytest.raises(KeyError):
        hep_spt.line_style('unknown style')
def test_modified_format():
    '''
    Test for the "modified_format" function.
    '''
    original = matplotlib.rcParams['font.size']
    with hep_spt.modified_format({'font.size': 10}):
        assert matplotlib.rcParams['font.size'] == 10
    # The previous value must be restored when leaving the context.
    assert matplotlib.rcParams['font.size'] == original
def test_opt_fig_div():
    '''
    Test for the "opt_fig_div" function.
    '''
    # Number of figures -> expected (rows, columns) division.
    cases = {4: (2, 2), 9: (3, 3), 5: (2, 3), 6: (2, 3)}
    for nfigs, expected in cases.items():
        assert hep_spt.opt_fig_div(nfigs) == expected
def test_path_to_styles():
    '''
    Test for the function "path_to_styles".
    '''
    path = hep_spt.path_to_styles()
    # Strip the ".mplstyle" extension from every file in the styles folder.
    names = {f[:f.find('.mplstyle')] for f in os.listdir(path)}
    assert len(names - set(hep_spt.available_styles())) == 0
def test_samples_cycler():
    '''
    Test for the function "samples_cycler".
    '''
    cfg = {
        'K': 'k',
        'W': 'w',
        'R': 'r',
        'Y': 'y',
        'G': 'g',
        'C': 'c',
        'B': 'b',
        'M': 'm',
    }
    # Construct a cycler and check that each entry keeps the style
    # associated with its label.
    # BUG FIX: the original comparison lacked the "assert" keyword, so the
    # check was a no-op expression and never tested anything.
    cyc = hep_spt.samples_cycler(cfg.keys(), ls=cfg.values())
    for c in cyc:
        assert c['ls'] == cfg[c['label']]
    # Check that a warning is displayed when the number of samples is
    # greater than the number of styles. The check is done considering
    # that the number of samples is a multiple and non-multiple of the
    # number of styles.
    with pytest.warns(RuntimeWarning):
        ls = list(sorted(cfg.values())[:5])
        cyc = hep_spt.samples_cycler(cfg.keys(), ls=ls)
    assert len(cyc) == len(cfg)
    cyc_ls = list(c['ls'] for c in cyc)
    assert ls + ls[:3] == cyc_ls
    with pytest.warns(RuntimeWarning):
        ls = list(sorted(cfg.values())[:4])
        cyc = hep_spt.samples_cycler(cfg.keys(), ls=ls)
    assert len(cyc) == len(cfg)
    cyc_ls = list(c['ls'] for c in cyc)
    assert 2*ls == cyc_ls
def test_set_style():
    '''
    Test for the "set_style" function.
    '''
    # Every available style must load without raising.
    for style in hep_spt.available_styles():
        hep_spt.set_style(style)
def test_text_in_rectangles():
    '''
    Test the "text_in_rectangles" function.
    '''
    # Four points at the corners of the unit square.
    # (the unused "weights" array of the original version was removed)
    smp = np.array([
        np.array([0., 0., 1., 1.]),
        np.array([0., 1., 0., 1.])
    ]).T
    nbins = 2
    bins = hep_spt.adbin_hist(smp, nbins)
    recs, conts = hep_spt.adbin_hist2d_rectangles(bins, smp)
    # Drawing the per-rectangle text must not raise.
    hep_spt.text_in_rectangles(recs, map(str, conts))
| en | 0.692753 | Test functions for the "plotting" module. Test for the function "available_styles". Test for the "corr_hist2d" function. Test for the "line_style" function. Test for the "modified_format" function. Test for the "opt_fig_div" function. Test for the function "path_to_styles". Test for the function "test_samples_cycler". # Construct a cycler # Check that a warning is displayed when the number of samples is # greater than the number of styles. The check is done considering # that the number of samples is a multiple and non-multiple of the # number of styles. Test for the "set_style" function. Test the "text_in_rectangles" function. | 2.547909 | 3 |
Aula 12.7 - Exercício 42 - Analisando Triângulos v2.0/ex042_.py | Guilherme-Artigas/Python-intermediario | 0 | 6618958 | L1 = float(input('Digite o 1º lado: '))
# Read the two remaining sides (the first one is read above).
L2 = float(input('Digite o 2º lado: '))
L3 = float(input('Digite o 3º lado: '))
# Triangle inequality: each side must be shorter than the sum of the others.
if L1 < L2 + L3 and L2 < L1 + L3 and L3 < L1 + L2:
    print('Legal Podemos formar um triângulo com os valores digitados!')
    print('... mais esse triângulo é EQUILÁTERO, ESCALENO ou ISÓCELES?')
    print()
    if L1 == L2 == L3:
        print('EQUILÁTERO: todos os lados iguais.')
    elif L1 != L2 and L1 != L3 and L2 != L3:
        print('ESCALENO: todos os lados são diferentes.')
    else:
        # The only remaining possibility: exactly two equal sides.
        print('ISÓCELES: dois lados iguais.')
else:
    print('Infelizmente não podemos formar um triângulo com os valores informados!')
| L1 = float(input('Digite o 1º lado: '))
# Read the two remaining sides (the first one is read above).
L2 = float(input('Digite o 2º lado: '))
L3 = float(input('Digite o 3º lado: '))
# Triangle inequality: each side must be shorter than the sum of the others.
if L1 < L2 + L3 and L2 < L1 + L3 and L3 < L1 + L2:
    print('Legal Podemos formar um triângulo com os valores digitados!')
    print('... mais esse triângulo é EQUILÁTERO, ESCALENO ou ISÓCELES?')
    print()
    if L1 == L2 == L3:
        print('EQUILÁTERO: todos os lados iguais.')
    elif L1 != L2 and L1 != L3 and L2 != L3:
        print('ESCALENO: todos os lados são diferentes.')
    else:
        # The only remaining possibility: exactly two equal sides.
        print('ISÓCELES: dois lados iguais.')
else:
    print('Infelizmente não podemos formar um triângulo com os valores informados!')
| none | 1 | 4.056262 | 4 | |
Source/ARATesting.py | SummerSad/Lab01-PathFinding | 5 | 6618959 | import sys
import Heuristic
import RandomProblem
import SolveProblem
def main():
    """Solve a path-finding problem with ARA*.

    With no (or malformed) command-line arguments a random problem is
    generated and solved; otherwise argv holds the input file, the output
    file and the time limit.
    """
    if len(sys.argv) != 4:
        # No input given: create and solve a random 8x16 problem.
        RandomProblem.createRandomProblem('rand_in.txt', 8, 16)
        solver = SolveProblem.ARA('rand_in.txt', 'rand_log.txt', 3,
                                  Heuristic.EuclidDistance, 5)
        solver.writeSolution('rand_out.txt')
    else:
        solver = SolveProblem.ARA(sys.argv[1], 'ARA_log.txt', 3,
                                  Heuristic.EuclidDistance, int(sys.argv[3]))
        solver.writeSolution(sys.argv[2])
if __name__ == '__main__':
main() | import sys
import Heuristic
import RandomProblem
import SolveProblem
def main():
    """Solve a path-finding problem with ARA*.

    With no (or malformed) command-line arguments a random problem is
    generated and solved; otherwise argv holds the input file, the output
    file and the time limit.
    """
    if len(sys.argv) != 4:
        # No input given: create and solve a random 8x16 problem.
        RandomProblem.createRandomProblem('rand_in.txt', 8, 16)
        solver = SolveProblem.ARA('rand_in.txt', 'rand_log.txt', 3,
                                  Heuristic.EuclidDistance, 5)
        solver.writeSolution('rand_out.txt')
    else:
        solver = SolveProblem.ARA(sys.argv[1], 'ARA_log.txt', 3,
                                  Heuristic.EuclidDistance, int(sys.argv[3]))
        solver.writeSolution(sys.argv[2])
if __name__ == '__main__':
main() | en | 0.122138 | # auto random file if no input | 2.858577 | 3 |
basic/type_conversion.py | sanikamal/awesome-python-examples | 1 | 6618960 | """
Created by <NAME>
"""
# Demonstrate numeric type conversions between int, float and str.
var1 = 4
number2 = "4.0"
print(number2)
converted = float(var1)            # int -> float: 4.0
# BUG FIX: int("4.0") raises ValueError; a string holding a decimal number
# must be parsed as a float first and can then be truncated to an int.
convertint = int(float(number2))   # str -> int: 4
num3 = var1 + converted            # 4 + 4.0 -> 8.0
print(num3)
| """
Created by <NAME>
"""
# Demonstrate numeric type conversions between int, float and str.
var1 = 4
number2 = "4.0"
print(number2)
converted = float(var1)            # int -> float: 4.0
# BUG FIX: int("4.0") raises ValueError; a string holding a decimal number
# must be parsed as a float first and can then be truncated to an int.
convertint = int(float(number2))   # str -> int: 4
num3 = var1 + converted            # 4 + 4.0 -> 8.0
print(num3)
| en | 0.977018 | Created by <NAME> | 3.253623 | 3 |
controllers/success.py | gideontong/Humingbird | 1 | 6618961 | <filename>controllers/success.py
from flask import Blueprint, render_template, abort, request
import csvparser
from subprocess import Popen
success = Blueprint('success', __name__,
template_folder='templates')
@success.route('/success', methods=['GET', 'POST'])
def upload_file():
    """Store an uploaded file and launch its asynchronous processing.

    Returns the success page rendered with the stored file name.
    NOTE(review): GET requests fall through and return None (HTTP 500) —
    confirm whether that is intended.
    """
    if request.method == 'POST':
        from os.path import basename

        f = request.files['file']
        # SECURITY: keep only the base name so a crafted filename such as
        # "../../etc/passwd" cannot escape the uploads directory.
        name = basename(f.filename)
        f.save('uploads/' + name)
        # Process the upload in a separate interpreter, without blocking.
        Popen(['python', 'lib/dataHandler.py', 'uploads/' + name])
        return render_template('forms/success.html', name=name)
| <filename>controllers/success.py
from flask import Blueprint, render_template, abort, request
import csvparser
from subprocess import Popen
success = Blueprint('success', __name__,
template_folder='templates')
@success.route('/success', methods=['GET', 'POST'])
def upload_file():
    """Store an uploaded file and launch its asynchronous processing.

    Returns the success page rendered with the stored file name.
    NOTE(review): GET requests fall through and return None (HTTP 500) —
    confirm whether that is intended.
    """
    if request.method == 'POST':
        from os.path import basename

        f = request.files['file']
        # SECURITY: keep only the base name so a crafted filename such as
        # "../../etc/passwd" cannot escape the uploads directory.
        name = basename(f.filename)
        f.save('uploads/' + name)
        # Process the upload in a separate interpreter, without blocking.
        Popen(['python', 'lib/dataHandler.py', 'uploads/' + name])
        return render_template('forms/success.html', name=name)
| none | 1 | 2.226346 | 2 | |
bob/learn/em/test/test_plda.py | bioidiap/bob.learn.em | 6 | 6618962 | <filename>bob/learn/em/test/test_plda.py<gh_stars>1-10
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# <NAME> <<EMAIL>>
# Sat Oct 22 23:01:09 2011 +0200
#
# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
"""Tests PLDA machine
"""
import numpy
import os
import tempfile
import nose.tools
import math
import bob.io.base
from bob.learn.em import PLDABase, PLDAMachine
# Defines common variables globally
# Dimensionalities
C_dim_d = 7
C_dim_f = 2
C_dim_g = 3
# Values for F and G
C_G=numpy.array([-1.1424, -0.5044, -0.1917,
-0.6249, 0.1021, -0.8658,
-1.1687, 1.1963, 0.1807,
0.3926, 0.1203, 1.2665,
1.3018, -1.0368, -0.2512,
-0.5936, -0.8571, -0.2046,
0.4364, -0.1699, -2.2015], 'float64').reshape(C_dim_d, C_dim_g)
# F <-> PCA on G
C_F=numpy.array([-0.054222647972093, -0.000000000783146,
0.596449127693018, 0.000000006265167,
0.298224563846509, 0.000000003132583,
0.447336845769764, 0.000000009397750,
-0.108445295944185, -0.000000001566292,
-0.501559493741856, -0.000000006265167,
-0.298224563846509, -0.000000003132583], 'float64').reshape(C_dim_d, C_dim_f)
def equals(x, y, epsilon):
    # Element-wise closeness check used throughout these tests.
    diff = abs(x - y)
    return (diff < epsilon).all()
def compute_i_sigma(sigma):
    # Invert a diagonal matrix represented by the 1D array of its entries.
    return numpy.divide(1., sigma)
def compute_alpha(G, sigma):
    # alpha = (Id + G^T.sigma^-1.G)^-1 = \mathcal{G}
    n_g = G.shape[1]
    isigma = numpy.diag(compute_i_sigma(sigma))
    gram = G.transpose().dot(isigma).dot(G)
    return numpy.linalg.inv(numpy.eye(n_g) + gram)
def compute_beta(G, sigma):
    # beta = (sigma + G.G^T)^-1 = sigma^-1 - sigma^-1.G.alpha.G^T.sigma^-1 = \mathcal{S}
    isigma = numpy.diag(compute_i_sigma(sigma))
    gt_isigma = G.transpose().dot(isigma)
    alpha = compute_alpha(G, sigma)
    correction = gt_isigma.transpose().dot(alpha).dot(gt_isigma)
    return isigma - correction
def compute_gamma(F, G, sigma, a):
    # gamma_a = (Id + a.F^T.beta.F)^-1 = \mathcal{F}_{a}
    n_f = F.shape[1]
    beta = compute_beta(G, sigma)
    quad = F.transpose().dot(beta).dot(F)
    return numpy.linalg.inv(numpy.eye(n_f) + a * quad)
def compute_ft_beta(F, G, sigma):
    # F^T.beta = F^T.\mathcal{S}
    return F.transpose().dot(compute_beta(G, sigma))
def compute_gt_i_sigma(G, sigma):
    # G^T.sigma^-1, using that sigma is diagonal (element-wise product).
    return G.transpose() * compute_i_sigma(sigma)
def compute_logdet_alpha(G, sigma):
    # \log(\det(\alpha)) = \log(\det(\mathcal{G}))
    return math.log(numpy.linalg.det(compute_alpha(G, sigma)))
def compute_logdet_sigma(sigma):
    """Return log(det(sigma)) for a diagonal matrix given as a 1D array.

    Computed as the sum of the logs of the diagonal entries: unlike the
    ``log(prod(sigma))`` form, this cannot overflow/underflow for
    high-dimensional sigma while being mathematically identical.
    """
    return float(numpy.sum(numpy.log(sigma)))
def compute_loglike_constterm(F, G, sigma, a):
    # loglike_constterm[a] = a/2 * ( -D*\log(2*pi) -\log|\sigma| +\log|\alpha| +\log|\gamma_a|)
    gamma_a = compute_gamma(F, G, sigma, a)
    logdet_gamma_a = math.log(abs(numpy.linalg.det(gamma_a)))
    half_a = a / 2.
    n_dims = F.shape[0]
    logdet_sigma = compute_logdet_sigma(sigma)
    logdet_alpha = compute_logdet_alpha(G, sigma)
    return (-half_a * n_dims * math.log(2 * math.pi) - half_a * logdet_sigma
            + half_a * logdet_alpha + logdet_gamma_a / 2.)
def compute_log_likelihood_point_estimate(observation, mu, F, G, sigma, hi, wij):
    r"""
    Log of p(x_{ij} | h_{i}, w_{ij}, \Theta), i.e. the Gaussian
    N_{x}[\mu + F h_{i} + G w_{ij}, \Sigma] evaluated at ``observation``,
    where \Sigma is diagonal and given as the 1D array ``sigma``.  The
    result is the sum of the normalisation constants and the quadratic
    form of the residual.
    """
    n_dims = observation.shape[0]
    # Normalisation constants of the Gaussian: -D/2 log(2*pi) - 1/2 log|Sigma|.
    const = -(n_dims / 2.) * numpy.log(2. * numpy.pi) \
            - (1. / 2.) * numpy.sum(numpy.log(sigma))
    # Residual once the identity (F.hi) and session (G.wij) parts are removed.
    residual = observation - mu - numpy.dot(F, hi) - numpy.dot(G, wij)
    # Quadratic form with the diagonal inverse covariance.
    quad = -(1. / 2.) * numpy.sum(residual * (1. / sigma) * residual)
    return const + quad
def compute_log_likelihood(observations, mu, F, G, sigma):
    """
    This function computes the log-likelihood of the observations given the parameters
    of the PLDA model. This is done by fully integrating out the latent variables.
    """
    # Work out the number of samples that we have and normalise the data.
    J_i = observations.shape[0];  # An integer > 0
    norm_observations = observations - numpy.tile(mu, [J_i,1]);  # (J_i, D_x)
    # There are three terms that need to be computed: C1, C2 and C3
    # 1. Computes C1
    # C1 = - J_{i} * dim_d/2 log(2*pi)
    dim_d = observations.shape[1]  # A scalar
    dim_f = F.shape[1]
    log_2pi = numpy.log(2. * numpy.pi);  # A scalar
    C1 = - J_i * (dim_d / 2.) * log_2pi;  # A scalar
    # 2. Computes C2
    # C2 = - J_i/2 * [log(det(sigma)) - log(det(alpha^-1))] + log(det(gamma_{J_i}))/2
    ld_sigma = compute_logdet_sigma(sigma)
    ld_alpha = compute_logdet_alpha(G, sigma)
    gamma = compute_gamma(F, G, sigma, J_i)
    ld_gamma = math.log(numpy.linalg.det(gamma))
    C2 = - J_i/2.*(ld_sigma - ld_alpha) + ld_gamma/2.
    # 3. Computes C3
    # This is a quadratic part and consists of
    # C3 = -0.5 * sum x^T beta x + 0.5 * Quadratic term in x
    # C3 = -0.5 * (C3a - C3b)
    C3a = 0.0;
    C3b_sum_part = numpy.zeros((dim_f,1));
    # NOTE(review): "isigma" is computed here but never used below — confirm
    # it can be dropped.
    isigma = numpy.diag(compute_i_sigma(sigma))
    beta = compute_beta(G, sigma)
    ft_beta = numpy.dot(numpy.transpose(F), beta)
    # Accumulate the quadratic terms over the J_i samples.
    for j in range(0, J_i):
        ### Calculations for C3a: x_j^T beta x_j
        current_vector = numpy.reshape(norm_observations[j,:], (dim_d,1));  # (D_x, 1)
        vector_E = numpy.dot(beta, current_vector);  # (D_x, 1)
        current_result = numpy.dot(current_vector.transpose(), vector_E);  # A floating point value
        C3a = C3a + current_result[0][0];  # A floating point value
        ### Calculations for C3b: running sum of F^T beta x_j
        C3b_sum_part = C3b_sum_part + numpy.dot(ft_beta, current_vector);  # (nf, 1)
    ### Final calculations for C3b, using the matrix gamma_{J_i}
    C3b = numpy.dot(numpy.dot(C3b_sum_part.transpose(), gamma), C3b_sum_part);
    C3 = -0.5 * (C3a - C3b[0][0]);
    return C1 + C2 + C3
def test_plda_basemachine():
# Data used for performing the tests
sigma = numpy.ndarray(C_dim_d, 'float64')
sigma.fill(0.01)
mu = numpy.ndarray(C_dim_d, 'float64')
mu.fill(0)
# Defines reference results based on matlab
alpha_ref = numpy.array([ 0.002189051545735, 0.001127099941432,
-0.000145483208153, 0.001127099941432, 0.003549267943741,
-0.000552001405453, -0.000145483208153, -0.000552001405453,
0.001440505362615], 'float64').reshape(C_dim_g, C_dim_g)
beta_ref = numpy.array([ 50.587191765140361, -14.512478352504877,
-0.294799164567830, 13.382002504394316, 9.202063877660278,
-43.182264846086497, 11.932345916716455, -14.512478352504878,
82.320149045633045, -12.605578822979698, 19.618675892079366,
13.033691341150439, -8.004874490989799, -21.547363307109187,
-0.294799164567832, -12.605578822979696, 52.123885798398241,
4.363739008635009, 44.847177605628545, 16.438137537463710,
5.137421840557050, 13.382002504394316, 19.618675892079366,
4.363739008635011, 75.070401560513488, -4.515472972526140,
9.752862741017488, 34.196127678931106, 9.202063877660285,
13.033691341150439, 44.847177605628552, -4.515472972526142,
56.189416227691098, -7.536676357632515, -10.555735414707383,
-43.182264846086497, -8.004874490989799, 16.438137537463703,
9.752862741017490, -7.536676357632518, 56.430571485722126,
9.471758169835317, 11.932345916716461, -21.547363307109187,
5.137421840557051, 34.196127678931099, -10.555735414707385,
9.471758169835320, 27.996266602110637], 'float64').reshape(C_dim_d, C_dim_d)
gamma3_ref = numpy.array([ 0.005318799462241, -0.000000012993151,
-0.000000012993151, 0.999999999999996], 'float64').reshape(C_dim_f, C_dim_f)
# Constructor tests
#m = PLDABase()
#assert m.dim_d == 0
#assert m.dim_f == 0
#assert m.dim_g == 0
#del m
m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
assert m.shape[0] == C_dim_d
assert m.shape[1] == C_dim_f
assert m.shape[2] == C_dim_g
assert abs(m.variance_threshold - 0.) < 1e-10
del m
m = PLDABase(C_dim_d, C_dim_f, C_dim_g, 1e-2)
assert m.shape[0] == C_dim_d
assert m.shape[1] == C_dim_f
assert m.shape[2] == C_dim_g
assert abs(m.variance_threshold - 1e-2) < 1e-10
del m
# Defines base machine
m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
#m.resize(C_dim_d, C_dim_f, C_dim_g)
# Sets the current mu, F, G and sigma
m.mu = mu
m.f = C_F
m.g = C_G
m.sigma = sigma
gamma3 = m.get_add_gamma(3).copy()
constTerm3 = m.get_add_log_like_const_term(3)
# Compares precomputed values to matlab reference
for ii in range(m.__alpha__.shape[0]):
for jj in range(m.__alpha__.shape[1]):
absdiff = abs(m.__alpha__[ii,jj]- alpha_ref[ii,jj])
assert absdiff < 1e-10, 'PLDABase alpha matrix does not match reference at (%d,%d) to 10^-10: |%g-%g| = %g' % (ii, jj, m.__alpha__[ii,jj], alpha_ref[ii,jj], absdiff)
assert equals(m.__alpha__, alpha_ref, 1e-10)
assert equals(m.__beta__, beta_ref, 1e-10)
assert equals(gamma3, gamma3_ref, 1e-10)
# Compares precomputed values to the ones returned by python implementation
assert equals(m.__isigma__, compute_i_sigma(sigma), 1e-10)
assert equals(m.__alpha__, compute_alpha(C_G,sigma), 1e-10)
assert equals(m.__beta__, compute_beta(C_G,sigma), 1e-10)
assert equals(m.get_add_gamma(3), compute_gamma(C_F,C_G,sigma,3), 1e-10)
assert m.has_gamma(3)
assert equals(m.get_gamma(3), compute_gamma(C_F,C_G,sigma,3), 1e-10)
assert equals(m.__ft_beta__, compute_ft_beta(C_F,C_G,sigma), 1e-10)
assert equals(m.__gt_i_sigma__, compute_gt_i_sigma(C_G,sigma), 1e-10)
assert math.fabs(m.__logdet_alpha__ - compute_logdet_alpha(C_G,sigma)) < 1e-10
assert math.fabs(m.__logdet_sigma__ - compute_logdet_sigma(sigma)) < 1e-10
assert abs(m.get_add_log_like_const_term(3) - compute_loglike_constterm(C_F,C_G,sigma,3)) < 1e-10
assert m.has_log_like_const_term(3)
assert abs(m.get_log_like_const_term(3) - compute_loglike_constterm(C_F,C_G,sigma,3)) < 1e-10
# Defines base machine
del m
m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
# Sets the current mu, F, G and sigma
m.mu = mu
m.f = C_F
m.g = C_G
m.sigma = sigma
gamma3 = m.get_add_gamma(3).copy()
constTerm3 = m.get_add_log_like_const_term(3)
# Compares precomputed values to matlab reference
assert equals(m.__alpha__, alpha_ref, 1e-10)
assert equals(m.__beta__, beta_ref, 1e-10)
assert equals(gamma3, gamma3_ref, 1e-10)
# values before being saved
isigma = m.__isigma__.copy()
alpha = m.__alpha__.copy()
beta = m.__beta__.copy()
FtBeta = m.__ft_beta__.copy()
GtISigma = m.__gt_i_sigma__.copy()
logdetAlpha = m.__logdet_alpha__
logdetSigma = m.__logdet_sigma__
# Saves to file, loads and compares to original
filename = str(tempfile.mkstemp(".hdf5")[1])
m.save(bob.io.base.HDF5File(filename, 'w'))
m_loaded = PLDABase(bob.io.base.HDF5File(filename))
# Compares the values loaded with the former ones
assert m_loaded == m
assert (m_loaded != m) is False
assert equals(m_loaded.mu, mu, 1e-10)
assert equals(m_loaded.f, C_F, 1e-10)
assert equals(m_loaded.g, C_G, 1e-10)
assert equals(m_loaded.sigma, sigma, 1e-10)
assert equals(m_loaded.__isigma__, isigma, 1e-10)
assert equals(m_loaded.__alpha__, alpha, 1e-10)
assert equals(m_loaded.__beta__, beta, 1e-10)
assert equals(m_loaded.__ft_beta__, FtBeta, 1e-10)
assert equals(m_loaded.__gt_i_sigma__, GtISigma, 1e-10)
assert abs(m_loaded.__logdet_alpha__ - logdetAlpha) < 1e-10
assert abs(m_loaded.__logdet_sigma__ - logdetSigma) < 1e-10
assert m_loaded.has_gamma(3)
assert equals(m_loaded.get_gamma(3), gamma3_ref, 1e-10)
assert equals(m_loaded.get_add_gamma(3), gamma3_ref, 1e-10)
assert m_loaded.has_log_like_const_term(3)
assert abs(m_loaded.get_add_log_like_const_term(3) - constTerm3) < 1e-10
# Compares the values loaded with the former ones when copying
m_copy = PLDABase(m_loaded)
assert m_loaded == m_copy
assert (m_loaded != m_copy) is False
# Test clear_maps method
assert m_copy.has_gamma(3)
assert m_copy.has_log_like_const_term(3)
m_copy.clear_maps()
assert (m_copy.has_gamma(3)) is False
assert (m_copy.has_log_like_const_term(3)) is False
# Check variance flooring thresholds-related methods
v_zo = numpy.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01])
v_zo_ = 0.01
v_zzo = numpy.array([0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001])
v_zzo_ = 0.001
m_copy.variance_threshold = v_zo_
assert (m_loaded == m_copy) is False
assert m_loaded != m_copy
m_copy.variance_threshold = v_zzo_
m_copy.sigma = v_zo
assert equals(m_copy.sigma, v_zo, 1e-10)
m_copy.variance_threshold = v_zo_
m_copy.sigma = v_zzo
assert equals(m_copy.sigma, v_zo, 1e-10)
m_copy.variance_threshold = v_zzo_
m_copy.sigma = v_zzo
assert equals(m_copy.sigma, v_zzo, 1e-10)
m_copy.variance_threshold = v_zo_
assert equals(m_copy.sigma, v_zo, 1e-10)
# Clean-up
os.unlink(filename)
def test_plda_basemachine_loglikelihood_pointestimate():
    # Checks the machine's point-estimate log-likelihood against the
    # pure-python reference implementation defined above.
    sigma = numpy.full(C_dim_d, 0.01, 'float64')
    mu = numpy.zeros(C_dim_d, 'float64')
    xij = numpy.array([0.7, 1.3, 2.5, 0.3, 1.3, 2.7, 0.9])
    hi = numpy.array([-0.5, 0.5])
    wij = numpy.array([-0.1, 0.2, 0.3])
    machine = PLDABase(C_dim_d, C_dim_f, C_dim_g)
    # Sets the current mu, F, G and sigma
    machine.mu = mu
    machine.f = C_F
    machine.g = C_G
    machine.sigma = sigma
    reference = compute_log_likelihood_point_estimate(xij, mu, C_F, C_G, sigma, hi, wij)
    assert equals(machine.compute_log_likelihood_point_estimate(xij, hi, wij),
                  reference, 1e-6)
def test_plda_machine():
# Data used for performing the tests
# Features and subspaces dimensionality
sigma = numpy.ndarray(C_dim_d, 'float64')
sigma.fill(0.01)
mu = numpy.ndarray(C_dim_d, 'float64')
mu.fill(0)
# Defines base machine
mb = PLDABase(C_dim_d, C_dim_f, C_dim_g)
# Sets the current mu, F, G and sigma
mb.mu = mu
mb.f = C_F
mb.g = C_G
mb.sigma = sigma
# Test constructors and dim getters
m = PLDAMachine(mb)
assert m.shape[0] == C_dim_d
assert m.shape[1]== C_dim_f
assert m.shape[2] == C_dim_g
m0 = PLDAMachine(mb)
#m0.plda_base = mb
assert m0.shape[0] == C_dim_d
assert m0.shape[1] == C_dim_f
assert m0.shape[2] == C_dim_g
# Defines machine
n_samples = 2
WSumXitBetaXi = 0.37
weightedSum = numpy.array([1.39,0.54], 'float64')
log_likelihood = -0.22
m.n_samples = n_samples
m.w_sum_xit_beta_xi = WSumXitBetaXi
m.weighted_sum = weightedSum
m.log_likelihood = log_likelihood
gamma3 = m.get_add_gamma(3).copy()
constTerm3 = m.get_add_log_like_const_term(3)
# Saves to file, loads and compares to original
filename = str(tempfile.mkstemp(".hdf5")[1])
m.save(bob.io.base.HDF5File(filename, 'w'))
m_loaded = PLDAMachine(bob.io.base.HDF5File(filename), mb)
# Compares the values loaded with the former ones
assert m_loaded == m
assert (m_loaded != m) is False
assert abs(m_loaded.n_samples - n_samples) < 1e-10
assert abs(m_loaded.w_sum_xit_beta_xi - WSumXitBetaXi) < 1e-10
assert equals(m_loaded.weighted_sum, weightedSum, 1e-10)
assert abs(m_loaded.log_likelihood - log_likelihood) < 1e-10
assert m_loaded.has_gamma(3)
assert equals(m_loaded.get_add_gamma(3), gamma3, 1e-10)
assert equals(m_loaded.get_gamma(3), gamma3, 1e-10)
assert m_loaded.has_log_like_const_term(3)
assert abs(m_loaded.get_add_log_like_const_term(3) - constTerm3) < 1e-10
assert abs(m_loaded.get_log_like_const_term(3) - constTerm3) < 1e-10
# Test clear_maps method
assert m_loaded.has_gamma(3)
assert m_loaded.has_log_like_const_term(3)
m_loaded.clear_maps()
assert (m_loaded.has_gamma(3)) is False
assert (m_loaded.has_log_like_const_term(3)) is False
# Check exceptions
#m_loaded2 = PLDAMachine(bob.io.base.HDF5File(filename))
#m_loaded2.load(bob.io.base.HDF5File(filename))
#nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'shape')
#nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'dim_f')
#nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'dim_g')
#nose.tools.assert_raises(RuntimeError, m_loaded2.forward, [1.])
#nose.tools.assert_raises(RuntimeError, m_loaded2.compute_log_likelihood, [1.])
# Clean-up
os.unlink(filename)
def test_plda_machine_log_likelihood_Python():
    # Checks PLDAMachine.compute_log_likelihood and the __call__ (forward)
    # log-likelihood ratio against the pure-python reference above, on
    # random samples.
    # Data used for performing the tests
    # Features and subspaces dimensionality
    sigma = numpy.ndarray(C_dim_d, 'float64')
    sigma.fill(0.01)
    mu = numpy.ndarray(C_dim_d, 'float64')
    mu.fill(0)
    # Defines base machine
    mb = PLDABase(C_dim_d, C_dim_f, C_dim_g)
    # Sets the current mu, F, G and sigma
    mb.mu = mu
    mb.f = C_F
    mb.g = C_G
    mb.sigma = sigma
    # Defines machine
    m = PLDAMachine(mb)
    # Defines (random) samples and check compute_log_likelihood method
    ar_e = numpy.random.randn(2,C_dim_d)
    ar_p = numpy.random.randn(C_dim_d)
    ar_s = numpy.vstack([ar_e, ar_p])
    assert abs(m.compute_log_likelihood(ar_s, False) - compute_log_likelihood(ar_s, mu, C_F, C_G, sigma)) < 1e-10
    ar_p2d = numpy.reshape(ar_p, (1,C_dim_d))
    # NOTE(review): "a" is never used afterwards — confirm it can be dropped.
    a = m.compute_log_likelihood(ar_p, False)
    assert abs(m.compute_log_likelihood(ar_p, False) - compute_log_likelihood(ar_p2d, mu, C_F, C_G, sigma)) < 1e-10
    # Defines (random) samples and check forward method: the ratio returned
    # by __call__ must match the one built from the individual terms.
    ar2_e = numpy.random.randn(4,C_dim_d)
    ar2_p = numpy.random.randn(C_dim_d)
    ar2_s = numpy.vstack([ar2_e, ar2_p])
    m.log_likelihood = m.compute_log_likelihood(ar2_e, False)
    llr = m.compute_log_likelihood(ar2_s, True) - (m.compute_log_likelihood(ar2_s, False) + m.log_likelihood)
    assert abs(m(ar2_s) - llr) < 1e-10
    ar2_p2d = numpy.random.randn(3,C_dim_d)
    ar2_s2d = numpy.vstack([ar2_e, ar2_p2d])
    llr2d = m.compute_log_likelihood(ar2_s2d, True) - (m.compute_log_likelihood(ar2_s2d, False) + m.log_likelihood)
    assert abs(m(ar2_s2d) - llr2d) < 1e-10
def test_plda_machine_log_likelihood_Prince():
# Data used for performing the tests
# Features and subspaces dimensionality
D = 7
nf = 2
ng = 3
# initial values for F, G and sigma
G_init=numpy.array([-1.1424, -0.5044, -0.1917,
-0.6249, 0.1021, -0.8658,
-1.1687, 1.1963, 0.1807,
0.3926, 0.1203, 1.2665,
1.3018, -1.0368, -0.2512,
-0.5936, -0.8571, -0.2046,
0.4364, -0.1699, -2.2015]).reshape(D,ng)
# F <-> PCA on G
F_init=numpy.array([-0.054222647972093, -0.000000000783146,
0.596449127693018, 0.000000006265167,
0.298224563846509, 0.000000003132583,
0.447336845769764, 0.000000009397750,
-0.108445295944185, -0.000000001566292,
-0.501559493741856, -0.000000006265167,
-0.298224563846509, -0.000000003132583]).reshape(D,nf)
sigma_init = 0.01 * numpy.ones((D,), 'float64')
mean_zero = numpy.zeros((D,), 'float64')
# base machine
mb = PLDABase(D,nf,ng)
mb.sigma = sigma_init
mb.g = G_init
mb.f = F_init
mb.mu = mean_zero
# Data for likelihood computation
x1 = numpy.array([0.8032, 0.3503, 0.4587, 0.9511, 0.1330, 0.0703, 0.7061])
x2 = numpy.array([0.9317, 0.1089, 0.6517, 0.1461, 0.6940, 0.6256, 0.0437])
x3 = numpy.array([0.7979, 0.9862, 0.4367, 0.3447, 0.0488, 0.2252, 0.5810])
X = numpy.ndarray((3,D), 'float64')
X[0,:] = x1
X[1,:] = x2
X[2,:] = x3
a = []
a.append(x1)
a.append(x2)
a.append(x3)
a = numpy.array(a)
# reference likelihood from Prince implementation
ll_ref = -182.8880743535197
# machine
m = PLDAMachine(mb)
ll = m.compute_log_likelihood(X)
assert abs(ll - ll_ref) < 1e-10
# log likelihood ratio
Y = numpy.ndarray((2,D), 'float64')
Y[0,:] = x1
Y[1,:] = x2
Z = numpy.ndarray((1,D), 'float64')
Z[0,:] = x3
llX = m.compute_log_likelihood(X)
llY = m.compute_log_likelihood(Y)
llZ = m.compute_log_likelihood(Z)
# reference obtained by computing the likelihood of [x1,x2,x3], [x1,x2]
# and [x3] separately
llr_ref = -4.43695386675
assert abs((llX - (llY + llZ)) - llr_ref) < 1e-10
| <filename>bob/learn/em/test/test_plda.py<gh_stars>1-10
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# <NAME> <<EMAIL>>
# Sat Oct 22 23:01:09 2011 +0200
#
# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
"""Tests PLDA machine
"""
import numpy
import os
import tempfile
import nose.tools
import math
import bob.io.base
from bob.learn.em import PLDABase, PLDAMachine
# Defines common variables globally
# Dimensionalities
C_dim_d = 7
C_dim_f = 2
C_dim_g = 3
# Values for F and G
C_G=numpy.array([-1.1424, -0.5044, -0.1917,
-0.6249, 0.1021, -0.8658,
-1.1687, 1.1963, 0.1807,
0.3926, 0.1203, 1.2665,
1.3018, -1.0368, -0.2512,
-0.5936, -0.8571, -0.2046,
0.4364, -0.1699, -2.2015], 'float64').reshape(C_dim_d, C_dim_g)
# F <-> PCA on G
C_F=numpy.array([-0.054222647972093, -0.000000000783146,
0.596449127693018, 0.000000006265167,
0.298224563846509, 0.000000003132583,
0.447336845769764, 0.000000009397750,
-0.108445295944185, -0.000000001566292,
-0.501559493741856, -0.000000006265167,
-0.298224563846509, -0.000000003132583], 'float64').reshape(C_dim_d, C_dim_f)
def equals(x, y, epsilon):
    # Element-wise closeness check used throughout these tests.
    diff = abs(x - y)
    return (diff < epsilon).all()
def compute_i_sigma(sigma):
    # Invert a diagonal matrix represented by the 1D array of its entries.
    return numpy.divide(1., sigma)
def compute_alpha(G, sigma):
    # alpha = (Id + G^T.sigma^-1.G)^-1 = \mathcal{G}
    n_g = G.shape[1]
    isigma = numpy.diag(compute_i_sigma(sigma))
    gram = G.transpose().dot(isigma).dot(G)
    return numpy.linalg.inv(numpy.eye(n_g) + gram)
def compute_beta(G, sigma):
    # beta = (sigma + G.G^T)^-1 = sigma^-1 - sigma^-1.G.alpha.G^T.sigma^-1 = \mathcal{S}
    isigma = numpy.diag(compute_i_sigma(sigma))
    gt_isigma = G.transpose().dot(isigma)
    alpha = compute_alpha(G, sigma)
    correction = gt_isigma.transpose().dot(alpha).dot(gt_isigma)
    return isigma - correction
def compute_gamma(F, G, sigma, a):
    # gamma_a = (Id + a.F^T.beta.F)^-1 = \mathcal{F}_{a}
    n_f = F.shape[1]
    beta = compute_beta(G, sigma)
    quad = F.transpose().dot(beta).dot(F)
    return numpy.linalg.inv(numpy.eye(n_f) + a * quad)
def compute_ft_beta(F, G, sigma):
    # F^T.beta = F^T.\mathcal{S}
    return F.transpose().dot(compute_beta(G, sigma))
def compute_gt_i_sigma(G, sigma):
    # G^T.sigma^-1, using that sigma is diagonal (element-wise product).
    return G.transpose() * compute_i_sigma(sigma)
def compute_logdet_alpha(G, sigma):
    # \log(\det(\alpha)) = \log(\det(\mathcal{G}))
    return math.log(numpy.linalg.det(compute_alpha(G, sigma)))
def compute_logdet_sigma(sigma):
    """Return log(det(sigma)) for a diagonal matrix given as a 1D array.

    Computed as the sum of the logs of the diagonal entries: unlike the
    ``log(prod(sigma))`` form, this cannot overflow/underflow for
    high-dimensional sigma while being mathematically identical.
    """
    return float(numpy.sum(numpy.log(sigma)))
def compute_loglike_constterm(F, G, sigma, a):
    # loglike_constterm[a] = a/2 * ( -D*\log(2*pi) -\log|\sigma| +\log|\alpha| +\log|\gamma_a|)
    gamma_a = compute_gamma(F, G, sigma, a)
    logdet_gamma_a = math.log(abs(numpy.linalg.det(gamma_a)))
    half_a = a / 2.
    n_dims = F.shape[0]
    logdet_sigma = compute_logdet_sigma(sigma)
    logdet_alpha = compute_logdet_alpha(G, sigma)
    return (-half_a * n_dims * math.log(2 * math.pi) - half_a * logdet_sigma
            + half_a * logdet_alpha + logdet_gamma_a / 2.)
def compute_log_likelihood_point_estimate(observation, mu, F, G, sigma, hi, wij):
    """Log of p(x_ij | h_i, w_ij, Theta) for a Gaussian with mean
    mu + F.h_i + G.w_ij and diagonal covariance ``sigma``.

    The result is the sum of three terms:
      C1 = -dim_d/2 * log(2*pi)              (normalisation constant)
      C2 = -1/2 * log(det(Sigma))            (diagonal covariance volume)
      C3 = -1/2 * r^T Sigma^-1 r             (Mahalanobis term, r = residual)
    """
    dim_d = observation.shape[0]
    # Residual once the mean and both latent contributions are removed.
    residual = observation - mu - numpy.dot(F, hi) - numpy.dot(G, wij)
    # C1: Gaussian normalisation constant in dim_d dimensions.
    c1 = -0.5 * dim_d * numpy.log(2. * numpy.pi)
    # C2: log-determinant of the diagonal covariance.
    c2 = -0.5 * numpy.log(sigma).sum()
    # C3: sigma is diagonal, so the quadratic form reduces to a weighted
    # sum of squared residuals.
    c3 = -0.5 * numpy.sum(residual * residual / sigma)
    return c1 + c2 + c3
def compute_log_likelihood(observations, mu, F, G, sigma):
    """
    This function computes the log-likelihood of the observations given the parameters
    of the PLDA model. This is done by fully integrating out the latent variables.
    """
    # Work out the number of samples that we have and normalise the data.
    J_i = observations.shape[0]; # An integer > 0
    norm_observations = observations - numpy.tile(mu, [J_i,1]); # (J_i, D_x)
    # There are three terms that need to be computed: C1, C2 and C3
    # 1. Computes C1
    # C1 = - J_{i} * dim_d/2 log(2*pi)
    dim_d = observations.shape[1] # A scalar
    dim_f = F.shape[1]
    log_2pi = numpy.log(2. * numpy.pi); # A scalar
    C1 = - J_i * (dim_d / 2.) * log_2pi; # A scalar
    # 2. Computes C2
    # C2 = - J_i/2 * [log(det(sigma)) - log(det(alpha^-1))] + log(det(gamma_{J_i}))/2
    ld_sigma = compute_logdet_sigma(sigma)
    ld_alpha = compute_logdet_alpha(G, sigma)
    gamma = compute_gamma(F, G, sigma, J_i)
    ld_gamma = math.log(numpy.linalg.det(gamma))
    C2 = - J_i/2.*(ld_sigma - ld_alpha) + ld_gamma/2.
    # 3. Computes C3
    # This is a quadratic part and consists of
    # C3 = -0.5 * sum x^T beta x + 0.5 * Quadratic term in x
    # C3 = -0.5 * (C3a - C3b)
    C3a = 0.0;
    C3b_sum_part = numpy.zeros((dim_f,1));
    # NOTE(review): isigma is computed but never used below.
    isigma = numpy.diag(compute_i_sigma(sigma))
    beta = compute_beta(G, sigma)
    ft_beta = numpy.dot(numpy.transpose(F), beta)
    for j in range(0, J_i):
        ### Calculations for C3a
        current_vector = numpy.reshape(norm_observations[j,:], (dim_d,1)); # (D_x, 1)
        vector_E = numpy.dot(beta, current_vector); # (D_x, 1)
        current_result = numpy.dot(current_vector.transpose(), vector_E); # A floating point value
        C3a = C3a + current_result[0][0]; # A floating point value
        ### Calculations for C3b
        C3b_sum_part = C3b_sum_part + numpy.dot(ft_beta, current_vector); # (nf, 1)
    ### Final calculations for C3b, using the matrix gamma_{J_i}
    C3b = numpy.dot(numpy.dot(C3b_sum_part.transpose(), gamma), C3b_sum_part);
    C3 = -0.5 * (C3a - C3b[0][0]);
    return C1 + C2 + C3
def test_plda_basemachine():
    """Checks PLDABase precomputed matrices against Matlab references, against the
    pure-numpy reference implementations above, and through an HDF5 save/load
    round-trip and copy construction."""
    # Data used for performing the tests
    sigma = numpy.ndarray(C_dim_d, 'float64')
    sigma.fill(0.01)
    mu = numpy.ndarray(C_dim_d, 'float64')
    mu.fill(0)
    # Defines reference results based on matlab
    alpha_ref = numpy.array([ 0.002189051545735, 0.001127099941432,
        -0.000145483208153, 0.001127099941432, 0.003549267943741,
        -0.000552001405453, -0.000145483208153, -0.000552001405453,
        0.001440505362615], 'float64').reshape(C_dim_g, C_dim_g)
    beta_ref = numpy.array([ 50.587191765140361, -14.512478352504877,
        -0.294799164567830, 13.382002504394316, 9.202063877660278,
        -43.182264846086497, 11.932345916716455, -14.512478352504878,
        82.320149045633045, -12.605578822979698, 19.618675892079366,
        13.033691341150439, -8.004874490989799, -21.547363307109187,
        -0.294799164567832, -12.605578822979696, 52.123885798398241,
        4.363739008635009, 44.847177605628545, 16.438137537463710,
        5.137421840557050, 13.382002504394316, 19.618675892079366,
        4.363739008635011, 75.070401560513488, -4.515472972526140,
        9.752862741017488, 34.196127678931106, 9.202063877660285,
        13.033691341150439, 44.847177605628552, -4.515472972526142,
        56.189416227691098, -7.536676357632515, -10.555735414707383,
        -43.182264846086497, -8.004874490989799, 16.438137537463703,
        9.752862741017490, -7.536676357632518, 56.430571485722126,
        9.471758169835317, 11.932345916716461, -21.547363307109187,
        5.137421840557051, 34.196127678931099, -10.555735414707385,
        9.471758169835320, 27.996266602110637], 'float64').reshape(C_dim_d, C_dim_d)
    gamma3_ref = numpy.array([ 0.005318799462241, -0.000000012993151,
        -0.000000012993151, 0.999999999999996], 'float64').reshape(C_dim_f, C_dim_f)
    # Constructor tests
    #m = PLDABase()
    #assert m.dim_d == 0
    #assert m.dim_f == 0
    #assert m.dim_g == 0
    #del m
    m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
    assert m.shape[0] == C_dim_d
    assert m.shape[1] == C_dim_f
    assert m.shape[2] == C_dim_g
    assert abs(m.variance_threshold - 0.) < 1e-10
    del m
    m = PLDABase(C_dim_d, C_dim_f, C_dim_g, 1e-2)
    assert m.shape[0] == C_dim_d
    assert m.shape[1] == C_dim_f
    assert m.shape[2] == C_dim_g
    assert abs(m.variance_threshold - 1e-2) < 1e-10
    del m
    # Defines base machine
    m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
    #m.resize(C_dim_d, C_dim_f, C_dim_g)
    # Sets the current mu, F, G and sigma
    m.mu = mu
    m.f = C_F
    m.g = C_G
    m.sigma = sigma
    gamma3 = m.get_add_gamma(3).copy()
    constTerm3 = m.get_add_log_like_const_term(3)
    # Compares precomputed values to matlab reference
    for ii in range(m.__alpha__.shape[0]):
        for jj in range(m.__alpha__.shape[1]):
            absdiff = abs(m.__alpha__[ii,jj]- alpha_ref[ii,jj])
            assert absdiff < 1e-10, 'PLDABase alpha matrix does not match reference at (%d,%d) to 10^-10: |%g-%g| = %g' % (ii, jj, m.__alpha__[ii,jj], alpha_ref[ii,jj], absdiff)
    assert equals(m.__alpha__, alpha_ref, 1e-10)
    assert equals(m.__beta__, beta_ref, 1e-10)
    assert equals(gamma3, gamma3_ref, 1e-10)
    # Compares precomputed values to the ones returned by python implementation
    assert equals(m.__isigma__, compute_i_sigma(sigma), 1e-10)
    assert equals(m.__alpha__, compute_alpha(C_G,sigma), 1e-10)
    assert equals(m.__beta__, compute_beta(C_G,sigma), 1e-10)
    assert equals(m.get_add_gamma(3), compute_gamma(C_F,C_G,sigma,3), 1e-10)
    assert m.has_gamma(3)
    assert equals(m.get_gamma(3), compute_gamma(C_F,C_G,sigma,3), 1e-10)
    assert equals(m.__ft_beta__, compute_ft_beta(C_F,C_G,sigma), 1e-10)
    assert equals(m.__gt_i_sigma__, compute_gt_i_sigma(C_G,sigma), 1e-10)
    assert math.fabs(m.__logdet_alpha__ - compute_logdet_alpha(C_G,sigma)) < 1e-10
    assert math.fabs(m.__logdet_sigma__ - compute_logdet_sigma(sigma)) < 1e-10
    assert abs(m.get_add_log_like_const_term(3) - compute_loglike_constterm(C_F,C_G,sigma,3)) < 1e-10
    assert m.has_log_like_const_term(3)
    assert abs(m.get_log_like_const_term(3) - compute_loglike_constterm(C_F,C_G,sigma,3)) < 1e-10
    # Defines base machine
    del m
    m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
    # Sets the current mu, F, G and sigma
    m.mu = mu
    m.f = C_F
    m.g = C_G
    m.sigma = sigma
    gamma3 = m.get_add_gamma(3).copy()
    constTerm3 = m.get_add_log_like_const_term(3)
    # Compares precomputed values to matlab reference
    assert equals(m.__alpha__, alpha_ref, 1e-10)
    assert equals(m.__beta__, beta_ref, 1e-10)
    assert equals(gamma3, gamma3_ref, 1e-10)
    # values before being saved
    isigma = m.__isigma__.copy()
    alpha = m.__alpha__.copy()
    beta = m.__beta__.copy()
    FtBeta = m.__ft_beta__.copy()
    GtISigma = m.__gt_i_sigma__.copy()
    logdetAlpha = m.__logdet_alpha__
    logdetSigma = m.__logdet_sigma__
    # Saves to file, loads and compares to original
    filename = str(tempfile.mkstemp(".hdf5")[1])
    m.save(bob.io.base.HDF5File(filename, 'w'))
    m_loaded = PLDABase(bob.io.base.HDF5File(filename))
    # Compares the values loaded with the former ones
    assert m_loaded == m
    assert (m_loaded != m) is False
    assert equals(m_loaded.mu, mu, 1e-10)
    assert equals(m_loaded.f, C_F, 1e-10)
    assert equals(m_loaded.g, C_G, 1e-10)
    assert equals(m_loaded.sigma, sigma, 1e-10)
    assert equals(m_loaded.__isigma__, isigma, 1e-10)
    assert equals(m_loaded.__alpha__, alpha, 1e-10)
    assert equals(m_loaded.__beta__, beta, 1e-10)
    assert equals(m_loaded.__ft_beta__, FtBeta, 1e-10)
    assert equals(m_loaded.__gt_i_sigma__, GtISigma, 1e-10)
    assert abs(m_loaded.__logdet_alpha__ - logdetAlpha) < 1e-10
    assert abs(m_loaded.__logdet_sigma__ - logdetSigma) < 1e-10
    assert m_loaded.has_gamma(3)
    assert equals(m_loaded.get_gamma(3), gamma3_ref, 1e-10)
    assert equals(m_loaded.get_add_gamma(3), gamma3_ref, 1e-10)
    assert m_loaded.has_log_like_const_term(3)
    assert abs(m_loaded.get_add_log_like_const_term(3) - constTerm3) < 1e-10
    # Compares the values loaded with the former ones when copying
    m_copy = PLDABase(m_loaded)
    assert m_loaded == m_copy
    assert (m_loaded != m_copy) is False
    # Test clear_maps method
    assert m_copy.has_gamma(3)
    assert m_copy.has_log_like_const_term(3)
    m_copy.clear_maps()
    assert (m_copy.has_gamma(3)) is False
    assert (m_copy.has_log_like_const_term(3)) is False
    # Check variance flooring thresholds-related methods
    v_zo = numpy.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01])
    v_zo_ = 0.01
    v_zzo = numpy.array([0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001])
    v_zzo_ = 0.001
    m_copy.variance_threshold = v_zo_
    assert (m_loaded == m_copy) is False
    assert m_loaded != m_copy
    m_copy.variance_threshold = v_zzo_
    m_copy.sigma = v_zo
    assert equals(m_copy.sigma, v_zo, 1e-10)
    m_copy.variance_threshold = v_zo_
    m_copy.sigma = v_zzo
    assert equals(m_copy.sigma, v_zo, 1e-10)
    m_copy.variance_threshold = v_zzo_
    m_copy.sigma = v_zzo
    assert equals(m_copy.sigma, v_zzo, 1e-10)
    m_copy.variance_threshold = v_zo_
    assert equals(m_copy.sigma, v_zo, 1e-10)
    # Clean-up
    os.unlink(filename)
def test_plda_basemachine_loglikelihood_pointestimate():
    """Compares PLDABase's point-estimate log-likelihood with the numpy reference above."""
    # Data used for performing the tests
    # Features and subspaces dimensionality
    sigma = numpy.ndarray(C_dim_d, 'float64')
    sigma.fill(0.01)
    mu = numpy.ndarray(C_dim_d, 'float64')
    mu.fill(0)
    xij = numpy.array([0.7, 1.3, 2.5, 0.3, 1.3, 2.7, 0.9])
    hi = numpy.array([-0.5, 0.5])
    wij = numpy.array([-0.1, 0.2, 0.3])
    m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
    # Sets the current mu, F, G and sigma
    m.mu = mu
    m.f = C_F
    m.g = C_G
    m.sigma = sigma
    #assert equals(m.compute_log_likelihood_point_estimate(xij, hi, wij), compute_log_likelihood_point_estimate(xij, mu, C_F, C_G, sigma, hi, wij), 1e-6)
    log_likelihood_point_estimate = m.compute_log_likelihood_point_estimate(xij, hi, wij)
    log_likelihood_point_estimate_python = compute_log_likelihood_point_estimate(xij, mu, C_F, C_G, sigma, hi, wij)
    assert equals(log_likelihood_point_estimate, log_likelihood_point_estimate_python, 1e-6)
def test_plda_machine():
    """Checks PLDAMachine construction, setters/getters and the HDF5 save/load round-trip."""
    # Data used for performing the tests
    # Features and subspaces dimensionality
    sigma = numpy.ndarray(C_dim_d, 'float64')
    sigma.fill(0.01)
    mu = numpy.ndarray(C_dim_d, 'float64')
    mu.fill(0)
    # Defines base machine
    mb = PLDABase(C_dim_d, C_dim_f, C_dim_g)
    # Sets the current mu, F, G and sigma
    mb.mu = mu
    mb.f = C_F
    mb.g = C_G
    mb.sigma = sigma
    # Test constructors and dim getters
    m = PLDAMachine(mb)
    assert m.shape[0] == C_dim_d
    assert m.shape[1]== C_dim_f
    assert m.shape[2] == C_dim_g
    m0 = PLDAMachine(mb)
    #m0.plda_base = mb
    assert m0.shape[0] == C_dim_d
    assert m0.shape[1] == C_dim_f
    assert m0.shape[2] == C_dim_g
    # Defines machine
    n_samples = 2
    WSumXitBetaXi = 0.37
    weightedSum = numpy.array([1.39,0.54], 'float64')
    log_likelihood = -0.22
    m.n_samples = n_samples
    m.w_sum_xit_beta_xi = WSumXitBetaXi
    m.weighted_sum = weightedSum
    m.log_likelihood = log_likelihood
    gamma3 = m.get_add_gamma(3).copy()
    constTerm3 = m.get_add_log_like_const_term(3)
    # Saves to file, loads and compares to original
    filename = str(tempfile.mkstemp(".hdf5")[1])
    m.save(bob.io.base.HDF5File(filename, 'w'))
    m_loaded = PLDAMachine(bob.io.base.HDF5File(filename), mb)
    # Compares the values loaded with the former ones
    assert m_loaded == m
    assert (m_loaded != m) is False
    assert abs(m_loaded.n_samples - n_samples) < 1e-10
    assert abs(m_loaded.w_sum_xit_beta_xi - WSumXitBetaXi) < 1e-10
    assert equals(m_loaded.weighted_sum, weightedSum, 1e-10)
    assert abs(m_loaded.log_likelihood - log_likelihood) < 1e-10
    assert m_loaded.has_gamma(3)
    assert equals(m_loaded.get_add_gamma(3), gamma3, 1e-10)
    assert equals(m_loaded.get_gamma(3), gamma3, 1e-10)
    assert m_loaded.has_log_like_const_term(3)
    assert abs(m_loaded.get_add_log_like_const_term(3) - constTerm3) < 1e-10
    assert abs(m_loaded.get_log_like_const_term(3) - constTerm3) < 1e-10
    # Test clear_maps method
    assert m_loaded.has_gamma(3)
    assert m_loaded.has_log_like_const_term(3)
    m_loaded.clear_maps()
    assert (m_loaded.has_gamma(3)) is False
    assert (m_loaded.has_log_like_const_term(3)) is False
    # Check exceptions
    #m_loaded2 = PLDAMachine(bob.io.base.HDF5File(filename))
    #m_loaded2.load(bob.io.base.HDF5File(filename))
    #nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'shape')
    #nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'dim_f')
    #nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'dim_g')
    #nose.tools.assert_raises(RuntimeError, m_loaded2.forward, [1.])
    #nose.tools.assert_raises(RuntimeError, m_loaded2.compute_log_likelihood, [1.])
    # Clean-up
    os.unlink(filename)
def test_plda_machine_log_likelihood_Python():
    """Compares PLDAMachine.compute_log_likelihood and the forward operator against
    the pure-numpy compute_log_likelihood reference, on random samples."""
    # Data used for performing the tests
    # Features and subspaces dimensionality
    sigma = numpy.ndarray(C_dim_d, 'float64')
    sigma.fill(0.01)
    mu = numpy.ndarray(C_dim_d, 'float64')
    mu.fill(0)
    # Defines base machine
    mb = PLDABase(C_dim_d, C_dim_f, C_dim_g)
    # Sets the current mu, F, G and sigma
    mb.mu = mu
    mb.f = C_F
    mb.g = C_G
    mb.sigma = sigma
    # Defines machine
    m = PLDAMachine(mb)
    # Defines (random) samples and check compute_log_likelihood method
    ar_e = numpy.random.randn(2,C_dim_d)
    ar_p = numpy.random.randn(C_dim_d)
    ar_s = numpy.vstack([ar_e, ar_p])
    assert abs(m.compute_log_likelihood(ar_s, False) - compute_log_likelihood(ar_s, mu, C_F, C_G, sigma)) < 1e-10
    ar_p2d = numpy.reshape(ar_p, (1,C_dim_d))
    # NOTE(review): `a` is unused; kept for byte-compatibility.
    a = m.compute_log_likelihood(ar_p, False)
    assert abs(m.compute_log_likelihood(ar_p, False) - compute_log_likelihood(ar_p2d, mu, C_F, C_G, sigma)) < 1e-10
    # Defines (random) samples and check forward method
    ar2_e = numpy.random.randn(4,C_dim_d)
    ar2_p = numpy.random.randn(C_dim_d)
    ar2_s = numpy.vstack([ar2_e, ar2_p])
    m.log_likelihood = m.compute_log_likelihood(ar2_e, False)
    llr = m.compute_log_likelihood(ar2_s, True) - (m.compute_log_likelihood(ar2_s, False) + m.log_likelihood)
    assert abs(m(ar2_s) - llr) < 1e-10
    ar2_p2d = numpy.random.randn(3,C_dim_d)
    ar2_s2d = numpy.vstack([ar2_e, ar2_p2d])
    llr2d = m.compute_log_likelihood(ar2_s2d, True) - (m.compute_log_likelihood(ar2_s2d, False) + m.log_likelihood)
    assert abs(m(ar2_s2d) - llr2d) < 1e-10
def test_plda_machine_log_likelihood_Prince():
    """Compares PLDAMachine log-likelihoods against reference values obtained with
    Prince's PLDA implementation."""
    # Data used for performing the tests
    # Features and subspaces dimensionality
    D = 7
    nf = 2
    ng = 3
    # initial values for F, G and sigma
    G_init=numpy.array([-1.1424, -0.5044, -0.1917,
        -0.6249, 0.1021, -0.8658,
        -1.1687, 1.1963, 0.1807,
        0.3926, 0.1203, 1.2665,
        1.3018, -1.0368, -0.2512,
        -0.5936, -0.8571, -0.2046,
        0.4364, -0.1699, -2.2015]).reshape(D,ng)
    # F <-> PCA on G
    F_init=numpy.array([-0.054222647972093, -0.000000000783146,
        0.596449127693018, 0.000000006265167,
        0.298224563846509, 0.000000003132583,
        0.447336845769764, 0.000000009397750,
        -0.108445295944185, -0.000000001566292,
        -0.501559493741856, -0.000000006265167,
        -0.298224563846509, -0.000000003132583]).reshape(D,nf)
    sigma_init = 0.01 * numpy.ones((D,), 'float64')
    mean_zero = numpy.zeros((D,), 'float64')
    # base machine
    mb = PLDABase(D,nf,ng)
    mb.sigma = sigma_init
    mb.g = G_init
    mb.f = F_init
    mb.mu = mean_zero
    # Data for likelihood computation
    x1 = numpy.array([0.8032, 0.3503, 0.4587, 0.9511, 0.1330, 0.0703, 0.7061])
    x2 = numpy.array([0.9317, 0.1089, 0.6517, 0.1461, 0.6940, 0.6256, 0.0437])
    x3 = numpy.array([0.7979, 0.9862, 0.4367, 0.3447, 0.0488, 0.2252, 0.5810])
    X = numpy.ndarray((3,D), 'float64')
    X[0,:] = x1
    X[1,:] = x2
    X[2,:] = x3
    # NOTE(review): `a` duplicates X's rows but is never used afterwards.
    a = []
    a.append(x1)
    a.append(x2)
    a.append(x3)
    a = numpy.array(a)
    # reference likelihood from Prince implementation
    ll_ref = -182.8880743535197
    # machine
    m = PLDAMachine(mb)
    ll = m.compute_log_likelihood(X)
    assert abs(ll - ll_ref) < 1e-10
    # log likelihood ratio
    Y = numpy.ndarray((2,D), 'float64')
    Y[0,:] = x1
    Y[1,:] = x2
    Z = numpy.ndarray((1,D), 'float64')
    Z[0,:] = x3
    llX = m.compute_log_likelihood(X)
    llY = m.compute_log_likelihood(Y)
    llZ = m.compute_log_likelihood(Z)
    # reference obtained by computing the likelihood of [x1,x2,x3], [x1,x2]
    # and [x3] separately
    llr_ref = -4.43695386675
    assert abs((llX - (llY + llZ)) - llr_ref) < 1e-10
| en | 0.684644 | #!/usr/bin/env python # vim: set fileencoding=utf-8 : # <NAME> <<EMAIL>> # Sat Oct 22 23:01:09 2011 +0200 # # Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland Tests PLDA machine # Defines common variables globally # Dimensionalities # Values for F and G # F <-> PCA on G # Inverse of a diagonal matrix (represented by a 1D numpy array) # alpha = (Id + G^T.sigma^-1.G)^-1 = \mathcal{G} # beta = (sigma + G.G^T)^-1 = sigma^-1 - sigma^-1.G.alpha.G^T.sigma^-1 = \mathcal{S} # gamma_a = (Id + a.F^T.beta.F)^-1 = \mathcal{F}_{a} # F^T.beta = F^T.\mathcal{S} # G^T.sigma^-1 # \log(\det(\alpha)) = \log(\det(\mathcal{G})) # \log(\det(\sigma)) = \log(\det(\sigma)) = \log(\prod(\sigma_{i})) # loglike_constterm[a] = a/2 * ( -D*\log(2*pi) -\log|\sigma| +\log|\alpha| +\log|\gamma_a|) This function computes p(x_{ij} | h_{i}, w_{ij}, \Theta), which is given by N_{x}[\mu + Fh_{i} + Gw_{ij} + epsilon_{ij}, \Sigma], N_{x} being a Gaussian distribution. As it returns the corresponding log likelihood, this is given by the sum of the following three terms: C1 = -dim_d/2 log(2pi) C2 = -1/2 log(det(\Sigma)) C3 = -1/2 (x_{ij}-\mu-Fh_{i}-Gw_{ij})^{T}\Sigma^{-1}(x_{ij}-\mu-Fh_{i}-Gw_{ij}) ### Pre-computes some of the constants # A scalar # A scalar # A scalar # (dim_d, 1) ### Subtract the identity and session components from the observed vector. ### Now calculate C3 # (dim_d, 1) ### Returns the log likelihood This function computes the log-likelihood of the observations given the parameters of the PLDA model. This is done by fulling integrating out the latent variables. # Work out the number of samples that we have and normalise the data. # An integer > 0 # (J_i, D_x) # There are three terms that need to be computed: C1, C2 and C3 # 1. Computes C1 # C1 = - J_{i} * dim_d/2 log(2*pi) # A scalar # A scalar # A scalar # 2. Computes C2 # C2 = - J_i/2 * [log(det(sigma)) - log(det(alpha^-1))] + log(det(gamma_{J_i}))/2 # 3. 
Computes C3 # This is a quadratic part and consists of # C3 = -0.5 * sum x^T beta x + 0.5 * Quadratic term in x # C3 = -0.5 * (C3a - C3b) ### Calculations for C3a # (D_x, 1) # (D_x, 1) # A floating point value # A floating point value ### Calculations for C3b # (nf, 1) ### Final calculations for C3b, using the matrix gamma_{J_i} # Data used for performing the tests # Defines reference results based on matlab # Constructor tests #m = PLDABase() #assert m.dim_d == 0 #assert m.dim_f == 0 #assert m.dim_g == 0 #del m # Defines base machine #m.resize(C_dim_d, C_dim_f, C_dim_g) # Sets the current mu, F, G and sigma # Compares precomputed values to matlab reference # Compares precomputed values to the ones returned by python implementation # Defines base machine # Sets the current mu, F, G and sigma # Compares precomputed values to matlab reference # values before being saved # Saves to file, loads and compares to original # Compares the values loaded with the former ones # Compares the values loaded with the former ones when copying # Test clear_maps method # Check variance flooring thresholds-related methods # Clean-up # Data used for performing the tests # Features and subspaces dimensionality # Sets the current mu, F, G and sigma #assert equals(m.compute_log_likelihood_point_estimate(xij, hi, wij), compute_log_likelihood_point_estimate(xij, mu, C_F, C_G, sigma, hi, wij), 1e-6) # Data used for performing the tests # Features and subspaces dimensionality # Defines base machine # Sets the current mu, F, G and sigma # Test constructors and dim getters #m0.plda_base = mb # Defines machine # Saves to file, loads and compares to original # Compares the values loaded with the former ones # Test clear_maps method # Check exceptions #m_loaded2 = PLDAMachine(bob.io.base.HDF5File(filename)) #m_loaded2.load(bob.io.base.HDF5File(filename)) #nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'shape') #nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'dim_f') 
#nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'dim_g') #nose.tools.assert_raises(RuntimeError, m_loaded2.forward, [1.]) #nose.tools.assert_raises(RuntimeError, m_loaded2.compute_log_likelihood, [1.]) # Clean-up # Data used for performing the tests # Features and subspaces dimensionality # Defines base machine # Sets the current mu, F, G and sigma # Defines machine # Defines (random) samples and check compute_log_likelihood method # Defines (random) samples and check forward method # Data used for performing the tests # Features and subspaces dimensionality # initial values for F, G and sigma # F <-> PCA on G # base machine # Data for likelihood computation # reference likelihood from Prince implementation # machine # log likelihood ratio # reference obtained by computing the likelihood of [x1,x2,x3], [x1,x2] # and [x3] separately | 2.405302 | 2 |
day11/main.py | VincentBeltman/aoc2020 | 0 | 6618963 | <reponame>VincentBeltman/aoc2020
def print_map_part1(seats):
    """Render the seating grid to stdout, preceded by a blank separator line."""
    print("")
    for row in seats:
        rendered = "".join(row)
        print(rendered)
def get_neighbours_part_1(x, y, max_x, max_y):
    """Return the in-bounds cells among the 8 immediately adjacent to (x, y)."""
    offsets = ((-1, 0), (-1, -1), (0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1))
    result = []
    for dx, dy in offsets:
        nx, ny = x + dx, y + dy
        if 0 <= nx <= max_x and 0 <= ny <= max_y:
            result.append((nx, ny))
    return result
def maps_are_equal_part1(a, b):
    """Return True when two seat grids hold the same symbol in every cell.

    List equality compares nested lists element-wise, which matches the old
    cell-by-cell index walk for same-sized grids and — unlike the old code,
    which raised IndexError — simply reports False for grids of different
    dimensions.
    """
    return a == b
def count_occupied_seats_part1(seats):
    """Total number of occupied ('#') cells in the grid."""
    return sum(row.count('#') for row in seats)
def part_1_iterate(seats):
    """One round of the part-1 rules: an empty seat ('L') with no occupied
    neighbours becomes occupied ('#'); an occupied seat with 4+ occupied
    neighbours empties; floor ('.') never changes.  Returns the next grid."""
    tmp = []
    max_y = len(seats) - 1
    max_x = len(seats[0]) - 1
    for y in range(0, len(seats)):
        row = seats[y]
        # Shallow row copy so this round's updates don't affect its reads.
        tmp.append(row[:])
        for x in range(0, len(row)):
            if seats[y][x] != '.':
                nrOfOccupied = 0
                for n_x, n_y in get_neighbours_part_1(x, y, max_x, max_y):
                    if seats[n_y][n_x] == '#':
                        nrOfOccupied += 1
                if nrOfOccupied == 0:
                    tmp[y][x] = '#'
                elif nrOfOccupied >= 4:
                    tmp[y][x] = 'L'
    return tmp
def part_1(seats):
    """Iterate the part-1 rules until the grid stabilises, then print the
    iteration index and the number of occupied seats."""
    print_map_part1(seats)
    i = 0
    while True:
        tmp = part_1_iterate(seats)
        # A fixed point means the simulation has converged.
        if maps_are_equal_part1(seats, tmp):
            print_map_part1(seats)
            nr_of_occupied_seats = count_occupied_seats_part1(seats)
            print(i, nr_of_occupied_seats)
            break
        seats = tmp
        i += 1
        print(i)
def part_2_iterate(seats):
    """One round of the part-2 rules over the neighbour-annotated grid:
    fill on 0 visible occupied seats, vacate on 5 or more.  Each cell is a
    dict with "content" (the symbol) and "neighbours" (visible-seat coords)."""
    tmp = []
    for y in range(0, len(seats)):
        row = seats[y]
        tmp.append([])
        for x in range(0, len(row)):
            # Fresh cell dicts; the precomputed neighbour lists are shared.
            tmp[y].append({"content": seats[y][x]["content"], "neighbours": seats[y][x]["neighbours"]})
            if seats[y][x]["content"] != '.':
                nrOfOccupied = 0
                for n_x, n_y in seats[y][x]["neighbours"]:
                    if seats[n_y][n_x]['content'] == '#':
                        nrOfOccupied += 1
                if nrOfOccupied == 0:
                    tmp[y][x]["content"] = '#'
                elif nrOfOccupied >= 5:
                    tmp[y][x]['content'] = 'L'
    return tmp
def find_first_visible_neighbour(seats, x, y, x_dir, y_dir):
    """Walk from (x, y) along direction (x_dir, y_dir) until a seat is hit.

    Returns the coordinates of the first non-floor ('.') cell on that ray,
    or (-1, -1) when the ray leaves the grid first.
    """
    max_x = len(seats[0]) - 1
    max_y = len(seats) - 1
    while True:
        x += x_dir
        y += y_dir
        if not (0 <= x <= max_x and 0 <= y <= max_y):
            return -1, -1
        if seats[y][x] != '.':
            return x, y
def maps_are_equal_part2(a, b):
    """True when both neighbour-annotated grids agree on every cell's content."""
    for row_a, row_b in zip(a, b):
        for cell_a, cell_b in zip(row_a, row_b):
            if cell_a["content"] != cell_b["content"]:
                return False
    return True
def count_occupied_seats_part2(seats):
    """Total number of cells whose content is '#' in the annotated grid."""
    return sum(1 for row in seats for seat in row if seat["content"] == '#')
def parse_neighbours(seats):
    """Annotate every cell with the first visible seat in each of the 8
    directions, returning a grid of {"content", "neighbours"} dicts."""
    result = []
    for y in range(0, len(seats)):
        row = []
        for x in range(0, len(seats[0])):
            # Ray-cast in all 8 directions; (-1, -1) marks "no seat visible"
            # and is filtered out by the n_x/n_y >= 0 condition.
            neighbours = [(n_x, n_y) for n_x, n_y in [
                find_first_visible_neighbour(seats, x, y, -1, -1),
                find_first_visible_neighbour(seats, x, y, -1, 1),
                find_first_visible_neighbour(seats, x, y, 0, -1),
                find_first_visible_neighbour(seats, x, y, 0, 1),
                find_first_visible_neighbour(seats, x, y, -1, 0),
                find_first_visible_neighbour(seats, x, y, 1, -1),
                find_first_visible_neighbour(seats, x, y, 1, 0),
                find_first_visible_neighbour(seats, x, y, 1, 1)]
                if n_x >= 0 and n_y >= 0]
            row.append({"content": seats[y][x], "neighbours": neighbours})
        result.append(row)
    return result
def print_map_part2(seats):
    """Render the annotated grid (content field only), preceded by a blank line."""
    print("")
    for row in seats:
        chars = [cell["content"] for cell in row]
        print("".join(chars))
def part_2(seats):
    """Iterate the part-2 rules until the grid stabilises, then print the
    iteration index and the number of occupied seats."""
    seats = parse_neighbours(seats)
    i = 0
    while True:
        print_map_part2(seats)
        tmp = part_2_iterate(seats)
        # A fixed point means the simulation has converged.
        if maps_are_equal_part2(seats, tmp):
            nr_of_occupied_seats = count_occupied_seats_part2(seats)
            print(i, nr_of_occupied_seats)
            break
        seats = tmp
        i += 1
        print(i)
def execute(filename):
    """Load the seat grid from ``filename`` (one row per line) and run part 2."""
    with open(filename) as file:
        seats = []
        for line in file.read().splitlines():
            seats.append([a for a in line])
        part_2(seats)
# Script entry point; expects the puzzle input in "test2.txt" in the working directory.
if __name__ == '__main__':
    execute("test2.txt")
| def print_map_part1(seats):
print("")
for line in seats:
print("".join(line))
# NOTE(review): duplicate copy of the day-11 solution (dataset 'content_cleaned'
# column); code kept byte-identical, comments added only.
def get_neighbours_part_1(x, y, max_x, max_y):
    """Return the in-bounds cells among the 8 immediately adjacent to (x, y)."""
    neighbours = [(x-1, y), (x-1, y-1), (x, y-1), (x+1, y-1), (x+1, y), (x+1, y+1), (x, y+1), (x-1, y+1)]
    return [(x, y) for x, y in neighbours if 0 <= x <= max_x and 0 <= y <= max_y]


def maps_are_equal_part1(a, b):
    """True when two seat grids hold the same symbol in every cell (same dims assumed)."""
    for y in range(0, len(a)):
        for x in range(0, len(a[y])):
            if a[y][x] != b[y][x]:
                return False
    return True


def count_occupied_seats_part1(seats):
    """Total number of occupied ('#') cells in the grid."""
    nr_of_occupied_seats = 0
    for row in seats:
        for seat in row:
            if seat == '#':
                nr_of_occupied_seats += 1
    return nr_of_occupied_seats


def part_1_iterate(seats):
    """One round of part-1 rules: fill on 0 occupied neighbours, vacate on 4+."""
    tmp = []
    max_y = len(seats) - 1
    max_x = len(seats[0]) - 1
    for y in range(0, len(seats)):
        row = seats[y]
        tmp.append(row[:])
        for x in range(0, len(row)):
            if seats[y][x] != '.':
                nrOfOccupied = 0
                for n_x, n_y in get_neighbours_part_1(x, y, max_x, max_y):
                    if seats[n_y][n_x] == '#':
                        nrOfOccupied += 1
                if nrOfOccupied == 0:
                    tmp[y][x] = '#'
                elif nrOfOccupied >= 4:
                    tmp[y][x] = 'L'
    return tmp


def part_1(seats):
    """Iterate part-1 rules to a fixed point and print the occupied-seat count."""
    print_map_part1(seats)
    i = 0
    while True:
        tmp = part_1_iterate(seats)
        if maps_are_equal_part1(seats, tmp):
            print_map_part1(seats)
            nr_of_occupied_seats = count_occupied_seats_part1(seats)
            print(i, nr_of_occupied_seats)
            break
        seats = tmp
        i += 1
        print(i)


def part_2_iterate(seats):
    """One round of part-2 rules: fill on 0 visible occupied seats, vacate on 5+."""
    tmp = []
    for y in range(0, len(seats)):
        row = seats[y]
        tmp.append([])
        for x in range(0, len(row)):
            tmp[y].append({"content": seats[y][x]["content"], "neighbours": seats[y][x]["neighbours"]})
            if seats[y][x]["content"] != '.':
                nrOfOccupied = 0
                for n_x, n_y in seats[y][x]["neighbours"]:
                    if seats[n_y][n_x]['content'] == '#':
                        nrOfOccupied += 1
                if nrOfOccupied == 0:
                    tmp[y][x]["content"] = '#'
                elif nrOfOccupied >= 5:
                    tmp[y][x]['content'] = 'L'
    return tmp


def find_first_visible_neighbour(seats, x, y, x_dir, y_dir):
    """First non-floor cell along (x_dir, y_dir) from (x, y), or (-1, -1)."""
    x += x_dir
    y += y_dir
    if 0 <= x <= (len(seats[0]) - 1) and 0 <= y <= (len(seats) - 1):
        if seats[y][x] != '.':
            return x, y
        else:
            return find_first_visible_neighbour(seats, x, y, x_dir, y_dir)
    else:
        return -1, -1


def maps_are_equal_part2(a, b):
    """True when both annotated grids agree on every cell's content."""
    for y in range(0, len(a)):
        for x in range(0, len(a[y])):
            if a[y][x]["content"] != b[y][x]["content"]:
                return False
    return True


def count_occupied_seats_part2(seats):
    """Total number of cells whose content is '#'."""
    nr_of_occupied_seats = 0
    for row in seats:
        for seat in row:
            if seat["content"] == '#':
                nr_of_occupied_seats += 1
    return nr_of_occupied_seats


def parse_neighbours(seats):
    """Annotate every cell with its first visible seat in each of 8 directions."""
    result = []
    for y in range(0, len(seats)):
        row = []
        for x in range(0, len(seats[0])):
            neighbours = [(n_x, n_y) for n_x, n_y in [
                find_first_visible_neighbour(seats, x, y, -1, -1),
                find_first_visible_neighbour(seats, x, y, -1, 1),
                find_first_visible_neighbour(seats, x, y, 0, -1),
                find_first_visible_neighbour(seats, x, y, 0, 1),
                find_first_visible_neighbour(seats, x, y, -1, 0),
                find_first_visible_neighbour(seats, x, y, 1, -1),
                find_first_visible_neighbour(seats, x, y, 1, 0),
                find_first_visible_neighbour(seats, x, y, 1, 1)]
                if n_x >= 0 and n_y >= 0]
            row.append({"content": seats[y][x], "neighbours": neighbours})
        result.append(row)
    return result


def print_map_part2(seats):
    """Render the annotated grid (content only), preceded by a blank line."""
    print("")
    for line in seats:
        print("".join([item["content"] for item in line]))


def part_2(seats):
    """Iterate part-2 rules to a fixed point and print the occupied-seat count."""
    seats = parse_neighbours(seats)
    i = 0
    while True:
        print_map_part2(seats)
        tmp = part_2_iterate(seats)
        if maps_are_equal_part2(seats, tmp):
            nr_of_occupied_seats = count_occupied_seats_part2(seats)
            print(i, nr_of_occupied_seats)
            break
        seats = tmp
        i += 1
        print(i)


def execute(filename):
    """Load the seat grid from ``filename`` and run part 2."""
    with open(filename) as file:
        seats = []
        for line in file.read().splitlines():
            seats.append([a for a in line])
        part_2(seats)


if __name__ == '__main__':
    execute("test2.txt")
20_NumericStrings/NumericStrings.py | DevRoss/CodingInterviewChinese2 | 0 | 6618964 | <reponame>DevRoss/CodingInterviewChinese2<filename>20_NumericStrings/NumericStrings.py<gh_stars>0
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 19-1-15
def scan_unsigned_int(str_num: list):
    """Consume a (possibly empty) run of leading digits from ``str_num``.

    ``str_num`` is mutated in place.  Returns True iff at least one digit
    was consumed.
    """
    before_len = len(str_num)
    while str_num and str_num[0].isdigit():
        str_num.pop(0)
    return len(str_num) < before_len


def scan_int(str_num: list):
    """Consume an optionally signed integer from the front of ``str_num``.

    Returns True iff at least one digit was consumed after the optional
    sign.  The emptiness guard fixes an IndexError the original raised for
    inputs with a dangling exponent/sign, e.g. solve('12e').
    """
    if str_num and (str_num[0] == '+' or str_num[0] == '-'):
        str_num.pop(0)
    return scan_unsigned_int(str_num)
def solve(str_num: list):
    # Validates a numeric literal of the form A[.B][e|E C], where A and C are
    # optionally signed integers and B is an unsigned integer.
    str_num = list(str_num)
    if not str_num:
        return False
    # Integer part (may carry a sign).
    numeric = scan_int(str_num)
    if len(str_num) and str_num[0] == '.':
        str_num.pop(0)
        # A fractional part alone (e.g. ".5") is enough to make it numeric.
        numeric = scan_unsigned_int(str_num) or numeric
    if len(str_num) and str_num[0].lower() == 'e':
        str_num.pop(0)
        # An exponent needs both a valid mantissa and a valid exponent integer.
        numeric = numeric and scan_int(str_num)
    # Valid only if the whole input was consumed.
    return numeric and (len(str_num) == 0)
if __name__ == '__main__':
    # Ad-hoc smoke tests; expected output:
    # False, True, True, True, True, False, False, False
    print(solve(''))
    print(solve('+100'))
    print(solve('-123'))
    print(solve('-1.1445'))
    print(solve('-1E-16'))
    print(solve('-1a3.24'))
    print(solve('+-4'))
    print(solve('12e+5.4'))
| #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 19-1-15
# NOTE(review): duplicate copy of NumericStrings.py (dataset 'content_cleaned'
# column); code kept byte-identical, comments added only.
def scan_unsigned_int(str_num: list):
    """Consume leading digits from ``str_num`` in place; True iff any were consumed."""
    before_len = len(str_num)
    while len(str_num) and str_num[0].isdigit():
        str_num.pop(0)
    return len(str_num) < before_len


def scan_int(str_num: list):
    """Consume an optionally signed integer from the front of ``str_num``."""
    # NOTE(review): str_num may be empty here (e.g. input '12e'), in which
    # case str_num[0] raises IndexError.
    if str_num[0] == '+' or str_num[0] == '-':
        str_num.pop(0)
    return scan_unsigned_int(str_num)


def solve(str_num: list):
    """True iff the input spells a numeric literal A[.B][e|E C]."""
    str_num = list(str_num)
    if not str_num:
        return False
    numeric = scan_int(str_num)
    if len(str_num) and str_num[0] == '.':
        str_num.pop(0)
        numeric = scan_unsigned_int(str_num) or numeric
    if len(str_num) and str_num[0].lower() == 'e':
        str_num.pop(0)
        numeric = numeric and scan_int(str_num)
    return numeric and (len(str_num) == 0)


if __name__ == '__main__':
    # Ad-hoc smoke tests.
    print(solve(''))
    print(solve('+100'))
    print(solve('-123'))
    print(solve('-1.1445'))
    print(solve('-1E-16'))
    print(solve('-1a3.24'))
    print(solve('+-4'))
    print(solve('12e+5.4'))
examples/run_examples.py | paulkogni/backpack | 0 | 6618965 | <filename>examples/run_examples.py
"""
Run all example files.
Example files are identified by the pattern 'example_*.py'.
"""
import glob
import os
import subprocess
import sys
# Directory containing this script; example files live next to it.
HERE = os.path.dirname(os.path.realpath(__file__))
PATTERN = os.path.join(HERE, r"example_*.py")
FILES = glob.glob(PATTERN)

for example in FILES:
    print("\nRunning {}".format(example))
    # sys.executable guarantees the examples run under the same interpreter
    # (and virtualenv) as this script; a bare "python" on PATH may differ.
    exit_code = subprocess.call([sys.executable, example])

    crash = exit_code != 0
    if crash:
        raise RuntimeError("Error running {}".format(example))
| <filename>examples/run_examples.py
"""
Run all example files.
Example files are identified by the pattern 'example_*.py'.
"""
import glob
import os
import subprocess
# NOTE(review): duplicate copy of run_examples.py (dataset 'content_cleaned'
# column); code kept byte-identical.
HERE = os.path.dirname(os.path.realpath(__file__))
PATTERN = os.path.join(HERE, r"example_*.py")
FILES = glob.glob(PATTERN)

for example in FILES:
    print("\nRunning {}".format(example))
    # NOTE(review): "python" resolves via PATH and may differ from the
    # interpreter running this script.
    exit_code = subprocess.call(["python", example])

    crash = exit_code != 0
    if crash:
        raise RuntimeError("Error running {}".format(example))
| en | 0.719816 | Run all example files. Example files are identified by the pattern 'example_*.py'. | 3.063243 | 3 |
neurokernel/tools/autoinit.py | KathyFeiyang/neurokernel | 235 | 6618966 | <reponame>KathyFeiyang/neurokernel<gh_stars>100-1000
#!/usr/bin/env python
"""
Autoinitialize multiple GPUs.
"""
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import pycuda.tools as tools
import atexit
class MultiGPUManager(object):
"""
Create and manage contexts for multiple GPUs.
Parameters
----------
gpus : list of int
IDs of GPUs for which to create contexts. If no IDs are
specified, create contexts for all GPUs on the system.
Methods
-------
switch(gpu)
Make the context associated with the specified GPU active.
Notes
-----
After instantiation, the context associated with the last
specified GPU is active.
"""
def __init__(self, *gpus):
N = drv.Device(0).count()
if len(gpus) == 0:
gpus = range(N)
if max(gpus) > N-1:
raise ValueError('nonexistent GPU specified')
self._curr_gpu = None
self.dev_dict = {}
self.ctx_dict = {}
for gpu in gpus:
dev = drv.Device(gpu)
self.dev_dict[gpu] = dev
ctx = dev.make_context()
self.ctx_dict[gpu] = ctx
def cleanup():
ctx.pop()
tools.clear_context_caches()
atexit.register(cleanup)
self._curr_gpu = gpu
@property
def curr_gpu(self):
"""
Return GPU associated with currently active context.
"""
return self._curr_gpu
def switch_gpu(self, gpu):
"""
Switch to the context associated with the specified GPU.
"""
if not self.ctx_dict.has_key(gpu):
raise ValueError('nonexistent GPU specified')
if gpu != self.curr_gpu:
self.ctx_dict[self._curr_gpu].pop()
self.ctx_dict[gpu].push()
self._curr_gpu = gpu
drv.init()
global gpu_ctx_manager
gpu_ctx_manager = MultiGPUManager()
curr_gpu = gpu_ctx_manager.curr_gpu
switch_gpu = gpu_ctx_manager.switch_gpu
if __name__ == '__main__':
import numpy as np
man = MultiGPUManager()
x_gpu = gpuarray.to_gpu(np.array([1, 2, 3]))
man.switch_gpu(0)
y_gpu = gpuarray.to_gpu(np.array([4, 5, 6]))
man.switch_gpu(1)
print x_gpu
man.switch_gpu(0)
print y_gpu
# This will cause an error:
print x_gpu
| #!/usr/bin/env python
"""
Autoinitialize multiple GPUs.
"""
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import pycuda.tools as tools
import atexit
class MultiGPUManager(object):
"""
Create and manage contexts for multiple GPUs.
Parameters
----------
gpus : list of int
IDs of GPUs for which to create contexts. If no IDs are
specified, create contexts for all GPUs on the system.
Methods
-------
switch(gpu)
Make the context associated with the specified GPU active.
Notes
-----
After instantiation, the context associated with the last
specified GPU is active.
"""
def __init__(self, *gpus):
N = drv.Device(0).count()
if len(gpus) == 0:
gpus = range(N)
if max(gpus) > N-1:
raise ValueError('nonexistent GPU specified')
self._curr_gpu = None
self.dev_dict = {}
self.ctx_dict = {}
for gpu in gpus:
dev = drv.Device(gpu)
self.dev_dict[gpu] = dev
ctx = dev.make_context()
self.ctx_dict[gpu] = ctx
def cleanup():
ctx.pop()
tools.clear_context_caches()
atexit.register(cleanup)
self._curr_gpu = gpu
@property
def curr_gpu(self):
"""
Return GPU associated with currently active context.
"""
return self._curr_gpu
def switch_gpu(self, gpu):
"""
Switch to the context associated with the specified GPU.
"""
if not self.ctx_dict.has_key(gpu):
raise ValueError('nonexistent GPU specified')
if gpu != self.curr_gpu:
self.ctx_dict[self._curr_gpu].pop()
self.ctx_dict[gpu].push()
self._curr_gpu = gpu
drv.init()
global gpu_ctx_manager
gpu_ctx_manager = MultiGPUManager()
curr_gpu = gpu_ctx_manager.curr_gpu
switch_gpu = gpu_ctx_manager.switch_gpu
if __name__ == '__main__':
import numpy as np
man = MultiGPUManager()
x_gpu = gpuarray.to_gpu(np.array([1, 2, 3]))
man.switch_gpu(0)
y_gpu = gpuarray.to_gpu(np.array([4, 5, 6]))
man.switch_gpu(1)
print x_gpu
man.switch_gpu(0)
print y_gpu
# This will cause an error:
print x_gpu | en | 0.70674 | #!/usr/bin/env python Autoinitialize multiple GPUs. Create and manage contexts for multiple GPUs. Parameters ---------- gpus : list of int IDs of GPUs for which to create contexts. If no IDs are specified, create contexts for all GPUs on the system. Methods ------- switch(gpu) Make the context associated with the specified GPU active. Notes ----- After instantiation, the context associated with the last specified GPU is active. Return GPU associated with currently active context. Switch to the context associated with the specified GPU. # This will cause an error: | 2.784395 | 3 |
build_server/teamcity_agent/shut_down_on_empty_queue.py | Empythy/geometry-learning | 21 | 6618967 | import http
import os
from datetime import datetime
import boto3
import requests
from slackclient import SlackClient
SCRIPT_NAME = os.path.basename(__file__)
TIMESTAMP = str(datetime.now()).replace(':', '.')
# Set this to the appropriate region
REGION_NAME = 'eu-west-1'
# Get environment variables
# Slack is required. We need to know if something is wrong
slack_token = os.environ['SLACK_API_TOKEN']
slack_channel = os.environ['SLACK_CHANNEL']
# We are also going to require Amazon credentials, set as environment variables
amazon_id = os.environ['AWS_ACCESS_KEY_ID']
amazon_key = os.environ['AWS_SECRET_ACCESS_KEY']
# Initialize frameworks
ec2 = boto3.client('ec2', region_name=REGION_NAME)
sc = SlackClient(slack_token)
# Slack notification function
def notify(signature, message):
sc.api_call("chat.postMessage", channel=slack_channel,
text="Script " + signature + " notification: " + str(message))
# Get build queue length
queue = "http://teamcity:8111/guestAuth/app/rest/buildQueue"
headers = {
'Accept': "application/json",
'Cache-Control': "no-cache",
}
queue_res = requests.get(queue, headers=headers)
queue_status = queue_res.json()
queue_length = queue_status['count']
# Get instance id for this machine
# https://stackoverflow.com/questions/33301880/how-to-obtain-current-instance-id-from-boto3#33307704
try:
instance_metadata = requests.get('http://1172.16.58.3/latest/meta-data/instance-id')
except ConnectionError as e:
notify(SCRIPT_NAME, 'ERROR getting instance id, cannot issue commands')
raise ConnectionError(e)
instance_id = instance_metadata.text
if queue_length == 0:
print('build server reports empty queue, shutting down.')
shutdown_res = ec2.stop_instances(InstanceIds=[instance_id])
http_status_code = shutdown_res['ResponseMetadata']['HTTPStatusCode']
http_status = http.HTTPStatus(http_status_code).name
if http_status_code == 200:
print('Stop instances:', http_status)
notify(SCRIPT_NAME, 'successful shutdown of {} with response {}'.format(instance_id, http_status))
else:
notify(SCRIPT_NAME, 'ERROR shutting down instance id: {}'.format(http_status))
else:
notify(SCRIPT_NAME, 'job finished, build server reports non-empty queue, continuing.')
| import http
import os
from datetime import datetime
import boto3
import requests
from slackclient import SlackClient
SCRIPT_NAME = os.path.basename(__file__)
TIMESTAMP = str(datetime.now()).replace(':', '.')
# Set this to the appropriate region
REGION_NAME = 'eu-west-1'
# Get environment variables
# Slack is required. We need to know if something is wrong
slack_token = os.environ['SLACK_API_TOKEN']
slack_channel = os.environ['SLACK_CHANNEL']
# We are also going to require Amazon credentials, set as environment variables
amazon_id = os.environ['AWS_ACCESS_KEY_ID']
amazon_key = os.environ['AWS_SECRET_ACCESS_KEY']
# Initialize frameworks
ec2 = boto3.client('ec2', region_name=REGION_NAME)
sc = SlackClient(slack_token)
# Slack notification function
def notify(signature, message):
sc.api_call("chat.postMessage", channel=slack_channel,
text="Script " + signature + " notification: " + str(message))
# Get build queue length
queue = "http://teamcity:8111/guestAuth/app/rest/buildQueue"
headers = {
'Accept': "application/json",
'Cache-Control': "no-cache",
}
queue_res = requests.get(queue, headers=headers)
queue_status = queue_res.json()
queue_length = queue_status['count']
# Get instance id for this machine
# https://stackoverflow.com/questions/33301880/how-to-obtain-current-instance-id-from-boto3#33307704
try:
instance_metadata = requests.get('http://1172.16.58.3/latest/meta-data/instance-id')
except ConnectionError as e:
notify(SCRIPT_NAME, 'ERROR getting instance id, cannot issue commands')
raise ConnectionError(e)
instance_id = instance_metadata.text
if queue_length == 0:
print('build server reports empty queue, shutting down.')
shutdown_res = ec2.stop_instances(InstanceIds=[instance_id])
http_status_code = shutdown_res['ResponseMetadata']['HTTPStatusCode']
http_status = http.HTTPStatus(http_status_code).name
if http_status_code == 200:
print('Stop instances:', http_status)
notify(SCRIPT_NAME, 'successful shutdown of {} with response {}'.format(instance_id, http_status))
else:
notify(SCRIPT_NAME, 'ERROR shutting down instance id: {}'.format(http_status))
else:
notify(SCRIPT_NAME, 'job finished, build server reports non-empty queue, continuing.')
| en | 0.836513 | # Set this to the appropriate region # Get environment variables # Slack is required. We need to know if something is wrong # We are also going to require Amazon credentials, set as environment variables # Initialize frameworks # Slack notification function # Get build queue length # Get instance id for this machine # https://stackoverflow.com/questions/33301880/how-to-obtain-current-instance-id-from-boto3#33307704 | 2.563172 | 3 |
src/outpost/django/typo3/views.py | medunigraz/outpost.django.typo3 | 0 | 6618968 | <gh_stars>0
import io
import logging
import requests
import mimeparse
from django.http import HttpResponse, HttpResponseNotFound
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django.views.generic import View
from PIL import Image
from . import models
from .conf import settings
logger = logging.getLogger(__name__)
@method_decorator(cache_page(3600), name="dispatch")
class MediaView(View):
def get(self, request, pk, width=None):
media = get_object_or_404(models.Media, pk=pk)
timeout = int(settings.TYPO3_MEDIA_CACHE_TIMEOUT.total_seconds())
response = HttpResponse()
try:
req = requests.get(media.url)
response["Cache-Control"] = f"private,max-age={timeout}"
contenttype = req.headers.get("Content-Type", "application/octet-stream")
maintype, *_ = mimeparse.parse_mime_type(contenttype)
if not width or maintype != "image":
response["Content-Type"] = contenttype
response.write(req.content)
return response
with Image.open(io.BytesIO(req.content)) as img:
fmt = img.format
response["Content-Type"] = Image.MIME[fmt]
width = int(width)
if img.width <= width:
response.write(req.content)
return response
height = int(img.height * (width / float(img.width)))
img = img.resize((width, height), Image.ANTIALIAS)
img.save(
response,
format=fmt,
quality=settings.TYPO3_MEDIA_CACHE_QUALITY,
optimize=True,
)
except Exception as e:
logger.warn(f"Failed to load image blob: {e}")
return HttpResponseNotFound()
return response
| import io
import logging
import requests
import mimeparse
from django.http import HttpResponse, HttpResponseNotFound
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django.views.generic import View
from PIL import Image
from . import models
from .conf import settings
logger = logging.getLogger(__name__)
@method_decorator(cache_page(3600), name="dispatch")
class MediaView(View):
def get(self, request, pk, width=None):
media = get_object_or_404(models.Media, pk=pk)
timeout = int(settings.TYPO3_MEDIA_CACHE_TIMEOUT.total_seconds())
response = HttpResponse()
try:
req = requests.get(media.url)
response["Cache-Control"] = f"private,max-age={timeout}"
contenttype = req.headers.get("Content-Type", "application/octet-stream")
maintype, *_ = mimeparse.parse_mime_type(contenttype)
if not width or maintype != "image":
response["Content-Type"] = contenttype
response.write(req.content)
return response
with Image.open(io.BytesIO(req.content)) as img:
fmt = img.format
response["Content-Type"] = Image.MIME[fmt]
width = int(width)
if img.width <= width:
response.write(req.content)
return response
height = int(img.height * (width / float(img.width)))
img = img.resize((width, height), Image.ANTIALIAS)
img.save(
response,
format=fmt,
quality=settings.TYPO3_MEDIA_CACHE_QUALITY,
optimize=True,
)
except Exception as e:
logger.warn(f"Failed to load image blob: {e}")
return HttpResponseNotFound()
return response | none | 1 | 2.04934 | 2 | |
run_scripts/eval_policy.py | apexrl/COIL | 15 | 6618969 | <filename>run_scripts/eval_policy.py
import yaml
import argparse
import joblib
import numpy as np
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
print(sys.path)
from gym.spaces import Dict
from rlkit.envs import get_env
import rlkit.torch.pytorch_util as ptu
from rlkit.launchers.launcher_util import setup_logger, set_seed, logger
from rlkit.core import eval_util
from rlkit.torch.sac.policies import ReparamTanhMultivariateGaussianPolicy
from rlkit.envs.wrappers import ScaledEnv
from rlkit.samplers import PathSampler
from rlkit.torch.sac.policies import MakeDeterministic
def experiment(variant, seed):
# with open('expert_demos_listing.yaml', 'r') as f:
# listings = yaml.load(f.read())ssssss
# expert_demos_path = listings[variant['expert_name']]['file_paths'][variant['expert_idx']]
# buffer_save_dict = joblib.load(expert_demos_path)
# expert_replay_buffer = buffer_save_dict['train']
# if 'minmax_env_with_demo_stats' in variant.keys():
# if variant['minmax_env_with_demo_stats']:
# print('Use minmax envs')
# assert 'norm_train' in buffer_save_dict.keys()
# expert_replay_buffer = buffer_save_dict['norm_train']
env_specs = variant['env_specs']
env = get_env(env_specs)
env.seed(seed)
env.reset()
obs_space = env.observation_space
act_space = env.action_space
assert not isinstance(obs_space, Dict)
assert len(obs_space.shape) == 1
assert len(act_space.shape) == 1
obs_dim = obs_space.shape[0]
action_dim = act_space.shape[0]
print('\n\nEnv: {}'.format(env_specs['env_name']))
print('kwargs: {}'.format(env_specs['env_kwargs']))
print('Obs Space: {}'.format(env.observation_space))
print('Act Space: {}\n\n'.format(env.action_space))
# if variant['scale_env_with_demo_stats']:
# env = ScaledEnv(
# env,
# obs_mean=buffer_save_dict['obs_mean'],
# obs_std=buffer_save_dict['obs_std'],
# acts_mean=buffer_save_dict['acts_mean'],
# acts_std=buffer_save_dict['acts_std'],
# )
#
# elif variant['minmax_env_with_demo_stats']:
# env = MinmaxEnv(
# env,
# obs_min=buffer_save_dict['obs_min'],
# obs_max=buffer_save_dict['obs_max'],
# )
if variant['test_random']:
net_size = 256
num_hidden = 2
policy = ReparamTanhMultivariateGaussianPolicy(
hidden_sizes=num_hidden * [net_size],
obs_dim=obs_dim,
action_dim=action_dim,
)
if variant['eval_deterministic']:
policy = MakeDeterministic(policy)
policy.to(ptu.device)
eval_sampler = PathSampler(
env,
policy,
variant['num_eval_steps'],
variant['max_path_length'],
no_terminal=variant['no_terminal'],
render=variant['render'],
render_kwargs=variant['render_kwargs']
)
test_paths = eval_sampler.obtain_samples()
average_returns, average_stds = eval_util.get_average_returns(test_paths, True)
logger.log('random mean: {}'.format(average_returns))
logger.log('random std: {}'.format(average_stds))
policy_checkpoint = variant['policy_checkpoint']
print('Policy Checkpoint: %s' % policy_checkpoint)
dirs = [_ for _ in os.listdir(policy_checkpoint) if os.path.isdir(os.path.join(policy_checkpoint, _))]
test_paths = []
for policy_name in variant['policy_name']:
for dir_name in dirs:
policy_path = os.path.join(policy_checkpoint, dir_name, '%s.pkl' % policy_name)
print("Loading from %s..." % policy_path)
try:
policy = joblib.load(policy_path)['exploration_policy']
except IOError:
print("Failed.")
continue
if variant['eval_deterministic']:
policy = MakeDeterministic(policy)
policy.to(ptu.device)
print("Sampling...")
eval_sampler = PathSampler(
env,
policy,
variant['num_eval_steps'],
variant['max_path_length'],
no_terminal=variant['no_terminal'],
render=variant['render'],
render_kwargs=variant['render_kwargs']
)
test_paths += eval_sampler.obtain_samples()
return test_paths
if __name__ == '__main__':
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--experiment', help='experiment specification file')
parser.add_argument('-g', '--gpu', help='gpu id', type=str, default=0)
args = parser.parse_args()
with open(args.experiment, 'r') as spec_file:
spec_string = spec_file.read()
exp_specs = yaml.load(spec_string)
exp_specs['env_specs']['eval_env_seed'] = exp_specs['env_specs']['training_env_seed']
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
if exp_specs['num_gpu_per_worker'] > 0:
print('\n\nUSING GPU\n\n')
ptu.set_gpu_mode(True)
exp_id = exp_specs['exp_id']
exp_prefix = exp_specs['exp_name']
seed = 0
setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)
paths = []
for seed in exp_specs['seed']:
logger.log("\n\ntest on seed %d..." % seed)
set_seed(seed)
paths += experiment(exp_specs, seed)
logger.log("Num paths: %d" % len(paths))
average_returns, average_stds = eval_util.get_average_returns(paths, True)
logger.log('test mean: {}'.format(average_returns))
logger.log('test std: {}'.format(average_stds))
| <filename>run_scripts/eval_policy.py
import yaml
import argparse
import joblib
import numpy as np
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
print(sys.path)
from gym.spaces import Dict
from rlkit.envs import get_env
import rlkit.torch.pytorch_util as ptu
from rlkit.launchers.launcher_util import setup_logger, set_seed, logger
from rlkit.core import eval_util
from rlkit.torch.sac.policies import ReparamTanhMultivariateGaussianPolicy
from rlkit.envs.wrappers import ScaledEnv
from rlkit.samplers import PathSampler
from rlkit.torch.sac.policies import MakeDeterministic
def experiment(variant, seed):
# with open('expert_demos_listing.yaml', 'r') as f:
# listings = yaml.load(f.read())ssssss
# expert_demos_path = listings[variant['expert_name']]['file_paths'][variant['expert_idx']]
# buffer_save_dict = joblib.load(expert_demos_path)
# expert_replay_buffer = buffer_save_dict['train']
# if 'minmax_env_with_demo_stats' in variant.keys():
# if variant['minmax_env_with_demo_stats']:
# print('Use minmax envs')
# assert 'norm_train' in buffer_save_dict.keys()
# expert_replay_buffer = buffer_save_dict['norm_train']
env_specs = variant['env_specs']
env = get_env(env_specs)
env.seed(seed)
env.reset()
obs_space = env.observation_space
act_space = env.action_space
assert not isinstance(obs_space, Dict)
assert len(obs_space.shape) == 1
assert len(act_space.shape) == 1
obs_dim = obs_space.shape[0]
action_dim = act_space.shape[0]
print('\n\nEnv: {}'.format(env_specs['env_name']))
print('kwargs: {}'.format(env_specs['env_kwargs']))
print('Obs Space: {}'.format(env.observation_space))
print('Act Space: {}\n\n'.format(env.action_space))
# if variant['scale_env_with_demo_stats']:
# env = ScaledEnv(
# env,
# obs_mean=buffer_save_dict['obs_mean'],
# obs_std=buffer_save_dict['obs_std'],
# acts_mean=buffer_save_dict['acts_mean'],
# acts_std=buffer_save_dict['acts_std'],
# )
#
# elif variant['minmax_env_with_demo_stats']:
# env = MinmaxEnv(
# env,
# obs_min=buffer_save_dict['obs_min'],
# obs_max=buffer_save_dict['obs_max'],
# )
if variant['test_random']:
net_size = 256
num_hidden = 2
policy = ReparamTanhMultivariateGaussianPolicy(
hidden_sizes=num_hidden * [net_size],
obs_dim=obs_dim,
action_dim=action_dim,
)
if variant['eval_deterministic']:
policy = MakeDeterministic(policy)
policy.to(ptu.device)
eval_sampler = PathSampler(
env,
policy,
variant['num_eval_steps'],
variant['max_path_length'],
no_terminal=variant['no_terminal'],
render=variant['render'],
render_kwargs=variant['render_kwargs']
)
test_paths = eval_sampler.obtain_samples()
average_returns, average_stds = eval_util.get_average_returns(test_paths, True)
logger.log('random mean: {}'.format(average_returns))
logger.log('random std: {}'.format(average_stds))
policy_checkpoint = variant['policy_checkpoint']
print('Policy Checkpoint: %s' % policy_checkpoint)
dirs = [_ for _ in os.listdir(policy_checkpoint) if os.path.isdir(os.path.join(policy_checkpoint, _))]
test_paths = []
for policy_name in variant['policy_name']:
for dir_name in dirs:
policy_path = os.path.join(policy_checkpoint, dir_name, '%s.pkl' % policy_name)
print("Loading from %s..." % policy_path)
try:
policy = joblib.load(policy_path)['exploration_policy']
except IOError:
print("Failed.")
continue
if variant['eval_deterministic']:
policy = MakeDeterministic(policy)
policy.to(ptu.device)
print("Sampling...")
eval_sampler = PathSampler(
env,
policy,
variant['num_eval_steps'],
variant['max_path_length'],
no_terminal=variant['no_terminal'],
render=variant['render'],
render_kwargs=variant['render_kwargs']
)
test_paths += eval_sampler.obtain_samples()
return test_paths
if __name__ == '__main__':
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--experiment', help='experiment specification file')
parser.add_argument('-g', '--gpu', help='gpu id', type=str, default=0)
args = parser.parse_args()
with open(args.experiment, 'r') as spec_file:
spec_string = spec_file.read()
exp_specs = yaml.load(spec_string)
exp_specs['env_specs']['eval_env_seed'] = exp_specs['env_specs']['training_env_seed']
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
if exp_specs['num_gpu_per_worker'] > 0:
print('\n\nUSING GPU\n\n')
ptu.set_gpu_mode(True)
exp_id = exp_specs['exp_id']
exp_prefix = exp_specs['exp_name']
seed = 0
setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)
paths = []
for seed in exp_specs['seed']:
logger.log("\n\ntest on seed %d..." % seed)
set_seed(seed)
paths += experiment(exp_specs, seed)
logger.log("Num paths: %d" % len(paths))
average_returns, average_stds = eval_util.get_average_returns(paths, True)
logger.log('test mean: {}'.format(average_returns))
logger.log('test std: {}'.format(average_stds))
| en | 0.43415 | # with open('expert_demos_listing.yaml', 'r') as f: # listings = yaml.load(f.read())ssssss # expert_demos_path = listings[variant['expert_name']]['file_paths'][variant['expert_idx']] # buffer_save_dict = joblib.load(expert_demos_path) # expert_replay_buffer = buffer_save_dict['train'] # if 'minmax_env_with_demo_stats' in variant.keys(): # if variant['minmax_env_with_demo_stats']: # print('Use minmax envs') # assert 'norm_train' in buffer_save_dict.keys() # expert_replay_buffer = buffer_save_dict['norm_train'] # if variant['scale_env_with_demo_stats']: # env = ScaledEnv( # env, # obs_mean=buffer_save_dict['obs_mean'], # obs_std=buffer_save_dict['obs_std'], # acts_mean=buffer_save_dict['acts_mean'], # acts_std=buffer_save_dict['acts_std'], # ) # # elif variant['minmax_env_with_demo_stats']: # env = MinmaxEnv( # env, # obs_min=buffer_save_dict['obs_min'], # obs_max=buffer_save_dict['obs_max'], # ) # Arguments | 1.844999 | 2 |
xdtools/artwork/__init__.py | tjcjc/xdtools | 43 | 6618970 | <reponame>tjcjc/xdtools<filename>xdtools/artwork/__init__.py<gh_stars>10-100
from .artwork import *
from .compound import *
from .ellipse import *
from .group import *
from .line import *
from .path import *
from .rectangle import *
from .text import *
| from .artwork import *
from .compound import *
from .ellipse import *
from .group import *
from .line import *
from .path import *
from .rectangle import *
from .text import * | none | 1 | 1.019539 | 1 | |
String/214. Shortest Palindrome.py | beckswu/Leetcode | 138 | 6618971 | """
214. Shortest Palindrome
Given a string s, you are allowed to convert it to a palindrome by adding characters in front of it.
Find and return the shortest palindrome you can find by performing this transformation.
Example 1:
Input: "aacecaaa"
Output: "aaacecaaa"
Example 2:
Input: "abcd"
Output: "dcbabcd"
"""
class Solution:
def shortestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
snew = s + "#" +s[::-1]
def getPrefix(snew):
prefix = [0]*len(snew)
j = 0
for i in range(1,len(snew)):
while j > 0 and snew[i]!=snew[j]:
j = prefix[j-1]
if snew[i] == snew[j]: j+=1
prefix[i] = j
return prefix
kmp = getPrefix(snew)
nonpal = s[kmp[-1]:]
return nonpal[::-1] + s
import functools
class Solution:
def shortestPalindrome(self, s):
snew = functools.reduce(lambda x, y: x + y + "#", s,"$#")+"^"
def manacher(snew):
p = [0]*len(snew)
mx = id = maxlen = 0
for i in range(1,len(snew)-1):
if i < mx:
p[i] = min(p[id*2 - i], p[mx - i])
else:
p[i] = 1
while snew[i + p[i]] == snew[i - p[i]]:
p[i] += 1
if p[i] + i > mx:
id, mx = i, p[i]+i
if p[i] == i:
maxlen = max(maxlen, p[i]-1)
return maxlen
maxlen = manacher(snew)
nonpal = s[maxlen:]
return nonpal[::-1] + s
"""
The basic idea is to find the longest palindrome starting from s[0], so that fewest charactors are needed in front of s.
For any charactor(c) in s appearing more than once, define l and r as the first and last index of c,
then the max length of palindrome starting from s[0] would be no larger than l+r+1, or the first c could never be matched.
For any charactor(c) in s appearing just once, l=r. Here is another key optimization.if s[:l+r+1] is not a palindrome,
the max length of palindrome starting from s[0] would be no larger than l.
In other words, c must be excluded from the palindrome, or it could not be matched.
If s[:l+r+1] is a palindrome, actually c is the center, matched by itself.
"""
class Solution:
def shortestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
if not s: return ''
k=len(s)
for c in set(s):
l,r=s.find(c),s.rfind(c)
k=min(k,l if l==r and s[:l+r+1]!=s[:l+r+1][::-1] else l+r+1)
for i in range(k,0,-1):
if s[:i]==s[:i][::-1]: return s[i:][::-1]+s
| """
214. Shortest Palindrome
Given a string s, you are allowed to convert it to a palindrome by adding characters in front of it.
Find and return the shortest palindrome you can find by performing this transformation.
Example 1:
Input: "aacecaaa"
Output: "aaacecaaa"
Example 2:
Input: "abcd"
Output: "dcbabcd"
"""
class Solution:
def shortestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
snew = s + "#" +s[::-1]
def getPrefix(snew):
prefix = [0]*len(snew)
j = 0
for i in range(1,len(snew)):
while j > 0 and snew[i]!=snew[j]:
j = prefix[j-1]
if snew[i] == snew[j]: j+=1
prefix[i] = j
return prefix
kmp = getPrefix(snew)
nonpal = s[kmp[-1]:]
return nonpal[::-1] + s
import functools
class Solution:
def shortestPalindrome(self, s):
snew = functools.reduce(lambda x, y: x + y + "#", s,"$#")+"^"
def manacher(snew):
p = [0]*len(snew)
mx = id = maxlen = 0
for i in range(1,len(snew)-1):
if i < mx:
p[i] = min(p[id*2 - i], p[mx - i])
else:
p[i] = 1
while snew[i + p[i]] == snew[i - p[i]]:
p[i] += 1
if p[i] + i > mx:
id, mx = i, p[i]+i
if p[i] == i:
maxlen = max(maxlen, p[i]-1)
return maxlen
maxlen = manacher(snew)
nonpal = s[maxlen:]
return nonpal[::-1] + s
"""
The basic idea is to find the longest palindrome starting from s[0], so that fewest charactors are needed in front of s.
For any charactor(c) in s appearing more than once, define l and r as the first and last index of c,
then the max length of palindrome starting from s[0] would be no larger than l+r+1, or the first c could never be matched.
For any charactor(c) in s appearing just once, l=r. Here is another key optimization.if s[:l+r+1] is not a palindrome,
the max length of palindrome starting from s[0] would be no larger than l.
In other words, c must be excluded from the palindrome, or it could not be matched.
If s[:l+r+1] is a palindrome, actually c is the center, matched by itself.
"""
class Solution:
def shortestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
if not s: return ''
k=len(s)
for c in set(s):
l,r=s.find(c),s.rfind(c)
k=min(k,l if l==r and s[:l+r+1]!=s[:l+r+1][::-1] else l+r+1)
for i in range(k,0,-1):
if s[:i]==s[:i][::-1]: return s[i:][::-1]+s
| en | 0.906976 | 214. Shortest Palindrome
Given a string s, you are allowed to convert it to a palindrome by adding characters in front of it.
Find and return the shortest palindrome you can find by performing this transformation.
Example 1:
Input: "aacecaaa"
Output: "aaacecaaa"
Example 2:
Input: "abcd"
Output: "dcbabcd" :type s: str
:rtype: str #")+"^" The basic idea is to find the longest palindrome starting from s[0], so that fewest charactors are needed in front of s.
For any charactor(c) in s appearing more than once, define l and r as the first and last index of c,
then the max length of palindrome starting from s[0] would be no larger than l+r+1, or the first c could never be matched.
For any charactor(c) in s appearing just once, l=r. Here is another key optimization.if s[:l+r+1] is not a palindrome,
the max length of palindrome starting from s[0] would be no larger than l.
In other words, c must be excluded from the palindrome, or it could not be matched.
If s[:l+r+1] is a palindrome, actually c is the center, matched by itself. :type s: str
:rtype: str | 3.883505 | 4 |
snakemake_reference/straincraker/scripts/straincracker.py | liebermanlab/wide-variant | 0 | 6618972 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 18:51:07 2020
@author: tamilieberman
"""
'''
straincraker.py reassigns reads from a kraken output file and produces a new kraken output file
It uses a purity threshold to reassign taxa to the taxonomic level at which the percent
of kmers assigned at a node or its descendents is above the specified the specified treshold
Kmers above the taxonomic level in consideration are not included in this calculation
Requires a precompiled tree, generated from straincracker_loadtree.py and nodes.dmp
Copyright (C) 2020 <NAME>, <EMAIL>
Required Parameters:
-i,--infile X....................kraken output file, gzipped
-t,--treefile X.....................straincraker treefile
-o, --output X......................modified kraken report file
-p, --purity X......................purity threshold
Example: python straincracker.py --t treematrix.npy --i Meta-F_Extract-Single_Ch_krakSeq.txt.gz --o Meta-F_Extract-Single_Ch_krakSeq.txt.cracked --p .99
#steps to all of straincraker
#make treematrix, krakendatabase format
#kraken
#strainkrak
#convert
#bracken
'''
# %% Run the actual program
#################################################################################
import numpy as np
import gzip
import argparse
import os
#################################################################################
# %%
def main():
#Parse arguments
parser = argparse.ArgumentParser(description='Reassign reads using treefile from strainkraker_loadtree.py')
parser.add_argument('--i', metavar='infile', nargs='?', required=True, help='gzipped output from kraken')
parser.add_argument('--o', metavar='outfile', nargs='?', required=True, help='destination output file')
parser.add_argument('--t', metavar='treefile', nargs='?', required=True, help='source tree file')
parser.add_argument('--p', metavar='purity', type=float, required=True, nargs='?', help='purity threshold')
args = parser.parse_args()
strainCrack(args.t,args.i,args.o,args.p)
#%%
def read_in_results(kstrings):
kcounts=dict({0:0, 1:0})
for i,s in enumerate(kstrings):
x=s.split(':')
if x[0] != '|':
if int(x[0]) in kcounts:
kcounts[int(x[0])]+=int(x[1])
else:
kcounts[int(x[0])]=int(x[1])
kcounts.pop(0) #ignore kmers alinging to levels 0 or 1
kcounts.pop(1) #ignore kmers alinging to levels 0 or 1
t=np.array(list(kcounts.keys()))
k=np.array(list(kcounts.values()))
return t,k
# %%
def classify_read(taxa,kmers, treem, purity_thresh,tlevels):
    """Reassign one read by descending the taxonomy until purity drops.

    Args:
        taxa: 1-D int array of taxon ids that received k-mers for this read.
        kmers: parallel 1-D int array of k-mer counts per taxon.
        treem: taxonomy matrix; treem[t, l] appears to hold taxon t's
            ancestor at level l, 0 meaning none — TODO confirm against
            strainkraker_loadtree.py.
        purity_thresh: minimum fraction of the remaining k-mers a branch
            must own before the descent continues past a split.
        tlevels: taxonomic level (column index in treem) of each entry in taxa.

    Returns:
        The reassigned taxon id (0 if no level could be decided).
    """
    found_taxa=taxa.copy()
    kmer_values=kmers.copy()  # working copy; counts on rejected branches are zeroed
    taxa_categorized=0
    subtree=treem[found_taxa,:];  # ancestor-per-level rows for just the observed taxa
    num_levels=max(np.where(subtree>0)[1])  # deepest level with any evidence
    #descend tree
    #calculate kmers at each level
    for l in range(1,num_levels+1):
        taxa_at_this_level_or_below=np.logical_and(kmer_values>0,tlevels>=l)
        #first try to classify at this level
        if np.sum(taxa_at_this_level_or_below) == 1:
            # exactly one candidate remains: classify directly at it
            taxa_categorized=found_taxa[taxa_at_this_level_or_below][0]
            break
        elif np.sum(taxa_at_this_level_or_below) < 1:
            #no evidence this deep: go with previous assignment
            break
        else:
            #need to continue classifying
            #find all taxa at this level that have descendants with kmers
            level_i_classification_for_taxa_found=subtree[:,l]
            taxa_at_this_level_w_evidence=np.unique(level_i_classification_for_taxa_found[level_i_classification_for_taxa_found>0])
            #are there 0, 1, or more taxa at this level?
            if taxa_at_this_level_w_evidence.size == 1:
                #just 1 taxon: pure by definition, continue going down the tree
                taxa_categorized = taxa_at_this_level_w_evidence[0]
            elif taxa_at_this_level_w_evidence.size < 1:
                #should be unreachable given num_levels above; report prev taxa
                print('Warning: somehow got here')
                break
            else: # taxa_at_this_level_w_evidence.size > 1:
                #check which is the best path to go down, or report prev
                kmers_at_level=np.zeros(np.shape(taxa_at_this_level_w_evidence),dtype=int)
                for i,t in enumerate(taxa_at_this_level_w_evidence):
                    # branch total = k-mers on the taxon itself plus its descendants
                    self_and_children_taxa=np.equal(level_i_classification_for_taxa_found,t);
                    kmers_at_level[i]=sum(kmer_values[self_and_children_taxa])
                purity=max(kmers_at_level)/sum(kmers_at_level)
                if purity > purity_thresh:
                    # dominant branch is pure enough: descend into it
                    taxa_categorized=taxa_at_this_level_w_evidence[kmers_at_level==max(kmers_at_level)]
                    taxa_categorized = taxa_categorized[0]
                    #set all counts on paths not below this taxon to 0
                    to_delete=np.not_equal(level_i_classification_for_taxa_found,taxa_categorized)
                    kmer_values[to_delete]=0
                else:
                    #too mixed to descend further: report prev taxa
                    break
    return taxa_categorized
# %%
def strainCrack(treefile,infile,outfile,purity_threshold):
    """Rewrite a kraken output file, reassigning reads via the purity rule.

    Args:
        treefile: .npy taxonomy matrix produced by strainkraker_loadtree.py.
        infile: gzipped kraken per-read output.
        outfile: destination path for the rewritten (uncompressed) output.
        purity_threshold: purity cutoff passed through to classify_read().

    Returns:
        (changed, tested): reads whose taxon changed, and classified reads
        that were re-examined.
    """
    #import tree structure
    tree = np.load(treefile)
    # make an array that says what taxonomic level each taxon is at
    # (tree[t, level] == t at the taxon's own level, so the matching column
    #  index is that taxon's level; taxa absent from the tree stay at 0)
    taxa_nums=np.array([range(tree.shape[0])])
    taxa_exists=np.sum(np.equal(tree,taxa_nums.T)>0,1)
    taxonomic_levels=np.zeros(taxa_exists.shape,dtype=int)
    taxonomic_levels[taxa_exists>0]=np.array((np.nonzero(np.equal(tree,taxa_nums.T))))[1]
    # open input and output files
    f=gzip.open(infile,"rt")
    of=open(outfile,"w")
    l=f.readline()
    # counters for the summary returned to the caller
    changed=0
    tested=0
    while len(l) > 0 and l != "\n":
        line=l.strip().split()
        if line[0]=='C':
            # classified read: line[2] is the assigned taxon,
            # line[4:] the "taxid:count" k-mer pairs
            taxa, kmers =read_in_results(line[4:])
            if kmers.size > 1:
                # k-mers hit more than one taxon, so the assignment may move
                tested+=1
                new_classification=classify_read(taxa,kmers,tree,purity_threshold, taxonomic_levels[taxa])
                if int(line[2]) != new_classification:
                    changed+=1
                    line[2]=str(new_classification)
            newline1='\t'.join(map(str, line[:4]))
            newline2=' '.join(map(str, line[4:]))
            of.write(newline1 + '\t' + newline2 + '\n')
        else:
            # non-'C' (e.g. unclassified) lines pass through untouched
            of.write(l)
        l=f.readline()
    of.close()
    f.close()
    return changed, tested
if __name__ == "__main__":
main() | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 18:51:07 2020
@author: tamilieberman
"""
'''
straincraker.py reassigns reads from a kraken output file and produces a new kraken output file
It uses a purity threshold to reassign taxa to the taxonomic level at which the percent
of kmers assigned at a node or its descendents is above the specified the specified treshold
Kmers above the taxonomic level in consideration are not included in this calculation
Requires a precompiled tree, generated from straincracker_loadtree.py and nodes.dmp
Copyright (C) 2020 <NAME>, <EMAIL>
Required Parameters:
-i,--infile X....................kraken output file, gzipped
-t,--treefile X.....................straincraker treefile
-o, --output X......................modified kraken report file
-p, --purity X......................purity threshold
Example: python straincracker.py --t treematrix.npy --i Meta-F_Extract-Single_Ch_krakSeq.txt.gz --o Meta-F_Extract-Single_Ch_krakSeq.txt.cracked --p .99
#steps to all of straincraker
#make treematrix, krakendatabase format
#kraken
#strainkrak
#convert
#bracken
'''
# %% Run the actual program
#################################################################################
import numpy as np
import gzip
import argparse
import os
#################################################################################
# %%
def main():
    """Command-line entry point: parse the four required options and run strainCrack."""
    #Parse arguments (all four are required; nargs='?' keeps each to one value)
    parser = argparse.ArgumentParser(description='Reassign reads using treefile from strainkraker_loadtree.py')
    parser.add_argument('--i', metavar='infile', nargs='?', required=True, help='gzipped output from kraken')
    parser.add_argument('--o', metavar='outfile', nargs='?', required=True, help='destination output file')
    parser.add_argument('--t', metavar='treefile', nargs='?', required=True, help='source tree file')
    parser.add_argument('--p', metavar='purity', type=float, required=True, nargs='?', help='purity threshold')
    args = parser.parse_args()
    strainCrack(args.t,args.i,args.o,args.p)
#%%
def read_in_results(kstrings):
    """Sum k-mer counts per taxon from kraken "taxid:count" tokens.

    Tokens whose taxid field is '|' (paired-read separator) are skipped, and
    the counts for taxa 0 and 1 are discarded before returning.

    Returns:
        A pair of parallel numpy int arrays (taxon ids, summed counts).
    """
    totals = dict({0: 0, 1: 0})
    for entry in kstrings:
        fields = entry.split(':')
        if fields[0] == '|':
            continue
        key = int(fields[0])
        if key in totals:
            totals[key] += int(fields[1])
        else:
            totals[key] = int(fields[1])
    totals.pop(0)  # drop kmers assigned to level 0
    totals.pop(1)  # drop kmers assigned to level 1
    taxa = np.array(list(totals.keys()))
    counts = np.array(list(totals.values()))
    return taxa, counts
# %%
def classify_read(taxa,kmers, treem, purity_thresh,tlevels):
    """Reassign one read by descending the taxonomy until purity drops.

    Args:
        taxa: 1-D int array of taxon ids that received k-mers for this read.
        kmers: parallel 1-D int array of k-mer counts per taxon.
        treem: taxonomy matrix; treem[t, l] appears to hold taxon t's
            ancestor at level l, 0 meaning none — TODO confirm against
            strainkraker_loadtree.py.
        purity_thresh: minimum fraction of the remaining k-mers a branch
            must own before the descent continues past a split.
        tlevels: taxonomic level (column index in treem) of each entry in taxa.

    Returns:
        The reassigned taxon id (0 if no level could be decided).
    """
    found_taxa=taxa.copy()
    kmer_values=kmers.copy()  # working copy; counts on rejected branches are zeroed
    taxa_categorized=0
    subtree=treem[found_taxa,:];  # ancestor-per-level rows for just the observed taxa
    num_levels=max(np.where(subtree>0)[1])  # deepest level with any evidence
    #descend tree
    #calculate kmers at each level
    for l in range(1,num_levels+1):
        taxa_at_this_level_or_below=np.logical_and(kmer_values>0,tlevels>=l)
        #first try to classify at this level
        if np.sum(taxa_at_this_level_or_below) == 1:
            # exactly one candidate remains: classify directly at it
            taxa_categorized=found_taxa[taxa_at_this_level_or_below][0]
            break
        elif np.sum(taxa_at_this_level_or_below) < 1:
            #no evidence this deep: go with previous assignment
            break
        else:
            #need to continue classifying
            #find all taxa at this level that have descendants with kmers
            level_i_classification_for_taxa_found=subtree[:,l]
            taxa_at_this_level_w_evidence=np.unique(level_i_classification_for_taxa_found[level_i_classification_for_taxa_found>0])
            #are there 0, 1, or more taxa at this level?
            if taxa_at_this_level_w_evidence.size == 1:
                #just 1 taxon: pure by definition, continue going down the tree
                taxa_categorized = taxa_at_this_level_w_evidence[0]
            elif taxa_at_this_level_w_evidence.size < 1:
                #should be unreachable given num_levels above; report prev taxa
                print('Warning: somehow got here')
                break
            else: # taxa_at_this_level_w_evidence.size > 1:
                #check which is the best path to go down, or report prev
                kmers_at_level=np.zeros(np.shape(taxa_at_this_level_w_evidence),dtype=int)
                for i,t in enumerate(taxa_at_this_level_w_evidence):
                    # branch total = k-mers on the taxon itself plus its descendants
                    self_and_children_taxa=np.equal(level_i_classification_for_taxa_found,t);
                    kmers_at_level[i]=sum(kmer_values[self_and_children_taxa])
                purity=max(kmers_at_level)/sum(kmers_at_level)
                if purity > purity_thresh:
                    # dominant branch is pure enough: descend into it
                    taxa_categorized=taxa_at_this_level_w_evidence[kmers_at_level==max(kmers_at_level)]
                    taxa_categorized = taxa_categorized[0]
                    #set all counts on paths not below this taxon to 0
                    to_delete=np.not_equal(level_i_classification_for_taxa_found,taxa_categorized)
                    kmer_values[to_delete]=0
                else:
                    #too mixed to descend further: report prev taxa
                    break
    return taxa_categorized
# %%
def strainCrack(treefile,infile,outfile,purity_threshold):
    """Rewrite a kraken output file, reassigning reads via the purity rule.

    Args:
        treefile: .npy taxonomy matrix produced by strainkraker_loadtree.py.
        infile: gzipped kraken per-read output.
        outfile: destination path for the rewritten (uncompressed) output.
        purity_threshold: purity cutoff passed through to classify_read().

    Returns:
        (changed, tested): reads whose taxon changed, and classified reads
        that were re-examined.
    """
    #import tree structure
    tree = np.load(treefile)
    # make an array that says what taxonomic level each taxon is at
    # (tree[t, level] == t at the taxon's own level, so the matching column
    #  index is that taxon's level; taxa absent from the tree stay at 0)
    taxa_nums=np.array([range(tree.shape[0])])
    taxa_exists=np.sum(np.equal(tree,taxa_nums.T)>0,1)
    taxonomic_levels=np.zeros(taxa_exists.shape,dtype=int)
    taxonomic_levels[taxa_exists>0]=np.array((np.nonzero(np.equal(tree,taxa_nums.T))))[1]
    # open input and output files
    f=gzip.open(infile,"rt")
    of=open(outfile,"w")
    l=f.readline()
    # counters for the summary returned to the caller
    changed=0
    tested=0
    while len(l) > 0 and l != "\n":
        line=l.strip().split()
        if line[0]=='C':
            # classified read: line[2] is the assigned taxon,
            # line[4:] the "taxid:count" k-mer pairs
            taxa, kmers =read_in_results(line[4:])
            if kmers.size > 1:
                # k-mers hit more than one taxon, so the assignment may move
                tested+=1
                new_classification=classify_read(taxa,kmers,tree,purity_threshold, taxonomic_levels[taxa])
                if int(line[2]) != new_classification:
                    changed+=1
                    line[2]=str(new_classification)
            newline1='\t'.join(map(str, line[:4]))
            newline2=' '.join(map(str, line[4:]))
            of.write(newline1 + '\t' + newline2 + '\n')
        else:
            # non-'C' (e.g. unclassified) lines pass through untouched
            of.write(l)
        l=f.readline()
    of.close()
    f.close()
    return changed, tested
if __name__ == "__main__":
main() | en | 0.627651 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Wed Oct 28 18:51:07 2020 @author: tamilieberman straincraker.py reassigns reads from a kraken output file and produces a new kraken output file It uses a purity threshold to reassign taxa to the taxonomic level at which the percent of kmers assigned at a node or its descendents is above the specified the specified treshold Kmers above the taxonomic level in consideration are not included in this calculation Requires a precompiled tree, generated from straincracker_loadtree.py and nodes.dmp Copyright (C) 2020 <NAME>, <EMAIL> Required Parameters: -i,--infile X....................kraken output file, gzipped -t,--treefile X.....................straincraker treefile -o, --output X......................modified kraken report file -p, --purity X......................purity threshold Example: python straincracker.py --t treematrix.npy --i Meta-F_Extract-Single_Ch_krakSeq.txt.gz --o Meta-F_Extract-Single_Ch_krakSeq.txt.cracked --p .99 #steps to all of straincraker #make treematrix, krakendatabase format #kraken #strainkrak #convert #bracken # %% Run the actual program ################################################################################# ################################################################################# # %% #Parse arguments #%% #ignore kmers alinging to levels 0 or 1 #ignore kmers alinging to levels 0 or 1 # %% #descend tree #calculate kmers at each level #first try to classify at this level #go with previous assignment #need to coninue classifying #find all taxa at this level that have descendents w kmers #are there 0, 1, or more taxa at this level? 
#just 1 taxa, pure by definitaion, continue going down tree #report prev taxa # taxa_at_this_level_w_evidence.size > 1: #check which is best path to go down, or report prev #set all counts on paths not below this to 0 #report prev taxa # %% #i mport tree structure # make an array that says what taxonmic level each taxa is at, helpful for later # read in input and output file # test | 2.31172 | 2 |
files/xss-scanner.py | Nabil-Official/N-WEB | 67 | 6618973 | <reponame>Nabil-Official/N-WEB<filename>files/xss-scanner.py
#!/usr/bin/python
import os
import requests as nabil
import concurrent.futures
logo = """ \033[1;94m
_ __ _ ____________
/ | / / | | / / ____/ __ )
/ |/ /____| | /| / / __/ / __ |
/ /| /_____/ |/ |/ / /___/ /_/ /
/_/ |_/ |__/|__/_____/_____/
Created By : \033[1;96mNabil-Rahman |\033[1;0m [V 1.2.2]
\033[1;32m------------------------------------------
\33[93m AUTHOR : Team DarkWeb -TD
\33[93m GITHUB : github.com/Nabil-Official
\33[93m FB : nabil.404
\033[1;32m------------------------------------------
"""
os.system('clear')
print(logo)
url = "http://testphp.vulnweb.com/listproducts.php?cat="
##### Paylaods From : https://github.com/capture0x/
print(
"""
\033[1;32m[1] \033[1;31m>> \033[1;32mBasic Payload
\033[1;32m[2] \033[1;31m>> \033[1;32mDiv Paylaod
\033[1;32m[3] \033[1;31m>> \033[1;32mImage Paylaod
\033[1;32m[4] \033[1;31m>> \033[1;32mBody Paylaod
"""
)
choice = str(input("\033[1;31m>> \033[1;32mChocse Paylaod : \033[1;36m"))
# Map menu choice to its payload wordlist; anything else is an error.
payload_files = {
    "1": "xss-pay/basic.txt",
    "2": "xss-pay/div.txt",
    "3": "xss-pay/img.txt",
    "4": "xss-pay/body.txt",
}
if choice not in payload_files:
    print("\033[1;32m[!] \033[1;31mERROR : Not Found !")
    exit()
pay = payload_files[choice]
# Load the selected payload wordlist (one payload per line).
o = open(pay,"r",encoding="utf8").readlines()
url = str(input("\033[1;31m>> \033[1;32mEnter Site Url : \033[1;36m"))
# Each payload is appended to the URL, so a query parameter is required.
# (The original had a no-op `url = url` branch here; inverted to a guard.)
if '?' not in url:
    print("\033[1;32m[!] \033[1;31mERROR : Enter url with paramater ! ")
    exit()
def scan(x):
    """Probe one payload line against the target URL and print the result.

    Relies on module globals `url` (target with a query parameter) and
    `nabil` (the requests module). Side effects only: a colored status line.
    """
    payload = x.strip()
    target = url + payload
    # naive reflected-XSS check: is the payload echoed verbatim in the body?
    body = nabil.get(target).text
    if payload in body:
        print(f"\033[1;32m[+] FOUND : {target}")
    else:
        print(f"\033[1;31m[!] NOT FOUND : {target}")
with concurrent.futures.ThreadPoolExecutor() as exe:
exe.map(scan,o) | #!/usr/bin/python
import os
import requests as nabil
import concurrent.futures
logo = """ \033[1;94m
_ __ _ ____________
/ | / / | | / / ____/ __ )
/ |/ /____| | /| / / __/ / __ |
/ /| /_____/ |/ |/ / /___/ /_/ /
/_/ |_/ |__/|__/_____/_____/
Created By : \033[1;96mNabil-Rahman |\033[1;0m [V 1.2.2]
\033[1;32m------------------------------------------
\33[93m AUTHOR : Team DarkWeb -TD
\33[93m GITHUB : github.com/Nabil-Official
\33[93m FB : nabil.404
\033[1;32m------------------------------------------
"""
os.system('clear')
print(logo)
url = "http://testphp.vulnweb.com/listproducts.php?cat="
##### Paylaods From : https://github.com/capture0x/
print(
"""
\033[1;32m[1] \033[1;31m>> \033[1;32mBasic Payload
\033[1;32m[2] \033[1;31m>> \033[1;32mDiv Paylaod
\033[1;32m[3] \033[1;31m>> \033[1;32mImage Paylaod
\033[1;32m[4] \033[1;31m>> \033[1;32mBody Paylaod
"""
)
choice = str(input("\033[1;31m>> \033[1;32mChocse Paylaod : \033[1;36m"))
if choice == "1":
pay = "xss-pay/basic.txt"
elif choice == "2":
pay = "xss-pay/div.txt"
elif choice == "3":
pay = "xss-pay/img.txt"
elif choice == "4":
pay = "xss-pay/body.txt"
else:
print("\033[1;32m[!] \033[1;31mERROR : Not Found !")
exit()
o = open(pay,"r",encoding="utf8").readlines()
url = str(input("\033[1;31m>> \033[1;32mEnter Site Url : \033[1;36m"))
if '?' in url:
url = url
else:
print("\033[1;32m[!] \033[1;31mERROR : Enter url with paramater ! ")
exit()
def scan(x):
    """Check whether one payload from the wordlist is reflected in the response.

    x: one raw line from the payload file (trailing newline still attached).
    Relies on module globals `url` (target with a query parameter) and
    `nabil` (the requests module). Side effects only: prints FOUND/NOT FOUND.
    """
    pay = x.strip()
    url_P = url+pay
    # naive reflected-XSS check: payload echoed verbatim in the HTML body
    req = nabil.get(url_P).text
    if pay in req:
        print(f"\033[1;32m[+] FOUND : {url_P}")
    else:
        print(f"\033[1;31m[!] NOT FOUND : {url_P}")
with concurrent.futures.ThreadPoolExecutor() as exe:
exe.map(scan,o) | en | 0.227093 | #!/usr/bin/python \033[1;94m
_ __ _ ____________
/ | / / | | / / ____/ __ )
/ |/ /____| | /| / / __/ / __ |
/ /| /_____/ |/ |/ / /___/ /_/ /
/_/ |_/ |__/|__/_____/_____/
Created By : \033[1;96mNabil-Rahman |\033[1;0m [V 1.2.2]
\033[1;32m------------------------------------------
\33[93m AUTHOR : Team DarkWeb -TD
\33[93m GITHUB : github.com/Nabil-Official
\33[93m FB : nabil.404
\033[1;32m------------------------------------------ ##### Paylaods From : https://github.com/capture0x/ \033[1;32m[1] \033[1;31m>> \033[1;32mBasic Payload
\033[1;32m[2] \033[1;31m>> \033[1;32mDiv Paylaod
\033[1;32m[3] \033[1;31m>> \033[1;32mImage Paylaod
\033[1;32m[4] \033[1;31m>> \033[1;32mBody Paylaod | 1.901525 | 2 |
samples/notebook.py | TakamiChie/TkSugar | 2 | 6618974 | import sys
from pathlib import Path
import tkinter
sys.path.append(str(Path(__file__).parent.parent))
from tksugar import Generator
if __name__ == "__main__":
    # Build the widget tree described by the YAML layout file.
    gen = Generator(r"samples\yml\notebook.yml")
    man = gen.get_manager()
    # list set: populate the demo listbox and preselect the second entry.
    w = man.widgets["list"].widget
    # plain loop instead of a side-effect-only list comprehension
    for n in range(1, 5):
        w.insert(tkinter.END, f"item {n}")
    w.select_set(1)
    # canvas set: draw two overlapping ovals and a caption.
    w = man.widgets["canvas"].widget
    w.create_oval(10, 5, 90, 30, fill="red")
    w.create_oval(10, 20, 90, 45, fill="blue")
    w.create_text(50, 25, text="canvas", fill="green")
man.mainloop() | import sys
from pathlib import Path
import tkinter
sys.path.append(str(Path(__file__).parent.parent))
from tksugar import Generator
if __name__ == "__main__":
    # Build the widget tree described by the YAML layout file.
    gen = Generator(r"samples\yml\notebook.yml")
    man = gen.get_manager()
    # list set: fill the listbox with sample rows and preselect index 1
    w = man.widgets["list"].widget
    # NOTE(review): side-effect-only list comprehension; a for-loop would be clearer
    [w.insert(tkinter.END, f"item {n}") for n in range(1,5)]
    w.select_set(1)
    # canvas set: draw two ovals and a green caption
    w = man.widgets["canvas"].widget
    w.create_oval(10, 5, 90, 30, fill="red")
    w.create_oval(10, 20, 90, 45, fill="blue")
    w.create_text(50, 25, text="canvas", fill="green")
man.mainloop() | en | 0.5849 | # list set # canvas set | 2.566714 | 3 |
majestic-monolith-django/shipping/services.py | kokospapa8/majestic-monolith-django | 1 | 6618975 | from typing import Any, Optional
from dataclasses import dataclass
from django.utils import timezone
from core.services import DomainService
from .events import ShippingEventsEmitter
from .models import ShippingItem, ShippingBatch, ShippingTransport
from .choices import ShippingItemStatus
@dataclass
class ShippingDTO:
    """Carrier for the entities a shipping domain-service call operates on.

    Only the fields relevant to a given service method need to be set;
    unused fields stay None.
    """
    item: ShippingItem = None
    batch: ShippingBatch = None
    transport: ShippingTransport = None
class ShippingBatchService(DomainService):
    """Domain operations on a ShippingBatch (expects dto.batch and dto.transport)."""
    dto: ShippingDTO
    def add_to_transport(self) -> ShippingBatch:
        """Attach dto.batch to dto.transport, persist it, and emit an event.

        Returns:
            The updated (saved) batch.
        """
        batch = self.dto.batch
        batch.shipping_transport = self.dto.transport
        batch.timestamp_transport_assigned = timezone.now()
        batch.save()
        # Notify downstream consumers that the batch is now on a transport.
        ShippingEventsEmitter().\
            batch_added_to_transport({
                "transport_uuid": self.dto.transport.uuid.hex,
                "batch_alias": batch.alias
                }
            )
        return batch
class ShippingItemService(DomainService):
    """Domain operations on a ShippingItem (expects dto.item and dto.batch)."""
    dto: ShippingDTO
    def add_to_batch(self) -> ShippingItem:
        """Add dto.item to dto.batch; a freshly CREATED item becomes MOVING.

        Returns:
            The updated item.
        """
        item = self.dto.item
        item.shipping_batches.add(self.dto.batch)
        # First batch assignment transitions the item out of CREATED.
        # save() only runs on the status change; the M2M add persists itself.
        if item.status == ShippingItemStatus.CREATED:
            item.status = ShippingItemStatus.MOVING
            item.save()
        # Emit the event in every case, even when the status did not change.
        ShippingEventsEmitter().item_added_to_batch({
            "item_tracking_number": item.tracking_number,
            "batch_alias": self.dto.batch.alias
            }
        )
        return item
class TransportService(DomainService):
    """Domain operations on a ShippingTransport (expects dto.transport)."""
    dto: ShippingDTO
    def transport_start(self, driver_uuid=None) -> None:
        """Mark the transport as departed, optionally assigning a driver."""
        transport = self.dto.transport
        if driver_uuid:
            transport.driver_uuid = driver_uuid
        transport.timestamp_departed = timezone.now()
        transport.save()
    def transport_complete(self) -> None:
        """Mark arrival: complete all batches, relocate their items, emit event.

        NOTE(review): the bulk .update() calls bypass per-row save()/signals —
        presumably intentional for throughput; confirm no signal handlers
        depend on batch/item saves here.
        """
        transport = self.dto.transport
        # Close out every batch riding on this transport.
        qs_batches = ShippingBatch.objects.filter(shipping_transport=transport)
        qs_batches.update(completed=True, timestamp_completed=timezone.now())
        # Items in those batches are now at the destination center.
        ShippingItem.objects.filter(shipping_batches__in=qs_batches).update(
            current_distribution_center_code=transport.distribution_center_code_destination
        )
        transport.timestamp_arrived = timezone.now()
        transport.save()
        ShippingEventsEmitter().transport_complete({
            "transport_uuid": transport.uuid.hex
            }
        )
batch_service = ShippingBatchService()
shippingitem_service = ShippingItemService()
transport_service = TransportService()
| from typing import Any, Optional
from dataclasses import dataclass
from django.utils import timezone
from core.services import DomainService
from .events import ShippingEventsEmitter
from .models import ShippingItem, ShippingBatch, ShippingTransport
from .choices import ShippingItemStatus
@dataclass
class ShippingDTO:
item: ShippingItem = None
batch: ShippingBatch = None
transport: ShippingTransport = None
class ShippingBatchService(DomainService):
dto: ShippingDTO
def add_to_transport(self) -> ShippingBatch:
batch = self.dto.batch
batch.shipping_transport = self.dto.transport
batch.timestamp_transport_assigned = timezone.now()
batch.save()
ShippingEventsEmitter().\
batch_added_to_transport({
"transport_uuid": self.dto.transport.uuid.hex,
"batch_alias": batch.alias
}
)
return batch
class ShippingItemService(DomainService):
dto: ShippingDTO
def add_to_batch(self) -> ShippingItem:
item = self.dto.item
item.shipping_batches.add(self.dto.batch)
if item.status == ShippingItemStatus.CREATED:
item.status = ShippingItemStatus.MOVING
item.save()
ShippingEventsEmitter().item_added_to_batch({
"item_tracking_number": item.tracking_number,
"batch_alias": self.dto.batch.alias
}
)
return item
class TransportService(DomainService):
dto: ShippingDTO
def transport_start(self, driver_uuid=None) -> None:
transport = self.dto.transport
if driver_uuid:
transport.driver_uuid = driver_uuid
transport.timestamp_departed = timezone.now()
transport.save()
def transport_complete(self) -> None:
transport = self.dto.transport
qs_batches = ShippingBatch.objects.filter(shipping_transport=transport)
qs_batches.update(completed=True, timestamp_completed=timezone.now())
ShippingItem.objects.filter(shipping_batches__in=qs_batches).update(
current_distribution_center_code=transport.distribution_center_code_destination
)
transport.timestamp_arrived = timezone.now()
transport.save()
ShippingEventsEmitter().transport_complete({
"transport_uuid": transport.uuid.hex
}
)
batch_service = ShippingBatchService()
shippingitem_service = ShippingItemService()
transport_service = TransportService()
| none | 1 | 2.230393 | 2 | |
eval.py | kchro/plato | 2 | 6618976 | import argparse
# models
from models.seq2seq import Seq2Seq
from models.seq2tree import Seq2Tree
from models.tree2seq import Tree2Seq
from models.tree2tree import Tree2Tree
# tools
from data.load_data import load_file
from sklearn.model_selection import train_test_split
import torch
import json
import random
MODELS = {
'seq2seq': Seq2Seq,
'seq2tree': Seq2Tree,
'tree2seq': Tree2Seq,
'tree2tree': Tree2Tree
}
DATASETS = {
'toy': 'k3_tree_mini.out',
'sm': 'atomic_sents.out',
'md': 'k3_med.out',
'lg': 'k3_tree.out'
}
def get_parser():
    """Build the command-line parser for model/dataset selection.

    Options: --eval_only flag; required -e/--encoder and -d/--decoder
    ('seq' or 'tree'); required -D/--data ('toy'/'sm'/'md'/'lg');
    required --hidden (int).

    Returns:
        The configured ArgumentParser.
    """
    p = argparse.ArgumentParser()
    p.add_argument('--eval_only', action='store_true')
    module_kinds = {'seq', 'tree'}
    p.add_argument('-e', '--encoder', required=True, choices=module_kinds)
    p.add_argument('-d', '--decoder', required=True, choices=module_kinds)
    p.add_argument('-D', '--data', required=True, choices={'toy', 'sm', 'md', 'lg'})
    p.add_argument('--hidden', required=True, type=int)
    return p
def get_model_name(args):
    """Compose the model key '<encoder>2<decoder>', e.g. 'seq2tree'."""
    return args.encoder + '2' + args.decoder
def get_dataset_name(args):
    """Map the --data flag to a dataset filename, defaulting to the toy set."""
    return DATASETS.get(args.data, DATASETS['toy'])
if __name__ == '__main__':
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print 'running on the %s' % device
sess = raw_input('load session: ')
# parse arguments
parser = get_parser()
args = parser.parse_args()
# model name
name = get_model_name(args)
print 'running the %s model' % name
# select dataset
dataset = get_dataset_name(args)
print 'using the %s dataset' % dataset
# load data
inputs, vocabs = load_file(filename=dataset,
encoder=args.encoder,
decoder=args.decoder,
device=device)
src_inputs, tar_inputs = inputs
src_vocab, tar_vocab = vocabs
# split data
X_train, X_test, y_train, y_test = train_test_split(src_inputs, tar_inputs, test_size=0.1)
print '%d training examples & %d testing examples.' % (len(X_train), len(X_test))
# load the model parameters
input_size = len(src_vocab)
#hidden_size = 200
hidden_size = args.hidden
output_size = len(tar_vocab)
model = MODELS[name](input_size=input_size,
hidden_size=hidden_size,
output_size=output_size,
src_vocab=src_vocab,
tar_vocab=tar_vocab,
sess=sess, device=device)
# load the saved model
print 'loading model parameters...',
model.load('%s.json' % sess)
print 'done.'
test = random.sample(list(zip(X_test, y_test)), 100)
X_test = [t[0] for t in test]
y_test = [t[1] for t in test]
preds = model.predict(X_test)
model.evaluate(X_test, y_test, preds, out='tmp')
raise
import random
for i in range(10):
print random.choice(X_test)
# enter the input string
while True:
src_input = raw_input('enter sentence (Q to quit): ')
if src_input == 'Q':
break
x_test = src_vocab.get_idx_tensor([src_input])
# make the prediction
print 'running the model on test set...'
preds = model.predict(x_test)
print 'done.'
print 'input: ' + src_input
print 'output: ' + tar_vocab.reverse(preds[0])
| import argparse
# models
from models.seq2seq import Seq2Seq
from models.seq2tree import Seq2Tree
from models.tree2seq import Tree2Seq
from models.tree2tree import Tree2Tree
# tools
from data.load_data import load_file
from sklearn.model_selection import train_test_split
import torch
import json
import random
MODELS = {
'seq2seq': Seq2Seq,
'seq2tree': Seq2Tree,
'tree2seq': Tree2Seq,
'tree2tree': Tree2Tree
}
DATASETS = {
'toy': 'k3_tree_mini.out',
'sm': 'atomic_sents.out',
'md': 'k3_med.out',
'lg': 'k3_tree.out'
}
def get_parser():
    '''
    Set up the command-line argument parser.

    Options: --eval_only flag; required -e/--encoder and -d/--decoder
    ('seq' or 'tree'); required -D/--data ('toy'/'sm'/'md'/'lg');
    required --hidden (int hidden-layer size).

    Returns:
        parser: (ArgumentParser) the created parser
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('--eval_only',
                        action='store_true')
    parser.add_argument('-e', '--encoder',
                        required=True,
                        choices={'seq', 'tree'})
    parser.add_argument('-d', '--decoder',
                        required=True,
                        choices={'seq', 'tree'})
    parser.add_argument('-D', '--data',
                        required=True,
                        choices={'toy', 'sm', 'md', 'lg'})
    parser.add_argument('--hidden',
                        required=True, type=int)
    return parser
def get_model_name(args):
    """Return the model key '<encoder>2<decoder>', e.g. 'seq2tree'."""
    return '%s2%s' % (args.encoder, args.decoder)
def get_dataset_name(args):
    """Return the dataset filename for args.data, defaulting to the toy set."""
    if args.data in DATASETS:
        return DATASETS[args.data]
    # unknown key: fall back to the small toy dataset
    return DATASETS['toy']
if __name__ == '__main__':
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print 'running on the %s' % device
sess = raw_input('load session: ')
# parse arguments
parser = get_parser()
args = parser.parse_args()
# model name
name = get_model_name(args)
print 'running the %s model' % name
# select dataset
dataset = get_dataset_name(args)
print 'using the %s dataset' % dataset
# load data
inputs, vocabs = load_file(filename=dataset,
encoder=args.encoder,
decoder=args.decoder,
device=device)
src_inputs, tar_inputs = inputs
src_vocab, tar_vocab = vocabs
# split data
X_train, X_test, y_train, y_test = train_test_split(src_inputs, tar_inputs, test_size=0.1)
print '%d training examples & %d testing examples.' % (len(X_train), len(X_test))
# load the model parameters
input_size = len(src_vocab)
#hidden_size = 200
hidden_size = args.hidden
output_size = len(tar_vocab)
model = MODELS[name](input_size=input_size,
hidden_size=hidden_size,
output_size=output_size,
src_vocab=src_vocab,
tar_vocab=tar_vocab,
sess=sess, device=device)
# load the saved model
print 'loading model parameters...',
model.load('%s.json' % sess)
print 'done.'
test = random.sample(list(zip(X_test, y_test)), 100)
X_test = [t[0] for t in test]
y_test = [t[1] for t in test]
preds = model.predict(X_test)
model.evaluate(X_test, y_test, preds, out='tmp')
raise
import random
for i in range(10):
print random.choice(X_test)
# enter the input string
while True:
src_input = raw_input('enter sentence (Q to quit): ')
if src_input == 'Q':
break
x_test = src_vocab.get_idx_tensor([src_input])
# make the prediction
print 'running the model on test set...'
preds = model.predict(x_test)
print 'done.'
print 'input: ' + src_input
print 'output: ' + tar_vocab.reverse(preds[0])
| en | 0.267614 | # models # tools Set up argument parser Returns: parser: (ArgumentParser) the created parser # parse arguments # model name # select dataset # load data # split data # load the model parameters #hidden_size = 200 # load the saved model # enter the input string # make the prediction | 2.347877 | 2 |
login/__init__.py | OkayJosh/Nemis | 0 | 6618977 | default_app_config='login.apps.LoginConfig' | default_app_config='login.apps.LoginConfig' | none | 1 | 1.0751 | 1 | |
release_notes_generator.py | astrothesaurus/release-prep | 0 | 6618978 | <gh_stars>0
# coding: utf-8
## This scripts compares two versions of the UAT and generates
## information such as new concepts, deprecated concepts
## new related links, new alt labels, new pref labels, etc etc.
## Data is useful in creating the release notes
import os
import csv
import json
import codecs
import shutil
import rdflib
import unicodedata
#import pandas as pd
from datetime import datetime
timestamp = datetime.now().strftime("%Y_%m%d_%H%M")
print ("Reading the SKOS file...this may take a few seconds.")
##### RDF File Location #####
##### assign this variable to location of UAT SKOS-RDF file exported from VocBench #####
##export RDF/XML Concepts
uat_new = "UAT.rdf" # filename for the new version
#get previous version RDF from GitHub
uat_prev = "4.0.0/UAT.rdf" # filename for the previous version
##### Shared Functions and Variables #####
##### do NOT edit this section #####
#reads SKOS-RDF file into a RDFlib graph for use in these scripts
g = rdflib.Graph()
result = g.parse(uat_new)#.encode('utf8'))
f = rdflib.Graph()
result = f.parse(uat_prev)#.encode('utf8'))
#defines certain properties within the SKOS-RDF file
prefLabel = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel')
broader = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#broader')
Concept = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#Concept')
vocstatus = rdflib.term.URIRef('http://art.uniroma2.it/ontologies/vocbench#hasStatus')
altLabel = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#altLabel')
TopConcept = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#topConceptOf')
ednotes = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#editorialNote')
changenotes = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#changeNote')
scopenotes = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#scopeNote')
example = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#example')
related = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#related')
definition = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#definition')
comment = rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#comment')
title = rdflib.term.URIRef('http://purl.org/dc/terms/title')
label = rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label')
#a list of all concepts
allnewconcepts = [gm for gm in g.subjects(rdflib.RDF.type, Concept)]
allprevconcepts = [fm for fm in f.subjects(rdflib.RDF.type, Concept)]
def lit(term):
    """Return the skos:prefLabel of *term* in the new-release graph g, or None."""
    return next(g.objects(subject=rdflib.term.URIRef(term), predicate=prefLabel), None)
def deplit(term):
    """Return the skos:prefLabel of *term* in the previous-release graph f, or None."""
    return next(f.objects(subject=rdflib.term.URIRef(term), predicate=prefLabel), None)
#a function to get a list of all alt terms for a term
def getaltterms(term, version):
    """Return every skos:altLabel of *term* in graph *version*.

    Returns:
        A list of label literals, or None when the term has no alt labels
        (callers compare the result against None before joining it).
    """
    terminal = rdflib.term.URIRef(term)
    # The original built a one-key dict and used KeyError for control flow;
    # collecting the objects directly is equivalent and simpler.
    labels = list(version.objects(subject=terminal, predicate=altLabel))
    return labels if labels else None
#a function to get a list of all related terms for a term
def getrelatedterms(term, version):
    """Return every skos:related link of *term* in graph *version*.

    Returns:
        A list of related-concept URIRefs, or None when the term has none
        (callers compare the result against None before iterating).
    """
    terminal = rdflib.term.URIRef(term)
    # Simplified from a one-key dict driven by KeyError control flow.
    links = list(version.objects(subject=terminal, predicate=related))
    return links if links else None
#a function to get a list of all broader terms for a term
def getbroaderterms(term, version):
    """Return every skos:broader parent of *term* in graph *version*.

    Returns:
        A list of broader-concept URIRefs, or None when the term has none.
    """
    terminal = rdflib.term.URIRef(term)
    # Simplified from a one-key dict driven by KeyError control flow.
    parents = list(version.objects(subject=terminal, predicate=broader))
    return parents if parents else None
#a function to return scope notes for a term
def getscopenotes(term, sf):
    """Return the first skos:scopeNote of *term* in graph *sf* (None if absent)."""
    subject_ref = rdflib.term.URIRef(term)
    return next(sf.objects(subject=subject_ref, predicate=scopenotes), None)
#a function to return example notes for a term
def getexample(term, sf):
    """Return all skos:example values of *term* in graph *sf* as a list."""
    subject_ref = rdflib.term.URIRef(term)
    return list(sf.objects(subject=subject_ref, predicate=example))
#a function to return the status of a term
def getdefinition(term, sf):
    """Return the first skos:definition of *term* in graph *sf* (None if absent)."""
    subject_ref = rdflib.term.URIRef(term)
    return next(sf.objects(subject=subject_ref, predicate=definition), None)
fileout = open('changes_'+timestamp+'.csv','w', encoding='utf-8', newline='')
csv_out = csv.writer(fileout, lineterminator='\n', delimiter=',')
wr = csv.writer(fileout,quoting=csv.QUOTE_ALL)#
#UnicodeWriter(fileout,lineterminator='\n', delimiter=',', dialect='excel',quoting=csv.QUOTE_ALL)
##prints all new concepts, new alts, removed alts
for newcon in allnewconcepts:
if newcon in allprevconcepts:
newalts = getaltterms(newcon, g)
oldalts = getaltterms(newcon, f)
copynewalts = getaltterms(newcon, g)
copyoldalts = getaltterms(newcon, f)
if oldalts == None or newalts == None :
pass
else:
for x in newalts:
if x in oldalts:
copynewalts.remove(x)
for y in oldalts:
if y in newalts:
copyoldalts.remove(y)
if copyoldalts != None and copyoldalts != []:
aoldalts = (", ").join(copyoldalts)
wr.writerow((["Removed Alts"]+[newcon[30:]]+["| "]+[newcon]+[" | "]+[lit(newcon)]+[" | "]+[aoldalts]+[" |"]))
if copynewalts != None and copynewalts != []:
anewalts = (", ").join(copynewalts)
wr.writerow((["New Alts"]+[newcon[30:]]+["| "]+[newcon]+[" | "]+[lit(newcon)]+[" | "]+[anewalts]+[" |"]))
# depaltlist = []
# for y in oldalts:
# if y in newalts:
# pass
# else:
# depaltlist.append(y)
# if depaltlist != []:
# for z in depaltlist:
# if z == lit(newcon):
# pass
# else:
# wr.writerow((["Removed Alts"]+[newcon[30:]]+["| "]+[newcon]+[" | "]+[lit(newcon)]+[" | "]+depaltlist+[" |"]))
else:
litterm = lit(newcon)
morealts = getaltterms(newcon, g)
wr.writerow(("New concept",newcon[30:],"| ",newcon," | ",litterm," |"))
if morealts != None:
amorealts = (", ").join(morealts)
wr.writerow((["New Alts"]+[newcon[30:]]+["| "]+[newcon]+[" | "]+[lit(newcon)]+[" | "]+[amorealts]+[" |"]))
##finds all deprecated concepts
for oldcon in allprevconcepts:
if oldcon in allnewconcepts:
oldlit = deplit(oldcon)
newlit = lit(oldcon)
if oldlit != newlit:
wr.writerow(("Updated PrefLabel",oldcon[30:],"| ",oldcon," | ",oldlit," | ",newlit," |"))
else:
litterm = deplit(oldcon)
wr.writerow(("Deprecated concept",oldcon[30:],"| ",oldcon," | ",litterm," |"))
#finds all new related links
relatedlist = []
for oldcon in allprevconcepts:
litterm = lit(oldcon)
rterms = getrelatedterms(oldcon,f)
if rterms != None:
for x in rterms:
littermx = lit(x)
relatedlist.append([oldcon,x])
newrelatedlist = []
for newcon in allnewconcepts:
litterm = lit(newcon)
rterms = getrelatedterms(newcon,g)
if rterms != None:
for x in rterms:
littermx = lit(x)
newrelatedlist.append([newcon,x])
if [newcon,x] in relatedlist:
pass
else:
wr.writerow(("Related",newcon[30:],"| ",newcon," |",litterm," | ",x," | ",littermx," |"))
#finds all new defintions, scope notes, examples
deflist = []
scopelist = []
examplelist = []
for oldcon in allprevconcepts:
olddef = getdefinition(oldcon,f)
oldscope = getscopenotes(oldcon,f)
oldex = getexample(oldcon,f)
if olddef != None:
deflist.append([oldcon,olddef])
if oldscope != None:
scopelist.append([oldcon,oldscope])
if oldex != None:
examplelist.append([oldcon,oldex])
for newcon in allnewconcepts:
newdef = getdefinition(newcon,g)
newscope = getscopenotes(newcon,g)
newex = getexample(newcon,g)
litterm = lit(newcon)
if newdef != None:
if [newcon,newdef] not in deflist:
wr.writerow(("Definition",newcon[30:],"| ",newcon," |",litterm," |",newdef," |"))
if newscope != None:
if [newcon,newscope] in scopelist:
pass
else:
wr.writerow(("Scope Note",newcon[30:],"| ",newcon," |",litterm," |",newscope," |"))
if newex != []:
if [newcon,newex] in examplelist:
pass
else:
nex = ", ".join(newex)
wr.writerow(("Example",newcon[30:],"| ",newcon," |",litterm," |",nex," |"))
#gets removed related links
for a in relatedlist:
if a in newrelatedlist:
pass
else:
wr.writerow(("Removed Related",a[0][30:],"| ",a[0]," |",deplit(a[0])," |",a[1]," |",deplit(a[1])," |"))
fileout.close()
print ("finished!") | # coding: utf-8
## This scripts compares two versions of the UAT and generates
## information such as new concepts, deprecated concepts
## new related links, new alt labels, new pref labels, etc etc.
## Data is useful in creating the release notes
import os
import csv
import json
import codecs
import shutil
import rdflib
import unicodedata
#import pandas as pd
from datetime import datetime
timestamp = datetime.now().strftime("%Y_%m%d_%H%M")
print ("Reading the SKOS file...this may take a few seconds.")
##### RDF File Location #####
##### assign this variable to location of UAT SKOS-RDF file exported from VocBench #####
##export RDF/XML Concepts
uat_new = "UAT.rdf" # filename for the new version
#get previous version RDF from GitHub
uat_prev = "4.0.0/UAT.rdf" # filename for the previous version
##### Shared Functions and Variables #####
##### do NOT edit this section #####
#reads SKOS-RDF file into a RDFlib graph for use in these scripts
g = rdflib.Graph()
result = g.parse(uat_new)#.encode('utf8'))
f = rdflib.Graph()
result = f.parse(uat_prev)#.encode('utf8'))
#defines certain properties within the SKOS-RDF file
prefLabel = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel')
broader = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#broader')
Concept = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#Concept')
vocstatus = rdflib.term.URIRef('http://art.uniroma2.it/ontologies/vocbench#hasStatus')
altLabel = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#altLabel')
TopConcept = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#topConceptOf')
ednotes = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#editorialNote')
changenotes = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#changeNote')
scopenotes = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#scopeNote')
example = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#example')
related = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#related')
definition = rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#definition')
comment = rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#comment')
title = rdflib.term.URIRef('http://purl.org/dc/terms/title')
label = rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label')
#a list of all concepts
allnewconcepts = [gm for gm in g.subjects(rdflib.RDF.type, Concept)]
allprevconcepts = [fm for fm in f.subjects(rdflib.RDF.type, Concept)]
def lit(term):
d = rdflib.term.URIRef(term)
for prefterm in g.objects(subject=d, predicate=prefLabel):
return prefterm
def deplit(term):
d = rdflib.term.URIRef(term)
for prefterm in f.objects(subject=d, predicate=prefLabel):
return prefterm
#a function to get a list of all alt terms for a term
def getaltterms(term,version):
terminal = rdflib.term.URIRef(term)
alternateterms = {}
try:
for ats in version.objects(subject=terminal, predicate=altLabel):
try:
alternateterms[terminal].append(ats)
except KeyError:
alternateterms[terminal] = [ats]
return alternateterms[terminal]
except KeyError:
pass
#a function to get a list of all related terms for a term
def getrelatedterms(term,version):
terminal = rdflib.term.URIRef(term)
relatedterms = {}
try:
for rts in version.objects(subject=terminal, predicate=related):
try:
relatedterms[terminal].append(rts)
except KeyError:
relatedterms[terminal] = [rts]
return relatedterms[terminal]
except KeyError:
pass
#a function to get a list of all broader terms for a term
def getbroaderterms(term,version):
terminal = rdflib.term.URIRef(term)
broaderterms = {}
try:
for bts in version.objects(subject=terminal, predicate=broader):
try:
broaderterms[terminal].append(bts)
except KeyError:
broaderterms[terminal] = [bts]
return broaderterms[terminal]
except KeyError:
pass
#a function to return scope notes for a term
def getscopenotes(term,sf):
d = rdflib.term.URIRef(term)
for scnoteterm in sf.objects(subject=d, predicate=scopenotes):
return scnoteterm
#a function to return example notes for a term
def getexample(term,sf):
d = rdflib.term.URIRef(term)
exlist = []
for termex in sf.objects(subject=d, predicate=example):
exlist.append(termex)
return exlist
#a function to return the status of a term
def getdefinition(term,sf):
d=rdflib.term.URIRef(term)
for deftest in sf.objects(subject=d, predicate=definition):
return deftest
fileout = open('changes_'+timestamp+'.csv','w', encoding='utf-8', newline='')
csv_out = csv.writer(fileout, lineterminator='\n', delimiter=',')
wr = csv.writer(fileout,quoting=csv.QUOTE_ALL)#
#UnicodeWriter(fileout,lineterminator='\n', delimiter=',', dialect='excel',quoting=csv.QUOTE_ALL)
##prints all new concepts, new alts, removed alts
for newcon in allnewconcepts:
if newcon in allprevconcepts:
newalts = getaltterms(newcon, g)
oldalts = getaltterms(newcon, f)
copynewalts = getaltterms(newcon, g)
copyoldalts = getaltterms(newcon, f)
if oldalts == None or newalts == None :
pass
else:
for x in newalts:
if x in oldalts:
copynewalts.remove(x)
for y in oldalts:
if y in newalts:
copyoldalts.remove(y)
if copyoldalts != None and copyoldalts != []:
aoldalts = (", ").join(copyoldalts)
wr.writerow((["Removed Alts"]+[newcon[30:]]+["| "]+[newcon]+[" | "]+[lit(newcon)]+[" | "]+[aoldalts]+[" |"]))
if copynewalts != None and copynewalts != []:
anewalts = (", ").join(copynewalts)
wr.writerow((["New Alts"]+[newcon[30:]]+["| "]+[newcon]+[" | "]+[lit(newcon)]+[" | "]+[anewalts]+[" |"]))
# depaltlist = []
# for y in oldalts:
# if y in newalts:
# pass
# else:
# depaltlist.append(y)
# if depaltlist != []:
# for z in depaltlist:
# if z == lit(newcon):
# pass
# else:
# wr.writerow((["Removed Alts"]+[newcon[30:]]+["| "]+[newcon]+[" | "]+[lit(newcon)]+[" | "]+depaltlist+[" |"]))
else:
litterm = lit(newcon)
morealts = getaltterms(newcon, g)
wr.writerow(("New concept",newcon[30:],"| ",newcon," | ",litterm," |"))
if morealts != None:
amorealts = (", ").join(morealts)
wr.writerow((["New Alts"]+[newcon[30:]]+["| "]+[newcon]+[" | "]+[lit(newcon)]+[" | "]+[amorealts]+[" |"]))
##finds all deprecated concepts
for oldcon in allprevconcepts:
if oldcon in allnewconcepts:
oldlit = deplit(oldcon)
newlit = lit(oldcon)
if oldlit != newlit:
wr.writerow(("Updated PrefLabel",oldcon[30:],"| ",oldcon," | ",oldlit," | ",newlit," |"))
else:
litterm = deplit(oldcon)
wr.writerow(("Deprecated concept",oldcon[30:],"| ",oldcon," | ",litterm," |"))
#finds all new related links
relatedlist = []
for oldcon in allprevconcepts:
litterm = lit(oldcon)
rterms = getrelatedterms(oldcon,f)
if rterms != None:
for x in rterms:
littermx = lit(x)
relatedlist.append([oldcon,x])
newrelatedlist = []
for newcon in allnewconcepts:
litterm = lit(newcon)
rterms = getrelatedterms(newcon,g)
if rterms != None:
for x in rterms:
littermx = lit(x)
newrelatedlist.append([newcon,x])
if [newcon,x] in relatedlist:
pass
else:
wr.writerow(("Related",newcon[30:],"| ",newcon," |",litterm," | ",x," | ",littermx," |"))
#finds all new defintions, scope notes, examples
deflist = []
scopelist = []
examplelist = []
for oldcon in allprevconcepts:
olddef = getdefinition(oldcon,f)
oldscope = getscopenotes(oldcon,f)
oldex = getexample(oldcon,f)
if olddef != None:
deflist.append([oldcon,olddef])
if oldscope != None:
scopelist.append([oldcon,oldscope])
if oldex != None:
examplelist.append([oldcon,oldex])
for newcon in allnewconcepts:
newdef = getdefinition(newcon,g)
newscope = getscopenotes(newcon,g)
newex = getexample(newcon,g)
litterm = lit(newcon)
if newdef != None:
if [newcon,newdef] not in deflist:
wr.writerow(("Definition",newcon[30:],"| ",newcon," |",litterm," |",newdef," |"))
if newscope != None:
if [newcon,newscope] in scopelist:
pass
else:
wr.writerow(("Scope Note",newcon[30:],"| ",newcon," |",litterm," |",newscope," |"))
if newex != []:
if [newcon,newex] in examplelist:
pass
else:
nex = ", ".join(newex)
wr.writerow(("Example",newcon[30:],"| ",newcon," |",litterm," |",nex," |"))
#gets removed related links
for a in relatedlist:
if a in newrelatedlist:
pass
else:
wr.writerow(("Removed Related",a[0][30:],"| ",a[0]," |",deplit(a[0])," |",a[1]," |",deplit(a[1])," |"))
fileout.close()
print ("finished!") | en | 0.593708 | # coding: utf-8 ## This scripts compares two versions of the UAT and generates ## information such as new concepts, deprecated concepts ## new related links, new alt labels, new pref labels, etc etc. ## Data is useful in creating the release notes #import pandas as pd ##### RDF File Location ##### ##### assign this variable to location of UAT SKOS-RDF file exported from VocBench ##### ##export RDF/XML Concepts # filename for the new version #get previous version RDF from GitHub # filename for the previous version ##### Shared Functions and Variables ##### ##### do NOT edit this section ##### #reads SKOS-RDF file into a RDFlib graph for use in these scripts #.encode('utf8')) #.encode('utf8')) #defines certain properties within the SKOS-RDF file #prefLabel') #broader') #Concept') #hasStatus') #altLabel') #topConceptOf') #editorialNote') #changeNote') #scopeNote') #example') #related') #definition') #comment') #label') #a list of all concepts #a function to get a list of all alt terms for a term #a function to get a list of all related terms for a term #a function to get a list of all broader terms for a term #a function to return scope notes for a term #a function to return example notes for a term #a function to return the status of a term # #UnicodeWriter(fileout,lineterminator='\n', delimiter=',', dialect='excel',quoting=csv.QUOTE_ALL) ##prints all new concepts, new alts, removed alts # depaltlist = [] # for y in oldalts: # if y in newalts: # pass # else: # depaltlist.append(y) # if depaltlist != []: # for z in depaltlist: # if z == lit(newcon): # pass # else: # wr.writerow((["Removed Alts"]+[newcon[30:]]+["| "]+[newcon]+[" | "]+[lit(newcon)]+[" | "]+depaltlist+[" |"])) ##finds all deprecated concepts #finds all new related links #finds all new defintions, scope notes, examples #gets removed related links | 2.124707 | 2 |
zulipterminal/config/markdown_examples.py | zee-bit/zulip-terminal | 407 | 6618979 | from typing import List
from typing_extensions import TypedDict
class MarkdownElements(TypedDict):
name: str
raw_text: str
html_element: str
MARKDOWN_ELEMENTS: List[MarkdownElements] = [
{
# BOLD TEXT
"name": "Bold text",
"raw_text": "**bold**",
"html_element": "<strong>bold</strong>",
},
{
# EMOJI
"name": "Emoji",
"raw_text": ":heart:",
"html_element": '<span class="emoji">:heart:</span>',
},
{
# MESSAGE LINKS
"name": "Message links",
"raw_text": "[Zulip website]\n(https://zulip.org)",
"html_element": '<a href="https://zulip.org">Zulip website</a>',
},
{
# BULLET LISTS
"name": "Bullet lists",
"raw_text": "* Milk\n* Tea\n * Green tea\n * Black tea\n"
" * Oolong tea\n* Coffee",
"html_element": "<ul><li>Milk</li><li>Tea<ul><li>Green tea</li>"
"<li>Black tea</li><li>Oolong tea</li></ul>"
"</li><li>Coffee</li>",
},
{
# NUMBERED LISTS
"name": "Numbered lists",
"raw_text": "1. Milk\n2. Tea\n3. Coffee",
"html_element": "<ol><li>Milk</li><li>Tea</li><li>Coffee</li></ol>",
},
{
# USER MENTIONS
"name": "User mentions",
"raw_text": "@**King Hamlet**",
"html_element": '<span class="user-mention">@King Hamlet</span>',
},
{
# USER SILENT MENTIONS
"name": "User silent mentions",
"raw_text": "@_**King Hamlet**",
"html_element": '<span class="user-mention silent">King Hamlet</span>',
},
{
# NOTIFY ALL RECIPIENTS
"name": "<NAME> recipients",
"raw_text": "@**all**",
"html_element": '<span class="user-mention">@all</span>',
},
{
# LINK TO A STREAM
"name": "Link to a stream",
"raw_text": "#**announce**",
"html_element": '<a class="stream" data-stream-id="6" '
'href="/#narrow/stream/6-announce">#announce</a>',
},
{
# STATUS MESSAGE
"name": "Status message",
"raw_text": "/me is busy writing code.",
"html_element": "<strong>{user}</strong> is busy writing code.",
},
{
# INLINE CODE
"name": "Inline code",
"raw_text": "Some inline `code`",
"html_element": "Some inline <code>code</code>",
},
{
# CODE BLOCK
"name": "Code block",
"raw_text": "```\ndef zulip():\n print 'Zulip'\n```",
"html_element": '<div class="codehilite"><pre><span></span><code>\n'
"def zulip():\n print 'Zulip'</code></pre></div>",
},
{
# QUOTED TEXT
"name": "Quoted text",
"raw_text": ">Quoted",
"html_element": "<blockquote>░ Quoted</blockquote>",
},
{
# QUOTED BLOCK
"name": "Quoted block",
"raw_text": "```quote\nQuoted block\n```",
"html_element": "<blockquote>\n░ Quoted block</blockquote>",
},
{
# TABLE RENDERING
"name": "Table rendering",
"raw_text": "|Name|Id|\n|--|--:|\n|Robert|1|\n|Mary|100|",
"html_element": (
"<table>"
"<thead>"
'<tr><th align="left">Name</th><th align="right">Id</th></tr>'
"</thead>"
"<tbody>"
'<tr><td align="left">Robert</td><td align="right">1</td></tr>'
'<tr><td align="left">Mary</td><td align="right">100</td></tr>'
"</tbody>"
"</table>"
),
},
]
| from typing import List
from typing_extensions import TypedDict
class MarkdownElements(TypedDict):
name: str
raw_text: str
html_element: str
MARKDOWN_ELEMENTS: List[MarkdownElements] = [
{
# BOLD TEXT
"name": "Bold text",
"raw_text": "**bold**",
"html_element": "<strong>bold</strong>",
},
{
# EMOJI
"name": "Emoji",
"raw_text": ":heart:",
"html_element": '<span class="emoji">:heart:</span>',
},
{
# MESSAGE LINKS
"name": "Message links",
"raw_text": "[Zulip website]\n(https://zulip.org)",
"html_element": '<a href="https://zulip.org">Zulip website</a>',
},
{
# BULLET LISTS
"name": "Bullet lists",
"raw_text": "* Milk\n* Tea\n * Green tea\n * Black tea\n"
" * Oolong tea\n* Coffee",
"html_element": "<ul><li>Milk</li><li>Tea<ul><li>Green tea</li>"
"<li>Black tea</li><li>Oolong tea</li></ul>"
"</li><li>Coffee</li>",
},
{
# NUMBERED LISTS
"name": "Numbered lists",
"raw_text": "1. Milk\n2. Tea\n3. Coffee",
"html_element": "<ol><li>Milk</li><li>Tea</li><li>Coffee</li></ol>",
},
{
# USER MENTIONS
"name": "User mentions",
"raw_text": "@**King Hamlet**",
"html_element": '<span class="user-mention">@King Hamlet</span>',
},
{
# USER SILENT MENTIONS
"name": "User silent mentions",
"raw_text": "@_**King Hamlet**",
"html_element": '<span class="user-mention silent">King Hamlet</span>',
},
{
# NOTIFY ALL RECIPIENTS
"name": "<NAME> recipients",
"raw_text": "@**all**",
"html_element": '<span class="user-mention">@all</span>',
},
{
# LINK TO A STREAM
"name": "Link to a stream",
"raw_text": "#**announce**",
"html_element": '<a class="stream" data-stream-id="6" '
'href="/#narrow/stream/6-announce">#announce</a>',
},
{
# STATUS MESSAGE
"name": "Status message",
"raw_text": "/me is busy writing code.",
"html_element": "<strong>{user}</strong> is busy writing code.",
},
{
# INLINE CODE
"name": "Inline code",
"raw_text": "Some inline `code`",
"html_element": "Some inline <code>code</code>",
},
{
# CODE BLOCK
"name": "Code block",
"raw_text": "```\ndef zulip():\n print 'Zulip'\n```",
"html_element": '<div class="codehilite"><pre><span></span><code>\n'
"def zulip():\n print 'Zulip'</code></pre></div>",
},
{
# QUOTED TEXT
"name": "Quoted text",
"raw_text": ">Quoted",
"html_element": "<blockquote>░ Quoted</blockquote>",
},
{
# QUOTED BLOCK
"name": "Quoted block",
"raw_text": "```quote\nQuoted block\n```",
"html_element": "<blockquote>\n░ Quoted block</blockquote>",
},
{
# TABLE RENDERING
"name": "Table rendering",
"raw_text": "|Name|Id|\n|--|--:|\n|Robert|1|\n|Mary|100|",
"html_element": (
"<table>"
"<thead>"
'<tr><th align="left">Name</th><th align="right">Id</th></tr>'
"</thead>"
"<tbody>"
'<tr><td align="left">Robert</td><td align="right">1</td></tr>'
'<tr><td align="left">Mary</td><td align="right">100</td></tr>'
"</tbody>"
"</table>"
),
},
]
| en | 0.425184 | # BOLD TEXT # EMOJI # MESSAGE LINKS # BULLET LISTS # NUMBERED LISTS # USER MENTIONS # USER SILENT MENTIONS # NOTIFY ALL RECIPIENTS # LINK TO A STREAM #narrow/stream/6-announce">#announce</a>', # STATUS MESSAGE # INLINE CODE # CODE BLOCK # QUOTED TEXT # QUOTED BLOCK # TABLE RENDERING | 2.963074 | 3 |
mvpa_itab/script/carlo/mnemonic_representation/matteo_figures_2.py | robbisg/mvpa_itab_wu | 1 | 6618980 | import matplotlib.pyplot as pl
import numpy as np
import pandas as pd
from pyitab.analysis.results.base import filter_dataframe
from pyitab.analysis.results.dataframe import apply_function
import seaborn as sns
from matplotlib.colors import LinearSegmentedColormap
def find_distance_boundaries(data):
scene_center = .5*(d['Scena_offset_sec'] - d['Scena_onset_sec'])
distance_offset = scene_center - d['VAS sec']
value_click = np.int_(np.sign(distance_offset) == 1)
return value_click
def windowed_similarity(x, y, window):
spearman = []
for i in range(len(x) - window):
s = spearmanr(x[i:i+window], y[i:i+window])
spearman.append(s[0])
return spearman
def bootstrap(x, y, n=100, fx=windowed_similarity, window=10):
permutations = []
for p in range(n):
idx = np.sort(np.random.choice(len(x), size=len(x), replace=True))
spearman = windowed_similarity(x[idx], y[idx], window)
permutations.append(spearman)
return permutations
def plot_fit(x, y, ax, linestyle='--', color='gray'):
from scipy.stats import linregress
m, b, r, p, s = linregress(x, y)
ax.plot(x, m*x+b, linestyle=linestyle, c=color, label=r**2)
#ax.legend()
pl.style.use("seaborn")
fontsize = 18
style = {
'figure.figsize': (19, 15),
'axes.facecolor': 'white',
'axes.spines.top': False,
'axes.spines.right': False,
'axes.spines.bottom': True,
'axes.spines.left': True,
'axes.edgecolor': 'black',
'axes.linewidth': 1.5,
'axes.grid': False,
'grid.color': 'white',
'xtick.color': 'black',
'ytick.color': 'black',
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.major.size': 3,
'ytick.major.size': 3,
'xtick.minor.size': 2,
'ytick.minor.size': 2,
'ytick.labelsize': fontsize-2,
'xtick.labelsize': fontsize-2,
'legend.fontsize': fontsize-5,
'legend.title_fontsize': fontsize-4,
'font.size': fontsize,
'axes.labelsize': fontsize-1,
'axes.titlesize': fontsize,
'svg.fonttype':'none'
}
pl.rcParams.update(style)
palette_scatter = LinearSegmentedColormap.from_list("scatter_click", ['#73a87c', '#eba2b6'], N=2)
palette_half = LinearSegmentedColormap.from_list("palette_part", ['purple', 'orange'], N=2)
experiment_list = [
"VAS_DOPPIA_Delayed", "VAS_DOPPIA_Immediate", "VAS_Mid", "VAS_NewIns"
]
experiment_figure = {
'VAS_DOPPIA_Delayed':'Exp. 3 | Delayed',
'VAS_DOPPIA_Immediate': 'Exp. 3 | Immediate',
'VAS_Mid': 'Exp. 2',
'VAS_NewIns': 'Exp. 1'
}
palette = {
'VAS_NewIns': sns.light_palette("dimgray", n_colors=9),
'VAS_Mid': sns.light_palette("#046c9a", n_colors=9),
'VAS_DOPPIA_Immediate': sns.light_palette("#f2300f", n_colors=9),
'VAS_DOPPIA_Delayed': sns.light_palette("#0b775e", n_colors=9),
}
for e in experiment_list:
pl.figure()
sns.palplot(palette[e])
path = "/home/robbis/Dropbox/PhD/experiments/memory_movie/paper_2/"
full_dataset = list()
for experiment in experiment_list[:]:
print(experiment)
data = pd.read_excel(os.path.join(path, experiment+"_Recognition.xlsx"))
d = filter_dataframe(data, corresp=[1], **{'IR.ACC':[1]})
d = d.dropna()
if experiment == "VAS_DOPPIA_Delayed":
d = filter_dataframe(d, Session=[2])
if experiment == "VAS_DOPPIA_Immediate":
d = filter_dataframe(d, Session=[1])
d['experiment'] = [experiment for _ in range(d.shape[0])]
d['Experiment'] = [experiment_figure[experiment] for _ in range(d.shape[0])]
full_dataset.append(d)
ds = pd.concat(full_dataset)
#################################################
############### Figure 2 ########################
#################################################
experiment = 'VAS_NewIns'
d = filter_dataframe(ds, experiment=['VAS_NewIns'])
fig = pl.figure(figsize=(15, 15))
grid = pl.GridSpec(8, 2, figure=fig)
color_light = palette[experiment][4]
color_dark = palette[experiment][-1]
#### Click distribution ###
value_click = np.int_(np.sign(d['DIST sec']) == 1)
ax1 = pl.subplot(grid[:3, 0])
scatter = ax1.scatter(d['VAS_sec'], d['Subject'],
marker='|',
c=value_click,
cmap=palette_scatter)
handles = scatter.legend_elements()[0]
labels = ['Underestimation', 'Overestimation']
#legend1 = ax1.legend(handles, labels, loc=(1.,.9), title="Response")
ax1.set_yticks(np.arange(1, 1+np.max(d['Subject'])))
ax1.set_yticklabels(np.unique(d['Subject']))
ax1.set_ylabel("Subject")
ax1.set_title("Click distribution")
ax1.set_xlim(-200, 200+np.max(d['VAS_Corr sec']))
ax2 = pl.subplot(grid[3:4, 0], sharex=ax1)
sns.distplot(d['VAS_sec'], ax=ax2, bins=100, color=color_light)
ax2.set_xlim(-200, 200+np.max(d['VAS_Corr sec']))
ax2.set_xlabel("Clip onset (sec)")
### Distribution of errors ###
drel_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST sec', fx=np.nanmean)
dabs_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST(ABS) sec', fx=np.nanmean)
color_rel = color_light
color_abs = color_dark
# Scatter
ax3 = pl.subplot(grid[:4, 1])
ax3.scatter(d['VAS_Corr sec'], d['DIST sec'], alpha=0.2, marker='.', color=color_rel)
ax3.plot(drel_mean['VAS_Corr sec'], drel_mean["DIST sec"], '-o', c=color_rel, label="Relative")
ax3.scatter(d['VAS_Corr sec'], d['DIST(ABS) sec'], alpha=0.2, marker='.', color=color_abs)
ax3.plot(dabs_mean['VAS_Corr sec'], dabs_mean["DIST(ABS) sec"], '-o', c=color_abs, label="Absolute")
ax3.hlines(0, 0, np.max(d['VAS_Corr sec']), color='black', linestyles="dashed")
ax3.set_ylabel("Distance (sec)")
ax3.set_xlabel("Clip onset (sec)")
legend = pl.legend(loc=3)
legend.set_title("Distance")
# Anova
dmelt = d.melt(id_vars=['Subject', 'Part'],
value_vars=['DIST sec', "DIST(ABS) sec"],
value_name='Distance (sec)',
var_name="Distance"
)
ax3 = pl.subplot(grid[4:, 0])
g = sns.boxenplot(x="Part",
y="Distance (sec)",
hue="Distance",
data=dmelt,
dodge=True,
showfliers=False,
palette=sns.color_palette([color_rel, color_abs], n_colors=2),
ax=ax3
)
legend = g.axes.legend(loc=3)
pl.hlines(0, -.5, 5.5, color='dimgray', zorder=5, linestyles="dashed")
legend.set_title("Distance")
texts = g.get_legend().get_texts()
for t, l in zip(texts, ['Relative', 'Absolute']): t.set_text(l)
# Scatter distance
drel_mean['Clip distance from end (sec)'] = np.max(drel_mean['VAS_Corr sec']) - drel_mean['VAS_Corr sec']
dabs_mean['Clip distance from end (sec)'] = np.max(dabs_mean['VAS_Corr sec']) - dabs_mean['VAS_Corr sec']
ax4 = pl.subplot(grid[4:,1])
ax4.scatter(drel_mean['VAS_Corr sec'],
drel_mean['DIST sec'],
marker='o',
color=color_rel)
plot_fit(drel_mean['VAS_Corr sec'],
drel_mean['DIST sec'], ax4)
ax4.set_xlabel("Clip onset (sec)")
ax4.set_ylabel("Relative positioning error (sec)")
pl.tight_layout()
pl.savefig(os.path.join(path, "Figure2.svg"), dpi=300)
pl.savefig(os.path.join(path, "Figure2.png"), dpi=300)
#######################################################
###################### Figure 3 #######################
#######################################################
experiment = 'VAS_Mid'
d = filter_dataframe(ds, experiment=['VAS_Mid'])
fig = pl.figure(figsize=(15, 15))
grid = pl.GridSpec(8, 2, figure=fig)
color_light = palette[experiment][4]
color_dark = palette[experiment][-1]
#### Panel A - Click distribution ###
value_click = np.int_(np.sign(d['DIST sec']) == 1)
ax1 = pl.subplot(grid[:3, 0])
scatter = ax1.scatter(d['VAS_sec'], d['Subject'],
marker='|',
c=value_click,
cmap=palette_scatter)
handles = scatter.legend_elements()[0]
labels = ['Anticipated', 'Posticipated']
#legend1 = ax1.legend(handles, labels, loc=(1.,.9), title="Response")
ax1.set_yticks(np.arange(1, 1+np.max(d['Subject'])))
ax1.set_yticklabels(np.unique(d['Subject']))
ax1.set_ylabel("Subject")
ax1.set_title("Click distribution")
ax2 = pl.subplot(grid[3:4, 0], sharex=ax1)
sns.distplot(d['VAS_sec'], ax=ax2, bins=100, color=color_light)
ax2.set_xlim(-200, 200+np.max(d['VAS_Corr sec']))
ax1.set_xlim(-200, 200+np.max(d['VAS_Corr sec']))
ax2.set_xlabel("Clip onset (sec)")
######## Panel B - ANOVA #########
drel_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST sec', fx=np.nanmean)
dabs_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST(ABS) sec', fx=np.nanmean)
color_rel = color_light
color_abs = color_dark
# Anova
dmelt = d.melt(id_vars=['Subject', 'Part'],
value_vars=['DIST sec', "DIST(ABS) sec"],
value_name='Distance (sec)',
var_name="Distance"
)
ax3 = pl.subplot(grid[:4, 1])
g = sns.boxenplot(x="Part",
y="Distance (sec)",
hue="Distance",
data=dmelt,
dodge=True,
showfliers=False,
palette=sns.color_palette([color_rel, color_abs], n_colors=2),
ax=ax3
)
legend = g.axes.legend(loc=3)
pl.hlines(0, -.5, 5.5, color='dimgray', zorder=5, linestyles="dashed")
legend.set_title("Distance")
texts = g.get_legend().get_texts()
for t, l in zip(texts, ['Relative', 'Absolute']): t.set_text(l)
### Panel C - ANOVA NewIns vs Mid ###
ax4 = pl.subplot(grid[4:, 0])
comparison = ['VAS_NewIns', 'VAS_Mid']
ds_comp = filter_dataframe(ds, experiment=comparison)
m = apply_function(ds_comp, keys=['experiment', 'Part'], attr='DIST sec', fx=np.mean)
m1 = filter_dataframe(m, experiment=[comparison[0]])
m2 = filter_dataframe(m, experiment=[comparison[1]])
color1_l = palette[comparison[0]][4]
color2_l = palette[comparison[1]][4]
color1_d = palette[comparison[0]][-1]
color2_d = palette[comparison[1]][-1]
comparison_figure = [experiment_figure[comparison[0]],
experiment_figure[comparison[1]]]
g = sns.boxenplot(x="Part",
y="DIST sec",
hue="Experiment",
data=ds_comp,
dodge=True,
showfliers=False,
hue_order=comparison_figure,
palette=[color1_l, color2_l],
ax=ax4
)
g.plot(m1['Part']-1.2, m1["DIST sec"], 'o', c=color1_d, label="VAS_NewIns", ms=10)
g.plot(m2['Part']-.8, m2["DIST sec"], 'o', c=color2_d, label="VAS_Mid", ms=10)
pl.hlines(0, -.5, 5.5, color='dimgray', zorder=5, linestyles="dashed")
ax4.set_ylabel("Distance (sec)")
########### Panel D ###################
ax5 = pl.subplot(grid[4:,1])
d = filter_dataframe(ds, experiment=['VAS_Mid'])
drel_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST sec', fx=np.nanmean)
half_ds = int(drel_mean.shape[0] * 0.5)
drel_half1 = drel_mean[:half_ds]
drel_half2 = drel_mean[half_ds:]
drel_mean['nhalf'] = np.ones_like(drel_mean.shape[0])
drel_mean['nhalf'].values[half_ds:] = 2
scatter = ax5.scatter(
drel_half2['VAS_Corr sec'],
drel_half2['DIST sec'],
marker='o',
c=color_rel,
#cmap='purple'
)
#plot_fit(drel_mean['VAS_Corr sec'],
# drel_mean['DIST sec'],
# ax5)
#plot_fit(drel_half1['VAS_Corr sec'],
# drel_half1['DIST sec'], ax5)
plot_fit(drel_half2['VAS_Corr sec'],
drel_half2['DIST sec'], ax5)
"""
ax5.vlines(drel_mean['VAS_Corr sec'][half_ds],
np.min(drel_mean['DIST sec']),
np.max(drel_mean['DIST sec']),
color='black', zorder=5, linestyles="solid")
"""
ax5.set_xlabel("Clip onset (sec)")
ax5.set_ylabel("Relative positioning error (sec)")
#handles = scatter.legend_elements()[0]
#labels = ['First Half', 'Second Half']
#legend1 = ax5.legend(handles, labels, loc='upper right', title="Part")
pl.tight_layout()
pl.savefig(os.path.join(path, "Figure3.svg"), dpi=300)
pl.savefig(os.path.join(path, "Figure3.png"), dpi=300)
#######################################################
################## Figure 4 ###########################
#######################################################
#experiments = ['VAS_DOPPIA_Immediate', 'VAS_DOPPIA_Delayed']
experiment = 'VAS_DOPPIA_Immediate'
#for e, experiment in enumerate(experiments):
fig = pl.figure(figsize=(20, 7))
grid = pl.GridSpec(1, 3, figure=fig)
d = filter_dataframe(ds, experiment=[experiment])
drel_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST sec', fx=np.nanmean)
dabs_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST(ABS) sec', fx=np.nanmean)
color_rel = palette[experiment][4]
color_abs = palette[experiment][-1]
# Anova
dmelt = d.melt(id_vars=['Subject', 'Part'],
value_vars=['DIST sec', "DIST(ABS) sec"],
value_name='Distance (sec)',
var_name="Distance"
)
ax1 = pl.subplot(grid[0])
g = sns.boxenplot(x="Part",
y="Distance (sec)",
hue="Distance",
data=dmelt,
dodge=True,
showfliers=False,
palette=sns.color_palette([color_rel, color_abs], n_colors=2),
ax=ax1
)
legend = g.axes.legend(loc=3)
pl.hlines(0, -.5, 5.5, color='dimgray', zorder=5, linestyles="dashed")
legend.set_title("Distance")
texts = g.get_legend().get_texts()
for t, l in zip(texts, ['Relative', 'Absolute']): t.set_text(l)
###########
ax2 = pl.subplot(grid[1])
ax2.scatter(drel_mean['VAS_Corr sec'],
drel_mean['DIST sec'],
marker='o',
color=color_rel)
plot_fit(drel_mean['VAS_Corr sec'],
drel_mean['DIST sec'], ax2)
ax2.set_xlabel("Clip onset (sec)")
ax2.set_ylabel("Relative positioning error (sec)")
#####
ax3 = pl.subplot(grid[2])
comparison = ['VAS_NewIns', experiment]
ds_comp = filter_dataframe(ds, experiment=comparison)
m = apply_function(ds_comp, keys=['experiment', 'Part', "Experiment"],
attr='DIST sec', fx=np.mean)
m1 = filter_dataframe(m, experiment=[comparison[0]])
m2 = filter_dataframe(m, experiment=[comparison[1]])
palette_light = [palette[comp][4] for comp in comparison]
palette_dark = [palette[comp][-1] for comp in comparison]
comparison_figure = [experiment_figure[comparison[0]],
experiment_figure[comparison[1]]]
g = sns.boxenplot(x="Part",
y="DIST sec",
hue="Experiment",
data=ds_comp,
dodge=True,
showfliers=False,
palette=palette_light,
hue_order=comparison_figure,
ax=ax3
)
g.plot(m1['Part']-1.2, m1["DIST sec"], 'o',
c=palette_dark[0], label=comparison[0], ms=10)
g.plot(m2['Part']-.8, m2["DIST sec"], 'o',
c=palette_dark[1], label=comparison[1], ms=10)
g.hlines(0, -.5, 5.5, color='dimgray', zorder=5, linestyles="dashed")
ax3.set_ylabel("Distance (sec)")
pl.tight_layout()
name = "Figure%d" % (4)
pl.savefig(os.path.join(path, name+".svg"), dpi=300)
pl.savefig(os.path.join(path, name+".png"), dpi=300)
#######################################################
###################### Figure 5 #######################
#######################################################
list_comparison = [
['VAS_DOPPIA_Delayed', 'VAS_DOPPIA_Immediate'],
#['VAS_NewIns', 'VAS_DOPPIA_Immediate'],
#['VAS_NewIns', 'VAS_DOPPIA_Delayed'],
]
fig = pl.figure(figsize=(15, 15))
grid = pl.GridSpec(2, 2, figure=fig)
for c, comparison in enumerate(list_comparison):
experiment = comparison[0]
d = filter_dataframe(ds, experiment=[experiment])
drel_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST sec', fx=np.nanmean)
dabs_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST(ABS) sec', fx=np.nanmean)
color_rel = palette[experiment][4]
color_abs = palette[experiment][-1]
# Anova
dmelt = d.melt(id_vars=['Subject', 'Part'],
value_vars=['DIST sec', "DIST(ABS) sec"],
value_name='Distance (sec)',
var_name="Distance"
)
ax1 = pl.subplot(grid[0, 0])
g = sns.boxenplot(x="Part",
y="Distance (sec)",
hue="Distance",
data=dmelt,
dodge=True,
showfliers=False,
palette=sns.color_palette([color_rel, color_abs], n_colors=2),
ax=ax1
)
legend = g.axes.legend(loc=3)
pl.hlines(0, -.5, 5.5, color='dimgray', zorder=5, linestyles="dashed")
legend.set_title("Distance")
texts = g.get_legend().get_texts()
for t, l in zip(texts, ['Relative', 'Absolute']): t.set_text(l)
###########
ax2 = pl.subplot(grid[0, 1])
ax2.scatter(drel_mean['VAS_Corr sec'],
drel_mean['DIST sec'],
marker='o',
color=color_rel)
plot_fit(drel_mean['VAS_Corr sec'],
drel_mean['DIST sec'], ax2)
ax2.set_xlabel("Clip onset (sec)")
ax2.set_ylabel("Relative positioning error (sec)")
#####
ax3 = pl.subplot(grid[1, 0])
comparison = ['VAS_NewIns', experiment]
ds_comp = filter_dataframe(ds, experiment=comparison)
m = apply_function(ds_comp, keys=['experiment', 'Part', "Experiment"],
attr='DIST sec', fx=np.mean)
m1 = filter_dataframe(m, experiment=[comparison[0]])
m2 = filter_dataframe(m, experiment=[comparison[1]])
palette_light = [palette[comp][4] for comp in comparison]
palette_dark = [palette[comp][-1] for comp in comparison]
comparison_figure = [experiment_figure[comparison[0]],
experiment_figure[comparison[1]]]
g = sns.boxenplot(x="Part",
y="DIST sec",
hue="Experiment",
data=ds_comp,
dodge=True,
showfliers=False,
palette=palette_light,
hue_order=comparison_figure,
ax=ax3
)
g.plot(m1['Part']-1.2, m1["DIST sec"], 'o',
c=palette_dark[0], label=comparison[0], ms=10)
g.plot(m2['Part']-.8, m2["DIST sec"], 'o',
c=palette_dark[1], label=comparison[1], ms=10)
g.hlines(0, -.5, 5.5, color='dimgray', zorder=5, linestyles="dashed")
ax3.set_ylabel("Distance (sec)")
##################################
ax4 = pl.subplot(grid[1, 1])
comparison = ['VAS_DOPPIA_Delayed', 'VAS_DOPPIA_Immediate']
ds_comp = filter_dataframe(ds, experiment=comparison)
m = apply_function(ds_comp, keys=['experiment', 'Part', "Experiment"],
attr='DIST sec', fx=np.mean)
m1 = filter_dataframe(m, experiment=[comparison[0]])
m2 = filter_dataframe(m, experiment=[comparison[1]])
palette_light = [palette[comp][4] for comp in comparison]
palette_dark = [palette[comp][-1] for comp in comparison]
comparison_figure = [experiment_figure[comparison[0]],
experiment_figure[comparison[1]]]
g = sns.boxenplot(x="Part",
y="DIST sec",
hue="Experiment",
data=ds_comp,
dodge=True,
showfliers=False,
palette=palette_light,
hue_order=comparison_figure,
ax=ax4
)
g.plot(m1['Part']-1.2, m1["DIST sec"], 'o',
c=palette_dark[0], label=comparison[0], ms=10)
g.plot(m2['Part']-.8, m2["DIST sec"], 'o',
c=palette_dark[1], label=comparison[1], ms=10)
g.hlines(0, -.5, 5.5, color='dimgray', zorder=5, linestyles="dashed")
ax4.set_ylabel("Distance (sec)")
pl.tight_layout()
pl.savefig(os.path.join(path, "Figure5.svg"), dpi=300)
pl.savefig(os.path.join(path, "Figure5.png"), dpi=300) | import matplotlib.pyplot as pl
import numpy as np
import pandas as pd
from pyitab.analysis.results.base import filter_dataframe
from pyitab.analysis.results.dataframe import apply_function
import seaborn as sns
from matplotlib.colors import LinearSegmentedColormap
def find_distance_boundaries(data):
scene_center = .5*(d['Scena_offset_sec'] - d['Scena_onset_sec'])
distance_offset = scene_center - d['VAS sec']
value_click = np.int_(np.sign(distance_offset) == 1)
return value_click
def windowed_similarity(x, y, window):
spearman = []
for i in range(len(x) - window):
s = spearmanr(x[i:i+window], y[i:i+window])
spearman.append(s[0])
return spearman
def bootstrap(x, y, n=100, fx=windowed_similarity, window=10):
permutations = []
for p in range(n):
idx = np.sort(np.random.choice(len(x), size=len(x), replace=True))
spearman = windowed_similarity(x[idx], y[idx], window)
permutations.append(spearman)
return permutations
def plot_fit(x, y, ax, linestyle='--', color='gray'):
from scipy.stats import linregress
m, b, r, p, s = linregress(x, y)
ax.plot(x, m*x+b, linestyle=linestyle, c=color, label=r**2)
#ax.legend()
pl.style.use("seaborn")
fontsize = 18
style = {
'figure.figsize': (19, 15),
'axes.facecolor': 'white',
'axes.spines.top': False,
'axes.spines.right': False,
'axes.spines.bottom': True,
'axes.spines.left': True,
'axes.edgecolor': 'black',
'axes.linewidth': 1.5,
'axes.grid': False,
'grid.color': 'white',
'xtick.color': 'black',
'ytick.color': 'black',
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.major.size': 3,
'ytick.major.size': 3,
'xtick.minor.size': 2,
'ytick.minor.size': 2,
'ytick.labelsize': fontsize-2,
'xtick.labelsize': fontsize-2,
'legend.fontsize': fontsize-5,
'legend.title_fontsize': fontsize-4,
'font.size': fontsize,
'axes.labelsize': fontsize-1,
'axes.titlesize': fontsize,
'svg.fonttype':'none'
}
pl.rcParams.update(style)
palette_scatter = LinearSegmentedColormap.from_list("scatter_click", ['#73a87c', '#eba2b6'], N=2)
palette_half = LinearSegmentedColormap.from_list("palette_part", ['purple', 'orange'], N=2)
experiment_list = [
"VAS_DOPPIA_Delayed", "VAS_DOPPIA_Immediate", "VAS_Mid", "VAS_NewIns"
]
experiment_figure = {
'VAS_DOPPIA_Delayed':'Exp. 3 | Delayed',
'VAS_DOPPIA_Immediate': 'Exp. 3 | Immediate',
'VAS_Mid': 'Exp. 2',
'VAS_NewIns': 'Exp. 1'
}
palette = {
'VAS_NewIns': sns.light_palette("dimgray", n_colors=9),
'VAS_Mid': sns.light_palette("#046c9a", n_colors=9),
'VAS_DOPPIA_Immediate': sns.light_palette("#f2300f", n_colors=9),
'VAS_DOPPIA_Delayed': sns.light_palette("#0b775e", n_colors=9),
}
for e in experiment_list:
pl.figure()
sns.palplot(palette[e])
path = "/home/robbis/Dropbox/PhD/experiments/memory_movie/paper_2/"
full_dataset = list()
for experiment in experiment_list[:]:
print(experiment)
data = pd.read_excel(os.path.join(path, experiment+"_Recognition.xlsx"))
d = filter_dataframe(data, corresp=[1], **{'IR.ACC':[1]})
d = d.dropna()
if experiment == "VAS_DOPPIA_Delayed":
d = filter_dataframe(d, Session=[2])
if experiment == "VAS_DOPPIA_Immediate":
d = filter_dataframe(d, Session=[1])
d['experiment'] = [experiment for _ in range(d.shape[0])]
d['Experiment'] = [experiment_figure[experiment] for _ in range(d.shape[0])]
full_dataset.append(d)
ds = pd.concat(full_dataset)
#################################################
############### Figure 2 ########################
#################################################
experiment = 'VAS_NewIns'
d = filter_dataframe(ds, experiment=['VAS_NewIns'])
fig = pl.figure(figsize=(15, 15))
grid = pl.GridSpec(8, 2, figure=fig)
color_light = palette[experiment][4]
color_dark = palette[experiment][-1]
#### Click distribution ###
value_click = np.int_(np.sign(d['DIST sec']) == 1)
ax1 = pl.subplot(grid[:3, 0])
scatter = ax1.scatter(d['VAS_sec'], d['Subject'],
marker='|',
c=value_click,
cmap=palette_scatter)
handles = scatter.legend_elements()[0]
labels = ['Underestimation', 'Overestimation']
#legend1 = ax1.legend(handles, labels, loc=(1.,.9), title="Response")
ax1.set_yticks(np.arange(1, 1+np.max(d['Subject'])))
ax1.set_yticklabels(np.unique(d['Subject']))
ax1.set_ylabel("Subject")
ax1.set_title("Click distribution")
ax1.set_xlim(-200, 200+np.max(d['VAS_Corr sec']))
ax2 = pl.subplot(grid[3:4, 0], sharex=ax1)
sns.distplot(d['VAS_sec'], ax=ax2, bins=100, color=color_light)
ax2.set_xlim(-200, 200+np.max(d['VAS_Corr sec']))
ax2.set_xlabel("Clip onset (sec)")
### Distribution of errors ###
drel_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST sec', fx=np.nanmean)
dabs_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST(ABS) sec', fx=np.nanmean)
color_rel = color_light
color_abs = color_dark
# Scatter
ax3 = pl.subplot(grid[:4, 1])
ax3.scatter(d['VAS_Corr sec'], d['DIST sec'], alpha=0.2, marker='.', color=color_rel)
ax3.plot(drel_mean['VAS_Corr sec'], drel_mean["DIST sec"], '-o', c=color_rel, label="Relative")
ax3.scatter(d['VAS_Corr sec'], d['DIST(ABS) sec'], alpha=0.2, marker='.', color=color_abs)
ax3.plot(dabs_mean['VAS_Corr sec'], dabs_mean["DIST(ABS) sec"], '-o', c=color_abs, label="Absolute")
ax3.hlines(0, 0, np.max(d['VAS_Corr sec']), color='black', linestyles="dashed")
ax3.set_ylabel("Distance (sec)")
ax3.set_xlabel("Clip onset (sec)")
legend = pl.legend(loc=3)
legend.set_title("Distance")
# Anova
dmelt = d.melt(id_vars=['Subject', 'Part'],
value_vars=['DIST sec', "DIST(ABS) sec"],
value_name='Distance (sec)',
var_name="Distance"
)
ax3 = pl.subplot(grid[4:, 0])
g = sns.boxenplot(x="Part",
y="Distance (sec)",
hue="Distance",
data=dmelt,
dodge=True,
showfliers=False,
palette=sns.color_palette([color_rel, color_abs], n_colors=2),
ax=ax3
)
legend = g.axes.legend(loc=3)
pl.hlines(0, -.5, 5.5, color='dimgray', zorder=5, linestyles="dashed")
legend.set_title("Distance")
texts = g.get_legend().get_texts()
for t, l in zip(texts, ['Relative', 'Absolute']): t.set_text(l)
# Scatter distance
drel_mean['Clip distance from end (sec)'] = np.max(drel_mean['VAS_Corr sec']) - drel_mean['VAS_Corr sec']
dabs_mean['Clip distance from end (sec)'] = np.max(dabs_mean['VAS_Corr sec']) - dabs_mean['VAS_Corr sec']
ax4 = pl.subplot(grid[4:,1])
ax4.scatter(drel_mean['VAS_Corr sec'],
drel_mean['DIST sec'],
marker='o',
color=color_rel)
plot_fit(drel_mean['VAS_Corr sec'],
drel_mean['DIST sec'], ax4)
ax4.set_xlabel("Clip onset (sec)")
ax4.set_ylabel("Relative positioning error (sec)")
pl.tight_layout()
pl.savefig(os.path.join(path, "Figure2.svg"), dpi=300)
pl.savefig(os.path.join(path, "Figure2.png"), dpi=300)
#######################################################
###################### Figure 3 #######################
#######################################################
experiment = 'VAS_Mid'
d = filter_dataframe(ds, experiment=['VAS_Mid'])
fig = pl.figure(figsize=(15, 15))
grid = pl.GridSpec(8, 2, figure=fig)
color_light = palette[experiment][4]
color_dark = palette[experiment][-1]
#### Panel A - Click distribution ###
value_click = np.int_(np.sign(d['DIST sec']) == 1)
ax1 = pl.subplot(grid[:3, 0])
scatter = ax1.scatter(d['VAS_sec'], d['Subject'],
marker='|',
c=value_click,
cmap=palette_scatter)
handles = scatter.legend_elements()[0]
labels = ['Anticipated', 'Posticipated']
#legend1 = ax1.legend(handles, labels, loc=(1.,.9), title="Response")
ax1.set_yticks(np.arange(1, 1+np.max(d['Subject'])))
ax1.set_yticklabels(np.unique(d['Subject']))
ax1.set_ylabel("Subject")
ax1.set_title("Click distribution")
ax2 = pl.subplot(grid[3:4, 0], sharex=ax1)
sns.distplot(d['VAS_sec'], ax=ax2, bins=100, color=color_light)
ax2.set_xlim(-200, 200+np.max(d['VAS_Corr sec']))
ax1.set_xlim(-200, 200+np.max(d['VAS_Corr sec']))
ax2.set_xlabel("Clip onset (sec)")
######## Panel B - ANOVA #########
drel_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST sec', fx=np.nanmean)
dabs_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST(ABS) sec', fx=np.nanmean)
color_rel = color_light
color_abs = color_dark
# Anova
dmelt = d.melt(id_vars=['Subject', 'Part'],
value_vars=['DIST sec', "DIST(ABS) sec"],
value_name='Distance (sec)',
var_name="Distance"
)
ax3 = pl.subplot(grid[:4, 1])
g = sns.boxenplot(x="Part",
y="Distance (sec)",
hue="Distance",
data=dmelt,
dodge=True,
showfliers=False,
palette=sns.color_palette([color_rel, color_abs], n_colors=2),
ax=ax3
)
legend = g.axes.legend(loc=3)
pl.hlines(0, -.5, 5.5, color='dimgray', zorder=5, linestyles="dashed")
legend.set_title("Distance")
texts = g.get_legend().get_texts()
for t, l in zip(texts, ['Relative', 'Absolute']): t.set_text(l)
### Panel C - ANOVA NewIns vs Mid ###
ax4 = pl.subplot(grid[4:, 0])
comparison = ['VAS_NewIns', 'VAS_Mid']
ds_comp = filter_dataframe(ds, experiment=comparison)
m = apply_function(ds_comp, keys=['experiment', 'Part'], attr='DIST sec', fx=np.mean)
m1 = filter_dataframe(m, experiment=[comparison[0]])
m2 = filter_dataframe(m, experiment=[comparison[1]])
color1_l = palette[comparison[0]][4]
color2_l = palette[comparison[1]][4]
color1_d = palette[comparison[0]][-1]
color2_d = palette[comparison[1]][-1]
comparison_figure = [experiment_figure[comparison[0]],
experiment_figure[comparison[1]]]
g = sns.boxenplot(x="Part",
y="DIST sec",
hue="Experiment",
data=ds_comp,
dodge=True,
showfliers=False,
hue_order=comparison_figure,
palette=[color1_l, color2_l],
ax=ax4
)
g.plot(m1['Part']-1.2, m1["DIST sec"], 'o', c=color1_d, label="VAS_NewIns", ms=10)
g.plot(m2['Part']-.8, m2["DIST sec"], 'o', c=color2_d, label="VAS_Mid", ms=10)
pl.hlines(0, -.5, 5.5, color='dimgray', zorder=5, linestyles="dashed")
ax4.set_ylabel("Distance (sec)")
########### Panel D ###################
ax5 = pl.subplot(grid[4:,1])
d = filter_dataframe(ds, experiment=['VAS_Mid'])
drel_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST sec', fx=np.nanmean)
half_ds = int(drel_mean.shape[0] * 0.5)
drel_half1 = drel_mean[:half_ds]
drel_half2 = drel_mean[half_ds:]
drel_mean['nhalf'] = np.ones_like(drel_mean.shape[0])
drel_mean['nhalf'].values[half_ds:] = 2
scatter = ax5.scatter(
drel_half2['VAS_Corr sec'],
drel_half2['DIST sec'],
marker='o',
c=color_rel,
#cmap='purple'
)
#plot_fit(drel_mean['VAS_Corr sec'],
# drel_mean['DIST sec'],
# ax5)
#plot_fit(drel_half1['VAS_Corr sec'],
# drel_half1['DIST sec'], ax5)
plot_fit(drel_half2['VAS_Corr sec'],
drel_half2['DIST sec'], ax5)
"""
ax5.vlines(drel_mean['VAS_Corr sec'][half_ds],
np.min(drel_mean['DIST sec']),
np.max(drel_mean['DIST sec']),
color='black', zorder=5, linestyles="solid")
"""
ax5.set_xlabel("Clip onset (sec)")
ax5.set_ylabel("Relative positioning error (sec)")
#handles = scatter.legend_elements()[0]
#labels = ['First Half', 'Second Half']
#legend1 = ax5.legend(handles, labels, loc='upper right', title="Part")
pl.tight_layout()
pl.savefig(os.path.join(path, "Figure3.svg"), dpi=300)
pl.savefig(os.path.join(path, "Figure3.png"), dpi=300)
#######################################################
################## Figure 4 ###########################
#######################################################
#experiments = ['VAS_DOPPIA_Immediate', 'VAS_DOPPIA_Delayed']
experiment = 'VAS_DOPPIA_Immediate'
#for e, experiment in enumerate(experiments):
fig = pl.figure(figsize=(20, 7))
grid = pl.GridSpec(1, 3, figure=fig)
d = filter_dataframe(ds, experiment=[experiment])
drel_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST sec', fx=np.nanmean)
dabs_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST(ABS) sec', fx=np.nanmean)
color_rel = palette[experiment][4]
color_abs = palette[experiment][-1]
# Anova
dmelt = d.melt(id_vars=['Subject', 'Part'],
value_vars=['DIST sec', "DIST(ABS) sec"],
value_name='Distance (sec)',
var_name="Distance"
)
ax1 = pl.subplot(grid[0])
g = sns.boxenplot(x="Part",
y="Distance (sec)",
hue="Distance",
data=dmelt,
dodge=True,
showfliers=False,
palette=sns.color_palette([color_rel, color_abs], n_colors=2),
ax=ax1
)
legend = g.axes.legend(loc=3)
pl.hlines(0, -.5, 5.5, color='dimgray', zorder=5, linestyles="dashed")
legend.set_title("Distance")
texts = g.get_legend().get_texts()
for t, l in zip(texts, ['Relative', 'Absolute']): t.set_text(l)
###########
ax2 = pl.subplot(grid[1])
ax2.scatter(drel_mean['VAS_Corr sec'],
drel_mean['DIST sec'],
marker='o',
color=color_rel)
plot_fit(drel_mean['VAS_Corr sec'],
drel_mean['DIST sec'], ax2)
ax2.set_xlabel("Clip onset (sec)")
ax2.set_ylabel("Relative positioning error (sec)")
#####
ax3 = pl.subplot(grid[2])
comparison = ['VAS_NewIns', experiment]
ds_comp = filter_dataframe(ds, experiment=comparison)
m = apply_function(ds_comp, keys=['experiment', 'Part', "Experiment"],
attr='DIST sec', fx=np.mean)
m1 = filter_dataframe(m, experiment=[comparison[0]])
m2 = filter_dataframe(m, experiment=[comparison[1]])
palette_light = [palette[comp][4] for comp in comparison]
palette_dark = [palette[comp][-1] for comp in comparison]
comparison_figure = [experiment_figure[comparison[0]],
experiment_figure[comparison[1]]]
g = sns.boxenplot(x="Part",
y="DIST sec",
hue="Experiment",
data=ds_comp,
dodge=True,
showfliers=False,
palette=palette_light,
hue_order=comparison_figure,
ax=ax3
)
g.plot(m1['Part']-1.2, m1["DIST sec"], 'o',
c=palette_dark[0], label=comparison[0], ms=10)
g.plot(m2['Part']-.8, m2["DIST sec"], 'o',
c=palette_dark[1], label=comparison[1], ms=10)
g.hlines(0, -.5, 5.5, color='dimgray', zorder=5, linestyles="dashed")
ax3.set_ylabel("Distance (sec)")
pl.tight_layout()
name = "Figure%d" % (4)
pl.savefig(os.path.join(path, name+".svg"), dpi=300)
pl.savefig(os.path.join(path, name+".png"), dpi=300)
#######################################################
###################### Figure 5 #######################
#######################################################
list_comparison = [
['VAS_DOPPIA_Delayed', 'VAS_DOPPIA_Immediate'],
#['VAS_NewIns', 'VAS_DOPPIA_Immediate'],
#['VAS_NewIns', 'VAS_DOPPIA_Delayed'],
]
fig = pl.figure(figsize=(15, 15))
grid = pl.GridSpec(2, 2, figure=fig)
for c, comparison in enumerate(list_comparison):
experiment = comparison[0]
d = filter_dataframe(ds, experiment=[experiment])
drel_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST sec', fx=np.nanmean)
dabs_mean = apply_function(d, keys=['VAS_Corr sec'], attr='DIST(ABS) sec', fx=np.nanmean)
color_rel = palette[experiment][4]
color_abs = palette[experiment][-1]
# Anova
dmelt = d.melt(id_vars=['Subject', 'Part'],
value_vars=['DIST sec', "DIST(ABS) sec"],
value_name='Distance (sec)',
var_name="Distance"
)
ax1 = pl.subplot(grid[0, 0])
g = sns.boxenplot(x="Part",
y="Distance (sec)",
hue="Distance",
data=dmelt,
dodge=True,
showfliers=False,
palette=sns.color_palette([color_rel, color_abs], n_colors=2),
ax=ax1
)
legend = g.axes.legend(loc=3)
pl.hlines(0, -.5, 5.5, color='dimgray', zorder=5, linestyles="dashed")
legend.set_title("Distance")
texts = g.get_legend().get_texts()
for t, l in zip(texts, ['Relative', 'Absolute']): t.set_text(l)
###########
ax2 = pl.subplot(grid[0, 1])
ax2.scatter(drel_mean['VAS_Corr sec'],
drel_mean['DIST sec'],
marker='o',
color=color_rel)
plot_fit(drel_mean['VAS_Corr sec'],
drel_mean['DIST sec'], ax2)
ax2.set_xlabel("Clip onset (sec)")
ax2.set_ylabel("Relative positioning error (sec)")
#####
ax3 = pl.subplot(grid[1, 0])
comparison = ['VAS_NewIns', experiment]
ds_comp = filter_dataframe(ds, experiment=comparison)
m = apply_function(ds_comp, keys=['experiment', 'Part', "Experiment"],
attr='DIST sec', fx=np.mean)
m1 = filter_dataframe(m, experiment=[comparison[0]])
m2 = filter_dataframe(m, experiment=[comparison[1]])
palette_light = [palette[comp][4] for comp in comparison]
palette_dark = [palette[comp][-1] for comp in comparison]
comparison_figure = [experiment_figure[comparison[0]],
experiment_figure[comparison[1]]]
g = sns.boxenplot(x="Part",
y="DIST sec",
hue="Experiment",
data=ds_comp,
dodge=True,
showfliers=False,
palette=palette_light,
hue_order=comparison_figure,
ax=ax3
)
g.plot(m1['Part']-1.2, m1["DIST sec"], 'o',
c=palette_dark[0], label=comparison[0], ms=10)
g.plot(m2['Part']-.8, m2["DIST sec"], 'o',
c=palette_dark[1], label=comparison[1], ms=10)
g.hlines(0, -.5, 5.5, color='dimgray', zorder=5, linestyles="dashed")
ax3.set_ylabel("Distance (sec)")
##################################
ax4 = pl.subplot(grid[1, 1])
comparison = ['VAS_DOPPIA_Delayed', 'VAS_DOPPIA_Immediate']
ds_comp = filter_dataframe(ds, experiment=comparison)
m = apply_function(ds_comp, keys=['experiment', 'Part', "Experiment"],
attr='DIST sec', fx=np.mean)
m1 = filter_dataframe(m, experiment=[comparison[0]])
m2 = filter_dataframe(m, experiment=[comparison[1]])
palette_light = [palette[comp][4] for comp in comparison]
palette_dark = [palette[comp][-1] for comp in comparison]
comparison_figure = [experiment_figure[comparison[0]],
experiment_figure[comparison[1]]]
g = sns.boxenplot(x="Part",
y="DIST sec",
hue="Experiment",
data=ds_comp,
dodge=True,
showfliers=False,
palette=palette_light,
hue_order=comparison_figure,
ax=ax4
)
g.plot(m1['Part']-1.2, m1["DIST sec"], 'o',
c=palette_dark[0], label=comparison[0], ms=10)
g.plot(m2['Part']-.8, m2["DIST sec"], 'o',
c=palette_dark[1], label=comparison[1], ms=10)
g.hlines(0, -.5, 5.5, color='dimgray', zorder=5, linestyles="dashed")
ax4.set_ylabel("Distance (sec)")
pl.tight_layout()
pl.savefig(os.path.join(path, "Figure5.svg"), dpi=300)
pl.savefig(os.path.join(path, "Figure5.png"), dpi=300) | de | 0.448675 | #ax.legend() ################################################# ############### Figure 2 ######################## ################################################# #### Click distribution ### #legend1 = ax1.legend(handles, labels, loc=(1.,.9), title="Response") ### Distribution of errors ### # Scatter # Anova # Scatter distance ####################################################### ###################### Figure 3 ####################### ####################################################### #### Panel A - Click distribution ### #legend1 = ax1.legend(handles, labels, loc=(1.,.9), title="Response") ######## Panel B - ANOVA ######### # Anova ### Panel C - ANOVA NewIns vs Mid ### ########### Panel D ################### #cmap='purple' #plot_fit(drel_mean['VAS_Corr sec'], # drel_mean['DIST sec'], # ax5) #plot_fit(drel_half1['VAS_Corr sec'], # drel_half1['DIST sec'], ax5) ax5.vlines(drel_mean['VAS_Corr sec'][half_ds], np.min(drel_mean['DIST sec']), np.max(drel_mean['DIST sec']), color='black', zorder=5, linestyles="solid") #handles = scatter.legend_elements()[0] #labels = ['First Half', 'Second Half'] #legend1 = ax5.legend(handles, labels, loc='upper right', title="Part") ####################################################### ################## Figure 4 ########################### ####################################################### #experiments = ['VAS_DOPPIA_Immediate', 'VAS_DOPPIA_Delayed'] #for e, experiment in enumerate(experiments): # Anova ########### ##### ####################################################### ###################### Figure 5 ####################### ####################################################### #['VAS_NewIns', 'VAS_DOPPIA_Immediate'], #['VAS_NewIns', 'VAS_DOPPIA_Delayed'], # Anova ########### ##### ################################## | 2.289278 | 2 |
ProteinGraphML/MLTools/MetapathFeatures/__init__.py | JessBinder/ProteinGraphML | 10 | 6618981 |
from .nodes import *
from .functions import *
from .featureBuilder import * |
from .nodes import *
from .functions import *
from .featureBuilder import * | none | 1 | 0.930005 | 1 | |
cowsay/lib/cows/goat2.py | Ovlic/cowsay_py | 0 | 6618982 | def Goat2(thoughts, eyes, eye, tongue):
return f"""
{thoughts}
{thoughts}
)__(
'|{eyes}|'________/
|__| |
{tongue}||"""""""||
|| ||
""" | def Goat2(thoughts, eyes, eye, tongue):
return f"""
{thoughts}
{thoughts}
)__(
'|{eyes}|'________/
|__| |
{tongue}||"""""""||
|| ||
""" | en | 0.234029 | {thoughts} {thoughts} )__( '|{eyes}|'________/ |__| | {tongue}|| "|| || || | 2.484989 | 2 |
api/geoservice_types.py | nextgis/qms_external_api_python | 0 | 6618983 | <gh_stars>0
class GeoServiceType(object):
TMS = 'tms'
WMS = 'wms'
WFS = 'wfs'
GeoJSON = 'geojson'
enum = [
TMS,
WMS,
WFS,
GeoJSON
]
| class GeoServiceType(object):
TMS = 'tms'
WMS = 'wms'
WFS = 'wfs'
GeoJSON = 'geojson'
enum = [
TMS,
WMS,
WFS,
GeoJSON
] | none | 1 | 1.933558 | 2 | |
examples/hacking/hh-001/omstd_hh_001/lib/data.py | cr0hn/OMSTD | 26 | 6618984 | <reponame>cr0hn/OMSTD<filename>examples/hacking/hh-001/omstd_hh_001/lib/data.py<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Project name: Open Methodology for Security Tool Developers
Project URL: https://github.com/cr0hn/OMSTD
Copyright (c) 2014, cr0hn<-AT->cr0hn.com
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__author__ = 'cr0hn - cr0hn<-at->cr0hn.com (@ggdaniel)'
# --------------------------------------------------------------------------
class Parameters:
"""Program parameters"""
# ----------------------------------------------------------------------
def __init__(self, **kwargs):
"""
:param ports_range: ports range as string: '1-2000'
:type ports_range: str
:param targets: list os string with targets
:type targets: list(str)
:param random_port_scan: Select port to scan in random order
:type random_port_scan: bool
:param verbosity: verbosity level
:type verbosity: int
:param only_open: only manage opened ports
:type only_open: bool
:param print_function: function used to display debug info. Default is 'print' call.
:type print_function: function
:param proxy: URL with proxy info
:type proxy: str
:raises: ValueError
"""
self.ports_range = kwargs.get("ports_range", "0-1024")
self.targets = kwargs.get("targets", None)
self.verbosity = int(kwargs.get("verbosity", 0))
self.random_port_scan = kwargs.get("random_port_scan", False)
self.print_function = kwargs.get("print_function", print)
self.only_open = kwargs.get("only_open", False)
self.proxy = kwargs.get("proxy", None)
self.proxy_user = kwargs.get("proxy_user", None)
self.proxy_pass = kwargs.get("proxy_pass", None)
if not isinstance(self.ports_range, str):
raise TypeError("Expected str, got '%s' instead" % type(self.ports_range))
if not isinstance(self.targets, list):
raise TypeError("Expected list, got '%s' instead" % type(self.targets))
else:
for p in self.targets:
if not isinstance(p, str):
raise TypeError("Expected str, got '%s' instead" % type(p))
# Remove duplicates
self.targets = list(set(self.targets))
# Expand ports
_total_ports = []
_parsed_tmp = self.ports_range.strip().split(",")
for r in _parsed_tmp:
if "-" in r:
_parsed_ports = r.strip().split("-")
if len(_parsed_ports) == 1:
_p_start = int(_parsed_ports[0])
_p_end = _p_start + 1
elif len(_parsed_ports) == 2:
_p_start = int(_parsed_ports[0])
_p_end = int(_parsed_ports[1])
else:
raise ValueError("Port range must be defined as start-end: 1-4025")
_total_ports.extend(int(x) for x in range(_p_start, _p_end))
else:
_total_ports.append(int(r))
self.ports_range = _total_ports
if self.proxy is not None:
from urllib.parse import urlparse
_scheme = "http" if self.proxy.startswith("http://") or self.proxy.startswith("https://") else ""
self.proxy = urlparse(self.proxy, scheme=_scheme)
# --------------------------------------------------------------------------
class Results:
"""Program results"""
# ----------------------------------------------------------------------
def __init__(self, **kwargs):
"""
:param ports: Port status as format: {PORT_NUMBER: STATUS}
:type ports: dict(int: str)
:param scan_time: Time got for scan in miliseconds
:type scan_time: float
"""
self.ports = kwargs.get("ports", None)
self.scan_time = kwargs.get("scan_time", 0)
# Truncate time
self.scan_time = '{number:.2f}'.format(number=self.scan_time)
self.__open_ports = None
# ----------------------------------------------------------------------
@property
def open_ports(self):
"""
:return: Return only open ports
:rtype: list(int)
"""
if self.__open_ports is None:
self.__open_ports = [x for x, y in self.ports.items() if y.lower() == "open"]
return self.__open_ports
__all__ = ["Results", "Parameters"] | # -*- coding: utf-8 -*-
"""
Project name: Open Methodology for Security Tool Developers
Project URL: https://github.com/cr0hn/OMSTD
Copyright (c) 2014, cr0hn<-AT->cr0hn.com
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__author__ = 'cr0hn - cr0hn<-at->cr0hn.com (@ggdaniel)'
# --------------------------------------------------------------------------
class Parameters:
"""Program parameters"""
# ----------------------------------------------------------------------
def __init__(self, **kwargs):
"""
:param ports_range: ports range as string: '1-2000'
:type ports_range: str
:param targets: list os string with targets
:type targets: list(str)
:param random_port_scan: Select port to scan in random order
:type random_port_scan: bool
:param verbosity: verbosity level
:type verbosity: int
:param only_open: only manage opened ports
:type only_open: bool
:param print_function: function used to display debug info. Default is 'print' call.
:type print_function: function
:param proxy: URL with proxy info
:type proxy: str
:raises: ValueError
"""
self.ports_range = kwargs.get("ports_range", "0-1024")
self.targets = kwargs.get("targets", None)
self.verbosity = int(kwargs.get("verbosity", 0))
self.random_port_scan = kwargs.get("random_port_scan", False)
self.print_function = kwargs.get("print_function", print)
self.only_open = kwargs.get("only_open", False)
self.proxy = kwargs.get("proxy", None)
self.proxy_user = kwargs.get("proxy_user", None)
self.proxy_pass = kwargs.get("proxy_pass", None)
if not isinstance(self.ports_range, str):
raise TypeError("Expected str, got '%s' instead" % type(self.ports_range))
if not isinstance(self.targets, list):
raise TypeError("Expected list, got '%s' instead" % type(self.targets))
else:
for p in self.targets:
if not isinstance(p, str):
raise TypeError("Expected str, got '%s' instead" % type(p))
# Remove duplicates
self.targets = list(set(self.targets))
# Expand ports
_total_ports = []
_parsed_tmp = self.ports_range.strip().split(",")
for r in _parsed_tmp:
if "-" in r:
_parsed_ports = r.strip().split("-")
if len(_parsed_ports) == 1:
_p_start = int(_parsed_ports[0])
_p_end = _p_start + 1
elif len(_parsed_ports) == 2:
_p_start = int(_parsed_ports[0])
_p_end = int(_parsed_ports[1])
else:
raise ValueError("Port range must be defined as start-end: 1-4025")
_total_ports.extend(int(x) for x in range(_p_start, _p_end))
else:
_total_ports.append(int(r))
self.ports_range = _total_ports
if self.proxy is not None:
from urllib.parse import urlparse
_scheme = "http" if self.proxy.startswith("http://") or self.proxy.startswith("https://") else ""
self.proxy = urlparse(self.proxy, scheme=_scheme)
# --------------------------------------------------------------------------
class Results:
"""Program results"""
# ----------------------------------------------------------------------
def __init__(self, **kwargs):
"""
:param ports: Port status as format: {PORT_NUMBER: STATUS}
:type ports: dict(int: str)
:param scan_time: Time got for scan in miliseconds
:type scan_time: float
"""
self.ports = kwargs.get("ports", None)
self.scan_time = kwargs.get("scan_time", 0)
# Truncate time
self.scan_time = '{number:.2f}'.format(number=self.scan_time)
self.__open_ports = None
# ----------------------------------------------------------------------
@property
def open_ports(self):
"""
:return: Return only open ports
:rtype: list(int)
"""
if self.__open_ports is None:
self.__open_ports = [x for x, y in self.ports.items() if y.lower() == "open"]
return self.__open_ports
__all__ = ["Results", "Parameters"] | en | 0.599468 | # -*- coding: utf-8 -*- Project name: Open Methodology for Security Tool Developers Project URL: https://github.com/cr0hn/OMSTD Copyright (c) 2014, cr0hn<-AT->cr0hn.com All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# -------------------------------------------------------------------------- Program parameters # ---------------------------------------------------------------------- :param ports_range: ports range as string: '1-2000' :type ports_range: str :param targets: list os string with targets :type targets: list(str) :param random_port_scan: Select port to scan in random order :type random_port_scan: bool :param verbosity: verbosity level :type verbosity: int :param only_open: only manage opened ports :type only_open: bool :param print_function: function used to display debug info. Default is 'print' call. :type print_function: function :param proxy: URL with proxy info :type proxy: str :raises: ValueError # Remove duplicates # Expand ports # -------------------------------------------------------------------------- Program results # ---------------------------------------------------------------------- :param ports: Port status as format: {PORT_NUMBER: STATUS} :type ports: dict(int: str) :param scan_time: Time got for scan in miliseconds :type scan_time: float # Truncate time # ---------------------------------------------------------------------- :return: Return only open ports :rtype: list(int) | 0.763481 | 1 |
gtfs/models/stop.py | montel-ig/maritime-maas | 0 | 6618985 | <reponame>montel-ig/maritime-maas
from django.contrib.gis.db import models
from django.utils.translation import gettext_lazy as _
from parler.managers import TranslatableQuerySet
from parler.models import TranslatableModel, TranslatedFields
from maas.models import MaasOperator
from .base import GTFSModelWithSourceID
from .feed import Feed
class StopQueryset(TranslatableQuerySet):
    """Queryset for ``Stop`` that can be scoped to a single MaaS operator."""

    def for_maas_operator(self, maas_operator: MaasOperator):
        """Return only the stops whose feed is visible to *maas_operator*."""
        return self.filter(
            feed__in=Feed.objects.for_maas_operator(maas_operator)
        )
class Stop(TranslatableModel, GTFSModelWithSourceID):
    """A stop imported from a GTFS feed, with translatable name/description.

    ``name``, ``desc`` and ``tts_name`` live in per-language translation rows
    (django-parler); the remaining fields are plain columns.
    """

    class WheelchairBoarding(models.IntegerChoices):
        # Integer values mirror the GTFS ``wheelchair_boarding`` field
        # (0 = no info, 1 = possible, 2 = not possible) — presumably; the
        # feed importer should confirm the mapping.
        UNKNOWN = 0, _("Unknown")
        POSSIBLE = 1, _("Possible")
        NOT_POSSIBLE = 2, _("Not possible")
    translations = TranslatedFields(
        name=models.CharField(verbose_name=_("name"), max_length=255, blank=True),
        desc=models.TextField(verbose_name=_("description"), blank=True),
        # tts_name: text-to-speech friendly spelling of the stop name.
        tts_name=models.CharField(
            verbose_name=_("TTS name"),
            max_length=255,
            help_text=_("readable version of the name"),
            blank=True,
        ),
    )
    # Short rider-facing identifier (likely the GTFS ``stop_code``); optional.
    code = models.CharField(verbose_name=_("code"), max_length=255, blank=True)
    # Geographic location of the stop.
    point = models.PointField(verbose_name=_("point"))
    wheelchair_boarding = models.PositiveSmallIntegerField(
        verbose_name=_("wheelchair boarding"),
        choices=WheelchairBoarding.choices,
        default=WheelchairBoarding.UNKNOWN,
    )
    # Custom manager so views can scope stops per MaaS operator.
    objects = StopQueryset.as_manager()
    class Meta(GTFSModelWithSourceID.Meta):
        verbose_name = _("stop")
        verbose_name_plural = _("stops")
        default_related_name = "stops"
    def __str__(self):
        # Prefer a translated name in any available language; fall back to
        # the default model repr when no translation exists.
        return self.safe_translation_getter(
            "name", default=super().__str__, any_language=True
        )
| from django.contrib.gis.db import models
from django.utils.translation import gettext_lazy as _
from parler.managers import TranslatableQuerySet
from parler.models import TranslatableModel, TranslatedFields
from maas.models import MaasOperator
from .base import GTFSModelWithSourceID
from .feed import Feed
class StopQueryset(TranslatableQuerySet):
def for_maas_operator(self, maas_operator: MaasOperator):
feeds = Feed.objects.for_maas_operator(maas_operator)
return self.filter(feed__in=feeds)
class Stop(TranslatableModel, GTFSModelWithSourceID):
class WheelchairBoarding(models.IntegerChoices):
UNKNOWN = 0, _("Unknown")
POSSIBLE = 1, _("Possible")
NOT_POSSIBLE = 2, _("Not possible")
translations = TranslatedFields(
name=models.CharField(verbose_name=_("name"), max_length=255, blank=True),
desc=models.TextField(verbose_name=_("description"), blank=True),
tts_name=models.CharField(
verbose_name=_("TTS name"),
max_length=255,
help_text=_("readable version of the name"),
blank=True,
),
)
code = models.CharField(verbose_name=_("code"), max_length=255, blank=True)
point = models.PointField(verbose_name=_("point"))
wheelchair_boarding = models.PositiveSmallIntegerField(
verbose_name=_("wheelchair boarding"),
choices=WheelchairBoarding.choices,
default=WheelchairBoarding.UNKNOWN,
)
objects = StopQueryset.as_manager()
class Meta(GTFSModelWithSourceID.Meta):
verbose_name = _("stop")
verbose_name_plural = _("stops")
default_related_name = "stops"
def __str__(self):
return self.safe_translation_getter(
"name", default=super().__str__, any_language=True
) | none | 1 | 1.97875 | 2 | |
tests/cases/exceptions.py | MiguelMarcelino/py2many | 2 | 6618986 | <gh_stars>1-10
#!/usr/bin/env python3
def show():
    """Exercise try/except/finally forms and report the order handlers ran.

    Deliberately keeps one of each construct (except-as, finally, a typed
    except, and a bare except) so a transpiler test can cover them all.
    """
    events = []
    try:
        raise Exception("foo")
    except Exception as err:
        events.append("foo")
    finally:
        events.append("Finally")
    try:
        3 / 0
    except ZeroDivisionError:
        events.append("ZeroDivisionError")
    try:
        raise Exception("foo")
    except:
        events.append("foo_2")
    return events
if __name__ == "__main__":
assert show() == ["foo", "Finally", "ZeroDivisionError", "foo_2"]
| #!/usr/bin/env python3
def show():
s = []
try:
raise Exception("foo")
except Exception as e:
s.append("foo")
finally:
s.append("Finally")
try:
3 / 0
except ZeroDivisionError:
s.append("ZeroDivisionError")
try:
raise Exception("foo")
except:
s.append("foo_2")
return s
if __name__ == "__main__":
assert show() == ["foo", "Finally", "ZeroDivisionError", "foo_2"] | fr | 0.221828 | #!/usr/bin/env python3 | 3.49289 | 3 |
demo/urls.py | thibaudcolas/django-draftail | 7 | 6618987 | <reponame>thibaudcolas/django-draftail
from django.contrib import admin
from django.urls import include, path
# Root URLConf: app-specific routes plus the Django admin and admindocs.
urlpatterns = [
    path("polls/", include("demo.polls.urls")),
    path("feedback/", include("demo.feedback.urls")),
    # "admin/doc/" is listed before "admin/" so the more specific prefix
    # is matched first by the URL resolver.
    path("admin/doc/", include("django.contrib.admindocs.urls")),
    path("admin/", admin.site.urls),
]
| from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path("polls/", include("demo.polls.urls")),
path("feedback/", include("demo.feedback.urls")),
path("admin/doc/", include("django.contrib.admindocs.urls")),
path("admin/", admin.site.urls),
] | none | 1 | 1.631518 | 2 | |
linux/com_read_thread.py | derand/GPSTime2net | 0 | 6618988 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__copyright__ = 'Copyright © 2015'
import threading#, Queue
import serial
import fcntl
import time, datetime
import sys
from logger_thread import Message
class ComReadThread(threading.Thread):
    """Daemon thread that reads NMEA sentences from a GPS serial port.

    Parses $GPRMC (UTC time / receiver validity), $GPGGA (fix quality and
    satellites in use) and $GPGSV (per-satellite signal strength) sentences
    and, after each complete $GPGSV group, pushes a one-line status summary
    to the logger queue and echoes it to stdout.  (Python 2 source.)
    """
    def __init__(self, com_prms, log_queue=None):
        # com_prms: keyword arguments forwarded verbatim to serial.Serial().
        # log_queue: queue receiving Message objects (see logger_thread).
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.com_prms = com_prms
        self.logger_queue = log_queue
        self.com = None
        # ntp_offset is only read in this class; presumably another thread
        # updates it.  ntp_offset_prev remembers the last reported value so
        # the status line reflects whether the offset changed since then.
        self.ntp_offset = 0
        self.ntp_offset_prev = self.ntp_offset
        self._running = False
    def run(self):
        """Open the port under an exclusive lock and read; if no complete
        line arrives within ~3 s, reopen once with odd parity and retry."""
        self._running = True
        # configure the serial connections (the parameters differs on the device you are connecting to)
        self.com = serial.Serial(**self.com_prms)
        # Exclusive, non-blocking lock: fail fast if another process owns the port.
        fcntl.flock(self.com.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        #self.com.open()
        print self.com.isOpen()
        if self.read_loop(check_time=True):
            # Timed out without seeing a newline: assume a parity mismatch.
            print 'Reconnect'
            self.com_prms['parity'] = serial.PARITY_ODD
            self.com = serial.Serial(**self.com_prms)
            fcntl.flock(self.com.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
            print self.com.isOpen()
            self.read_loop()
        #log.close()
    def read_loop(self, check_time=False):
        """Accumulate serial data line by line and parse NMEA sentences.

        Returns True when check_time is set and no newline arrived within
        3 seconds (the caller treats this as "wrong parity, reconnect");
        returns False when the loop was ended via stop().
        """
        start_tm = time.time()
        buff = ''                       # partial line accumulated so far
        c = 0                           # completed $GPGSV groups / status lines emitted
        gps_tm_str = ''
        gps_tm = 0
        gps_status = ''                 # $GPRMC field 2 (receiver validity flag)
        gps_cal_type = '0'              # $GPGGA fix-quality field
        gps_sattelites = '0'            # $GPGGA satellites-in-use field
        gps_tm_diff = 0                 # system clock minus GPS time, seconds
        gps_gsv_sattelites = 0          # satellites with a non-empty SNR value
        gps_gsv_sattelites_count = 0    # satellites-in-view reported by $GPGSV
        gps_gsv_sattelites_power = []   # collected SNR values (strings)
        max_length = 0                  # widest status line so far, for \r padding
        while 1:
            tmp = self.com.readline(80)
            #print ord(tmp[0]), len(tmp), tmp
            x = tmp.find('\n')
            if x == -1:
                buff += tmp
            else:
                # tmp[:x-1] also drops the byte just before '\n' — assumes
                # CRLF line endings (TODO confirm for this receiver).
                buff += tmp[:x-1]
                check_time = False
                #print buff
                if len(buff) and buff[0] == '$':
                    arr = buff.split(',')
                    if arr[0] == '$GPRMC':
                        if len(arr) > 12:
                            # $GPRMC,170038.00,V,4628.5074,N,03041.5680,E,,,031115,,,N*4F
                            gps_tm_str = '%s0000 %s'%(arr[1], arr[9])
                            #gps_tm = calendar.timegm(time.strptime(gps_tm_str, '%H%M%S.%f %d%m%y')) # '.%f' not supported
                            # strptime/mktime yield a local-time epoch; subtracting
                            # time.timezone shifts it back toward UTC (DST ignored).
                            gps_tm = time.mktime(datetime.datetime.strptime(gps_tm_str, '%H%M%S.%f %d%m%y').timetuple()) - time.timezone
                            gps_tm_diff = time.time()-gps_tm
                            gps_status = arr[2]
                    elif arr[0] == '$GPGGA':
                        if len(arr) > 7:
                            # $GPGGA,170004.00,4628.5074,N,03041.5680,E,0,00,0.0,,M,,M,,*5C
                            gps_cal_type = arr[6]
                            gps_sattelites = arr[7]
                    elif arr[0] == '$GPGSV':
                        # First sentence of a $GPGSV group: reset per-group tallies.
                        if arr[2] == '1':
                            gps_gsv_sattelites = 0
                            gps_gsv_sattelites_power = []
                        gps_gsv_sattelites_count = arr[3]
                        # SNR occupies every 4th field from index 7; the last one
                        # may carry a '*checksum' suffix, hence the split('*').
                        idx = 7
                        while idx < len(arr):
                            power = arr[idx].split('*')[0]
                            if len(power) > 0:
                                gps_gsv_sattelites += 1
                                gps_gsv_sattelites_power.append(power)
                            idx += 4
                    # arr[1] == arr[2] marks the final sentence of a $GPGSV
                    # group: emit one consolidated status line.
                    if arr[0] == '$GPGSV' and arr[1] == arr[2]:
                        c += 1
                        s = '\'%s\' %s%s %s(%02d/%s %s) %.6f'%(gps_tm_str, gps_status, gps_cal_type, gps_sattelites, gps_gsv_sattelites, gps_gsv_sattelites_count, '-'.join(gps_gsv_sattelites_power), gps_tm_diff, )
                        # Queue the line before appending the NTP column when the
                        # offset is unchanged; include the column when it changed.
                        if self.ntp_offset == self.ntp_offset_prev:
                            self.logger_queue.put(Message(s, (c%10) == 0))
                            s += ' %.6f'%self.ntp_offset
                        else:
                            s += ' %.6f'%self.ntp_offset
                            self.logger_queue.put(Message(s, (c%10) == 0))
                        max_length = max(max_length, len(s))
                        # Rewrite the same console line in place, padded to the
                        # widest line printed so far.
                        print '\r%s'%s.ljust(max_length),
                        sys.stdout.flush()
                        self.ntp_offset_prev = self.ntp_offset
                buff = tmp[x+1:]
            if check_time and (time.time() - start_tm) > 3:
                self.com.close()
                return True
            if not self._running:
                break
        self.com.close()
        return False
    def stop(self):
        # Ask read_loop() to exit after its current iteration.
        self._running = False
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__copyright__ = 'Copyright © 2015'
import threading#, Queue
import serial
import fcntl
import time, datetime
import sys
from logger_thread import Message
class ComReadThread(threading.Thread):
def __init__(self, com_prms, log_queue=None):
threading.Thread.__init__(self)
self.setDaemon(True)
self.com_prms = com_prms
self.logger_queue = log_queue
self.com = None
self.ntp_offset = 0
self.ntp_offset_prev = self.ntp_offset
self._running = False
def run(self):
self._running = True
# configure the serial connections (the parameters differs on the device you are connecting to)
self.com = serial.Serial(**self.com_prms)
fcntl.flock(self.com.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
#self.com.open()
print self.com.isOpen()
if self.read_loop(check_time=True):
print 'Reconnect'
self.com_prms['parity'] = serial.PARITY_ODD
self.com = serial.Serial(**self.com_prms)
fcntl.flock(self.com.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
print self.com.isOpen()
self.read_loop()
#log.close()
def read_loop(self, check_time=False):
start_tm = time.time()
buff = ''
c = 0
gps_tm_str = ''
gps_tm = 0
gps_status = ''
gps_cal_type = '0'
gps_sattelites = '0'
gps_tm_diff = 0
gps_gsv_sattelites = 0
gps_gsv_sattelites_count = 0
gps_gsv_sattelites_power = []
max_length = 0
while 1:
tmp = self.com.readline(80)
#print ord(tmp[0]), len(tmp), tmp
x = tmp.find('\n')
if x == -1:
buff += tmp
else:
buff += tmp[:x-1]
check_time = False
#print buff
if len(buff) and buff[0] == '$':
arr = buff.split(',')
if arr[0] == '$GPRMC':
if len(arr) > 12:
# $GPRMC,170038.00,V,4628.5074,N,03041.5680,E,,,031115,,,N*4F
gps_tm_str = '%s0000 %s'%(arr[1], arr[9])
#gps_tm = calendar.timegm(time.strptime(gps_tm_str, '%H%M%S.%f %d%m%y')) # '.%f' not supported
gps_tm = time.mktime(datetime.datetime.strptime(gps_tm_str, '%H%M%S.%f %d%m%y').timetuple()) - time.timezone
gps_tm_diff = time.time()-gps_tm
gps_status = arr[2]
elif arr[0] == '$GPGGA':
if len(arr) > 7:
# $GPGGA,170004.00,4628.5074,N,03041.5680,E,0,00,0.0,,M,,M,,*5C
gps_cal_type = arr[6]
gps_sattelites = arr[7]
elif arr[0] == '$GPGSV':
if arr[2] == '1':
gps_gsv_sattelites = 0
gps_gsv_sattelites_power = []
gps_gsv_sattelites_count = arr[3]
idx = 7
while idx < len(arr):
power = arr[idx].split('*')[0]
if len(power) > 0:
gps_gsv_sattelites += 1
gps_gsv_sattelites_power.append(power)
idx += 4
if arr[0] == '$GPGSV' and arr[1] == arr[2]:
c += 1
s = '\'%s\' %s%s %s(%02d/%s %s) %.6f'%(gps_tm_str, gps_status, gps_cal_type, gps_sattelites, gps_gsv_sattelites, gps_gsv_sattelites_count, '-'.join(gps_gsv_sattelites_power), gps_tm_diff, )
if self.ntp_offset == self.ntp_offset_prev:
self.logger_queue.put(Message(s, (c%10) == 0))
s += ' %.6f'%self.ntp_offset
else:
s += ' %.6f'%self.ntp_offset
self.logger_queue.put(Message(s, (c%10) == 0))
max_length = max(max_length, len(s))
print '\r%s'%s.ljust(max_length),
sys.stdout.flush()
self.ntp_offset_prev = self.ntp_offset
buff = tmp[x+1:]
if check_time and (time.time() - start_tm) > 3:
self.com.close()
return True
if not self._running:
break
self.com.close()
return False
def stop(self):
self._running = False
| en | 0.672521 | #!/usr/bin/env python # -*- coding: utf-8 -*- #, Queue # configure the serial connections (the parameters differs on the device you are connecting to) #self.com.open() #log.close() #print ord(tmp[0]), len(tmp), tmp #print buff # $GPRMC,170038.00,V,4628.5074,N,03041.5680,E,,,031115,,,N*4F #gps_tm = calendar.timegm(time.strptime(gps_tm_str, '%H%M%S.%f %d%m%y')) # '.%f' not supported # $GPGGA,170004.00,4628.5074,N,03041.5680,E,0,00,0.0,,M,,M,,*5C | 2.520317 | 3 |
setup.py | raspi/pyzmqarp | 0 | 6618989 | # -*- encoding: utf8 -*-
import os
from setuptools import find_packages
from setuptools import setup
# Directory containing this setup.py.
# NOTE(review): currently unused — typically used to read a README for
# long_description.
here = os.path.abspath(os.path.dirname(__file__))
classifiers = [
    "Programming Language :: Python",
]
# Runtime dependencies.
# NOTE(review): 'asyncio' is in the standard library on Python 3.4+ (the PyPI
# package of that name is an old backport) and the zmq bindings are normally
# published as 'pyzmq' — verify these requirement names before release.
requires = [
    'asyncio',
    'zmq',
    'pyroute2',
]
tests_require = [
]
# Extras useful for development/testing.
# NOTE(review): not wired into setup() (e.g. via extras_require).
testing_extras = tests_require + [
    'nose',
    'coverage',
    'virtualenv',
]
# NOTE(review): long_description ('Listen on ') and url ('https://') look like
# truncated placeholders.
setup(author=u'<NAME>',
      name='pyzmqarp',
      version='0.0.1',
      description='ARP events to zmq',
      long_description='Listen on ',
      classifiers=classifiers,
      author_email='',
      url='https://',
      keywords='python pyroute2 arp ',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      install_requires=requires,
      )
| # -*- encoding: utf8 -*-
import os
from setuptools import find_packages
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
classifiers = [
"Programming Language :: Python",
]
requires = [
'asyncio',
'zmq',
'pyroute2',
]
tests_require = [
]
testing_extras = tests_require + [
'nose',
'coverage',
'virtualenv',
]
setup(author=u'<NAME>',
name='pyzmqarp',
version='0.0.1',
description='ARP events to zmq',
long_description='Listen on ',
classifiers=classifiers,
author_email='',
url='https://',
keywords='python pyroute2 arp ',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
)
| en | 0.4002 | # -*- encoding: utf8 -*- | 1.266132 | 1 |
Osnove/7_2_2ModuloOperator.py | Smajkan/PythonUcenjePonovo | 0 | 6618990 | <reponame>Smajkan/PythonUcenjePonovo
# Demonstrate the modulo (remainder) operator on integers and on floats.
print("Ostatak pri dijeljenju brojeva 20 i 6 iznosi:", 20 % 6)
print("Ostatak pri dijeljenju brojeva 1.25 i 0.5 iznosi:", 1.25 % 0.5)
| print("Ostatak pri dijeljenju brojeva 20 i 6 iznosi:", 20 % 6)
print("Ostatak pri dijeljenju brojeva 1.25 i 0.5 iznosi:", 1.25 % 0.5) | none | 1 | 2.446708 | 2 | |
grblPendant.py | jduanen/cnc_pendant | 1 | 6618991 | #!/usr/bin/env python3
'''
Application that connects a XHC WHB04B-4 pendant to a grbl controller
'''
import argparse
import json
import logging
import os
import signal
import sys
import threading
import time
import yaml
from yaml import Loader
from Controller import Controller
from Host import Host
from Pendant import Pendant
from Processor import Processor
# Fallback values for the command-line options (see getOpts()).
DEFAULTS = {
    'logLevel': "INFO", #"DEBUG" #"WARNING"
    'macroPath': "./whb04b.yml"
}
def run(options):
    """Wire the pendant, controller and host to a Processor and run until it
    stops.

    Installs signal handlers for clean shutdown (TERM/HUP/INT) and for
    reloading the macro file (USR1).  With ``-M`` the processor's magic
    command names are printed instead of entering the main loop.
    """
    def stop():
        # Shut components down in reverse creation order; relies on the
        # enclosing scope's proc/host/ctlr/pend bindings.
        logging.debug(f"Active Threads: {threading.enumerate()}")
        if proc:
            logging.debug("Shutting down Processor")
            proc.shutdown()
        if host:
            logging.debug("Shutting down Host")
            host.shutdown(False)
        if ctlr:
            logging.debug("Shutting down Controller")
            ctlr.shutdown()
        if pend:
            logging.debug("Shutting down Pendant")
            pend.shutdown()
    def shutdownHandler(signum, frame):
        # Shared handler for TERM/HUP/INT: tear everything down.
        logging.debug(f"Caught signal: {signum}")
        stop()
    for s in ('TERM', 'HUP', 'INT'):
        sig = getattr(signal, 'SIG'+s)
        signal.signal(sig, shutdownHandler)
    def reloadHandler(signum, frame):
        # SIGUSR1: re-read the macro definitions file and hand the fresh
        # definitions to the processor without restarting.
        logging.debug(f"Caught signal: {signum}")
        macros = {}
        if os.path.exists(options.macroPath):
            with open(options.macroPath, "r") as f:
                macros = yaml.load(f, Loader=Loader)
            proc.defineMacros(macros)
            if options.verbose:
                print("Reload Macros:")
                json.dump(macros, sys.stdout, indent=4, sort_keys=True)
                print("")
        else:
            logging.warning(f"Macros file '{options.macroPath}' does not exist")
    signal.signal(signal.SIGUSR1, reloadHandler)
    # Initial macro load (path existence was validated in getOpts()).
    macros = {}
    with open(options.macroPath, "r") as f:
        macros = yaml.load(f, Loader=Loader)
    if options.verbose:
        print("Initial Macros:")
        json.dump(macros, sys.stdout, indent=4, sort_keys=True)
        print("")
    pend = Pendant()
    ctlr = Controller()
    host = Host()
    proc = Processor(pend, ctlr, host, macros)
    if proc:
        if options.magicCommands:
            # -M: just report the processor's magic command names.
            magicCmdNames = proc.magicCommandNames()
            if options.verbose:
                print("Magic Commands:")
                json.dump(magicCmdNames, sys.stdout, indent=4, sort_keys=True)
                print("")
            else:
                print(f"Magic Commands: {magicCmdNames}")
        else:
            # Main loop: idle while the processor thread is alive.
            while proc.isAlive():
                #### FIXME do something here
                print("running...")
                time.sleep(30)
    stop()
    sys.exit(0)
def getOpts():
    """Parse command-line options and configure logging.

    Returns the parsed argparse namespace.  Exits the process with status 1
    when the macro definitions file does not exist.
    """
    # The usage string was previously built but never handed to argparse;
    # pass it in so --help shows it (argparse adds the "usage: " prefix).
    usage = f"{sys.argv[0]} [-v] [-L <logLevel>] [-l <logFile>] " + \
            "[-m <macroPath>] [-M]"
    ap = argparse.ArgumentParser(usage=usage)
    ap.add_argument(
        "-L", "--logLevel", action="store", type=str,
        default=DEFAULTS['logLevel'],
        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        help="Logging level")
    ap.add_argument(
        "-l", "--logFile", action="store", type=str,
        help="Path to location of logfile (create it if it doesn't exist)")
    ap.add_argument(
        "-M", "--magicCommands", action="store_true", default=False,
        help="Print names of magic commands and exit")
    ap.add_argument(
        "-m", "--macroPath", action="store", type=str, default=DEFAULTS['macroPath'],
        help="Path to YAML file containing macro key definitions")
    ap.add_argument(
        "-v", "--verbose", action="count", default=0,
        help="Enable printing of debug info")
    opts = ap.parse_args()
    # Log to the requested file, or to the console when no file was given.
    if opts.logFile:
        logging.basicConfig(filename=opts.logFile,
                            format='%(asctime)s %(levelname)-8s %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S',
                            level=opts.logLevel)
    else:
        logging.basicConfig(level=opts.logLevel,
                            format='%(asctime)s %(levelname)-8s %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S')
    if not os.path.exists(opts.macroPath):
        logging.error(f"Macro key definitions file not found: {opts.macroPath}")
        sys.exit(1)
    if opts.verbose:
        print(f"  Macro definitions file: {opts.macroPath}")
    return opts
if __name__ == '__main__':
opts = getOpts()
r = run(opts)
sys.exit(r)
| #!/usr/bin/env python3
'''
Application that connects a XHC WHB04B-4 pendant to a grbl controller
'''
import argparse
import json
import logging
import os
import signal
import sys
import threading
import time
import yaml
from yaml import Loader
from Controller import Controller
from Host import Host
from Pendant import Pendant
from Processor import Processor
DEFAULTS = {
'logLevel': "INFO", #"DEBUG" #"WARNING"
'macroPath': "./whb04b.yml"
}
def run(options):
"""????
"""
def stop():
logging.debug(f"Active Threads: {threading.enumerate()}")
if proc:
logging.debug("Shutting down Processor")
proc.shutdown()
if host:
logging.debug("Shutting down Host")
host.shutdown(False)
if ctlr:
logging.debug("Shutting down Controller")
ctlr.shutdown()
if pend:
logging.debug("Shutting down Pendant")
pend.shutdown()
def shutdownHandler(signum, frame):
logging.debug(f"Caught signal: {signum}")
stop()
for s in ('TERM', 'HUP', 'INT'):
sig = getattr(signal, 'SIG'+s)
signal.signal(sig, shutdownHandler)
def reloadHandler(signum, frame):
logging.debug(f"Caught signal: {signum}")
macros = {}
if os.path.exists(options.macroPath):
with open(options.macroPath, "r") as f:
macros = yaml.load(f, Loader=Loader)
proc.defineMacros(macros)
if options.verbose:
print("Reload Macros:")
json.dump(macros, sys.stdout, indent=4, sort_keys=True)
print("")
else:
logging.warning(f"Macros file '{options.macroPath}' does not exist")
signal.signal(signal.SIGUSR1, reloadHandler)
macros = {}
with open(options.macroPath, "r") as f:
macros = yaml.load(f, Loader=Loader)
if options.verbose:
print("Initial Macros:")
json.dump(macros, sys.stdout, indent=4, sort_keys=True)
print("")
pend = Pendant()
ctlr = Controller()
host = Host()
proc = Processor(pend, ctlr, host, macros)
if proc:
if options.magicCommands:
magicCmdNames = proc.magicCommandNames()
if options.verbose:
print("Magic Commands:")
json.dump(magicCmdNames, sys.stdout, indent=4, sort_keys=True)
print("")
else:
print(f"Magic Commands: {magicCmdNames}")
else:
while proc.isAlive():
#### FIXME do something here
print("running...")
time.sleep(30)
stop()
sys.exit(0)
def getOpts():
usage = f"Usage: {sys.argv[0]} [-v] [-L <logLevel>] [-l <logFile>] " + \
"[-m <macroPath>] [-M]"
ap = argparse.ArgumentParser()
ap.add_argument(
"-L", "--logLevel", action="store", type=str,
default=DEFAULTS['logLevel'],
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
help="Logging level")
ap.add_argument(
"-l", "--logFile", action="store", type=str,
help="Path to location of logfile (create it if it doesn't exist)")
ap.add_argument(
"-M", "--magicCommands", action="store_true", default=False,
help="Print names of magic commands and exit")
ap.add_argument(
"-m", "--macroPath", action="store", type=str, default=DEFAULTS['macroPath'],
help="Path to YAML file containing macro key definitions")
ap.add_argument(
"-v", "--verbose", action="count", default=0,
help="Enable printing of debug info")
opts = ap.parse_args()
if opts.logFile:
logging.basicConfig(filename=opts.logFile,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=opts.logLevel)
else:
logging.basicConfig(level=opts.logLevel,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
if not os.path.exists(opts.macroPath):
logging.error(f"Macro key definitions file not found: {opts.macroPath}")
sys.exit(1)
if opts.verbose:
print(f" Macro definitions file: {opts.macroPath}")
return opts
if __name__ == '__main__':
opts = getOpts()
r = run(opts)
sys.exit(r)
| en | 0.522629 | #!/usr/bin/env python3 Application that connects a XHC WHB04B-4 pendant to a grbl controller #"DEBUG" #"WARNING" ???? #### FIXME do something here | 2.182613 | 2 |
www/purple_admin/views.py | SubminO/vas | 0 | 6618992 | <reponame>SubminO/vas<filename>www/purple_admin/views.py
from django.contrib.auth.decorators import login_required
from django.forms.models import model_to_dict
from django.http import JsonResponse
from django.shortcuts import render, redirect, get_object_or_404
from purple_admin.forms import RouteForm, RoutePlatformForm, PlatformTypeForm, BusModelForm, RoutePlatformFormset, \
RouteSelectForm
from route.models import PlatformType, Route, RoutePlatform, BusModel, RoutePoint
@login_required
def cabinet(request):
    """Render the admin-panel landing page."""
    return render(request, 'admin_panel/cabinet.html')
@login_required
def cabinet_delete(request, pk, type):
    """Delete one Route / RoutePlatform / BusModel instance.

    AJAX callers get an empty JSON payload; everyone else is redirected to
    the list page of the deleted object's model type.
    """
    model_class = {
        'route': Route,
        'route_platform': RoutePlatform,
        'ts': BusModel
        # 'flat_type': RealEstateFlatTypeModel,
    }[type]
    instance = get_object_or_404(model_class, pk=pk)
    instance.delete()
    if request.is_ajax():
        return JsonResponse({})
    return redirect('admin_panel_' + model_class.model_type + '_list')
@login_required
def cabinet_add(request, type):
    """Create a new object of the kind selected by *type*.

    A valid POST saves the object and either returns its id/name as JSON
    (AJAX) or redirects to the matching list page; otherwise the (possibly
    bound, with errors) form is rendered.
    """
    model_class = {
        'route': Route,
        'route_platform': RoutePlatform,
        'route_platform_type': PlatformType,
        'ts': BusModel,
    }[type]
    form_class = {
        'route': RouteForm,
        'route_platform': RoutePlatformForm,
        'route_platform_type': PlatformTypeForm,
        'ts': BusModelForm,
    }[type]
    # Per-type template override; unlisted types use the generic form template.
    template = {
        'route': 'cabinet_form.html',
        'route_platform': 'cabinet_map_form.html',
        'route_platform_type': '_form.html',
    }.get(type, 'cabinet_form.html')
    if request.method == 'POST':
        form = form_class(request.POST or None, request.FILES or None)
        if form.is_valid():
            config = form.save()
            if request.is_ajax():
                return JsonResponse({"id": config.id, "name": config.name})
            return redirect('admin_panel_' + model_class.model_type + '_list')
    else:
        form = form_class()
    context = {
        'form': form,
        'description': model_class.model_description,
        'model_type': type,
    }
    return render(request, 'admin_panel/' + template, context=context)
@login_required
def cabinet_list(request, type):
    """List every instance of the model selected by *type*."""
    model = {
        'route': Route,
        'route_platform': RoutePlatform,
        'ts': BusModel,
        # 'flat_type': RealEstateFlatTypeModel,
    }[type]
    return render(
        request,
        'admin_panel/cabinet_list.html',
        context={'objects': model.objects.all(), 'model_type': type},
    )
@login_required
def cabinet_edit(request, pk, type):
    """Edit an existing object of the kind selected by *type*.

    GET renders the form bound to the instance; a valid POST saves it and
    redirects to the matching list page; an invalid POST re-renders the
    bound form with its errors.
    """
    models_by_type = {
        'route': Route,
        'route_platform': RoutePlatform,
        'route_platform_type': PlatformType,
        'ts': BusModel,
    }
    form_by_type = {
        'route': RouteForm,
        'route_platform': RoutePlatformForm,
        'route_platform_type': PlatformTypeForm,
        'ts': BusModelForm,
    }
    # Per-type template override; unlisted types ('ts') fall back to the
    # generic form template below.
    template_by_type = {
        'route': 'cabinet_form.html',
        'route_platform': 'cabinet_map_form.html',
        'route_platform_type': '_form.html',
    }
    Model = models_by_type[type]
    Form = form_by_type[type]
    template = template_by_type.get(type, 'cabinet_form.html')
    objects = get_object_or_404(Model, pk=pk)
    if request.method == 'POST':
        form = Form(request.POST or None, request.FILES or None, instance=objects)
        if form.is_valid():
            config = form.save()
            return redirect('admin_panel_' + Model.model_type + '_list')
    else:
        form = Form(instance=objects)
    context = {
        'form': form,
        'description': Model.model_description,
        'model_type': type,
    }
    return render(request, 'admin_panel/' + template, context=context)
@login_required
def ajax_add(request, type):
    """Create a Route or RoutePlatform via AJAX (returning the new object as
    JSON) or via a normal form POST (redirecting on success)."""
    # models_by_type = {
    #     'object': RealEstateObjectModel,
    #     'object_type': RealEstateObjectTypeModel,
    #     'flat_type': RealEstateFlatTypeModel,
    # }
    form_template_type = {
        'route': 'forms/_route_form.html',
        'route_platform': '_map_form.html',
        # 'flat_type': '_flat_form.html',
    }
    form_by_type = {
        'route': RouteForm,
        'route_platform': RoutePlatformForm,
        # 'flat_type': RealEstateFlatTypeForm,
    }
    # Model = models_by_type[type]
    Form = form_by_type[type]
    template = form_template_type[type]
    # type = 'realestate' if type == 'flat_type' else type
    if request.is_ajax():
        if request.method == 'POST':
            form = Form(request.POST, request.FILES)
            if form.is_valid():
                form = form.save()
                return JsonResponse(model_to_dict(form))
            else:
                # NOTE(review): an invalid AJAX POST replaces the bound form
                # with an empty one, discarding validation errors — confirm
                # this is intentional.
                form = Form()
        else:
            # NOTE(review): an AJAX GET returns None here, which Django
            # rejects as a view response — likely needs a real response.
            return
    else:
        if request.method == 'POST':
            form = Form(request.POST, request.FILES)
            if form.is_valid():
                form = form.save()
                # form.create_relations_by_string_ids(request.POST.get('flats', None))
                return redirect('admin_panel_' + type + '_list')
            return redirect('admin_panel_cabinet')
    # NOTE(review): a plain (non-AJAX) GET reaches this point with ``form``
    # never assigned, raising NameError — probably needs form = Form() above.
    context = {
        'form': form,
        'model_type': Form.Meta.model.model_type,
        'model_description': Form.Meta.model.model_description,
    }
    return render(request, 'admin_panel/' + template, context=context)
def mapped_route_add(request):
    """Route-constructor page: choose a route, order its platforms, and save
    the resulting chain of RoutePoint records.

    GET renders an empty formset; a valid POST saves one RoutePoint per form,
    copying the platform's coordinates onto the point, linking each point to
    its neighbours (prev/next), and marking both ends with the "endpoint"
    platform type.

    Fixes vs. the previous version: ``routes`` is now defined on every path
    (it used to raise NameError when an invalid POST fell through to the
    render); the previously fetched-but-unused ``platform`` type is applied
    to the middle stops; and the previous point's ``next`` link is re-saved
    so the forward link is actually persisted.
    """
    template_name = 'admin_panel/mapper_route_add.html'
    # Needed by the template on every branch (GET, invalid POST, other verbs).
    routes = Route.objects.all()
    formset = RoutePlatformFormset(queryset=RoutePoint.objects.none())
    if request.method == 'POST':
        route = get_object_or_404(Route, pk=request.POST.get('route'))
        formset = RoutePlatformFormset(request.POST)
        # pk=1: regular platform, pk=3: route endpoint — fixture-dependent,
        # TODO confirm these ids against the PlatformType data.
        platform = PlatformType.objects.get(pk=1)
        platform_endpoint = PlatformType.objects.get(pk=3)
        last_route_point = None
        if formset.is_valid():
            total = len(formset)
            for num, form in enumerate(formset, start=1):
                route_point = form.save(commit=False)
                # Snapshot the platform's coordinates onto the point.
                route_point.latitude = route_point.route_platform.latitude
                route_point.longitude = route_point.route_platform.longitude
                route_point.route = route
                # First and last stops are endpoints; the rest are regular.
                if num == 1 or num == total:
                    route_point.route_platform_type = platform_endpoint
                else:
                    route_point.route_platform_type = platform
                if last_route_point:
                    route_point.prev = last_route_point
                route_point.save()
                if last_route_point:
                    # The previous point was already saved, so its forward
                    # link must be saved again to be persisted.
                    last_route_point.next = route_point
                    last_route_point.save()
                last_route_point = route_point
            return redirect('admin_panel_cabinet')
    return render(request, template_name, {
        'routes': routes,
        'formset': formset,
    })
| from django.contrib.auth.decorators import login_required
from django.forms.models import model_to_dict
from django.http import JsonResponse
from django.shortcuts import render, redirect, get_object_or_404
from purple_admin.forms import RouteForm, RoutePlatformForm, PlatformTypeForm, BusModelForm, RoutePlatformFormset, \
RouteSelectForm
from route.models import PlatformType, Route, RoutePlatform, BusModel, RoutePoint
@login_required
def cabinet(request):
return render(request, 'admin_panel/cabinet.html')
@login_required
def cabinet_delete(request, pk, type):
models_by_type = {
'route': Route,
'route_platform': RoutePlatform,
'ts': BusModel
# 'flat_type': RealEstateFlatTypeModel,
}
Model = models_by_type[type]
objects = get_object_or_404(Model, pk=pk)
objects.delete()
if request.is_ajax():
return JsonResponse({})
return redirect('admin_panel_' + Model.model_type + '_list')
@login_required
def cabinet_add(request, type):
models_by_type = {
'route': Route,
'route_platform': RoutePlatform,
'route_platform_type': PlatformType,
'ts': BusModel,
}
form_by_type = {
'route': RouteForm,
'route_platform': RoutePlatformForm,
'route_platform_type': PlatformTypeForm,
'ts': BusModelForm,
}
template_by_type = {
'route': 'cabinet_form.html',
'route_platform': 'cabinet_map_form.html',
'route_platform_type': '_form.html',
}
Model = models_by_type[type]
Form = form_by_type[type]
template = template_by_type.get(type, 'cabinet_form.html')
if request.method == 'POST':
form = Form(request.POST or None, request.FILES or None)
if form.is_valid():
config = form.save()
if request.is_ajax():
return JsonResponse({"id": config.id, "name": config.name})
return redirect('admin_panel_' + Model.model_type + '_list')
else:
form = Form()
context = {
'form': form,
'description': Model.model_description,
'model_type': type,
}
return render(request, 'admin_panel/' + template, context=context)
@login_required
def cabinet_list(request, type):
models_by_type = {
'route': Route,
'route_platform': RoutePlatform,
'ts': BusModel,
# 'flat_type': RealEstateFlatTypeModel,
}
model = models_by_type[type]
objects = model.objects.all()
context = {
'objects': objects,
'model_type': type,
}
return render(request, 'admin_panel/cabinet_list.html', context=context)
@login_required
def cabinet_edit(request, pk, type):
models_by_type = {
'route': Route,
'route_platform': RoutePlatform,
'route_platform_type': PlatformType,
'ts': BusModel,
}
form_by_type = {
'route': RouteForm,
'route_platform': RoutePlatformForm,
'route_platform_type': PlatformTypeForm,
'ts': BusModelForm,
}
template_by_type = {
'route': 'cabinet_form.html',
'route_platform': 'cabinet_map_form.html',
'route_platform_type': '_form.html',
}
Model = models_by_type[type]
Form = form_by_type[type]
template = template_by_type.get(type, 'cabinet_form.html')
objects = get_object_or_404(Model, pk=pk)
if request.method == 'POST':
form = Form(request.POST or None, request.FILES or None, instance=objects)
if form.is_valid():
config = form.save()
return redirect('admin_panel_' + Model.model_type + '_list')
else:
form = Form(instance=objects)
context = {
'form': form,
'description': Model.model_description,
'model_type': type,
}
return render(request, 'admin_panel/' + template, context=context)
@login_required
def ajax_add(request, type):
# models_by_type = {
# 'object': RealEstateObjectModel,
# 'object_type': RealEstateObjectTypeModel,
# 'flat_type': RealEstateFlatTypeModel,
# }
form_template_type = {
'route': 'forms/_route_form.html',
'route_platform': '_map_form.html',
# 'flat_type': '_flat_form.html',
}
form_by_type = {
'route': RouteForm,
'route_platform': RoutePlatformForm,
# 'flat_type': RealEstateFlatTypeForm,
}
# Model = models_by_type[type]
Form = form_by_type[type]
template = form_template_type[type]
# type = 'realestate' if type == 'flat_type' else type
if request.is_ajax():
if request.method == 'POST':
form = Form(request.POST, request.FILES)
if form.is_valid():
form = form.save()
return JsonResponse(model_to_dict(form))
else:
form = Form()
else:
return
else:
if request.method == 'POST':
form = Form(request.POST, request.FILES)
if form.is_valid():
form = form.save()
# form.create_relations_by_string_ids(request.POST.get('flats', None))
return redirect('admin_panel_' + type + '_list')
return redirect('admin_panel_cabinet')
context = {
'form': form,
'model_type': Form.Meta.model.model_type,
'model_description': Form.Meta.model.model_description,
}
return render(request, 'admin_panel/' + template, context=context)
def mapped_route_add(request):
"""
Страница конструктора, которая позволит задавать последовательность остановок,
Далее, открыть карту и построить промежуточные маршруты,
Подтвердить и исходранить маршрут в БД.
:param request:
:return: Template of constructor
"""
template_name = 'admin_panel/mapper_route_add.html'
if request.method == 'GET':
# route_form = RouteSelectForm(request.GET or None)
routes = Route.objects.all()
formset = RoutePlatformFormset(queryset=RoutePoint.objects.none())
elif request.method == 'POST':
# route_form = RouteSelectForm(request.POST)
route_id = request.POST.get('route')
route = get_object_or_404(Route, pk=route_id)
formset = RoutePlatformFormset(request.POST)
platform = PlatformType.objects.get(pk=1)
platform_endpoint = PlatformType.objects.get(pk=3)
last_route_point = None
if formset.is_valid():
# route = route_form.save()
for num, form in enumerate(formset, start=1):
route_point = form.save(commit=False)
route_point.latitude = route_point.route_platform.latitude
route_point.longitude = route_point.route_platform.longitude
route_point.route = route
if last_route_point:
last_route_point.next = route_point
route_point.prev = last_route_point
# first iteration
if num == 1:
route_point.route_platform_type = platform_endpoint
# last iteration
if num == len(formset):
route_point.route_platform_type = platform_endpoint
route_point.save()
last_route_point = route_point
return redirect('admin_panel_cabinet')
return render(request, template_name, {
'routes': routes,
'formset': formset,
}) | ru | 0.275457 | # 'flat_type': RealEstateFlatTypeModel, # 'flat_type': RealEstateFlatTypeModel, # models_by_type = { # 'object': RealEstateObjectModel, # 'object_type': RealEstateObjectTypeModel, # 'flat_type': RealEstateFlatTypeModel, # } # 'flat_type': '_flat_form.html', # 'flat_type': RealEstateFlatTypeForm, # Model = models_by_type[type] # type = 'realestate' if type == 'flat_type' else type # form.create_relations_by_string_ids(request.POST.get('flats', None)) Страница конструктора, которая позволит задавать последовательность остановок, Далее, открыть карту и построить промежуточные маршруты, Подтвердить и исходранить маршрут в БД. :param request: :return: Template of constructor # route_form = RouteSelectForm(request.GET or None) # route_form = RouteSelectForm(request.POST) # route = route_form.save() # first iteration # last iteration | 2.037829 | 2 |
src/statue/exceptions.py | cclauss/statue | 8 | 6618993 | """Exceptions module."""
class StatueException(Exception):
"""Exceptions base for Statue."""
class EmptyConfiguration(StatueException):
"""Configuration must be set."""
def __init__(self) -> None:
"""Exception constructor."""
super().__init__("Statue configuration is empty!")
class InvalidStatueConfiguration(StatueException):
"""User-Defined Statue configuration is invalid."""
class MissingConfiguration(InvalidStatueConfiguration):
"""Part of the Statue configuration is missing."""
def __init__(self, part_name: str) -> None:
"""
Exception constructor.
:param part_name: The missing part from the configuration
:type part_name: str
"""
super().__init__(f'"{part_name}" is missing from Statue configuration.')
class UnknownCommand(StatueException):
"""Command isn't recognized."""
def __init__(self, command_name: str) -> None:
"""
Exception constructor.
:param command_name: Name of the unfound command
:type command_name: str
"""
super().__init__(f'Could not find command named "{command_name}".')
class InvalidCommand(StatueException):
"""Command doesn't fit restrictions."""
class UnknownContext(StatueException):
"""Context isn't recognized."""
def __init__(self, context_name: str) -> None:
"""
Exception constructor.
:param context_name: Name of the unfound context
:type context_name: str
"""
super().__init__(f'Could not find context named "{context_name}".')
class CommandExecutionError(StatueException):
"""Command cannot be executed."""
def __init__(self, command_name: str) -> None:
"""
Exception constructor.
:param command_name: Command name
:type command_name: str
"""
super().__init__(
f'Cannot execute "{command_name}" because it is not installed.'
)
| """Exceptions module."""
class StatueException(Exception):
"""Exceptions base for Statue."""
class EmptyConfiguration(StatueException):
"""Configuration must be set."""
def __init__(self) -> None:
"""Exception constructor."""
super().__init__("Statue configuration is empty!")
class InvalidStatueConfiguration(StatueException):
"""User-Defined Statue configuration is invalid."""
class MissingConfiguration(InvalidStatueConfiguration):
"""Part of the Statue configuration is missing."""
def __init__(self, part_name: str) -> None:
"""
Exception constructor.
:param part_name: The missing part from the configuration
:type part_name: str
"""
super().__init__(f'"{part_name}" is missing from Statue configuration.')
class UnknownCommand(StatueException):
"""Command isn't recognized."""
def __init__(self, command_name: str) -> None:
"""
Exception constructor.
:param command_name: Name of the unfound command
:type command_name: str
"""
super().__init__(f'Could not find command named "{command_name}".')
class InvalidCommand(StatueException):
"""Command doesn't fit restrictions."""
class UnknownContext(StatueException):
"""Context isn't recognized."""
def __init__(self, context_name: str) -> None:
"""
Exception constructor.
:param context_name: Name of the unfound context
:type context_name: str
"""
super().__init__(f'Could not find context named "{context_name}".')
class CommandExecutionError(StatueException):
"""Command cannot be executed."""
def __init__(self, command_name: str) -> None:
"""
Exception constructor.
:param command_name: Command name
:type command_name: str
"""
super().__init__(
f'Cannot execute "{command_name}" because it is not installed.'
)
| en | 0.806004 | Exceptions module. Exceptions base for Statue. Configuration must be set. Exception constructor. User-Defined Statue configuration is invalid. Part of the Statue configuration is missing. Exception constructor. :param part_name: The missing part from the configuration :type part_name: str Command isn't recognized. Exception constructor. :param command_name: Name of the unfound command :type command_name: str Command doesn't fit restrictions. Context isn't recognized. Exception constructor. :param context_name: Name of the unfound context :type context_name: str Command cannot be executed. Exception constructor. :param command_name: Command name :type command_name: str | 2.88543 | 3 |
Project/Project/urls.py | IsaacDSC/PagDjango | 0 | 6618994 | <filename>Project/Project/urls.py
from django.contrib import admin
from django.urls import include, path
from CRUD_App.views import (
home,
insert_products,
admProducts,
editing,
searchEdit,
upload,
contact_us,
list_contact_us,
register,
# login,
# register,
)
urlpatterns = [
path('admin/', admin.site.urls, name = 'admin'),
path('accounts/', include('allauth.urls'), name = 'accounts'),
path('', home, name = 'home'),
path('insert/', insert_products, name = 'insert_products'),
path('edit/', admProducts, name = 'admEdit'),
path('searchEdit/', searchEdit, name = 'searchEdit'),
path('editing/', editing, name = 'editing'),
path('upload/', upload, name = 'upload'),
path('contact/', contact_us, name = 'contact'),
path('contacts/', list_contact_us, name = 'list_contacts'),
path('accounts/signup/', register, name = 'signup')
]
#accounts/signup/
#{% url 'account_logout' %} sair
#{% url 'account_login' %} login
#{% url 'account_signup' %} register URL = /accounts/signup/ | <filename>Project/Project/urls.py
from django.contrib import admin
from django.urls import include, path
from CRUD_App.views import (
home,
insert_products,
admProducts,
editing,
searchEdit,
upload,
contact_us,
list_contact_us,
register,
# login,
# register,
)
urlpatterns = [
path('admin/', admin.site.urls, name = 'admin'),
path('accounts/', include('allauth.urls'), name = 'accounts'),
path('', home, name = 'home'),
path('insert/', insert_products, name = 'insert_products'),
path('edit/', admProducts, name = 'admEdit'),
path('searchEdit/', searchEdit, name = 'searchEdit'),
path('editing/', editing, name = 'editing'),
path('upload/', upload, name = 'upload'),
path('contact/', contact_us, name = 'contact'),
path('contacts/', list_contact_us, name = 'list_contacts'),
path('accounts/signup/', register, name = 'signup')
]
#accounts/signup/
#{% url 'account_logout' %} sair
#{% url 'account_login' %} login
#{% url 'account_signup' %} register URL = /accounts/signup/ | en | 0.211901 | # login, # register, #accounts/signup/ #{% url 'account_logout' %} sair #{% url 'account_login' %} login #{% url 'account_signup' %} register URL = /accounts/signup/ | 1.948629 | 2 |
open_connect/media/tests/test_tasks.py | lpatmo/actionify_the_news | 66 | 6618995 | <gh_stars>10-100
"""Media app task tests"""
from django.test import TestCase
from mock import Mock, patch
from open_connect.media import tasks
from open_connect.media.tests import get_in_memory_image_file
@patch.object(tasks, 'import_image')
class ProcessImageTest(TestCase):
"""Tests for image processing tasks"""
def test_process_image(self, mock):
"""Testing for process_image task"""
image_mock = Mock()
image_mock.image.read.return_value = get_in_memory_image_file().read()
image_model = Mock()
image_model.objects.get.return_value = image_mock
mock.return_value = image_model
tasks.process_image(image_id=1)
self.assertEqual(image_mock.create_display_size.call_count, 1)
self.assertEqual(image_mock.create_thumbnail.call_count, 1)
self.assertEqual(image_mock.process_exif_data.call_count, 1)
| """Media app task tests"""
from django.test import TestCase
from mock import Mock, patch
from open_connect.media import tasks
from open_connect.media.tests import get_in_memory_image_file
@patch.object(tasks, 'import_image')
class ProcessImageTest(TestCase):
"""Tests for image processing tasks"""
def test_process_image(self, mock):
"""Testing for process_image task"""
image_mock = Mock()
image_mock.image.read.return_value = get_in_memory_image_file().read()
image_model = Mock()
image_model.objects.get.return_value = image_mock
mock.return_value = image_model
tasks.process_image(image_id=1)
self.assertEqual(image_mock.create_display_size.call_count, 1)
self.assertEqual(image_mock.create_thumbnail.call_count, 1)
self.assertEqual(image_mock.process_exif_data.call_count, 1) | en | 0.822341 | Media app task tests Tests for image processing tasks Testing for process_image task | 2.377855 | 2 |
sourcecode/MSG_GAN/utils/iter_utils.py | jacobwjs/BBMSG-GAN | 45 | 6618996 | """ Utilities related to python iterator """
class hn_wrapper:
"""
Wrapper around an iterator which implements the safe
has_next functionality.
args:
it: iterator object
"""
def __init__(self, it):
self.it = iter(it)
self._hasnext = None
def __iter__(self):
return self
def __next__(self):
if self._hasnext:
result = self._thenext
else:
result = next(self.it)
self._hasnext = None
return result
def hasnext(self):
if self._hasnext is None:
try:
self._thenext = next(self.it)
except StopIteration:
self._hasnext = False
else:
self._hasnext = True
return self._hasnext
| """ Utilities related to python iterator """
class hn_wrapper:
"""
Wrapper around an iterator which implements the safe
has_next functionality.
args:
it: iterator object
"""
def __init__(self, it):
self.it = iter(it)
self._hasnext = None
def __iter__(self):
return self
def __next__(self):
if self._hasnext:
result = self._thenext
else:
result = next(self.it)
self._hasnext = None
return result
def hasnext(self):
if self._hasnext is None:
try:
self._thenext = next(self.it)
except StopIteration:
self._hasnext = False
else:
self._hasnext = True
return self._hasnext
| en | 0.714834 | Utilities related to python iterator Wrapper around an iterator which implements the safe has_next functionality. args: it: iterator object | 3.271681 | 3 |
tests/analysis_tests.py | JoaoLages/ecco | 1,391 | 6618997 | from ecco import analysis
import pytest
import numpy as np
shape = (100, 1000)
np.random.seed(seed=1)
@pytest.fixture
def acts():
acts1 = np.random.randn(*shape)
acts2 = np.random.randn(*shape)
yield acts1, acts2
class TestAnalysis:
def test_cca_smoke(self, acts):
actual = analysis.cca(acts[0], acts[1])
assert isinstance(actual, float)
assert actual >= 0
assert actual <= 1
def test_svcca_smoke(self, acts):
actual = analysis.svcca(acts[0], acts[1])
assert isinstance(actual, float)
assert actual >= 0
assert actual <= 1
def test_pwcca_smoke(self, acts):
actual = analysis.pwcca(acts[0], acts[1])
assert isinstance(actual, float)
assert actual >= 0
assert actual <= 1
def test_cka_smoke(self, acts):
actual = analysis.cka(acts[0], acts[1])
assert isinstance(actual, float)
assert actual >= 0
assert actual <= 1
def test_linear_transformation(self, acts):
acts_1 = acts[0]
acts_2 = acts_1 * 10
assert pytest.approx(analysis.cca(acts_1, acts_2), 1.0), "CCA of linear transformation is approx 1.0"
assert pytest.approx(analysis.svcca(acts_1, acts_2), 1.0), "SVCCA of linear transformation is approx 1.0"
assert pytest.approx(analysis.pwcca(acts_1, acts_2), 1.0), "PWCCA of linear transformation is approx 1.0"
assert pytest.approx(analysis.cka(acts_1, acts_2), 1.0), "CKA of linear transformation is approx 1.0"
| from ecco import analysis
import pytest
import numpy as np
shape = (100, 1000)
np.random.seed(seed=1)
@pytest.fixture
def acts():
acts1 = np.random.randn(*shape)
acts2 = np.random.randn(*shape)
yield acts1, acts2
class TestAnalysis:
def test_cca_smoke(self, acts):
actual = analysis.cca(acts[0], acts[1])
assert isinstance(actual, float)
assert actual >= 0
assert actual <= 1
def test_svcca_smoke(self, acts):
actual = analysis.svcca(acts[0], acts[1])
assert isinstance(actual, float)
assert actual >= 0
assert actual <= 1
def test_pwcca_smoke(self, acts):
actual = analysis.pwcca(acts[0], acts[1])
assert isinstance(actual, float)
assert actual >= 0
assert actual <= 1
def test_cka_smoke(self, acts):
actual = analysis.cka(acts[0], acts[1])
assert isinstance(actual, float)
assert actual >= 0
assert actual <= 1
def test_linear_transformation(self, acts):
acts_1 = acts[0]
acts_2 = acts_1 * 10
assert pytest.approx(analysis.cca(acts_1, acts_2), 1.0), "CCA of linear transformation is approx 1.0"
assert pytest.approx(analysis.svcca(acts_1, acts_2), 1.0), "SVCCA of linear transformation is approx 1.0"
assert pytest.approx(analysis.pwcca(acts_1, acts_2), 1.0), "PWCCA of linear transformation is approx 1.0"
assert pytest.approx(analysis.cka(acts_1, acts_2), 1.0), "CKA of linear transformation is approx 1.0"
| none | 1 | 2.512781 | 3 | |
src/modules/auth/core.py | tomsaudrins/api-service | 3 | 6618998 | <reponame>tomsaudrins/api-service<gh_stars>1-10
import jwt
from fastapi.security import OAuth2PasswordBearer, SecurityScopes
from fastapi import Depends, HTTPException, status
from pydantic import ValidationError
from datetime import datetime, timedelta
from passlib.context import CryptContext
# Local packages
from src.modules.mysql import DBConnection
from src.settings.envvariables import Settings
class Auth:
# Used by adding token: str = Depends(Auth.validate_token) as a parameter
# FastAPI token schema
oauth2_scheme = OAuth2PasswordBearer(
tokenUrl="token",
)
# Decrypts the token
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
# Validate the user before calling an endpoint
async def validate_token(
security_scopes: SecurityScopes, token: str = Depends(oauth2_scheme)
):
# Token type (Bearer)
authenticate_value = f"Bearer"
# Exception to aise when token is invalid
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid token",
headers={"WWW-Authenticate": authenticate_value},
)
# Validate the token by finding the user and generating the token
try:
payload = jwt.decode(token, Settings().JWT_SECRET, algorithms="HS256")
email: str = payload.get("email")
token_expires_datetime = datetime.strptime(
payload.get("expires"), "%Y-%m-%d %H:%M:%S"
)
if token_expires_datetime < datetime.now() or email is None:
raise credentials_exception
except (jwt.PyJWTError, ValidationError):
raise credentials_exception
# Find the user in the database
user = DBConnection().find_user_by_email(email=email)
# If use doesn't exist raise exception
if user is None:
raise credentials_exception
return user
# Create a token to authenticate the user
def create_token(user, expires_delta=1440):
to_encode = user.copy()
del to_encode["password"]
to_encode.update(
{
"expires": (
datetime.now() + timedelta(minutes=expires_delta)
).__str__()[0:-7]
}
)
token = jwt.encode(to_encode, Settings().JWT_SECRET, algorithm="HS256")
return dict(access_token=token, token_type="bearer")
| import jwt
from fastapi.security import OAuth2PasswordBearer, SecurityScopes
from fastapi import Depends, HTTPException, status
from pydantic import ValidationError
from datetime import datetime, timedelta
from passlib.context import CryptContext
# Local packages
from src.modules.mysql import DBConnection
from src.settings.envvariables import Settings
class Auth:
# Used by adding token: str = Depends(Auth.validate_token) as a parameter
# FastAPI token schema
oauth2_scheme = OAuth2PasswordBearer(
tokenUrl="token",
)
# Decrypts the token
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
# Validate the user before calling an endpoint
async def validate_token(
security_scopes: SecurityScopes, token: str = Depends(oauth2_scheme)
):
# Token type (Bearer)
authenticate_value = f"Bearer"
# Exception to aise when token is invalid
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid token",
headers={"WWW-Authenticate": authenticate_value},
)
# Validate the token by finding the user and generating the token
try:
payload = jwt.decode(token, Settings().JWT_SECRET, algorithms="HS256")
email: str = payload.get("email")
token_expires_datetime = datetime.strptime(
payload.get("expires"), "%Y-%m-%d %H:%M:%S"
)
if token_expires_datetime < datetime.now() or email is None:
raise credentials_exception
except (jwt.PyJWTError, ValidationError):
raise credentials_exception
# Find the user in the database
user = DBConnection().find_user_by_email(email=email)
# If use doesn't exist raise exception
if user is None:
raise credentials_exception
return user
# Create a token to authenticate the user
def create_token(user, expires_delta=1440):
to_encode = user.copy()
del to_encode["password"]
to_encode.update(
{
"expires": (
datetime.now() + timedelta(minutes=expires_delta)
).__str__()[0:-7]
}
)
token = jwt.encode(to_encode, Settings().JWT_SECRET, algorithm="HS256")
return dict(access_token=token, token_type="bearer") | en | 0.660222 | # Local packages # Used by adding token: str = Depends(Auth.validate_token) as a parameter # FastAPI token schema # Decrypts the token # Validate the user before calling an endpoint # Token type (Bearer) # Exception to aise when token is invalid # Validate the token by finding the user and generating the token # Find the user in the database # If use doesn't exist raise exception # Create a token to authenticate the user | 2.732903 | 3 |
plaything/models.py | sellonen/django-security-tips | 66 | 6618999 | <reponame>sellonen/django-security-tips
from django.db import models
from django.db import connection
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.hashers import make_password
from django.utils.crypto import get_random_string
class IntegerTuple(models.Model):
first = models.IntegerField(default=2)
second = models.IntegerField(default=4)
third = models.IntegerField(default=6)
class CustomUser(AbstractUser):
def make_random_password(self):
length = 35
allowed_chars='abcdefghjkmnpqrstuvwxyz' + 'ABCDEFGHJKLMNPQRSTUVWXYZ' + '23456789'
return get_random_string(length, allowed_chars)
def save(self, *args, **kwargs):
update_pw = ('update_fields' not in kwargs or 'password' in kwargs['update_fields']) and '$' in self.password
if update_pw:
algo, iterations, salt, pw_hash = self.password.split('$', 3)
# self.password should be unique anyway for get_session_auth_hash()
self.password = self.<PASSWORD>()
super(CustomUser, self).save(*args, **kwargs)
if update_pw:
cursor = connection.cursor()
cursor.execute("SELECT auth_schema.insert_or_update_password(%d, '%s', '%s');" % (self.id, salt, pw_hash))
return
def check_password(self, raw_password):
cursor = connection.cursor()
cursor.execute("SELECT auth_schema.get_salt(%d);" % self.id)
salt = cursor.fetchone()[0]
algo, iterations, salt, pw_hash = make_password(raw_password, salt=salt).split('$', 3)
cursor.execute("SELECT auth_schema.check_password(%d, '%s');" % (self.id, pw_hash))
pw_correct = cursor.fetchone()[0]
return bool(pw_correct)
| from django.db import models
from django.db import connection
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.hashers import make_password
from django.utils.crypto import get_random_string
class IntegerTuple(models.Model):
first = models.IntegerField(default=2)
second = models.IntegerField(default=4)
third = models.IntegerField(default=6)
class CustomUser(AbstractUser):
def make_random_password(self):
length = 35
allowed_chars='abcdefghjkmnpqrstuvwxyz' + 'ABCDEFGHJKLMNPQRSTUVWXYZ' + '23456789'
return get_random_string(length, allowed_chars)
def save(self, *args, **kwargs):
update_pw = ('update_fields' not in kwargs or 'password' in kwargs['update_fields']) and '$' in self.password
if update_pw:
algo, iterations, salt, pw_hash = self.password.split('$', 3)
# self.password should be unique anyway for get_session_auth_hash()
self.password = self.<PASSWORD>()
super(CustomUser, self).save(*args, **kwargs)
if update_pw:
cursor = connection.cursor()
cursor.execute("SELECT auth_schema.insert_or_update_password(%d, '%s', '%s');" % (self.id, salt, pw_hash))
return
def check_password(self, raw_password):
cursor = connection.cursor()
cursor.execute("SELECT auth_schema.get_salt(%d);" % self.id)
salt = cursor.fetchone()[0]
algo, iterations, salt, pw_hash = make_password(raw_password, salt=salt).split('$', 3)
cursor.execute("SELECT auth_schema.check_password(%d, '%s');" % (self.id, pw_hash))
pw_correct = cursor.fetchone()[0]
return bool(pw_correct) | en | 0.491746 | # self.password should be unique anyway for get_session_auth_hash() | 2.586565 | 3 |
Python/d036_spotify_songs_v_popularity/song_popularity.py | yashaslokesh/100-Days-Of-Code | 7 | 6619000 | <reponame>yashaslokesh/100-Days-Of-Code
import json
import sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append("/Users/lokeshkrishnappa/Desktop/python-projects/100-Days-Of-Code/Python/d034_spotify_song_lengths")
import spotify_authorize as sa
def get_playlist(user : str, oauth) -> str:
response = oauth.get(f"https://api.spotify.com/v1/users/{user}/playlists?offset=0")
data = json.loads(response.text)
print("\nFormat:\nPlaylist ID -> Playlist Title\n")
playlists = [f'{item["id"]} -> {item["name"]}' for item in data["items"]]
next_query = data["next"]
while next_query != None:
response = oauth.get(next_query)
data = json.loads(response.text)
playlists += [f'{item["id"]} -> {item["name"]}' for item in data["items"]]
next_query = data["next"]
print('\n'.join(playlists) + "\n")
playlist_id = input("Enter the playlist ID for corresponding to the playlist title that you want to analyze: ")
return playlist_id.strip()
def get_stats(user, playlist_id, oauth):
response = oauth.get(f"https://api.spotify.com/v1/users/{user}/playlists/{playlist_id}/tracks?offset=0")
data = json.loads(response.text)
lengths = [item["track"]["duration_ms"] for item in data["items"]]
popularities = [item["track"]["popularity"] for item in data["items"]]
next_query = data["next"]
while next_query != None:
response = oauth.get(next_query)
data = json.loads(response.text)
lengths += [item["track"]["duration_ms"] for item in data["items"]]
popularities += [item["track"]["popularity"] for item in data["items"]]
next_query = data["next"]
print(f"Total # of songs: {len(lengths)}")
lengths = np.divide(lengths, 1000)
print(f"Longest song length: {max(lengths)}")
max_pop = max(popularities)
min_pop = min(popularities)
# Map numbers from min_pop to max_pop range to the 0-100 range for relative popularities
popularities = np.multiply(np.subtract(popularities, min_pop), 100/(max_pop - min_pop))
# low2 + (value - low1) * (high2 - low2) / (high1 - low1)
return lengths, popularities
def plot_results(song_lengths : list, popularities : list):
plt.figure(figsize=(10,10))
plt.scatter(song_lengths, popularities, marker='.', c='c')
plt.axis([min(song_lengths), max(song_lengths), 0, 100])
plt.xlabel('Length')
plt.ylabel('Popularity')
plt.title('Song length vs. popularity')
# plt.axis()
plt.show()
def main():
oauth = sa.authorize()
input()
user = input("Enter desired user to retrieve their public playlists: ")
playlist_id = get_playlist(user, oauth)
song_lengths, popularities = get_stats(user, playlist_id, oauth)
plot_results(song_lengths, popularities)
if __name__ == '__main__':
main() | import json
import sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append("/Users/lokeshkrishnappa/Desktop/python-projects/100-Days-Of-Code/Python/d034_spotify_song_lengths")
import spotify_authorize as sa
def get_playlist(user : str, oauth) -> str:
response = oauth.get(f"https://api.spotify.com/v1/users/{user}/playlists?offset=0")
data = json.loads(response.text)
print("\nFormat:\nPlaylist ID -> Playlist Title\n")
playlists = [f'{item["id"]} -> {item["name"]}' for item in data["items"]]
next_query = data["next"]
while next_query != None:
response = oauth.get(next_query)
data = json.loads(response.text)
playlists += [f'{item["id"]} -> {item["name"]}' for item in data["items"]]
next_query = data["next"]
print('\n'.join(playlists) + "\n")
playlist_id = input("Enter the playlist ID for corresponding to the playlist title that you want to analyze: ")
return playlist_id.strip()
def get_stats(user, playlist_id, oauth):
response = oauth.get(f"https://api.spotify.com/v1/users/{user}/playlists/{playlist_id}/tracks?offset=0")
data = json.loads(response.text)
lengths = [item["track"]["duration_ms"] for item in data["items"]]
popularities = [item["track"]["popularity"] for item in data["items"]]
next_query = data["next"]
while next_query != None:
response = oauth.get(next_query)
data = json.loads(response.text)
lengths += [item["track"]["duration_ms"] for item in data["items"]]
popularities += [item["track"]["popularity"] for item in data["items"]]
next_query = data["next"]
print(f"Total # of songs: {len(lengths)}")
lengths = np.divide(lengths, 1000)
print(f"Longest song length: {max(lengths)}")
max_pop = max(popularities)
min_pop = min(popularities)
# Map numbers from min_pop to max_pop range to the 0-100 range for relative popularities
popularities = np.multiply(np.subtract(popularities, min_pop), 100/(max_pop - min_pop))
# low2 + (value - low1) * (high2 - low2) / (high1 - low1)
return lengths, popularities
def plot_results(song_lengths : list, popularities : list):
plt.figure(figsize=(10,10))
plt.scatter(song_lengths, popularities, marker='.', c='c')
plt.axis([min(song_lengths), max(song_lengths), 0, 100])
plt.xlabel('Length')
plt.ylabel('Popularity')
plt.title('Song length vs. popularity')
# plt.axis()
plt.show()
def main():
oauth = sa.authorize()
input()
user = input("Enter desired user to retrieve their public playlists: ")
playlist_id = get_playlist(user, oauth)
song_lengths, popularities = get_stats(user, playlist_id, oauth)
plot_results(song_lengths, popularities)
if __name__ == '__main__':
main() | en | 0.657693 | # of songs: {len(lengths)}") # Map numbers from min_pop to max_pop range to the 0-100 range for relative popularities # low2 + (value - low1) * (high2 - low2) / (high1 - low1) # plt.axis() | 3.405526 | 3 |
src/metaerg/data_model.py | kinestetika/MetaErg | 0 | 6619001 | import re
from pathlib import Path
from enum import Enum, auto
from collections import Counter
from metaerg.run_and_read import subsystems_data
class DBentry:
    """An entry in a reference database: the subject of a search/blast hit.

    Falsy attributes (empty strings, zeroes) are omitted from repr().
    """

    def __init__(self, *, domain: str, descr: str, taxon: str = '', ncbi: str = '', gene: str = '',
                 length: int = 0, pos: int = 0):
        """
        :param domain: domain/database the entry belongs to -- TODO confirm exact semantics
        :param descr: functional description of the entry
        :param taxon: taxonomic lineage, "; "-separated (see taxon_at_genus)
        :param ncbi: NCBI identifier (presumably an accession or taxon id -- confirm)
        :param gene: gene name
        :param length: sequence length of the entry (returned by len())
        :param pos: position of the entry (presumably its index/offset in the source database -- confirm)
        """
        self.domain = domain
        self.descr = descr
        self.taxon = taxon
        self.ncbi = ncbi
        self.gene = gene
        self.length = length
        self.pos = pos

    def __iter__(self):
        # yields (attribute name, value) pairs in a fixed order
        return ((k, v) for k, v in zip(('domain', 'descr', 'taxon', 'ncbi', 'gene', 'length', 'pos'),
                                       (self.domain, self.descr, self.taxon, self.ncbi, self.gene, self.length,
                                        self.pos)))

    def __repr__(self):
        # {v!r} (the original {v:!r} is an invalid format spec and raised ValueError);
        # falsy attributes are omitted
        return '{}({})'.format(type(self).__name__, ', '.join(f'{k}={v!r}' for k, v in self if v))

    def __len__(self):
        return self.length

    def taxon_at_genus(self) -> str:
        """Return the most specific single-word taxon of the lineage (assumed genus), or ''."""
        for t in reversed(self.taxon.split("; ")):
            if " " not in t:
                return t
        return ''
class BlastHit:
    """A single alignment ("hit") of a query sequence against a database entry."""

    def __init__(self, query: str, hit: 'DBentry', percent_id: float, aligned_length: int, mismatches: int,
                 gaps: int, query_start: int, query_end: int, hit_start: int, hit_end: int, evalue: float,
                 score: float):
        """Store the fields of a tabular blast hit (query id, subject entry and alignment statistics)."""
        self.query = query
        self.hit = hit
        self.percent_id = percent_id
        self.aligned_length = aligned_length
        self.mismatches = mismatches
        self.gaps = gaps
        self.query_start = query_start
        self.query_end = query_end
        self.hit_start = hit_start
        self.hit_end = hit_end
        self.evalue = evalue
        self.score = score

    def __repr__(self):
        # format specs fixed: the original '{.1f}' / '{.1e}' are attribute-access
        # fields (AttributeError at runtime); '{:.1f}' / '{:.1e}' are intended
        return '{}({!r}, {!r}, {:.1f}, {}, {}, {}, {}, {}, {}, {}, {:.1e}, {:.1f})'.format(
            type(self).__name__, self.query, self.hit, self.percent_id, self.aligned_length, self.mismatches,
            self.gaps, self.query_start, self.query_end, self.hit_start, self.hit_end, self.evalue, self.score)

    def __len__(self):
        # length of the alignment, not of the query or the hit
        return self.aligned_length
class BlastResult:
    """The ordered collection of blast hits obtained for a single query sequence."""

    def __init__(self, hits: 'tuple[BlastHit]'):
        """
        :param hits: the hits, best hit first
        :raises ValueError: when hits is empty (narrowed from the generic Exception;
            backward compatible for callers catching Exception)
        """
        if not hits:
            raise ValueError('Attempt to create empty blast result.')
        self.hits = hits

    def __iter__(self):
        return iter(self.hits)

    def __len__(self):
        return len(self.hits)

    def __repr__(self):
        return '{}({})'.format(type(self).__name__, ',\n'.join(f'{h!r}' for h in self))

    def query(self) -> str:
        """The id of the query sequence, taken from the top hit."""
        return self.hits[0].query

    def percent_aligned(self) -> float:
        """Percentage of the top hit's database entry that is covered by the alignment."""
        top = self.hits[0]
        return 100 * len(top) / len(top.hit)

    def percent_recall(self) -> float:
        """Percentage of hits (out of all hits) that share the top hit's description, top hit excluded."""
        top_descr = self.hits[0].hit.descr
        return 100 * sum(1 for h in self.hits[1:] if h.hit.descr == top_descr) / len(self)

    def summary(self) -> str:
        """One line: [aligned len/entry len] aa@identity% [hits with same function/total hits] description."""
        top = self.hits[0]
        identical_function_count = sum(1 for h in self.hits[1:] if h.hit.descr == top.hit.descr)
        return '[{}/{}] aa@{}% [{}/{}] {}'.format(len(top), len(top.hit), top.percent_id,
                                                  identical_function_count, len(self), top.hit.descr)
class FeatureType(Enum):
    """The kinds of sequence features distinguished by the data model."""
    CDS = auto()
    rRNA = auto()
    tRNA = auto()
    tmRNA = auto()
    ncRNA = auto()
    repeat = auto()
    crispr_repeat = auto()
    retrotransposon = auto()

    def __repr__(self):
        # renders as e.g. FeatureType['CDS'], which is valid lookup syntax
        return f"{type(self).__name__}[{self.name!r}]"
# Feature types treated as RNA features (note that retrotransposon is included here).
RNA_FEATURES = (FeatureType.rRNA, FeatureType.tRNA, FeatureType.tmRNA, FeatureType.ncRNA, FeatureType.retrotransposon)
class SeqFeature:
    """Describes a sequence feature, such as a gene."""
    # Keys selected for display of a feature. The two literals are concatenated; the
    # original lacked a separating space, fusing 'signal_peptide' and 'subsystem' into
    # the single bogus key 'signal_peptidesubsystem'.
    # NOTE(review): 'product' has no matching SeqFeature attribute (descriptions are
    # stored as 'descr') - confirm how downstream code resolves this key.
    displayed_keys = ('start end strand type inference product taxon antismash transmembrane_helixes '
                      'signal_peptide subsystem notes').split()

    def __init__(self, start: int, end: int, strand: int, type, inference: str, seq: str, id: str = '', descr: str = '',
                 taxon: str = '', antismash: str = '', transmembrane_helixes: str = '', signal_peptide: str = '',
                 cdd: 'BlastResult' = None, blast: 'BlastResult' = None, subsystem=None, notes=None):
        """
        :param start: start position of the feature (len() = end - start, so end is exclusive)
        :param end: end position of the feature
        :param strand: the strand of the feature (+1/-1 convention assumed -- TODO confirm)
        :param type: a FeatureType member, or the name of one as a str
        :param inference: the tool/method that predicted the feature
        :param seq: the feature's sequence; all whitespace is removed
        """
        self.start = start
        self.end = end
        self.strand = strand
        # accept either a FeatureType member or its name
        self.type = type if isinstance(type, FeatureType) else FeatureType[type]
        self.inference = inference
        self.seq = ''.join(seq.split())
        self.id = id
        self.descr = descr
        self.taxon = taxon
        self.antismash = antismash
        self.transmembrane_helixes = transmembrane_helixes
        self.signal_peptide = signal_peptide
        self.cdd = cdd
        self.blast = blast
        # per-instance mutable defaults; note a passed-in *empty* collection is also
        # replaced by a fresh set
        self.subsystem = subsystem if subsystem else set()
        self.notes = notes if notes else set()

    def __len__(self):
        # feature length (end is exclusive)
        return self.end - self.start

    def __iter__(self):
        # yields (attribute name, value) pairs in a fixed order
        return ((k, v) for k, v in zip(('id', 'type', 'start', 'end', 'strand', 'descr', 'notes', 'taxon', 'inference',
                                        'antismash', 'transmembrane_helixes', 'signal_peptide', 'subsystem', 'seq',
                                        'cdd', 'blast'),
                                       (self.id, self.type, self.start, self.end, self.strand, self.descr, self.notes,
                                        self.taxon, self.inference, self.antismash, self.transmembrane_helixes,
                                        self.signal_peptide, self.subsystem, self.seq, self.cdd, self.blast)))

    def __repr__(self):
        # falsy attributes are omitted
        return '\n{}({})'.format(type(self).__name__, ',\n '.join(f'{k}={v!r}' for k, v in self if v))

    # Features compare by start position only, so they can be sorted along a contig.
    # NOTE(review): defining __eq__ without __hash__ makes instances unhashable -
    # confirm SeqFeature objects are never used as dict keys or set members.
    def __lt__(self, other):
        return self.start < other.start

    def __gt__(self, other):
        return self.start > other.start

    def __eq__(self, other):
        return self.start == other.start

    def __le__(self, other):
        return self.start <= other.start

    def __ge__(self, other):
        return self.start >= other.start

    def __ne__(self, other):
        return self.start != other.start

    def tmh_count(self) -> int:
        """The number of predicted transmembrane helixes (first token of the annotation string)."""
        try:
            return int(self.transmembrane_helixes.split()[0])
        except (ValueError, IndexError):
            # IndexError: empty annotation string (split()[0] fails);
            # ValueError: first token is not a number
            return 0

    def taxon_at_genus(self) -> str:
        """Return the most specific single-word taxon of the lineage (assumed genus), or ''."""
        for t in reversed(self.taxon.split("; ")):
            if " " not in t:
                return t
        return ''
class SubSystem:
def __init__(self, id: str, targets: [str] = None, hits = None):
self.id = id
self.targets = targets if targets else list()
self.hits = hits if hits else dict()
def __repr__(self):
return '{}({!r},{!r},{!r})'.format(type(self).__name__, self.id, self.targets, self.hits)
def add_hit(self, feature_id: str, target: str = 'none'):
self.hits.setdefault(feature_id, set()).add(target)
def get_hits(self, target):
return (k for k, v in self.hits.items() if target in v)
def get_stats(self):
if self.targets:
genes_present = len(set(self.hits.values()))
return genes_present, len(self.targets), genes_present / len(self.targets)
else:
return len(self.hits), 0, 1
class SubSystems:
def __init__(self, subsystems: dict[str, SubSystem] = None):
self.subsystems = {}
self.cues = {}
current_subsystem = None
for line in subsystems_data.subsystem_data().split('\n'):
line = line.strip()
if line.startswith("#") or not len(line):
continue
elif line.startswith(">"):
current_subsystem = SubSystem(line[1:])
self.subsystems[current_subsystem.id] = current_subsystem
elif current_subsystem is not None:
current_subsystem.targets.append(line)
self.cues[line] = current_subsystem
if subsystems:
self.subsystems = subsystems
def __repr__(self):
return '{}({!r})'.format(type(self).__name__, self.subsystems)
def match(self, feature: SeqFeature, descriptions):
for d in descriptions:
for cue, subsystem in self.cues.items():
if len(d.descr) > len(cue) + 20:
continue
match = re.search(r'\b' + cue + r'\b', d)
if match and match.start() < 10:
subsystem.add_hit(feature.id, cue)
feature.subsystem.add(subsystem.id)
return True
return False
class SeqRecord:
def __init__(self, id: str, seq: str, descr: str = '', features: list[SeqFeature] = None):
self.id = id
self.seq = ''.join(seq.split())
self.descr = descr
self.features = features if features else list()
def __repr__(self):
seq_lines = (self.seq[i:i+80] for i in range(0, len(self.seq), 80))
return "{}(id={!r},descr={!r},features={!r},\nseq='''{}''')\n".format(type(self).__name__, self.id, self.descr,
self.features, '\n'.join(seq_lines))
def __len__(self):
return len(self.seq)
class Masker:
def __init__(self, mask=True, exceptions=None, min_length=50):
self.apply_mask = mask
self.exceptions = exceptions if exceptions else list()
self.min_length = min_length
self.nt_total = 0
self.nt_masked = 0
def mask(self, seq_record: SeqRecord) -> SeqRecord:
seq = seq_record.seq
seq_record.nt_masked = 0
if self.apply_mask:
for f in seq_record.features:
if f.inference not in self.exceptions and len(f) >= self.min_length:
seq = seq[:f.start] + 'N' * len(f) + seq[f.end:]
self.nt_masked += len(f)
self.nt_total += len(seq_record)
return SeqRecord(id=seq_record.id, descr=seq_record.descr, seq=seq)
# record.annotations['molecule_type'] = 'DNA'
def stats(self):
return f'Masked {self.nt_masked / max(self.nt_total, 1) * 100:.1f}% of sequence data.'
class Genome:
def __init__(self, id: str, contigs: dict[str, SeqRecord]=None, delimiter: str = '.',
translation_table: int = 11, properties: dict = None, subsystems: SubSystems = None):
self.id = id
self.contigs = contigs if contigs else dict()
self.delimiter = delimiter
self.translation_table = translation_table
self.properties = properties if properties else dict()
self.subsystems = subsystems if subsystems else SubSystems()
def __len__(self):
return sum(len(c) for c in self.contigs.values())
def __repr__(self):
return '{}(id={!r},\ndelimiter={!r},\ntranslation_table={!r},\n' \
'properties={!r},\nsubsystems={!r},\ncontigs={!r})\n'.format(type(self).__name__,
self.id,
self.delimiter,
self.translation_table,
self.properties,
self.subsystems,
self.contigs)
def validate_ids(self):
if self.delimiter in self.id:
raise Exception(f'Genome id {self.id} contains {self.delimiter}; change using --delimiter')
for c_id in self.contigs.keys():
if self.delimiter in c_id:
raise Exception(f'Contig id {c_id} contains {self.delimiter}; change using --delimiter')
def rename_contigs(self, mappings_file:Path):
i = 0
with open(mappings_file, 'w') as mapping_writer:
for c in self.contigs.values():
new_id = f'{self.id}.c{i:0>4}'
mapping_writer.write(f'{c.id}\t{new_id}\n')
c.id = new_id
i += 1
def generate_feature_ids(self):
f_id = 0
for c in self.contigs.values():
c.features.sort()
for f in c.features:
f.id = self.delimiter.join((self.id, c.id, f'{f_id:05d}'))
f_id += 1
def get_feature(self, feature_id):
id = feature_id.split(self.delimiter)
return self.contigs[id[1]].features[int(id[2])]
def compute_properties(self):
self.properties['size'] = len(self)
self.properties['percent GC'] = int(sum((c.seq.count('G') + c.seq.count('G') for c in
self.contigs.values())) / self.properties['size'] + 0.5)
cum_size = 0
for contig in sorted(self.contigs.values(), key=len, reverse=True):
cum_size += len(contig)
if cum_size >+ self.properties['size'] / 2:
self.properties["N50"] = len(contig)
break
self.properties['#proteins'] = sum(1 for contig in self.contigs.values() for f in contig.features
if f.type == FeatureType.CDS)
self.properties['percent coding'] = int(sum(len(f) for contig in self.contigs.values() for f in contig.features
if f.type == FeatureType.CDS) / self.properties['size'] * 100 + 0.5)
self.properties['mean protein length (aa)'] = int(self.properties['percent coding'] * self.properties['size']
/ 3 / self.properties['#proteins'])
self.properties['#ribosomal RNA'] = sum(1 for contig in self.contigs.values() for f in contig.features
if f.type == FeatureType.rRNA)
self.properties['#transfer RNA'] = sum(1 for contig in self.contigs.values() for f in contig.features
if f.type == FeatureType.tRNA)
self.properties['#non coding RNA'] = sum(1 for contig in self.contigs.values() for f in contig.features
if f.type == FeatureType.ncRNA)
self.properties['#retrotransposons'] = sum(1 for contig in self.contigs.values() for f in contig.features
if f.type == FeatureType.retrotransposon)
self.properties['#CRISPR repeats'] = sum(1 for contig in self.contigs.values() for f in contig.features
if f.type == FeatureType.crispr_repeat)
self.properties['#other repeats'] = sum(1 for contig in self.contigs.values() for f in contig.features
if f.type == FeatureType.repeat)
self.properties['percent repeats'] = int(100 * sum(len(f) for contig in self.contigs.values() for f in
contig.features if f.type in (FeatureType.repeat,
FeatureType.retrotransposon, FeatureType.crispr_repeat))
/ self.properties['size'] + 0.5)
self.properties['total # features'] = sum(len(contig.features) for contig in self.contigs.values())
taxon_counts = Counter()
taxon_counts.update(f.taxon for contig in self.contigs.values() for f in contig.features)
dominant_taxon, highest_count = taxon_counts.most_common(1)[0]
self.properties['dominant taxon'] = f'{dominant_taxon} ({highest_count/sum(taxon_counts.values()) * 100:.1f}%)'
return self.properties | import re
from pathlib import Path
from enum import Enum, auto
from collections import Counter
from metaerg.run_and_read import subsystems_data
class DBentry:
def __init__(self, *, domain: str, descr: str, taxon: str = '', ncbi: str='', gene: str='', length: int=0,
pos: int=0):
self.domain = domain
self.descr = descr
self.taxon = taxon
self.ncbi = ncbi
self.gene = gene
self.length = length
self.pos = pos
def __iter__(self):
return ((k, v) for k, v in zip(('domain', 'descr', 'taxon', 'ncbi', 'gene', 'length', 'pos'),
(self.domain, self.descr, self.taxon, self.ncbi, self.gene, self.length, self.pos)))
def __repr__(self):
return '{}({})'.format(type(self).__name__, ', '.join(f'{k}={v:!r}' for k, v in self if v))
def __len__(self):
return self.length
def taxon_at_genus(self) -> str:
for t in reversed(self.taxon.split("; ")):
if " " not in t:
return t
return ''
class BlastHit:
def __init__(self, query: str, hit: DBentry, percent_id: float, aligned_length: int, mismatches: int, gaps: int,
query_start: int, query_end: int, hit_start: int, hit_end: int, evalue: float, score: float):
self.query = query
self.hit = hit
self.percent_id = percent_id
self.aligned_length = aligned_length
self.mismatches = mismatches
self.gaps = gaps
self.query_start = query_start
self.query_end = query_end
self.hit_start = hit_start
self.hit_end = hit_end
self.evalue = evalue
self.score = score
def __repr__(self):
return '{}({!r}, {!r}, {.1f}, {}, {}, {}, {}, {}, {}, {}, {.1e}, {.1f})'.format(type(self).__name__,
self.query,
self.hit,
self.percent_id,
self.aligned_length,
self.mismatches,
self.gaps,
self.query_start,
self.query_end,
self.hit_start,
self.hit_end,
self.evalue,
self.score)
def __len__(self):
return self.aligned_length
class BlastResult:
def __init__(self, hits: tuple[BlastHit]):
self.hits = hits
if not len(hits):
raise Exception('Attempt to create empty blast result.')
def __iter__(self):
return self.hits.__iter__()
def __len__(self):
return len(self.hits)
def __repr__(self):
return '{}({})'.format(type(self).__name__, ',\n'.join(f'{h!r}' for h in self))
def query(self):
return self.hits[0].query
def percent_aligned(self) -> float:
return 100 * len(self.hits[0]) / len(self.hits[0].hit)
def percent_recall(self) -> float:
return 100 * sum((1 for h in self.hits[1:] if h.hit.descr == self.hits[0].hit.descr)) / len(self)
def summary(self) -> str:
identical_function_count = sum((1 for h in self.hits[1:] if h.hit.descr == self.hits[0].hit.descr))
return '[{}/{}] aa@{}% [{}/{}] {}'.format(len(self.hits[0]),
len(self.hits[0].hit),
self.hits[0].percent_id,
identical_function_count,
len(self),
self.hits[0].hit.descr)
class FeatureType(Enum):
CDS = auto()
rRNA = auto()
tRNA = auto()
tmRNA = auto()
ncRNA = auto()
repeat = auto()
crispr_repeat = auto()
retrotransposon = auto()
def __repr__(self):
return '{}[{!r}]'.format(type(self).__name__, self.name)
RNA_FEATURES = (FeatureType.rRNA, FeatureType.tRNA, FeatureType.tmRNA, FeatureType.ncRNA, FeatureType.retrotransposon)
class SeqFeature:
"""Describes a sequence feature, such as a gene."""
displayed_keys = 'start end strand type inference product taxon antismash transmembrane_helixes signal_peptide' \
'subsystem notes'.split()
def __init__(self, start: int, end: int, strand: int, type, inference: str, seq: str, id: str = '', descr: str = '',
taxon: str = '', antismash: str = '', transmembrane_helixes: str = '', signal_peptide: str = '',
cdd: BlastResult = None, blast: BlastResult = None, subsystem = None, notes = None):
self.start = start
self.end = end
self.strand = strand
self.type = type if isinstance(type, FeatureType) else FeatureType[type]
self.inference = inference
self.seq = ''.join(seq.split())
self.id = id
self.descr = descr
self.taxon = taxon
self.antismash = antismash
self.transmembrane_helixes = transmembrane_helixes
self.signal_peptide = signal_peptide
self.cdd = cdd
self.blast = blast
self.subsystem = subsystem if subsystem else set()
self.notes = notes if notes else set()
def __len__(self):
return self.end - self.start
def __iter__(self):
return ((k, v) for k, v in zip(('id', 'type', 'start', 'end', 'strand', 'descr', 'notes', 'taxon', 'inference',
'antismash', 'transmembrane_helixes', 'signal_peptide', 'subsystem', 'seq',
'cdd', 'blast'),
(self.id, self.type, self.start, self.end, self.strand, self.descr, self.notes, self.taxon,
self.inference, self.antismash, self.transmembrane_helixes, self.signal_peptide, self.subsystem,
self.seq, self.cdd, self.blast)))
def __repr__(self):
return '\n{}({})'.format(type(self).__name__, ',\n '.join(f'{k}={v!r}' for k, v in self if v))
def __lt__(self, other):
return self.start < other.start
def __gt__(self, other):
return self.start > other.start
def __eq__(self, other):
return self.start == other.start
def __le__(self, other):
return self.start <= other.start
def __ge__(self, other):
return self.start >= other.start
def __ne__(self, other):
return self.start != other.start
def tmh_count(self):
try:
return int(self.transmembrane_helixes.split()[0])
except ValueError:
return 0
def taxon_at_genus(self) -> str:
for t in reversed(self.taxon.split("; ")):
if " " not in t:
return t
return ''
class SubSystem:
def __init__(self, id: str, targets: [str] = None, hits = None):
self.id = id
self.targets = targets if targets else list()
self.hits = hits if hits else dict()
def __repr__(self):
return '{}({!r},{!r},{!r})'.format(type(self).__name__, self.id, self.targets, self.hits)
def add_hit(self, feature_id: str, target: str = 'none'):
self.hits.setdefault(feature_id, set()).add(target)
def get_hits(self, target):
return (k for k, v in self.hits.items() if target in v)
def get_stats(self):
if self.targets:
genes_present = len(set(self.hits.values()))
return genes_present, len(self.targets), genes_present / len(self.targets)
else:
return len(self.hits), 0, 1
class SubSystems:
def __init__(self, subsystems: dict[str, SubSystem] = None):
self.subsystems = {}
self.cues = {}
current_subsystem = None
for line in subsystems_data.subsystem_data().split('\n'):
line = line.strip()
if line.startswith("#") or not len(line):
continue
elif line.startswith(">"):
current_subsystem = SubSystem(line[1:])
self.subsystems[current_subsystem.id] = current_subsystem
elif current_subsystem is not None:
current_subsystem.targets.append(line)
self.cues[line] = current_subsystem
if subsystems:
self.subsystems = subsystems
def __repr__(self):
return '{}({!r})'.format(type(self).__name__, self.subsystems)
def match(self, feature: SeqFeature, descriptions):
for d in descriptions:
for cue, subsystem in self.cues.items():
if len(d.descr) > len(cue) + 20:
continue
match = re.search(r'\b' + cue + r'\b', d)
if match and match.start() < 10:
subsystem.add_hit(feature.id, cue)
feature.subsystem.add(subsystem.id)
return True
return False
class SeqRecord:
def __init__(self, id: str, seq: str, descr: str = '', features: list[SeqFeature] = None):
self.id = id
self.seq = ''.join(seq.split())
self.descr = descr
self.features = features if features else list()
def __repr__(self):
seq_lines = (self.seq[i:i+80] for i in range(0, len(self.seq), 80))
return "{}(id={!r},descr={!r},features={!r},\nseq='''{}''')\n".format(type(self).__name__, self.id, self.descr,
self.features, '\n'.join(seq_lines))
def __len__(self):
return len(self.seq)
class Masker:
def __init__(self, mask=True, exceptions=None, min_length=50):
self.apply_mask = mask
self.exceptions = exceptions if exceptions else list()
self.min_length = min_length
self.nt_total = 0
self.nt_masked = 0
def mask(self, seq_record: SeqRecord) -> SeqRecord:
seq = seq_record.seq
seq_record.nt_masked = 0
if self.apply_mask:
for f in seq_record.features:
if f.inference not in self.exceptions and len(f) >= self.min_length:
seq = seq[:f.start] + 'N' * len(f) + seq[f.end:]
self.nt_masked += len(f)
self.nt_total += len(seq_record)
return SeqRecord(id=seq_record.id, descr=seq_record.descr, seq=seq)
# record.annotations['molecule_type'] = 'DNA'
def stats(self):
return f'Masked {self.nt_masked / max(self.nt_total, 1) * 100:.1f}% of sequence data.'
class Genome:
def __init__(self, id: str, contigs: dict[str, SeqRecord]=None, delimiter: str = '.',
translation_table: int = 11, properties: dict = None, subsystems: SubSystems = None):
self.id = id
self.contigs = contigs if contigs else dict()
self.delimiter = delimiter
self.translation_table = translation_table
self.properties = properties if properties else dict()
self.subsystems = subsystems if subsystems else SubSystems()
def __len__(self):
return sum(len(c) for c in self.contigs.values())
def __repr__(self):
return '{}(id={!r},\ndelimiter={!r},\ntranslation_table={!r},\n' \
'properties={!r},\nsubsystems={!r},\ncontigs={!r})\n'.format(type(self).__name__,
self.id,
self.delimiter,
self.translation_table,
self.properties,
self.subsystems,
self.contigs)
def validate_ids(self):
if self.delimiter in self.id:
raise Exception(f'Genome id {self.id} contains {self.delimiter}; change using --delimiter')
for c_id in self.contigs.keys():
if self.delimiter in c_id:
raise Exception(f'Contig id {c_id} contains {self.delimiter}; change using --delimiter')
def rename_contigs(self, mappings_file:Path):
i = 0
with open(mappings_file, 'w') as mapping_writer:
for c in self.contigs.values():
new_id = f'{self.id}.c{i:0>4}'
mapping_writer.write(f'{c.id}\t{new_id}\n')
c.id = new_id
i += 1
def generate_feature_ids(self):
f_id = 0
for c in self.contigs.values():
c.features.sort()
for f in c.features:
f.id = self.delimiter.join((self.id, c.id, f'{f_id:05d}'))
f_id += 1
def get_feature(self, feature_id):
id = feature_id.split(self.delimiter)
return self.contigs[id[1]].features[int(id[2])]
def compute_properties(self):
self.properties['size'] = len(self)
self.properties['percent GC'] = int(sum((c.seq.count('G') + c.seq.count('G') for c in
self.contigs.values())) / self.properties['size'] + 0.5)
cum_size = 0
for contig in sorted(self.contigs.values(), key=len, reverse=True):
cum_size += len(contig)
if cum_size >+ self.properties['size'] / 2:
self.properties["N50"] = len(contig)
break
self.properties['#proteins'] = sum(1 for contig in self.contigs.values() for f in contig.features
if f.type == FeatureType.CDS)
self.properties['percent coding'] = int(sum(len(f) for contig in self.contigs.values() for f in contig.features
if f.type == FeatureType.CDS) / self.properties['size'] * 100 + 0.5)
self.properties['mean protein length (aa)'] = int(self.properties['percent coding'] * self.properties['size']
/ 3 / self.properties['#proteins'])
self.properties['#ribosomal RNA'] = sum(1 for contig in self.contigs.values() for f in contig.features
if f.type == FeatureType.rRNA)
self.properties['#transfer RNA'] = sum(1 for contig in self.contigs.values() for f in contig.features
if f.type == FeatureType.tRNA)
self.properties['#non coding RNA'] = sum(1 for contig in self.contigs.values() for f in contig.features
if f.type == FeatureType.ncRNA)
self.properties['#retrotransposons'] = sum(1 for contig in self.contigs.values() for f in contig.features
if f.type == FeatureType.retrotransposon)
self.properties['#CRISPR repeats'] = sum(1 for contig in self.contigs.values() for f in contig.features
if f.type == FeatureType.crispr_repeat)
self.properties['#other repeats'] = sum(1 for contig in self.contigs.values() for f in contig.features
if f.type == FeatureType.repeat)
self.properties['percent repeats'] = int(100 * sum(len(f) for contig in self.contigs.values() for f in
contig.features if f.type in (FeatureType.repeat,
FeatureType.retrotransposon, FeatureType.crispr_repeat))
/ self.properties['size'] + 0.5)
self.properties['total # features'] = sum(len(contig.features) for contig in self.contigs.values())
taxon_counts = Counter()
taxon_counts.update(f.taxon for contig in self.contigs.values() for f in contig.features)
dominant_taxon, highest_count = taxon_counts.most_common(1)[0]
self.properties['dominant taxon'] = f'{dominant_taxon} ({highest_count/sum(taxon_counts.values()) * 100:.1f}%)'
return self.properties | en | 0.547589 | Describes a sequence feature, such as a gene. {} # record.annotations['molecule_type'] = 'DNA' # features'] = sum(len(contig.features) for contig in self.contigs.values()) | 2.203886 | 2 |
terrascript/junos/r.py | mjuenema/python-terrascript | 507 | 6619002 | # terrascript/junos/r.py
# Automatically generated by tools/makecode.py ()
import warnings
warnings.warn(
"using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2
)
import terrascript
class junos_aggregate_route(terrascript.Resource):
pass
class junos_application(terrascript.Resource):
pass
class junos_application_set(terrascript.Resource):
pass
class junos_bgp_group(terrascript.Resource):
pass
class junos_bgp_neighbor(terrascript.Resource):
pass
class junos_bridge_domain(terrascript.Resource):
pass
class junos_chassis_cluster(terrascript.Resource):
pass
class junos_eventoptions_destination(terrascript.Resource):
pass
class junos_eventoptions_generate_event(terrascript.Resource):
pass
class junos_eventoptions_policy(terrascript.Resource):
pass
class junos_evpn(terrascript.Resource):
pass
class junos_firewall_filter(terrascript.Resource):
pass
class junos_firewall_policer(terrascript.Resource):
pass
class junos_forwardingoptions_sampling_instance(terrascript.Resource):
pass
class junos_generate_route(terrascript.Resource):
pass
class junos_group_dual_system(terrascript.Resource):
pass
class junos_interface(terrascript.Resource):
pass
class junos_interface_logical(terrascript.Resource):
pass
class junos_interface_physical(terrascript.Resource):
pass
class junos_interface_st0_unit(terrascript.Resource):
pass
class junos_null_commit_file(terrascript.Resource):
pass
class junos_ospf(terrascript.Resource):
pass
class junos_ospf_area(terrascript.Resource):
pass
class junos_policyoptions_as_path(terrascript.Resource):
pass
class junos_policyoptions_as_path_group(terrascript.Resource):
pass
class junos_policyoptions_community(terrascript.Resource):
pass
class junos_policyoptions_policy_statement(terrascript.Resource):
pass
class junos_policyoptions_prefix_list(terrascript.Resource):
pass
class junos_rib_group(terrascript.Resource):
pass
class junos_routing_instance(terrascript.Resource):
pass
class junos_routing_options(terrascript.Resource):
pass
class junos_security(terrascript.Resource):
pass
class junos_security_address_book(terrascript.Resource):
pass
class junos_security_dynamic_address_feed_server(terrascript.Resource):
pass
class junos_security_dynamic_address_name(terrascript.Resource):
pass
class junos_security_global_policy(terrascript.Resource):
pass
class junos_security_idp_custom_attack(terrascript.Resource):
pass
class junos_security_idp_custom_attack_group(terrascript.Resource):
pass
class junos_security_idp_policy(terrascript.Resource):
pass
class junos_security_ike_gateway(terrascript.Resource):
pass
class junos_security_ike_policy(terrascript.Resource):
pass
class junos_security_ike_proposal(terrascript.Resource):
pass
class junos_security_ipsec_policy(terrascript.Resource):
pass
class junos_security_ipsec_proposal(terrascript.Resource):
pass
class junos_security_ipsec_vpn(terrascript.Resource):
pass
class junos_security_log_stream(terrascript.Resource):
pass
class junos_security_nat_destination(terrascript.Resource):
pass
class junos_security_nat_destination_pool(terrascript.Resource):
pass
class junos_security_nat_source(terrascript.Resource):
pass
class junos_security_nat_source_pool(terrascript.Resource):
pass
class junos_security_nat_static(terrascript.Resource):
pass
class junos_security_policy(terrascript.Resource):
pass
class junos_security_policy_tunnel_pair_policy(terrascript.Resource):
pass
class junos_security_screen(terrascript.Resource):
pass
class junos_security_screen_whitelist(terrascript.Resource):
pass
class junos_security_utm_custom_url_category(terrascript.Resource):
pass
class junos_security_utm_custom_url_pattern(terrascript.Resource):
pass
class junos_security_utm_policy(terrascript.Resource):
pass
class junos_security_utm_profile_web_filtering_juniper_enhanced(terrascript.Resource):
pass
class junos_security_utm_profile_web_filtering_juniper_local(terrascript.Resource):
pass
class junos_security_utm_profile_web_filtering_websense_redirect(terrascript.Resource):
pass
class junos_security_zone(terrascript.Resource):
pass
class junos_security_zone_book_address(terrascript.Resource):
pass
class junos_security_zone_book_address_set(terrascript.Resource):
pass
class junos_services(terrascript.Resource):
pass
class junos_services_advanced_anti_malware_policy(terrascript.Resource):
pass
class junos_services_flowmonitoring_vipfix_template(terrascript.Resource):
pass
class junos_services_proxy_profile(terrascript.Resource):
pass
class junos_services_rpm_probe(terrascript.Resource):
pass
class junos_services_security_intelligence_policy(terrascript.Resource):
pass
class junos_services_security_intelligence_profile(terrascript.Resource):
pass
class junos_services_ssl_initiation_profile(terrascript.Resource):
pass
class junos_services_user_identification_ad_access_domain(terrascript.Resource):
pass
class junos_services_user_identification_device_identity_profile(terrascript.Resource):
pass
class junos_snmp(terrascript.Resource):
pass
class junos_snmp_clientlist(terrascript.Resource):
pass
class junos_snmp_community(terrascript.Resource):
pass
class junos_snmp_view(terrascript.Resource):
pass
class junos_static_route(terrascript.Resource):
pass
class junos_switch_options(terrascript.Resource):
pass
class junos_system(terrascript.Resource):
pass
class junos_system_login_class(terrascript.Resource):
pass
class junos_system_login_user(terrascript.Resource):
pass
class junos_system_ntp_server(terrascript.Resource):
pass
class junos_system_radius_server(terrascript.Resource):
pass
class junos_system_root_authentication(terrascript.Resource):
pass
class junos_system_syslog_file(terrascript.Resource):
pass
class junos_system_syslog_host(terrascript.Resource):
pass
class junos_vlan(terrascript.Resource):
pass
| # terrascript/junos/r.py
# Automatically generated by tools/makecode.py ()
import warnings
warnings.warn(
"using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2
)
import terrascript
class junos_aggregate_route(terrascript.Resource):
pass
class junos_application(terrascript.Resource):
pass
class junos_application_set(terrascript.Resource):
pass
class junos_bgp_group(terrascript.Resource):
pass
class junos_bgp_neighbor(terrascript.Resource):
pass
class junos_bridge_domain(terrascript.Resource):
pass
class junos_chassis_cluster(terrascript.Resource):
pass
class junos_eventoptions_destination(terrascript.Resource):
pass
class junos_eventoptions_generate_event(terrascript.Resource):
pass
class junos_eventoptions_policy(terrascript.Resource):
pass
class junos_evpn(terrascript.Resource):
pass
class junos_firewall_filter(terrascript.Resource):
pass
class junos_firewall_policer(terrascript.Resource):
pass
class junos_forwardingoptions_sampling_instance(terrascript.Resource):
pass
class junos_generate_route(terrascript.Resource):
pass
class junos_group_dual_system(terrascript.Resource):
pass
class junos_interface(terrascript.Resource):
pass
class junos_interface_logical(terrascript.Resource):
pass
class junos_interface_physical(terrascript.Resource):
pass
class junos_interface_st0_unit(terrascript.Resource):
pass
class junos_null_commit_file(terrascript.Resource):
pass
class junos_ospf(terrascript.Resource):
pass
class junos_ospf_area(terrascript.Resource):
pass
class junos_policyoptions_as_path(terrascript.Resource):
pass
class junos_policyoptions_as_path_group(terrascript.Resource):
pass
class junos_policyoptions_community(terrascript.Resource):
pass
class junos_policyoptions_policy_statement(terrascript.Resource):
pass
class junos_policyoptions_prefix_list(terrascript.Resource):
pass
class junos_rib_group(terrascript.Resource):
pass
class junos_routing_instance(terrascript.Resource):
pass
class junos_routing_options(terrascript.Resource):
pass
class junos_security(terrascript.Resource):
pass
class junos_security_address_book(terrascript.Resource):
pass
class junos_security_dynamic_address_feed_server(terrascript.Resource):
pass
class junos_security_dynamic_address_name(terrascript.Resource):
pass
class junos_security_global_policy(terrascript.Resource):
pass
class junos_security_idp_custom_attack(terrascript.Resource):
pass
class junos_security_idp_custom_attack_group(terrascript.Resource):
pass
class junos_security_idp_policy(terrascript.Resource):
pass
class junos_security_ike_gateway(terrascript.Resource):
pass
class junos_security_ike_policy(terrascript.Resource):
pass
class junos_security_ike_proposal(terrascript.Resource):
pass
class junos_security_ipsec_policy(terrascript.Resource):
pass
class junos_security_ipsec_proposal(terrascript.Resource):
pass
class junos_security_ipsec_vpn(terrascript.Resource):
pass
class junos_security_log_stream(terrascript.Resource):
pass
class junos_security_nat_destination(terrascript.Resource):
pass
class junos_security_nat_destination_pool(terrascript.Resource):
pass
class junos_security_nat_source(terrascript.Resource):
pass
class junos_security_nat_source_pool(terrascript.Resource):
pass
class junos_security_nat_static(terrascript.Resource):
pass
class junos_security_policy(terrascript.Resource):
pass
class junos_security_policy_tunnel_pair_policy(terrascript.Resource):
pass
class junos_security_screen(terrascript.Resource):
pass
class junos_security_screen_whitelist(terrascript.Resource):
pass
class junos_security_utm_custom_url_category(terrascript.Resource):
pass
class junos_security_utm_custom_url_pattern(terrascript.Resource):
pass
class junos_security_utm_policy(terrascript.Resource):
pass
class junos_security_utm_profile_web_filtering_juniper_enhanced(terrascript.Resource):
pass
class junos_security_utm_profile_web_filtering_juniper_local(terrascript.Resource):
pass
class junos_security_utm_profile_web_filtering_websense_redirect(terrascript.Resource):
pass
class junos_security_zone(terrascript.Resource):
pass
class junos_security_zone_book_address(terrascript.Resource):
pass
class junos_security_zone_book_address_set(terrascript.Resource):
pass
class junos_services(terrascript.Resource):
pass
class junos_services_advanced_anti_malware_policy(terrascript.Resource):
pass
class junos_services_flowmonitoring_vipfix_template(terrascript.Resource):
pass
class junos_services_proxy_profile(terrascript.Resource):
pass
class junos_services_rpm_probe(terrascript.Resource):
pass
class junos_services_security_intelligence_policy(terrascript.Resource):
pass
class junos_services_security_intelligence_profile(terrascript.Resource):
pass
class junos_services_ssl_initiation_profile(terrascript.Resource):
pass
class junos_services_user_identification_ad_access_domain(terrascript.Resource):
pass
class junos_services_user_identification_device_identity_profile(terrascript.Resource):
pass
class junos_snmp(terrascript.Resource):
pass
class junos_snmp_clientlist(terrascript.Resource):
pass
class junos_snmp_community(terrascript.Resource):
pass
class junos_snmp_view(terrascript.Resource):
pass
class junos_static_route(terrascript.Resource):
pass
class junos_switch_options(terrascript.Resource):
pass
class junos_system(terrascript.Resource):
pass
class junos_system_login_class(terrascript.Resource):
pass
class junos_system_login_user(terrascript.Resource):
pass
class junos_system_ntp_server(terrascript.Resource):
pass
class junos_system_radius_server(terrascript.Resource):
pass
class junos_system_root_authentication(terrascript.Resource):
pass
class junos_system_syslog_file(terrascript.Resource):
pass
class junos_system_syslog_host(terrascript.Resource):
pass
class junos_vlan(terrascript.Resource):
pass
| en | 0.675166 | # terrascript/junos/r.py # Automatically generated by tools/makecode.py () | 1.587863 | 2 |
app/lib/models/activity.py | FrankGrimm/omen | 4 | 6619003 | """
Model for generic activities (e.g. change events, comments).
"""
import logging
import json
from sqlalchemy import Column, Integer, String, desc, func, ForeignKey, and_, or_, not_
from sqlalchemy.orm import relationship
from sqlalchemy.types import DateTime
from app.lib.database_internals import Base
import app.lib.database as db
class Activity(Base):
    """Generic activity/audit event (changes, comments, notes).

    Each row records who (``owner``) did what (``scope`` / ``content``) to
    which entity (``target``).  ``target`` holds the encoded reference
    produced by the referenced object's ``activity_target()`` method.
    """

    __tablename__ = "activity"

    event_id = Column(Integer, primary_key=True)
    owner_id = Column(Integer, ForeignKey("users.uid"))
    owner = relationship("User", lazy="joined")

    # timestamps are maintained server-side by the database
    created = Column(DateTime(timezone=True), server_default=func.now())
    edited = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now())

    # the target column encodes which item (dataset, sample, ...) this activity event refers to
    target = Column(String, nullable=False)

    # the scope may be used to restrict an activitiy, e.g. to switch a comment
    # between public/curator-only visibility
    scope = Column(String, nullable=False, default="")

    content = Column(String, nullable=False)

    def load_target(self, dbsession):
        """Resolve ``self.target`` back into the referenced User or Dataset.

        Returns None for a null target and a diagnostic string when the
        target prefix is not recognised.
        """
        if self.target is None:
            return None
        if self.target.startswith(db.User.activity_prefix()):
            return db.User.by_id(dbsession, int(self.target[len(db.User.activity_prefix()) :]), no_error=True)
        if self.target.startswith(db.Dataset.activity_prefix()):
            return db.Dataset.by_id(dbsession, int(self.target[len(db.Dataset.activity_prefix()) :]), no_error=True)
        return "unknown target %s" % self.target

    @staticmethod
    def user_history(dbsession, owner, scope_in=None, limit=None):
        """Events relevant to *owner*: their own actions, events targeting
        them, and events on datasets they curate/own (minus private notes
        and tag renames).

        :param owner: User instance or numeric uid
        :param scope_in: optional whitelist of scopes
        :param limit: optional maximum number of rows (newest first)
        """
        qry = dbsession.query(Activity)
        if owner is None:
            raise Exception("Activity::user_history requires a non-null user object or ID")
        if isinstance(owner, int):
            owner = db.User.by_id(dbsession, owner)

        user_target_filter = owner.activity_target()
        other_accessible_datasets = db.datasets.accessible_datasets(
            dbsession, owner, include_owned=False, has_role=["curator", "owner"]
        )
        other_accessible_datasets = [dataset.activity_target() for dsid, dataset in other_accessible_datasets.items()]

        # low-signal scopes that are never shown in a user's history
        excluded_scopes = ["event", "upload_file"]

        if isinstance(owner, db.User):
            qry = qry.filter(
                or_(
                    Activity.target == user_target_filter,
                    Activity.owner == owner,
                    # dataset events are visible to curators/owners, except
                    # per-item notes and tag renames
                    and_(Activity.target.in_(other_accessible_datasets),
                         not_(Activity.scope == "comment_note"),
                         not_(Activity.scope == "rename_tag")),
                )
            )
        else:
            raise Exception("Activity::user_history requires the owner argument by of type User or int")

        qry = qry.filter(not_(Activity.scope.in_(excluded_scopes)))

        if scope_in is not None and len(scope_in) > 0:
            qry = qry.filter(Activity.scope.in_(scope_in))

        qry = qry.order_by(Activity.event_id.desc())
        if limit is not None:
            qry = qry.limit(limit)
        return qry.all()

    def formatted_create(self):
        """Creation date as ``YYYY-MM-DD`` (None when not yet persisted)."""
        if self.created is None:
            return None
        return self.created.strftime("%Y-%m-%d")

    @staticmethod
    def for_user(dbsession, target_user, limit=20):
        """
        Gather relevant activity elements to display in the feed on the homepage.

        Consecutive duplicates (same owner, scope and target) are collapsed.
        Returns a list of ``[activity, resolved_target]`` pairs.
        """
        user_history = Activity.user_history(dbsession, target_user, limit=limit)

        result_history = []
        # filter duplicate events of the same type
        for activity in user_history:
            if activity is None:
                continue
            if len(result_history) == 0:
                result_history.append(activity)
                continue
            previous_activity = result_history[-1]
            if (
                previous_activity.owner == activity.owner
                and previous_activity.scope == activity.scope
                and previous_activity.target == activity.target
            ):
                continue
            result_history.append(activity)

        result_history = [[activity, activity.load_target(dbsession)] for activity in result_history]
        return result_history

    @staticmethod
    def by_owner(dbsession, owner, scope_in=None, limit=None):
        """Events created by *owner* (User instance or numeric uid),
        newest first, optionally filtered by scope and limited."""
        qry = dbsession.query(Activity)
        if owner is None:
            raise Exception("Activity::by_owner requires a non-null user object or ID")
        if isinstance(owner, db.User):
            qry = qry.filter(Activity.owner == owner)
        elif isinstance(owner, int):
            # bug fix: `owner` is a plain uid in this branch -- the original
            # evaluated `owner.uid`, which raises AttributeError on an int
            qry = qry.filter(Activity.owner_id == owner)
        else:
            raise Exception("Activity::by_owner requires the owner argument by of type User or int")
        if scope_in is not None and len(scope_in) > 0:
            qry = qry.filter(Activity.scope.in_(scope_in))
        qry = qry.order_by(Activity.event_id.desc())
        if limit is not None:
            qry = qry.limit(limit)
        return qry.all()

    @staticmethod
    def by_target(dbsession, target, scope_in=None, like_target=False):
        """Events referring to *target*; with ``like_target=True`` the
        target string is treated as an SQL LIKE pattern."""
        qry = dbsession.query(Activity)
        if like_target:
            qry = qry.filter(Activity.target.like(target))
        else:
            qry = qry.filter_by(target=target)
        if scope_in is not None and len(scope_in) > 0:
            qry = qry.filter(Activity.scope.in_(scope_in))
        qry = qry.order_by(desc(Activity.event_id))
        return qry.all()

    @staticmethod
    def to_activity_target(target):
        """Normalise *target* to its encoded string form."""
        if target is None:
            raise ValueError("target cannot be null")
        if isinstance(target, str):
            return target
        try:
            return target.activity_target()
        except AttributeError:
            # objects without activity_target() fall back to str()
            return str(target)

    @staticmethod
    def create(dbsession, owner, target, scope, content):
        """Create, persist (flush) and return a new Activity row.

        Non-string *content* is serialised to JSON before storage.
        """
        target = Activity.to_activity_target(target)
        if not isinstance(content, str):
            content = json.dumps(content)

        log_activity = Activity()
        log_activity.owner = owner
        log_activity.target = target
        log_activity.scope = scope
        log_activity.content = content
        dbsession.add(log_activity)
        logging.debug("activity created for target %s", target)
        dbsession.flush()
        return log_activity

    def __str__(self):
        return "[Activity #%s (%s) %s => %s]" % (self.event_id, self.owner, self.target, self.scope)
| """
Model for generic activities (e.g. change events, comments).
"""
import logging
import json
from sqlalchemy import Column, Integer, String, desc, func, ForeignKey, and_, or_, not_
from sqlalchemy.orm import relationship
from sqlalchemy.types import DateTime
from app.lib.database_internals import Base
import app.lib.database as db
class Activity(Base):
__tablename__ = "activity"
event_id = Column(Integer, primary_key=True)
owner_id = Column(Integer, ForeignKey("users.uid"))
owner = relationship("User", lazy="joined")
created = Column(DateTime(timezone=True), server_default=func.now())
edited = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now())
# the target column encodes which item (dataset, sample, ...) this activity event refers to
target = Column(String, nullable=False)
# the scope may be used to restrict an activitiy, e.g. to switch a comment
# between public/curator-only visibility
scope = Column(String, nullable=False, default="")
content = Column(String, nullable=False)
def load_target(self, dbsession):
if self.target is None:
return None
if self.target.startswith(db.User.activity_prefix()):
return db.User.by_id(dbsession, int(self.target[len(db.User.activity_prefix()) :]), no_error=True)
if self.target.startswith(db.Dataset.activity_prefix()):
return db.Dataset.by_id(dbsession, int(self.target[len(db.Dataset.activity_prefix()) :]), no_error=True)
return "unknown target %s" % self.target
@staticmethod
def user_history(dbsession, owner, scope_in=None, limit=None):
qry = dbsession.query(Activity)
if owner is None:
raise Exception("Activity::user_history requires a non-null user object or ID")
if isinstance(owner, int):
owner = db.User.by_id(dbsession, owner)
user_target_filter = owner.activity_target()
other_accessible_datasets = db.datasets.accessible_datasets(
dbsession, owner, include_owned=False, has_role=["curator", "owner"]
)
other_accessible_datasets = [dataset.activity_target() for dsid, dataset in other_accessible_datasets.items()]
excluded_scopes = ["event", "upload_file"]
if isinstance(owner, db.User):
qry = qry.filter(
or_(
Activity.target == user_target_filter,
Activity.owner == owner,
and_(Activity.target.in_(other_accessible_datasets),
not_(Activity.scope == "comment_note"),
not_(Activity.scope == "rename_tag")),
)
)
else:
raise Exception("Activity::user_history requires the owner argument by of type User or int")
qry = qry.filter(not_(Activity.scope.in_(excluded_scopes)))
# # hides imports that did not affect the dataset
# if activity.scope == "import_complete" and \
# activity.content is not None and \
# activity.content == "total: 0, merged: 0, skipped: 0":
# continue
if scope_in is not None and len(scope_in) > 0:
qry = qry.filter(Activity.scope.in_(scope_in))
qry = qry.order_by(Activity.event_id.desc())
if limit is not None:
qry = qry.limit(limit)
return qry.all()
def formatted_create(self):
if self.created is None:
return None
return self.created.strftime("%Y-%m-%d")
@staticmethod
def for_user(dbsession, target_user, limit=20):
"""
Gather relevant activity elements to display in the feed on the homepage.
"""
user_history = Activity.user_history(dbsession, target_user, limit=limit)
result_history = []
# filter duplicate events of the same type
for activity in user_history:
if activity is None:
continue
if len(result_history) == 0:
result_history.append(activity)
continue
previous_activity = result_history[-1]
if (
previous_activity.owner == activity.owner
and previous_activity.scope == activity.scope
and previous_activity.target == activity.target
):
continue
result_history.append(activity)
result_history = [[activity, activity.load_target(dbsession)] for activity in result_history]
return result_history
@staticmethod
def by_owner(dbsession, owner, scope_in=None, limit=None):
qry = dbsession.query(Activity)
if owner is None:
raise Exception("Activity::by_owner requires a non-null user object or ID")
if isinstance(owner, db.User):
qry = qry.filter(Activity.owner == owner)
elif isinstance(owner, int):
qry = qry.filter(Activity.owner_id == owner.uid)
else:
raise Exception("Activity::by_owner requires the owner argument by of type User or int")
if scope_in is not None and len(scope_in) > 0:
qry = qry.filter(Activity.scope.in_(scope_in))
qry = qry.order_by(Activity.event_id.desc())
if limit is not None:
qry = qry.limit(limit)
return qry.all()
@staticmethod
def by_target(dbsession, target, scope_in=None, like_target=False):
qry = dbsession.query(Activity)
if like_target:
qry = qry.filter(Activity.target.like(target))
else:
qry = qry.filter_by(target=target)
if scope_in is not None and len(scope_in) > 0:
qry = qry.filter(Activity.scope.in_(scope_in))
qry = qry.order_by(desc(Activity.event_id))
return qry.all()
@staticmethod
def to_activity_target(target):
if target is None:
raise ValueError("target cannot be null")
if isinstance(target, str):
return target
try:
return target.activity_target()
except AttributeError:
return str(target)
@staticmethod
def create(dbsession, owner, target, scope, content):
target = Activity.to_activity_target(target)
if not isinstance(content, str):
content = json.dumps(content)
log_activity = Activity()
log_activity.owner = owner
log_activity.target = target
log_activity.scope = scope
log_activity.content = content
dbsession.add(log_activity)
logging.debug("activity created for target %s", target)
dbsession.flush()
return log_activity
def __str__(self):
return "[Activity #%s (%s) %s => %s]" % (self.event_id, self.owner, self.target, self.scope)
| en | 0.770444 | Model for generic activities (e.g. change events, comments). # the target column encodes which item (dataset, sample, ...) this activity event refers to # the scope may be used to restrict an activitiy, e.g. to switch a comment # between public/curator-only visibility # # hides imports that did not affect the dataset # if activity.scope == "import_complete" and \ # activity.content is not None and \ # activity.content == "total: 0, merged: 0, skipped: 0": # continue Gather relevant activity elements to display in the feed on the homepage. # filter duplicate events of the same type #%s (%s) %s => %s]" % (self.event_id, self.owner, self.target, self.scope) | 2.501149 | 3 |
Code/data_structures/list/doubly_linked_list/doubly_linked_list.py | Kevinjadia/Hacktoberfest_DSA_2021 | 4 | 6619004 | <gh_stars>1-10
# Creating a node class
class Node:
    """A single node of a doubly linked list."""

    def __init__(self, data):
        self.data = data  # payload stored in the node
        self.next = None  # link to the following node (None = end of list)
        self.prev = None  # link to the preceding node (None = start of list)


# Creating a doubly linked list class
class DoublyLinkedList:
    """Doubly linked list with O(1) push/pop at both ends.

    ``head`` and ``tail`` are both None when the list is empty.  The
    peek/pop methods print "List is empty" and return None on an empty
    list, matching the original behaviour.
    """

    def __init__(self):
        self.head = None
        self.tail = None

    def push_front(self, new_data):
        """Insert *new_data* before the current first element."""
        new_node = Node(new_data)
        new_node.next = self.head
        new_node.prev = None
        if self.head is not None:
            self.head.prev = new_node
        else:
            # list was empty -> the new node is also the tail
            self.tail = new_node
        self.head = new_node

    def push_back(self, new_data):
        """Append *new_data* after the current last element."""
        new_node = Node(new_data)
        new_node.prev = self.tail
        new_node.next = None
        if self.tail is not None:
            self.tail.next = new_node
        else:
            # list was empty -> the new node is also the head
            self.head = new_node
        self.tail = new_node

    def peek_front(self):
        """Return the first element without removing it (None if empty)."""
        if self.head is None:
            print("List is empty")
            return None
        return self.head.data

    def peek_back(self):
        """Return the last element without removing it (None if empty)."""
        if self.tail is None:
            print("List is empty")
            return None
        return self.tail.data

    def pop_front(self):
        """Remove and return the first element (None if empty).

        Bug fix: the original dereferenced ``temp.next.prev`` unconditionally,
        crashing on a single-element list, and never cleared ``tail``.
        """
        if self.head is None:
            print("List is empty")
            return None
        temp = self.head
        self.head = temp.next
        if self.head is not None:
            self.head.prev = None
        else:
            # list became empty
            self.tail = None
        temp.next = None
        return temp.data

    def pop_back(self):
        """Remove and return the last element (None if empty).

        Bug fix: symmetric to pop_front -- the original crashed on a
        single-element list and never cleared ``head``.
        """
        if self.tail is None:
            print("List is empty")
            return None
        temp = self.tail
        self.tail = temp.prev
        if self.tail is not None:
            self.tail.next = None
        else:
            # list became empty
            self.head = None
        temp.prev = None
        return temp.data

    def insert_after(self, temp_node, new_data):
        """Insert *new_data* immediately after *temp_node*."""
        if temp_node is None:
            print("Given node is empty")
            return
        new_node = Node(new_data)
        new_node.next = temp_node.next
        new_node.prev = temp_node
        temp_node.next = new_node
        if new_node.next is not None:
            new_node.next.prev = new_node
        if temp_node is self.tail:
            # appended after the old tail -> new node becomes the tail
            self.tail = new_node

    def insert_before(self, temp_node, new_data):
        """Insert *new_data* immediately before *temp_node*.

        Bug fix: the original never constructed ``new_node`` and raised
        NameError on first use.
        """
        if temp_node is None:
            print("Given node is empty")
            return
        new_node = Node(new_data)
        new_node.prev = temp_node.prev
        new_node.next = temp_node
        temp_node.prev = new_node
        if new_node.prev is not None:
            new_node.prev.next = new_node
        if temp_node is self.head:
            # inserted before the old head -> new node becomes the head
            self.head = new_node
class Node:
def __init__(self, data):
self.data = data #adding an element to the node
self.next = None # Initally this node will not be linked with any other node
self.prev = None # It will not be linked in either direction
# Creating a doubly linked list class
class DoublyLinkedList:
def __init__(self):
self.head = None # Initally there are no elements in the list
self.tail = None
def push_front(self, new_data): # Adding an element before the first element
new_node = Node(new_data) # creating a new node with the desired value
new_node.next = self.head # newly created node's next pointer will refer to the old head
if self.head != None: # Checks whether list is empty or not
self.head.prev = new_node # old head's previous pointer will refer to newly created node
self.head = new_node # new node becomes the new head
new_node.prev = None
else: # If the list is empty, make new node both head and tail
self.head = new_node
self.tail = new_node
new_node.prev = None # There's only one element so both pointers refer to null
def push_back(self, new_data): # Adding an element after the last element
new_node = Node(new_data)
new_node.prev = self.tail
if self.tail == None: # checks whether the list is empty, if so make both head and tail as new node
self.head = new_node
self.tail = new_node
new_node.next = None # the first element's previous pointer has to refer to null
else: # If list is not empty, change pointers accordingly
self.tail.next = new_node
new_node.next = None
self.tail = new_node # Make new node the new tail
def peek_front(self): # returns first element
if self.head == None: # checks whether list is empty or not
print("List is empty")
else:
return self.head.data
def peek_back(self): # returns last element
if self.tail == None: # checks whether list is empty or not
print("List is empty")
else:
return self.tail.data
def pop_front(self): # removes and returns the first element
if self.head == None:
print("List is empty")
else:
temp = self.head
temp.next.prev = None # remove previous pointer referring to old head
self.head = temp.next # make second element the new head
temp.next = None # remove next pointer referring to new head
return temp.data
def pop_back(self): # removes and returns the last element
if self.tail == None:
print("List is empty")
else:
temp = self.tail
temp.prev.next = None # removes next pointer referring to old tail
self.tail = temp.prev # make second to last element the new tail
temp.prev = None # remove previous pointer referring to new tail
return temp.data
def insert_after(self, temp_node, new_data): # Inserting a new node after a given node
if temp_node == None:
print("Given node is empty")
if temp_node != None:
new_node = Node(new_data)
new_node.next = temp_node.next
temp_node.next = new_node
new_node.prev = temp_node
if new_node.next != None:
new_node.next.prev = new_node
if temp_node == self.tail: # checks whether new node is being added to the last element
self.tail = new_node # makes new node the new tail
def insert_before(self, temp_node, new_data): # Inserting a new node before a given node
if temp_node == None:
print("Given node is empty")
if temp_node != None:
new_node.prev = temp_node.prev
temp_node.prev = new_node
new_node.next = temp_node
if new_node.prev != None:
new_node.prev.next = new_node
if temp_node == self.head: # checks whether new node is being added before the first element
self.head = new_node # makes new node the new head | en | 0.86587 | # Creating a node class #adding an element to the node # Initally this node will not be linked with any other node # It will not be linked in either direction # Creating a doubly linked list class # Initally there are no elements in the list # Adding an element before the first element # creating a new node with the desired value # newly created node's next pointer will refer to the old head # Checks whether list is empty or not # old head's previous pointer will refer to newly created node # new node becomes the new head # If the list is empty, make new node both head and tail # There's only one element so both pointers refer to null # Adding an element after the last element # checks whether the list is empty, if so make both head and tail as new node # the first element's previous pointer has to refer to null # If list is not empty, change pointers accordingly # Make new node the new tail # returns first element # checks whether list is empty or not # returns last element # checks whether list is empty or not # removes and returns the first element # remove previous pointer referring to old head # make second element the new head # remove next pointer referring to new head # removes and returns the last element # removes next pointer referring to old tail # make second to last element the new tail # remove previous pointer referring to new tail # Inserting a new node after a given node # checks whether new node is being added to the last element # makes new node the new tail # Inserting a new node before a given node # checks whether new node is being added before the first element # makes new node the new head | 4.614446 | 5 |
archiv/management/commands/split_csv.py | acdh-oeaw/fwm | 0 | 6619005 | from django.core.management.base import BaseCommand
# imports for custom things
from tqdm import tqdm
import pandas as pd
SOURCE_FILE = './media/archiv/data/FWM_Daten.xlsx'
OUT_DIR = './archiv/data/'
class Command(BaseCommand):
    """Management command that splits a multi-sheet Excel workbook
    (SOURCE_FILE) into one CSV per sheet, written to OUT_DIR and named
    after the sheet."""

    # typo fixed in the user-facing help text: "Execl" -> "Excel"
    help = "Splits Excel with multiple sheets into CSV files"

    def handle(self, *args, **kwargs):
        # open the workbook once to enumerate its sheet names
        excel = pd.ExcelFile(SOURCE_FILE)
        for sheet_name in tqdm(excel.sheet_names, total=len(excel.sheet_names)):
            # read each sheet as a DataFrame and dump it as CSV (no index column)
            df = pd.read_excel(SOURCE_FILE, sheet_name=sheet_name)
            df.to_csv(f'{OUT_DIR}/{sheet_name}.csv', index=False)
| from django.core.management.base import BaseCommand
# imports for custom things
from tqdm import tqdm
import pandas as pd
SOURCE_FILE = './media/archiv/data/FWM_Daten.xlsx'
OUT_DIR = './archiv/data/'
class Command(BaseCommand):
help = "Splits Execl with multiple sheets into CSV files"
def handle(self, *args, **kwargs):
excel = pd.ExcelFile(SOURCE_FILE)
for x in tqdm(excel.sheet_names, total=len(excel.sheet_names)):
df = pd.read_excel(SOURCE_FILE, sheet_name=x)
df.to_csv(f'{OUT_DIR}/{x}.csv', index=False)
| en | 0.671807 | # imports for custom things | 1.963908 | 2 |
EXC/CW1/task5/reducer.py | easyCZ/UoE-Projects | 0 | 6619006 | <filename>EXC/CW1/task5/reducer.py
#!/usr/bin/python
# reducer.py
import sys
from collections import Counter
from ast import literal_eval
# Running state for the streaming reducer: the key currently being
# aggregated and its accumulated Counter (None until the first record).
# Dead initialisations removed: the original also assigned
# `counter = Counter()` (immediately overwritten by None below) and an
# unused `counter_size = 0`.
last_key = ""
counter = None
def write(key, counter):
    """
    Emit one "value<TAB>key second" line per entry of *counter* to stdout.

    Does nothing when *key* is empty (i.e. before the first input record).
    Fixes: guard on the *key* parameter instead of the module global
    ``last_key`` (equivalent at both call sites, but self-contained), and
    use ``items()`` -- ``iteritems()`` does not exist in Python 3.
    """
    if not key:
        return
    for (second, value) in counter.items():
        print("{0}\t{1} {2}".format(value, key, second))
# Streaming reduce over the mapper output read from stdin.  Each line is
# "<key>\t<dict-literal>".  Assumes lines for the same key are contiguous
# (Hadoop-streaming sorted input -- TODO confirm), so we aggregate into
# `counter` until the key changes, then flush the finished group.
for line in sys.stdin:
    line = line.strip()
    key, values = line.split('\t', 1)
    # the mapper emits a dict literal; literal_eval parses it without eval()
    values = dict(literal_eval(values))
    if key != last_key:
        # key changed: flush the previous group (no-op for the first line,
        # since write() emits nothing for an empty key)
        write(last_key, counter)
        last_key = key
        counter = Counter(values)
    else:
        counter.update(values)
# flush the final group after EOF
write(last_key, counter)
#!/usr/bin/python
# reducer.py
import sys
from collections import Counter
from ast import literal_eval
counter = Counter()
counter_size = 0
last_key = ""
counter = None
def write(key, counter):
"""
Write data into stdout if we have iterated something
"""
if not last_key: return
for (second, value) in counter.iteritems():
print("{0}\t{1} {2}".format(value, key, second))
for line in sys.stdin:
line = line.strip()
key, values = line.split('\t', 1)
values = dict(literal_eval(values))
if key != last_key:
write(last_key, counter)
last_key = key
counter = Counter(values)
else:
counter.update(values)
write(last_key, counter) | en | 0.799217 | #!/usr/bin/python # reducer.py Write data into stdout if we have iterated something | 3.047465 | 3 |
src/esclient.py | KaiPeng21/AWS-Serverless-ESML | 4 | 6619007 | <filename>src/esclient.py
import requests
from http import HTTPStatus
import json
class ESClientBase:
    """Thin REST client bound to one Elasticsearch index / doc-type pair.

    Subclasses provide the index name, document type and mapping; this base
    class wraps the HTTP calls for index/mapping management, document CRUD,
    bulk operations and search.

    NOTE(review): HTTP responses are validated with ``assert``, which is
    stripped under ``python -O``; kept as-is so callers that rely on
    AssertionError keep working.
    """

    def __init__(self, host : str, port : int, index : str, doc_type : str, mapping : dict):
        self._host = host
        self._port = port
        self._es_endpoint = f"{host}:{port}"
        self._index = index
        self._doc_type = doc_type
        self._mapping = mapping

        # prepend a scheme when the caller passed a bare hostname:
        # port 443 implies HTTPS, everything else defaults to HTTP
        if self._es_endpoint[:4] != "http":
            if self._port == 443:
                self._es_endpoint = f"https://{self._es_endpoint}"
            else:
                self._es_endpoint = f"http://{self._es_endpoint}"

    @property
    def index(self):
        """Name of the Elasticsearch index this client operates on."""
        return self._index

    @property
    def doc_type(self):
        """Document type used for all requests."""
        return self._doc_type

    @property
    def mapping(self):
        """Mapping dict installed by :meth:`put_mapping`."""
        return self._mapping

    def put_index(self, ignore_exist_error=True) -> requests.Response:
        """ Add an elasticsearch index by sending a put request

        Keyword Arguments:
            ignore_exist_error {bool} -- ignore index exist error (default: {True})

        Returns:
            requests.Response -- put index http response
        """
        res = requests.put(url=f"{self._es_endpoint}/{self._index}")
        if ignore_exist_error:
            # ES answers 400 (resource_already_exists) when the index exists
            assert res.status_code in [HTTPStatus.OK, HTTPStatus.BAD_REQUEST]
        else:
            assert HTTPStatus.OK == res.status_code
        return res

    def delete_index(self, ignore_nonexist_error=True) -> requests.Response:
        """ Delete an elasticsearch index by sending a delete request

        Keyword Arguments:
            ignore_nonexist_error {bool} -- ignore index not found error (default: {True})

        Returns:
            requests.Response -- delete index http response
        """
        res = requests.delete(url=f"{self._es_endpoint}/{self._index}")
        if ignore_nonexist_error:
            assert res.status_code in [HTTPStatus.OK, HTTPStatus.NOT_FOUND]
        else:
            assert HTTPStatus.OK == res.status_code
        return res

    def put_mapping(self) -> requests.Response:
        """ Add an elasticsearch mapping by sending a put request

        Returns:
            requests.Response -- put mapping http response
        """
        res = requests.put(url=f"{self._es_endpoint}/{self._index}/_mapping/{self._doc_type}", json=self._mapping)
        assert HTTPStatus.OK == res.status_code
        return res

    def get_document(self, pid : str) -> requests.Response:
        """ Retrieve document by sending a get request

        Arguments:
            pid {str} -- primary id

        Returns:
            requests.Response -- get document http response
        """
        res = requests.get(url=f"{self._es_endpoint}/{self._index}/{self._doc_type}/{pid}")
        assert HTTPStatus.OK == res.status_code
        return res

    def put_document(self, pid : str, document : dict) -> requests.Response:
        """ Add or overwrite a document by sending a put request

        Arguments:
            pid {str} -- primary id
            document {dict} -- document

        Returns:
            requests.Response -- put document http response
        """
        res = requests.put(url=f"{self._es_endpoint}/{self._index}/{self._doc_type}/{pid}", json=document)
        # Bug fix: Elasticsearch answers 201 Created only when the id is new
        # and 200 OK when an existing document is overwritten; the original
        # accepted only 201, so re-indexing an existing id raised
        # AssertionError.
        assert res.status_code in [HTTPStatus.OK, HTTPStatus.CREATED]
        return res

    def put_document_bulk(self, pid_list : list, document_list : list) -> requests.Response:
        """ Put multiple documents using batching

        Arguments:
            pid_list {list} -- list of primary ids
            document_list {list} -- list of documents

        Returns:
            requests.Response -- put request http response
        """
        assert len(pid_list) == len(document_list)
        # newline-delimited JSON: one action line + one source line per doc
        data_list = [
            "\n".join([
                json.dumps({ "create" : {"_id" : pid, "_type" : self._doc_type, "_index" : self._index} }),
                json.dumps(document)
            ]) for pid, document in zip(pid_list, document_list)
        ]
        data = "\n".join(data_list) + "\n"
        headers = {"Content-Type": "application/x-ndjson"}
        res = requests.post(url=f"{self._es_endpoint}/_bulk?pretty", data=data, headers=headers)
        # NOTE: _bulk returns 200 even when individual items fail; per-item
        # errors are reported inside the response body
        assert HTTPStatus.OK == res.status_code
        return res

    def delete_document(self, pid : str, ignore_nonexist_error=True) -> requests.Response:
        """ Delete document by sending a delete request

        Arguments:
            pid {str} -- Primary id

        Keyword Arguments:
            ignore_nonexist_error {bool} -- ignore document not found error (default: {True})

        Returns:
            requests.Response -- delete request http response
        """
        res = requests.delete(url=f"{self._es_endpoint}/{self._index}/{self._doc_type}/{pid}")
        if ignore_nonexist_error:
            assert res.status_code in [HTTPStatus.OK, HTTPStatus.NOT_FOUND]
        else:
            assert HTTPStatus.OK == res.status_code
        return res

    def delete_document_bulk(self, pid_list : list) -> requests.Response:
        """ Delete multiple documents using batching

        Arguments:
            pid_list {list} -- list of primary ids

        Returns:
            requests.Response -- post request http response
        """
        data_list = [
            json.dumps({ "delete" : {"_id" : pid, "_type" : self._doc_type, "_index" : self._index} })
            for pid in pid_list
        ]
        data = "\n".join(data_list) + "\n"
        headers = {"Content-Type": "application/x-ndjson"}
        res = requests.post(url=f"{self._es_endpoint}/_bulk?pretty", data=data, headers=headers)
        assert HTTPStatus.OK == res.status_code
        return res

    def delete_document_by_query(self, body : dict) -> requests.Response:
        """ Delete queried document

        Arguments:
            body {dict} -- query body

        Returns:
            requests.Response -- http response

        NOTE(review): only the hits returned by the initial search are
        deleted -- unless *body* sets a "size", that is the ES default page
        (10 documents); confirm callers pass a sized query when deleting more.
        """
        res = self.search_document(body=body)
        data = res.json()
        if data["hits"]["total"] > 0:
            pid_list = [document["_id"] for document in data["hits"]["hits"]]
            return self.delete_document_bulk(pid_list=pid_list)
        return res

    def search_document(self, body : dict) -> requests.Response:
        """ Search document in elasticsearch

        Arguments:
            body {dict} -- query body

        Returns:
            requests.Response -- search document http response
        """
        res = requests.get(url=f"{self._es_endpoint}/{self._index}/{self._doc_type}/_search", json=body)
        return res

    def query_all(self) -> requests.Response:
        """ Select all elements in the index

        Returns:
            requests.Response -- search document http response
        """
        query_param = {
            "query" : {
                "match_all" : {}
            }
        }
        res = requests.get(url=f"{self._es_endpoint}/{self._index}/{self._doc_type}/_search", json=query_param)
        assert HTTPStatus.OK == res.status_code
        return res
class TextfileDocument(ESClientBase):
    """Elasticsearch client specialised for full-text search over uploaded
    text files (index ``textfilesearch``, doc type ``textfile``)."""

    def __init__(self, host : str = "http://localhost", port : int = 9200, aws_region : str = "us-east-1"):
        # aws_region is stored but not referenced by this class itself
        # (presumably consumed by callers / AWS integration -- verify)
        self.aws_region = aws_region
        index = "textfilesearch"
        doc_type = "textfile"
        # schema: searchable title/content, exact-match extension, plus the
        # S3 location and size for retrieval
        mapping = {
            "properties" : {
                "title" : {
                    "type" : "text"
                },
                "extension" : {
                    "type" : "keyword"
                },
                "s3_url" : {
                    "type" : "text"
                },
                "filesize" : {
                    "type" : "integer"
                },
                "content" : {
                    "type" : "text"
                }
            }
        }
        return super().__init__(host, port, index, doc_type, mapping)

    def create_pid(self, s3_tuple : tuple) -> str:
        """ Get primary id from s3 bucket and object name

        Arguments:
            s3_tuple {tuple} -- tuple of (s3 bucket, object key, object size)

        Returns:
            str -- primary id, formatted as "<bucket>-<key>"
        """
        return "-".join(s3_tuple[:2])

    def create_doc_entry(self, title : str, extension : str, s3_tuple : tuple, content : str) -> dict:
        """ Create document entry

        Arguments:
            title {str} -- file title
            extension {str} -- file extension
            s3_tuple {tuple} -- tuple of (s3 bucket, object key, object size)
            content {str} -- document body

        Returns:
            dict -- textfile document matching this class's mapping
        """
        return {
            "title" : title,
            "extension" : extension,
            "filesize" : s3_tuple[2],
            "s3_url" : f"https://s3.amazonaws.com/{s3_tuple[0]}/{s3_tuple[1]}",
            "content" : content
        }

    def search_and_highlight_document(self, keywords : list, num_of_docs : int = 3, num_of_highlights : int = 3, highlight_fragment_size : int = 100) -> dict:
        """ Search document by keywords and returns searched highlights

        Arguments:
            keywords {list} -- list of strings to be searched

        Keyword Arguments:
            num_of_docs {int} -- max number of searched document (default: {3})
            num_of_highlights {int} -- number of highlight fragments (default: {3})
            highlight_fragment_size {int} -- chars display per highlight fragment (default: {100})

        Returns:
            requests.Response -- search response; its ``.json()`` has the form
            {
                "..." : ...,
                "hits": {
                    "total": n,
                    "max_score": x.xxxxxxx,
                    "hits": [
                        {
                            "_index" : "...",
                            "_type" : "...",
                            "_id" : "...",
                            "_score" : x.xxxxxxx,
                            "_source" : {...mapping...},
                            "highlight" : {
                                "content" : [xxx , xxx , xxx]
                            }
                        },
                    ]
                }
            }
        """
        # match keywords against both the body and the title
        body = {
            "from" : 0,
            "size" : num_of_docs,
            "query" : {
                "multi_match" : {
                    "query" : " ".join(keywords),
                    "fields" : ["content", "title"]
                }
            },
            "highlight" : {
                "number_of_fragments" : num_of_highlights,
                "fragment_size" : highlight_fragment_size,
                "fields" : {
                    "content" : {}
                }
            }
        }
        # debug trace of the generated query body
        print(f"search and highlight using body: {body}")
        res = self.search_document(body=body)
        return res
class ImagefileDocument(ESClientBase):
    """Elasticsearch client specialised for tag-based search over image
    files (index ``imagefilesearch``, doc type ``imagefile``)."""

    def __init__(self, host : str = "http://localhost", port : int = 9200, aws_region : str = "us-east-1"):
        # aws_region is stored but not referenced by this class itself
        # (presumably consumed by callers / AWS integration -- verify)
        self.aws_region = aws_region
        index = "imagefilesearch"
        doc_type = "imagefile"
        # schema: exact-match extension, searchable tags, plus the S3
        # location and size for retrieval
        mapping = {
            "properties" : {
                "extension" : {
                    "type" : "keyword"
                },
                "s3_url" : {
                    "type" : "text"
                },
                "filesize" : {
                    "type" : "integer"
                },
                "tags" : {
                    "type" : "text"
                }
            }
        }
        super().__init__(host, port, index, doc_type, mapping)

    def create_pid(self, s3_tuple : tuple) -> str:
        """ Get primary id from s3 bucket and object name

        Arguments:
            s3_tuple {tuple} -- tuple of (s3 bucket, object key, object size)

        Returns:
            str -- primary id, formatted as "<bucket>-<key>"
        """
        return "-".join(s3_tuple[:2])

    def create_doc_entry(self, extension : str, s3_tuple : tuple, image_labels : list, image_texts : list, celebrities : list) -> dict:
        """ Create document entry

        Arguments:
            extension {str} -- file extension
            s3_tuple {tuple} -- tuple of (s3 bucket, object key, object size)
            image_labels {list} -- list of image labels
            image_texts {list} -- list of image texts
            celebrities {list} -- list of celebrities in image

        Returns:
            dict -- document entry
        """
        # Bug fix: the original aliased `tags = image_labels` and then
        # spliced the other lists into it, mutating the caller's
        # `image_labels` argument in place.  Build a fresh list instead,
        # preserving the original ordering: celebrities, then texts,
        # then labels.
        tags = list(celebrities) + list(image_texts) + list(image_labels)
        return {
            "extension" : extension,
            "filesize" : s3_tuple[2],
            "s3_url" : f"https://s3.amazonaws.com/{s3_tuple[0]}/{s3_tuple[1]}",
            "tags" : tags
        }

    def search_document_by_tags(self, tag_list : list, num_of_docs : int = 3) -> dict:
        """ Search document by image tags (labels, text, celebrities)

        Arguments:
            tag_list {list} -- list of tags

        Keyword Arguments:
            num_of_docs {int} -- max number of searched document (default: {3})

        Returns:
            requests.Response -- search response; its ``.json()`` has the form
            {
                "..." : ...,
                "hits": {
                    "total": n,
                    "max_score": x.xxxxxxx,
                    "hits": [
                        {
                            "_index" : "...",
                            "_type" : "...",
                            "_id" : "...",
                            "_score" : x.xxxxxxx,
                            "_source" : {...mapping...}
                        },
                    ]
                }
            }
        """
        # OR-combine the tags: a document matches if any tag matches
        res = self.search_document(body={
            "from" : 0,
            "size" : num_of_docs,
            "query" : {
                "bool" : {
                    "should" : [
                        {
                            "match": {
                                "tags": tag
                            }
                        }
                        for tag in tag_list
                    ]
                }
            }
        })
        return res
if __name__ == "__main__":
tx = TextfileDocument()
tx.put_index()
tx.put_mapping()
im = ImagefileDocument()
im.put_index()
im.put_mapping()
pid_list = range(3)
document_list = [
tx.create_doc_entry(
title="test_pdf.pdf",
extension="pdf",
s3_tuple=("bucket", "test_pdf.pdf", 1024),
content="This is a dummy PDF"
),
tx.create_doc_entry(
title="amazon.pdf",
extension="pdf",
s3_tuple=("bucket", "amazon.pdf", 2048),
content="Amazon.com, Inc. is located in Seattle, WA and was founded July 5th, 1994 by <NAME>, allowing customers to buy everything from books to blenders. Seattle is north of Portland and south of Vancouver, BC. Other notable Seattle - based companies are Starbucks and Boeing."
),
tx.create_doc_entry(
title="test_hello.pdf",
extension="pdf",
s3_tuple=("bucket", "test_hello.pdf", 100),
content="Hello world"
)
]
tx.put_document_bulk([1, 2, 3], document_list)
| <filename>src/esclient.py
import requests
from http import HTTPStatus
import json
class ESClientBase:
    """Minimal Elasticsearch REST client bound to one index / doc type.

    Every method issues a plain HTTP request against the cluster endpoint
    and returns the raw ``requests.Response`` so callers can inspect the
    JSON payload themselves.  Status codes are checked with ``assert``.
    """

    def __init__(self, host: str, port: int, index: str, doc_type: str, mapping: dict):
        self._host = host
        self._port = port
        self._es_endpoint = f"{host}:{port}"
        self._index = index
        self._doc_type = doc_type
        self._mapping = mapping
        # Prepend a scheme when the caller passed a bare host name;
        # port 443 implies HTTPS, everything else plain HTTP.
        if self._es_endpoint[:4] != "http":
            if self._port == 443:
                self._es_endpoint = f"https://{self._es_endpoint}"
            else:
                self._es_endpoint = f"http://{self._es_endpoint}"

    @property
    def index(self):
        return self._index

    @property
    def doc_type(self):
        return self._doc_type

    @property
    def mapping(self):
        return self._mapping

    def put_index(self, ignore_exist_error=True) -> requests.Response:
        """Create the Elasticsearch index.

        Keyword Arguments:
            ignore_exist_error {bool} -- tolerate an already-existing index,
                which Elasticsearch reports as 400 Bad Request (default: {True})

        Returns:
            requests.Response -- put index http response
        """
        res = requests.put(url=f"{self._es_endpoint}/{self._index}")
        if ignore_exist_error:
            assert res.status_code in [HTTPStatus.OK, HTTPStatus.BAD_REQUEST]
        else:
            assert HTTPStatus.OK == res.status_code
        return res

    def delete_index(self, ignore_nonexist_error=True) -> requests.Response:
        """Delete the Elasticsearch index.

        Keyword Arguments:
            ignore_nonexist_error {bool} -- tolerate a missing index,
                reported as 404 Not Found (default: {True})

        Returns:
            requests.Response -- delete index http response
        """
        res = requests.delete(url=f"{self._es_endpoint}/{self._index}")
        if ignore_nonexist_error:
            assert res.status_code in [HTTPStatus.OK, HTTPStatus.NOT_FOUND]
        else:
            assert HTTPStatus.OK == res.status_code
        return res

    def put_mapping(self) -> requests.Response:
        """Install this client's field mapping for the doc type.

        Returns:
            requests.Response -- put mapping http response
        """
        res = requests.put(
            url=f"{self._es_endpoint}/{self._index}/_mapping/{self._doc_type}",
            json=self._mapping)
        assert HTTPStatus.OK == res.status_code
        return res

    def get_document(self, pid: str) -> requests.Response:
        """Retrieve one document by primary id.

        Arguments:
            pid {str} -- primary id

        Returns:
            requests.Response -- get document http response
        """
        res = requests.get(url=f"{self._es_endpoint}/{self._index}/{self._doc_type}/{pid}")
        assert HTTPStatus.OK == res.status_code
        return res

    def put_document(self, pid: str, document: dict) -> requests.Response:
        """Insert or replace one document.

        Arguments:
            pid {str} -- primary id
            document {dict} -- document body

        Returns:
            requests.Response -- put document http response
        """
        res = requests.put(
            url=f"{self._es_endpoint}/{self._index}/{self._doc_type}/{pid}",
            json=document)
        # Elasticsearch answers 201 on first insert but 200 when an existing
        # document is replaced; asserting CREATED only (as before) made
        # every re-put of the same pid fail.
        assert res.status_code in [HTTPStatus.OK, HTTPStatus.CREATED]
        return res

    def put_document_bulk(self, pid_list: list, document_list: list) -> requests.Response:
        """Index multiple documents with a single ``_bulk`` request.

        Arguments:
            pid_list {list} -- list of primary ids
            document_list {list} -- list of documents (parallel to pid_list)

        Returns:
            requests.Response -- bulk request http response
        """
        assert len(pid_list) == len(document_list)
        data_list = [
            "\n".join([
                json.dumps({"create": {"_id": pid, "_type": self._doc_type, "_index": self._index}}),
                json.dumps(document)
            ]) for pid, document in zip(pid_list, document_list)
        ]
        # The bulk body is newline-delimited JSON and must end with "\n".
        data = "\n".join(data_list) + "\n"
        headers = {"Content-Type": "application/x-ndjson"}
        res = requests.post(url=f"{self._es_endpoint}/_bulk?pretty", data=data, headers=headers)
        assert HTTPStatus.OK == res.status_code
        return res

    def delete_document(self, pid: str, ignore_nonexist_error=True) -> requests.Response:
        """Delete one document by primary id.

        Arguments:
            pid {str} -- primary id

        Keyword Arguments:
            ignore_nonexist_error {bool} -- tolerate a missing document,
                reported as 404 Not Found (default: {True})

        Returns:
            requests.Response -- delete request http response
        """
        res = requests.delete(url=f"{self._es_endpoint}/{self._index}/{self._doc_type}/{pid}")
        if ignore_nonexist_error:
            assert res.status_code in [HTTPStatus.OK, HTTPStatus.NOT_FOUND]
        else:
            assert HTTPStatus.OK == res.status_code
        return res

    def delete_document_bulk(self, pid_list: list) -> requests.Response:
        """Delete multiple documents with a single ``_bulk`` request.

        Arguments:
            pid_list {list} -- list of primary ids

        Returns:
            requests.Response -- bulk request http response
        """
        # TODO: Need Unittest to Verify If Functionalities are achieved
        data_list = [
            json.dumps({"delete": {"_id": pid, "_type": self._doc_type, "_index": self._index}})
            for pid in pid_list
        ]
        data = "\n".join(data_list) + "\n"
        headers = {"Content-Type": "application/x-ndjson"}
        res = requests.post(url=f"{self._es_endpoint}/_bulk?pretty", data=data, headers=headers)
        assert HTTPStatus.OK == res.status_code
        return res

    def delete_document_by_query(self, body: dict) -> requests.Response:
        """Delete every document matched by the given query body.

        Arguments:
            body {dict} -- query body

        Returns:
            requests.Response -- bulk delete response, or the search
                response when nothing matched
        """
        res = self.search_document(body=body)
        data = res.json()
        # NOTE(review): assumes the ES 6.x response shape where
        # hits.total is an integer -- verify against the cluster version.
        if data["hits"]["total"] > 0:
            pid_list = [document["_id"] for document in data["hits"]["hits"]]
            return self.delete_document_bulk(pid_list=pid_list)
        return res

    def search_document(self, body: dict) -> requests.Response:
        """Run a search with the given query body.

        Arguments:
            body {dict} -- query body

        Returns:
            requests.Response -- search http response (status unchecked)
        """
        res = requests.get(url=f"{self._es_endpoint}/{self._index}/{self._doc_type}/_search", json=body)
        return res

    def query_all(self) -> requests.Response:
        """Return every document in the index via a match_all query.

        Returns:
            requests.Response -- search http response
        """
        query_param = {
            "query": {
                "match_all": {}
            }
        }
        res = requests.get(url=f"{self._es_endpoint}/{self._index}/{self._doc_type}/_search", json=query_param)
        assert HTTPStatus.OK == res.status_code
        return res
class TextfileDocument(ESClientBase):
    """ESClientBase specialization indexing searchable text files stored in S3."""

    def __init__(self, host: str = "http://localhost", port: int = 9200, aws_region: str = "us-east-1"):
        self.aws_region = aws_region
        index = "textfilesearch"
        doc_type = "textfile"
        mapping = {
            "properties": {
                "title": {"type": "text"},
                "extension": {"type": "keyword"},
                "s3_url": {"type": "text"},
                "filesize": {"type": "integer"},
                "content": {"type": "text"}
            }
        }
        # No ``return`` here: __init__ must not return a value.
        super().__init__(host, port, index, doc_type, mapping)

    def create_pid(self, s3_tuple: tuple) -> str:
        """Derive the primary id from the S3 bucket and object key.

        Arguments:
            s3_tuple {tuple} -- (s3 bucket, object key, object size)

        Returns:
            str -- primary id, "<bucket>-<key>"
        """
        return "-".join(s3_tuple[:2])

    def create_doc_entry(self, title: str, extension: str, s3_tuple: tuple, content: str) -> dict:
        """Build a textfile document matching this client's mapping.

        Arguments:
            title {str} -- file title
            extension {str} -- file extension
            s3_tuple {tuple} -- (s3 bucket, object key, object size)
            content {str} -- document body

        Returns:
            dict -- textfile document
        """
        return {
            "title": title,
            "extension": extension,
            "filesize": s3_tuple[2],
            "s3_url": f"https://s3.amazonaws.com/{s3_tuple[0]}/{s3_tuple[1]}",
            "content": content
        }

    def search_and_highlight_document(self, keywords: list, num_of_docs: int = 3,
                                      num_of_highlights: int = 3,
                                      highlight_fragment_size: int = 100) -> dict:
        """Search documents by keywords and return highlighted fragments.

        Matches against both the ``content`` and ``title`` fields.

        Arguments:
            keywords {list} -- list of strings to be searched

        Keyword Arguments:
            num_of_docs {int} -- max number of searched documents (default: {3})
            num_of_highlights {int} -- number of highlight fragments (default: {3})
            highlight_fragment_size {int} -- chars per highlight fragment (default: {100})

        Returns:
            requests.Response -- search response whose JSON body carries
                ``hits.hits[*]._source`` and ``hits.hits[*].highlight.content``
        """
        body = {
            "from": 0,
            "size": num_of_docs,
            "query": {
                "multi_match": {
                    "query": " ".join(keywords),
                    "fields": ["content", "title"]
                }
            },
            "highlight": {
                "number_of_fragments": num_of_highlights,
                "fragment_size": highlight_fragment_size,
                "fields": {
                    "content": {}
                }
            }
        }
        print(f"search and highlight using body: {body}")
        res = self.search_document(body=body)
        return res
class ImagefileDocument(ESClientBase):
    """ESClientBase specialization indexing tagged image files stored in S3."""

    def __init__(self, host: str = "http://localhost", port: int = 9200, aws_region: str = "us-east-1"):
        self.aws_region = aws_region
        index = "imagefilesearch"
        doc_type = "imagefile"
        mapping = {
            "properties": {
                "extension": {"type": "keyword"},
                "s3_url": {"type": "text"},
                "filesize": {"type": "integer"},
                "tags": {"type": "text"}
            }
        }
        # No ``return`` here: __init__ must not return a value.
        super().__init__(host, port, index, doc_type, mapping)

    def create_pid(self, s3_tuple: tuple) -> str:
        """Derive the primary id from the S3 bucket and object key.

        Arguments:
            s3_tuple {tuple} -- (s3 bucket, object key, object size)

        Returns:
            str -- primary id, "<bucket>-<key>"
        """
        return "-".join(s3_tuple[:2])

    def create_doc_entry(self, extension: str, s3_tuple: tuple, image_labels: list,
                         image_texts: list, celebrities: list) -> dict:
        """Build an imagefile document matching this client's mapping.

        Arguments:
            extension {str} -- file extension
            s3_tuple {tuple} -- (s3 bucket, object key, object size)
            image_labels {list} -- list of image labels
            image_texts {list} -- list of texts detected in the image
            celebrities {list} -- list of celebrities detected in the image

        Returns:
            dict -- document entry with tags ordered
                celebrities + texts + labels
        """
        # Build a fresh list: the original inserted into ``image_labels``
        # in place, silently mutating the caller's argument.
        tags = list(celebrities) + list(image_texts) + list(image_labels)
        return {
            "extension": extension,
            "filesize": s3_tuple[2],
            "s3_url": f"https://s3.amazonaws.com/{s3_tuple[0]}/{s3_tuple[1]}",
            "tags": tags
        }

    def search_document_by_tags(self, tag_list: list, num_of_docs: int = 3) -> dict:
        """Search image documents whose tags match any entry of *tag_list*.

        Arguments:
            tag_list {list} -- list of tags (labels, detected text, celebrities)

        Keyword Arguments:
            num_of_docs {int} -- max number of searched documents (default: {3})

        Returns:
            requests.Response -- search response whose JSON body carries
                the matching documents under ``hits.hits``
        """
        res = self.search_document(body={
            "from": 0,
            "size": num_of_docs,
            "query": {
                "bool": {
                    # "should" ORs the per-tag match clauses together.
                    "should": [
                        {"match": {"tags": tag}}
                        for tag in tag_list
                    ]
                }
            }
        })
        return res
if __name__ == "__main__":
    # Bootstrap both indices and mappings, then bulk-load a few sample
    # text documents for manual experimentation.
    tx = TextfileDocument()
    tx.put_index()
    tx.put_mapping()

    im = ImagefileDocument()
    im.put_index()
    im.put_mapping()

    document_list = [
        tx.create_doc_entry(
            title="test_pdf.pdf",
            extension="pdf",
            s3_tuple=("bucket", "test_pdf.pdf", 1024),
            content="This is a dummy PDF"
        ),
        tx.create_doc_entry(
            title="amazon.pdf",
            extension="pdf",
            s3_tuple=("bucket", "amazon.pdf", 2048),
            content="Amazon.com, Inc. is located in Seattle, WA and was founded July 5th, 1994 by <NAME>, allowing customers to buy everything from books to blenders. Seattle is north of Portland and south of Vancouver, BC. Other notable Seattle - based companies are Starbucks and Boeing."
        ),
        tx.create_doc_entry(
            title="test_hello.pdf",
            extension="pdf",
            s3_tuple=("bucket", "test_hello.pdf", 100),
            content="Hello world"
        )
    ]
    tx.put_document_bulk([1, 2, 3], document_list)
| en | 0.467242 | Add an elasticsearch index by sending a put request Keyword Arguments: ignore_exist_error {bool} -- ignore index exist error (default: {True}) Returns: requests.Response -- put index http response Delete an elasticsearch index by sending a delete request Keyword Arguments: ignore_nonexist_error {bool} -- ignore index not found error (default: {True}) Returns: requests.Response -- delete index http response Add an elasticsearch mapping by sending a put request Returns: requests.Response -- put mapping http response Retrieve document by sending a get request Arguments: pid {str} -- primary id Returns: requests.Response -- get document http response Add document by sending a put request Arguments: pid {str} -- primary id document {dict} -- document Returns: requests.Response -- put document http response Put multiple documents using batching Arguments: pid_list {list} -- list of primary ids document_list {list} -- list of documents Returns: requests.Response -- put request http response Delete document by sending a delete request Arguments: pid {str} -- Primary id Keyword Arguments: ignore_nonexist_error {bool} -- ignore document not found error (default: {True}) Returns: requests.Response -- delete request http response Delete multiple documents using batching Arguments: pid_list {list} -- list of primary ids Returns: requests.Response -- post request http response # TODO: Need Unittest to Verify If Functionalities are achieved Delete queried document Arguments: body {dict} -- query body Returns: requests.Response -- http response Search document in elasticsearch Arguments: body {dict} -- query body Returns: requests.Response -- search document http response Select all elements in the index Returns: requests.Response -- search document http response Get primary id from s3 bucket and object name Arguments: s3_tuple {tuple} -- tuple of (s3 bucket, object key, object size) Returns: str -- primary id Create document entry Arguments: title {str} -- file 
title extension {str} -- file extension s3_tuple {tuple} -- tuple of (s3 bucket, object key, object size) content {str} -- document body Returns: dict -- textfile document Search document by keywords and returns searched highlights Arguments: keywords {list} -- list of strings to be searched Keyword Arguments: num_of_docs {int} -- max number of searched document (default: {3}) num_of_highlights {int} -- number of highlight fragments (default: {3}) highlight_fragment_size {int} -- chars display per highlight fragment (default: {100}) Returns: dict -- textfile document in the form of { "..." : ..., "hits": { "total": n, "max_scoxre": x.xxxxxxx, "hits": [ { "_index" : "...", "_type" : "...", "_id" : "...", "_score" : x.xxxxxxx, "_source" : {...mapping...}, "highlight" : { "content" : [xxx , xxx , xxx] } }, ] } } Get primary id from s3 bucket and object name Arguments: s3_tuple {tuple} -- tuple of (s3 bucket, object key, object size) Returns: str -- primary id Create document entry Arguments: extension {str} -- file extension s3_tuple {tuple} -- tuple of (s3 bucket, object key, object size) image_labels {list} -- list of image labels image_texts {list} -- list of image texts celebrities {list} -- list of celebrities in image Returns: dict -- document entry Search document by image tags (labels, text, celebrities) Arguments: tag_list {list} -- list of tags Keyword Arguments: num_of_docs {int} -- max number of searched document (default: {3}) Returns: dict -- imagefile document in the form of { "..." : ..., "hits": { "total": n, "max_scoxre": x.xxxxxxx, "hits": [ { "_index" : "...", "_type" : "...", "_id" : "...", "_score" : x.xxxxxxx, "_source" : {...mapping...} }, ] } } | 2.863909 | 3 |
exercicios/Curso_Udemy_Python/sec3_aula50.py | IgoPereiraBarros/maratona-data-science-brasil | 0 | 6619008 | <reponame>IgoPereiraBarros/maratona-data-science-brasil
'''def algo():
    raise Exception('exceção')
    print('Depois do raise') # mesmo um print após o raise, ainda assim esse print não
    # será executado...neste caso

try:
    algo()
except:
    print('Peguei uma exceção')

print('Após a exceção')'''


def divisao(divisor):
    """Divide 10 by *divisor*, demonstrating try/except/finally handling.

    Returns the quotient, or a message string for division by zero or a
    non-numeric divisor; re-raises ValueError when *divisor* equals 15.
    The ``finally`` clause runs on every path, including the re-raise.
    """
    try:
        if divisor == 15:
            raise ValueError('Não gosto do 15')
        return 10 / divisor
    except ZeroDivisionError:
        return 'Erro ao dividir por zero(0)'
    except TypeError:
        return 'Apenas números'
    except ValueError:
        print('Não entre com o valor 15')
        raise  # propagate the custom error to the caller
    # else:
    #     print('Não ocorreu nenhuma exceção')
    finally:
        print('O finally sempre será executado')


print(divisao(12))
| '''def algo():
raise Exception('exceção')
print('Depois do raise') # mesmo um print após o raise, ainda assim esse print não
# será executado...neste caso
try:
algo()
except:
print('Peguei uma exceção')
print('Após a exceção')'''
def divisao(divisor):
try:
if divisor == 15:
raise ValueError('Não gosto do 15')
return 10 / divisor
except ZeroDivisionError:
return 'Erro ao dividir por zero(0)'
except TypeError:
return 'Apenas números'
except ValueError:
print('Não entre com o valor 15')
raise
#else:
#print('Não ocorreu nenhuma exceção')
finally:
print('O finally sempre será executado')
print(divisao(12)) | pt | 0.986576 | def algo(): raise Exception('exceção') print('Depois do raise') # mesmo um print após o raise, ainda assim esse print não # será executado...neste caso try: algo() except: print('Peguei uma exceção') print('Após a exceção') #else: #print('Não ocorreu nenhuma exceção') | 3.842829 | 4 |
python/testsuite/certifications/svc/analyze_regression_suite.py | jiportilla/ontology | 0 | 6619009 | <reponame>jiportilla/ontology
# !/usr/bin/env python
# -*- coding: UTF-8 -*-
import pandas as pd
from pandas import DataFrame
from base import BaseObject
class AnalyzeRegressionSuite(BaseObject):
    """Split certification regression results into Gold vs. Standard
    suites and compute a summarized analysis for each partition."""

    __df_gold_analysis = None
    __df_standard_analysis = None

    def __init__(self,
                 df_results: DataFrame,
                 is_debug: bool = False):
        """
        Created:
            12-Aug-2019
            <EMAIL>
            *   https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/680
        :param df_results:
            the regression test results
        """
        BaseObject.__init__(self, __name__)
        self._is_debug = is_debug
        self._process(df_results)

    def results(self) -> (DataFrame, DataFrame):
        """Return the (gold analysis, standard analysis) DataFrames."""
        return self.__df_gold_analysis, self.__df_standard_analysis

    def _process(self,
                 df_results: DataFrame) -> None:
        """
        Purpose:
            Split the Regression Results into
                Gold vs. Standard
            and perform a summarized analysis on each
        :param df_results:
            the regression test results
        """
        from testsuite.certifications.dmo import RegressionTestSplitter
        from testsuite.certifications.dmo import RegressionResultAnalysis

        df_gold, df_standard = RegressionTestSplitter(df_results).results()

        def analyze(df: DataFrame, label: str) -> DataFrame:
            # An empty partition yields a single all-zero summary row so
            # downstream consumers always receive the same columns.
            # (Replaces two duplicated nested functions in the original.)
            if df.empty:
                self.logger.warning(f"{label} Regression is empty")
                return pd.DataFrame([{
                    "Result": None,
                    "Vendor": None,
                    "Total": 0,
                    "Failed": 0,
                    "SuccessRate": 0}])
            return RegressionResultAnalysis(df,
                                            is_debug=self._is_debug).results()

        self.__df_gold_analysis = analyze(df_gold, "Gold")
        self.__df_standard_analysis = analyze(df_standard, "Standard")
| # !/usr/bin/env python
# -*- coding: UTF-8 -*-
import pandas as pd
from pandas import DataFrame
from base import BaseObject
class AnalyzeRegressionSuite(BaseObject):
__df_gold_analysis = None
__df_standard_analysis = None
def __init__(self,
df_results: DataFrame,
is_debug: bool = False):
"""
Created:
12-Aug-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/680
:param df_results:
the regression test results
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._process(df_results)
def results(self) -> (DataFrame, DataFrame):
return self.__df_gold_analysis, self.__df_standard_analysis
def _process(self,
df_results: DataFrame) -> None:
"""
Purpose:
Split the Regression Results into
Gold vs. Standard
and perform a summarized analysis on each
:param df_results:
the regression test results
"""
from testsuite.certifications.dmo import RegressionTestSplitter
from testsuite.certifications.dmo import RegressionResultAnalysis
df_gold, df_standard = RegressionTestSplitter(df_results).results()
def analyze_gold_regression():
if df_gold.empty:
self.logger.warning("Gold Regression is empty")
return pd.DataFrame([{
"Result": None,
"Vendor": None,
"Total": 0,
"Failed": 0,
"SuccessRate": 0}])
return RegressionResultAnalysis(df_gold,
is_debug=self._is_debug).results()
def analyze_standard_regression():
if df_standard.empty:
self.logger.warning("Standard Regression is empty")
return pd.DataFrame([{
"Result": None,
"Vendor": None,
"Total": 0,
"Failed": 0,
"SuccessRate": 0}])
return RegressionResultAnalysis(df_standard,
is_debug=self._is_debug).results()
self.__df_gold_analysis = analyze_gold_regression()
self.__df_standard_analysis = analyze_standard_regression() | en | 0.611247 | # !/usr/bin/env python # -*- coding: UTF-8 -*- Created: 12-Aug-2019 <EMAIL> * https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/680 :param df_results: the regression test results Purpose: Split the Regression Results into Gold vs. Standard and perform a summarized analysis on each :param df_results: the regression test results | 2.853302 | 3 |
fastapi_third_party_auth/idtoken_types.py | jokurz/fastapi-third-party-auth | 3 | 6619010 | from typing import List
from typing import Union
from pydantic import BaseModel
from pydantic import Extra
class IDToken(BaseModel):
    """Pydantic model representing an OIDC ID Token.

    ID Tokens are polymorphic and may have many attributes not defined in
    the spec, thus this model accepts all additional fields
    (``Extra.allow``).  Only the spec's required claims are enforced.

    See https://openid.net/specs/openid-connect-core-1_0.html#IDToken

    Parameters:
        iss (str): Issuer Identifier for the Issuer of the response.
        sub (str): Subject Identifier.
        aud (Union[str, List[str]]): Audience(s) this ID Token is intended for.
        exp (int): Expiration time on or after which the ID Token MUST NOT
            be accepted for processing.
        iat (int): Time at which the JWT was issued.
    """

    iss: str
    sub: str
    aud: Union[str, List[str]]
    exp: int
    iat: int

    class Config:
        # Keep unknown provider-specific claims instead of rejecting them.
        extra = Extra.allow
class OktaIDToken(IDToken):
    """Pydantic model for the IDToken returned by Okta's OIDC implementation."""

    auth_time: int
    ver: int
    jti: str
    amr: List[str]
    idp: str
    nonce: str
    at_hash: str
    name: str
    email: str
    preferred_username: str
class KeycloakIDToken(IDToken):
    """Pydantic model for the IDToken returned by Keycloak's OIDC implementation."""

    jti: str
    name: str
    email: str
    email_verified: bool
    preferred_username: str
| from typing import List
from typing import Union
from pydantic import BaseModel
from pydantic import Extra
class IDToken(BaseModel):
"""Pydantic model representing an OIDC ID Token.
ID Tokens are polymorphic and may have many attributes not defined in the spec thus this model accepts
all addition fields. Only required fields are listed in the attributes section of this docstring or
enforced by pydantic.
See the specifications here. https://openid.net/specs/openid-connect-core-1_0.html#IDToken
Parameters:
iss (str): Issuer Identifier for the Issuer of the response.
sub (str): Subject Identifier.
aud (Union[str, List[str]]): Audience(s) that this ID Token is intended for.
exp (str): Expiration time on or after which the ID Token MUST NOT be accepted for processing.
iat (iat): Time at which the JWT was issued.
"""
iss: str
sub: str
aud: Union[str, List[str]]
exp: int
iat: int
class Config:
extra = Extra.allow
class OktaIDToken(IDToken):
"""Pydantic Model for the IDToken returned by Okta's OIDC implementation."""
auth_time: int
ver: int
jti: str
amr: List[str]
idp: str
nonce: str
at_hash: str
name: str
email: str
preferred_username: str
class KeycloakIDToken(IDToken):
"""Pydantic Model for the IDToken returned by Keycloak's OIDC implementation."""
jti: str
name: str
email: str
email_verified: bool
preferred_username: str
| en | 0.864424 | Pydantic model representing an OIDC ID Token. ID Tokens are polymorphic and may have many attributes not defined in the spec thus this model accepts all addition fields. Only required fields are listed in the attributes section of this docstring or enforced by pydantic. See the specifications here. https://openid.net/specs/openid-connect-core-1_0.html#IDToken Parameters: iss (str): Issuer Identifier for the Issuer of the response. sub (str): Subject Identifier. aud (Union[str, List[str]]): Audience(s) that this ID Token is intended for. exp (str): Expiration time on or after which the ID Token MUST NOT be accepted for processing. iat (iat): Time at which the JWT was issued. Pydantic Model for the IDToken returned by Okta's OIDC implementation. Pydantic Model for the IDToken returned by Keycloak's OIDC implementation. | 2.606539 | 3 |
cli-face-capture.py | khreez/face-recognition-stream | 0 | 6619011 | import os
import cv2
import argparse
import time
import requests
import tempfile
from imutils.video import VideoStream
API_URL = 'http://localhost:5000/upload'
MESSAGE_COLOR = (0, 0, 255)
def capture_stream(label):
    """Run a webcam preview loop to enroll face image samples.

    Press 'c' to save the current frame and upload it to the enrollment
    API under *label*; press 'q' to quit.  Prints a summary of how many
    samples were enrolled on exit.
    """
    source_sample_count = 0
    vs = VideoStream(src=0).start()
    time.sleep(2.0)  # let the camera sensor warm up

    while True:
        frame = vs.read()
        source = frame.copy()  # keep an unannotated copy for saving
        frame_message = 'Face enrollment mode'
        cv2.putText(frame, frame_message, (1, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.75, MESSAGE_COLOR, 2)
        cv2.imshow(frame_message, frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('c'):
            destination_file = os.path.join(tempfile.gettempdir(), '{}-{}.jpg'.format(label, int(time.time())))
            cv2.imwrite(destination_file, source)
            try:
                # Context manager closes the handle; the original leaked
                # one open file descriptor per capture.
                with open(destination_file, 'rb') as image_file:
                    response = requests.post(API_URL, files={'image': image_file}, data={'label': label})
                if response and response.ok:
                    source_sample_count += 1
                    capture_message = 'Captured face image sample for: {}'.format(label)
                    print(capture_message)
                    cv2.putText(frame, capture_message, (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, MESSAGE_COLOR, 2)
                    time.sleep(2.0)
                else:
                    print('unable to submit face capture')
            except requests.RequestException:
                print('unreachable endpoint')
        elif key == ord('q'):
            cv2.destroyAllWindows()
            vs.stop()
            break

    if source_sample_count > 0:
        print('Took {} sample face image(s) for: {}'.format(source_sample_count, label))
    else:
        print('No sample face image enrolled for: {}'.format(label))
if __name__ == '__main__':
    # Parse the required enrollment label and start the capture loop.
    ap = argparse.ArgumentParser()
    ap.add_argument('-l', '--label', required=True, help='name or label for the image')
    args = vars(ap.parse_args())
    capture_stream(args['label'])
| import os
import cv2
import argparse
import time
import requests
import tempfile
from imutils.video import VideoStream
API_URL = 'http://localhost:5000/upload'
MESSAGE_COLOR = (0, 0, 255)
def capture_stream(label):
source_sample_count = 0
vs = VideoStream(src=0).start()
time.sleep(2.0)
while True:
frame = vs.read()
source = frame.copy()
frame_message = 'Face enrollment mode'
cv2.putText(frame, frame_message, (1, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.75, MESSAGE_COLOR, 2)
cv2.imshow(frame_message, frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('c'):
destination_file = os.path.join(tempfile.gettempdir(), '{}-{}.jpg'.format(label, int(time.time())))
cv2.imwrite(destination_file, source)
try:
response = requests.post(API_URL, files={'image': open(destination_file, 'rb')}, data={'label': label})
if response and response.ok:
source_sample_count += 1
capture_message = 'Captured face image sample for: {}'.format(label)
print(capture_message)
cv2.putText(frame, capture_message, (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, MESSAGE_COLOR, 2)
time.sleep(2.0)
else:
print('unable to submit face capture')
except requests.RequestException:
print('unreachable endpoint')
elif key == ord('q'):
cv2.destroyAllWindows()
vs.stop()
break
if source_sample_count > 0:
print('Took {} sample face image(s) for: {}'.format(source_sample_count, label))
else:
print('No sample face image enrolled for: {}'.format(label))
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('-l', '--label', required=True, help='name or label for the image')
args = vars(ap.parse_args())
capture_stream(args['label'])
| none | 1 | 2.806448 | 3 | |
tests/integrations/providers/AppProvider.py | girardinsamuel/pyexceptions | 0 | 6619012 | from masonite.providers import Provider
from masonite.helpers import optional
from masonite import __version__
from src.pyexceptions.Handler import Handler
from src.pyexceptions.tabs.blocks.Block import Block
from src.pyexceptions.tabs.Tab import Tab
from src.pyexceptions.tabs.ContextTab import ContextTab
from ..app.ExceptionHandler import ExceptionHandler
class SolutionsTab(Tab):
    """Debug-page tab that surfaces suggested solutions for the exception."""

    name = "Solutions"
    component = "SolutionsTab"
class AppBlock(Block):
    """Context block describing the application and the matched route."""

    name = "Application"
    icon = "DesktopComputerIcon"
    component = "KeyValBlockWithSections"

    def build(self):
        # NOTE(review): assumes a request with a matched route is bound
        # when the handler renders this block -- confirm for console errors.
        request = self.handler.app.make("request")
        return {
            "Route": {
                "controller": optional(request).route.controller,
                "route_name": optional(request).route._name,
                "route_middlewares": optional(request).route.get_middlewares(),
            },
            "Info": {
                "Masonite version": __version__,
            },
        }
class RequestBlock(Block):
    """Context block describing the incoming HTTP request."""

    name = "Request"
    icon = "SwitchHorizontalIcon"
    component = "KeyValBlockWithSections"

    def build(self):
        request = self.handler.app.make("request")
        return {
            "Parameters": {
                "Path": request.get_path(),
                # Empty input bags render as None rather than "{}".
                "Input": request.input_bag.all_as_values() or None,
                "Request Method": request.get_request_method(),
            },
            "Headers": request.header_bag.to_dict(),
        }
class AppProvider(Provider):
    """Masonite service provider wiring the debug exception handler."""

    def __init__(self, application):
        self.application = application

    def register(self):
        exception_handler = ExceptionHandler(self.application)
        config = {
            "editor": "vscode",
            "stack": {"offset": 10},
            "tabs": {"context": {"packages_update": False}, "solutions": False},
        }
        handler = Handler().set_options(config)
        handler.app = self.application
        handler.add_tab(ContextTab)
        handler.add_tab(SolutionsTab)
        # Enrich the built-in Context tab with request / app details.
        handler.get_tab("Context").add_block(RequestBlock)
        handler.get_tab("Context").add_block(AppBlock)
        exception_handler.add_driver("debug", handler)
        self.application.bind("exception_handler", exception_handler)

    def boot(self):
        pass
| from masonite.providers import Provider
from masonite.helpers import optional
from masonite import __version__
from src.pyexceptions.Handler import Handler
from src.pyexceptions.tabs.blocks.Block import Block
from src.pyexceptions.tabs.Tab import Tab
from src.pyexceptions.tabs.ContextTab import ContextTab
from ..app.ExceptionHandler import ExceptionHandler
class SolutionsTab(Tab):
name = "Solutions"
component = "SolutionsTab"
class AppBlock(Block):
name = "Application"
icon = "DesktopComputerIcon"
component = "KeyValBlockWithSections"
def build(self):
request = self.handler.app.make("request")
return {
"Route": {
"controller": optional(request).route.controller,
"route_name": optional(request).route._name,
"route_middlewares": optional(request).route.get_middlewares(),
},
"Info": {
"Masonite version": __version__,
},
}
class RequestBlock(Block):
name = "Request"
icon = "SwitchHorizontalIcon"
component = "KeyValBlockWithSections"
def build(self):
request = self.handler.app.make("request")
return {
"Parameters": {
"Path": request.get_path(),
"Input": request.input_bag.all_as_values() or None,
"Request Method": request.get_request_method(),
},
"Headers": request.header_bag.to_dict(),
}
class AppProvider(Provider):
def __init__(self, application):
self.application = application
def register(self):
exception_handler = ExceptionHandler(self.application)
config = {
"editor": "vscode",
"stack": {"offset": 10},
"tabs": {"context": {"packages_update": False}, "solutions": False},
}
handler = Handler().set_options(config)
handler.app = self.application
handler.add_tab(ContextTab)
handler.add_tab(SolutionsTab)
handler.get_tab("Context").add_block(RequestBlock)
handler.get_tab("Context").add_block(AppBlock)
exception_handler.add_driver("debug", handler)
self.application.bind("exception_handler", exception_handler)
def boot(self):
pass
| none | 1 | 2.033218 | 2 | |
zip2kml.py | ehardacre/zipcode2kml | 0 | 6619013 | import chemdrydatasheet as cdd
#find the given zip codes and write them to the output file
def writezips(zips,out):
for z in zips:
try:
f = open('all-zips/zip{0}.kml'.format(z),"r")
#out.writelines([l for l in open("style.kml").readlines()])
placemarkOpen = False
trashOpen = False
for l in f:
tempstr = l
if("<Placemark" in l):
placemarkOpen = True
tempstr = "<Placemark>\n<styleUrl>#KMLStyler</styleUrl>"
if("</Placemark>" in l):
placemarkOpen = False
out.writelines(l)
if(placemarkOpen):
if("<description>" in l):
trashOpen = True
if("</ExtendedData>" in l):
trashOpen = False
tempstr = ""
if("<name>" in l):
tempstr = '<name>{0}</name>\n'.format(z)
if not trashOpen:
out.writelines(tempstr)
f.close()
except:
print(z)
data = sys.argv[1]
name = sys.argv[2]
zs = []
data = data.split(',')
for d in data:
if d.isdigit() and (len(d) == 5):
zs.append(d)
#print(zips)
output = open('output/{0}.kml'.format(name),"w")
output.writelines([l for l in open("header.kml").readlines()])
output.writelines('<name>{0}</name>\n'.format(name))
#zips = [80301,80302,80303,80304,80305]
writezips(zs,output)
#todo give file paths as argument?
#will python even run on windows
output.writelines([l for l in open("kml_parts/footer.kml").readlines()])
output.close()
| import chemdrydatasheet as cdd
#find the given zip codes and write them to the output file
def writezips(zips,out):
for z in zips:
try:
f = open('all-zips/zip{0}.kml'.format(z),"r")
#out.writelines([l for l in open("style.kml").readlines()])
placemarkOpen = False
trashOpen = False
for l in f:
tempstr = l
if("<Placemark" in l):
placemarkOpen = True
tempstr = "<Placemark>\n<styleUrl>#KMLStyler</styleUrl>"
if("</Placemark>" in l):
placemarkOpen = False
out.writelines(l)
if(placemarkOpen):
if("<description>" in l):
trashOpen = True
if("</ExtendedData>" in l):
trashOpen = False
tempstr = ""
if("<name>" in l):
tempstr = '<name>{0}</name>\n'.format(z)
if not trashOpen:
out.writelines(tempstr)
f.close()
except:
print(z)
data = sys.argv[1]
name = sys.argv[2]
zs = []
data = data.split(',')
for d in data:
if d.isdigit() and (len(d) == 5):
zs.append(d)
#print(zips)
output = open('output/{0}.kml'.format(name),"w")
output.writelines([l for l in open("header.kml").readlines()])
output.writelines('<name>{0}</name>\n'.format(name))
#zips = [80301,80302,80303,80304,80305]
writezips(zs,output)
#todo give file paths as argument?
#will python even run on windows
output.writelines([l for l in open("kml_parts/footer.kml").readlines()])
output.close()
| en | 0.643157 | #find the given zip codes and write them to the output file #out.writelines([l for l in open("style.kml").readlines()]) #KMLStyler</styleUrl>" #print(zips) #zips = [80301,80302,80303,80304,80305] #todo give file paths as argument? #will python even run on windows | 2.843516 | 3 |
core_modules/preprocessing/change_size.py | sdrdis/patch_generator | 7 | 6619014 | import numpy as np
import scipy.misc
def process(item_data, params):
size = params['size']
item_data['X'] = scipy.misc.imresize(item_data['X'], size)
y = []
if isinstance(item_data['y'], (list,)):
for j in range(len(item_data['y'])):
y.append(scipy.misc.imresize(item_data['y'][j].astype(float), size))
else:
for j in range(item_data['y'].shape[2]):
y.append(scipy.misc.imresize(item_data['y'][:,:,j].astype(float), size))
y = np.array(y)
y = np.moveaxis(y, 0, 2)
y = y > 0.5
item_data['y'] = y | import numpy as np
import scipy.misc
def process(item_data, params):
size = params['size']
item_data['X'] = scipy.misc.imresize(item_data['X'], size)
y = []
if isinstance(item_data['y'], (list,)):
for j in range(len(item_data['y'])):
y.append(scipy.misc.imresize(item_data['y'][j].astype(float), size))
else:
for j in range(item_data['y'].shape[2]):
y.append(scipy.misc.imresize(item_data['y'][:,:,j].astype(float), size))
y = np.array(y)
y = np.moveaxis(y, 0, 2)
y = y > 0.5
item_data['y'] = y | none | 1 | 2.727626 | 3 | |
testScripts/checkTO.py | ryanemerson/JGroups-HiTab | 0 | 6619015 | #!/usr/bin/env python
import os
from collections import defaultdict
hosts = {'mill001', 'mill004', 'mill005'}
user = 'a7109534'
file_location = '/work/a7109534/'
#file_location = '/home/ryan/workspace/JGroups'
#file_location = '/home/pg/p11/a7109534/'
file_wildcard = '*'
extension = "Delivered*.csv"
get_file = file_location + file_wildcard + extension
destination = '.'
os.system("rm *" + extension)
for hostname in hosts:
cmd = "scp " + user + "@" + hostname + ":" + get_file + " " + destination
print cmd
os.system(cmd)
host_files = defaultdict(list)
for file in os.listdir(destination):
for hostname in hosts:
if hostname in file:
host_files[hostname].append(file)
host_files[hostname].sort()
x = 0
while True:
host_files_iter = iter(host_files)
next_host = host_files_iter.next()
try:
first_host = host_files.get(next_host)[x]
except IndexError:
break
for host in host_files_iter:
second_host = host_files.get(host)[x]
cmd = "diff " + first_host + " " + second_host + " -usa"
os.system(cmd)
x += 1
#os.system("rm " + extension)
| #!/usr/bin/env python
import os
from collections import defaultdict
hosts = {'mill001', 'mill004', 'mill005'}
user = 'a7109534'
file_location = '/work/a7109534/'
#file_location = '/home/ryan/workspace/JGroups'
#file_location = '/home/pg/p11/a7109534/'
file_wildcard = '*'
extension = "Delivered*.csv"
get_file = file_location + file_wildcard + extension
destination = '.'
os.system("rm *" + extension)
for hostname in hosts:
cmd = "scp " + user + "@" + hostname + ":" + get_file + " " + destination
print cmd
os.system(cmd)
host_files = defaultdict(list)
for file in os.listdir(destination):
for hostname in hosts:
if hostname in file:
host_files[hostname].append(file)
host_files[hostname].sort()
x = 0
while True:
host_files_iter = iter(host_files)
next_host = host_files_iter.next()
try:
first_host = host_files.get(next_host)[x]
except IndexError:
break
for host in host_files_iter:
second_host = host_files.get(host)[x]
cmd = "diff " + first_host + " " + second_host + " -usa"
os.system(cmd)
x += 1
#os.system("rm " + extension)
| en | 0.395119 | #!/usr/bin/env python #file_location = '/home/ryan/workspace/JGroups' #file_location = '/home/pg/p11/a7109534/' #os.system("rm " + extension) | 2.533966 | 3 |
rendezvous/productionRestApi/rv/restApi/migrations/0004_notifications_from_friend_name.py | gazh1987/Rendezvous | 0 | 6619016 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-01 17:02
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('restApi', '0003_friends_tracking_enabled'),
]
operations = [
migrations.AddField(
model_name='notifications',
name='from_friend_name',
field=models.CharField(default=datetime.datetime(2016, 3, 1, 17, 2, 34, 619112, tzinfo=utc), max_length=255),
preserve_default=False,
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-01 17:02
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('restApi', '0003_friends_tracking_enabled'),
]
operations = [
migrations.AddField(
model_name='notifications',
name='from_friend_name',
field=models.CharField(default=datetime.datetime(2016, 3, 1, 17, 2, 34, 619112, tzinfo=utc), max_length=255),
preserve_default=False,
),
]
| en | 0.853727 | # -*- coding: utf-8 -*- # Generated by Django 1.9 on 2016-03-01 17:02 | 1.803168 | 2 |
examples/treelstm/utils.py | ruyimarone/dynet | 3,307 | 6619017 | import codecs
import numpy as np
import dynet as dy
def acc_eval(dataset, model):
dataset.reset(shuffle=False)
good = bad = 0.0
for tree in dataset:
dy.renew_cg()
pred = np.argmax(model.predict_for_tree(tree, decorate=False, training=False))
if pred == tree.label:
good += 1
else:
bad += 1
acc = good / (good + bad)
return acc
def get_embeds(embed_path):
word_embeds, w2i = [np.random.randn(300)], {'_UNK_': 0}
with codecs.open(embed_path) as f:
for line in f:
line = line.strip().split(' ')
word, embed = line[0], line[1:]
w2i[word] = len(word_embeds)
word_embeds.append(np.array(embed, dtype=np.float32))
w2i['-LRB-'] = w2i['(']
w2i['-RRB-'] = w2i[')']
return np.array(word_embeds), w2i
| import codecs
import numpy as np
import dynet as dy
def acc_eval(dataset, model):
dataset.reset(shuffle=False)
good = bad = 0.0
for tree in dataset:
dy.renew_cg()
pred = np.argmax(model.predict_for_tree(tree, decorate=False, training=False))
if pred == tree.label:
good += 1
else:
bad += 1
acc = good / (good + bad)
return acc
def get_embeds(embed_path):
word_embeds, w2i = [np.random.randn(300)], {'_UNK_': 0}
with codecs.open(embed_path) as f:
for line in f:
line = line.strip().split(' ')
word, embed = line[0], line[1:]
w2i[word] = len(word_embeds)
word_embeds.append(np.array(embed, dtype=np.float32))
w2i['-LRB-'] = w2i['(']
w2i['-RRB-'] = w2i[')']
return np.array(word_embeds), w2i
| none | 1 | 2.160595 | 2 | |
shipmi/vbmc.py | lion7/virtualbmc | 1 | 6619018 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import subprocess
from time import sleep
import pyghmi.ipmi.bmc as bmc
from shipmi import log
from shipmi.exception import VirtualBMCCommandFailed
from shipmi.provider import get_provider
LOG = log.get_logger()
class VirtualBMC(bmc.Bmc):
def __init__(self, username, password, port, address, name, provider, **kwargs):
super(VirtualBMC, self).__init__({username: password},
port=port, address=address)
self.name = name
self.provider_config = get_provider(provider)
def cmdline(self, section, option, kwargs=None):
cmd = self.provider_config.get(section, option)
if not cmd:
raise NotImplementedError
workingdir = os.path.dirname(self.provider_config.path)
substitutions = {'name': self.name}
if kwargs:
substitutions.update(kwargs)
cmdline = ['sh', '-c', cmd % substitutions]
LOG.debug('Cmdline arguments: %(cmdline)s', {'cmdline': cmdline})
process = subprocess.run(cmdline,
cwd=workingdir,
stdout=subprocess.PIPE,
universal_newlines=True)
if process.returncode != 0:
raise VirtualBMCCommandFailed(command=' '.join(cmdline), exitcode=process.returncode)
output = process.stdout.strip()
LOG.debug('Cmdline output : %(output)s', {'output': output})
return output
def cold_reset(self):
LOG.info('BMC reset called for VirtualBMC %(name)s', {'name': self.name})
def power_off(self):
LOG.info('Power off called for %(name)s', {'name': self.name})
self.cmdline('POWER', 'off')
def power_on(self):
LOG.info('Power on called for %(name)s', {'name': self.name})
self.cmdline('POWER', 'on')
def power_cycle(self):
self.power_off()
for i in range(10):
if self.get_power_state() == 'off':
break
else:
sleep(1)
self.power_on()
def power_reset(self):
LOG.info('Power reset called for %(name)s', {'name': self.name})
self.cmdline('POWER', 'reset')
def pulse_diag(self):
LOG.info('Power diag called for %(name)s', {'name': self.name})
self.cmdline('POWER', 'diag')
def power_shutdown(self):
LOG.info('Soft power off called for %(name)s', {'name': self.name})
self.cmdline('POWER', 'shutdown')
def get_power_state(self):
LOG.info('Get power state called for %(name)s', {'name': self.name})
return self.cmdline('POWER', 'status')
def is_active(self):
return self.get_power_state() == 'on'
def get_boot_device(self):
LOG.info('Get boot device called for %(name)s', {'name': self.name})
boot_device = self.cmdline('BOOT', 'get')
return boot_device
def set_boot_device(self, bootdev):
LOG.info('Set boot device called for %(name)s with boot device "%(bootdev)s"',
{'name': self.name, 'bootdev': bootdev})
self.cmdline('BOOT', 'set', {'bootdev': bootdev})
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import subprocess
from time import sleep
import pyghmi.ipmi.bmc as bmc
from shipmi import log
from shipmi.exception import VirtualBMCCommandFailed
from shipmi.provider import get_provider
LOG = log.get_logger()
class VirtualBMC(bmc.Bmc):
def __init__(self, username, password, port, address, name, provider, **kwargs):
super(VirtualBMC, self).__init__({username: password},
port=port, address=address)
self.name = name
self.provider_config = get_provider(provider)
def cmdline(self, section, option, kwargs=None):
cmd = self.provider_config.get(section, option)
if not cmd:
raise NotImplementedError
workingdir = os.path.dirname(self.provider_config.path)
substitutions = {'name': self.name}
if kwargs:
substitutions.update(kwargs)
cmdline = ['sh', '-c', cmd % substitutions]
LOG.debug('Cmdline arguments: %(cmdline)s', {'cmdline': cmdline})
process = subprocess.run(cmdline,
cwd=workingdir,
stdout=subprocess.PIPE,
universal_newlines=True)
if process.returncode != 0:
raise VirtualBMCCommandFailed(command=' '.join(cmdline), exitcode=process.returncode)
output = process.stdout.strip()
LOG.debug('Cmdline output : %(output)s', {'output': output})
return output
def cold_reset(self):
LOG.info('BMC reset called for VirtualBMC %(name)s', {'name': self.name})
def power_off(self):
LOG.info('Power off called for %(name)s', {'name': self.name})
self.cmdline('POWER', 'off')
def power_on(self):
LOG.info('Power on called for %(name)s', {'name': self.name})
self.cmdline('POWER', 'on')
def power_cycle(self):
self.power_off()
for i in range(10):
if self.get_power_state() == 'off':
break
else:
sleep(1)
self.power_on()
def power_reset(self):
LOG.info('Power reset called for %(name)s', {'name': self.name})
self.cmdline('POWER', 'reset')
def pulse_diag(self):
LOG.info('Power diag called for %(name)s', {'name': self.name})
self.cmdline('POWER', 'diag')
def power_shutdown(self):
LOG.info('Soft power off called for %(name)s', {'name': self.name})
self.cmdline('POWER', 'shutdown')
def get_power_state(self):
LOG.info('Get power state called for %(name)s', {'name': self.name})
return self.cmdline('POWER', 'status')
def is_active(self):
return self.get_power_state() == 'on'
def get_boot_device(self):
LOG.info('Get boot device called for %(name)s', {'name': self.name})
boot_device = self.cmdline('BOOT', 'get')
return boot_device
def set_boot_device(self, bootdev):
LOG.info('Set boot device called for %(name)s with boot device "%(bootdev)s"',
{'name': self.name, 'bootdev': bootdev})
self.cmdline('BOOT', 'set', {'bootdev': bootdev})
| en | 0.859654 | # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. | 1.869004 | 2 |
SimNetReportParser.py | jcausey-astate/SimNet-Report-Parser | 0 | 6619019 | <reponame>jcausey-astate/SimNet-Report-Parser
#!/usr/bin/env python
#
# SimNetExamReportParser.py
#
# <NAME> 2009-2016 <EMAIL>
#
# Parses a SimNet exam, lesson, and project report (.csv) files and
# produces a corresponding .csv file with one line per student, such that
# all assignments and attempts for each assignment are listed (grouped by
# assignment type and assignment title) on the student's row.
#
# Usage:
# SimNetExamReportParser.py
################################################################################
import sys
import csv
import os.path
from Tkinter import *
import tkMessageBox
from tkColorChooser import askcolor
from tkFileDialog import askopenfilename, asksaveasfilename
# getInputFile will show a "File Open" dialog, returning the filename
# of the .csv file.
def getInputFile(prompt):
    """Show an "Open" dialog and return the chosen .csv filename.

    `prompt` names the kind of SimNet report being requested; it is
    appended to a "Choose SimNet " dialog title.  Returns '' if the
    user cancels the dialog.
    """
    # Warn the user first; Tk dialogs can open behind the terminal.
    print('Please use the "Open" dialog to choose the input file.')
    print("NOTE: The dialog may appear behind this terminal window.")
    print('')
    file_types = [('CSV Files', '.csv')]
    dialog_title = "Choose SimNet " + prompt
    return askopenfilename(title=dialog_title, filetypes=file_types)
# getOutputFile will show a "File Save" dialog, returning the filename
# of the .csv file.
def getOutputFile():
    """Show a "Save As" dialog and return the chosen output filename.

    Returns '' if the user cancels the dialog.
    """
    # Warn the user first; Tk dialogs can open behind the terminal.
    print('Please use the "Save As" dialog to choose the output file.')
    print("NOTE: The dialog may appear behind this terminal window.")
    print('')
    file_types = [('CSV Files', '.csv')]
    return asksaveasfilename(title="Save Output File As", filetypes=file_types)
# Makes a good (easily sorted) key from a string by making it all lower-case,
# removing whitespace, and removing periods.
def cleanKey(key):
    """Normalize a string into a sortable lookup key.

    Lower-cases the string, strips leading whitespace, then removes
    every space and period.  (Other characters -- including interior
    tabs -- are kept as-is.)
    """
    normalized = key.lower().lstrip()
    for unwanted in (' ', '.'):
        normalized = normalized.replace(unwanted, '')
    return normalized
def readLessonFile(file):
    """Parse a SimNet lesson report (.csv) into a dict of lookup tables.

    CSV columns (after the header row):
      StudentID,LastName,FirstName,Title,Minutes,Date,Date,
      NumberComplete,TotalTasks,PercentComplete

    Returns a dict (empty if `file` is '' or lacks a .csv extension) with:
      'records'  -- raw CSV rows grouped by student ID
      'titles'   -- {cleanedTitle: {'title': originalTitle}}; NOTE the
                    nested dict here, unlike the exam/project readers
      'possible' -- total task count per cleaned title
      'earned'   -- tasks completed, keyed [studentID][cleanedTitle]
      'percent'  -- percent complete, keyed [studentID][cleanedTitle]
      'names'    -- {cleanedLastFirstName: studentID} for sorted output
    """
    lessonInfo = {}
    # If we got a filename (with a .csv extension), process it.
    if(file != '' and file.rfind('.csv') != -1):
        # "rU" is Python 2 universal-newline mode; sniff the CSV dialect
        # from the first 2KB, then rewind and parse the whole file.
        csvfile = open(file, "rU")
        ourDialect = csv.Sniffer().sniff(csvfile.read(2048))
        csvfile.seek(0)
        reader = csv.reader(csvfile, dialect=ourDialect)
        lineNo = 0 # Count lines
        records = {} # Full record for each item
        titles = {} # The titles themselves, keyed by a cleaned version.
        names = {} # To get a sorted list of names for output
        possible = {} # Total tasks by title
        earned = {} # Points earned by title and student ID.
        percent = {} # Stores percent by title and student ID.
        # Watch for each new lesson name; this info will be used in
        # creating the output table later.
        for line in reader:
            # Ignore header line and put lines in a dict:
            # Lines are of the form:
            #StudentID,LastName,FirstName,Title,Minutes,Date,Date,NumberComplete,TotalTasks,PercentComplete
            #    0    ,    1   ,    2    ,  3  ,   4   ,  5 ,  6 ,      7       ,    8     ,      9
            if lineNo > 0:
                SID = str(line[0])
                # Add this line to the proper student's record (by ID).
                if(not SID in records.keys()):
                    records[SID] = []
                records[SID].append(line)
                # Add this lesson title to the lesson's record:
                # Lessons only have one attempt...
                key = cleanKey(str(line[3]))
                if(not str(key) in titles.keys()):
                    titles[key] = {}
                    titles[key]['title'] = str(line[3])
                if(not SID in percent.keys()):
                    percent[SID] = {}
                percent[SID][key] = line[9] # Store percent by ID and title.
                if(not SID in earned.keys()):
                    earned[SID] = {}
                earned[SID][key] = line[7] # Store earned by ID and title.
                if(not key in possible.keys()):
                    possible[key] = line[8] # Store possible by title
                # Add this student's name to the names list as a key. Value is
                # the ID number (used for alphabetical reverse-mapping).
                if(not str(line[1])+str(line[2]) in names.keys()):
                    key = str(line[1])+str(line[2])
                    #clean up the name to make a good alphabetize-able key:
                    key = cleanKey(key)
                    names[key] = SID
            else:
                # The first line is headers.
                headers = line
            lineNo = lineNo + 1
        csvfile.close() # We're done with this file.
        lessonInfo['records'] = records
        lessonInfo['titles'] = titles
        lessonInfo['possible'] = possible
        lessonInfo['earned'] = earned
        lessonInfo['percent'] = percent
        lessonInfo['names'] = names
    return lessonInfo
def readExamFile(file):
    """Parse a SimNet exam report (.csv) into a dict of lookup tables.

    CSV columns (after the header row):
      StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,ExamStarted,
      ExamSpan(d.hh:mm:ss),ExamEnded,NumberCorrect,TotalQuestions,
      PercentCorrect,NumberPoints,TotalPoints,PercentPoints,Status

    Returns a dict (empty if `file` is '' or lacks a .csv extension) with:
      'records'  -- raw CSV rows grouped by student ID
      'titles'   -- {cleanedTitle: originalTitle}
      'attempts' -- highest attempt number seen per cleaned title
      'possible' -- per-title value taken from column 11 (TotalQuestions)
      'names'    -- {cleanedLastFirstName: studentID} for sorted output
    """
    examInfo = {}
    # If we got a filename (with a .csv extension), process it.
    if(file != '' and file.rfind('.csv') != -1):
        # "rU" is Python 2 universal-newline mode; sniff the CSV dialect
        # from the first 2KB, then rewind and parse the whole file.
        csvfile = open(file, "rU")
        ourDialect = csv.Sniffer().sniff(csvfile.read(2048))
        csvfile.seek(0)
        reader = csv.reader(csvfile, dialect=ourDialect)
        lineNo = 0 # Count lines
        records = {} # Full record for each item
        attempts = {} # To keep track of highest value of attempts per title
        titles = {} # The titles themselves, keyed by a cleaned version.
        possible = {} # Points possible, by exam
        names = {} # To get a sorted list of names for output
        # We need to watch for each new exam name and also find the largest
        # number of attempts for each. This info will be used in creating the
        # output table later.
        for line in reader:
            # Ignore header line and put lines in a dict:
            # Lines are of the form:
            #StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,ExamStarted,ExamSpan(d.hh:mm:ss),ExamEnded,NumberCorrect,TotalQuestions,PercentCorrect,NumberPoints,TotalPoints,PercentPoints,Status
            #    0    ,    1   ,    2    ,  3  ,   4   ,   5   ,  6 ,     7     ,         8          ,    9     ,     10      ,      11      ,      12      ,     13     ,     14    ,      15     ,  16
            # The "status" column is ignored: partially-earned points are
            # kept rather than zeroed.
            if lineNo > 0:
                # Add this line to the proper student's record (by ID).
                SID = str(line[0])
                if(not SID in records.keys()):
                    records[SID] = []
                records[SID].append(line)
                # Register this exam title the first time it is seen.
                title_key = cleanKey(str(line[3]))
                if(not title_key in titles.keys()):
                    key = title_key
                    attempts[key] = 1
                    titles[key] = str(line[3])
                # If we see a new highest attempt number, that is the new max
                # value stored at attempts[examname].
                if(int(line[4]) > int(attempts[cleanKey(str(line[3]))])):
                    attempts[cleanKey(str(line[3]))] = int(line[4])
                # Add this student's name to the names list as a key. Value is
                # the ID number (used for alphabetical reverse-mapping).
                if(not str(line[1])+str(line[2]) in names.keys()):
                    key = str(line[1])+str(line[2])
                    #clean up the name to make a good alphabetize-able key:
                    key = cleanKey(key)
                    names[key] = SID
                if(not title_key in possible.keys()):
                    # NOTE(review): column 11 is TotalQuestions, not
                    # TotalPoints (column 14) -- confirm this is the value
                    # intended for the points-possible output row.
                    possible[title_key] = line[11]
            else:
                # The first line is headers.
                headers = line
            lineNo = lineNo + 1
        csvfile.close() # We're done with this file.
        examInfo['records'] = records
        examInfo['attempts'] = attempts
        examInfo['titles'] = titles
        examInfo['possible'] = possible
        examInfo['names'] = names
    return examInfo
def readProjectFile(file):
    """Parse a SimNet project report (.csv) into a dict of lookup tables.

    CSV columns (after the header row):
      StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,Points,
      TotalPoints,Percent,Status

    Returns a dict (empty if `file` is '' or lacks a .csv extension) with:
      'records'  -- raw CSV rows grouped by student ID
      'titles'   -- {cleanedTitle: originalTitle}
      'attempts' -- highest attempt number seen per cleaned title
      'percent'  -- percent score keyed [studentID][cleanedTitle]
      'possible' -- points possible per cleaned title
      'names'    -- {cleanedLastFirstName: studentID} for sorted output
    """
    projectInfo = {}
    # If we got a filename (with a .csv extension), process it.
    if(file != '' and file.rfind('.csv') != -1):
        # "rU" is Python 2 universal-newline mode; sniff the CSV dialect
        # from the first 2KB, then rewind and parse the whole file.
        csvfile = open(file, "rU")
        ourDialect = csv.Sniffer().sniff(csvfile.read(2048))
        csvfile.seek(0)
        reader = csv.reader(csvfile, dialect=ourDialect)
        lineNo = 0 # Row counter; row 0 is the header.
        records = {} # Full record list per student ID.
        attempts = {} # Highest attempt number per cleaned title.
        titles = {} # Original titles, keyed by cleaned title.
        names = {} # Cleaned "LastFirst" name -> student ID.
        possible = {} # Points possible per cleaned title.
        percent = {} # Percent score by [student ID][cleaned title].
        for line in reader:
            # Lines are of the form:
            #StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,Points,TotalPoints,Percent,Status
            #    0    ,    1   ,    2    ,  3  ,   4   ,   5   ,  6 ,   7  ,     8     ,   9   ,  10
            if lineNo > 0:
                SID = str(line[0])
                # Add this line to the proper student's record (by ID).
                if(not SID in records.keys()):
                    records[SID] = []
                records[SID].append(line)
                # Register this project title the first time it is seen.
                title_key = cleanKey(str(line[3]))
                if(not title_key in titles.keys()):
                    attempts[title_key] = 1
                    titles[title_key] = str(line[3])
                # Track the highest attempt number seen for this title.
                if(int(line[4]) > int(attempts[title_key])):
                    attempts[title_key] = int(line[4])
                # BUGFIX: index by title_key.  The old code reused a stale
                # 'key' variable, which on repeat titles still held a value
                # from an earlier iteration (possibly a cleaned student
                # name), filing the percent under the wrong key.
                if(not SID in percent.keys()):
                    percent[SID] = {}
                percent[SID][title_key] = line[9] # Percent by ID and title.
                if(not title_key in possible.keys()):
                    possible[title_key] = line[8] # Points possible by title.
                # Add this student's name to the names list as a key. Value is
                # the ID number (used for alphabetical reverse-mapping).
                if(not str(line[1])+str(line[2]) in names.keys()):
                    name_key = cleanKey(str(line[1])+str(line[2]))
                    names[name_key] = SID
            else:
                headers = line # The first line is headers.
            lineNo = lineNo + 1
        csvfile.close() # We're done with this file.
        projectInfo['records'] = records
        projectInfo['titles'] = titles
        projectInfo['attempts'] = attempts
        projectInfo['percent'] = percent
        # BUGFIX: 'possible' was built but never stored, yet
        # writeCombinedFile reads projectInfo['possible'] when emitting
        # the points-possible row (KeyError in points mode without this).
        projectInfo['possible'] = possible
        projectInfo['names'] = names
    return projectInfo
def writeCombinedFile(file, lessonInfo, examInfo, projectInfo, takeHighestExam, selectPointsOrCorrect, takeHighestProject, missingScoreMark = "", usePoints=False):
# PRE-PROCESS: Sort the student names list and exam names list:
#First make sure we have the 'names' key in both examInfo and lessonInfo and projectInfo;
if(not 'names' in lessonInfo.keys()):
lessonInfo['names'] = {}
if(not 'names' in examInfo.keys()):
examInfo['names'] = {}
if(not 'names' in projectInfo.keys()):
projectInfo['names'] = {}
#Now do the same for 'titles'
if(not 'titles' in lessonInfo.keys()):
lessonInfo['titles'] = {}
if(not 'titles' in examInfo.keys()):
examInfo['titles'] = {}
if(not 'titles' in projectInfo.keys()):
projectInfo['titles'] = {}
# Merge names from exams and lessons into a single list of names:
d = {}
for k in examInfo['names'].keys(), lessonInfo['names'].keys(), projectInfo['names'].keys():
for x in k:
d[x] = 1
sortedNames = d.keys()
del d
sortedNames.sort()
sortedExamTitles = examInfo['titles'].keys()
sortedExamTitles.sort()
sortedLessonTitles = lessonInfo['titles'].keys()
sortedLessonTitles.sort()
sortedProjectTitles = projectInfo['titles'].keys()
sortedProjectTitles.sort()
# BEGIN OUTPUT PHASE:
# Get the output file going and do output.
if(file != ''):
# Ensure a .csv extension.
if(file.rfind('.csv') == -1):
file = file + '.csv'
# Open the output file:
csvfile = csv.writer(open(file, 'w'), quoting=csv.QUOTE_ALL)
# Input data order (exams)
#StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,ExamStarted,ExamSpan(d.hh:mm:ss),ExamEnded,NumberCorrect,TotalQuestions,PercentCorrect,NumberPoints,TotalPoints,PercentPoints,Status
# 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16
# Data for lessons is all in lesson structure:
# titles, names, percent
# percent is stored by [ID][titleKey]
examPctColumn = 12 # Use "percent correct" by default (instructor can edit this one)
if(selectPointsOrCorrect == True):
examPctColumn = 15 # Use "percent points" instead (instructor CANNOT edit this field)
# Output to a new CSV file such that each student (by ID) has a single
# row. Each row has:
# StudentID,LastName,FirstName,Lesson1....LessonN,Exam1attempt1...attamptN,...ExamNAttempt1...attemptN
# The first line will be headers. Build them. The headers will
# Depend on the lessons, exams, and number of attempts for each exam.
outputHeaders = []
outputHeaders.append("Student ID")
outputHeaders.append("<NAME>")
outputHeaders.append("<NAME>")
# Lessons first
for key in sortedLessonTitles:
outputHeaders.append(str(lessonInfo['titles'][key]['title']))
# Then projects
for key in sortedProjectTitles:
nAttempts = projectInfo['attempts'][key]
currentAttempt = 0
if(nAttempts > 1 and not takeHighestProject):
while(currentAttempt < nAttempts):
outputHeaders.append(str(projectInfo['titles'][key]) + str(" [Attempt ") \
+ str(currentAttempt + 1) + "]")
currentAttempt += 1
else:
outputHeaders.append(str(projectInfo['titles'][key]))
# Then exams
for key in sortedExamTitles:
nAttempts = examInfo['attempts'][key]
currentAttempt = 0
if(nAttempts > 1 and not takeHighestExam):
while(currentAttempt < nAttempts):
outputHeaders.append(str(examInfo['titles'][key]) + str(" [Attempt ") \
+ str(currentAttempt + 1) + "]")
currentAttempt += 1
else:
outputHeaders.append(str(examInfo['titles'][key]))
csvfile.writerow(outputHeaders)
# If we want points-based output, we need a row for max points for each assignment:
if usePoints:
maxPtsRow = []
maxPtsRow.append("Pts. Possible") # SID (use as label)
maxPtsRow.append("") # Last Name (empty cell)
maxPtsRow.append("") # First Name (empty cell)
# Lessons first
for key in sortedLessonTitles:
maxPtsRow.append(str(lessonInfo['possible'][key]))
# Then projects
for key in sortedProjectTitles:
nAttempts = projectInfo['attempts'][key]
currentAttempt = 0
if(nAttempts > 1 and not takeHighestProject):
while(currentAttempt < nAttempts):
maxPtsRow.append(str(projectInfo['possible'][key]))
currentAttempt += 1
else:
maxPtsRow.append(str(projectInfo['possible'][key]))
# Then exams
for key in sortedExamTitles:
nAttempts = examInfo['attempts'][key]
currentAttempt = 0
if(nAttempts > 1 and not takeHighestExam):
while(currentAttempt < nAttempts):
maxPtsRow.append(str(examInfo['possible'][key]))
currentAttempt += 1
else:
maxPtsRow.append(str(examInfo['possible'][key]))
csvfile.writerow(maxPtsRow)
# For each student (in sorted order), create exactly 1 row:
for name in sortedNames:
outputrow = []
SID = ''
# Each row has:
# StudentID,LastName,FirstName,Lesson1...LessonN,Project1attempt1..attemptN...ProjectNattempt1,...attemptN,Exam1attempt1...attamptN,...ExamNAttempt1...attemptN
if(len(examInfo) > 0 and name in examInfo['names'].keys()):
outputrow.append(examInfo['records'][examInfo['names'][name]][0][0])
outputrow.append(examInfo['records'][examInfo['names'][name]][0][1])
outputrow.append(examInfo['records'][examInfo['names'][name]][0][2])
SID = examInfo['records'][examInfo['names'][name]][0][0]
elif(len(projectInfo) > 0 and name in projectInfo['names'].keys()):
outputrow.append(projectInfo['records'][projectInfo['names'][name]][0][0])
outputrow.append(projectInfo['records'][projectInfo['names'][name]][0][1])
outputrow.append(projectInfo['records'][projectInfo['names'][name]][0][2])
SID = projectInfo['records'][projectInfo['names'][name]][0][0]
elif(len(lessonInfo) > 0):
outputrow.append(lessonInfo['records'][lessonInfo['names'][name]][0][0])
outputrow.append(lessonInfo['records'][lessonInfo['names'][name]][0][1])
outputrow.append(lessonInfo['records'][lessonInfo['names'][name]][0][2])
SID = lessonInfo['records'][lessonInfo['names'][name]][0][0]
#StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,Points,TotalPoints,Percent,Status
# 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10
# For each lesson, output its percent points (or points):
for key in sortedLessonTitles:
if(SID in lessonInfo['percent'].keys() and key in lessonInfo['percent'][SID].keys()):
scoreKey = 'percent' if not usePoints else 'earned'
outputrow.append(lessonInfo[scoreKey][SID][key])
else:
outputrow.append(missingScoreMark)
# Get the list of Percent Points in order for this Project title:
if(len(projectInfo['titles']) > 0):
ppts = {}
for key in sortedProjectTitles:
ppts[key] = {}
if(name in projectInfo['names'].keys()):
for record in projectInfo['records'][projectInfo['names'][name]]:
scoreKey = 9 if not usePoints else 7
key = cleanKey(record[3])
attempt = record[4]
ppts[key][attempt] = record[scoreKey]
# Now output the PercentPoints field for each project title:
for key in sortedProjectTitles:
nAttempts = projectInfo['attempts'][key]
currentAttempt = 1
if(not takeHighestProject):
while(currentAttempt <= nAttempts):
if(str(currentAttempt) in ppts[key].keys()):
outputrow.append(ppts[key][str(currentAttempt)])
else:
outputrow.append(missingScoreMark)
currentAttempt += 1
else:
highest = None
currentAttempt = 1
if(str(currentAttempt) in ppts[key].keys() and ppts[key][str(currentAttempt)] != ''):
highest = ppts[key][str(currentAttempt)]
currentAttempt += 1
while(currentAttempt <= nAttempts):
if(str(currentAttempt) in ppts[key].keys()):
# Project attempt scores can be empty, be careful of that:
if(highest == None or (ppts[key][str(currentAttempt)] != '' and float(ppts[key][str(currentAttempt)]) > float(highest))):
highest = ppts[key][str(currentAttempt)]
currentAttempt += 1
if(highest != None):
if highest == '':
print("Error: blank score for {0}".format(currentAttempt))
outputrow.append(highest)
else:
outputrow.append(missingScoreMark)
#StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,ExamStarted,ExamSpan(d.hh:mm:ss),ExamEnded,NumberCorrect,TotalQuestions,PercentCorrect,NumberPoints,TotalPoints,PercentPoints,Status
# 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16
# Get the list of Percent Points in order for this exam title:
if(len(examInfo['titles']) > 0):
ppts = {}
for key in sortedExamTitles:
ppts[key] = {}
if(name in examInfo['names'].keys()):
for record in examInfo['records'][examInfo['names'][name]]:
scoreKey = examPctColumn if not usePoints else 13
key = cleanKey(record[3])
attempt = record[4]
ppts[key][attempt] = record[scoreKey]
if round((float(record[examPctColumn]) / 100.0) * float(record[14])) > ppts[key][attempt]:
print("Error: higher score for ({0} {1} {2}: {3}) according to Percent than for points".format(record[0], record[1], record[2], record[3]))
# Now output the PercentPoints field for each exam title:
for key in sortedExamTitles:
nAttempts = examInfo['attempts'][key]
currentAttempt = 1
if(not takeHighestExam):
while(currentAttempt <= nAttempts):
if(str(currentAttempt) in ppts[key].keys()):
outputrow.append(ppts[key][str(currentAttempt)])
else:
outputrow.append(missingScoreMark)
currentAttempt += 1
else:
highest = None
currentAttempt = 1
if(str(currentAttempt) in ppts[key].keys()):
highest = ppts[key][str(currentAttempt)]
currentAttempt += 1
while(currentAttempt <= nAttempts):
if(str(currentAttempt) in ppts[key].keys()):
if(highest == None or float(ppts[key][str(currentAttempt)]) > float(highest)):
highest = ppts[key][str(currentAttempt)]
currentAttempt += 1
if(highest != None):
outputrow.append(highest)
else:
outputrow.append(missingScoreMark)
csvfile.writerow(outputrow)
return True
# If the user doesn't choose an output file, we can't continue.
else:
return False
class SNRParser(Frame):
    """Tkinter GUI front-end for the SimNet report parser.

    Lets the instructor pick up to three SimNet report CSVs (exam,
    lesson, project), choose output options, and generate the combined
    one-row-per-student CSV via writeCombinedFile().
    """
    def __init__(self, master=None):
        """Build the frame, lay it out with grid(), and create all widgets."""
        self.lessonFileName = ""
        self.examFileName = ""
        self.projectFileName = ""
        Frame.__init__(self, master)
        self.grid()
        self.createWidgets()
    def reInit(self):
        """Reset the form for another run: clear filenames, empty the
        entry boxes, and disable the Generate button again."""
        self.lessonFileName = ""
        self.examFileName = ""
        self.projectFileName = ""
        self.examNameBox.delete(0,END)
        self.lessonNameBox.delete(0,END)
        self.projectNameBox.delete(0,END)
        self.goButton.configure(state=DISABLED)
    def createWidgets(self):
        """Create and grid all labels, entries, checkboxes and buttons.

        The Generate button starts DISABLED and is enabled as soon as at
        least one input file has been chosen (see the get*Name methods).
        """
        instText = "Choose Exam, Lesson, and/or Project reports below\n"
        instText += "then, click \"Generate!\" to create the output\n"
        instText += "workbook.\n"
        self.lessonFileName = ""
        self.examFileName = ""
        self.projectFileName = ""
        self.instructions = Label(self, text=instText, justify=LEFT)
        self.instructions.grid(columnspan=3, row=0)
        # Row 1: exam report picker.
        self.examNameLabel = Label(self, text="Exam Report:")
        self.examNameLabel.grid(column=0,row=1,sticky=W)
        self.examNameBox = Entry(self)
        self.examNameBox.grid(column=1,row=1)
        self.getExamNameButton = Button(self, text="Browse", command=self.getExamName)
        self.getExamNameButton.grid(column=2, row=1)
        # Rows 2-3: exam options.
        self.examTakeHighestAttempt = IntVar()
        self.examTakeHighestAttemptCheckbox = Checkbutton(self, text="Keep only the best exam attempt.", variable=self.examTakeHighestAttempt)
        self.examTakeHighestAttemptCheckbox.grid(column=0,row=2,sticky=W,padx=25, columnspan=3)
        self.usePctPoints = BooleanVar()
        self.examUsePctPointsCheckbox = Checkbutton(self, text="Use % Points column not % Correct (DANGER).", variable=self.usePctPoints, command=self.warnPctPoints)
        self.examUsePctPointsCheckbox.grid(column=0,row=3,sticky=W,padx=25, columnspan=3)
        # Row 4: lesson report picker.
        self.lessonNameLabel = Label(self, text="Lesson Report:")
        self.lessonNameLabel.grid(column=0,row=4,sticky=W)
        self.lessonNameBox = Entry(self)
        self.lessonNameBox.grid(column=1,row=4)
        self.getLessonNameButton = Button(self, text="Browse", command=self.getLessonName)
        self.getLessonNameButton.grid(column=2,row=4)
        # Rows 5-6: project report picker and option.
        self.projectNameLabel = Label(self, text="Project Report:")
        self.projectNameLabel.grid(column=0,row=5,sticky=W)
        self.projectNameBox = Entry(self)
        self.projectNameBox.grid(column=1,row=5)
        self.getProjectNameButton = Button(self, text="Browse", command=self.getProjectName)
        self.getProjectNameButton.grid(column=2, row=5)
        self.projectTakeHighestAttempt = IntVar()
        self.projectTakeHighestAttemptCheckbox = Checkbutton(self, text="Keep only the best project attempt.", variable=self.projectTakeHighestAttempt)
        self.projectTakeHighestAttemptCheckbox.grid(column=0,row=6,sticky=W,padx=25, columnspan=3)
        # Rows 7-8: global output options.
        self.usePoints = BooleanVar()
        self.usePointsCheckbox = Checkbutton(self, text="Use points, not percents.", variable=self.usePoints)
        self.usePointsCheckbox.grid(column=0,row=7,sticky=W, columnspan=3)
        self.missingScoreValueBox = Entry(self, width=10)
        self.missingScoreValueBox.grid(column=2,row=8, sticky=W)
        self.missingScoreLabel = Label(self, text="Insert this value for missing scores:")
        self.missingScoreLabel.grid(column=0,row=8,sticky=W, columnspan=2)
        self.goButton = Button ( self, text="Generate!",command=self.generate, state=DISABLED)
        self.goButton.grid(columnspan=3, row=9, rowspan=2, sticky=S, pady=15)
    def warnPctPoints(self):
        """Pop a warning dialog whenever the '% Points' checkbox is turned on."""
        if(self.usePctPoints.get() == True):
            tkMessageBox.showinfo("Percent Points Warning", "Due to a SimNet bug, using the \"Percent Points\" column may cause manually entered scores not to appear in the final report.")
    def getExamName(self):
        """Browse for the exam report; enable Generate and show the basename."""
        self.examFileName = getInputFile("Exam Report")
        if(self.examFileName != ''):
            self.goButton.configure(state=NORMAL)
            self.examNameBox.insert(0,os.path.basename(self.examFileName))
        else:
            # Dialog was cancelled: clear any previously shown filename.
            self.examNameBox.delete(0,END)
        self.examNameBox.update()
    def getLessonName(self):
        """Browse for the lesson report; enable Generate and show the basename."""
        self.lessonFileName = getInputFile("Lesson Report")
        if(self.lessonFileName != ''):
            self.goButton.configure(state=NORMAL)
            self.lessonNameBox.insert(0, os.path.basename(self.lessonFileName))
        else:
            self.lessonNameBox.delete(0, END)
        self.lessonNameBox.update()
    def getProjectName(self):
        """Browse for the project report; enable Generate and show the basename."""
        self.projectFileName = getInputFile("Project Report")
        if(self.projectFileName != ''):
            self.goButton.configure(state=NORMAL)
            self.projectNameBox.insert(0, os.path.basename(self.projectFileName))
        else:
            self.projectNameBox.delete(0, END)
        self.projectNameBox.update()
    def generate(self):
        """Parse the chosen reports, prompt for an output file, write the
        combined CSV, then offer to run again (or quit)."""
        lessonInfo = readLessonFile(self.lessonFileName)
        examInfo = readExamFile(self.examFileName)
        projectInfo = readProjectFile(self.projectFileName)
        outputFileName = getOutputFile()
        if(writeCombinedFile(outputFileName, lessonInfo, examInfo, projectInfo,
            self.examTakeHighestAttempt.get(), self.usePctPoints.get(), self.projectTakeHighestAttempt.get(),
            self.missingScoreValueBox.get(), self.usePoints.get())):
            self.msg = Message(self,text="Finished. Output file generated OK.")
            #self.msg.grid()
        else:
            self.msg = Message(self,text="No output file specified. Cannot continue.")
            #self.msg.grid()
        if(not tkMessageBox.askyesno("Finished", "Would you like to convert another file set?")):
            self.destroy()
            exit(0)
        else:
            self.reInit()
# Main execution:
if __name__ == "__main__":
    # Build the GUI, set the window title, and hand control to the Tk
    # event loop until the user closes the application.
    gui = SNRParser()
    gui.master.title("SimNet Report Parser")
    gui.mainloop()
| #!/usr/bin/env python
#
# SimNetExamReportParser.py
#
# <NAME> 2009-2016 <EMAIL>
#
# Parses a SimNet exam, lesson, and project report (.csv) files and
# produces a corresponding .csv file with one line per student, such that
# all assignments and attempts for each assignment are listed (grouped by
# assignment type and assignment title) on the student's row.
#
# Usage:
# SimNetExamReportParser.py
################################################################################
import sys
import csv
import os.path
from Tkinter import *
import tkMessageBox
from tkColorChooser import askcolor
from tkFileDialog import askopenfilename, asksaveasfilename
# getInputFile will show a "File Open" dialog, returning the filename
# of the .csv file.
def getInputFile(prompt):
    """Show a file-open dialog for a SimNet .csv report.

    prompt -- short report name (e.g. "Exam Report") appended to the
              dialog title.
    Returns the selected filename, or an empty value if the user cancels.
    """
    # Use the function-call form of print for consistency with the rest
    # of the file (see the error reporting in writeCombinedFile); this
    # form is valid in both Python 2 and Python 3.
    print('Please use the "Open" dialog to choose the input file.')
    print("NOTE: The dialog may appear behind this terminal window.")
    print("")
    mask = [('CSV Files', '.csv')]
    prompt = "Choose SimNet " + prompt
    filename = askopenfilename(title=prompt, filetypes=mask)
    return filename
# getOutputFile will show a "File Save" dialog, returning the filename
# of the .csv file.
def getOutputFile():
    """Show a file-save dialog for the combined output .csv.

    Returns the chosen filename, or an empty value if the user cancels
    (writeCombinedFile treats '' as "abort").
    """
    # Function-call form of print: consistent with the rest of the file
    # and valid in both Python 2 and Python 3.
    print('Please use the "Save As" dialog to choose the output file.')
    print("NOTE: The dialog may appear behind this terminal window.")
    print("")
    mask = [('CSV Files', '.csv')]
    filename = asksaveasfilename(title="Save Output File As", filetypes=mask)
    return filename
# Makes a good (easily sorted) key from a string by making it all lower-case,
# removing whitespace, and removing periods.
def cleanKey(key):
    """Normalize *key* into an easily sorted lookup key: lower-cased,
    leading whitespace stripped, and all spaces and periods removed."""
    normalized = key.lower().lstrip()
    for unwanted in (' ', '.'):
        normalized = normalized.replace(unwanted, '')
    return normalized
def readLessonFile(file):
    """Parse a SimNet lesson report CSV into lookup tables.

    Returns a dict with keys 'records', 'titles', 'possible', 'earned',
    'percent' and 'names' (see inline notes), or an empty dict when no
    filename / a non-.csv filename was supplied.
    """
    lessonInfo = {}
    # If we got a filename (with a .csv extension), process it.
    if(file != '' and file.rfind('.csv') != -1):
        csvfile = open(file, "rU")
        # Let the csv module sniff the dialect from a sample, then rewind
        # so the reader sees the whole file from the top.
        ourDialect = csv.Sniffer().sniff(csvfile.read(2048))
        csvfile.seek(0)
        reader = csv.reader(csvfile, dialect=ourDialect)
        lineNo = 0 # Count lines
        records = {} # Full record for each item
        titles = {} # The titles themselves, keyed by a cleaned version.
        names = {} # To get a sorted list of names for output
        possible = {} # Total tasks by title
        earned = {} # Points earned by title and student ID.
        percent = {} # Stores percent by title and student ID.
        # We need to watch for each new lesson name and also find the largest
        # number of attempts for each. This info will be used in creating the
        # output table later.
        for line in reader:
            # Ignore header line and put lines in a dict:
            # Lines are of the form:
            #StudentID,LastName,FirstName,Title,Minutes,Date,Date,NumberComplete,TotalTasks,PercentComplete
            # 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9
            if lineNo > 0:
                SID = str(line[0])
                # Add this line to the proper student's record (by ID).
                if(not SID in records.keys()):
                    records[SID] = []
                records[SID].append(line)
                # Add this lesson title to the lesson's record:
                # Lessons only have one attempt...
                key = cleanKey(str(line[3]))
                if(not str(key) in titles.keys()):
                    titles[key] = {}
                    # NOTE: lesson titles are nested one level deeper
                    # (titles[key]['title']) than exam/project titles;
                    # writeCombinedFile relies on this shape.
                    titles[key]['title'] = str(line[3])
                if(not SID in percent.keys()):
                    percent[SID] = {}
                percent[SID][key] = line[9] # Store percent by ID and title.
                if(not SID in earned.keys()):
                    earned[SID] = {}
                earned[SID][key] = line[7] # Store earned by ID and title.
                if(not key in possible.keys()):
                    possible[key] = line[8] # Store possible by title
                # Add this student's name to the names list as a key. Value is
                # the ID number (used for alphabetical reverse-mapping).
                # (This reuses the name 'key' -- after this point it holds
                # the cleaned NAME, not the title.)
                if(not str(line[1])+str(line[2]) in names.keys()):
                    key = str(line[1])+str(line[2])
                    #clean up the name to make a good alphabetize-able key:
                    key = cleanKey(key)
                    names[key] = SID
            else:
                # The first line is headers.
                headers = line
            lineNo = lineNo + 1
        csvfile.close() # We're done with this file.
        lessonInfo['records'] = records
        lessonInfo['titles'] = titles
        lessonInfo['possible'] = possible
        lessonInfo['earned'] = earned
        lessonInfo['percent'] = percent
        lessonInfo['names'] = names
    return lessonInfo
def readExamFile(file):
    """Parse a SimNet exam report CSV into lookup tables.

    Returns a dict with keys 'records' (raw rows per student ID),
    'attempts' (highest attempt number per title), 'titles' (original
    title text per cleaned title key), 'possible' (total points per
    title) and 'names' (cleaned "LastFirst" -> student ID), or an empty
    dict when no filename / a non-.csv filename was supplied.
    """
    examInfo = {}
    # If we got a filename (with a .csv extension), process it.
    if(file != '' and file.rfind('.csv') != -1):
        csvfile = open(file, "rU")
        # Sniff the dialect from a sample, then rewind for the real read.
        ourDialect = csv.Sniffer().sniff(csvfile.read(2048))
        csvfile.seek(0)
        reader = csv.reader(csvfile, dialect=ourDialect)
        lineNo = 0   # Count lines
        records = {} # Full record for each item
        attempts = {} # To keep track of highest value of attempts per title
        titles = {}  # The titles themselves, keyed by a cleaned version.
        possible = {} # Points possible, by exam
        names = {}   # To get a sorted list of names for output
        # We need to watch for each new exam name and also find the largest
        # number of attempts for each. This info will be used in creating the
        # output table later.
        for line in reader:
            # Ignore header line and put lines in a dict:
            # Lines are of the form:
            #StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,ExamStarted,ExamSpan(d.hh:mm:ss),ExamEnded,NumberCorrect,TotalQuestions,PercentCorrect,NumberPoints,TotalPoints,PercentPoints,Status
            # 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16
            # For now we ignore the "status"... it seems better to give students the
            # points they've "partially" earned instead of a zero...
            if lineNo > 0:
                # Add this line to the proper student's record (by ID).
                SID = str(line[0])
                if(not SID in records.keys()):
                    records[SID] = []
                records[SID].append(line)
                # Add this exam title to the exam's record:
                title_key = cleanKey(str(line[3]))
                if(not title_key in titles.keys()):
                    attempts[title_key] = 1
                    titles[title_key] = str(line[3])
                # If we see a new highest attempt number, that is the new max
                # value stored at attempts[examname].
                if(int(line[4]) > int(attempts[title_key])):
                    attempts[title_key] = int(line[4])
                # Add this student's name to the names list as a key. Value is
                # the ID number (used for alphabetical reverse-mapping).
                if(not str(line[1])+str(line[2]) in names.keys()):
                    #clean up the name to make a good alphabetize-able key:
                    nameKey = cleanKey(str(line[1])+str(line[2]))
                    names[nameKey] = SID
                if(not title_key in possible.keys()):
                    # BUG FIX: column 11 is TotalQuestions.  The points-
                    # possible value must come from column 14 (TotalPoints)
                    # so it matches the per-student NumberPoints (column 13)
                    # used by writeCombinedFile in points-based output --
                    # the same earned/possible pairing the lesson and
                    # project readers use.
                    possible[title_key] = line[14]
            else:
                # The first line is headers.
                headers = line
            lineNo = lineNo + 1
        csvfile.close() # We're done with this file.
        examInfo['records'] = records
        examInfo['attempts'] = attempts
        examInfo['titles'] = titles
        examInfo['possible'] = possible
        examInfo['names'] = names
    return examInfo
def readProjectFile(file):
    """Parse a SimNet project report CSV into lookup tables.

    Returns a dict with keys 'records', 'titles', 'attempts', 'percent',
    'possible' and 'names', or an empty dict when no filename / a
    non-.csv filename was supplied.
    """
    projectInfo = {}
    # If we got a filename (with a .csv extension), process it.
    if(file != '' and file.rfind('.csv') != -1):
        csvfile = open(file, "rU")
        # Sniff the dialect from a sample, then rewind for the real read.
        ourDialect = csv.Sniffer().sniff(csvfile.read(2048))
        csvfile.seek(0)
        reader = csv.reader(csvfile, dialect=ourDialect)
        lineNo = 0   # Count lines
        records = {} # Full record for each item
        attempts = {} # To keep track of highest value of attempts per title
        titles = {}  # The titles themselves, keyed by a cleaned version.
        names = {}   # To get a sorted list of names for output
        possible = {} # Points possible
        percent = {} # Stores percent by title and student ID.
        for line in reader:
            # Ignore header line and put lines in a dict:
            # Lines are of the form:
            #StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,Points,TotalPoints,Percent,Status
            # 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10
            if lineNo > 0:
                # Add this line to the proper student's record (by ID).
                SID = str(line[0])
                if(not SID in records.keys()):
                    records[SID] = []
                records[SID].append(line)
                # Add this project title to the project's record:
                title_key = cleanKey(str(line[3]))
                if(not title_key in titles.keys()):
                    attempts[title_key] = 1
                    titles[title_key] = str(line[3])
                # If we see a new highest attempt number, that is the new max
                # value stored at attempts[projectname].
                if(int(line[4]) > int(attempts[title_key])):
                    attempts[title_key] = int(line[4])
                if(not SID in percent.keys()):
                    percent[SID] = {}
                # BUG FIX: this previously indexed with a stale 'key'
                # variable that was only assigned on a title's FIRST
                # occurrence (and was overwritten with a cleaned NAME on
                # every row), so repeat rows filed the percent under the
                # wrong key.  Always use the current row's title key.
                percent[SID][title_key] = line[9] # Store percent by ID and title.
                if(not title_key in possible.keys()):
                    possible[title_key] = line[8] # Store possible points by title.
                # Add this student's name to the names list as a key. Value is
                # the ID number (used for alphabetical reverse-mapping).
                if(not str(line[1])+str(line[2]) in names.keys()):
                    #clean up the name to make a good alphabetize-able key:
                    nameKey = cleanKey(str(line[1])+str(line[2]))
                    names[nameKey] = SID
            else:
                # The first line is headers.
                headers = line
            lineNo = lineNo + 1
        csvfile.close() # We're done with this file.
        projectInfo['records'] = records
        projectInfo['titles'] = titles
        projectInfo['attempts'] = attempts
        projectInfo['percent'] = percent
        # BUG FIX: 'possible' was built above but never returned, so the
        # "Pts. Possible" row in writeCombinedFile (points-based output)
        # crashed with a KeyError on projectInfo['possible'].
        projectInfo['possible'] = possible
        projectInfo['names'] = names
    return projectInfo
def writeCombinedFile(file, lessonInfo, examInfo, projectInfo, takeHighestExam, selectPointsOrCorrect, takeHighestProject, missingScoreMark = "", usePoints=False):
    """Write the merged one-row-per-student CSV report.

    Parameters:
      file -- output filename; '' aborts (returns False), and a '.csv'
              extension is appended if missing.
      lessonInfo, examInfo, projectInfo -- dicts produced by
              readLessonFile / readExamFile / readProjectFile (may be {}).
      takeHighestExam, takeHighestProject -- truthy: emit one column per
              exam/project holding only the best attempt, instead of one
              column per attempt.
      selectPointsOrCorrect -- True: read exam scores from the
              "PercentPoints" column (15) instead of "PercentCorrect" (12).
      missingScoreMark -- value written where a student has no score.
      usePoints -- True: write raw point values instead of percents, and
              emit an extra "Pts. Possible" row after the headers.

    Returns True on success, False when no output filename was given.

    NOTE: relies on Python 2 semantics -- dict.keys() returning a list
    that is then sorted in place with .sort().
    """
    # PRE-PROCESS: Sort the student names list and exam names list:
    #First make sure we have the 'names' key in both examInfo and lessonInfo and projectInfo;
    if(not 'names' in lessonInfo.keys()):
        lessonInfo['names'] = {}
    if(not 'names' in examInfo.keys()):
        examInfo['names'] = {}
    if(not 'names' in projectInfo.keys()):
        projectInfo['names'] = {}
    #Now do the same for 'titles'
    if(not 'titles' in lessonInfo.keys()):
        lessonInfo['titles'] = {}
    if(not 'titles' in examInfo.keys()):
        examInfo['titles'] = {}
    if(not 'titles' in projectInfo.keys()):
        projectInfo['titles'] = {}
    # Merge names from exams and lessons into a single list of names:
    # (a dict is used here purely as a set, to deduplicate)
    d = {}
    for k in examInfo['names'].keys(), lessonInfo['names'].keys(), projectInfo['names'].keys():
        for x in k:
            d[x] = 1
    sortedNames = d.keys()
    del d
    sortedNames.sort()
    sortedExamTitles = examInfo['titles'].keys()
    sortedExamTitles.sort()
    sortedLessonTitles = lessonInfo['titles'].keys()
    sortedLessonTitles.sort()
    sortedProjectTitles = projectInfo['titles'].keys()
    sortedProjectTitles.sort()
    # BEGIN OUTPUT PHASE:
    # Get the output file going and do output.
    if(file != ''):
        # Ensure a .csv extension.
        if(file.rfind('.csv') == -1):
            file = file + '.csv'
        # Open the output file:
        # NOTE(review): the file handle is never explicitly closed; it is
        # left to garbage collection when the writer goes out of scope.
        csvfile = csv.writer(open(file, 'w'), quoting=csv.QUOTE_ALL)
        # Input data order (exams)
        #StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,ExamStarted,ExamSpan(d.hh:mm:ss),ExamEnded,NumberCorrect,TotalQuestions,PercentCorrect,NumberPoints,TotalPoints,PercentPoints,Status
        # 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16
        # Data for lessons is all in lesson structure:
        # titles, names, percent
        # percent is stored by [ID][titleKey]
        examPctColumn = 12 # Use "percent correct" by default (instructor can edit this one)
        if(selectPointsOrCorrect == True):
            examPctColumn = 15 # Use "percent points" instead (instructor CANNOT edit this field)
        # Output to a new CSV file such that each student (by ID) has a single
        # row. Each row has:
        # StudentID,LastName,FirstName,Lesson1....LessonN,Exam1attempt1...attamptN,...ExamNAttempt1...attemptN
        # The first line will be headers. Build them. The headers will
        # Depend on the lessons, exams, and number of attempts for each exam.
        outputHeaders = []
        outputHeaders.append("Student ID")
        # NOTE(review): "<NAME>" below looks like an anonymization
        # placeholder for the last/first name column headers -- confirm
        # against the original source before shipping.
        outputHeaders.append("<NAME>")
        outputHeaders.append("<NAME>")
        # Lessons first
        for key in sortedLessonTitles:
            outputHeaders.append(str(lessonInfo['titles'][key]['title']))
        # Then projects: one column per attempt, unless only the best
        # attempt is kept (then a single column per project).
        for key in sortedProjectTitles:
            nAttempts = projectInfo['attempts'][key]
            currentAttempt = 0
            if(nAttempts > 1 and not takeHighestProject):
                while(currentAttempt < nAttempts):
                    outputHeaders.append(str(projectInfo['titles'][key]) + str(" [Attempt ") \
                        + str(currentAttempt + 1) + "]")
                    currentAttempt += 1
            else:
                outputHeaders.append(str(projectInfo['titles'][key]))
        # Then exams (same attempt-expansion rule as projects).
        for key in sortedExamTitles:
            nAttempts = examInfo['attempts'][key]
            currentAttempt = 0
            if(nAttempts > 1 and not takeHighestExam):
                while(currentAttempt < nAttempts):
                    outputHeaders.append(str(examInfo['titles'][key]) + str(" [Attempt ") \
                        + str(currentAttempt + 1) + "]")
                    currentAttempt += 1
            else:
                outputHeaders.append(str(examInfo['titles'][key]))
        csvfile.writerow(outputHeaders)
        # If we want points-based output, we need a row for max points for each assignment:
        if usePoints:
            maxPtsRow = []
            maxPtsRow.append("Pts. Possible") # SID (use as label)
            maxPtsRow.append("") # Last Name (empty cell)
            maxPtsRow.append("") # First Name (empty cell)
            # Lessons first
            for key in sortedLessonTitles:
                maxPtsRow.append(str(lessonInfo['possible'][key]))
            # Then projects (max points repeated once per attempt column)
            for key in sortedProjectTitles:
                nAttempts = projectInfo['attempts'][key]
                currentAttempt = 0
                if(nAttempts > 1 and not takeHighestProject):
                    while(currentAttempt < nAttempts):
                        maxPtsRow.append(str(projectInfo['possible'][key]))
                        currentAttempt += 1
                else:
                    maxPtsRow.append(str(projectInfo['possible'][key]))
            # Then exams
            for key in sortedExamTitles:
                nAttempts = examInfo['attempts'][key]
                currentAttempt = 0
                if(nAttempts > 1 and not takeHighestExam):
                    while(currentAttempt < nAttempts):
                        maxPtsRow.append(str(examInfo['possible'][key]))
                        currentAttempt += 1
                else:
                    maxPtsRow.append(str(examInfo['possible'][key]))
            csvfile.writerow(maxPtsRow)
        # For each student (in sorted order), create exactly 1 row:
        for name in sortedNames:
            outputrow = []
            SID = ''
            # Each row has:
            # StudentID,LastName,FirstName,Lesson1...LessonN,Project1attempt1..attemptN...ProjectNattempt1,...attemptN,Exam1attempt1...attamptN,...ExamNAttempt1...attemptN
            # Pull ID/last/first from whichever report mentions this student
            # (exam report preferred, then project, then lesson).
            if(len(examInfo) > 0 and name in examInfo['names'].keys()):
                outputrow.append(examInfo['records'][examInfo['names'][name]][0][0])
                outputrow.append(examInfo['records'][examInfo['names'][name]][0][1])
                outputrow.append(examInfo['records'][examInfo['names'][name]][0][2])
                SID = examInfo['records'][examInfo['names'][name]][0][0]
            elif(len(projectInfo) > 0 and name in projectInfo['names'].keys()):
                outputrow.append(projectInfo['records'][projectInfo['names'][name]][0][0])
                outputrow.append(projectInfo['records'][projectInfo['names'][name]][0][1])
                outputrow.append(projectInfo['records'][projectInfo['names'][name]][0][2])
                SID = projectInfo['records'][projectInfo['names'][name]][0][0]
            elif(len(lessonInfo) > 0):
                outputrow.append(lessonInfo['records'][lessonInfo['names'][name]][0][0])
                outputrow.append(lessonInfo['records'][lessonInfo['names'][name]][0][1])
                outputrow.append(lessonInfo['records'][lessonInfo['names'][name]][0][2])
                SID = lessonInfo['records'][lessonInfo['names'][name]][0][0]
            #StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,Points,TotalPoints,Percent,Status
            # 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10
            # For each lesson, output its percent points (or points):
            for key in sortedLessonTitles:
                if(SID in lessonInfo['percent'].keys() and key in lessonInfo['percent'][SID].keys()):
                    scoreKey = 'percent' if not usePoints else 'earned'
                    outputrow.append(lessonInfo[scoreKey][SID][key])
                else:
                    outputrow.append(missingScoreMark)
            # Get the list of Percent Points in order for this Project title:
            if(len(projectInfo['titles']) > 0):
                # ppts maps title key -> {attempt number (string) -> score}.
                ppts = {}
                for key in sortedProjectTitles:
                    ppts[key] = {}
                if(name in projectInfo['names'].keys()):
                    for record in projectInfo['records'][projectInfo['names'][name]]:
                        scoreKey = 9 if not usePoints else 7
                        key = cleanKey(record[3])
                        attempt = record[4]
                        ppts[key][attempt] = record[scoreKey]
                # Now output the PercentPoints field for each project title:
                for key in sortedProjectTitles:
                    nAttempts = projectInfo['attempts'][key]
                    currentAttempt = 1
                    if(not takeHighestProject):
                        # One column per attempt, blank-marked when missing.
                        while(currentAttempt <= nAttempts):
                            if(str(currentAttempt) in ppts[key].keys()):
                                outputrow.append(ppts[key][str(currentAttempt)])
                            else:
                                outputrow.append(missingScoreMark)
                            currentAttempt += 1
                    else:
                        # Single column: scan attempts for the highest score.
                        highest = None
                        currentAttempt = 1
                        if(str(currentAttempt) in ppts[key].keys() and ppts[key][str(currentAttempt)] != ''):
                            highest = ppts[key][str(currentAttempt)]
                        currentAttempt += 1
                        while(currentAttempt <= nAttempts):
                            if(str(currentAttempt) in ppts[key].keys()):
                                # Project attempt scores can be empty, be careful of that:
                                if(highest == None or (ppts[key][str(currentAttempt)] != '' and float(ppts[key][str(currentAttempt)]) > float(highest))):
                                    highest = ppts[key][str(currentAttempt)]
                            currentAttempt += 1
                        if(highest != None):
                            if highest == '':
                                print("Error: blank score for {0}".format(currentAttempt))
                            outputrow.append(highest)
                        else:
                            outputrow.append(missingScoreMark)
            #StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,ExamStarted,ExamSpan(d.hh:mm:ss),ExamEnded,NumberCorrect,TotalQuestions,PercentCorrect,NumberPoints,TotalPoints,PercentPoints,Status
            # 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16
            # Get the list of Percent Points in order for this exam title:
            if(len(examInfo['titles']) > 0):
                ppts = {}
                for key in sortedExamTitles:
                    ppts[key] = {}
                if(name in examInfo['names'].keys()):
                    for record in examInfo['records'][examInfo['names'][name]]:
                        scoreKey = examPctColumn if not usePoints else 13
                        key = cleanKey(record[3])
                        attempt = record[4]
                        ppts[key][attempt] = record[scoreKey]
                        # NOTE(review): ppts values are CSV strings; in
                        # Python 2 a number > string comparison is always
                        # False, so this sanity check can never fire --
                        # confirm intent before relying on it.
                        if round((float(record[examPctColumn]) / 100.0) * float(record[14])) > ppts[key][attempt]:
                            print("Error: higher score for ({0} {1} {2}: {3}) according to Percent than for points".format(record[0], record[1], record[2], record[3]))
                # Now output the PercentPoints field for each exam title:
                for key in sortedExamTitles:
                    nAttempts = examInfo['attempts'][key]
                    currentAttempt = 1
                    if(not takeHighestExam):
                        while(currentAttempt <= nAttempts):
                            if(str(currentAttempt) in ppts[key].keys()):
                                outputrow.append(ppts[key][str(currentAttempt)])
                            else:
                                outputrow.append(missingScoreMark)
                            currentAttempt += 1
                    else:
                        # Single column: keep only the highest attempt score.
                        highest = None
                        currentAttempt = 1
                        if(str(currentAttempt) in ppts[key].keys()):
                            highest = ppts[key][str(currentAttempt)]
                        currentAttempt += 1
                        while(currentAttempt <= nAttempts):
                            if(str(currentAttempt) in ppts[key].keys()):
                                if(highest == None or float(ppts[key][str(currentAttempt)]) > float(highest)):
                                    highest = ppts[key][str(currentAttempt)]
                            currentAttempt += 1
                        if(highest != None):
                            outputrow.append(highest)
                        else:
                            outputrow.append(missingScoreMark)
            csvfile.writerow(outputrow)
        return True
    # If the user doesn't choose an output file, we can't continue.
    else:
        return False
class SNRParser(Frame):
    """Tkinter GUI front-end for the SimNet report parser.

    Lets the instructor pick up to three SimNet report CSVs (exam,
    lesson, project), choose output options, and generate the combined
    one-row-per-student CSV via writeCombinedFile().
    """
    def __init__(self, master=None):
        """Build the frame, lay it out with grid(), and create all widgets."""
        self.lessonFileName = ""
        self.examFileName = ""
        self.projectFileName = ""
        Frame.__init__(self, master)
        self.grid()
        self.createWidgets()
    def reInit(self):
        """Reset the form for another run: clear filenames, empty the
        entry boxes, and disable the Generate button again."""
        self.lessonFileName = ""
        self.examFileName = ""
        self.projectFileName = ""
        self.examNameBox.delete(0,END)
        self.lessonNameBox.delete(0,END)
        self.projectNameBox.delete(0,END)
        self.goButton.configure(state=DISABLED)
    def createWidgets(self):
        """Create and grid all labels, entries, checkboxes and buttons.

        The Generate button starts DISABLED and is enabled as soon as at
        least one input file has been chosen (see the get*Name methods).
        """
        instText = "Choose Exam, Lesson, and/or Project reports below\n"
        instText += "then, click \"Generate!\" to create the output\n"
        instText += "workbook.\n"
        self.lessonFileName = ""
        self.examFileName = ""
        self.projectFileName = ""
        self.instructions = Label(self, text=instText, justify=LEFT)
        self.instructions.grid(columnspan=3, row=0)
        # Row 1: exam report picker.
        self.examNameLabel = Label(self, text="Exam Report:")
        self.examNameLabel.grid(column=0,row=1,sticky=W)
        self.examNameBox = Entry(self)
        self.examNameBox.grid(column=1,row=1)
        self.getExamNameButton = Button(self, text="Browse", command=self.getExamName)
        self.getExamNameButton.grid(column=2, row=1)
        # Rows 2-3: exam options.
        self.examTakeHighestAttempt = IntVar()
        self.examTakeHighestAttemptCheckbox = Checkbutton(self, text="Keep only the best exam attempt.", variable=self.examTakeHighestAttempt)
        self.examTakeHighestAttemptCheckbox.grid(column=0,row=2,sticky=W,padx=25, columnspan=3)
        self.usePctPoints = BooleanVar()
        self.examUsePctPointsCheckbox = Checkbutton(self, text="Use % Points column not % Correct (DANGER).", variable=self.usePctPoints, command=self.warnPctPoints)
        self.examUsePctPointsCheckbox.grid(column=0,row=3,sticky=W,padx=25, columnspan=3)
        # Row 4: lesson report picker.
        self.lessonNameLabel = Label(self, text="Lesson Report:")
        self.lessonNameLabel.grid(column=0,row=4,sticky=W)
        self.lessonNameBox = Entry(self)
        self.lessonNameBox.grid(column=1,row=4)
        self.getLessonNameButton = Button(self, text="Browse", command=self.getLessonName)
        self.getLessonNameButton.grid(column=2,row=4)
        # Rows 5-6: project report picker and option.
        self.projectNameLabel = Label(self, text="Project Report:")
        self.projectNameLabel.grid(column=0,row=5,sticky=W)
        self.projectNameBox = Entry(self)
        self.projectNameBox.grid(column=1,row=5)
        self.getProjectNameButton = Button(self, text="Browse", command=self.getProjectName)
        self.getProjectNameButton.grid(column=2, row=5)
        self.projectTakeHighestAttempt = IntVar()
        self.projectTakeHighestAttemptCheckbox = Checkbutton(self, text="Keep only the best project attempt.", variable=self.projectTakeHighestAttempt)
        self.projectTakeHighestAttemptCheckbox.grid(column=0,row=6,sticky=W,padx=25, columnspan=3)
        # Rows 7-8: global output options.
        self.usePoints = BooleanVar()
        self.usePointsCheckbox = Checkbutton(self, text="Use points, not percents.", variable=self.usePoints)
        self.usePointsCheckbox.grid(column=0,row=7,sticky=W, columnspan=3)
        self.missingScoreValueBox = Entry(self, width=10)
        self.missingScoreValueBox.grid(column=2,row=8, sticky=W)
        self.missingScoreLabel = Label(self, text="Insert this value for missing scores:")
        self.missingScoreLabel.grid(column=0,row=8,sticky=W, columnspan=2)
        self.goButton = Button ( self, text="Generate!",command=self.generate, state=DISABLED)
        self.goButton.grid(columnspan=3, row=9, rowspan=2, sticky=S, pady=15)
    def warnPctPoints(self):
        """Pop a warning dialog whenever the '% Points' checkbox is turned on."""
        if(self.usePctPoints.get() == True):
            tkMessageBox.showinfo("Percent Points Warning", "Due to a SimNet bug, using the \"Percent Points\" column may cause manually entered scores not to appear in the final report.")
    def getExamName(self):
        """Browse for the exam report; enable Generate and show the basename."""
        self.examFileName = getInputFile("Exam Report")
        if(self.examFileName != ''):
            self.goButton.configure(state=NORMAL)
            self.examNameBox.insert(0,os.path.basename(self.examFileName))
        else:
            # Dialog was cancelled: clear any previously shown filename.
            self.examNameBox.delete(0,END)
        self.examNameBox.update()
    def getLessonName(self):
        """Browse for the lesson report; enable Generate and show the basename."""
        self.lessonFileName = getInputFile("Lesson Report")
        if(self.lessonFileName != ''):
            self.goButton.configure(state=NORMAL)
            self.lessonNameBox.insert(0, os.path.basename(self.lessonFileName))
        else:
            self.lessonNameBox.delete(0, END)
        self.lessonNameBox.update()
    def getProjectName(self):
        """Browse for the project report; enable Generate and show the basename."""
        self.projectFileName = getInputFile("Project Report")
        if(self.projectFileName != ''):
            self.goButton.configure(state=NORMAL)
            self.projectNameBox.insert(0, os.path.basename(self.projectFileName))
        else:
            self.projectNameBox.delete(0, END)
        self.projectNameBox.update()
    def generate(self):
        """Parse the chosen reports, prompt for an output file, write the
        combined CSV, then offer to run again (or quit)."""
        lessonInfo = readLessonFile(self.lessonFileName)
        examInfo = readExamFile(self.examFileName)
        projectInfo = readProjectFile(self.projectFileName)
        outputFileName = getOutputFile()
        if(writeCombinedFile(outputFileName, lessonInfo, examInfo, projectInfo,
            self.examTakeHighestAttempt.get(), self.usePctPoints.get(), self.projectTakeHighestAttempt.get(),
            self.missingScoreValueBox.get(), self.usePoints.get())):
            self.msg = Message(self,text="Finished. Output file generated OK.")
            #self.msg.grid()
        else:
            self.msg = Message(self,text="No output file specified. Cannot continue.")
            #self.msg.grid()
        if(not tkMessageBox.askyesno("Finished", "Would you like to convert another file set?")):
            self.destroy()
            exit(0)
        else:
            self.reInit()
# Main execution:
if __name__ == "__main__":
app = SNRParser()
app.master.title("SimNet Report Parser")
app.mainloop() | en | 0.832416 | #!/usr/bin/env python # # SimNetExamReportParser.py # # <NAME> 2009-2016 <EMAIL> # # Parses a SimNet exam, lesson, and project report (.csv) files and # produces a corresponding .csv file with one line per student, such that # all assignments and attempts for each assignment are listed (grouped by # assignment type and assignment title) on the student's row. # # Usage: # SimNetExamReportParser.py ################################################################################ # getInputFile will show a "File Open" dialog, returning the filename # of the .csv file. # getOutputFile will show a "File Save" dialog, returning the filename # of the .csv file. # Makes a good (easily sorted) key from a string by making it all lower-case, # removing whitespace, and removing periods. # If we got a filename (with a .csv extension), process it. # Count lines # Full record for each item # The titles themselves, keyed by a cleaned version. # To get a sorted list of names for output # Total tasks by title # Points earned by title and student ID. # Stores percent by title and student ID. # We need to watch for each new lesson name and also find the largest # number of attempts for each. This info will be used in creating the # output table later. # Ignore header line and put lines in a dict: # Lines are of the form: #StudentID,LastName,FirstName,Title,Minutes,Date,Date,NumberComplete,TotalTasks,PercentComplete # 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 # Add this line to the proper student's record (by ID). # Add this lesson title to the lesson's record: # Lessons only have one attempt... # Store percent by ID and title. # Store earned by ID and title. # Store possible by title # Add this student's name to the names list as a key. Value is # the ID number (used for alphabetical reverse-mapping). #clean up the name to make a good alphabetize-able key: # The first line is headers. # We're done with this file. 
# If we got a filename (with a .csv extension), process it. # Count lines # Full record for each item # To keep track of highest value of attempts per title # The titles themselves, keyed by a cleaned version. # Points possible, by exam # To get a sorted list of names for output # We need to watch for each new exam name and also find the largest # number of attempts for each. This info will be used in creating the # output table later. # Ignore header line and put lines in a dict: # Lines are of the form: #StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,ExamStarted,ExamSpan(d.hh:mm:ss),ExamEnded,NumberCorrect,TotalQuestions,PercentCorrect,NumberPoints,TotalPoints,PercentPoints,Status # 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 # For now we ignore the "status"... it seems better to give students the # points they've "partially" earned instead of a zero... # Add this line to the proper student's record (by ID). # Add this exam title to the exam's record: # If we see a new highest attempt number, that is the new max # value stored at attempts[examname]. # Add this student's name to the names list as a key. Value is # the ID number (used for alphabetical reverse-mapping). #clean up the name to make a good alphabetize-able key: # The first line is headers. # We're done with this file. # If we got a filename (with a .csv extension), process it. # Count lines # Full record for each item # To keep track of highest value of attempts per title # The titles themselves, keyed by a cleaned version. # To get a sorted list of names for output # Points possible # Stores percent by title and student ID. # We need to watch for each new lesson name and also find the largest # number of attempts for each. This info will be used in creating the # output table later. 
# Ignore header line and put lines in a dict: # Lines are of the form: #StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,Points,TotalPoints,Percent,Status # 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 # Add this line to the proper student's record (by ID). # Add this project title to the project's record: # If we see a new highest attempt number, that is the new max # value stored at attempts[projectname]. # Store percent by ID and title. # Store possible points by title. # Add this student's name to the names list as a key. Value is # the ID number (used for alphabetical reverse-mapping). #clean up the name to make a good alphabetize-able key: # The first line is headers. # We're done with this file. # PRE-PROCESS: Sort the student names list and exam names list: #First make sure we have the 'names' key in both examInfo and lessonInfo and projectInfo; #Now do the same for 'titles' # Merge names from exams and lessons into a single list of names: # BEGIN OUTPUT PHASE: # Get the output file going and do output. # Ensure a .csv extension. # Open the output file: # Input data order (exams) #StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,ExamStarted,ExamSpan(d.hh:mm:ss),ExamEnded,NumberCorrect,TotalQuestions,PercentCorrect,NumberPoints,TotalPoints,PercentPoints,Status # 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 # Data for lessons is all in lesson structure: # titles, names, percent # percent is stored by [ID][titleKey] # Use "percent correct" by default (instructor can edit this one) # Use "percent points" instead (instructor CANNOT edit this field) # Output to a new CSV file such that each student (by ID) has a single # row. Each row has: # StudentID,LastName,FirstName,Lesson1....LessonN,Exam1attempt1...attamptN,...ExamNAttempt1...attemptN # The first line will be headers. Build them. The headers will # Depend on the lessons, exams, and number of attempts for each exam. 
# Lessons first # Then projects # Then exams # If we want points-based output, we need a row for max points for each assignment: # SID (use as label) # Last Name (empty cell) # First Name (empty cell) # Lessons first # Then projects # Then exams # For each student (in sorted order), create exactly 1 row: # Each row has: # StudentID,LastName,FirstName,Lesson1...LessonN,Project1attempt1..attemptN...ProjectNattempt1,...attemptN,Exam1attempt1...attamptN,...ExamNAttempt1...attemptN #StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,Points,TotalPoints,Percent,Status # 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 # For each lesson, output its percent points (or points): # Get the list of Percent Points in order for this Project title: # Now output the PercentPoints field for each project title: # Project attempt scores can be empty, be careful of that: #StudentID,LastName,FirstName,Title,Attempt,Minutes,Date,ExamStarted,ExamSpan(d.hh:mm:ss),ExamEnded,NumberCorrect,TotalQuestions,PercentCorrect,NumberPoints,TotalPoints,PercentPoints,Status # 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 # Get the list of Percent Points in order for this exam title: # Now output the PercentPoints field for each exam title: # If the user doesn't choose an output file, we can't continue. #self.msg.grid() #self.msg.grid() # Main execution: | 3.547354 | 4 |
appyter/ext/socketio/priority_queued_emit.py | MaayanLab/jupyter-template | 0 | 6619020 | <reponame>MaayanLab/jupyter-template
import asyncio
import itertools
import logging
logger = logging.getLogger(__name__)
class PriorityQueuedEmitMixin:
''' A mixin for queuing `emit` calls to get triggered sequentially when `emit_enabled` event is set
'''
async def __aenter__(self):
self._emit_enabled = asyncio.Event()
self._emit_counter = iter(itertools.count())
self._emit_queue = asyncio.PriorityQueue()
self._emit_dispatcher_task = asyncio.create_task(self._emit_dispatcher())
async def __aexit__(self, *args):
if self._emit_queue.qsize() != 0:
logger.warning(f"{self._emit_queue.qsize()} items in queue weren't processed...")
try:
self._emit_dispatcher_task.cancel()
await self._emit_dispatcher_task
except asyncio.CancelledError:
pass
async def _emit_dispatcher(self):
while True:
_, _, args, kwargs = await self._emit_queue.get()
try:
await self._emit_enabled.wait()
await super().emit(*args, **{k:v for k,v in kwargs.items() if v})
except asyncio.CancelledError:
raise
except:
import traceback
logger.error(traceback.format_exc())
finally:
self._emit_queue.task_done()
async def emit(self, evt, data, priority=0, **kwargs):
await self._emit_queue.put((
priority, next(self._emit_counter),
(evt, data),
kwargs,
))
async def disconnect(self):
if self._emit_enabled.is_set():
logger.debug('Ensuring emit queue has been fully processed...')
await self._emit_queue.join()
await super().disconnect()
| import asyncio
import itertools
import logging
logger = logging.getLogger(__name__)
class PriorityQueuedEmitMixin:
''' A mixin for queuing `emit` calls to get triggered sequentially when `emit_enabled` event is set
'''
async def __aenter__(self):
self._emit_enabled = asyncio.Event()
self._emit_counter = iter(itertools.count())
self._emit_queue = asyncio.PriorityQueue()
self._emit_dispatcher_task = asyncio.create_task(self._emit_dispatcher())
async def __aexit__(self, *args):
if self._emit_queue.qsize() != 0:
logger.warning(f"{self._emit_queue.qsize()} items in queue weren't processed...")
try:
self._emit_dispatcher_task.cancel()
await self._emit_dispatcher_task
except asyncio.CancelledError:
pass
async def _emit_dispatcher(self):
while True:
_, _, args, kwargs = await self._emit_queue.get()
try:
await self._emit_enabled.wait()
await super().emit(*args, **{k:v for k,v in kwargs.items() if v})
except asyncio.CancelledError:
raise
except:
import traceback
logger.error(traceback.format_exc())
finally:
self._emit_queue.task_done()
async def emit(self, evt, data, priority=0, **kwargs):
await self._emit_queue.put((
priority, next(self._emit_counter),
(evt, data),
kwargs,
))
async def disconnect(self):
if self._emit_enabled.is_set():
logger.debug('Ensuring emit queue has been fully processed...')
await self._emit_queue.join()
await super().disconnect() | en | 0.871659 | A mixin for queuing `emit` calls to get triggered sequentially when `emit_enabled` event is set | 2.423547 | 2 |
fakecam/fakecam/ui/gstreamer.py | stuartlangridge/fakecam | 2 | 6619021 | from typing import Tuple, Optional
from gi.repository import Gst
from gi.repository import Gtk
def create_gtk_widget() -> Tuple[Optional[Gst.Element], Optional[Gtk.Widget], Optional[str]]:
# gtkglsink = Gst.ElementFactory.make("gtkglsink", None)
# if gtkglsink is not None:
# glsinkbin = Gst.ElementFactory.make("glsinkbin", None)
# if glsinkbin is None:
# return None, None, None
# glsinkbin.set_property("sink", gtkglsink)
# sink = glsinkbin
# widget = gtkglsink.get_property("widget")
# name = "gtkglsink"
# else:
sink = Gst.ElementFactory.make("gtksink", None)
if sink is None:
return None, None, None
widget = sink.get_property("widget")
name = "gtksink"
widget.set_visible(True)
widget.set_property("expand", True)
return sink, widget, name
| from typing import Tuple, Optional
from gi.repository import Gst
from gi.repository import Gtk
def create_gtk_widget() -> Tuple[Optional[Gst.Element], Optional[Gtk.Widget], Optional[str]]:
# gtkglsink = Gst.ElementFactory.make("gtkglsink", None)
# if gtkglsink is not None:
# glsinkbin = Gst.ElementFactory.make("glsinkbin", None)
# if glsinkbin is None:
# return None, None, None
# glsinkbin.set_property("sink", gtkglsink)
# sink = glsinkbin
# widget = gtkglsink.get_property("widget")
# name = "gtkglsink"
# else:
sink = Gst.ElementFactory.make("gtksink", None)
if sink is None:
return None, None, None
widget = sink.get_property("widget")
name = "gtksink"
widget.set_visible(True)
widget.set_property("expand", True)
return sink, widget, name
| en | 0.295969 | # gtkglsink = Gst.ElementFactory.make("gtkglsink", None) # if gtkglsink is not None: # glsinkbin = Gst.ElementFactory.make("glsinkbin", None) # if glsinkbin is None: # return None, None, None # glsinkbin.set_property("sink", gtkglsink) # sink = glsinkbin # widget = gtkglsink.get_property("widget") # name = "gtkglsink" # else: | 2.353735 | 2 |
urls.py | Soul-Code/JustSearchBackend | 2 | 6619022 | from django.urls import path, include
from . import views
app_name = 'JustSearch'
urlpatterns_api = [
path('login', views.login_view, name='login'),
path('new_team', views.new_team, name='newteam'),
path('get_team', views.get_team, name='getteam'),
path('register', views.register, name='register'),
path('get_rank', views.get_rank, name='getrank'),
path('find_team', views.find_team, name='findteam'),
path('join_team', views.join_team, name='join_team'),
path('quit_team', views.quit_team, name='quit_team'),
path('del_team', views.del_team, name='del_team'),
path('logout', views.logout, name='logout'),
path('get_questions', views.get_questions, name='get_questions'),
path('get_questions/<int:page_num>', views.get_questions, name='get_questions'),
path('submit_answer', views.submit_answer, name='submit_answer'),
path('get_stages', views.get_stages, name='get_stages')
]
urlpatterns = [
path('', views.index, name='index'),
path('api/', include(urlpatterns_api)),
# 以上内容是just搜搜的嘤
# 以下内容是迎新晚会的嘤
path('<str:string>', views.index, name='index'),
path('yxwh/<str:txt>', views.yxwh, name='yxwh')
]
| from django.urls import path, include
from . import views
app_name = 'JustSearch'
urlpatterns_api = [
path('login', views.login_view, name='login'),
path('new_team', views.new_team, name='newteam'),
path('get_team', views.get_team, name='getteam'),
path('register', views.register, name='register'),
path('get_rank', views.get_rank, name='getrank'),
path('find_team', views.find_team, name='findteam'),
path('join_team', views.join_team, name='join_team'),
path('quit_team', views.quit_team, name='quit_team'),
path('del_team', views.del_team, name='del_team'),
path('logout', views.logout, name='logout'),
path('get_questions', views.get_questions, name='get_questions'),
path('get_questions/<int:page_num>', views.get_questions, name='get_questions'),
path('submit_answer', views.submit_answer, name='submit_answer'),
path('get_stages', views.get_stages, name='get_stages')
]
urlpatterns = [
path('', views.index, name='index'),
path('api/', include(urlpatterns_api)),
# 以上内容是just搜搜的嘤
# 以下内容是迎新晚会的嘤
path('<str:string>', views.index, name='index'),
path('yxwh/<str:txt>', views.yxwh, name='yxwh')
]
| zh | 0.809841 | # 以上内容是just搜搜的嘤 # 以下内容是迎新晚会的嘤 | 2.076382 | 2 |
core/backend/git/tests.py | Djacket/djacket | 85 | 6619023 | from git.repo import Repo
from git.object import GitObject
from git.statistics import GitStatistics
from git.statistics import DataPresentation
def run_tests():
pass
| from git.repo import Repo
from git.object import GitObject
from git.statistics import GitStatistics
from git.statistics import DataPresentation
def run_tests():
pass
| none | 1 | 1.104741 | 1 | |
app/api/views/news.py | Hackitect/See-the-Light | 2 | 6619024 | <filename>app/api/views/news.py
from flask import Flask, Blueprint, jsonify, request
import requests
news = Blueprint('news', __name__)
app = Flask(__name__)
from app.api.models import news_model
import json
import socket
import requests
# from app import app
# from app.api.v1.models.news_model import Sale
# from app.api.v1.models.store_model import Store
news = Blueprint('news', __name__)
app = Flask(__name__)
@news.route('/')
def hello():
return "This is a project by team STL: sample link(http://127.0.0.1:5000/news/get?link=https://www.bbc.com/news/uk-politics-46155403)"
@news.route('/get', methods = ['GET'])
def post_news_link():
link = request.args.get('link')
print('######## THIS IS THE LINK FROM GET METHOD', link)
response = requests.post('http://newsbreakers.herokuapp.com',
data={"text": link}
# content_type='application/json'
)
print("###########", response.content)
return (response.content)
data= {"text":link}
# content_type='application/json'
)
print("###########", response.content)
return (response.content)
@news.route('/url', methods = ['GET'])
def post_url_link():
link = request.args.get('link')
get_url = (link.split('/'))[2]
IP_addr = socket.gethostbyname(get_url)
Token_Charles = '<KEY>'
Token_Simon = '6ca9c9de-3b1e-4300-b85b-6501bf44717a'
headers = {
"Accept": "application/json",
"X-Auth-Token": Token_Simon,
}
fullip = requests.get('https://api.apility.net/v2.0/' +
IP_addr, headers=headers)
print(fullip)
return fullip.content
| <filename>app/api/views/news.py
from flask import Flask, Blueprint, jsonify, request
import requests
news = Blueprint('news', __name__)
app = Flask(__name__)
from app.api.models import news_model
import json
import socket
import requests
# from app import app
# from app.api.v1.models.news_model import Sale
# from app.api.v1.models.store_model import Store
news = Blueprint('news', __name__)
app = Flask(__name__)
@news.route('/')
def hello():
return "This is a project by team STL: sample link(http://127.0.0.1:5000/news/get?link=https://www.bbc.com/news/uk-politics-46155403)"
@news.route('/get', methods = ['GET'])
def post_news_link():
link = request.args.get('link')
print('######## THIS IS THE LINK FROM GET METHOD', link)
response = requests.post('http://newsbreakers.herokuapp.com',
data={"text": link}
# content_type='application/json'
)
print("###########", response.content)
return (response.content)
data= {"text":link}
# content_type='application/json'
)
print("###########", response.content)
return (response.content)
@news.route('/url', methods = ['GET'])
def post_url_link():
link = request.args.get('link')
get_url = (link.split('/'))[2]
IP_addr = socket.gethostbyname(get_url)
Token_Charles = '<KEY>'
Token_Simon = '6ca9c9de-3b1e-4300-b85b-6501bf44717a'
headers = {
"Accept": "application/json",
"X-Auth-Token": Token_Simon,
}
fullip = requests.get('https://api.apility.net/v2.0/' +
IP_addr, headers=headers)
print(fullip)
return fullip.content
| en | 0.237886 | # from app import app # from app.api.v1.models.news_model import Sale # from app.api.v1.models.store_model import Store ####### THIS IS THE LINK FROM GET METHOD', link) # content_type='application/json' ##########", response.content) # content_type='application/json' ##########", response.content) | 2.807735 | 3 |
tests/test_config.py | ArroyoDev-LLC/vertisee | 2 | 6619025 | <filename>tests/test_config.py
import pytest
from pytest import approx
from threedframe.config import _Config
# at 0.69 scale
computed_vals = [
(
"support_size",
17.53,
),
(
"core_size",
35.56,
),
(
"fixture_shell_thickness",
6.0,
),
(
"fixture_length",
38.1,
),
(
"fixture_size",
20.54,
),
("fixture_hole_size", 17.55),
(
"label_size",
7.15,
),
(
"label_width",
16.65,
),
]
@pytest.mark.parametrize("attr,expect", computed_vals)
def test_config(attr: str, expect: float):
c = _Config(SUPPORT_SCALE=0.69)
assert getattr(c, attr) == approx(expect, rel=1e-2)
| <filename>tests/test_config.py
import pytest
from pytest import approx
from threedframe.config import _Config
# at 0.69 scale
computed_vals = [
(
"support_size",
17.53,
),
(
"core_size",
35.56,
),
(
"fixture_shell_thickness",
6.0,
),
(
"fixture_length",
38.1,
),
(
"fixture_size",
20.54,
),
("fixture_hole_size", 17.55),
(
"label_size",
7.15,
),
(
"label_width",
16.65,
),
]
@pytest.mark.parametrize("attr,expect", computed_vals)
def test_config(attr: str, expect: float):
c = _Config(SUPPORT_SCALE=0.69)
assert getattr(c, attr) == approx(expect, rel=1e-2)
| en | 0.887119 | # at 0.69 scale | 2.434006 | 2 |
adsbxcot/classes.py | joshuafuller/adsbxcot | 18 | 6619026 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""ADS-B Exchange Cursor-on-Target Class Definitions."""
import concurrent
import aiohttp
import asyncio
import configparser
import json
import logging
import os
import queue
import random
import threading
import time
import urllib
import pytak
import requests
import aircot
import adsbxcot
__author__ = "<NAME> W2GMD <<EMAIL>>"
__copyright__ = "Copyright 2021 Orion Labs, Inc."
__license__ = "Apache License, Version 2.0"
class ADSBXWorker(pytak.MessageWorker):
"""Reads ADS-B Exchange Data, renders to CoT, and puts on queue."""
def __init__(self, event_queue: asyncio.Queue, opts):
super().__init__(event_queue)
self.url: urllib.parse.ParseResult = urllib.parse.urlparse(opts.get("ADSBX_URL"))
self.cot_stale = opts.get("COT_STALE")
self.poll_interval: int = int(opts.get("POLL_INTERVAL") or adsbxcot.DEFAULT_POLL_INTERVAL)
self.api_key: str = opts.get("API_KEY")
self.include_tisb = bool(opts.get("INCLUDE_TISB")) or False
self.include_all_craft = bool(opts.get("INCLUDE_ALL_CRAFT")) or False
self.filters = opts.get("FILTERS")
self.known_craft = opts.get("KNOWN_CRAFT")
self.known_craft_key = opts.get("KNOWN_CRAFT_KEY") or "HEX"
self.filter_type = ""
self.known_craft_db = None
async def handle_message(self, aircraft: list) -> None:
"""
Transforms Aircraft ADS-B data to CoT and puts it onto tx queue.
"""
if not isinstance(aircraft, list):
self._logger.warning(
"Invalid aircraft data, should be a Python list.")
return False
if not aircraft:
self._logger.warning("Empty aircraft list")
return False
_lac = len(aircraft)
_acn = 1
for craft in aircraft:
# self._logger.debug("craft=%s", craft)
icao = craft.get("hex", craft.get("icao")).strip().upper()
flight = craft.get("flight", "").strip().upper()
reg = craft.get("r", "").strip().upper()
if "~" in icao and not self.include_tisb:
continue
known_craft = {}
if self.filter_type:
if self.filter_type == "HEX":
filter_key: str = icao
elif self.filter_type == "FLIGHT":
filter_key: str = flight
elif self.filter_type == "REG":
filter_key: str = reg
else:
filter_key: str = ""
# self._logger.debug("filter_key=%s", filter_key)
if self.known_craft_db and filter_key:
known_craft = (list(filter(
lambda x: x[self.known_craft_key].strip().upper() == filter_key, self.known_craft_db)) or
[{}])[0]
# self._logger.debug("known_craft='%s'", known_craft)
elif filter_key:
if "include" in self.filters[self.filter_type] and filter_key not in self.filters.get(filter_type,
"include"):
continue
if "exclude" in self.filters[self.filter_type] and filter_key in self.filters.get(filter_type,
"exclude"):
continue
# If we're using a known_craft csv and this craft wasn't found, skip:
if self.known_craft_db and not known_craft and not self.include_all_craft:
continue
event = adsbxcot.adsbx_to_cot(
craft,
stale=self.cot_stale,
known_craft=known_craft
)
if not event:
self._logger.debug(f"Empty CoT Event for craft={craft}")
_acn += 1
continue
self._logger.debug(
"Handling %s/%s ICAO: %s Flight: %s Category: %s",
_acn,
_lac,
craft.get("hex"),
craft.get("flight"),
craft.get("category")
)
await self._put_event_queue(event)
_acn += 1
async def _get_adsbx_feed(self):
# Support for either direct ADSBX API, or RapidAPI
if "rapidapi" in self.url.geturl():
headers = {
"x-rapidapi-key": self.api_key,
"x-rapidapi-host": "adsbexchange-com1.p.rapidapi.com"
}
else:
headers = {"api-auth": self.api_key}
async with aiohttp.ClientSession() as session:
response = await session.request(
method="GET",
url=self.url.geturl(),
headers=headers
)
response.raise_for_status()
json_resp = await response.json()
aircraft = json_resp.get("ac")
self._logger.debug("Retrieved %s aircraft", len(aircraft))
await self.handle_message(aircraft)
async def run(self):
"""Runs this Thread, Reads from Pollers."""
self._logger.info(
"Running ADSBXWorker with URL '%s'", self.url.geturl())
if self.known_craft is not None:
self._logger.info("Using KNOWN_CRAFT File: '%s'", self.known_craft)
self.known_craft_db = aircot.read_known_craft(self.known_craft)
self.filters = configparser.ConfigParser()
self.filters.add_section(self.known_craft_key)
self.filters[self.known_craft_key]["include"] = \
str([x[self.known_craft_key].strip().upper() for x in self.known_craft_db])
if self.filters or self.known_craft_db:
filter_src = self.filters or self.known_craft_key
self._logger.debug("filter_src=%s", filter_src)
if filter_src:
if "HEX" in filter_src:
self.filter_type = "HEX"
elif "FLIGHT" in filter_src:
self.filter_type = "FLIGHT"
elif "REG" in filter_src:
self.filter_type = "REG"
self._logger.debug("filter_type=%s", self.filter_type)
while 1:
await self._get_adsbx_feed()
await asyncio.sleep(self.poll_interval)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""ADS-B Exchange Cursor-on-Target Class Definitions."""
import concurrent
import aiohttp
import asyncio
import configparser
import json
import logging
import os
import queue
import random
import threading
import time
import urllib
import pytak
import requests
import aircot
import adsbxcot
__author__ = "<NAME> W2GMD <<EMAIL>>"
__copyright__ = "Copyright 2021 Orion Labs, Inc."
__license__ = "Apache License, Version 2.0"
class ADSBXWorker(pytak.MessageWorker):
"""Reads ADS-B Exchange Data, renders to CoT, and puts on queue."""
def __init__(self, event_queue: asyncio.Queue, opts):
super().__init__(event_queue)
self.url: urllib.parse.ParseResult = urllib.parse.urlparse(opts.get("ADSBX_URL"))
self.cot_stale = opts.get("COT_STALE")
self.poll_interval: int = int(opts.get("POLL_INTERVAL") or adsbxcot.DEFAULT_POLL_INTERVAL)
self.api_key: str = opts.get("API_KEY")
self.include_tisb = bool(opts.get("INCLUDE_TISB")) or False
self.include_all_craft = bool(opts.get("INCLUDE_ALL_CRAFT")) or False
self.filters = opts.get("FILTERS")
self.known_craft = opts.get("KNOWN_CRAFT")
self.known_craft_key = opts.get("KNOWN_CRAFT_KEY") or "HEX"
self.filter_type = ""
self.known_craft_db = None
async def handle_message(self, aircraft: list) -> None:
"""
Transforms Aircraft ADS-B data to CoT and puts it onto tx queue.
"""
if not isinstance(aircraft, list):
self._logger.warning(
"Invalid aircraft data, should be a Python list.")
return False
if not aircraft:
self._logger.warning("Empty aircraft list")
return False
_lac = len(aircraft)
_acn = 1
for craft in aircraft:
# self._logger.debug("craft=%s", craft)
icao = craft.get("hex", craft.get("icao")).strip().upper()
flight = craft.get("flight", "").strip().upper()
reg = craft.get("r", "").strip().upper()
if "~" in icao and not self.include_tisb:
continue
known_craft = {}
if self.filter_type:
if self.filter_type == "HEX":
filter_key: str = icao
elif self.filter_type == "FLIGHT":
filter_key: str = flight
elif self.filter_type == "REG":
filter_key: str = reg
else:
filter_key: str = ""
# self._logger.debug("filter_key=%s", filter_key)
if self.known_craft_db and filter_key:
known_craft = (list(filter(
lambda x: x[self.known_craft_key].strip().upper() == filter_key, self.known_craft_db)) or
[{}])[0]
# self._logger.debug("known_craft='%s'", known_craft)
elif filter_key:
if "include" in self.filters[self.filter_type] and filter_key not in self.filters.get(filter_type,
"include"):
continue
if "exclude" in self.filters[self.filter_type] and filter_key in self.filters.get(filter_type,
"exclude"):
continue
# If we're using a known_craft csv and this craft wasn't found, skip:
if self.known_craft_db and not known_craft and not self.include_all_craft:
continue
event = adsbxcot.adsbx_to_cot(
craft,
stale=self.cot_stale,
known_craft=known_craft
)
if not event:
self._logger.debug(f"Empty CoT Event for craft={craft}")
_acn += 1
continue
self._logger.debug(
"Handling %s/%s ICAO: %s Flight: %s Category: %s",
_acn,
_lac,
craft.get("hex"),
craft.get("flight"),
craft.get("category")
)
await self._put_event_queue(event)
_acn += 1
async def _get_adsbx_feed(self):
# Support for either direct ADSBX API, or RapidAPI
if "rapidapi" in self.url.geturl():
headers = {
"x-rapidapi-key": self.api_key,
"x-rapidapi-host": "adsbexchange-com1.p.rapidapi.com"
}
else:
headers = {"api-auth": self.api_key}
async with aiohttp.ClientSession() as session:
response = await session.request(
method="GET",
url=self.url.geturl(),
headers=headers
)
response.raise_for_status()
json_resp = await response.json()
aircraft = json_resp.get("ac")
self._logger.debug("Retrieved %s aircraft", len(aircraft))
await self.handle_message(aircraft)
async def run(self):
"""Runs this Thread, Reads from Pollers."""
self._logger.info(
"Running ADSBXWorker with URL '%s'", self.url.geturl())
if self.known_craft is not None:
self._logger.info("Using KNOWN_CRAFT File: '%s'", self.known_craft)
self.known_craft_db = aircot.read_known_craft(self.known_craft)
self.filters = configparser.ConfigParser()
self.filters.add_section(self.known_craft_key)
self.filters[self.known_craft_key]["include"] = \
str([x[self.known_craft_key].strip().upper() for x in self.known_craft_db])
if self.filters or self.known_craft_db:
filter_src = self.filters or self.known_craft_key
self._logger.debug("filter_src=%s", filter_src)
if filter_src:
if "HEX" in filter_src:
self.filter_type = "HEX"
elif "FLIGHT" in filter_src:
self.filter_type = "FLIGHT"
elif "REG" in filter_src:
self.filter_type = "REG"
self._logger.debug("filter_type=%s", self.filter_type)
while 1:
await self._get_adsbx_feed()
await asyncio.sleep(self.poll_interval) | en | 0.665197 | #!/usr/bin/env python # -*- coding: utf-8 -*- ADS-B Exchange Cursor-on-Target Class Definitions. Reads ADS-B Exchange Data, renders to CoT, and puts on queue. Transforms Aircraft ADS-B data to CoT and puts it onto tx queue. # self._logger.debug("craft=%s", craft) # self._logger.debug("filter_key=%s", filter_key) # self._logger.debug("known_craft='%s'", known_craft) # If we're using a known_craft csv and this craft wasn't found, skip: # Support for either direct ADSBX API, or RapidAPI Runs this Thread, Reads from Pollers. | 2.362239 | 2 |
ratings/tests.py | asm3ft/cs3240-quickthooters | 0 | 6619027 | <filename>ratings/tests.py<gh_stars>0
from django.test import TestCase
from django.test import RequestFactory, TestCase
from .views import HomePageView
from django.contrib.auth.models import User
# class LoginViewsTestCase(TestCase):
# def setUp(self):
# # Every test needs access to the request factory.
# self.factory = RequestFactory()
# self.user = User.objects.create_user(
# username='jacob', email='jacob@…', password='<PASSWORD>')
# def test_homepage_get(self):
# request = self.factory.get("")
# request.user = self.user
# response = HomePageView.as_view()(request)
# self.assertEquals(response.status_code, 200) | <filename>ratings/tests.py<gh_stars>0
from django.test import TestCase
from django.test import RequestFactory, TestCase
from .views import HomePageView
from django.contrib.auth.models import User
# class LoginViewsTestCase(TestCase):
# def setUp(self):
# # Every test needs access to the request factory.
# self.factory = RequestFactory()
# self.user = User.objects.create_user(
# username='jacob', email='jacob@…', password='<PASSWORD>')
# def test_homepage_get(self):
# request = self.factory.get("")
# request.user = self.user
# response = HomePageView.as_view()(request)
# self.assertEquals(response.status_code, 200) | en | 0.527769 | # class LoginViewsTestCase(TestCase): # def setUp(self): # # Every test needs access to the request factory. # self.factory = RequestFactory() # self.user = User.objects.create_user( # username='jacob', email='jacob@…', password='<PASSWORD>') # def test_homepage_get(self): # request = self.factory.get("") # request.user = self.user # response = HomePageView.as_view()(request) # self.assertEquals(response.status_code, 200) | 2.31831 | 2 |
api/queries/vs_blueprint.py | Rafaelyot/slicer-catalogue | 0 | 6619028 | <gh_stars>0
import api.queries.vs_descriptor as vs_descriptor_queries
import uuid
from bson import ObjectId
from mongoengine.queryset.visitor import Q
from api.models.ns_descriptor import Nsd
from api.models.vnf import Vnfd
from api.models.vs_blueprint import VsBlueprintInfo, VsBlueprint, VsdNsdTranslationRule, VsbActions
from api.models.ns_template import Nst
from api.exceptions.exceptions import MalFormedException, FailedOperationException, AlreadyExistingEntityException, \
NotExistingEntityException
from api.exceptions.utils import exception_message_elements
from api.queries.utils import transaction, extract_file, download_file, get_json_in_folder, file_exists, move_file, \
remove_file_and_folder, convert_all_fields_to_snake, aggregate_transactions
from copy import deepcopy
from api.serializers.utils import pyangbind_load
from api.serializers.vnf import etsi_nfv_vnfd
from api.serializers.ns_descriptor import etsi_nfv_nsd
# noinspection PyBroadException
def _post_process_vsb(original_vs_blueprint_info, tenant_id):
target_vs_blueprint_info = deepcopy(original_vs_blueprint_info)
target_vs_blueprint_info.vs_blueprint = original_vs_blueprint_info.vs_blueprint
target_vs_blueprint_info.active_vsd_id = []
for id_ in original_vs_blueprint_info.active_vsd_id:
try:
target_vs_blueprint_info.active_vsd_id.append(vs_descriptor_queries.get_vs_descriptors(tenant_id, id_)[0])
except Exception:
continue
return target_vs_blueprint_info
# noinspection PyTypeChecker
def get_vs_blueprints(vsb_id=None, vsb_name=None, vsb_version=None, tenant_id=None, with_translation_rules=False):
arguments = locals()
arguments.pop('with_translation_rules', None)
parameters_size = len(dict(filter(lambda a: a[-1] is not None, arguments.items())))
if parameters_size == 1 and (vsb_id is not None):
vsbi = VsBlueprintInfo.get_or_404(vs_blueprint_id=vsb_id)
vsbi.vs_blueprint = VsBlueprint.get_or_404(blueprint_id=vsb_id)
if with_translation_rules:
vsbi.vs_blueprint.translation_rules = VsdNsdTranslationRule.objects.filter(blueprint_id=vsb_id)
return [vsbi]
elif parameters_size == 1 and (tenant_id is not None):
vsbi_list = []
for vsbi in VsBlueprintInfo.objects.all():
vsbi.vs_blueprint = VsBlueprint.get_or_404(name=vsbi.name, version=vsbi.vs_blueprint_version)
if with_translation_rules:
vs_blueprint_id = vsbi.vs_blueprint.blueprint_id
vsbi.vs_blueprint.translation_rules = VsdNsdTranslationRule.objects.filter(blueprint_id=vs_blueprint_id)
vsbi_list.append(_post_process_vsb(vsbi, tenant_id))
return vsbi_list
elif parameters_size == 2 and (vsb_name is not None) and (vsb_version is not None):
vsbi = VsBlueprintInfo.get_or_404(name=vsb_name, vs_blueprint_version=vsb_version)
vsbi.vs_blueprint = VsBlueprint.get_or_404(name=vsb_name, version=vsb_version)
if with_translation_rules:
vs_blueprint_id = vsbi.vs_blueprint.blueprint_id
vsbi.vs_blueprint.translation_rules = VsdNsdTranslationRule.objects.filter(blueprint_id=vs_blueprint_id)
return [vsbi]
elif parameters_size == 2 and (vsb_id is not None) and (tenant_id is not None):
vsbi = VsBlueprintInfo.get_or_404(vs_blueprint_id=vsb_id)
vsbi.vs_blueprint = VsBlueprint.get_or_404(blueprint_id=vsb_id)
if with_translation_rules:
vsbi.vs_blueprint.translation_rules = VsdNsdTranslationRule.objects.filter(blueprint_id=vsb_id)
return [_post_process_vsb(vsbi, tenant_id)]
elif parameters_size == 0:
all_vsbi = VsBlueprintInfo.objects.all()
for vsbi in all_vsbi:
vsbi.vs_blueprint = VsBlueprint.get_or_404(name=vsbi.name, version=vsbi.vs_blueprint_version)
if with_translation_rules:
vs_blueprint_id = vsbi.vs_blueprint.blueprint_id
vsbi.vs_blueprint.translation_rules = VsdNsdTranslationRule.objects.filter(blueprint_id=vs_blueprint_id)
return all_vsbi
raise MalFormedException()
def delete_vs_blueprint(vsb_id):
vsbi = VsBlueprintInfo.get_or_404(vs_blueprint_id=vsb_id)
if len(vsbi.active_vsd_id) > 0:
raise FailedOperationException("There are some VSDs associated to the VS Blueprint. Impossible to remove it.")
def delete_callback(session):
VsBlueprintInfo.get_collection().delete_one({
"vs_blueprint_id": vsb_id
}, session=session)
VsBlueprint.get_collection().delete_one({
"blueprint_id": vsb_id
}, session=session)
transaction(delete_callback)
def _store_vnfd(vnf, vnfd):
vnfd_id, vnfd_version = vnfd.pop('id', None), vnf.get('version')
if Vnfd.objects.filter((Q(vnfd_id=vnfd) & Q(version=vnfd_version)) |
(Q(name=vnf.get('name')) & Q(provider=vnf.get('provider')) &
Q(version=vnf.get('version')))).count() > 0:
raise AlreadyExistingEntityException(f"Vnfd with vnfdId: {vnfd.vndf_id} already present in DB")
vnfd['vnfd_id'] = vnfd_id
return vnfd
def _onboard_vnf_package(vnf):
downloaded_file = download_file(vnf.get('vnf_package_path'), str(uuid.uuid4()))
folder = extract_file(downloaded_file)
json_content = get_json_in_folder(folder)
if file_exists(f'{folder}/cloud-config.txt'):
# need to not delete cloud init
move_file(f'{folder}/cloud-config.txt')
remove_file_and_folder(downloaded_file, folder)
vnfd = pyangbind_load(etsi_nfv_vnfd(), json_content, "Invalid content for Vnfd object").get('etsi-nfv-vnfd:vnfd')
if vnfd is None:
raise MalFormedException('VNFD for onboarding is empty')
return _store_vnfd(vnf, convert_all_fields_to_snake(vnfd))
def _on_board_ns_template(nst, nsds, vnf_packages):
nsds = [] if nsds is None else nsds
vnf_packages = [] if vnf_packages is None else vnf_packages
# Vnf Packages
all_vnfd_data = []
for vnf in vnf_packages:
try:
vnfd_data = _onboard_vnf_package(vnf)
all_vnfd_data.append(vnfd_data)
except AlreadyExistingEntityException:
continue
transaction_data = []
if len(all_vnfd_data) > 0:
transaction_data += [
{
'collection': Vnfd.get_collection(),
'operation': 'insert_many',
'args': (all_vnfd_data,)
}
]
# Nsds
all_nsd_data = []
for nsd in nsds:
try:
nsd_data = convert_all_fields_to_snake(nsd)
all_nsd_data.append(nsd_data)
except AlreadyExistingEntityException:
continue
if len(all_nsd_data) > 0:
transaction_data += [
{
'collection': Nsd.get_collection(),
'operation': 'insert_many',
'args': (all_nsd_data,)
}
]
nst_name, nst_version, nst_id = nst.get('nst_name'), nst.get('nst_version'), nst.get('nst_id')
if Nst.objects.filter((Q(nst_name=nst_name) & Q(nst_version=nst_version)) | Q(nst_id=nst_id)).count() > 0:
raise AlreadyExistingEntityException(
f"NsTemplate with name {nst_name} and version {nst_version} or ID exists")
if len(nst) > 0:
transaction_data += [
{
'collection': Nst.get_collection(),
'operation': 'insert_one',
'args': (nst,)
}
]
return transaction_data
def _process_ns_descriptor_onboarding(data):
nsts, nsds, vnf_packages = data.get('nsts', []), data.get('nsds', []), data.get('vnf_packages', [])
if len(nsts) == 0 and len(nsds) == 0 and len(vnf_packages) == 0:
return
transaction_data = []
if len(nsts) > 0:
transaction_data += _on_board_ns_template(nsts[0], nsds, vnf_packages)
for nst in nsts[1:]:
transaction_data += _on_board_ns_template(nst, None, None)
return transaction_data
def _create_vs_blueprint(data):
transaction_data = _process_ns_descriptor_onboarding(data)
if transaction_data is None:
transaction_data = []
vs_blueprint = data.get('vs_blueprint', {})
name, version, owner = vs_blueprint.get('name'), vs_blueprint.get('version'), data.get('owner')
if VsBlueprintInfo.objects.filter(name=name, vs_blueprint_version=version).count() > 0 or \
VsBlueprint.objects.filter(name=name, version=version).count() > 0:
class_name, args = exception_message_elements(VsBlueprint, name=name, version=version)
raise AlreadyExistingEntityException(f"{class_name} with {args} already present in DB")
_id = ObjectId()
data['_id'] = _id
vs_blueprint_id = vs_blueprint['blueprint_id'] = str(_id)
translation_rules = data.get('translation_rules', [])
for translation_rule in translation_rules:
translation_rule['blueprint_id'] = vs_blueprint_id
transaction_data += [
{
'collection': VsBlueprint.get_collection(),
'operation': 'insert_one',
'args': (data.get('vs_blueprint'),)
},
{
'collection': VsBlueprintInfo.get_collection(),
'operation': 'insert_one',
'args': ({
'vs_blueprint_id': vs_blueprint_id,
'vs_blueprint_version': version,
'name': name,
'owner': owner
},)
}
]
if len(translation_rules) > 0:
transaction_data += [{
'collection': VsdNsdTranslationRule.get_collection(),
'operation': 'insert_many',
'args': (translation_rules,)
}]
available_actions = data.get('available_actions', [])
for available_action in available_actions:
available_action['blueprint_id'] = vs_blueprint_id
if len(available_actions) > 0:
transaction_data += [{
'collection': VsbActions.get_collection(),
'operation': 'insert_many',
'args': (available_actions,)
}]
return vs_blueprint_id, transaction_data
def create_vs_blueprint(data):
vs_blueprint_id, transaction_data = _create_vs_blueprint(data)
transaction(aggregate_transactions(transaction_data))
return vs_blueprint_id
def get_nst():
return Nst.objects.all()
def delete_nst(nst_id):
Nst.get_or_404(nst_id=nst_id)
def delete_callback(session):
Nst.get_collection().delete_one({
"nst_id": nst_id
}, session=session)
transaction(delete_callback)
| import api.queries.vs_descriptor as vs_descriptor_queries
import uuid
from bson import ObjectId
from mongoengine.queryset.visitor import Q
from api.models.ns_descriptor import Nsd
from api.models.vnf import Vnfd
from api.models.vs_blueprint import VsBlueprintInfo, VsBlueprint, VsdNsdTranslationRule, VsbActions
from api.models.ns_template import Nst
from api.exceptions.exceptions import MalFormedException, FailedOperationException, AlreadyExistingEntityException, \
NotExistingEntityException
from api.exceptions.utils import exception_message_elements
from api.queries.utils import transaction, extract_file, download_file, get_json_in_folder, file_exists, move_file, \
remove_file_and_folder, convert_all_fields_to_snake, aggregate_transactions
from copy import deepcopy
from api.serializers.utils import pyangbind_load
from api.serializers.vnf import etsi_nfv_vnfd
from api.serializers.ns_descriptor import etsi_nfv_nsd
# noinspection PyBroadException
def _post_process_vsb(original_vs_blueprint_info, tenant_id):
target_vs_blueprint_info = deepcopy(original_vs_blueprint_info)
target_vs_blueprint_info.vs_blueprint = original_vs_blueprint_info.vs_blueprint
target_vs_blueprint_info.active_vsd_id = []
for id_ in original_vs_blueprint_info.active_vsd_id:
try:
target_vs_blueprint_info.active_vsd_id.append(vs_descriptor_queries.get_vs_descriptors(tenant_id, id_)[0])
except Exception:
continue
return target_vs_blueprint_info
# noinspection PyTypeChecker
def get_vs_blueprints(vsb_id=None, vsb_name=None, vsb_version=None, tenant_id=None, with_translation_rules=False):
arguments = locals()
arguments.pop('with_translation_rules', None)
parameters_size = len(dict(filter(lambda a: a[-1] is not None, arguments.items())))
if parameters_size == 1 and (vsb_id is not None):
vsbi = VsBlueprintInfo.get_or_404(vs_blueprint_id=vsb_id)
vsbi.vs_blueprint = VsBlueprint.get_or_404(blueprint_id=vsb_id)
if with_translation_rules:
vsbi.vs_blueprint.translation_rules = VsdNsdTranslationRule.objects.filter(blueprint_id=vsb_id)
return [vsbi]
elif parameters_size == 1 and (tenant_id is not None):
vsbi_list = []
for vsbi in VsBlueprintInfo.objects.all():
vsbi.vs_blueprint = VsBlueprint.get_or_404(name=vsbi.name, version=vsbi.vs_blueprint_version)
if with_translation_rules:
vs_blueprint_id = vsbi.vs_blueprint.blueprint_id
vsbi.vs_blueprint.translation_rules = VsdNsdTranslationRule.objects.filter(blueprint_id=vs_blueprint_id)
vsbi_list.append(_post_process_vsb(vsbi, tenant_id))
return vsbi_list
elif parameters_size == 2 and (vsb_name is not None) and (vsb_version is not None):
vsbi = VsBlueprintInfo.get_or_404(name=vsb_name, vs_blueprint_version=vsb_version)
vsbi.vs_blueprint = VsBlueprint.get_or_404(name=vsb_name, version=vsb_version)
if with_translation_rules:
vs_blueprint_id = vsbi.vs_blueprint.blueprint_id
vsbi.vs_blueprint.translation_rules = VsdNsdTranslationRule.objects.filter(blueprint_id=vs_blueprint_id)
return [vsbi]
elif parameters_size == 2 and (vsb_id is not None) and (tenant_id is not None):
vsbi = VsBlueprintInfo.get_or_404(vs_blueprint_id=vsb_id)
vsbi.vs_blueprint = VsBlueprint.get_or_404(blueprint_id=vsb_id)
if with_translation_rules:
vsbi.vs_blueprint.translation_rules = VsdNsdTranslationRule.objects.filter(blueprint_id=vsb_id)
return [_post_process_vsb(vsbi, tenant_id)]
elif parameters_size == 0:
all_vsbi = VsBlueprintInfo.objects.all()
for vsbi in all_vsbi:
vsbi.vs_blueprint = VsBlueprint.get_or_404(name=vsbi.name, version=vsbi.vs_blueprint_version)
if with_translation_rules:
vs_blueprint_id = vsbi.vs_blueprint.blueprint_id
vsbi.vs_blueprint.translation_rules = VsdNsdTranslationRule.objects.filter(blueprint_id=vs_blueprint_id)
return all_vsbi
raise MalFormedException()
def delete_vs_blueprint(vsb_id):
vsbi = VsBlueprintInfo.get_or_404(vs_blueprint_id=vsb_id)
if len(vsbi.active_vsd_id) > 0:
raise FailedOperationException("There are some VSDs associated to the VS Blueprint. Impossible to remove it.")
def delete_callback(session):
VsBlueprintInfo.get_collection().delete_one({
"vs_blueprint_id": vsb_id
}, session=session)
VsBlueprint.get_collection().delete_one({
"blueprint_id": vsb_id
}, session=session)
transaction(delete_callback)
def _store_vnfd(vnf, vnfd):
vnfd_id, vnfd_version = vnfd.pop('id', None), vnf.get('version')
if Vnfd.objects.filter((Q(vnfd_id=vnfd) & Q(version=vnfd_version)) |
(Q(name=vnf.get('name')) & Q(provider=vnf.get('provider')) &
Q(version=vnf.get('version')))).count() > 0:
raise AlreadyExistingEntityException(f"Vnfd with vnfdId: {vnfd.vndf_id} already present in DB")
vnfd['vnfd_id'] = vnfd_id
return vnfd
def _onboard_vnf_package(vnf):
downloaded_file = download_file(vnf.get('vnf_package_path'), str(uuid.uuid4()))
folder = extract_file(downloaded_file)
json_content = get_json_in_folder(folder)
if file_exists(f'{folder}/cloud-config.txt'):
# need to not delete cloud init
move_file(f'{folder}/cloud-config.txt')
remove_file_and_folder(downloaded_file, folder)
vnfd = pyangbind_load(etsi_nfv_vnfd(), json_content, "Invalid content for Vnfd object").get('etsi-nfv-vnfd:vnfd')
if vnfd is None:
raise MalFormedException('VNFD for onboarding is empty')
return _store_vnfd(vnf, convert_all_fields_to_snake(vnfd))
def _on_board_ns_template(nst, nsds, vnf_packages):
nsds = [] if nsds is None else nsds
vnf_packages = [] if vnf_packages is None else vnf_packages
# Vnf Packages
all_vnfd_data = []
for vnf in vnf_packages:
try:
vnfd_data = _onboard_vnf_package(vnf)
all_vnfd_data.append(vnfd_data)
except AlreadyExistingEntityException:
continue
transaction_data = []
if len(all_vnfd_data) > 0:
transaction_data += [
{
'collection': Vnfd.get_collection(),
'operation': 'insert_many',
'args': (all_vnfd_data,)
}
]
# Nsds
all_nsd_data = []
for nsd in nsds:
try:
nsd_data = convert_all_fields_to_snake(nsd)
all_nsd_data.append(nsd_data)
except AlreadyExistingEntityException:
continue
if len(all_nsd_data) > 0:
transaction_data += [
{
'collection': Nsd.get_collection(),
'operation': 'insert_many',
'args': (all_nsd_data,)
}
]
nst_name, nst_version, nst_id = nst.get('nst_name'), nst.get('nst_version'), nst.get('nst_id')
if Nst.objects.filter((Q(nst_name=nst_name) & Q(nst_version=nst_version)) | Q(nst_id=nst_id)).count() > 0:
raise AlreadyExistingEntityException(
f"NsTemplate with name {nst_name} and version {nst_version} or ID exists")
if len(nst) > 0:
transaction_data += [
{
'collection': Nst.get_collection(),
'operation': 'insert_one',
'args': (nst,)
}
]
return transaction_data
def _process_ns_descriptor_onboarding(data):
nsts, nsds, vnf_packages = data.get('nsts', []), data.get('nsds', []), data.get('vnf_packages', [])
if len(nsts) == 0 and len(nsds) == 0 and len(vnf_packages) == 0:
return
transaction_data = []
if len(nsts) > 0:
transaction_data += _on_board_ns_template(nsts[0], nsds, vnf_packages)
for nst in nsts[1:]:
transaction_data += _on_board_ns_template(nst, None, None)
return transaction_data
def _create_vs_blueprint(data):
transaction_data = _process_ns_descriptor_onboarding(data)
if transaction_data is None:
transaction_data = []
vs_blueprint = data.get('vs_blueprint', {})
name, version, owner = vs_blueprint.get('name'), vs_blueprint.get('version'), data.get('owner')
if VsBlueprintInfo.objects.filter(name=name, vs_blueprint_version=version).count() > 0 or \
VsBlueprint.objects.filter(name=name, version=version).count() > 0:
class_name, args = exception_message_elements(VsBlueprint, name=name, version=version)
raise AlreadyExistingEntityException(f"{class_name} with {args} already present in DB")
_id = ObjectId()
data['_id'] = _id
vs_blueprint_id = vs_blueprint['blueprint_id'] = str(_id)
translation_rules = data.get('translation_rules', [])
for translation_rule in translation_rules:
translation_rule['blueprint_id'] = vs_blueprint_id
transaction_data += [
{
'collection': VsBlueprint.get_collection(),
'operation': 'insert_one',
'args': (data.get('vs_blueprint'),)
},
{
'collection': VsBlueprintInfo.get_collection(),
'operation': 'insert_one',
'args': ({
'vs_blueprint_id': vs_blueprint_id,
'vs_blueprint_version': version,
'name': name,
'owner': owner
},)
}
]
if len(translation_rules) > 0:
transaction_data += [{
'collection': VsdNsdTranslationRule.get_collection(),
'operation': 'insert_many',
'args': (translation_rules,)
}]
available_actions = data.get('available_actions', [])
for available_action in available_actions:
available_action['blueprint_id'] = vs_blueprint_id
if len(available_actions) > 0:
transaction_data += [{
'collection': VsbActions.get_collection(),
'operation': 'insert_many',
'args': (available_actions,)
}]
return vs_blueprint_id, transaction_data
def create_vs_blueprint(data):
vs_blueprint_id, transaction_data = _create_vs_blueprint(data)
transaction(aggregate_transactions(transaction_data))
return vs_blueprint_id
def get_nst():
return Nst.objects.all()
def delete_nst(nst_id):
Nst.get_or_404(nst_id=nst_id)
def delete_callback(session):
Nst.get_collection().delete_one({
"nst_id": nst_id
}, session=session)
transaction(delete_callback) | en | 0.593668 | # noinspection PyBroadException # noinspection PyTypeChecker # need to not delete cloud init # Vnf Packages # Nsds | 1.586444 | 2 |
photo/migrations/0003_image.py | Ken-mbira/PHOTO_BOOK | 0 | 6619029 | # Generated by Django 3.2.8 on 2021-10-06 16:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('photo', '0002_location'),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('date_taken', models.DateTimeField()),
('descriptions', models.TextField(blank=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='photo.category')),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='photo.location')),
],
),
]
| # Generated by Django 3.2.8 on 2021-10-06 16:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('photo', '0002_location'),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('date_taken', models.DateTimeField()),
('descriptions', models.TextField(blank=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='photo.category')),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='photo.location')),
],
),
]
| en | 0.926087 | # Generated by Django 3.2.8 on 2021-10-06 16:39 | 1.771764 | 2 |
hello_world.py | felixbillie/learning-python | 0 | 6619030 | <gh_stars>0
print("Hello World Using Python Programming Language! ") | print("Hello World Using Python Programming Language! ") | none | 1 | 2.063202 | 2 | |
testsuite/ui/views/admin/audience/support_emails.py | dlaso99/3scale-tests | 5 | 6619031 | """View representations of Email pages"""
from widgetastic.widget import TextInput
from testsuite.ui.views.admin.audience import BaseAudienceView
class SupportEmailsView(BaseAudienceView):
"""View representation of Support Emails page"""
path_pattern = '/site/emails/edit'
support_email = TextInput(id="account_support_email")
def prerequisite(self):
return BaseAudienceView
@property
def is_displayed(self):
return BaseAudienceView.is_displayed.fget(self) and self.support_email.is_displayed \
and self.path in self.browser.url
| """View representations of Email pages"""
from widgetastic.widget import TextInput
from testsuite.ui.views.admin.audience import BaseAudienceView
class SupportEmailsView(BaseAudienceView):
"""View representation of Support Emails page"""
path_pattern = '/site/emails/edit'
support_email = TextInput(id="account_support_email")
def prerequisite(self):
return BaseAudienceView
@property
def is_displayed(self):
return BaseAudienceView.is_displayed.fget(self) and self.support_email.is_displayed \
and self.path in self.browser.url
| en | 0.833515 | View representations of Email pages View representation of Support Emails page | 2.438231 | 2 |
src/libminutaria/libminutaria.py | Locynaeh/minutaria | 1 | 6619032 | #!/usr/bin/env python3
"""
libminutaria
============
:Authors:
Locynaeh
:Version:
1.0
Provide a library allowing to create timers and presets managed by a JSON file
and an integrable CLI to manage both.
This script is directly usable in a terminal. Use -h/--help arguments for more
information on how to use the CLI provided.
This file can also be imported as a module.
Classes
-------
Timer
Launch a given timer and provide utilies to manage it.
Preset
Initiate a virtual preset to perform operations on it : add tp a JSON
file, get, delete, rename, change duration.
Functions
---------
get_cli_args
    Parse and return the minutaria command line arguments.
handle_cli_args
    Manage the CLI interface and correctness of user inputs.
logger
Return a console logger.
"""
__all__ = ["__version__",
"Timer",
"Preset",
"logger",
"get_cli_args",
"handle_cli_args"
]
import logging
from datetime import datetime, timedelta
import argparse
import json
class Timer:
    """Countdown timer whose remaining time renders as HH:MM:SS.n.

    A Timer is armed at construction time. It can report the remaining
    time before 00:00:00, tell whether the countdown has elapsed and be
    re-armed with its remaining time after a pause.

    Attributes
    ----------
    _base: datetime
        Reference instant used to compute how much time has passed.
    _actualization: datetime
        Last instant at which the timer was refreshed.
    _delta: timedelta
        Total duration of the countdown.
    _actualized_delta: timedelta
        Remaining duration as of the last refresh.
    get_timing: str
        Remaining time before 00:00:00 (property).

    Public methods
    --------------
    is_timing_reached
        Tell whether the countdown reached 00:00:00.
    continue_after_pause
        Re-arm the timer with its remaining time after a pause.
    """

    def __init__(self, hours: int = 0, minutes: int = 0, seconds: int = 0):
        """Arm a countdown of the given duration, starting now.

        Parameters
        ----------
        hours: int
            Hours part of the countdown.
        minutes: int
            Minutes part of the countdown.
        seconds: int
            Seconds part of the countdown.
        """
        launch = datetime.now()
        self._base = launch
        # Keep an independent copy of the launch instant: this field is
        # overwritten by every refresh while _base must stay fixed.
        self._actualization = datetime(launch.year, launch.month,
                                       launch.day, launch.hour,
                                       launch.minute, launch.second,
                                       launch.microsecond)
        self._delta = timedelta(hours=hours, minutes=minutes,
                                seconds=seconds)
        self._actualized_delta = timedelta(hours=hours, minutes=minutes,
                                           seconds=seconds)

    def _deadline(self) -> datetime:
        """Return the exact instant at which the countdown hits 00:00:00.

        Returns
        -------
        datetime
            Launch instant shifted by the full countdown duration.
        """
        return self._base + self._delta

    def _refresh(self) -> None:
        """Record the current instant and the remaining duration.

        Updates _actualization to now and _actualized_delta to the time
        still separating now from the deadline.
        """
        self._actualization = datetime.now()
        self._actualized_delta = self._deadline() - self._actualization

    def is_timing_reached(self) -> bool:
        """Tell whether the countdown reached 00:00:00.

        Returns
        -------
        bool
            True once the deadline has passed, False before.
        """
        self._refresh()
        return self._actualization >= self._deadline()

    @property
    def get_timing(self) -> str:
        """Remaining time before 00:00:00, as of the last refresh.

        Returns
        -------
        str
            Remaining duration rendered by timedelta's str().
        """
        return str(self._actualized_delta)

    def continue_after_pause(self) -> None:
        """Re-arm the timer after a pause.

        The remaining duration measured at the last refresh becomes the
        new full duration, counted from the current instant.
        """
        self._base = datetime.now()
        self._delta = self._actualized_delta
class Preset:
    """A timer preset manager for the Timer class, backed by a JSON file.

    Initialize a virtual timer preset which can be added to a dedicated
    preset management JSON file if it does not exist, modified if it
    does exist in this same file (name or duration), deleted from the
    file or fetched to be used as a timer by a Timer object.

    Attributes
    ----------
    _name: str
        The name of the timer preset, stored lowercased.
    _hours: int
        The hours quantity of the timer preset.
    _minutes: int
        The minutes quantity of the timer preset.
    _seconds: int
        The seconds quantity of the timer preset.
    _preset_file: str
        Path of the JSON file holding the presets.

    Class methods
    -------------
    get_all
        Get all existing preset names from a preset file.

    Public methods
    --------------
    add
        Add the virtual preset to the preset file if not already there.
    get
        Get the duration of the preset if it exists in the preset file.
    delete
        Delete the preset if it exists in the preset file.
    rename
        Rename the preset if it exists in the preset file.
    set_duration
        Set a new duration for the preset if it exists in the file.
    """

    def __init__(self, name: str,
                 hours: int = 0,
                 minutes: int = 0,
                 seconds: int = 0,
                 preset_file: str = 'preset.json'):
        """Initialize a virtual preset.

        Parameters
        ----------
        name: str
            The name of the timer preset (lowercased internally).
        hours: int
            The hours quantity of the timer preset.
        minutes: int
            The minutes quantity of the timer preset.
        seconds: int
            The seconds quantity of the timer preset.
        preset_file: str
            Path of the JSON preset file, created empty if missing.
        """
        self._name = name.lower()
        self._hours = hours
        self._minutes = minutes
        self._seconds = seconds
        self._preset_file = preset_file  # Shall be a .json
        # If the preset file doesn't exist, create it with an empty list
        try:
            with open(self._preset_file, 'r'):
                pass
        except FileNotFoundError:
            with open(self._preset_file, 'w') as preset_file_write:
                json.dump([], preset_file_write, indent=4)

    def _load(self) -> list:
        """Return the list of preset objects read from the preset file."""
        with open(self._preset_file, 'r') as preset_file_read:
            return json.load(preset_file_read)

    def _save(self, json_data: list) -> None:
        """Overwrite the preset file with the given list of presets."""
        with open(self._preset_file, 'w') as preset_file_write:
            json.dump(json_data, preset_file_write, indent=4)

    def add(self) -> dict:
        """Add a new preset.

        Check whether the chosen name already exists; if not, write the
        preset to the preset file and return the added JSON object as a
        dict, otherwise raise an exception.

        Returns
        -------
        preset_dict_to_append: dict
            The name and duration of the newly added preset.

        Raises
        ------
        ValueError
            If the preset does already exist.
        """
        try:
            # get() raises ValueError when the name is still available.
            self.get()
        except ValueError:
            # Prepare the set in a dict to be added as a json object
            preset_dict_to_append = {"name": self._name,
                                     "duration": {"hours": self._hours,
                                                  "min": self._minutes,
                                                  "secs": self._seconds
                                                  }
                                     }
            json_data = self._load()
            json_data.append(preset_dict_to_append)
            self._save(json_data)
            return preset_dict_to_append
        raise ValueError("ValueError: already existing preset")

    def get(self) -> dict:
        """Get an existing preset's duration.

        Returns
        -------
        dict
            The duration split as "hours", "minutes" and "seconds".

        Raises
        ------
        ValueError
            If the preset does not exist.
        """
        for preset in self._load():
            if preset["name"] == self._name:
                # Return at first match: add() guarantees unique names.
                return {"hours": preset["duration"]["hours"],
                        "minutes": preset["duration"]["min"],
                        "seconds": preset["duration"]["secs"]}
        raise ValueError("ValueError: Preset not found")

    @classmethod
    def get_all(cls, preset_file: str = 'preset.json') -> list:
        """Get all existing preset names.

        Returns
        -------
        preset_names: list
            Preset names, capitalized. Empty if the file is missing.

        Raises
        ------
        ValueError
            If the preset file exists but holds no preset.
        """
        preset_names = []
        try:
            with open(preset_file, 'r') as preset_file_read:
                json_data = json.load(preset_file_read)
            preset_names = [preset["name"].capitalize()
                            for preset in json_data]
            if not preset_names:
                raise ValueError("ValueError: No existing preset.")
        except FileNotFoundError:
            # A missing file simply means no preset was ever saved.
            pass
        return preset_names

    def delete(self) -> bool:
        """Delete an existing preset.

        Returns
        -------
        bool
            True if the preset got deleted.

        Raises
        ------
        ValueError
            If the preset does not exist.
        """
        self.get()  # raises ValueError if the preset does not exist
        json_data = self._load()
        for preset in json_data:
            if preset["name"] == self._name:
                json_data.remove(preset)
                self._save(json_data)
                return True

    def rename(self, new_name: str) -> bool:
        """Rename an existing preset.

        Parameters
        ----------
        new_name : str
            The new name to set for the existing preset.

        Returns
        -------
        bool
            True if the preset got renamed.

        Raises
        ------
        ValueError
            If the preset to rename does not exist, or if the new name
            corresponds to an already existing preset.
        """
        self.get()  # raises ValueError if the preset does not exist
        # Probe availability of the new name with a throwaway Preset
        # (previously it was stored as an instance attribute by mistake).
        probe = Preset(name=new_name, preset_file=self._preset_file)
        try:
            probe.get()
        except ValueError:
            json_data = self._load()
            for preset in json_data:
                if preset["name"] == self._name:
                    preset["name"] = new_name.lower()
            self._save(json_data)
            # Keep this object usable after the rename (the previous
            # implementation left the old name, so further calls failed).
            self._name = new_name.lower()
            return True
        raise ValueError("ValueError: already existing preset")

    def set_duration(self, hours: int, minutes: int, seconds: int) -> bool:
        """Update the duration of an existing preset.

        Parameters
        ----------
        hours: int
            The new hours quantity of the timer preset.
        minutes: int
            The new minutes quantity of the timer preset.
        seconds: int
            The new seconds quantity of the timer preset.

        Returns
        -------
        bool
            True if the duration got changed.

        Raises
        ------
        ValueError
            If the preset does not exist.
        """
        self.get()  # raises ValueError if the preset does not exist
        self._hours = hours
        self._minutes = minutes
        self._seconds = seconds
        json_data = self._load()
        for preset in json_data:
            if preset["name"] == self._name:
                preset["duration"]["hours"] = self._hours
                preset["duration"]["min"] = self._minutes
                preset["duration"]["secs"] = self._seconds
        self._save(json_data)
        return True
def logger(option: bool) -> logging.Logger:
    """Create and return the module's console logger.

    The logger itself is set to DEBUG; its console handler filters at
    DEBUG when `option` is true, WARNING otherwise. Repeated calls now
    reuse the already-attached handler instead of stacking duplicates
    (the previous implementation added one handler per call, which
    duplicated every log line).

    Parameters
    ----------
    option: bool
        True to enable debugging output on the console.

    Returns
    -------
    logging.Logger
        The configured console logger.
    """
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    console_level = logging.DEBUG if option else logging.WARNING
    if log.handlers:
        # Already configured by a previous call: only adjust the
        # verbosity of the existing console handler.
        log.handlers[0].setLevel(console_level)
        return log
    console_handler = logging.StreamHandler()
    console_handler.setLevel(console_level)
    # Format as asctime:name:level: message
    chf = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s: %(message)s')
    console_handler.setFormatter(chf)
    log.addHandler(console_handler)
    return log
def get_cli_args(default_timer: str) -> argparse.Namespace:
    """Command Line Interface for minutaria.

    Build the minutaria argument parser — timer duration split into
    hours, minutes and seconds, plus preset management (add, use,
    rename, modify duration, delete) — and parse the command line.

    Parameters
    ----------
    default_timer: str
        Timer shown in the epilog as the default duration.

    Returns
    -------
    argparse.Namespace
        The command line arguments input by the user.
    """
    cli = argparse.ArgumentParser(
        prog="minutaria",
        description=("Execute a given timer from min 00:00:01 to "
                     "max 23:59:59. Options -ap and -mpd shall be used "
                     "with duration parameters."),
        epilog=f"If no timer is provided, execute the default: "
               f"{default_timer}.")
    exclusive = cli.add_mutually_exclusive_group()
    cli.add_argument("-v", "--version", action="version",
                     version="%(prog)s 1.0")
    cli.add_argument("-d", "--debug", action="store_true", default=False,
                     help="enable debugging")
    cli.add_argument("-H", "--hours", type=int, action="store",
                     help="hour(s) to time")
    cli.add_argument("-M", "--minutes", type=int, action="store",
                     help="minute(s) to time")
    cli.add_argument("-S", "--seconds", type=int, action="store",
                     help="second(s) to time")
    exclusive.add_argument("-ap", "--add_preset", action="store",
                           metavar="PRESET_NAME",
                           help="name of the timer preset to create")
    exclusive.add_argument("-p", "--use_preset", action="store",
                           metavar="PRESET_NAME",
                           help="name of the timer preset to use")
    exclusive.add_argument("-rp", "--rename_preset", action="store",
                           nargs=2, metavar=("OLD_NAME", "NEW_NAME"),
                           help="names of the timer preset to rename "
                                "and the new")
    exclusive.add_argument("-mpd", "--modify_preset_duration",
                           action="store", metavar="PRESET_NAME",
                           help="name of the timer preset to modify")
    exclusive.add_argument("-dp", "--del_preset", action="store",
                           metavar="PRESET_NAME",
                           help="name of the timer preset to delete")
    return cli.parse_args()
def handle_cli_args(args: argparse.Namespace):
    """Command line arguments' handler for minutaria.

    If only a timing duration is chosen, return the following dictionary
    {"timer_hours": hours, "timer_min": minutes, "timer_secs": seconds}
    where "hours", "minutes" and "seconds" are integers (all None when no
    duration was given at all).
    Otherwise perform the requested preset operation (add, modify, rename,
    delete) and exit the program, or load the requested preset duration.
    Incorrect user input is reported on stdout before exiting.

    Parameters
    ----------
    args: argparse.Namespace
        Parsed command line arguments, as returned by get_cli_args().

    Returns
    -------
    timer_values: dict
        The duration (hours, minutes and seconds) to time.
    args.debug : bool
        True if set, else False.
    """
    # Accepted ranges error management. A value of 0 is falsy so "-H 0" and
    # "-M 0" skip the check, but 0 is a valid value for them anyway.
    if args.hours and args.hours not in range(0, 24):
        print("minutaria: Error: argument -H/--hours: invalid choice:"
              f" {args.hours} (choose from 0 to 23)")
        exit()
    if args.minutes and args.minutes not in range(0, 60):
        print(f"minutaria: Error: argument -M/--minutes: invalid choice:"
              f" {args.minutes} (choose from 0 to 59)")
        exit()
    # 0 second is explicitly rejected: the minimal timer is 00:00:01.
    if (args.seconds or args.seconds == 0) and args.seconds not in range(1, 60):
        print(f"minutaria: Error: argument -S/--seconds: invalid choice:"
              f" {args.seconds} (choose from 1 to 59)")
        exit()

    # True when at least one duration unit was given on the command line.
    any_duration = bool(args.hours or args.minutes or args.seconds)

    # Container for timer values, None until a duration is provided.
    timer_values = {
        "timer_hours": None,
        "timer_min": None,
        "timer_secs": None
    }
    if any_duration:
        timer_values["timer_hours"] = args.hours if args.hours is not None else 0
        timer_values["timer_min"] = args.minutes if args.minutes is not None else 0
        timer_values["timer_secs"] = args.seconds if args.seconds is not None else 0

    # -ap/--add_preset requires a duration to store with the new preset.
    if args.add_preset and not any_duration:
        print(f"minutaria: Error: argument -ap/--add_preset: "
              f"incomplete input: {args.add_preset} (indicate preset name "
              f"and corresponding timer with dedicated parameters)")
        exit()
    elif args.add_preset:
        # Create the corresponding preset and quit.
        preset = Preset(args.add_preset,
                        timer_values["timer_hours"],
                        timer_values["timer_min"],
                        timer_values["timer_secs"])
        try:
            preset.add()
            duration = timedelta(hours=+timer_values["timer_hours"],
                                 minutes=+timer_values["timer_min"],
                                 seconds=+timer_values["timer_secs"])
            print("New preset added: "
                  f"{args.add_preset.capitalize()} - "
                  f"{str(duration)}")
            exit()
        except ValueError:
            print(f"The preset name {args.add_preset.capitalize()} "
                  f"already exist. Please choose an other name.")
            exit()

    # -mpd/--modify_preset_duration also requires the new duration.
    if args.modify_preset_duration and not any_duration:
        print(f"minutaria: Error: argument -mpd/--modify_preset_duration: "
              f"incomplete input: {args.modify_preset_duration} (indicate "
              f"preset name and corresponding timer to modify with dedicated "
              f"parameters)")
        exit()
    elif args.modify_preset_duration:
        # Modify the corresponding preset and quit.
        try:
            preset = Preset(args.modify_preset_duration)
            modified = preset.set_duration(timer_values["timer_hours"],
                                           timer_values["timer_min"],
                                           timer_values["timer_secs"])
            duration = timedelta(hours=+timer_values["timer_hours"],
                                 minutes=+timer_values["timer_min"],
                                 seconds=+timer_values["timer_secs"])
            if modified:
                print("New preset duration: "
                      f"{args.modify_preset_duration.capitalize()}"
                      f" - {str(duration)}")
                exit()
        except ValueError:
            print(f"The preset {args.modify_preset_duration.capitalize()} "
                  "does not exist. Please choose an existing name.")
            exit()

    # -rp/--rename_preset expects no duration, only the two names.
    if args.rename_preset and any_duration:
        print("minutaria: Error: argument -rp/--rename_preset: invalid input: "
              "only indicate the names of the old and the new presets")
        exit()
    elif args.rename_preset:
        # Rename the corresponding preset and quit.
        old_name, new_name = args.rename_preset
        try:
            renamed = Preset(old_name).rename(new_name)
            if renamed:
                print(f"Preset {old_name.capitalize()} renamed: "
                      f"{new_name.capitalize()}")
                exit()
        except ValueError:
            print(f"The preset {old_name.capitalize()} "
                  f"does not exist or the new name "
                  f"{new_name.capitalize()} is not available.")
            exit()

    # -dp/--del_preset expects no duration, only the preset name.
    if args.del_preset and any_duration:
        print("minutaria: Error: argument -dp/--del_preset: "
              "invalid input: only indicate the name of the preset to delete")
        exit()
    elif args.del_preset:
        # Delete the corresponding preset and quit.
        try:
            deleted = Preset(args.del_preset).delete()
            if deleted:
                print(f"Preset deleted: {args.del_preset.capitalize()}")
                exit()
        except ValueError:
            print(f"The preset {args.del_preset.capitalize()} does not exist.")
            exit()

    # -p/--use_preset expects no duration, only the preset name.
    if args.use_preset and any_duration:
        print("minutaria: Error: argument -p/--use_preset: "
              "invalid input: only indicate the name of the preset to use")
        exit()
    elif args.use_preset:
        try:
            # Load the duration of the corresponding preset.
            preset_duration = Preset(args.use_preset).get()
            if preset_duration:
                timer_values["timer_hours"] = preset_duration["hours"]
                timer_values["timer_min"] = preset_duration["minutes"]
                timer_values["timer_secs"] = preset_duration["seconds"]
        except ValueError:
            print(f"The preset {args.use_preset.capitalize()} "
                  "does not exist. Please choose an existing preset.")
            exit()

    return timer_values, args.debug
if __name__ == '__main__':
    # Default parameters to be used if this file is launched as a test
    # script; edit them to try other durations.
    TIMER_HOURS = 0  # min 0, max 23
    TIMER_MIN = 0    # min 0, max 59
    TIMER_SEC = 5    # min 0, max 59

    # Initialize and launch a timer according to the parameters above.
    timer = Timer(hours=TIMER_HOURS, minutes=TIMER_MIN, seconds=TIMER_SEC)

    # Poll the timer and refresh the remaining time on a single line.
    while not timer.is_timing_reached():
        print("minutaria -", "Remaining :", timer.get_timing[:9], end='\r',
              flush=True)

    # Timer reached 00:00:00: ring and pad with spaces to clear the line.
    print("GONG ! " * 3 + ' '*17)
| #!/usr/bin/env python3
"""
libminutaria
============
:Authors:
Locynaeh
:Version:
1.0
Provide a library allowing to create timers and presets managed by a JSON file
and an integrable CLI to manage both.
This script is directly usable in a terminal. Use -h/--help arguments for more
information on how to use the CLI provided.
This file can also be imported as a module.
Classes
-------
Timer
Launch a given timer and provide utilities to manage it.
Preset
Initiate a virtual preset to perform operations on it: add to a JSON
file, get, delete, rename, change duration.
Functions
---------
minutaria_cli
Manage the CLI interface and correctness of user inputs.
logger
Return a console logger.
"""
__all__ = ["__version__",
"Timer",
"Preset",
"logger",
"get_cli_args",
"handle_cli_args"
]
import logging
from datetime import datetime, timedelta
import argparse
import json
class Timer:
    """
    Simple timer printing as HH:MM:SS.n

    Allow to launch a given timer, check remaining time before 00:00:00,
    check whether timing is reached and get the current timing along the
    process.

    Attributes
    ----------
    _base: datetime
        The time at timer launch, kept as the comparison base used to
        compute the time passed.
    _actualization: datetime
        The current time, updated along the timer.
    _delta: timedelta
        The timer duration.
    _actualized_delta: timedelta
        The remaining duration, updated along the timer.
    get_timing: str
        The actual remaining time to reach 00:00:00 for a launched timer.

    Public methods
    --------------
    is_timing_reached
        Check if timing reached 00:00:00.
    continue_after_pause
        Actualize timer parameters to continue timing after a pause.
    """

    def __init__(self, hours: int = 0, minutes: int = 0, seconds: int = 0):
        """Create and launch a given timer.

        Parameters
        ----------
        hours: int
            The hours quantity of the timer.
        minutes: int
            The minutes quantity of the timer.
        seconds: int
            The seconds quantity of the timer.
        """
        now = datetime.now()
        duration = timedelta(hours=hours, minutes=minutes, seconds=seconds)
        self._base = now
        # At launch, the last actualization is the launch time itself
        # (datetime objects are immutable, so sharing it is safe).
        self._actualization = now
        self._delta = duration
        self._actualized_delta = duration

    def _convert_delta_to_datetime(self) -> datetime:
        """Return the exact point in time at which the timer ends.

        Converting the remaining duration to a datetime allows direct
        comparison against the current time.

        Returns
        -------
        datetime
            Exact point of time to reach 00:00:00.
        """
        return self._base + self._delta

    def _rebase_current_time(self) -> None:
        """Actualize timing according to current time.

        Record the current point in time and recompute the remaining
        duration from it.
        """
        self._actualization = datetime.now()
        self._actualized_delta = (self._convert_delta_to_datetime()
                                  - self._actualization)

    def is_timing_reached(self) -> bool:
        """Check if timing reached 00:00:00.

        Returns
        -------
        bool
            True if timing reached 00:00:00, else False.
        """
        self._rebase_current_time()
        return self._actualization >= self._convert_delta_to_datetime()

    @property
    def get_timing(self) -> str:
        """str: The actual remaining time to reach 00:00:00."""
        return str(self._actualized_delta)

    def continue_after_pause(self) -> None:
        """Actualize timer parameters to continue timing after a pause.

        Rebase the timer on the current time with the remaining duration,
        so the pause interval is not counted against the timer.
        """
        self._base = datetime.now()
        self._delta = self._actualized_delta
class Preset:
    """
    A preset timer manager for the Timer class.

    Initialize a virtual timer preset which can be added as a preset to a
    dedicated preset management JSON file if it does not exist, modified if
    it does exist in this same file (name or duration), deleted from the
    file or fetched to be used as a timer by a Timer object.

    Attributes
    ----------
    _name: str
        The name of the timer preset (stored lowercased).
    _hours: int
        The hours quantity of the timer preset.
    _minutes: int
        The minutes quantity of the timer preset.
    _seconds: int
        The seconds quantity of the timer preset.
    _preset_file: str
        Path of the JSON file holding the presets.

    Class methods
    -------------
    get_all
        Get all existing preset names in the preset file.

    Public methods
    --------------
    add
        Add the virtual preset to the JSON preset file if it does not exist.
    get
        Get the timing of the named preset if it exists in the preset file.
    delete
        Delete the preset if it exists in the JSON preset file.
    rename
        Rename the preset if it exists in the JSON preset file.
    set_duration
        Set a new duration to the preset if it exists in the preset file.
    """

    def __init__(self, name: str,
                 hours: int = 0,
                 minutes: int = 0,
                 seconds: int = 0,
                 preset_file: str = 'preset.json'):
        """Initialize a virtual preset.

        Parameters
        ----------
        name: str
            The name of the timer preset, lowercased for storage.
        hours: int
            The hours quantity of the timer preset.
        minutes: int
            The minutes quantity of the timer preset.
        seconds: int
            The seconds quantity of the timer preset.
        preset_file: str
            Path of the JSON file holding the presets.
        """
        self._name = name.lower()
        self._hours = hours
        self._minutes = minutes
        self._seconds = seconds
        self._preset_file = preset_file  # Shall be a .json
        # If the preset file doesn't exist yet, create it with an empty list.
        try:
            with open(self._preset_file, 'r'):
                pass
        except FileNotFoundError:
            with open(self._preset_file, 'w') as preset_file_write:
                json.dump([], preset_file_write, indent=4)

    def _load(self) -> list:
        """Return the content of the JSON preset file as a list of presets."""
        with open(self._preset_file, 'r') as preset_file_read:
            return json.load(preset_file_read)

    def _save(self, json_data: list) -> None:
        """Write json_data back to the JSON preset file."""
        with open(self._preset_file, 'w') as preset_file_write:
            json.dump(json_data, preset_file_write, indent=4)

    def add(self) -> dict:
        """Add a new preset.

        Check whether the chosen name already exists; if not, create the
        preset, write it in the preset file and return the added JSON object
        as a dict; if it does exist, raise an exception.

        Returns
        -------
        preset_dict_to_append: dict
            The name and duration of the new added preset.

        Raises
        ------
        ValueError
            If the preset does already exist.
        """
        # get() raises ValueError when the preset does NOT exist, which is
        # precisely the case allowing the addition.
        try:
            self.get()
        except ValueError:
            preset_dict_to_append = {"name": self._name,
                                     "duration": {"hours": self._hours,
                                                  "min": self._minutes,
                                                  "secs": self._seconds
                                                  }
                                     }
            json_data = self._load()
            json_data.append(preset_dict_to_append)
            self._save(json_data)
            return preset_dict_to_append
        else:
            raise ValueError("ValueError: already existing preset")

    def get(self) -> dict:
        """Get an existing preset's duration.

        Check whether the preset name does exist; if not raise an exception,
        if yes return a dict containing the timer values.

        Returns
        -------
        timer_values: dict
            The duration (hours, minutes and seconds) of the existing preset.

        Raises
        ------
        ValueError
            If the preset does not exist.
        """
        for preset in self._load():
            if preset["name"] == self._name:
                return {"hours": preset["duration"]["hours"],
                        "minutes": preset["duration"]["min"],
                        "seconds": preset["duration"]["secs"]}
        raise ValueError("ValueError: Preset not found")

    @classmethod
    def get_all(cls, preset_file='preset.json') -> list:
        """Get all existing preset names.

        Returns
        -------
        preset_names: list[str]
            Preset names capitalized. An empty list when the preset file
            does not exist at all.

        Raises
        ------
        ValueError
            If the preset file exists but contains no preset.
        """
        preset_names = []
        try:
            with open(preset_file, 'r') as preset_file_read:
                json_data = json.load(preset_file_read)
            preset_names = [preset["name"].capitalize()
                            for preset in json_data]
            if not preset_names:
                raise ValueError("ValueError: No existing preset.")
        except FileNotFoundError:
            # A missing file is tolerated: no preset has been created yet.
            pass
        return preset_names

    def delete(self) -> bool:
        """Delete an existing preset.

        Check whether the preset name does exist; if not raise an error, if
        yes delete the preset from the preset file.

        Returns
        -------
        bool
            True if the preset got deleted.

        Raises
        ------
        ValueError
            If the preset does not exist.
        """
        self.get()  # raises ValueError when the preset does not exist
        json_data = self._load()
        # Iterate over a copy: the list is mutated while being searched.
        for preset in list(json_data):
            if preset["name"] == self._name:
                json_data.remove(preset)
                self._save(json_data)
                return True

    def rename(self, new_name: str) -> bool:
        """Rename an existing preset.

        Check whether the preset name to change does exist; if not raise an
        exception. Check whether the new preset name is free; if so rename
        the preset in the preset file, otherwise raise an exception.

        Parameters
        ----------
        new_name : str
            The new name to set for the existing preset.

        Returns
        -------
        bool
            True if the preset got renamed.

        Raises
        ------
        ValueError
            If the given preset name to rename does not exist, or if the
            given new name corresponds to an existing preset.
        """
        self.get()  # raises ValueError when the preset to rename is missing
        # Kept local (was stored on self before): the candidate is only
        # needed to check that the new name is available.
        candidate = Preset(name=new_name, preset_file=self._preset_file)
        try:
            candidate.get()
        except ValueError:
            json_data = self._load()
            for preset in json_data:
                if preset["name"] == self._name:
                    preset["name"] = new_name.lower()
                    self._save(json_data)
                    return True
        else:
            raise ValueError("ValueError: already existing preset")

    def set_duration(self, hours: int, minutes: int, seconds: int) -> bool:
        """Set a new duration on an existing preset.

        Check whether the chosen name does exist; if not raise an exception,
        if yes update the preset duration according to the parameters and
        write it in the preset file.

        Parameters
        ----------
        hours: int
            The new hours quantity of the timer preset.
        minutes: int
            The new minutes quantity of the timer preset.
        seconds: int
            The new seconds quantity of the timer preset.

        Returns
        -------
        bool
            True if the duration got changed.

        Raises
        ------
        ValueError
            If the preset does not exist.
        """
        self.get()  # raises ValueError when the preset does not exist
        self._hours = hours
        self._minutes = minutes
        self._seconds = seconds
        json_data = self._load()
        for preset in json_data:
            if preset["name"] == self._name:
                preset["duration"]["hours"] = self._hours
                preset["duration"]["min"] = self._minutes
                preset["duration"]["secs"] = self._seconds
                self._save(json_data)
                return True
def logger(option: bool) -> logging.Logger:
    """Create a console logger.

    Return a console logger whose handler level is set to DEBUG when
    *option* is truthy, WARNING otherwise. Calling the function again
    reconfigures the same logger instead of stacking a new handler on it,
    so repeated calls no longer duplicate every log line.

    Parameters
    ----------
    option: bool
        True to enable debug output on the console.

    Returns
    -------
    logging.Logger
        The configured module logger.
    """
    # Create (or fetch) the module logger
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    # Drop handlers added by previous calls: unconditionally calling
    # addHandler() would otherwise emit each record once per call made
    # so far.
    for handler in log.handlers[:]:
        log.removeHandler(handler)
    # create console handler and set its level according to option
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG if option else logging.WARNING)
    # create formatter and attach everything
    formatter = logging.Formatter(
        '%(asctime)s:%(name)s:%(levelname)s: %(message)s')
    console_handler.setFormatter(formatter)
    log.addHandler(console_handler)
    return log
def get_cli_args(default_timer: str) -> argparse.Namespace:
    """Command Line Interface for minutaria.

    CLI for minutaria supporting choosing timer duration by hours, minutes
    and seconds separately and managing presets: add, delete, rename, change
    duration of an existing preset and use an existing preset.

    Parameters
    ----------
    default_timer: str
        Human-readable duration shown in the help epilog as the fallback
        timer used when no duration is given.

    Returns
    -------
    argparse.Namespace
        The command line arguments input by the user.
    """
    parser = argparse.ArgumentParser(
        prog="minutaria",
        description="Execute a given timer from min 00:00:01 to max 23:59:59."
                    " Options -ap and -mpd shall be used with duration "
                    "parameters.",
        epilog=f"If no timer is provided, execute the default: "
               f"{default_timer}.")
    parser.add_argument("-v", "--version", action="version",
                        version="%(prog)s 1.0")
    parser.add_argument("-d", "--debug", action="store_true", default=False,
                        help="enable debugging")
    # Duration of the timer, each unit given separately.
    for flags, help_text in ((("-H", "--hours"), "hour(s) to time"),
                             (("-M", "--minutes"), "minute(s) to time"),
                             (("-S", "--seconds"), "second(s) to time")):
        parser.add_argument(*flags, type=int, action="store", help=help_text)
    # Preset management operations are mutually exclusive with each other.
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-ap", "--add_preset", action="store",
                       metavar="PRESET_NAME",
                       help="name of the timer preset to create")
    group.add_argument("-p", "--use_preset", action="store",
                       metavar="PRESET_NAME",
                       help="name of the timer preset to use")
    group.add_argument("-rp", "--rename_preset", action="store", nargs=2,
                       metavar=("OLD_NAME", "NEW_NAME"),
                       help="names of the timer preset to rename and the new")
    group.add_argument("-mpd", "--modify_preset_duration", action="store",
                       metavar="PRESET_NAME",
                       help="name of the timer preset to modify")
    group.add_argument("-dp", "--del_preset", action="store",
                       metavar="PRESET_NAME",
                       help="name of the timer preset to delete")
    return parser.parse_args()
def handle_cli_args(args: argparse.Namespace):
    """Command line arguments' handler for minutaria.

    If only a timing duration is chosen, return the following dictionary
    {"timer_hours": hours, "timer_min": minutes, "timer_secs": seconds}
    where "hours", "minutes" and "seconds" are integers (all None when no
    duration was given at all).
    Otherwise perform the requested preset operation (add, modify, rename,
    delete) and exit the program, or load the requested preset duration.
    Incorrect user input is reported on stdout before exiting.

    Parameters
    ----------
    args: argparse.Namespace
        Parsed command line arguments, as returned by get_cli_args().

    Returns
    -------
    timer_values: dict
        The duration (hours, minutes and seconds) to time.
    args.debug : bool
        True if set, else False.
    """
    # Accepted ranges error management. A value of 0 is falsy so "-H 0" and
    # "-M 0" skip the check, but 0 is a valid value for them anyway.
    if args.hours and args.hours not in range(0, 24):
        print("minutaria: Error: argument -H/--hours: invalid choice:"
              f" {args.hours} (choose from 0 to 23)")
        exit()
    if args.minutes and args.minutes not in range(0, 60):
        print(f"minutaria: Error: argument -M/--minutes: invalid choice:"
              f" {args.minutes} (choose from 0 to 59)")
        exit()
    # 0 second is explicitly rejected: the minimal timer is 00:00:01.
    if (args.seconds or args.seconds == 0) and args.seconds not in range(1, 60):
        print(f"minutaria: Error: argument -S/--seconds: invalid choice:"
              f" {args.seconds} (choose from 1 to 59)")
        exit()

    # True when at least one duration unit was given on the command line.
    any_duration = bool(args.hours or args.minutes or args.seconds)

    # Container for timer values, None until a duration is provided.
    timer_values = {
        "timer_hours": None,
        "timer_min": None,
        "timer_secs": None
    }
    if any_duration:
        timer_values["timer_hours"] = args.hours if args.hours is not None else 0
        timer_values["timer_min"] = args.minutes if args.minutes is not None else 0
        timer_values["timer_secs"] = args.seconds if args.seconds is not None else 0

    # -ap/--add_preset requires a duration to store with the new preset.
    if args.add_preset and not any_duration:
        print(f"minutaria: Error: argument -ap/--add_preset: "
              f"incomplete input: {args.add_preset} (indicate preset name "
              f"and corresponding timer with dedicated parameters)")
        exit()
    elif args.add_preset:
        # Create the corresponding preset and quit.
        preset = Preset(args.add_preset,
                        timer_values["timer_hours"],
                        timer_values["timer_min"],
                        timer_values["timer_secs"])
        try:
            preset.add()
            duration = timedelta(hours=+timer_values["timer_hours"],
                                 minutes=+timer_values["timer_min"],
                                 seconds=+timer_values["timer_secs"])
            print("New preset added: "
                  f"{args.add_preset.capitalize()} - "
                  f"{str(duration)}")
            exit()
        except ValueError:
            print(f"The preset name {args.add_preset.capitalize()} "
                  f"already exist. Please choose an other name.")
            exit()

    # -mpd/--modify_preset_duration also requires the new duration.
    if args.modify_preset_duration and not any_duration:
        print(f"minutaria: Error: argument -mpd/--modify_preset_duration: "
              f"incomplete input: {args.modify_preset_duration} (indicate "
              f"preset name and corresponding timer to modify with dedicated "
              f"parameters)")
        exit()
    elif args.modify_preset_duration:
        # Modify the corresponding preset and quit.
        try:
            preset = Preset(args.modify_preset_duration)
            modified = preset.set_duration(timer_values["timer_hours"],
                                           timer_values["timer_min"],
                                           timer_values["timer_secs"])
            duration = timedelta(hours=+timer_values["timer_hours"],
                                 minutes=+timer_values["timer_min"],
                                 seconds=+timer_values["timer_secs"])
            if modified:
                print("New preset duration: "
                      f"{args.modify_preset_duration.capitalize()}"
                      f" - {str(duration)}")
                exit()
        except ValueError:
            print(f"The preset {args.modify_preset_duration.capitalize()} "
                  "does not exist. Please choose an existing name.")
            exit()

    # -rp/--rename_preset expects no duration, only the two names.
    if args.rename_preset and any_duration:
        print("minutaria: Error: argument -rp/--rename_preset: invalid input: "
              "only indicate the names of the old and the new presets")
        exit()
    elif args.rename_preset:
        # Rename the corresponding preset and quit.
        old_name, new_name = args.rename_preset
        try:
            renamed = Preset(old_name).rename(new_name)
            if renamed:
                print(f"Preset {old_name.capitalize()} renamed: "
                      f"{new_name.capitalize()}")
                exit()
        except ValueError:
            print(f"The preset {old_name.capitalize()} "
                  f"does not exist or the new name "
                  f"{new_name.capitalize()} is not available.")
            exit()

    # -dp/--del_preset expects no duration, only the preset name.
    if args.del_preset and any_duration:
        print("minutaria: Error: argument -dp/--del_preset: "
              "invalid input: only indicate the name of the preset to delete")
        exit()
    elif args.del_preset:
        # Delete the corresponding preset and quit.
        try:
            deleted = Preset(args.del_preset).delete()
            if deleted:
                print(f"Preset deleted: {args.del_preset.capitalize()}")
                exit()
        except ValueError:
            print(f"The preset {args.del_preset.capitalize()} does not exist.")
            exit()

    # -p/--use_preset expects no duration, only the preset name.
    if args.use_preset and any_duration:
        print("minutaria: Error: argument -p/--use_preset: "
              "invalid input: only indicate the name of the preset to use")
        exit()
    elif args.use_preset:
        try:
            # Load the duration of the corresponding preset.
            preset_duration = Preset(args.use_preset).get()
            if preset_duration:
                timer_values["timer_hours"] = preset_duration["hours"]
                timer_values["timer_min"] = preset_duration["minutes"]
                timer_values["timer_secs"] = preset_duration["seconds"]
        except ValueError:
            print(f"The preset {args.use_preset.capitalize()} "
                  "does not exist. Please choose an existing preset.")
            exit()

    return timer_values, args.debug
if __name__ == '__main__':
    # Default parameters to be used if this file is launched as a test
    # script; edit them to try other durations.
    TIMER_HOURS = 0  # min 0, max 23
    TIMER_MIN = 0    # min 0, max 59
    TIMER_SEC = 5    # min 0, max 59

    # Initialize and launch a timer according to the parameters above.
    timer = Timer(hours=TIMER_HOURS, minutes=TIMER_MIN, seconds=TIMER_SEC)

    # Poll the timer and refresh the remaining time on a single line.
    while not timer.is_timing_reached():
        print("minutaria -", "Remaining :", timer.get_timing[:9], end='\r',
              flush=True)

    # Timer reached 00:00:00: ring and pad with spaces to clear the line.
    print("GONG ! " * 3 + ' '*17)
| en | 0.685702 | #!/usr/bin/env python3 libminutaria ============ :Authors: Locynaeh :Version: 1.0 Provide a library allowing to create timers and presets managed by a JSON file and an integrable CLI to manage both. This script is directly usable in a terminal. Use -h/--help arguments for more information on how to use the CLI provided. This file can also be imported as a module. Classes ------- Timer Launch a given timer and provide utilies to manage it. Preset Initiate a virtual preset to perform operations on it : add tp a JSON file, get, delete, rename, change duration. Functions --------- minutaria_cli Manage the CLI interface and correctness of user inputs. logger Return a console logger. Simple timer printing as HH:MM:SS.n Allow to launch a given timer, check remaining time before 00:00:00, check wether timing is reached and get the current timing along the process. Attributes ---------- _base: datetime The time at timer launch to be kept as a comparison base to calculate the time passed _actualization: datetime The current time to be updated along the timer _delta: timedelta The timer duration _actualized_delta: timedelta The actualized duration according to time passed to be updated along the timer get_timing: str The actual remaining time to reach 00:00:00 for a launched timer. Public methods -------------- is_timing_reached Check if timing reached 00:00:00. continue_after_pause Actualize timer parameters to continue timing after a pause. Create and launch a given timer. Parameters ---------- hours: int The hours quantity of the timer minutes: int The minutes quantity of the timer seconds: int The seconds quantity of the timer Convert the base timedelta object to a datetime object allowing arithmetic on it. Returns ------- datetime Exact point of time to reach 00:00:00. Actualize timing according to current time. Set the actual exact point of time since timer launch. Set the actual delta since timer launch. Check if timing reached 00:00:00. 
Returns ------- bool True if timing reached 00:00:00, else False. The actual remaining time to reach 00:00:00. Returns ------- str The actual remaining time to reach 00:00:00. Actualize timer parameters to continue timing after a pause. Set the actual exact point of time since timer launch. Set the actual delta since timer launch. A preset timer manager for the Timer class Initialize a virtual timer preset which could be add as a preset to a dedicated preset management JSON file if it does not exist, modified if it does exist in this same file (name or duration), delete from the file or get to be use as a timer by a Timer object. Attributes ---------- _name: str The name of the timer preset _hours: int The hours quantity of the timer preset _minutes: int The minutes quantity of the timer preset _seconds: int The seconds quantity of the timer preset Class methods ------------- get_all Get all existing preset names in preset.json. Public methods -------------- add Add the virtual preset to the JSON file preset.json if not exist. get Get the timing from the virtual timer name if exist in preset.json. delete Delete the preset if exist in the JSON file preset.json. rename Rename the preset if exist in the JSON file preset.json. set_duration set a new duration to the preset if exist in the JSON file preset.json. Initialize a virtual preset. Parameters ---------- name: str The name of the timer preset hours: int The hours quantity of the timer preset minutes: int The minutes quantity of the timer preset seconds: int The seconds quantity of the timer preset # Shall be a .json # If the preset file doesn't exist, create it Add a new preset. Check whether the choosen name does exist, if not create the preset, write it in the preset.json file and return the json object added as a dict, if yes raise an exception. Returns ------- preset_dict_to_append: dict The name and duration of the new added preset. Raises ------ ValueError If the preset does already exist. 
# Create a data set to be inclued, preset name is lowercased # Check wether the name already exist # Prepare the set in a dict to be added as a json object # Open the json preset file to add the new preset # Load json presets to be modified # Append the new json object Get an existing preset's duration. Check whether the preset name does exist, if not raise an exception, if yes return a dict containing timer values. Returns ------- timer_values: dict The duration (hours, minutes and seconds) of the existing preset. Raises ------ ValueError If the preset does not exist. # Open the json preset file to search for the existing preset # Load json presets to be modified # Search if the preset does exist # Get the preset's timing Get all existing preset names. Check whether preset names do exist, if not raise an exception, if yes return a list containing all names. Returns ------- preset_names: list[str] Preset names capitalized. Raises ------ ValueError If there is no existing preset. # Open the json preset file to search for the existing preset # Load json presets to be modified # Add each existing preset name to the list Delete an existing preset. Check whether the preset name does exist, if not raise an error, if yes delete the preset from the preset.json file. Returns ------- bool True if the preset got deleted. Raises ------ ValueError If the preset does not exist. # Check wether the preset exist # If not raise the corresponding exception # Open the json preset file to search for the existing preset to delete # Load json presets to be modified # Search for the preset to delete # Delete the preset # Append the modified json object Rename an existing preset. Check whether the preset name to change does exist, if not raise an exception. Check wether the new preset name does exist, if not rename the preset in the preset.json file, if yes raise an exception. Parameters ---------- new_name : str The new name to set for the existing preset. 
Returns ------- bool True if the preset got renamed. Raises ------ ValueError If the given preset name to rename does not exist. ValueError If the given new name corresponds to an existing preset. # Check wether the preset exist and if the new name is available # Open the json preset file to search for the preset to rename # Load json presets to be modified # Search for the preset name # Rename it if found # Append the modified json object Check whether the choosen name does exist, if not raise an exception, if yes update the preset duration according to parameters, write it in the preset.json file. Parameters ---------- hours: int The new hours quantity of the timer preset minutes: int The new minutes quantity of the timer preset seconds: int The new seconds quantity of the timer preset Returns ------- bool True if the duration got changed. Raises ------ ValueError If the preset does not exist. # Check wether the preset exist # Open the json preset file to search for the preset to modify # Load json presets to be modified # Search for the preset name # Get the preset's timing # Append the modified json object Create a logger. Create and return a console logger with level set to WARNING or DEBUG if option provided is evaluate to True. # Create logger # create console handler and set level to debug # create formatter # add formatter to ch # add ch to logger Command Line Interface for minutaria. CLI for minutaria supporting choosing timer duration by hours, minutes and seconds separately and managing preset : add, delete, rename, change duration of an existing preset and use an existing preset. Returns ------- argparse.Namespace The command line arguments input by the user. Command line arguments'handler for minutaria. If a timing duration only is choosen, return the following dictionary {"timer_hours": hours, "timer_min": minutes, "timer_secs": seconds} where "hours", "minutes" and "seconds" are integers. 
Else, exit the program after having done the expecting actions. Also, manage incorrect user inputs. Returns ------- timer_values: dict The duration (hours, minutes and seconds) of the requested preset. args.debug : bool True if set, else False. # Accepted ranges error management # Container for timer values # Actualize timing global variables if at list one CLI argument is used # Check whether the user input a timer with the name of the preset to add # Create the corresponding preset and quit # Check whether the user input a timer with the name of # the preset to modify # Modify the corresponding preset and quit # Check whether the preset to rename is the only user input # Rename the corresponding preset and quit # Check whether the preset to delete is the only user input # Delete the corresponding preset and quit # Check whether the preset to get and use is the only user input # Use the corresponding preset # Check wether the preset does exist # Default parameters to be use if this file is launched as a test script # or modified by user input # min 0, max 23 # min 0, max 59 # min 0, max 59 # Initialize and launch a timer according to parameters # Check remaining time along the timer and print it # Timer reached 00:00:00 # Print 3 "GONG !" and some spaces to clear the line | 2.806863 | 3 |
app/models/user.py | michael-gann/larder | 0 | 6619033 | <filename>app/models/user.py
from .db import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
class User(db.Model, UserMixin):
    """Application user account.

    Stores login credentials (unique email + hashed password) and profile
    data, and owns the user's recipes, cooking lists and pantry ingredients.
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    first_name = db.Column(db.String(30), nullable=False)
    last_name = db.Column(db.String(30), nullable=False)
    email = db.Column(db.String(255), nullable=False, unique=True)
    hashed_password = db.Column(db.String(255), nullable=False)
    created_at = db.Column(db.DateTime, server_default=db.func.now())
    # BUG FIX: the original used server_onupdate=db.func.now(), which is only
    # a hint that the server *may* change the value on UPDATE -- SQLAlchemy
    # does not emit any ON UPDATE clause for it, so updated_at never changed.
    # onupdate= makes SQLAlchemy set the timestamp on every UPDATE statement.
    updated_at = db.Column(db.DateTime, server_default=db.func.now(),
                           onupdate=db.func.now())
    recipes = db.relationship("Recipe", back_populates="users")
    cooking_lists = db.relationship("CookingList", back_populates="users")
    pantry_ingredients = db.relationship(
        "PantryIngredient", back_populates="users")

    @property
    def password(self):
        """Expose the stored hash; the raw password is never retrievable."""
        return self.hashed_password

    @password.setter
    def password(self, password):
        # Hash on assignment so plaintext never reaches the database.
        self.hashed_password = generate_password_hash(password)

    def check_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password, password)

    def to_dict(self):
        """Serialize all column values to a plain dict.

        NOTE: includes hashed_password -- callers sending this to clients
        should strip it first.
        """
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
| <filename>app/models/user.py
from .db import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
class User(db.Model, UserMixin):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.String(30), nullable=False)
last_name = db.Column(db.String(30), nullable=False)
email = db.Column(db.String(255), nullable=False, unique=True)
hashed_password = db.Column(db.String(255), nullable=False)
created_at = db.Column(db.DateTime, server_default=db.func.now())
updated_at = db.Column(db.DateTime, server_default=db.func.now(),
server_onupdate=db.func.now())
recipes = db.relationship("Recipe", back_populates="users")
cooking_lists = db.relationship("CookingList", back_populates="users")
pantry_ingredients = db.relationship(
"PantryIngredient", back_populates="users")
@property
def password(self):
return self.hashed_password
@password.setter
def password(self, password):
self.hashed_password = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password, password)
def to_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
| none | 1 | 2.779327 | 3 | |
config/settings.py | siruku6/ml_sample | 0 | 6619034 | """
Django settings for ml_sample project.
Generated by 'django-admin startproject' using Django 4.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
import os
from pathlib import Path
import environ
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Typed env-var scheme: env('DEBUG') casts to bool (default False);
# env('ENVIRONMENT') is a plain string defaulting to None when unset.
env = environ.Env(
    DEBUG=(bool, False),
    ENVIRONMENT=(str, None)
)
env_file = str(BASE_DIR.joinpath('.env'))
env.read_env(env_file)
# Selects which DATABASES branch below is used ('heroku', 'docker', or None).
VIRTUAL_ENVIRONMENT = env('ENVIRONMENT')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
####################
# SECURITY WARNING: keep the secret key used in production secret!
####################
SECRET_KEY = env('SECRET_KEY')
# NOTE(review): django-environ's env(var, cast, default) takes *cast* as the
# second positional argument, so False here is interpreted as a cast rather
# than a default; the actual default comes from the Env(...) scheme above.
# Confirm intent -- env.bool('DEBUG', default=False) would be unambiguous.
DEBUG = env('DEBUG', False)
# NOTE(review): '*' accepts any Host header -- acceptable behind Heroku's
# router, but confirm before any other deployment.
ALLOWED_HOSTS = ['*']
LOGIN_URL = 'login'
LOGIN_REDIRECT_URL = '/'
LOGOUT_URL = 'logout'
LOGOUT_REDIRECT_URL = '/ml/login'
###########################################
# Application definition (Core settings)
###########################################
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps
    'mlapp',
    'classify_images',
    'detect_expression',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # NOTE(review): this lists two directories (the project root and a
        # relative 'templates'); if a single directory was intended it
        # should be [BASE_DIR / 'templates'] -- confirm.
        'DIRS': [BASE_DIR, 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'config.wsgi.application'
###########################
# Database
###########################
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
if VIRTUAL_ENVIRONMENT == 'heroku':
    # Heroku supplies the connection via the DATABASE_URL env var.
    import dj_database_url
    db_from_env = dj_database_url.config()
    DATABASES = {
        'default': db_from_env
    }
elif VIRTUAL_ENVIRONMENT == 'docker':
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'postgres',
            'USER': 'postgres',
            'PASSWORD': env('POSTGRES_PASSWORD'),
            'HOST': 'postgres',
            'PORT': 5432,
            'TEST': {
                'NAME': 'life_record_test',
            },
        }
    }
# NOTE: reach this branch when running test or mypy
else:
    DATABASES = {}
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
#################################
# Internationalization
#################################
# https://docs.djangoproject.com/en/4.0/topics/i18n/
# LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_TZ = True
######################################################
# Static files (CSS, JavaScript, Images)
######################################################
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# heroku settings
# NOTE(review): mid-file import; django_heroku.settings() overrides several
# settings defined above (databases, static files, logging) when on Heroku.
import django_heroku
django_heroku.settings(locals())
###########################
# Logging
###########################
LOGGING = {
    'version': 1,
    # Keep loggers configured elsewhere (e.g. by Django itself) active.
    'disable_existing_loggers': False,
    'formatters': {
        'default': {
            'format': '%(asctime)s [%(levelname)s] %(process)d %(thread)d %(message)s '
                      '%(pathname)s:%(lineno)d',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        },
        'console': {
            'format': '%(asctime)s [%(levelname)s] %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        },
    },
    'handlers': {
        'file': {
            'level': 'DEBUG' if DEBUG else 'INFO',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'log/app.log',
            'maxBytes': 50000,
            'backupCount': 3,
            'formatter': 'default',
        },
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'console',
        },
    },
    'loggers': {
        '': {
            'handlers': ['file', 'console'],
            'level': 'DEBUG' if DEBUG else 'INFO',
            'propagate': False,
        },
        'django': {
            'handlers': ['file', 'console'],
            'level': 'INFO',
            'propagate': False,
        },
    },
}
| """
Django settings for ml_sample project.
Generated by 'django-admin startproject' using Django 4.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
import os
from pathlib import Path
import environ
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
env = environ.Env(
DEBUG=(bool, False),
ENVIRONMENT=(str, None)
)
env_file = str(BASE_DIR.joinpath('.env'))
env.read_env(env_file)
VIRTUAL_ENVIRONMENT = env('ENVIRONMENT')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
####################
# SECURITY WARNING: keep the secret key used in production secret!
####################
SECRET_KEY = env('SECRET_KEY')
DEBUG = env('DEBUG', False)
ALLOWED_HOSTS = ['*']
LOGIN_URL = 'login'
LOGIN_REDIRECT_URL = '/'
LOGOUT_URL = 'logout'
LOGOUT_REDIRECT_URL = '/ml/login'
###########################################
# Application definition (Core settings)
###########################################
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'mlapp',
'classify_images',
'detect_expression',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR, 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
###########################
# Database
###########################
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
if VIRTUAL_ENVIRONMENT == 'heroku':
import dj_database_url
db_from_env = dj_database_url.config()
DATABASES = {
'default': db_from_env
}
elif VIRTUAL_ENVIRONMENT == 'docker':
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': env('POSTGRES_PASSWORD'),
'HOST': 'postgres',
'PORT': 5432,
'TEST': {
'NAME': 'life_record_test',
},
}
}
# NOTE: reach this branch when running test or mypy
else:
DATABASES = {}
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
#################################
# Internationalization
#################################
# https://docs.djangoproject.com/en/4.0/topics/i18n/
# LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_TZ = True
######################################################
# Static files (CSS, JavaScript, Images)
######################################################
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# heroku settings
import django_heroku
django_heroku.settings(locals())
###########################
# Logging
###########################
LOGGING = {
'version': 1,
# Don't disable logger settings already exist
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '%(asctime)s [%(levelname)s] %(process)d %(thread)d %(message)s '
'%(pathname)s:%(lineno)d',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
'console': {
'format': '%(asctime)s [%(levelname)s] %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
},
'handlers': {
'file': {
'level': 'DEBUG' if DEBUG else 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'log/app.log',
'maxBytes': 50000,
'backupCount': 3,
'formatter': 'default',
},
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'console',
},
},
'loggers': {
'': {
'handlers': ['file', 'console'],
'level': 'DEBUG' if DEBUG else 'INFO',
'propagate': False,
},
'django': {
'handlers': ['file', 'console'],
'level': 'INFO',
'propagate': False,
},
},
}
| en | 0.310118 | Django settings for ml_sample project. Generated by 'django-admin startproject' using Django 4.0.2. For more information on this file, see https://docs.djangoproject.com/en/4.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/4.0/ref/settings/ # Build paths inside the project like this: BASE_DIR / 'subdir'. # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/ #################### # SECURITY WARNING: keep the secret key used in production secret! #################### ########################################### # Application definition (Core settings) ########################################### ########################### # Database ########################### # https://docs.djangoproject.com/en/4.0/ref/settings/#databases # NOTE: reach this branch when running test or mypy # Default primary key field type # https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field # Password validation # https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators ################################# # Internationalization ################################# # https://docs.djangoproject.com/en/4.0/topics/i18n/ # LANGUAGE_CODE = 'en-us' ###################################################### # Static files (CSS, JavaScript, Images) ###################################################### # https://docs.djangoproject.com/en/4.0/howto/static-files/ # heroku settings ########################### # Logging ########################### # Don't disable logger settings already exist | 1.944508 | 2 |
naive-bayes.py | arhumkhan/movie-sentiment-analysis | 1 | 6619035 | import sys
import json
import math
def bow_to_list(bow):
    """Expand a bag-of-words mapping {word: count} into a flat word list."""
    expanded = []
    for token, count in bow.items():
        expanded.extend([token] * count)
    return expanded
def getinput():
    """Parse CLI arguments and load the vocabulary, training and test data.

    Command line: <training file> <test file> <param output> <predictions output>

    Returns:
        (documents, classes, vocab, test_docs, param_file, outputfile) where
        ``documents`` is the list of raw {label: bag-of-words} records,
        ``classes`` maps label -> list of bag-of-words dicts, ``vocab`` is the
        word set, and ``test_docs`` maps label -> list of flat word lists.
    """
    training = sys.argv[1]    # Training file
    test = sys.argv[2]        # Test file
    param_file = sys.argv[3]  # Param file to be saved
    outputfile = sys.argv[4]  # Output file

    # BUG FIX: the vocab file handle was opened inline and never closed; use
    # context managers throughout so every file is closed even if a line
    # fails to parse.
    with open('reviews/imdb.vocab') as vocab_file:
        vocab = set(line.rstrip() for line in vocab_file)  # All of our words

    documents = []
    classes = {}
    test_docs = {}

    with open(training, "r") as training_file:
        for line in training_file:
            vector = json.loads(line)
            documents.append(vector)
            # Each record is a single-key dict: {label: bag_of_words}.
            label = list(vector.keys())[0]
            classes.setdefault(label, []).append(vector[label])

    with open(test, "r") as test_file:
        for line in test_file:
            vector = json.loads(line)
            label = list(vector.keys())[0]
            test_docs.setdefault(label, []).append(bow_to_list(vector[label]))

    return documents, classes, vocab, test_docs, param_file, outputfile
def trainNB(documents, classes, vocab):
    """Train a multinomial Naive Bayes model with add-one smoothing.

    Returns (log priors per class, log likelihoods per (word, class) pair,
    per-class word-frequency tables). All probabilities are base-2 logs to
    avoid floating point underflow.
    """
    n_docs = len(documents)
    logprev = {}
    logprobs = {}
    class_word_counts = {}

    for label, class_docs in classes.items():
        # Log prior: fraction of training documents carrying this label.
        logprev[label] = math.log2(len(class_docs) / n_docs)

        # Merge the bag-of-words of every document in this class.
        counts = {}
        for doc in class_docs:
            for word, freq in doc.items():
                counts[word] = counts.get(word, 0) + freq
        class_word_counts[label] = counts

        # Laplace (add-one) smoothing keeps unseen vocabulary words non-zero.
        denominator = sum(counts.values()) + len(vocab)
        for word in vocab:
            logprobs[(word, label)] = math.log2(
                (counts.get(word, 0) + 1) / denominator)

    return logprev, logprobs, class_word_counts
def argmax(d):
    """Return the key with the largest value (first one wins on ties).

    Idiomatic replacement for the parallel keys/values scan: max() with a
    key function makes a single pass and preserves the same first-maximum
    tie-breaking as list.index() did.
    """
    return max(d, key=d.get)
def testNB(test_doc, classes, vocab, logprev, logprobs):
    """Classify one document: return the label with the highest total score.

    For each class, sums the log prior with the log likelihood of every
    in-vocabulary word of *test_doc* (out-of-vocabulary words are skipped).
    """
    scores = {}
    for label in classes:
        total = logprev[label]
        for word in test_doc:
            if word in vocab:
                total += logprobs[(word, label)]
        scores[label] = total
    return argmax(scores)
def textprobability(x):
    """Format {(word, class): log prob} as one 'p(word | class) = value' line each."""
    lines = ['p({} | {}) = {}\n'.format(key[0], key[1], val)
             for key, val in x.items()]
    return ''.join(lines)
def allcalculations():
    """Drive the full train/test cycle and write model parameters and predictions.

    Trains Naive Bayes on the documents returned by getinput(), classifies
    every test document, then writes the learned log probabilities to the
    model file and a per-document prediction table (plus overall accuracy)
    to the predictions file.
    """
    documents, classes, vocab, test_docs, model_output, predictions_output = getinput()
    logprev, logprobs, bow_in_each_class = trainNB(documents, classes, vocab)

    results = {True: 0, False: 0}  # counts of correct / incorrect predictions
    predictions = "# of Doc Predicted Review True Review\n"
    num = 1
    # Renamed the inner loop variable: the original reused `documents`,
    # shadowing the training data loaded above.
    for label, docs_for_label in test_docs.items():
        for document in docs_for_label:
            test_result = testNB(document, classes, vocab, logprev, logprobs)
            results[test_result == label] += 1
            predictions += " " + str(num) + " | " + test_result + " | " + label + "\n"
            num += 1

    # BUG FIX (robustness): use context managers so the output files are
    # closed (and buffers flushed) even if a write fails part-way through.
    model = "Log probability of each class:\n" + str(logprev) + \
            '\n\nLog of each word given each class: \n' + textprobability(logprobs)
    with open(model_output, "w") as param_file:
        param_file.write(model)

    accuracy = results[True] / (results[False] + results[True]) * 100
    predictions += "Total Words: " + str(results) + ". Accuracy of results: " + str(accuracy) + '%'
    with open(predictions_output, "w") as outputfile:
        outputfile.write(predictions)
allcalculations()
| import sys
import json
import math
def bow_to_list(bow):
output = []
for word, freq in bow.items():
for i in range(freq):
output.append(word)
return output
def getinput():
training = sys.argv[1] # Training file
test = sys.argv[2] # Test file
param_file = sys.argv[3] # Param file to be saved
outputfile = sys.argv[4] # Output file
vocab = set([line.rstrip() for line in open('reviews/imdb.vocab')]) # All of our words
documents = []
classes = {}
test_docs = {}
training_file = open(training, "r")
for line in training_file.readlines():
vector = json.loads(line)
documents.append(vector)
label = list(vector.keys())[0]
if label in classes:
classes[label].append(vector[label])
else:
classes[label] = [vector[label]]
training_file.close()
test_file = open(test, "r")
for line in test_file.readlines():
vector = json.loads(line)
label = list(vector.keys())[0]
if label in test_docs:
test_docs[label].append(bow_to_list(vector[label]))
else:
test_docs[label] = [bow_to_list(vector[label])]
test_file.close()
return documents, classes, vocab, test_docs, param_file, outputfile
def trainNB(documents, classes, vocab):
total_doc_num = len(documents)
logprev = {} # Represeting probabilities as log probabilities to prevent floating point underflow
bow_for_each_class = {}
logprobs = {}
num_of_words_in_each_class = {}
for label, docs_in_the_class in classes.items():
num_of_documents_in_this_class = len(docs_in_the_class)
logprev[label] = math.log2(num_of_documents_in_this_class / total_doc_num) #Again we use log probs here
bow_for_each_class[label] = {}
num_of_words_in_each_class[label] = 0
for doc in docs_in_the_class:
for word, value in doc.items():
num_of_words_in_each_class[label] += value
if word in bow_for_each_class[label]:
bow_for_each_class[label][word] += value
else:
bow_for_each_class[label][word] = value
for word in vocab:
count = 0
if word in bow_for_each_class[label]:
count = bow_for_each_class[label][word]
logprobs[(word, label)] = math.log2((count + 1) / (num_of_words_in_each_class[label] + len(vocab)))
return logprev, logprobs, bow_for_each_class
def argmax(d): # Referencing formula
v = list(d.values())
k = list(d.keys())
return k[v.index(max(v))]
def testNB(test_doc, classes, vocab, logprev, logprobs):
sum_of_log_probs = {}
for label, docs_in_the_class in classes.items():
sum_of_log_probs[label] = logprev[label]
for word in test_doc:
if word in vocab:
sum_of_log_probs[label] += logprobs[(word, label)]
return argmax(sum_of_log_probs)
def textprobability (x):
prob_formatting = ""
for key, val in x.items():
w = str(key[0])
c = str(key[1])
prob_formatting += 'p(' + w + ' | ' + c + ') = ' + str(val) + '\n'
return prob_formatting
def allcalculations():
documents, classes, vocab, test_docs, model_output, predictions_output = getinput()
logprev, logprobs, bow_in_each_class = trainNB(documents, classes, vocab)
results = {True: 0, False: 0}
predictions = "# of Doc Predicted Review True Review\n"
num = 1
for label, documents in test_docs.items():
for document in documents:
test_result = testNB(document, classes, vocab, logprev, logprobs)
results[test_result == label] += 1
predictions += " " + str(num) + " | " + test_result + " | " + label + "\n"
num += 1
param_file = open(model_output, "w")
model = "Log probability of each class:\n" + str(logprev) + \
'\n\nLog of each word given each class: \n' + textprobability (logprobs)
param_file.write(model)
param_file.close()
outputfile = open(predictions_output, "w")
accuracy = results[True] / (results[False] + results[True]) * 100
predictions += "Total Words: " + str(results) + ". Accuracy of results: " + str(accuracy) + '%'
outputfile.write(predictions)
outputfile.close()
allcalculations()
| en | 0.860081 | # Training file # Test file # Param file to be saved # Output file # All of our words # Represeting probabilities as log probabilities to prevent floating point underflow #Again we use log probs here # Referencing formula | 2.900418 | 3 |
utils/gen_reference_table.py | deperrone/content | 1,138 | 6619036 | #!/usr/bin/python3
import os
import re
import glob
import ssg.build_yaml
import ssg.constants
import tables.table_renderer
class HtmlOutput(tables.table_renderer.TableHtmlOutput):
    """HTML table renderer mapping reference categories to rules.

    Parsed Rule objects are cached on the instance so repeated calls do not
    re-read and re-parse every rule YAML file.
    """

    TEMPLATE_NAME = "tables/reference_tables_template.html"

    def __init__(self, * args, ** kwargs):
        super(HtmlOutput, self).__init__(* args, ** kwargs)
        # Lazily filled by _get_eligible_rules on first use.
        self.cached_rules = []

    def _fix_var_sub_in_text(self, text, varname, value):
        """Replace an XCCDF <sub idref="..."/> element with an HTML <abbr> tag."""
        return re.sub(
            r'<sub\s+idref="{var}"\s*/>'.format(var=varname),
            r'<abbr title="${var}"><tt>{val}</tt></abbr>'.format(var=varname, val=value), text)

    def _get_eligible_rules(self, refcat):
        """Return all rules that carry a reference of category *refcat*."""
        if self.cached_rules:
            all_rules = self.cached_rules
        else:
            # PERF FIX: the glob was previously computed unconditionally,
            # even on cache hits; only scan the rules directory when the
            # cache is cold.
            filenames = glob.glob(os.path.join(self.rules_root, "*.yml"))
            all_rules = [ssg.build_yaml.Rule.from_yaml(f, self.env_yaml) for f in filenames]
            self.cached_rules = all_rules
        return [rule for rule in all_rules if refcat in rule.references]

    def process_rules(self, reference):
        """Render the table and set a page title naming product and category."""
        super(HtmlOutput, self).process_rules(reference)
        self.template_data["title"] = (
            "{product} rules by {refcat} references"
            .format(product=self.product, refcat=reference.name)
        )
def update_parser(parser):
    """Hook for table-type-specific CLI options; this table defines none."""
    pass
def parse_args():
    """Build the CLI parser and parse sys.argv.

    Combines the base renderer options with the shared table options and
    this script's (empty) additions before parsing.
    """
    parser = HtmlOutput.create_parser(
        "Generate HTML table that maps references to rules "
        "using compiled rules as source of data.")
    tables.table_renderer.update_parser(parser)
    update_parser(parser)
    return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
renderer = HtmlOutput(args.product, args.build_dir, args.verbose)
reference = ssg.constants.REFERENCES[args.refcategory]
renderer.process_rules(reference)
renderer.output_results(args)
| #!/usr/bin/python3
import os
import re
import glob
import ssg.build_yaml
import ssg.constants
import tables.table_renderer
class HtmlOutput(tables.table_renderer.TableHtmlOutput):
TEMPLATE_NAME = "tables/reference_tables_template.html"
def __init__(self, * args, ** kwargs):
super(HtmlOutput, self).__init__(* args, ** kwargs)
self.cached_rules = []
def _fix_var_sub_in_text(self, text, varname, value):
return re.sub(
r'<sub\s+idref="{var}"\s*/>'.format(var=varname),
r'<abbr title="${var}"><tt>{val}</tt></abbr>'.format(var=varname, val=value), text)
def _get_eligible_rules(self, refcat):
filenames = glob.glob(os.path.join(self.rules_root, "*.yml"))
if self.cached_rules:
all_rules = self.cached_rules
else:
all_rules = [ssg.build_yaml.Rule.from_yaml(f, self.env_yaml) for f in filenames]
self.cached_rules = all_rules
rules = []
for rule in all_rules:
if refcat in rule.references:
rules.append(rule)
return rules
def process_rules(self, reference):
super(HtmlOutput, self).process_rules(reference)
self.template_data["title"] = (
"{product} rules by {refcat} references"
.format(product=self.product, refcat=reference.name)
)
def update_parser(parser):
pass
def parse_args():
parser = HtmlOutput.create_parser(
"Generate HTML table that maps references to rules "
"using compiled rules as source of data.")
tables.table_renderer.update_parser(parser)
update_parser(parser)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
renderer = HtmlOutput(args.product, args.build_dir, args.verbose)
reference = ssg.constants.REFERENCES[args.refcategory]
renderer.process_rules(reference)
renderer.output_results(args)
| fr | 0.386793 | #!/usr/bin/python3 | 2.296897 | 2 |
accounts/admin.py | aniruddha2000/foodfeeda | 0 | 6619037 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from accounts.forms import CustomUserChangeForm, CustomUserCreationForm
from accounts.models import NGO, Donner
class DonnerAdmin(UserAdmin):
    """Admin configuration for Donner accounts (email-based custom user).

    Uses the custom creation/change forms so passwords go through Django's
    hashing machinery instead of being edited as raw model fields.
    """
    add_form = CustomUserCreationForm
    form = CustomUserChangeForm
    model = Donner
    # Columns shown in the admin change list.
    list_display = (
        "email",
        "first_name",
        "is_staff",
        "is_active",
    )
    # Sidebar filters for the change list.
    list_filter = (
        "email",
        "is_staff",
        "is_active",
    )
    # Layout of the *edit* page, grouped by section.
    fieldsets = (
        (None, {"fields": ("email", "phone_number", "password",)}),
        ("Permissions", {"fields": ("is_staff", "is_active",
         "is_email_verified", "is_phone_verified",)}),
        ("Address", {"fields": ("country", "state", "city", "pin",)}),
        ("Details", {"fields": ("first_name", "last_name",
         "gender", "coins", "DOB", "profile_photo",)}),
    )
    # System-managed values: visible in the admin but not editable.
    readonly_fields = (
        "id",
        "type",
    )
    # Layout of the *add* (user creation) page.
    add_fieldsets = (
        (
            None,
            {
                "classes": ("wide",),
                "fields": (
                    "first_name",
                    "last_name",
                    "gender",
                    "coins",
                    "DOB",
                    "email",
                    "password1",
                    "password2",
                    "is_staff",
                    "is_active",
                    "type",
                    "phone_number",
                    "country",
                    "state",
                    "city",
                    "pin",
                    "profile_photo",
                ),
            },
        ),
    )
    search_fields = ("email",)
    ordering = ("email",)
class NGOAdmin(UserAdmin):
    """Admin configuration for NGO accounts (email-based custom user).

    Mirrors DonnerAdmin: custom creation/change forms so passwords go
    through Django's hashing machinery.
    """
    add_form = CustomUserCreationForm
    form = CustomUserChangeForm
    model = NGO
    # Columns shown in the admin change list.
    list_display = (
        "email",
        "name",
        "is_staff",
        "is_active",
    )
    # Sidebar filters for the change list.
    list_filter = (
        "email",
        "is_staff",
        "is_active",
    )
    # Layout of the *edit* page, grouped by section.
    fieldsets = (
        (None, {"fields": ("email", "phone_number", "password",)}),
        ("Permissions", {"fields": ("is_staff", "is_active",
         "is_email_verified", "is_phone_verified",)}),
        ("Details", {"fields": ("name", "ngo_approval_cert",)}),
    )
    # System-managed values: visible in the admin but not editable.
    readonly_fields = (
        "id",
        "type",
    )
    # Layout of the *add* (user creation) page.
    add_fieldsets = (
        (
            None,
            {
                "classes": ("wide",),
                "fields": (
                    "name",
                    "email",
                    # BUG FIX: these two entries were the literal string
                    # "<PASSWORD>" twice (a redaction artifact), which would
                    # break the creation form. Restore the two password
                    # fields used by Django's user-creation form, matching
                    # DonnerAdmin.
                    "password1",
                    "password2",
                    "is_staff",
                    "is_active",
                    "is_email_verified",
                    "type",
                    "phone_number",
                    "country",
                    "state",
                    "city",
                    "pin",
                    "ngo_approval_cert",
                ),
            },
        ),
    )
    search_fields = ("email",)
    ordering = ("email",)
# Register both custom user models with their tailored admin configurations.
admin.site.register(NGO, NGOAdmin)
admin.site.register(Donner, DonnerAdmin)
| from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from accounts.forms import CustomUserChangeForm, CustomUserCreationForm
from accounts.models import NGO, Donner
class DonnerAdmin(UserAdmin):
add_form = CustomUserCreationForm
form = CustomUserChangeForm
model = Donner
list_display = (
"email",
"first_name",
"is_staff",
"is_active",
)
list_filter = (
"email",
"is_staff",
"is_active",
)
fieldsets = (
(None, {"fields": ("email", "phone_number", "password",)}),
("Permissions", {"fields": ("is_staff", "is_active",
"is_email_verified", "is_phone_verified",)}),
("Address", {"fields": ("country", "state", "city", "pin",)}),
("Details", {"fields": ("first_name", "last_name",
"gender", "coins", "DOB", "profile_photo",)}),
)
readonly_fields = (
"id",
"type",
)
add_fieldsets = (
(
None,
{
"classes": ("wide",),
"fields": (
"first_name",
"last_name",
"gender",
"coins",
"DOB",
"email",
"password1",
"password2",
"is_staff",
"is_active",
"type",
"phone_number",
"country",
"state",
"city",
"pin",
"profile_photo",
),
},
),
)
search_fields = ("email",)
ordering = ("email",)
class NGOAdmin(UserAdmin):
    """Django admin configuration for NGO accounts.

    Mirrors DonnerAdmin but exposes the NGO name and approval
    certificate instead of personal details.
    """

    add_form = CustomUserCreationForm
    form = CustomUserChangeForm
    model = NGO
    list_display = (
        "email",
        "name",
        "is_staff",
        "is_active",
    )
    list_filter = (
        "email",
        "is_staff",
        "is_active",
    )
    fieldsets = (
        (None, {"fields": ("email", "phone_number", "password",)}),
        ("Permissions", {"fields": ("is_staff", "is_active",
                                    "is_email_verified", "is_phone_verified",)}),
        ("Details", {"fields": ("name", "ngo_approval_cert",)}),
    )
    # Shown but not editable on the change form.
    readonly_fields = (
        "id",
        "type",
    )
    add_fieldsets = (
        (
            None,
            {
                "classes": ("wide",),
                "fields": (
                    "name",
                    "email",
                    # FIX: these two entries were corrupted to the literal
                    # string "<PASSWORD>"; UserAdmin's creation form expects
                    # the standard password1/password2 pair (as DonnerAdmin
                    # above uses).
                    "password1",
                    "password2",
                    "is_staff",
                    "is_active",
                    "is_email_verified",
                    "type",
                    "phone_number",
                    "country",
                    "state",
                    "city",
                    "pin",
                    "ngo_approval_cert",
                ),
            },
        ),
    )
    search_fields = ("email",)
    ordering = ("email",)
admin.site.register(NGO, NGOAdmin)
admin.site.register(Donner, DonnerAdmin)
| none | 1 | 2.144135 | 2 | |
output/models/nist_data/list_pkg/string/schema_instance/nistschema_sv_iv_list_string_pattern_3_xsd/__init__.py | tefra/xsdata-w3c-tests | 1 | 6619038 | <reponame>tefra/xsdata-w3c-tests<gh_stars>1-10
from output.models.nist_data.list_pkg.string.schema_instance.nistschema_sv_iv_list_string_pattern_3_xsd.nistschema_sv_iv_list_string_pattern_3 import NistschemaSvIvListStringPattern3
__all__ = [
"NistschemaSvIvListStringPattern3",
]
| from output.models.nist_data.list_pkg.string.schema_instance.nistschema_sv_iv_list_string_pattern_3_xsd.nistschema_sv_iv_list_string_pattern_3 import NistschemaSvIvListStringPattern3
__all__ = [
"NistschemaSvIvListStringPattern3",
] | none | 1 | 0.97013 | 1 | |
bitmovin/services/manifests/smooth_manifest_service.py | camberbridge/bitmovin-python | 44 | 6619039 | <reponame>camberbridge/bitmovin-python<filename>bitmovin/services/manifests/smooth_manifest_service.py
from bitmovin.resources import SmoothManifest
from bitmovin.services.manifests.generic_manifest_service import GenericManifestService
from .manifest_control_service import ManifestControlService
from .smooth_representation_service import MP4RepresentationService
from .smooth_content_protection_service import SmoothContentProtectionService
class Smooth(GenericManifestService, ManifestControlService):
    """Bitmovin API service for Smooth Streaming manifests.

    Combines the generic manifest CRUD operations with manifest control,
    and exposes sub-services for MP4 representations and content
    protection, all sharing the same HTTP client.
    """
    manifest_type = 'smooth'
    def __init__(self, http_client):
        super().__init__(http_client=http_client, manifest_type=self.manifest_type, resource_class=SmoothManifest)
        # Sub-services scoped to this manifest type.
        self.MP4Representation = MP4RepresentationService(http_client=http_client)
        self.ContentProtection = SmoothContentProtectionService(http_client=http_client)
| from bitmovin.resources import SmoothManifest
from bitmovin.services.manifests.generic_manifest_service import GenericManifestService
from .manifest_control_service import ManifestControlService
from .smooth_representation_service import MP4RepresentationService
from .smooth_content_protection_service import SmoothContentProtectionService
class Smooth(GenericManifestService, ManifestControlService):
    """Bitmovin API service for Smooth Streaming manifests.

    Combines the generic manifest CRUD operations with manifest control,
    and exposes sub-services for MP4 representations and content
    protection, all sharing the same HTTP client.
    """
    manifest_type = 'smooth'
    def __init__(self, http_client):
        super().__init__(http_client=http_client, manifest_type=self.manifest_type, resource_class=SmoothManifest)
        # Sub-services scoped to this manifest type.
        self.MP4Representation = MP4RepresentationService(http_client=http_client)
        self.ContentProtection = SmoothContentProtectionService(http_client=http_client)
10-sequence_hash_slice/vector_v4.py | sexyjoon/fluent-python | 0 | 6619040 | from array import array
import reprlib
import math
import functools
import operator
class VectorV4:
    """A multidimensional vector of doubles.

    Supports iteration, len(), slicing (slices return a new vector),
    read-only shortcut attributes x/y/z/t for the first four
    components, hashing, equality, and a round-trippable binary form
    via ``bytes()`` / ``frombytes``.
    """

    typecode = 'd'           # array typecode: C double
    shortcut_names = 'xyzt'  # single-letter aliases for components 0-3

    def __init__(self, components):
        # Store the components unboxed in a compact array.
        self._components = array(self.typecode, components)

    def __iter__(self):
        return iter(self._components)

    def __repr__(self):
        # reprlib abbreviates very long arrays with '...'.
        components = reprlib.repr(self._components)
        components = components[components.find('['):-1]
        return 'Vector({})'.format(components)

    def __str__(self):
        return str(tuple(self))

    def __bytes__(self):
        # FIX: the original ``bytes(ord(self.typecode))`` built
        # ord('d') == 100 zero bytes instead of the single typecode
        # byte, which broke the frombytes() round trip.
        return bytes([ord(self.typecode)]) + bytes(self._components)

    def __abs__(self):
        return math.sqrt(sum(x * x for x in self))

    def __bool__(self):
        return bool(abs(self))

    @classmethod
    def frombytes(cls, octets):
        """Rebuild a vector from the output of ``bytes(v)``."""
        typecode = chr(octets[0])
        memv = memoryview(octets[1:]).cast(typecode)
        return cls(memv)

    def __len__(self):
        return len(self._components)

    def __getitem__(self, item):
        cls = type(self)
        if isinstance(item, slice):
            # Slicing yields a new vector, not a bare array slice.
            return cls(self._components[item])
        elif isinstance(item, int):
            return self._components[item]
        else:
            msg = '{cls.__name__} indices must be integers'
            raise TypeError(msg.format(cls=cls))

    def __getattr__(self, item):
        # Only reached when normal lookup fails; resolves x/y/z/t.
        cls = type(self)
        if len(item) == 1:
            pos = cls.shortcut_names.find(item)
            if 0 <= pos < len(self._components):
                return self._components[pos]
        msg = '{.__name__!r} object has no attribute {!r}'
        raise AttributeError(msg.format(cls, item))

    def __setattr__(self, key, value):
        # Forbid assigning any single lowercase letter so the shortcut
        # attributes cannot drift out of sync with the stored data.
        cls = type(self)
        if len(key) == 1:
            if key in cls.shortcut_names:
                error = 'readonly attribute {attr_name!r}'
            elif key.islower():
                error = 'can\'t set attributes \'a\' to \'z\' in {cls_name!r}'
            else:
                error = ''
            if error:
                msg = error.format(cls_name=cls.__name__, attr_name=key)
                raise AttributeError(msg)
        super().__setattr__(key, value)

    def __eq__(self, other):
        # Length check first, then lazy pairwise comparison -- avoids
        # materialising both operands as tuples.
        return len(self) == len(other) and all(a == b for a, b in zip(self, other))

    def __hash__(self):
        # XOR-reduce the component hashes; map() keeps it lazy.
        hashes = map(hash, self._components)
        return functools.reduce(operator.xor, hashes, 0)
| from array import array
import reprlib
import math
import functools
import operator
class VectorV4:
    """A multidimensional vector of doubles.

    Supports iteration, len(), slicing (slices return a new vector),
    read-only shortcut attributes x/y/z/t for the first four
    components, hashing, equality, and a round-trippable binary form
    via ``bytes()`` / ``frombytes``.
    """

    typecode = 'd'           # array typecode: C double
    shortcut_names = 'xyzt'  # single-letter aliases for components 0-3

    def __init__(self, components):
        # Store the components unboxed in a compact array.
        self._components = array(self.typecode, components)

    def __iter__(self):
        return iter(self._components)

    def __repr__(self):
        # reprlib abbreviates very long arrays with '...'.
        components = reprlib.repr(self._components)
        components = components[components.find('['):-1]
        return 'Vector({})'.format(components)

    def __str__(self):
        return str(tuple(self))

    def __bytes__(self):
        # FIX: ``bytes(ord(self.typecode))`` produced 100 zero bytes
        # (ord('d') == 100) rather than the single typecode byte, so
        # frombytes() could never decode the output.
        return bytes([ord(self.typecode)]) + bytes(self._components)

    def __abs__(self):
        return math.sqrt(sum(x * x for x in self))

    def __bool__(self):
        return bool(abs(self))

    @classmethod
    def frombytes(cls, octets):
        """Rebuild a vector from the output of ``bytes(v)``."""
        typecode = chr(octets[0])
        memv = memoryview(octets[1:]).cast(typecode)
        return cls(memv)

    def __len__(self):
        return len(self._components)

    def __getitem__(self, item):
        cls = type(self)
        if isinstance(item, slice):
            # Slicing yields a new vector, not a bare array slice.
            return cls(self._components[item])
        elif isinstance(item, int):
            return self._components[item]
        else:
            msg = '{cls.__name__} indices must be integers'
            raise TypeError(msg.format(cls=cls))

    def __getattr__(self, item):
        # Only reached when normal lookup fails; resolves x/y/z/t.
        cls = type(self)
        if len(item) == 1:
            pos = cls.shortcut_names.find(item)
            if 0 <= pos < len(self._components):
                return self._components[pos]
        msg = '{.__name__!r} object has no attribute {!r}'
        raise AttributeError(msg.format(cls, item))

    def __setattr__(self, key, value):
        # Forbid assigning any single lowercase letter so the shortcut
        # attributes cannot drift out of sync with the stored data.
        cls = type(self)
        if len(key) == 1:
            if key in cls.shortcut_names:
                error = 'readonly attribute {attr_name!r}'
            elif key.islower():
                error = 'can\'t set attributes \'a\' to \'z\' in {cls_name!r}'
            else:
                error = ''
            if error:
                msg = error.format(cls_name=cls.__name__, attr_name=key)
                raise AttributeError(msg)
        super().__setattr__(key, value)

    def __eq__(self, other):
        # Length check first, then lazy pairwise comparison -- avoids
        # materialising both operands as tuples.
        return len(self) == len(other) and all(a == b for a, b in zip(self, other))

    def __hash__(self):
        # XOR-reduce the component hashes; map() keeps it lazy.
        hashes = map(hash, self._components)
        return functools.reduce(operator.xor, hashes, 0)
| ko | 0.776022 | # def __eq__(self, other): # return tuple(self) == tuple(other) # # return tuple(self) == tuple(other) # # 메모리 효율을 위해 반복자 사용 # if len(self) != len(other): # return False # for a, b in zip(self, other): # if a != b: # return False # return True # all 함수를 사용하여 한 줄로 줄임 # hashes = (hash(x) for x in self._components) # 맵 단계를 더 잘 드러내기 위해 제너레이터 표현식 대신 map 함수 사용 | 2.606388 | 3 |
backend/api/serializers/railroad_company_serializer.py | ferdn4ndo/infotrem | 0 | 6619041 | <gh_stars>0
from rest_framework import serializers
from api.models import CompanyInformation, CompanyPaintSchemeInformation, CompanyPaintScheme
from api.models.information_model import Information
from api.models.route_model import Company
from api.serializers.information_serializer import InformationSerializer
class CompanyInformationSerializer(serializers.ModelSerializer):
    """Serializes a CompanyInformation row with its nested Information."""

    # FIX: this declared field was named ``company_id`` while Meta.fields
    # and create() use ``railroad_id``; DRF raises at class use when a
    # declared field is missing from Meta.fields.
    railroad_id = serializers.CharField(required=True, write_only=True)
    information = InformationSerializer()

    class Meta:
        model = CompanyInformation
        fields = ['id', 'railroad_id', 'information']

    def create(self, validated_data):
        """Create the nested Information, then link it to the railroad."""
        information_data = validated_data.pop('information')
        information = Information.objects.create(**information_data)
        railroad = Company.objects.get(id=validated_data['railroad_id'])
        return CompanyInformation.objects.create(railroad=railroad, information=information)

    def update(self, instance, validated_data):
        """Update the nested Information record and re-attach it."""
        information_data = validated_data.pop('information')
        information = Information.objects.get(id=information_data['id'])
        serializer = InformationSerializer(information, data=information_data)
        # FIX: DRF requires is_valid() before save().
        serializer.is_valid(raise_exception=True)
        serializer.save()
        instance.information = information
        instance.save()
        return instance
class CompanySerializer(serializers.ModelSerializer):
    """Serializes a Company together with its nested information entries."""
    company_information = CompanyInformationSerializer(many=True)
    class Meta:
        model = Company
        fields = ['id', 'abbrev', 'name', 'company_information']
class CompanyPaintSchemeInformationSerializer(serializers.ModelSerializer):
    """Serializes a CompanyPaintSchemeInformation row with its nested
    Information record; ``paint_scheme_id`` is accepted on write only."""
    paint_scheme_id = serializers.CharField(required=True, write_only=True)
    information = InformationSerializer()
    class Meta:
        model = CompanyPaintSchemeInformation
        fields = ['id', 'paint_scheme_id', 'information']
    def create(self, validated_data):
        # Create the nested Information first, then link it to the
        # paint scheme identified by paint_scheme_id.
        information_data = validated_data.pop('information')
        information = Information.objects.create(**information_data)
        paint_scheme = CompanyPaintScheme.objects.get(id=validated_data['paint_scheme_id'])
        return CompanyPaintSchemeInformation.objects.create(paint_scheme=paint_scheme, information=information)
    def update(self, instance, validated_data):
        # Update the nested Information record and re-attach it.
        information_data = validated_data.pop('information')
        information = Information.objects.get(id=information_data['id'])
        serializer = InformationSerializer(information, data=information_data)
        # NOTE(review): DRF requires is_valid() before save(); as written
        # this save() will raise -- confirm and add the is_valid() call.
        serializer.save()
        instance.information = information
        instance.save()
        return instance
class CompanyPaintSchemeSerializer(serializers.ModelSerializer):
    """Serializes a CompanyPaintScheme with its owning railroad and all
    nested information entries."""
    railroad = CompanySerializer()
    railroad_information = CompanyPaintSchemeInformationSerializer(many=True)
    class Meta:
        model = CompanyPaintScheme
        fields = ['id', 'name', 'railroad', 'start_date', 'end_date', 'railroad_information']
| from rest_framework import serializers
from api.models import CompanyInformation, CompanyPaintSchemeInformation, CompanyPaintScheme
from api.models.information_model import Information
from api.models.route_model import Company
from api.serializers.information_serializer import InformationSerializer
class CompanyInformationSerializer(serializers.ModelSerializer):
    """Serializes a CompanyInformation row with its nested Information."""

    # FIX: this declared field was named ``company_id`` while Meta.fields
    # and create() use ``railroad_id``; DRF raises at class use when a
    # declared field is missing from Meta.fields.
    railroad_id = serializers.CharField(required=True, write_only=True)
    information = InformationSerializer()

    class Meta:
        model = CompanyInformation
        fields = ['id', 'railroad_id', 'information']

    def create(self, validated_data):
        """Create the nested Information, then link it to the railroad."""
        information_data = validated_data.pop('information')
        information = Information.objects.create(**information_data)
        railroad = Company.objects.get(id=validated_data['railroad_id'])
        return CompanyInformation.objects.create(railroad=railroad, information=information)

    def update(self, instance, validated_data):
        """Update the nested Information record and re-attach it."""
        information_data = validated_data.pop('information')
        information = Information.objects.get(id=information_data['id'])
        serializer = InformationSerializer(information, data=information_data)
        # FIX: DRF requires is_valid() before save().
        serializer.is_valid(raise_exception=True)
        serializer.save()
        instance.information = information
        instance.save()
        return instance
class CompanySerializer(serializers.ModelSerializer):
    """Serializes a Company together with its nested information entries."""
    company_information = CompanyInformationSerializer(many=True)
    class Meta:
        model = Company
        fields = ['id', 'abbrev', 'name', 'company_information']
class CompanyPaintSchemeInformationSerializer(serializers.ModelSerializer):
    """Serializes a CompanyPaintSchemeInformation row with its nested
    Information record; ``paint_scheme_id`` is accepted on write only."""
    paint_scheme_id = serializers.CharField(required=True, write_only=True)
    information = InformationSerializer()
    class Meta:
        model = CompanyPaintSchemeInformation
        fields = ['id', 'paint_scheme_id', 'information']
    def create(self, validated_data):
        # Create the nested Information first, then link it to the
        # paint scheme identified by paint_scheme_id.
        information_data = validated_data.pop('information')
        information = Information.objects.create(**information_data)
        paint_scheme = CompanyPaintScheme.objects.get(id=validated_data['paint_scheme_id'])
        return CompanyPaintSchemeInformation.objects.create(paint_scheme=paint_scheme, information=information)
    def update(self, instance, validated_data):
        # Update the nested Information record and re-attach it.
        information_data = validated_data.pop('information')
        information = Information.objects.get(id=information_data['id'])
        serializer = InformationSerializer(information, data=information_data)
        # NOTE(review): DRF requires is_valid() before save(); as written
        # this save() will raise -- confirm and add the is_valid() call.
        serializer.save()
        instance.information = information
        instance.save()
        return instance
class CompanyPaintSchemeSerializer(serializers.ModelSerializer):
    """Serializes a CompanyPaintScheme with its owning railroad and all
    nested information entries."""
    railroad = CompanySerializer()
    railroad_information = CompanyPaintSchemeInformationSerializer(many=True)
    class Meta:
        model = CompanyPaintScheme
        fields = ['id', 'name', 'railroad', 'start_date', 'end_date', 'railroad_information']
data/services.py | njncalub/logistiko | 0 | 6619042 | <filename>data/services.py
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from .utils import destroy_database, initialize_database
from .models import Item, ItemStatus, ItemStatusHistory, Package, Region
class DataService(object):
    """
    Service for handling the database connection.

    Implemented with the Borg pattern: every instance shares one state
    dict, so the engine and session are created only on the first
    instantiation; later constructions are cheap no-ops.
    """

    __shared_state = {}  # Borg design pattern's shared state.
    __instantiated = False

    def __init__(self, engine):
        """
        :param engine: SQLAlchemy database URI, e.g. ``sqlite:///db.sqlite``
        :raises ValueError: if ``engine`` is empty on first instantiation
        """
        self.__dict__ = self.__shared_state

        # Exit if already instantiated (shared state is already built).
        if self.__instantiated:
            return
        else:
            self.__instantiated = True

        if not engine:
            raise ValueError('The values specified in engine parameter has '
                             'to be supported by SQLAlchemy.')

        self.engine = engine
        db_engine = create_engine(engine)
        self.db_engine = db_engine
        db_session = sessionmaker(bind=db_engine)
        self.session = db_session()

    def init_database(self):
        """Create all tables on the configured engine."""
        initialize_database(engine=self.engine)

    def drop_database(self):
        """Drop all tables on the configured engine."""
        destroy_database(engine=self.engine)

    def add_region(self, region, major_region, id_=None):
        """
        Creates and saves a new Region to the database.

        :param id_: Existing id of the region (None lets the DB assign one)
        :param region: Name of the region
        :param major_region: Name of the Major Region
        :return: the persisted Region instance
        """
        new_region = Region(id=id_,
                            region=region,
                            major_region=major_region)
        self.session.add(new_region)
        self.session.commit()
        return new_region

    def get_regions(self, region=None):
        """
        Fetch regions, optionally filtered by exact name.

        NOTE: returns a Query when ``region`` is given but a list
        otherwise; both are iterable, which is all callers need.
        """
        if region:
            found = self.session.query(Region).filter(Region.region == region)
        else:
            found = self.session.query(Region).all()
        return found

    def update_package_addresses(self, region):
        """
        Attach ``region`` to every Package whose address starts with the
        region's name.

        TODO: update this to not resort to using this hack.
        """
        # FIX: materialise the query with .all() -- a Query object is
        # always truthy, so the original empty-result guard never fired.
        found_packages = self.session.query(Package).filter(
            Package.address.startswith(region.region)).all()
        if not found_packages:
            return
        for package in found_packages:
            package.region = region
            self.session.add(package)
        self.session.commit()

    def get_packages(self, package_number=None):
        """Fetch packages, optionally filtered by package number."""
        if package_number:
            found = self.session.query(Package).filter(
                Package.package_number == package_number)
        else:
            found = self.session.query(Package).all()
        return found

    def add_package(self, address, region, package_number, shipped_at,
                    delivered_at, lead_time=None, id_=None):
        """
        Creates and saves a new Package to the database.

        :param id_: Existing id of the package
        :param address: The address of the recipient
        :param region: FK of the region
        :param package_number: Unique package number
        :param shipped_at: The time when the package is shipped
        :param delivered_at: The time when the package is delivered to customer
        :param lead_time: The time from when the package is shipped
                          until it is delivered to customer
        :return: the persisted Package instance
        """
        new_package = Package(id=id_,
                              address=address,
                              region=region,
                              package_number=package_number,
                              shipped_at=shipped_at,
                              delivered_at=delivered_at,
                              lead_time=lead_time)
        self.session.add(new_package)
        self.session.commit()
        return new_package

    def add_so_item(self, id_sales_order_item, bob_id_sales_order_item,
                    fk_sales_order, fk_sales_order_item_status,
                    fk_delivery_type, unit_price, tax_amount, paid_price,
                    name, sku, created_at, updated_at, last_status_change,
                    original_unit_price, shipping_type, real_delivery_date,
                    bob_id_supplier, is_marketplace):
        """
        Creates and saves a new Item to the database.
        Columns taken from ims_sales_order_item.csv.
        """
        new_item = Item(id_sales_order_item=id_sales_order_item,
                        bob_id_sales_order_item=bob_id_sales_order_item,
                        fk_sales_order=fk_sales_order,
                        fk_sales_order_item_status=fk_sales_order_item_status,
                        fk_delivery_type=fk_delivery_type,
                        unit_price=unit_price,
                        tax_amount=tax_amount,
                        paid_price=paid_price,
                        name=name,
                        sku=sku,
                        created_at=created_at,
                        updated_at=updated_at,
                        last_status_change=last_status_change,
                        original_unit_price=original_unit_price,
                        shipping_type=shipping_type,
                        real_delivery_date=real_delivery_date,
                        bob_id_supplier=bob_id_supplier,
                        is_marketplace=is_marketplace)
        self.session.add(new_item)
        self.session.commit()
        return new_item

    def add_so_item_status(self, id_sales_order_item_status, fk_oms_function,
                           status, desc, deprecated, updated_at):
        """
        Creates and saves a new Item Status to the database.
        Columns taken from ims_sales_order_item_status.csv.
        """
        new_status = ItemStatus(
            id_sales_order_item_status=id_sales_order_item_status,
            fk_oms_function=fk_oms_function,
            status=status,
            desc=desc,
            deprecated=deprecated,
            updated_at=updated_at)
        self.session.add(new_status)
        self.session.commit()
        return new_status

    def add_so_item_status_history(self, id_sales_order_item_status_history,
                                   fk_sales_order_item,
                                   fk_sales_order_item_status, created_at):
        """
        Creates and saves a new Item Status History to the database.
        Columns taken from ims_sales_order_item_status_history.csv.
        """
        # Parenthesised continuation replaces the old backslash style.
        new_history = ItemStatusHistory(
            id_sales_order_item_status_history=(
                id_sales_order_item_status_history),
            fk_sales_order_item=fk_sales_order_item,
            fk_sales_order_item_status=fk_sales_order_item_status,
            created_at=created_at)
        self.session.add(new_history)
        self.session.commit()
        return new_history

    def get_items(self, pk=None):
        """Fetch sales-order items, optionally filtered by primary key."""
        if pk:
            found = self.session.query(Item).filter(
                Item.id_sales_order_item == pk)
        else:
            found = self.session.query(Item).all()
        return found

    def get_status(self, pk=None):
        """Fetch item statuses, optionally filtered by primary key."""
        if pk:
            found = self.session.query(ItemStatus).filter(
                ItemStatus.id_sales_order_item_status == pk)
        else:
            found = self.session.query(ItemStatus).all()
        return found

    def get_history(self, pk=None):
        """Fetch status-history rows, optionally filtered by primary key."""
        if pk:
            found = self.session.query(ItemStatusHistory).filter(
                ItemStatusHistory.id_sales_order_item_status_history == pk)
        else:
            found = self.session.query(ItemStatusHistory).all()
        return found
| <filename>data/services.py
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from .utils import destroy_database, initialize_database
from .models import Item, ItemStatus, ItemStatusHistory, Package, Region
class DataService(object):
    """
    Service for handling the database connection.

    Borg pattern: all instances share one state dict, so the engine and
    session are created only on the first instantiation.
    """
    __shared_state = {} # Borg design pattern's shared state.
    __instantiated = False
    def __init__(self, engine):
        # :param engine: SQLAlchemy database URI, e.g. sqlite:///db.sqlite
        self.__dict__ = self.__shared_state
        # exit if already instantiated
        if self.__instantiated:
            return
        else:
            self.__instantiated = True
        if not engine:
            raise ValueError('The values specified in engine parameter has ' \
                'to be supported by SQLAlchemy.')
        self.engine = engine
        db_engine = create_engine(engine)
        self.db_engine = db_engine
        db_session = sessionmaker(bind=db_engine)
        self.session = db_session()
    def init_database(self):
        # Create all tables on the configured engine.
        initialize_database(engine=self.engine)
    def drop_database(self):
        # Drop all tables on the configured engine.
        destroy_database(engine=self.engine)
    def add_region(self, region, major_region, id_=None):
        """
        Creates and saves a new Region to the database.
        :param id_: Existing id of the region
        :param region: Name of the region
        :param major_region: Name of the Major Region
        :return: the persisted Region instance
        """
        new_region = Region(id=id_,
                            region=region,
                            major_region=major_region)
        self.session.add(new_region)
        self.session.commit()
        return new_region
    def get_regions(self, region=None):
        # NOTE(review): returns a Query when filtered but a list
        # otherwise -- both iterable; confirm callers don't index.
        if region:
            found = self.session.query(Region).filter(Region.region == region)
        else:
            found = self.session.query(Region).all()
        return found
    def update_package_addresses(self, region):
        """
        Find package by sub region name.
        TODO: update this to not resort to using this hack.
        """
        found_packages = self.session.query(Package).filter(
            Package.address.startswith(region.region))
        # NOTE(review): a Query object is always truthy, so this guard
        # never fires; consider .all() if the early return is intended.
        if not found_packages:
            return
        for package in found_packages:
            package.region = region
            self.session.add(package)
        self.session.commit()
    def get_packages(self, package_number=None):
        # Fetch packages, optionally filtered by package number.
        if package_number:
            found = self.session.query(Package).filter(
                Package.package_number == package_number)
        else:
            found = self.session.query(Package).all()
        return found
    def add_package(self, address, region, package_number, shipped_at,
                    delivered_at, lead_time=None, id_=None):
        """
        Creates and saves a new Package to the database.
        :param id_: Existing id of the package
        :param address: The address of the recipient
        :param region: FK of the region
        :param package_number: Unique package number
        :param shipped_at: The time when the package is shipped
        :param delivered_at: The time when the package is delivered to customer
        :param lead_time: The time from when the package is shipped
                          until it is delievered to customer
        """
        new_package = Package(id=id_,
                              address=address,
                              region=region,
                              package_number=package_number,
                              shipped_at=shipped_at,
                              delivered_at=delivered_at,
                              lead_time=lead_time)
        self.session.add(new_package)
        self.session.commit()
        return new_package
    def add_so_item(self, id_sales_order_item, bob_id_sales_order_item,
                    fk_sales_order, fk_sales_order_item_status,
                    fk_delivery_type, unit_price, tax_amount, paid_price,
                    name, sku, created_at, updated_at, last_status_change,
                    original_unit_price, shipping_type, real_delivery_date,
                    bob_id_supplier, is_marketplace):
        """
        Creates and saves a new Item to the database.
        Columns taken from ims_sales_order_item.csv.
        """
        new_item = Item(id_sales_order_item=id_sales_order_item,
                        bob_id_sales_order_item=bob_id_sales_order_item,
                        fk_sales_order=fk_sales_order,
                        fk_sales_order_item_status=fk_sales_order_item_status,
                        fk_delivery_type=fk_delivery_type,
                        unit_price=unit_price,
                        tax_amount=tax_amount,
                        paid_price=paid_price,
                        name=name,
                        sku=sku,
                        created_at=created_at,
                        updated_at=updated_at,
                        last_status_change=last_status_change,
                        original_unit_price=original_unit_price,
                        shipping_type=shipping_type,
                        real_delivery_date=real_delivery_date,
                        bob_id_supplier=bob_id_supplier,
                        is_marketplace=is_marketplace)
        self.session.add(new_item)
        self.session.commit()
        return new_item
    def add_so_item_status(self, id_sales_order_item_status, fk_oms_function,
                           status, desc, deprecated, updated_at):
        """
        Creates and saves a new Item Status to the database.
        Columns taken from ims_sales_order_item_status.csv.
        """
        new_status = ItemStatus(
            id_sales_order_item_status=id_sales_order_item_status,
            fk_oms_function=fk_oms_function,
            status=status,
            desc=desc,
            deprecated=deprecated,
            updated_at=updated_at)
        self.session.add(new_status)
        self.session.commit()
        return new_status
    def add_so_item_status_history(self, id_sales_order_item_status_history,
                                   fk_sales_order_item,
                                   fk_sales_order_item_status, created_at):
        """
        Creates and saves a new Item Status History to the database.
        Columns taken from ims_sales_order_item_status_history.csv.
        """
        new_history = ItemStatusHistory(
            id_sales_order_item_status_history=\
                id_sales_order_item_status_history,
            fk_sales_order_item=fk_sales_order_item,
            fk_sales_order_item_status=fk_sales_order_item_status,
            created_at=created_at)
        self.session.add(new_history)
        self.session.commit()
        return new_history
    def get_items(self, pk=None):
        # Fetch sales-order items, optionally filtered by primary key.
        if pk:
            found = self.session.query(Item).filter(
                Item.id_sales_order_item == pk)
        else:
            found = self.session.query(Item).all()
        return found
    def get_status(self, pk=None):
        # Fetch item statuses, optionally filtered by primary key.
        if pk:
            found = self.session.query(ItemStatus).filter(
                ItemStatus.id_sales_order_item_status == pk)
        else:
            found = self.session.query(ItemStatus).all()
        return found
    def get_history(self, pk=None):
        # Fetch status-history rows, optionally filtered by primary key.
        if pk:
            found = self.session.query(ItemStatusHistory).filter(
                ItemStatusHistory.id_sales_order_item_status_history == pk)
        else:
            found = self.session.query(ItemStatusHistory).all()
        return found
| en | 0.795465 | Service for handling the database connection. # Borg design pattern's shared state. # exit if already instantiated Creates and saves a new Region to the database. :param id_: Existing id of the region :param region: Name of the region :param major_region: Name of the Major Region Find package by sub region name. TODO: update this to not resort to using this hack. Creates and saves a new Package to the database. :param id_: Existing id of the package :param address: The address of the recipient :param region: FK of the region :param package_number: Unique package number :param shipped_at: The time when the package is shipped :param delivered_at: The time when the package is delivered to customer :param lead_time: The time from when the package is shipped until it is delievered to customer Creates and saves a new Item to the database. Columns taken from ims_sales_order_item.csv. Creates and saves a new Item Status to the database. Columns taken from ims_sales_order_item_status.csv. Creates and saves a new Item Status History to the database. Columns taken from ims_sales_order_item_status_history.csv. | 2.684036 | 3 |
sfaira_extension/versions/topology_versions/human/embedding/nmf.py | theislab/sfaira_extension | 0 | 6619043 | <reponame>theislab/sfaira_extension
NMF_TOPOLOGIES = {}
| NMF_TOPOLOGIES = {} | none | 1 | 0.982254 | 1 | |
tests/kyu_5_tests/test_first_variation_on_caesar_cipher.py | the-zebulan/CodeWars | 40 | 6619044 | <reponame>the-zebulan/CodeWars<gh_stars>10-100
import unittest
from katas.kyu_5.first_variation_on_caesar_cipher import demoving_shift, moving_shift
class CaesarCipherTestCase(unittest.TestCase):
    """Tests for the 'First Variation on Caesar Cipher' kata."""
    def test_equals(self):
        # demoving_shift decodes the 5-chunk cipher back to plaintext.
        self.assertEqual(demoving_shift(
            ['J vltasl rlhr ', 'zdfog odxr ypw', ' atasl rlhr p ',
             'gwkzzyq zntyhv', ' lvz wp!!!'], 1),
            'I should have known that you would have a perfect answer for me'
            '!!!')
    def test_equals_2(self):
        # moving_shift encodes the plaintext into the expected 5 chunks.
        self.assertEqual(moving_shift(
            'I should have known that you would have a perfect answer for me'
            '!!!', 1),
            ['J vltasl rlhr ', 'zdfog odxr ypw', ' atasl rlhr p ',
             'gwkzzyq zntyhv', ' lvz wp!!!'])
| import unittest
from katas.kyu_5.first_variation_on_caesar_cipher import demoving_shift, moving_shift
class CaesarCipherTestCase(unittest.TestCase):
    """Tests for the 'First Variation on Caesar Cipher' kata."""
    def test_equals(self):
        # demoving_shift decodes the 5-chunk cipher back to plaintext.
        self.assertEqual(demoving_shift(
            ['J vltasl rlhr ', 'zdfog odxr ypw', ' atasl rlhr p ',
             'gwkzzyq zntyhv', ' lvz wp!!!'], 1),
            'I should have known that you would have a perfect answer for me'
            '!!!')
    def test_equals_2(self):
        # moving_shift encodes the plaintext into the expected 5 chunks.
        self.assertEqual(moving_shift(
            'I should have known that you would have a perfect answer for me'
            '!!!', 1),
            ['J vltasl rlhr ', 'zdfog odxr ypw', ' atasl rlhr p ',
             'gwkzzyq zntyhv', ' lvz wp!!!'])
rubedo/sqlsorcery/__init__.py | mkomet/rubedo | 0 | 6619045 | <reponame>mkomet/rubedo
from .sqlsorcery import SqlSorceryBackend, metadata
from .sqlutils import (
build_mysql_uri,
build_sqlite_uri,
create_all,
raw_sql_session,
sql_session,
)
__all__ = [
"SqlSorceryBackend",
"metadata",
"raw_sql_session",
"sql_session",
"build_mysql_uri",
"build_sqlite_uri",
"create_all",
]
| from .sqlsorcery import SqlSorceryBackend, metadata
from .sqlutils import (
build_mysql_uri,
build_sqlite_uri,
create_all,
raw_sql_session,
sql_session,
)
__all__ = [
"SqlSorceryBackend",
"metadata",
"raw_sql_session",
"sql_session",
"build_mysql_uri",
"build_sqlite_uri",
"create_all",
] | none | 1 | 1.343623 | 1 | |
tests/test_single_recipe_delete.py | PatrickCmd/Yummy-Recipe-RestAPI | 0 | 6619046 | # tests/test_single_recipe_update.py
import unittest
import json
import uuid
import time
from api import db
from api.models import User, RecipeCategory, Recipe
from tests.register_login import RegisterLogin
class TestDeleteSingleRecipeBlueprint(RegisterLogin):
    """Integration tests for DELETE /recipe_category/<cat>/recipes/<id>."""
    def test_delete_recipe_in_category(self):
        """
        Deleting an existing recipe returns 200; deleting missing
        recipes or categories returns 404.
        """
        response = self.register_user(
            "Patrick", "Walukagga",
            "<EMAIL>", "telnetcmd123"
        )
        # registered user login
        rep_login = self.login_user("<EMAIL>", "telnetcmd123")
        # valid token
        headers=dict(
            Authorization='Bearer ' + json.loads(
                rep_login.data.decode()
            )['auth_token']
        )
        # Seed category 1 directly; category 2 via the API helper.
        category = RecipeCategory(
            name="Breakfast",
            description="How to make breakfast",
            user_id=1
        )
        category.save()
        response = self.create_category("LunchBuffe",
                                        "How to make lunch buffe",
                                        headers)
        # Seed recipe 1 directly; recipe 2 via the API helper.
        recipe = Recipe(
            name="Rolex for Lunch",
            cat_id=2,
            user_id=1,
            ingredients="oil, Onions, Tomatoes",
            description="How to make breakfast rolex"
        )
        recipe.save()
        response = self.create_recipe_in_category(2,
                                        "Chicken Lunch Buffe",
                                        "oil, Onions,Tomatoes",
                                        "Fresh chicken",
                                        "Mix and boil",
                                        headers
                                        )
        # Happy path: delete recipe 2 in category 2.
        response = self.client.delete('/recipe_category/2/recipes/2',
                                      headers=headers)
        self.assertEqual(response.status_code, 200)
        self.assertIn('Recipe item deleted', str(response.data))
        # The deleted recipe is gone.
        response = self.client.get('/recipe_category/2/recipes/2',
                                   headers=headers)
        self.assertEqual(response.status_code, 404)
        self.assertIn('Recipe not found', str(response.data))
        # delete recipe not yet in database
        response = self.client.delete('/recipe_category/2/recipes/4',
                                      headers=headers)
        self.assertEqual(response.status_code, 404)
        self.assertIn('Recipe not found', str(response.data))
        # delete recipe in category not yet in database
        response = self.client.delete('/recipe_category/3/recipes/1',
                                      headers=headers)
        self.assertEqual(response.status_code, 404)
        self.assertIn('Category not found in database',
                      str(response.data))
    def test_delete_recipe_in_category_catid_recipeid_not_number(self):
        """
        Non-integer category or recipe ids in the URL are rejected
        with 400 and a descriptive message.
        """
        response = self.register_user(
            "Patrick", "Walukagga",
            "<EMAIL>", "telnetcmd123"
        )
        # registered user login
        rep_login = self.login_user("<EMAIL>", "telnetcmd123")
        # valid token
        headers=dict(
            Authorization='Bearer ' + json.loads(
                rep_login.data.decode()
            )['auth_token']
        )
        category = RecipeCategory(
            name="Breakfast",
            description="How to make breakfast",
            user_id=1
        )
        category.save()
        response = self.create_category("LunchBuffe",
                                        "How to make lunch buffe",
                                        headers)
        recipe = Recipe(
            name="Rolex for Lunch",
            cat_id=2,
            user_id=1,
            ingredients="oil, Onions, Tomatoes",
            description="How to make breakfast rolex"
        )
        recipe.save()
        response = self.create_recipe_in_category(2,
                                        "Chicken Lunch Buffe",
                                        "oil, Onions,Tomatoes",
                                        "Fresh chicken",
                                        "Mix and boil",
                                        headers
                                        )
        # category id not a number
        response = self.client.delete('/recipe_category/a/recipes/2',
                                      headers=headers)
        self.assertEqual(response.status_code, 400)
        self.assertIn('Category ID must be an integer', str(response.data))
        # recipe id not number
        response = self.client.delete('/recipe_category/2/recipes/a',
                                      headers=headers)
        self.assertEqual(response.status_code, 400)
        self.assertIn('Recipe ID must be an integer', str(response.data))
if __name__ == '__main__':
unittest.main()
| # tests/test_single_recipe_update.py
import unittest
import json
import uuid
import time
from api import db
from api.models import User, RecipeCategory, Recipe
from tests.register_login import RegisterLogin
class TestDeleteSingleRecipeBlueprint(RegisterLogin):
    """Tests for DELETE /recipe_category/<cat_id>/recipes/<recipe_id>.

    RegisterLogin (project base class) supplies the test client plus the
    register/login/create_* helpers used below.
    """

    def test_delete_recipe_in_category(self):
        """
        Test for deleting recipe in category:
        happy path plus 404s for a missing recipe and a missing category.
        """
        # Register and log in to obtain a bearer token for the protected endpoints.
        response = self.register_user(
            "Patrick", "Walukagga",
            "<EMAIL>", "telnetcmd123"
        )
        # registered user login
        rep_login = self.login_user("<EMAIL>", "telnetcmd123")
        # valid token
        headers=dict(
            Authorization='Bearer ' + json.loads(
                rep_login.data.decode()
            )['auth_token']
        )
        # Seed one category directly through the model (id 1) ...
        category = RecipeCategory(
            name="Breakfast",
            description="How to make breakfast",
            user_id=1
        )
        category.save()
        # ... and a second one (id 2) through the API.
        response = self.create_category("LunchBuffe",
                                        "How to make lunch buffe",
                                        headers)
        # Seed a recipe in category 2 directly through the model ...
        recipe = Recipe(
            name="Rolex for Lunch",
            cat_id=2,
            user_id=1,
            ingredients="oil, Onions, Tomatoes",
            description="How to make breakfast rolex"
        )
        recipe.save()
        # ... and another through the API (recipe id 2).
        response = self.create_recipe_in_category(2,
                                                  "Chicken Lunch Buffe",
                                                  "oil, Onions,Tomatoes",
                                                  "Fresh chicken",
                                                  "Mix and boil",
                                                  headers
                                                  )
        # Successful delete of an existing recipe.
        response = self.client.delete('/recipe_category/2/recipes/2',
                                      headers=headers)
        self.assertEqual(response.status_code, 200)
        self.assertIn('Recipe item deleted', str(response.data))
        # The deleted recipe is no longer retrievable.
        response = self.client.get('/recipe_category/2/recipes/2',
                                   headers=headers)
        self.assertEqual(response.status_code, 404)
        self.assertIn('Recipe not found', str(response.data))
        # delete recipe not yet in database
        response = self.client.delete('/recipe_category/2/recipes/4',
                                      headers=headers)
        self.assertEqual(response.status_code, 404)
        self.assertIn('Recipe not found', str(response.data))
        # delete recipe in category not yet in database
        response = self.client.delete('/recipe_category/3/recipes/1',
                                      headers=headers)
        self.assertEqual(response.status_code, 404)
        self.assertIn('Category not found in database',
                      str(response.data))

    def test_delete_recipe_in_category_catid_recipeid_not_number(self):
        """
        Test for deleting recipe in category:
        a non-integer category id or recipe id must be rejected with 400.
        """
        # Register and log in to obtain a bearer token for the protected endpoints.
        response = self.register_user(
            "Patrick", "Walukagga",
            "<EMAIL>", "telnetcmd123"
        )
        # registered user login
        rep_login = self.login_user("<EMAIL>", "telnetcmd123")
        # valid token
        headers=dict(
            Authorization='Bearer ' + json.loads(
                rep_login.data.decode()
            )['auth_token']
        )
        # Seed a category and two recipes, same fixture layout as above.
        category = RecipeCategory(
            name="Breakfast",
            description="How to make breakfast",
            user_id=1
        )
        category.save()
        response = self.create_category("LunchBuffe",
                                        "How to make lunch buffe",
                                        headers)
        recipe = Recipe(
            name="Rolex for Lunch",
            cat_id=2,
            user_id=1,
            ingredients="oil, Onions, Tomatoes",
            description="How to make breakfast rolex"
        )
        recipe.save()
        response = self.create_recipe_in_category(2,
                                                  "Chicken Lunch Buffe",
                                                  "oil, Onions,Tomatoes",
                                                  "Fresh chicken",
                                                  "Mix and boil",
                                                  headers
                                                  )
        # category id not a number -> 400
        response = self.client.delete('/recipe_category/a/recipes/2',
                                      headers=headers)
        self.assertEqual(response.status_code, 400)
        self.assertIn('Category ID must be an integer', str(response.data))
        # recipe id not number
        response = self.client.delete('/recipe_category/2/recipes/a',
                                      headers=headers)
        self.assertEqual(response.status_code, 400)
        self.assertIn('Recipe ID must be an integer', str(response.data))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| en | 0.7809 | # tests/test_single_recipe_update.py Test for deleting recipe in category # registered user login # valid token # delete recipe not yet in database # delete recipe in category not yet in database Test for deleting recipe in category # registered user login # valid token # recipe id not number | 2.490937 | 2 |
tibia/parser.py | Truta446/tibia-crawler | 0 | 6619047 | <gh_stars>0
import re
from bs4 import BeautifulSoup
from model import Tibia
from utils import normalizeText
ACCOUNT_STATUS_REGEX = r"(Account\sStatus\:)"
GUILD_MEMBERSHIP_REGEX = r"(Guild\sMembership\:)"
class Parser:
    """Parses a tibia.com character page (HTML) into a Tibia model instance."""

    def parse(self, html):
        """Parse a full character-page HTML document into a populated Tibia model."""
        parsed = BeautifulSoup(html, "html.parser")
        return Tibia(
            name=self.extract_name(parsed),
            title=self.extract_title(parsed),
            sex=self.extract_sex(parsed),
            vocation=self.extract_vocation(parsed),
            level=self.extract_level(parsed),
            achivement=self.extract_achivement(parsed),
            world=self.extract_world(parsed),
            residence=self.extract_residence(parsed),
            guild_membership=self.extract_guild_membership(parsed),
            last_login=self.extract_last_login(parsed),
            account_status=self.extract_account_status(parsed),
            deaths=self.extract_deaths(parsed),
        )

    def extract_deaths(self, html):
        """Collect the 'Character Deaths' table rows as {timestamp, description} dicts.

        NOTE(review): implicitly returns None when the deaths section is absent —
        confirm callers expect None rather than an empty list.
        """
        text = html.find("b", string="Character Deaths")
        if text:
            result = []
            rows = text.find_all_next("tr")
            for item in rows:
                # Rows past these headings belong to the next page sections; stop there.
                if item.text == "Search Character" or item.text == "Account Information":
                    break
                # First cell holds the death time, second the kill description.
                timestamp = normalizeText(
                    item.select_one("td:nth-of-type(1)").text.strip()
                )
                description = normalizeText(
                    item.select_one("td:nth-of-type(2)").text.strip()
                )
                result.append({
                    "timestamp": timestamp,
                    "description": description
                })
            return result

    def extract_account_status(self, html):
        """Return the 'Account Status' field text (regex match: label varies slightly)."""
        result = html.find("td", string=re.compile(ACCOUNT_STATUS_REGEX))
        return self._getInformation(result)

    def extract_last_login(self, html):
        """Return the normalized 'Last Login' field text."""
        result = html.find("td", string="Last Login:")
        return normalizeText(self._getInformation(result))

    def extract_guild_membership(self, html):
        """Return the normalized 'Guild Membership' field text."""
        result = html.find("td", string=re.compile(GUILD_MEMBERSHIP_REGEX))
        return normalizeText(self._getInformation(result))

    def extract_residence(self, html):
        """Return the 'Residence' field text."""
        result = html.find("td", string="Residence:")
        return self._getInformation(result)

    def extract_world(self, html):
        """Return the 'World' field text."""
        result = html.find("td", string="World:")
        return self._getInformation(result)

    def extract_achivement(self, html):
        """Return the 'Achievement Points' field text (method name keeps the model's spelling)."""
        result = html.find("td", string="Achievement Points:")
        return self._getInformation(result)

    def extract_level(self, html):
        """Return the 'Level' field text."""
        result = html.find("td", string="Level:")
        return self._getInformation(result)

    def extract_vocation(self, html):
        """Return the 'Vocation' field text."""
        result = html.find("td", string="Vocation:")
        return self._getInformation(result)

    def extract_sex(self, html):
        """Return the 'Sex' field text."""
        result = html.find("td", string="Sex:")
        return self._getInformation(result)

    def extract_title(self, html):
        """Return the 'Title' field text."""
        result = html.find("td", string="Title:")
        return self._getInformation(result)

    def extract_name(self, html):
        """Return the 'Name' field text."""
        result = html.find("td", string="Name:")
        return self._getInformation(result)

    def characterNotFound(self, html):
        """Check the page for the 'does not exist' marker.

        NOTE(review): returns True when the marker is ABSENT, i.e. when the
        character WAS found — the name reads inverted. Verify against callers
        before changing.
        """
        parsed = BeautifulSoup(html, "html.parser")
        result = parsed.find(string=re.compile(r"(does\snot\sexist.)"))
        return not bool(result)

    def _getInformation(self, result):
        """Return the stripped text of the <td> following *result*; None when *result* is falsy."""
        if result:
            return result.find_next("td").text.strip()
| import re
from bs4 import BeautifulSoup
from model import Tibia
from utils import normalizeText
ACCOUNT_STATUS_REGEX = r"(Account\sStatus\:)"
GUILD_MEMBERSHIP_REGEX = r"(Guild\sMembership\:)"
class Parser:
def parse(self, html):
parsed = BeautifulSoup(html, "html.parser")
return Tibia(
name=self.extract_name(parsed),
title=self.extract_title(parsed),
sex=self.extract_sex(parsed),
vocation=self.extract_vocation(parsed),
level=self.extract_level(parsed),
achivement=self.extract_achivement(parsed),
world=self.extract_world(parsed),
residence=self.extract_residence(parsed),
guild_membership=self.extract_guild_membership(parsed),
last_login=self.extract_last_login(parsed),
account_status=self.extract_account_status(parsed),
deaths=self.extract_deaths(parsed),
)
def extract_deaths(self, html):
text = html.find("b", string="Character Deaths")
if text:
result = []
rows = text.find_all_next("tr")
for item in rows:
if item.text == "Search Character" or item.text == "Account Information":
break
timestamp = normalizeText(
item.select_one("td:nth-of-type(1)").text.strip()
)
description = normalizeText(
item.select_one("td:nth-of-type(2)").text.strip()
)
result.append({
"timestamp": timestamp,
"description": description
})
return result
def extract_account_status(self, html):
result = html.find("td", string=re.compile(ACCOUNT_STATUS_REGEX))
return self._getInformation(result)
def extract_last_login(self, html):
result = html.find("td", string="Last Login:")
return normalizeText(self._getInformation(result))
def extract_guild_membership(self, html):
result = html.find("td", string=re.compile(GUILD_MEMBERSHIP_REGEX))
return normalizeText(self._getInformation(result))
def extract_residence(self, html):
result = html.find("td", string="Residence:")
return self._getInformation(result)
def extract_world(self, html):
result = html.find("td", string="World:")
return self._getInformation(result)
def extract_achivement(self, html):
result = html.find("td", string="Achievement Points:")
return self._getInformation(result)
def extract_level(self, html):
result = html.find("td", string="Level:")
return self._getInformation(result)
def extract_vocation(self, html):
result = html.find("td", string="Vocation:")
return self._getInformation(result)
def extract_sex(self, html):
result = html.find("td", string="Sex:")
return self._getInformation(result)
def extract_title(self, html):
result = html.find("td", string="Title:")
return self._getInformation(result)
def extract_name(self, html):
result = html.find("td", string="Name:")
return self._getInformation(result)
def characterNotFound(self, html):
parsed = BeautifulSoup(html, "html.parser")
result = parsed.find(string=re.compile(r"(does\snot\sexist.)"))
return not bool(result)
def _getInformation(self, result):
if result:
return result.find_next("td").text.strip() | none | 1 | 2.827261 | 3 | |
mindhome_alpha/erpnext/erpnext_integrations/doctype/mpesa_settings/mpesa_settings.py | Mindhome/field_service | 1 | 6619048 | <filename>mindhome_alpha/erpnext/erpnext_integrations/doctype/mpesa_settings/mpesa_settings.py
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from json import loads, dumps
import frappe
from frappe.model.document import Document
from frappe import _
from frappe.utils import call_hook_method, fmt_money
from frappe.integrations.utils import create_request_log, create_payment_gateway
from frappe.utils import get_request_site_address
from erpnext.erpnext_integrations.utils import create_mode_of_payment
from erpnext.erpnext_integrations.doctype.mpesa_settings.mpesa_connector import MpesaConnector
from erpnext.erpnext_integrations.doctype.mpesa_settings.mpesa_custom_fields import create_custom_pos_fields
class MpesaSettings(Document):
    # Mpesa only settles transactions in Kenyan Shillings.
    supported_currencies = ["KES"]

    def validate_transaction_currency(self, currency):
        """Abort with a user-facing error when *currency* is not supported by Mpesa."""
        if currency not in self.supported_currencies:
            frappe.throw(_("Please select another payment method. Mpesa does not support transactions in currency '{0}'").format(currency))

    def on_update(self):
        """Register this gateway with the POS stack each time the settings doc is saved."""
        create_custom_pos_fields()
        create_payment_gateway('Mpesa-' + self.payment_gateway_name, settings='Mpesa Settings', controller=self.payment_gateway_name)
        call_hook_method('payment_gateway_enabled', gateway='Mpesa-' + self.payment_gateway_name, payment_channel="Phone")
        # required to fetch the bank account details from the payment gateway account
        frappe.db.commit()
        create_mode_of_payment('Mpesa-' + self.payment_gateway_name, payment_type="Phone")

    def request_for_payment(self, **kwargs):
        """Send one STK push per chunk of the requested amount (see split below)."""
        args = frappe._dict(kwargs)
        request_amounts = self.split_request_amount_according_to_transaction_limit(args)
        for i, amount in enumerate(request_amounts):
            args.request_amount = amount
            if frappe.flags.in_test:
                # Tests stub the network round-trip with a canned payload.
                from erpnext.erpnext_integrations.doctype.mpesa_settings.test_mpesa_settings import get_payment_request_response_payload
                response = frappe._dict(get_payment_request_response_payload(amount))
            else:
                response = frappe._dict(generate_stk_push(**args))
            self.handle_api_response("CheckoutRequestID", args, response)

    def split_request_amount_according_to_transaction_limit(self, args):
        """Break args.request_amount into chunks no larger than self.transaction_limit."""
        request_amount = args.request_amount
        if request_amount > self.transaction_limit:
            # make multiple requests
            request_amounts = []
            requests_to_be_made = frappe.utils.ceil(request_amount / self.transaction_limit) # 480/150 = ceil(3.2) = 4
            for i in range(requests_to_be_made):
                amount = self.transaction_limit
                if i == requests_to_be_made - 1:
                    # Last chunk carries the remainder.
                    amount = request_amount - (self.transaction_limit * i) # for 4th request, 480 - (150 * 3) = 30
                request_amounts.append(amount)
        else:
            request_amounts = [request_amount]
        return request_amounts

    def get_account_balance_info(self):
        """Query the Mpesa account balance API; result arrives via the balance callback."""
        payload = dict(
            reference_doctype="Mpesa Settings",
            reference_docname=self.name,
            doc_details=vars(self)
        )
        if frappe.flags.in_test:
            from erpnext.erpnext_integrations.doctype.mpesa_settings.test_mpesa_settings import get_test_account_balance_response
            response = frappe._dict(get_test_account_balance_response())
        else:
            response = frappe._dict(get_account_balance(payload))
        self.handle_api_response("ConversationID", payload, response)

    def handle_api_response(self, global_id, request_dict, response):
        """Response received from API calls returns a global identifier for each transaction, this code is returned during the callback."""
        # check error response
        if getattr(response, "requestId"):
            req_name = getattr(response, "requestId")
            error = response
        else:
            # global checkout id used as request name
            req_name = getattr(response, global_id)
            error = None
        if not frappe.db.exists('Integration Request', req_name):
            # Persist the outgoing request so the async callback can find it later.
            create_request_log(request_dict, "Host", "Mpesa", req_name, error)
        if error:
            frappe.throw(_(getattr(response, "errorMessage")), title=_("Transaction Error"))
def generate_stk_push(**kwargs):
    """Generate stk push by making a API call to the stk push API."""
    args = frappe._dict(kwargs)
    try:
        # Mpesa will POST the payment result to this whitelisted endpoint.
        callback_url = get_request_site_address(True) + "/api/method/erpnext.erpnext_integrations.doctype.mpesa_settings.mpesa_settings.verify_transaction"
        # args.payment_gateway is "Mpesa-<name>"; [6:] strips the "Mpesa-" prefix.
        mpesa_settings = frappe.get_doc("Mpesa Settings", args.payment_gateway[6:])
        env = "production" if not mpesa_settings.sandbox else "sandbox"
        # for sandbox, business shortcode is same as till number
        business_shortcode = mpesa_settings.business_shortcode if env == "production" else mpesa_settings.till_number
        connector = MpesaConnector(env=env,
            app_key=mpesa_settings.consumer_key,
            app_secret=mpesa_settings.get_password("consumer_secret"))
        mobile_number = sanitize_mobile_number(args.sender)
        response = connector.stk_push(
            business_shortcode=business_shortcode, amount=args.request_amount,
            passcode=mpesa_settings.get_password("<PASSWORD>"),
            callback_url=callback_url, reference_code=mpesa_settings.till_number,
            phone_number=mobile_number, description="POS Payment"
        )
        return response
    except Exception:
        # Log the full traceback, then surface a generic error to the user.
        frappe.log_error(title=_("Mpesa Express Transaction Error"))
        frappe.throw(_("Issue detected with Mpesa configuration, check the error logs for more details"), title=_("Mpesa Express Error"))
def sanitize_mobile_number(number):
    """Add country code and strip leading zeroes from the phone number."""
    national_part = str(number).lstrip("0")
    return f"254{national_part}"
@frappe.whitelist(allow_guest=True)
def verify_transaction(**kwargs):
    """Verify the transaction result received via callback from stk."""
    transaction_response = frappe._dict(kwargs["Body"]["stkCallback"])
    checkout_id = getattr(transaction_response, "CheckoutRequestID", "")
    # The outgoing request was logged under the checkout id by handle_api_response.
    integration_request = frappe.get_doc("Integration Request", checkout_id)
    transaction_data = frappe._dict(loads(integration_request.data))
    total_paid = 0 # for multiple integration request made against a pos invoice
    success = False # for reporting successfull callback to point of sale ui
    if transaction_response['ResultCode'] == 0:
        # ResultCode 0 means Mpesa reports the payment succeeded.
        if integration_request.reference_doctype and integration_request.reference_docname:
            try:
                item_response = transaction_response["CallbackMetadata"]["Item"]
                amount = fetch_param_value(item_response, "Amount", "Name")
                mpesa_receipt = fetch_param_value(item_response, "MpesaReceiptNumber", "Name")
                pr = frappe.get_doc(integration_request.reference_doctype, integration_request.reference_docname)
                # Aggregate with earlier completed callbacks for the same payment request.
                mpesa_receipts, completed_payments = get_completed_integration_requests_info(
                    integration_request.reference_doctype,
                    integration_request.reference_docname,
                    checkout_id
                )
                total_paid = amount + sum(completed_payments)
                mpesa_receipts = ', '.join(mpesa_receipts + [mpesa_receipt])
                # Only authorize once the cumulative amount covers the invoice total.
                if total_paid >= pr.grand_total:
                    pr.run_method("on_payment_authorized", 'Completed')
                    success = True
                frappe.db.set_value("POS Invoice", pr.reference_name, "mpesa_receipt_number", mpesa_receipts)
                integration_request.handle_success(transaction_response)
            except Exception:
                integration_request.handle_failure(transaction_response)
                frappe.log_error(frappe.get_traceback())
    else:
        integration_request.handle_failure(transaction_response)
    # Notify the waiting POS UI of the outcome (success or failure) in realtime.
    frappe.publish_realtime(
        event='process_phone_payment',
        doctype="POS Invoice",
        docname=transaction_data.payment_reference,
        user=integration_request.owner,
        message={
            'amount': total_paid,
            'success': success,
            'failure_message': transaction_response["ResultDesc"] if transaction_response['ResultCode'] != 0 else ''
        },
    )
def get_completed_integration_requests_info(reference_doctype, reference_docname, checkout_id):
    """Return (receipt numbers, paid amounts) from the OTHER completed integration
    requests made against the same reference document, excluding *checkout_id*."""
    output_of_other_completed_requests = frappe.get_all("Integration Request", filters={
        'name': ['!=', checkout_id],
        'reference_doctype': reference_doctype,
        'reference_docname': reference_docname,
        'status': 'Completed'
    }, pluck="output")
    mpesa_receipts, completed_payments = [], []
    for out in output_of_other_completed_requests:
        # "output" stores the raw Mpesa callback payload as JSON.
        out = frappe._dict(loads(out))
        item_response = out["CallbackMetadata"]["Item"]
        completed_amount = fetch_param_value(item_response, "Amount", "Name")
        completed_mpesa_receipt = fetch_param_value(item_response, "MpesaReceiptNumber", "Name")
        completed_payments.append(completed_amount)
        mpesa_receipts.append(completed_mpesa_receipt)
    return mpesa_receipts, completed_payments
def get_account_balance(request_payload):
    """Call account balance API to send the request to the Mpesa Servers."""
    try:
        mpesa_settings = frappe.get_doc("Mpesa Settings", request_payload.get("reference_docname"))
        env = "production" if not mpesa_settings.sandbox else "sandbox"
        connector = MpesaConnector(env=env,
            app_key=mpesa_settings.consumer_key,
            app_secret=mpesa_settings.get_password("consumer_secret"))
        # Mpesa POSTs the balance result (and timeouts) to this whitelisted endpoint.
        callback_url = get_request_site_address(True) + "/api/method/erpnext.erpnext_integrations.doctype.mpesa_settings.mpesa_settings.process_balance_info"
        response = connector.get_balance(mpesa_settings.initiator_name, mpesa_settings.security_credential, mpesa_settings.till_number, 4, mpesa_settings.name, callback_url, callback_url)
        return response
    except Exception:
        frappe.log_error(title=_("Account Balance Processing Error"))
        frappe.throw(_("Please check your configuration and try again"), title=_("Error"))
@frappe.whitelist(allow_guest=True)
def process_balance_info(**kwargs):
    """Process and store account balance information received via callback from the account balance API call."""
    account_balance_response = frappe._dict(kwargs["Result"])
    conversation_id = getattr(account_balance_response, "ConversationID", "")
    request = frappe.get_doc("Integration Request", conversation_id)
    if request.status == "Completed":
        # Callback already processed; ignore duplicate deliveries.
        return
    transaction_data = frappe._dict(loads(request.data))
    if account_balance_response["ResultCode"] == 0:
        try:
            result_params = account_balance_response["ResultParameters"]["ResultParameter"]
            balance_info = fetch_param_value(result_params, "AccountBalance", "Key")
            # Convert the pipe/ampersand-delimited balance string to JSON for storage.
            balance_info = format_string_to_json(balance_info)
            ref_doc = frappe.get_doc(transaction_data.reference_doctype, transaction_data.reference_docname)
            ref_doc.db_set("account_balance", balance_info)
            request.handle_success(account_balance_response)
            # Refresh the dashboard of the user who initiated the balance query.
            frappe.publish_realtime("refresh_mpesa_dashboard", doctype="Mpesa Settings",
                docname=transaction_data.reference_docname, user=transaction_data.owner)
        except Exception:
            request.handle_failure(account_balance_response)
            frappe.log_error(title=_("Mpesa Account Balance Processing Error"), message=account_balance_response)
    else:
        request.handle_failure(account_balance_response)
def format_string_to_json(balance_info):
    """
    Format string to json.
    e.g: '''Working Account|KES|481000.00|481000.00|0.00|0.00'''
    => {'Working Account': {'current_balance': '481000.00',
    'available_balance': '481000.00',
    'reserved_balance': '0.00',
    'uncleared_balance': '0.00'}}
    """
    parsed = frappe._dict()
    # Accounts are '&'-separated; fields within an account are '|'-separated.
    for account_entry in balance_info.split("&"):
        fields = account_entry.split('|')
        parsed[fields[0]] = {
            "current_balance": fmt_money(fields[2], currency="KES"),
            "available_balance": fmt_money(fields[3], currency="KES"),
            "reserved_balance": fmt_money(fields[4], currency="KES"),
            "uncleared_balance": fmt_money(fields[5], currency="KES"),
        }
    return dumps(parsed)
def fetch_param_value(response, key, key_field):
    """Fetch the specified key from list of dictionary. Key is identified via the key field."""
    matches = (entry["Value"] for entry in response if entry[key_field] == key)
    # First matching entry wins; None when nothing matches.
    return next(matches, None)
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from json import loads, dumps
import frappe
from frappe.model.document import Document
from frappe import _
from frappe.utils import call_hook_method, fmt_money
from frappe.integrations.utils import create_request_log, create_payment_gateway
from frappe.utils import get_request_site_address
from erpnext.erpnext_integrations.utils import create_mode_of_payment
from erpnext.erpnext_integrations.doctype.mpesa_settings.mpesa_connector import MpesaConnector
from erpnext.erpnext_integrations.doctype.mpesa_settings.mpesa_custom_fields import create_custom_pos_fields
class MpesaSettings(Document):
supported_currencies = ["KES"]
def validate_transaction_currency(self, currency):
if currency not in self.supported_currencies:
frappe.throw(_("Please select another payment method. Mpesa does not support transactions in currency '{0}'").format(currency))
def on_update(self):
create_custom_pos_fields()
create_payment_gateway('Mpesa-' + self.payment_gateway_name, settings='Mpesa Settings', controller=self.payment_gateway_name)
call_hook_method('payment_gateway_enabled', gateway='Mpesa-' + self.payment_gateway_name, payment_channel="Phone")
# required to fetch the bank account details from the payment gateway account
frappe.db.commit()
create_mode_of_payment('Mpesa-' + self.payment_gateway_name, payment_type="Phone")
def request_for_payment(self, **kwargs):
args = frappe._dict(kwargs)
request_amounts = self.split_request_amount_according_to_transaction_limit(args)
for i, amount in enumerate(request_amounts):
args.request_amount = amount
if frappe.flags.in_test:
from erpnext.erpnext_integrations.doctype.mpesa_settings.test_mpesa_settings import get_payment_request_response_payload
response = frappe._dict(get_payment_request_response_payload(amount))
else:
response = frappe._dict(generate_stk_push(**args))
self.handle_api_response("CheckoutRequestID", args, response)
def split_request_amount_according_to_transaction_limit(self, args):
request_amount = args.request_amount
if request_amount > self.transaction_limit:
# make multiple requests
request_amounts = []
requests_to_be_made = frappe.utils.ceil(request_amount / self.transaction_limit) # 480/150 = ceil(3.2) = 4
for i in range(requests_to_be_made):
amount = self.transaction_limit
if i == requests_to_be_made - 1:
amount = request_amount - (self.transaction_limit * i) # for 4th request, 480 - (150 * 3) = 30
request_amounts.append(amount)
else:
request_amounts = [request_amount]
return request_amounts
def get_account_balance_info(self):
payload = dict(
reference_doctype="Mpesa Settings",
reference_docname=self.name,
doc_details=vars(self)
)
if frappe.flags.in_test:
from erpnext.erpnext_integrations.doctype.mpesa_settings.test_mpesa_settings import get_test_account_balance_response
response = frappe._dict(get_test_account_balance_response())
else:
response = frappe._dict(get_account_balance(payload))
self.handle_api_response("ConversationID", payload, response)
def handle_api_response(self, global_id, request_dict, response):
"""Response received from API calls returns a global identifier for each transaction, this code is returned during the callback."""
# check error response
if getattr(response, "requestId"):
req_name = getattr(response, "requestId")
error = response
else:
# global checkout id used as request name
req_name = getattr(response, global_id)
error = None
if not frappe.db.exists('Integration Request', req_name):
create_request_log(request_dict, "Host", "Mpesa", req_name, error)
if error:
frappe.throw(_(getattr(response, "errorMessage")), title=_("Transaction Error"))
def generate_stk_push(**kwargs):
"""Generate stk push by making a API call to the stk push API."""
args = frappe._dict(kwargs)
try:
callback_url = get_request_site_address(True) + "/api/method/erpnext.erpnext_integrations.doctype.mpesa_settings.mpesa_settings.verify_transaction"
mpesa_settings = frappe.get_doc("Mpesa Settings", args.payment_gateway[6:])
env = "production" if not mpesa_settings.sandbox else "sandbox"
# for sandbox, business shortcode is same as till number
business_shortcode = mpesa_settings.business_shortcode if env == "production" else mpesa_settings.till_number
connector = MpesaConnector(env=env,
app_key=mpesa_settings.consumer_key,
app_secret=mpesa_settings.get_password("consumer_secret"))
mobile_number = sanitize_mobile_number(args.sender)
response = connector.stk_push(
business_shortcode=business_shortcode, amount=args.request_amount,
passcode=mpesa_settings.get_password("<PASSWORD>"),
callback_url=callback_url, reference_code=mpesa_settings.till_number,
phone_number=mobile_number, description="POS Payment"
)
return response
except Exception:
frappe.log_error(title=_("Mpesa Express Transaction Error"))
frappe.throw(_("Issue detected with Mpesa configuration, check the error logs for more details"), title=_("Mpesa Express Error"))
def sanitize_mobile_number(number):
"""Add country code and strip leading zeroes from the phone number."""
return "254" + str(number).lstrip("0")
@frappe.whitelist(allow_guest=True)
def verify_transaction(**kwargs):
"""Verify the transaction result received via callback from stk."""
transaction_response = frappe._dict(kwargs["Body"]["stkCallback"])
checkout_id = getattr(transaction_response, "CheckoutRequestID", "")
integration_request = frappe.get_doc("Integration Request", checkout_id)
transaction_data = frappe._dict(loads(integration_request.data))
total_paid = 0 # for multiple integration request made against a pos invoice
success = False # for reporting successfull callback to point of sale ui
if transaction_response['ResultCode'] == 0:
if integration_request.reference_doctype and integration_request.reference_docname:
try:
item_response = transaction_response["CallbackMetadata"]["Item"]
amount = fetch_param_value(item_response, "Amount", "Name")
mpesa_receipt = fetch_param_value(item_response, "MpesaReceiptNumber", "Name")
pr = frappe.get_doc(integration_request.reference_doctype, integration_request.reference_docname)
mpesa_receipts, completed_payments = get_completed_integration_requests_info(
integration_request.reference_doctype,
integration_request.reference_docname,
checkout_id
)
total_paid = amount + sum(completed_payments)
mpesa_receipts = ', '.join(mpesa_receipts + [mpesa_receipt])
if total_paid >= pr.grand_total:
pr.run_method("on_payment_authorized", 'Completed')
success = True
frappe.db.set_value("POS Invoice", pr.reference_name, "mpesa_receipt_number", mpesa_receipts)
integration_request.handle_success(transaction_response)
except Exception:
integration_request.handle_failure(transaction_response)
frappe.log_error(frappe.get_traceback())
else:
integration_request.handle_failure(transaction_response)
frappe.publish_realtime(
event='process_phone_payment',
doctype="POS Invoice",
docname=transaction_data.payment_reference,
user=integration_request.owner,
message={
'amount': total_paid,
'success': success,
'failure_message': transaction_response["ResultDesc"] if transaction_response['ResultCode'] != 0 else ''
},
)
def get_completed_integration_requests_info(reference_doctype, reference_docname, checkout_id):
output_of_other_completed_requests = frappe.get_all("Integration Request", filters={
'name': ['!=', checkout_id],
'reference_doctype': reference_doctype,
'reference_docname': reference_docname,
'status': 'Completed'
}, pluck="output")
mpesa_receipts, completed_payments = [], []
for out in output_of_other_completed_requests:
out = frappe._dict(loads(out))
item_response = out["CallbackMetadata"]["Item"]
completed_amount = fetch_param_value(item_response, "Amount", "Name")
completed_mpesa_receipt = fetch_param_value(item_response, "MpesaReceiptNumber", "Name")
completed_payments.append(completed_amount)
mpesa_receipts.append(completed_mpesa_receipt)
return mpesa_receipts, completed_payments
def get_account_balance(request_payload):
"""Call account balance API to send the request to the Mpesa Servers."""
try:
mpesa_settings = frappe.get_doc("Mpesa Settings", request_payload.get("reference_docname"))
env = "production" if not mpesa_settings.sandbox else "sandbox"
connector = MpesaConnector(env=env,
app_key=mpesa_settings.consumer_key,
app_secret=mpesa_settings.get_password("consumer_secret"))
callback_url = get_request_site_address(True) + "/api/method/erpnext.erpnext_integrations.doctype.mpesa_settings.mpesa_settings.process_balance_info"
response = connector.get_balance(mpesa_settings.initiator_name, mpesa_settings.security_credential, mpesa_settings.till_number, 4, mpesa_settings.name, callback_url, callback_url)
return response
except Exception:
frappe.log_error(title=_("Account Balance Processing Error"))
frappe.throw(_("Please check your configuration and try again"), title=_("Error"))
@frappe.whitelist(allow_guest=True)
def process_balance_info(**kwargs):
"""Process and store account balance information received via callback from the account balance API call."""
account_balance_response = frappe._dict(kwargs["Result"])
conversation_id = getattr(account_balance_response, "ConversationID", "")
request = frappe.get_doc("Integration Request", conversation_id)
if request.status == "Completed":
return
transaction_data = frappe._dict(loads(request.data))
if account_balance_response["ResultCode"] == 0:
try:
result_params = account_balance_response["ResultParameters"]["ResultParameter"]
balance_info = fetch_param_value(result_params, "AccountBalance", "Key")
balance_info = format_string_to_json(balance_info)
ref_doc = frappe.get_doc(transaction_data.reference_doctype, transaction_data.reference_docname)
ref_doc.db_set("account_balance", balance_info)
request.handle_success(account_balance_response)
frappe.publish_realtime("refresh_mpesa_dashboard", doctype="Mpesa Settings",
docname=transaction_data.reference_docname, user=transaction_data.owner)
except Exception:
request.handle_failure(account_balance_response)
frappe.log_error(title=_("Mpesa Account Balance Processing Error"), message=account_balance_response)
else:
request.handle_failure(account_balance_response)
def format_string_to_json(balance_info):
"""
Format string to json.
e.g: '''Working Account|KES|481000.00|481000.00|0.00|0.00'''
=> {'Working Account': {'current_balance': '481000.00',
'available_balance': '481000.00',
'reserved_balance': '0.00',
'uncleared_balance': '0.00'}}
"""
balance_dict = frappe._dict()
for account_info in balance_info.split("&"):
account_info = account_info.split('|')
balance_dict[account_info[0]] = dict(
current_balance=fmt_money(account_info[2], currency="KES"),
available_balance=fmt_money(account_info[3], currency="KES"),
reserved_balance=fmt_money(account_info[4], currency="KES"),
uncleared_balance=fmt_money(account_info[5], currency="KES")
)
return dumps(balance_dict)
def fetch_param_value(response, key, key_field):
"""Fetch the specified key from list of dictionary. Key is identified via the key field."""
for param in response:
if param[key_field] == key:
return param["Value"] | en | 0.83931 | # -*- coding: utf-8 -*- # Copyright (c) 2020, Frappe Technologies and contributors # For license information, please see license.txt # required to fetch the bank account details from the payment gateway account # make multiple requests # 480/150 = ceil(3.2) = 4 # for 4th request, 480 - (150 * 3) = 30 Response received from API calls returns a global identifier for each transaction, this code is returned during the callback. # check error response # global checkout id used as request name Generate stk push by making a API call to the stk push API. # for sandbox, business shortcode is same as till number Add country code and strip leading zeroes from the phone number. Verify the transaction result received via callback from stk. # for multiple integration request made against a pos invoice # for reporting successfull callback to point of sale ui Call account balance API to send the request to the Mpesa Servers. Process and store account balance information received via callback from the account balance API call. Format string to json. e.g: '''Working Account|KES|481000.00|481000.00|0.00|0.00''' => {'Working Account': {'current_balance': '481000.00', 'available_balance': '481000.00', 'reserved_balance': '0.00', 'uncleared_balance': '0.00'}} Fetch the specified key from list of dictionary. Key is identified via the key field. | 1.841361 | 2 |
static/sourcecode/ranking-notes.py | twitter/birdwatch | 102 | 6619049 | <reponame>twitter/birdwatch
import pandas as pd

# Raw Birdwatch data exports: one row per note and one row per rating.
notes = pd.read_csv('notes-00000.tsv', sep='\t')
ratings = pd.read_csv('ratings-00000.tsv', sep='\t')

## Note: this code snippet's results won't match the results of Birdwatch in production,
## because this code snippet doesn't weight ratings by contributors' helpfulness scores.

# Collapse the two rating schemas (legacy boolean 'helpful' and the newer
# 'helpfulnessLevel' enum) onto a single numeric score in [0, 1].
ratings['helpfulScore'] = 0
ratings.loc[ratings['helpful']==1,'helpfulScore'] = 1
ratings.loc[ratings['helpfulnessLevel']=='SOMEWHAT_HELPFUL','helpfulScore'] = 0.5
ratings.loc[ratings['helpfulnessLevel']=='HELPFUL','helpfulScore'] = 1

# Join notes with their ratings; one output row per (note, rating) pair.
# Fix: the suffixes were markdown-escaped as "\_note"/"\_rating", which would
# name a colliding column 'createdAtMillis\_note' while later code reads
# 'createdAtMillis_note'; the suffix must be a plain underscore.
ratingsWithNotes = notes.set_index('noteId').join(ratings.set_index('noteId'), lsuffix="_note", rsuffix="_rating", how='inner')
ratingsWithNotes['numRatings'] = 1
def getScoredNotesForTweet(
    tweetId,
    minRatingsNeeded = 5,
    minHelpfulnessRatioNeededHelpful = 0.84,
    maxHelpfulnessRatioNeededNotHelpful = .29,
    minRatingsToGetTag = 2,
):
    """Score every Birdwatch note on *tweetId* and return them ranked.

    Reads the module-level ``ratingsWithNotes`` join (one row per rating).

    Parameters:
        tweetId: tweet whose notes should be scored.
        minRatingsNeeded: ratings required before a note may leave
            'Needs More Ratings'.
        minHelpfulnessRatioNeededHelpful: ratio at or above which a note
            becomes 'Currently Rated Helpful'.
        maxHelpfulnessRatioNeededNotHelpful: ratio at or below which a note
            becomes 'Currently Not Rated Helpful'.
        minRatingsToGetTag: minimum times a reason tag must be selected for
            it to be shown; a rated note that cannot surface two such tags
            is demoted back to 'Needs More Ratings'.

    Returns:
        DataFrame indexed by noteId, sorted helpful-first.
    """
    # Aggregate all ratings of this tweet's notes: one row per note, with
    # helpfulScore / numRatings / tag columns summed across raters.
    ratingsWithNotesForTweet = ratingsWithNotes[ratingsWithNotes['tweetId']==tweetId]
    scoredNotes = ratingsWithNotesForTweet.groupby('noteId').sum()
    scoredNotes['helpfulnessRatio'] = scoredNotes['helpfulScore']/scoredNotes['numRatings']

    # Reason-tag columns; list position doubles as the tiebreak priority
    # (see tiebreakOrder below).
    helpfulWhys = ['helpfulOther', 'helpfulInformative', 'helpfulClear',
       'helpfulGoodSources', 'helpfulEmpathetic', 'helpfulUniqueContext']
    notHelpfulWhys = ['notHelpfulOther', 'notHelpfulOpinionSpeculationOrBias', 'notHelpfulSourcesMissingOrUnreliable',
        'notHelpfulMissingKeyPoints', 'notHelpfulArgumentativeOrInflammatory', 'notHelpfulIncorrect',
        'notHelpfulOffTopic', 'notHelpfulHardToUnderstand', 'notHelpfulSpamHarassmentOrAbuse', 'notHelpfulOutdated']

    # Default status; promote/demote only once enough ratings exist and the
    # helpfulness ratio clears the relevant threshold.
    scoredNotes['ratingStatus'] = 'Needs More Ratings'
    scoredNotes.loc[(scoredNotes['numRatings'] >= minRatingsNeeded) & (scoredNotes['helpfulnessRatio'] >= minHelpfulnessRatioNeededHelpful), 'ratingStatus'] = 'Currently Rated Helpful'
    scoredNotes.loc[(scoredNotes['numRatings'] >= minRatingsNeeded) & (scoredNotes['helpfulnessRatio'] <= maxHelpfulnessRatioNeededNotHelpful), 'ratingStatus'] = 'Currently Not Rated Helpful'
    # Fix: the original wrote np.nan, but numpy is never imported in this
    # snippet (NameError at runtime); float('nan') is the same value
    # without the dependency.
    scoredNotes['firstTag'] = float('nan')
    scoredNotes['secondTag'] = float('nan')

    def topWhys(row):
        # Attach the two most-selected reason tags for a rated note; demote
        # the note back to 'Needs More Ratings' when fewer than two qualify.
        if row['ratingStatus']=='Currently Rated Helpful':
            whyCounts = pd.DataFrame(row[helpfulWhys])
        elif row['ratingStatus']=='Currently Not Rated Helpful':
            whyCounts = pd.DataFrame(row[notHelpfulWhys])
        else:
            return row
        whyCounts.columns = ['tagCounts']
        whyCounts['tiebreakOrder'] = range(len(whyCounts))
        whyCounts = whyCounts[whyCounts['tagCounts'] >= minRatingsToGetTag]
        topTags = whyCounts.sort_values(by=['tagCounts','tiebreakOrder'], ascending=False)[:2]
        if (len(topTags) < 2):
            row['ratingStatus'] = 'Needs More Ratings'
        else:
            row['firstTag'] = topTags.index[0]
            row['secondTag'] = topTags.index[1]
        return row

    scoredNotes = scoredNotes.apply(topWhys, axis=1)
    # Re-attach the note text for display.
    scoredNotes = scoredNotes.join(notes[['noteId','summary']].set_index('noteId'), lsuffix="_note", rsuffix="_rating", how='inner')
    # NOTE(review): orderWithinStatus holds a *column name string*, not the
    # column's values, so within a status band the secondary sort compares
    # identical strings (no-op). Preserved as-is; confirm intent upstream.
    scoredNotes['orderWithinStatus'] = 'helpfulnessRatio'
    scoredNotes.loc[scoredNotes['ratingStatus']=='Needs More Ratings', 'orderWithinStatus'] = 'createdAtMillis_note'
    # Helpful notes first, then unrated, then not-helpful.
    statusOrder = {'Currently Rated Helpful':2, 'Needs More Ratings':1, 'Currently Not Rated Helpful':0}
    scoredNotes['statusOrder'] = scoredNotes.apply(lambda x: statusOrder[x['ratingStatus']], axis=1)
    return scoredNotes.sort_values(by=['statusOrder','orderWithinStatus'], ascending=False)
import pandas as pd

# Raw Birdwatch data exports: one row per note and one row per rating.
# Fix: a stray dataset-table delimiter ("| ") was fused onto the first
# import line, making the snippet a SyntaxError; it has been removed.
notes = pd.read_csv('notes-00000.tsv', sep='\t')
ratings = pd.read_csv('ratings-00000.tsv', sep='\t')

## Note: this code snippet's results won't match the results of Birdwatch in production,
## because this code snippet doesn't weight ratings by contributors' helpfulness scores.

# Collapse the two rating schemas (legacy boolean 'helpful' and the newer
# 'helpfulnessLevel' enum) onto a single numeric score in [0, 1].
ratings['helpfulScore'] = 0
ratings.loc[ratings['helpful']==1,'helpfulScore'] = 1
ratings.loc[ratings['helpfulnessLevel']=='SOMEWHAT_HELPFUL','helpfulScore'] = 0.5
ratings.loc[ratings['helpfulnessLevel']=='HELPFUL','helpfulScore'] = 1

# Join notes with their ratings; one output row per (note, rating) pair.
# Fix: the suffixes were markdown-escaped as "\_note"/"\_rating" while later
# code reads 'createdAtMillis_note'; plain underscores are required.
ratingsWithNotes = notes.set_index('noteId').join(ratings.set_index('noteId'), lsuffix="_note", rsuffix="_rating", how='inner')
ratingsWithNotes['numRatings'] = 1
def getScoredNotesForTweet(
    tweetId,
    minRatingsNeeded = 5,
    minHelpfulnessRatioNeededHelpful = 0.84,
    maxHelpfulnessRatioNeededNotHelpful = .29,
    minRatingsToGetTag = 2,
):
    """Score every Birdwatch note on *tweetId* and return them ranked.

    Reads the module-level ``ratingsWithNotes`` join (one row per rating).

    Parameters:
        tweetId: tweet whose notes should be scored.
        minRatingsNeeded: ratings required before a note may leave
            'Needs More Ratings'.
        minHelpfulnessRatioNeededHelpful: ratio at or above which a note
            becomes 'Currently Rated Helpful'.
        maxHelpfulnessRatioNeededNotHelpful: ratio at or below which a note
            becomes 'Currently Not Rated Helpful'.
        minRatingsToGetTag: minimum times a reason tag must be selected for
            it to be shown; a rated note that cannot surface two such tags
            is demoted back to 'Needs More Ratings'.

    Returns:
        DataFrame indexed by noteId, sorted helpful-first.
    """
    # Aggregate all ratings of this tweet's notes: one row per note, with
    # helpfulScore / numRatings / tag columns summed across raters.
    ratingsWithNotesForTweet = ratingsWithNotes[ratingsWithNotes['tweetId']==tweetId]
    scoredNotes = ratingsWithNotesForTweet.groupby('noteId').sum()
    scoredNotes['helpfulnessRatio'] = scoredNotes['helpfulScore']/scoredNotes['numRatings']

    # Reason-tag columns; list position doubles as the tiebreak priority
    # (see tiebreakOrder below).
    helpfulWhys = ['helpfulOther', 'helpfulInformative', 'helpfulClear',
       'helpfulGoodSources', 'helpfulEmpathetic', 'helpfulUniqueContext']
    notHelpfulWhys = ['notHelpfulOther', 'notHelpfulOpinionSpeculationOrBias', 'notHelpfulSourcesMissingOrUnreliable',
        'notHelpfulMissingKeyPoints', 'notHelpfulArgumentativeOrInflammatory', 'notHelpfulIncorrect',
        'notHelpfulOffTopic', 'notHelpfulHardToUnderstand', 'notHelpfulSpamHarassmentOrAbuse', 'notHelpfulOutdated']

    # Default status; promote/demote only once enough ratings exist and the
    # helpfulness ratio clears the relevant threshold.
    scoredNotes['ratingStatus'] = 'Needs More Ratings'
    scoredNotes.loc[(scoredNotes['numRatings'] >= minRatingsNeeded) & (scoredNotes['helpfulnessRatio'] >= minHelpfulnessRatioNeededHelpful), 'ratingStatus'] = 'Currently Rated Helpful'
    scoredNotes.loc[(scoredNotes['numRatings'] >= minRatingsNeeded) & (scoredNotes['helpfulnessRatio'] <= maxHelpfulnessRatioNeededNotHelpful), 'ratingStatus'] = 'Currently Not Rated Helpful'
    # Fix: the original wrote np.nan, but numpy is never imported in this
    # snippet (NameError at runtime); float('nan') is the same value
    # without the dependency.
    scoredNotes['firstTag'] = float('nan')
    scoredNotes['secondTag'] = float('nan')

    def topWhys(row):
        # Attach the two most-selected reason tags for a rated note; demote
        # the note back to 'Needs More Ratings' when fewer than two qualify.
        if row['ratingStatus']=='Currently Rated Helpful':
            whyCounts = pd.DataFrame(row[helpfulWhys])
        elif row['ratingStatus']=='Currently Not Rated Helpful':
            whyCounts = pd.DataFrame(row[notHelpfulWhys])
        else:
            return row
        whyCounts.columns = ['tagCounts']
        whyCounts['tiebreakOrder'] = range(len(whyCounts))
        whyCounts = whyCounts[whyCounts['tagCounts'] >= minRatingsToGetTag]
        topTags = whyCounts.sort_values(by=['tagCounts','tiebreakOrder'], ascending=False)[:2]
        if (len(topTags) < 2):
            row['ratingStatus'] = 'Needs More Ratings'
        else:
            row['firstTag'] = topTags.index[0]
            row['secondTag'] = topTags.index[1]
        return row

    scoredNotes = scoredNotes.apply(topWhys, axis=1)
    # Re-attach the note text for display.
    scoredNotes = scoredNotes.join(notes[['noteId','summary']].set_index('noteId'), lsuffix="_note", rsuffix="_rating", how='inner')
    # NOTE(review): orderWithinStatus holds a *column name string*, not the
    # column's values, so within a status band the secondary sort compares
    # identical strings (no-op). Preserved as-is; confirm intent upstream.
    scoredNotes['orderWithinStatus'] = 'helpfulnessRatio'
    scoredNotes.loc[scoredNotes['ratingStatus']=='Needs More Ratings', 'orderWithinStatus'] = 'createdAtMillis_note'
    # Helpful notes first, then unrated, then not-helpful.
    statusOrder = {'Currently Rated Helpful':2, 'Needs More Ratings':1, 'Currently Not Rated Helpful':0}
    scoredNotes['statusOrder'] = scoredNotes.apply(lambda x: statusOrder[x['ratingStatus']], axis=1)
    return scoredNotes.sort_values(by=['statusOrder','orderWithinStatus'], ascending=False)
test/test_auth.py | apivideo/api.video-python | 6 | 6619050 | """
api.video
api.video is an API that encodes on the go to facilitate immediate playback, enhancing viewer streaming experiences across multiple devices and platforms. You can stream live or on-demand online videos within minutes. # noqa: E501
Contact: <EMAIL>
"""
import time
import unittest
import apivideo
from apivideo.exceptions import ApiAuthException
from urllib3_mock import Responses
responses = Responses()
AUTH_RESPONSE = """
{
"access_token": "<KEY>",
"refresh_token": "<PASSWORD>",
"token_type": "Bearer",
"expires_in": 11
}
"""
class TestAuth(unittest.TestCase):
    """Token lifecycle tests for AuthenticatedApiClient.

    All HTTP traffic is intercepted with urllib3_mock; no network is used.
    """

    def setUp(self) -> None:
        # Fresh client per test; the key string is never validated locally.
        self.client = apivideo.AuthenticatedApiClient("__KEY__")

    @responses.activate
    def test_connect_fail(self):
        """An auth payload without tokens raises, and API calls stay blocked."""
        # Consistency fix: every other responses.add() passes the status as a
        # plain literal; the redundant int(200) cast is dropped.
        responses.add('POST', '/auth/api-key', body="{}", status=200, content_type='application/json')
        with self.assertRaises(ApiAuthException):
            self.client.connect()
        with self.assertRaises(ApiAuthException):
            self.client.call_api('/test', 'GET')

    @responses.activate
    def test_connect_success(self):
        """A valid auth payload lets subsequent API calls through."""
        responses.add('POST', '/auth/api-key', body=AUTH_RESPONSE, status=200, content_type='application/json')
        responses.add('GET', '/test', body="{}", status=200, content_type='application/json')
        self.client.connect()
        self.client.call_api('/test', 'GET')

    @responses.activate
    def test_refresh_fail(self):
        """A refresh returning an empty payload invalidates the session."""
        responses.add('POST', '/auth/api-key', body=AUTH_RESPONSE, status=200, content_type='application/json')
        responses.add('POST', '/auth/refresh', body="{}", status=200, content_type='application/json')
        responses.add('GET', '/test', body="{}", status=200, content_type='application/json')
        # Refreshing before a successful connect must fail.
        with self.assertRaises(ApiAuthException):
            self.client.refresh_token()
        self.client.connect()
        self.client.call_api('/test', 'GET')
        # A broken refresh response fails and poisons later calls.
        with self.assertRaises(ApiAuthException):
            self.client.refresh_token()
        with self.assertRaises(ApiAuthException):
            self.client.call_api('/test', 'GET')

    @responses.activate
    def test_refresh_success(self):
        """A valid refresh keeps the client usable afterwards."""
        responses.add('POST', '/auth/api-key', body=AUTH_RESPONSE, status=200, content_type='application/json')
        responses.add('POST', '/auth/refresh', body=AUTH_RESPONSE, status=200, content_type='application/json')
        responses.add('GET', '/test', body="{}", status=200, content_type='application/json')
        self.client.connect()
        self.client.call_api('/test', 'GET')
        self.client.refresh_token()
        self.client.call_api('/test', 'GET')

    @responses.activate
    def test_autorefresh_fail(self):
        """A failed refresh during the context-managed session blocks calls.

        NOTE(review): the sleep + call-count assertion suggests the context
        manager refreshes the token in the background — confirm in the client.
        """
        responses.add('POST', '/auth/api-key', body=AUTH_RESPONSE, status=200, content_type='application/json')
        responses.add('POST', '/auth/refresh', body="{}", status=200, content_type='application/json')
        with apivideo.AuthenticatedApiClient("__KEY__") as client:
            time.sleep(2)
            with self.assertRaises(ApiAuthException):
                client.call_api('/test', 'GET')
        # Exactly the initial auth call and one refresh attempt were made.
        self.assertEqual(2, len(responses.calls))
| """
api.video
api.video is an API that encodes on the go to facilitate immediate playback, enhancing viewer streaming experiences across multiple devices and platforms. You can stream live or on-demand online videos within minutes. # noqa: E501
Contact: <EMAIL>
"""
import time
import unittest
import apivideo
from apivideo.exceptions import ApiAuthException
from urllib3_mock import Responses
responses = Responses()
AUTH_RESPONSE = """
{
"access_token": "<KEY>",
"refresh_token": "<PASSWORD>",
"token_type": "Bearer",
"expires_in": 11
}
"""
class TestAuth(unittest.TestCase):
    """Token lifecycle tests for AuthenticatedApiClient.

    All HTTP traffic is intercepted with urllib3_mock; no network is used.
    """

    def setUp(self) -> None:
        # Fresh client per test; the key string is never validated locally.
        self.client = apivideo.AuthenticatedApiClient("__KEY__")

    @responses.activate
    def test_connect_fail(self):
        """An auth payload without tokens raises, and API calls stay blocked."""
        # Consistency fix: every other responses.add() passes the status as a
        # plain literal; the redundant int(200) cast is dropped.
        responses.add('POST', '/auth/api-key', body="{}", status=200, content_type='application/json')
        with self.assertRaises(ApiAuthException):
            self.client.connect()
        with self.assertRaises(ApiAuthException):
            self.client.call_api('/test', 'GET')

    @responses.activate
    def test_connect_success(self):
        """A valid auth payload lets subsequent API calls through."""
        responses.add('POST', '/auth/api-key', body=AUTH_RESPONSE, status=200, content_type='application/json')
        responses.add('GET', '/test', body="{}", status=200, content_type='application/json')
        self.client.connect()
        self.client.call_api('/test', 'GET')

    @responses.activate
    def test_refresh_fail(self):
        """A refresh returning an empty payload invalidates the session."""
        responses.add('POST', '/auth/api-key', body=AUTH_RESPONSE, status=200, content_type='application/json')
        responses.add('POST', '/auth/refresh', body="{}", status=200, content_type='application/json')
        responses.add('GET', '/test', body="{}", status=200, content_type='application/json')
        # Refreshing before a successful connect must fail.
        with self.assertRaises(ApiAuthException):
            self.client.refresh_token()
        self.client.connect()
        self.client.call_api('/test', 'GET')
        # A broken refresh response fails and poisons later calls.
        with self.assertRaises(ApiAuthException):
            self.client.refresh_token()
        with self.assertRaises(ApiAuthException):
            self.client.call_api('/test', 'GET')

    @responses.activate
    def test_refresh_success(self):
        """A valid refresh keeps the client usable afterwards."""
        responses.add('POST', '/auth/api-key', body=AUTH_RESPONSE, status=200, content_type='application/json')
        responses.add('POST', '/auth/refresh', body=AUTH_RESPONSE, status=200, content_type='application/json')
        responses.add('GET', '/test', body="{}", status=200, content_type='application/json')
        self.client.connect()
        self.client.call_api('/test', 'GET')
        self.client.refresh_token()
        self.client.call_api('/test', 'GET')

    @responses.activate
    def test_autorefresh_fail(self):
        """A failed refresh during the context-managed session blocks calls.

        NOTE(review): the sleep + call-count assertion suggests the context
        manager refreshes the token in the background — confirm in the client.
        """
        responses.add('POST', '/auth/api-key', body=AUTH_RESPONSE, status=200, content_type='application/json')
        responses.add('POST', '/auth/refresh', body="{}", status=200, content_type='application/json')
        with apivideo.AuthenticatedApiClient("__KEY__") as client:
            time.sleep(2)
            with self.assertRaises(ApiAuthException):
                client.call_api('/test', 'GET')
        # Exactly the initial auth call and one refresh attempt were made.
        self.assertEqual(2, len(responses.calls))
| en | 0.696092 | api.video api.video is an API that encodes on the go to facilitate immediate playback, enhancing viewer streaming experiences across multiple devices and platforms. You can stream live or on-demand online videos within minutes. # noqa: E501 Contact: <EMAIL> { "access_token": "<KEY>", "refresh_token": "<PASSWORD>", "token_type": "Bearer", "expires_in": 11 } | 2.942552 | 3 |