1666802
|
from rest_framework import serializers
class SendPasswordResetEmailSerializer(serializers.Serializer):
username = serializers.CharField(required=True)
publicUrl = serializers.CharField(required=True)
|
1666819
|
import numpy as np
from ...utilities import get_num_atom_ids, normalize_ids
from .utilities import get_maximum_diameter
def test_get_maximum_diameter(case_data, get_atom_ids):
"""
Test :meth:`.Molecule.get_maximum_diameter`.
Parameters
----------
case_data : :class:`.CaseData`
A test case. Holds the molecule to test and the correct atomic
positions of its atoms.
get_atom_ids : :class:`callable`
Takes a single parameter, `molecule`, and returns a valid
`atom_ids` parameter for
:meth:`.Molecule.get_maximum_diameter`. This allows the
testing of different values of this parameter.
Returns
-------
None : :class:`NoneType`
Notes
-----
This test compares the result of
:meth:`.Molecule.get_maximum_diameter` to the result of
:func:`.get_maximum_diameter`, which is a utility function defined
for the purposes of this test. Because
:func:`.get_maximum_diameter` is tested independently, in
:mod:`.test_get_maximum_diameter_helper`, if its tests pass, then
it can be assumed that :func:`.get_maximum_diameter` gives
correct results.
Now, assuming that :func:`.get_maximum_diameter` passed all of its
tests, this test compares the results of
:meth:`.Molecule.get_maximum_diameter` to the results of
:func:`.get_maximum_diameter`. If the results do not match, the
fault can be placed on :meth:`.Molecule.get_maximum_diameter`,
because :func:`.get_maximum_diameter` has already been verified to
be correct by its own tests.
"""
_test_get_maximum_diameter(
molecule=case_data.molecule,
get_atom_ids=get_atom_ids,
maximum_diameter=get_maximum_diameter(
position_matrix=case_data.position_matrix,
atom_ids=tuple(normalize_ids(
molecule=case_data.molecule,
ids=get_atom_ids(case_data.molecule),
)),
),
)
def _test_get_maximum_diameter(
molecule,
get_atom_ids,
maximum_diameter,
):
"""
Test :meth:`.Molecule.get_maximum_diameter`.
Parameters
----------
molecule : :class:`.Molecule`
The molecule to test.
get_atom_ids : :class:`callable`
Takes a single parameter, `molecule`, and returns a valid
`atom_ids` parameter for
:meth:`.Molecule.get_maximum_diameter`. This allows the
testing of different values of this parameter.
maximum_diameter : :class:`float`
The correct maximum_diameter.
Returns
-------
None : :class:`NoneType`
"""
num_atom_ids = get_num_atom_ids(molecule, get_atom_ids)
if num_atom_ids == 1:
result = molecule.get_maximum_diameter(
atom_ids=get_atom_ids(molecule),
)
assert result == 0
return
assert np.allclose(
a=maximum_diameter,
b=molecule.get_maximum_diameter(get_atom_ids(molecule)),
atol=1e-32,
)
|
1666849
|
import mxnet as mx
import logging
def get_setting_params(**kwargs):
# bn_params
bn_mom = kwargs.get('bn_mom', 0.9)
bn_eps = kwargs.get('bn_eps', 2e-5)
fix_gamma = kwargs.get('fix_gamma', False)
use_global_stats = kwargs.get('use_global_stats', False)
# net_setting param
workspace = kwargs.get('workspace', 512)
group_base = kwargs.get('group_base', 1)
setting_params={}
setting_params['bn_mom']=bn_mom
setting_params['bn_eps']=bn_eps
setting_params['fix_gamma'] = fix_gamma
setting_params['use_global_stats'] = use_global_stats
setting_params['workspace']=workspace
setting_params['group_base'] =group_base
return setting_params
'''
ConvOP [0,1,2] sep-conv/0, mobile-ib-conv-3/1, mobile-ib-conv-6/2.
KernelSize [0,1,2] 3x3/0, 5x5/1, 7x7/2.
SkipOp [0,1] no/0, id/1.
Layers [0,1,2,3] 1/0, 2/1, 3/2, 4/3.
WidthFactor [0.5, 1.0, 1.5, 2.0]
'''
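# Illustrative decoding of the scheme above (hypothetical values): a stage coded as
# [1, 0, 1, 2, 1.0] maps to a mobile-ib-conv-3 block (ConvOP=1) with a 3x3 kernel
# (KernelSize=0), identity skip enabled (SkipOp=1), 3 repeated layers (Layers=2) and
# a width factor of 1.0.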
def get_eatnet_param(net_code,_mbv2base=True):
assert type(net_code) is list
_basic_chs = [16, 24, 32, 64, 96, 160, 320]
block_chs = [32, ]
input_output_filter = []
conv_ops = []
repeat_num = []
first_stride = [0, 1, 1, 1, 0, 1, 0]
is_id_skip = []
kernel_size = []
filter_depth = []
for i in range(len(net_code)):
conv_ops.append('sep-conv' if net_code[i][0]<1 else 'mb-conv')
repeat_num.append(net_code[i][3]+1)
is_id_skip.append(net_code[i][2]>0)
kernel_size.append((net_code[i][1]+1)*2+1)
# filter_depth
if net_code[i][0]<1:
filter_depth.append(-1)
elif net_code[i][0]==1:
filter_depth.append(3)
elif net_code[i][0]==2:
filter_depth.append(6)
else:
raise ValueError('Wrong conv_ops {}'.format(conv_ops))
# input_output_filter
if _mbv2base == True:
block_chs.append(int(_basic_chs[i] * net_code[i][-1]))
else:
block_chs.append(int(block_chs[i] * net_code[i][-1]))
input_output_filter.append([block_chs[-2],block_chs[-1]])
num_stage = 7
net_params={}
net_params['conv_ops']=conv_ops
net_params['num_stage']=num_stage
net_params['repeat_num']=repeat_num
net_params['input_output_filter']=input_output_filter
net_params['first_stride']=first_stride
net_params['is_id_skip'] = is_id_skip
net_params['kernel_size']=kernel_size
net_params['filter_depth'] = filter_depth
return net_params
def inverted_residual_block(data,
input_channels,
output_channels,
setting_params,
multiplier=1,
kernel=(3,3),
stride=(1,1),
t=4,
id_skip=True,
dilate=1,
with_dilate=False,
name=None,
*args,
**kwargs):
bn_mom = setting_params['bn_mom']
bn_eps = setting_params['bn_eps']
fix_gamma = setting_params['fix_gamma']
use_global_stats = setting_params['use_global_stats']
workspace = setting_params['workspace']
group_base = setting_params['group_base']
assert stride[0] == stride[1]
in_channels= int(input_channels*multiplier)*t
out_channels=int(output_channels*multiplier)
pad = (((kernel[0] - 1) * dilate + 1) // 2,
((kernel[1] - 1) * dilate + 1) // 2)
if id_skip:
if (input_channels == output_channels) and (stride[0]==1):
short_cut = data
else:
bottleneck_bypass = mx.sym.Convolution(data=data,
num_filter=out_channels,
kernel=(1, 1),
pad=(0, 0),
stride=(1, 1) if with_dilate else stride,
no_bias=True,
num_group=1,
workspace=workspace,
name=name + '_bypass_conv')
bottleneck_bypass = mx.sym.BatchNorm(data=bottleneck_bypass,
fix_gamma=fix_gamma,
eps=bn_eps,
momentum=bn_mom,
use_global_stats=use_global_stats,
name=name + '_bypass_bn')
short_cut= bottleneck_bypass
if with_dilate:
stride=(1, 1)
bottleneck_a = mx.sym.Convolution(data=data,
num_filter=in_channels,
kernel=(1,1),
pad=(0,0),
stride=(1,1),
no_bias=True,
num_group=1,
workspace=workspace,
name=name + '_conv2d_pointwise')
bottleneck_a = mx.sym.BatchNorm(data=bottleneck_a,
fix_gamma=fix_gamma,
eps=bn_eps,
momentum=bn_mom,
use_global_stats=use_global_stats,
name=name + '_conv2d_pointwise_bn')
bottleneck_a = mx.sym.Activation(data=bottleneck_a,
act_type='relu',
name=name + '_conv2d_pointwise_relu')
bottleneck_b = mx.sym.Convolution(data=bottleneck_a,
num_filter=in_channels,
kernel=kernel,
pad=pad,
stride=stride,
no_bias=True,
num_group=int(in_channels/group_base),
dilate=(dilate, dilate),
workspace=workspace,
name=name + '_conv2d_depthwise')
bottleneck_b = mx.sym.BatchNorm(data=bottleneck_b,
fix_gamma=fix_gamma,
eps=bn_eps,
momentum=bn_mom,
use_global_stats=use_global_stats,
name=name + '_conv2d_depthwise_bn')
bottleneck_b = mx.sym.Activation(data=bottleneck_b,
act_type='relu',
name=name + '_conv2d_depthwise_relu')
bottleneck_c = mx.sym.Convolution(data=bottleneck_b,
num_filter=out_channels,
kernel=(1, 1),
pad=(0, 0),
stride=(1, 1),
no_bias=True,
num_group=1,
workspace=workspace,
name=name + '_conv2d_linear_transform')
bottleneck_c = mx.sym.BatchNorm(data=bottleneck_c,
fix_gamma=fix_gamma,
eps=bn_eps,
momentum=bn_mom,
use_global_stats=use_global_stats,
name=name + '_conv2d_linear_transform_bn')
if id_skip:
out_data=bottleneck_c+short_cut
else:
out_data=bottleneck_c
return out_data
def separable_conv2d(data,
input_channels,
output_channels,
setting_params,
kernel,
id_skip=False,
stride=(1,1),
bias=False,
bn_dw_out=True,
act_dw_out=True,
bn_pw_out=True,
act_pw_out=True,
dilate=1,
with_dilate=False,
name=None,
*args,
**kwargs
):
bn_mom = setting_params['bn_mom']
bn_eps = setting_params['bn_eps']
fix_gamma = setting_params['fix_gamma']
use_global_stats = setting_params['use_global_stats']
workspace = setting_params['workspace']
group_base = setting_params['group_base']
pad = (((kernel[0] - 1) * dilate + 1) // 2,
((kernel[1] - 1) * dilate + 1) // 2)
if id_skip:
if (input_channels == output_channels) and (stride[0]==1):
short_cut = data
else:
bottleneck_bypass = mx.sym.Convolution(data=data,
num_filter=output_channels,
kernel=(1, 1),
pad=(0, 0),
stride=(1, 1) if with_dilate else stride,
no_bias=True,
num_group=1,
workspace=workspace,
name=name + '_bypass_conv')
bottleneck_bypass = mx.sym.BatchNorm(data=bottleneck_bypass,
fix_gamma=fix_gamma,
eps=bn_eps,
momentum=bn_mom,
use_global_stats=use_global_stats,
name=name + '_bypass_bn')
short_cut= bottleneck_bypass
if with_dilate:
stride = (1, 1)
#depthwise
dw_out = mx.sym.Convolution(data=data,
num_filter=input_channels,
kernel=kernel,
pad=pad,
stride=stride,
no_bias=False if bias else True,
num_group=int(input_channels/group_base),
dilate=(dilate,dilate),
workspace=workspace,
name=name +'_conv2d_depthwise')
if bn_dw_out:
dw_out = mx.sym.BatchNorm(data=dw_out,
fix_gamma=fix_gamma,
eps=bn_eps,
momentum=bn_mom,
use_global_stats=use_global_stats,
name=name+'_conv2d_depthwise_bn')
if act_dw_out:
dw_out = mx.sym.Activation(data=dw_out,
act_type='relu',
name=name+'_conv2d_depthwise_relu')
#pointwise
pw_out = mx.sym.Convolution(data=dw_out,
num_filter=output_channels,
kernel=(1, 1),
stride=(1, 1),
pad=(0, 0),
num_group=1,
no_bias=False if bias else True,
workspace=workspace,
name=name+'_conv2d_pointwise')
if bn_pw_out:
pw_out = mx.sym.BatchNorm(data=pw_out,
fix_gamma=fix_gamma,
eps=bn_eps,
momentum=bn_mom,
use_global_stats=use_global_stats,
name=name + '_conv2d_pointwise_bn')
if act_pw_out:
pw_out = mx.sym.Activation(data=pw_out,
act_type='relu',
name=name + '_conv2d_pointwise_relu')
if id_skip:
out_data=pw_out#+short_cut
else:
out_data=pw_out
return out_data
def add_stage_backbone_block(data,
conv_ops,
setting_params,
stage,
repeat_num,
input_output_filter,
first_stride,
is_id_skip,
kernel_size,
filter_depth,
multiplier=1,
dilate=1,
with_dilate=False,
name=None):
conv = inverted_residual_block if conv_ops[stage]=='mb-conv' else separable_conv2d
data =conv(data=data,
setting_params=setting_params,
input_channels=input_output_filter[stage][0],
output_channels=input_output_filter[stage][1],
kernel=(kernel_size[stage], kernel_size[stage]),
stride=(2, 2) if first_stride[stage] else (1, 1),
t=filter_depth[stage],
id_skip=True if is_id_skip[stage] else False,
multiplier=multiplier,
dilate=dilate,
with_dilate=with_dilate,
name=name+'_stage%d_unit%d_%s' % (stage + 1, 1, conv_ops[stage])
if name else 'stage%d_unit%d_%s' % (stage + 1, 1, conv_ops[stage]))
for j in range(repeat_num[stage] - 1):
data =conv(data=data,
setting_params=setting_params,
input_channels=input_output_filter[stage][1],
output_channels=input_output_filter[stage][1],
kernel=(kernel_size[stage], kernel_size[stage]),
stride=(1, 1),
t=filter_depth[stage],
id_skip=True if is_id_skip[stage] else False,
multiplier=multiplier,
dilate=dilate,
with_dilate=with_dilate,
name=name+'_stage%d_unit%d_%s' % (stage + 1, j + 2, conv_ops[stage])
if name else 'stage%d_unit%d_%s' % (stage + 1, j + 2, conv_ops[stage]))
return data, data
def add_head_block(data,
num_filter,
setting_params,
multiplier,
kernel=(3, 3),
stride=(2, 2),
pad=(1, 1),
name=None):
bn_mom = setting_params['bn_mom']
bn_eps = setting_params['bn_eps']
fix_gamma = setting_params['fix_gamma']
use_global_stats = setting_params['use_global_stats']
workspace = setting_params['workspace']
channels = int(num_filter * multiplier)
conv1 = mx.sym.Convolution(data=data,
num_filter=channels,
kernel=kernel,
pad=pad,
stride=stride,
no_bias=True,
num_group=1,
workspace=workspace,
name=name+'_conv1'if name else 'conv1')
conv1 = mx.sym.BatchNorm(data=conv1,
fix_gamma=fix_gamma,
eps=bn_eps,
momentum=bn_mom,
use_global_stats=use_global_stats,
name=name+'_conv1_bn' if name else 'conv1_bn')
conv1 = mx.sym.Activation(data=conv1,
act_type='relu',
name=name+'_conv1_relu' if name else 'conv1_relu')
return conv1
def add_conv_1x1(data,
num_filter,
setting_params,
name=None):
bn_mom = setting_params['bn_mom']
bn_eps = setting_params['bn_eps']
fix_gamma = setting_params['fix_gamma']
use_global_stats = setting_params['use_global_stats']
workspace = setting_params['workspace']
data = mx.sym.Convolution(data=data,
num_filter=num_filter,
kernel=(1, 1),
stride=(1, 1),
pad=(0, 0),
no_bias=True,
workspace=workspace,
name=name+'_stage_conv1x1' if name else 'stage_conv1x1')
data = mx.sym.BatchNorm(data=data,
fix_gamma=fix_gamma,
eps=bn_eps,
momentum=bn_mom,
use_global_stats=use_global_stats,
name=name+'_stage_conv1x1_bn' if name else 'stage_conv1x1_bn')
data = mx.sym.Activation(data=data,
act_type='relu',
name=name+'_stage_conv1x1_relu' if name else 'stage_conv1x1_relu')
return data
def add_fc_cls_block(data,
grad_scale=1,
label_smooth=False,
smooth_alpha=0.1,
num_class=1000,
is_training=True,
softmax_name=None,
name=None):
pool1 = mx.symbol.Pooling(data=data,
global_pool=True,
pool_type='avg',
name=name+'_global_avg_pooling' if name else 'global_avg_pooling')
flat = mx.symbol.Flatten(data=pool1)
fc = mx.symbol.FullyConnected(data=flat,
num_hidden=num_class,
name=name+'_fc1' if name else 'fc1')
softmax = mx.symbol.SoftmaxOutput(data=fc,
grad_scale=grad_scale,
smooth_alpha=smooth_alpha if label_smooth else 0.0,
name=softmax_name if softmax_name else 'softmax')
return softmax
def conv_bn_relu(data, num_filter, kernel, setting_params, pad=(0,0), stride=(1,1), num_group=1, name=None):
bn_mom = setting_params['bn_mom']
bn_eps = setting_params['bn_eps']
fix_gamma = setting_params['fix_gamma']
use_global_stats = setting_params['use_global_stats']
workspace = setting_params['workspace']
data = mx.sym.Convolution(data=data,
num_filter=num_filter,
kernel=kernel,
pad=pad,
stride=stride,
no_bias=True,
num_group=num_group,
workspace=workspace,
name=name + '_conv2d')
data = mx.sym.BatchNorm(data=data,
fix_gamma=fix_gamma,
eps=bn_eps,
momentum=bn_mom,
use_global_stats=use_global_stats,
name=name + '_bn')
data = mx.sym.Activation(data=data,
act_type='relu',
name=name + '_relu')
return data
def auxiliary_head(data, setting_params, aux_head_weight=0.4, num_class=1000, name='aux_head'):
data1 = mx.sym.Activation(data=data, act_type='relu', name=name + '_relu')
data = mx.symbol.Pooling(data=data1,
kernel=(5, 5),
pool_type='avg',
global_pool=False,
stride=(2,2),
name=name+'_avg_pooling')
data = conv_bn_relu(data=data,
num_filter=128,
kernel=(1,1),
setting_params=setting_params,
name=name+'_conv1')
data = conv_bn_relu(data=data,
num_filter=768,
kernel=(2,2),
setting_params=setting_params,
name=name+'_conv2')
data = add_fc_cls_block(data=data,
grad_scale=aux_head_weight,
num_class=num_class,
softmax_name='aux_softmax',
name=name)
return data
def get_symbol(**kwargs):
net_code=kwargs.get('net_code',None)
multiplier = kwargs.get('multiplier', 1.0)
is_training=kwargs.get('is_training',True)
num_class=kwargs.get('num_class',1000)
label_smooth=kwargs.get('label_smooth', False)
smooth_alpha=kwargs.get('smooth_alpha',0.1)
mbv2base=kwargs.get('mbv2base',True)
use_aux_head = kwargs.get('use_aux_head', False)
aux_head_weight = kwargs.get('aux_head_weight', 0.4)
has_dropout = kwargs.get('has_dropout', False)
dropout_ratio = kwargs.get('dropout_ratio', 0.2)
setting_params=get_setting_params(**kwargs)
net_params=get_eatnet_param(net_code, _mbv2base=mbv2base)
num_stage=net_params['num_stage']
repeat_num=net_params['repeat_num']
input_output_filter=net_params['input_output_filter']
first_stride=net_params['first_stride']
is_id_skip=net_params['is_id_skip']
kernel_size=net_params['kernel_size']
filter_depth=net_params['filter_depth']
conv_ops = net_params['conv_ops']
assert num_stage==7
# for classification
dilate_list=[1,1,1,1,1,1,1] # for classification
with_dilate_list=[False, False, False, False,False,False,False]
data = mx.sym.Variable(name='data')
# head
head_data=add_head_block(data=data,
num_filter=32,
setting_params=setting_params,
multiplier=multiplier,
kernel=(3, 3),
stride=(2, 2),
pad=(1, 1),
name="eatnet_head")
stage0_data, _=add_stage_backbone_block(data=head_data,
conv_ops = conv_ops,
setting_params=setting_params,
stage=0,
repeat_num=repeat_num,
input_output_filter=input_output_filter,
first_stride=first_stride,
is_id_skip=is_id_skip,
kernel_size=kernel_size,
filter_depth=filter_depth,
multiplier=multiplier,
dilate=dilate_list[0],
with_dilate=with_dilate_list[0],
name="eatnet")
stage1_data, _=add_stage_backbone_block(data=stage0_data,
conv_ops=conv_ops,
setting_params=setting_params,
stage=1,
repeat_num=repeat_num,
input_output_filter=input_output_filter,
first_stride=first_stride,
is_id_skip=is_id_skip,
kernel_size=kernel_size,
filter_depth=filter_depth,
multiplier=multiplier,
dilate=dilate_list[1],
with_dilate=with_dilate_list[1],
name="eatnet")
stage2_data, _ = add_stage_backbone_block(data=stage1_data,
conv_ops=conv_ops,
setting_params=setting_params,
stage=2,
repeat_num=repeat_num,
input_output_filter=input_output_filter,
first_stride=first_stride,
is_id_skip=is_id_skip,
kernel_size=kernel_size,
filter_depth=filter_depth,
multiplier=multiplier,
dilate=dilate_list[2],
with_dilate=with_dilate_list[2],
name="eatnet")
stage3_data, _ = add_stage_backbone_block(data=stage2_data,
conv_ops=conv_ops,
setting_params=setting_params,
stage=3,
repeat_num=repeat_num,
input_output_filter=input_output_filter,
first_stride=first_stride,
is_id_skip=is_id_skip,
kernel_size=kernel_size,
filter_depth=filter_depth,
multiplier=multiplier,
dilate=dilate_list[3],
with_dilate=with_dilate_list[3],
name="eatnet")
stage4_data, _ = add_stage_backbone_block(data=stage3_data,
conv_ops=conv_ops,
setting_params=setting_params,
stage=4,
repeat_num=repeat_num,
input_output_filter=input_output_filter,
first_stride=first_stride,
is_id_skip=is_id_skip,
kernel_size=kernel_size,
filter_depth=filter_depth,
multiplier=multiplier,
dilate=dilate_list[4],
with_dilate=with_dilate_list[4],
name="eatnet")
if use_aux_head:
softmax_aux = auxiliary_head(data=stage4_data,
aux_head_weight=aux_head_weight,
setting_params=setting_params,
num_class=num_class)
stage5_data, _ = add_stage_backbone_block(data=stage4_data,
conv_ops=conv_ops,
setting_params=setting_params,
stage=5,
repeat_num=repeat_num,
input_output_filter=input_output_filter,
first_stride=first_stride,
is_id_skip=is_id_skip,
kernel_size=kernel_size,
filter_depth=filter_depth,
multiplier=multiplier,
dilate=dilate_list[5],
with_dilate=with_dilate_list[5],
name="eatnet")
stage6_data, _ = add_stage_backbone_block(data=stage5_data,
conv_ops=conv_ops,
setting_params=setting_params,
stage=6,
repeat_num=repeat_num,
input_output_filter=input_output_filter,
first_stride=first_stride,
is_id_skip=is_id_skip,
kernel_size=kernel_size,
filter_depth=filter_depth,
multiplier=multiplier,
dilate=dilate_list[6],
with_dilate=with_dilate_list[6],
name="eatnet")
stage6_data_conv1x1 = add_conv_1x1(data=stage6_data,
num_filter=int(1280 * multiplier) if multiplier > 1.0 else 1280,
setting_params=setting_params,
name='eatnet')
if has_dropout:
stage6_data_conv1x1 = mx.symbol.Dropout(stage6_data_conv1x1, p = dropout_ratio)
softmax = add_fc_cls_block(data=stage6_data_conv1x1,
label_smooth=label_smooth,
smooth_alpha=smooth_alpha,
num_class=num_class,
is_training=is_training,
name='eatnet')
if use_aux_head:
output = mx.sym.Group([softmax, softmax_aux])
return output
else:
return softmax
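# Minimal usage sketch (hypothetical net_code; get_symbol expects one entry per stage
# and asserts seven stages in total):
#   net_code = [[2, 0, 1, 0, 1.0]] * 7   # mobile-ib-conv-6, 3x3 kernel, id skip, 1 layer, width 1.0
#   sym = get_symbol(net_code=net_code, num_class=1000, multiplier=1.0)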
|
1666850
|
import tkinter
from command.DrawCommand import DrawCommand
from command.MacroCommand import MacroCommand
class CanvasFrame(tkinter.Frame):
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.pack()
self.history = MacroCommand()
self.paintColor = "red"
self.clearButton = tkinter.Button(self, text="clear")
self.undoButton = tkinter.Button(self, text="undo")
self.redButton = tkinter.Button(self, text="red")
self.greenButton = tkinter.Button(self, text="green")
self.blueButton = tkinter.Button(self, text="blue")
self.canvas = tkinter.Canvas(self, width=400, height=300, bg="white")
self.redButton.grid(row=0, column=1, padx=2, pady=2, sticky=tkinter.EW)
self.greenButton.grid(row=0, column=2, padx=2, pady=2, sticky=tkinter.EW)
self.blueButton.grid(row=0, column=3, padx=2, pady=2, sticky=tkinter.EW)
self.clearButton.grid(row=0, column=4, padx=2, pady=2, sticky=tkinter.EW)
self.undoButton.grid(row=0, column=5, padx=2, pady=2, sticky=tkinter.EW)
self.canvas.grid(row=1, columnspan=7, padx=2, pady=2, sticky=tkinter.EW)
self.canvas.bind("<B1-Motion>", self._mouseDragged)
self.redButton.bind("<Button-1>", self._redPaint)
self.greenButton.bind("<Button-1>", self._greenPaint)
self.blueButton.bind("<Button-1>", self._bluePaint)
self.clearButton.bind("<Button-1>", self._clearCanvas)
self.undoButton.bind("<Button-1>", self._undoCanvas)
def _redPaint(self, event):
self.paintColor = "red"
def _greenPaint(self, event):
self.paintColor = "green"
def _bluePaint(self, event):
self.paintColor = "blue"
def _clearCanvas(self, event):
self.history.clear()
self.canvas.delete(tkinter.ALL)
def _undoCanvas(self, event):
self.history.undo()
self.canvas.delete(tkinter.ALL)
self.history.execute()
def _mouseDragged(self, event):
cmd = DrawCommand(self, event, self.paintColor)
self.history.append(cmd)
cmd.execute()
if __name__ == '__main__':
application = tkinter.Tk()
application.title("Command Pattern Sample")
window = CanvasFrame(application)
application.protocol("WM_DELETE_WINDOW", window.quit)
application.mainloop()
|
1666853
|
from arekit.contrib.source.common.labels import PositiveLabel, NegativeLabel
# Default label formattings.
POS_LABEL_STR = "pos"
NEG_LABEL_STR = "neg"
# Default label instances.
RUSENTREL_POS_LABEL_TYPE = PositiveLabel
RUSENTREL_NEG_LABEL_TYPE = NegativeLabel
|
1666934
|
from cryptography.fernet import Fernet, InvalidToken
from django.conf import settings
class Fern:
# """
# Usage:
# encrypt('foo')
# decrypt('CIPHERTEXT_ENCRYPTED_TEXT')
# """
def __init__(self, key=None):
if key:
self.key = key
else:
self.key = settings.BETA_ENVIRONMENT
def encrypt(self, message: str) -> str:
message_b = message.encode('utf-8')
ciphertext_b = Fernet(self.key).encrypt(message_b)
return ciphertext_b.decode('utf-8')
def decrypt(self, ciphertext: str) -> str:
try:
ciphertext_b = Fernet(self.key).decrypt(ciphertext.encode('utf-8'))
return ciphertext_b.decode('utf-8')
except InvalidToken:
return ''
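# Minimal usage sketch (assumes a valid Fernet key; settings.BETA_ENVIRONMENT is expected
# to hold such a key when none is passed in):
#   from cryptography.fernet import Fernet
#   fern = Fern(key=Fernet.generate_key())
#   token = fern.encrypt('foo')
#   assert fern.decrypt(token) == 'foo'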
|
1666946
|
import os
import shutil
import pytest
from flynt import api
from flynt import state
from flynt.api import _fstringify_file
# These "files" are byte-string constants instead of actual files to prevent e.g. Git or text editors from accidentally changing the encoding
invalid_unicode = b"# This is not valid unicode: " + bytes([0xFF, 0xFF])
mixed_line_endings_before = b"'{}'.format(1)\n'{}'.format(2)# Linux line ending\n'{}'.format(3)# Windows line ending\r\n"
mixed_line_endings_after = (
b"f'{1}'\nf'{2}'# Linux line ending\nf'{3}'# Windows line ending\r\n"
)
@pytest.fixture()
def formattable_file(tmpdir):
folder = os.path.dirname(__file__)
source_path = os.path.join(folder, "samples_in", "first_string.py")
tmp_path = os.path.join(tmpdir, "input.py")
shutil.copy2(source_path, tmp_path)
yield tmp_path
@pytest.fixture()
def py2_file(tmpdir):
folder = os.path.dirname(__file__)
py2_path = os.path.join(folder, "samples_in", "py2.py2")
tmp_path = os.path.join(tmpdir, "py2.py2")
shutil.copy2(py2_path, tmp_path)
yield tmp_path
@pytest.fixture()
def invalid_unicode_file(tmpdir):
folder = os.path.dirname(__file__)
tmp_path = os.path.join(tmpdir, "invalid_unicode.py")
with open(tmp_path, "wb") as f:
f.write(invalid_unicode)
yield tmp_path
@pytest.fixture()
def mixed_line_endings_file(tmpdir):
folder = os.path.dirname(__file__)
tmp_path = os.path.join(tmpdir, "mixed_line_endings.py")
with open(tmp_path, "wb") as file:
file.write(mixed_line_endings_before)
yield tmp_path
def test_py2(py2_file):
with open(py2_file) as f:
content_before = f.read()
modified, _, _, _ = _fstringify_file(py2_file, True, 1000)
with open(py2_file) as f:
content_after = f.read()
assert not modified
assert content_after == content_before
def test_invalid_unicode(invalid_unicode_file):
modified, _, _, _ = _fstringify_file(invalid_unicode_file, True, 1000)
with open(invalid_unicode_file, "rb") as f:
content_after = f.read()
assert not modified
assert content_after == invalid_unicode
def test_works(formattable_file):
with open(formattable_file) as f:
content_before = f.read()
modified, _, _, _ = _fstringify_file(formattable_file, True, 1000)
with open(formattable_file) as f:
content_after = f.read()
assert modified
assert content_after != content_before
def test_break_safe(formattable_file, monkeypatch):
with open(formattable_file) as f:
content_before = f.read()
def broken_fstringify_by_line(*args, **kwargs):
return "Hello World", 42
monkeypatch.setattr(api, "fstringify_code_by_line", broken_fstringify_by_line)
modified, _, _, _ = _fstringify_file(formattable_file, True, 1000)
with open(formattable_file) as f:
content_after = f.read()
assert not modified
assert content_after == content_before
def test_catches_subtle(formattable_file, monkeypatch):
with open(formattable_file) as f:
content_before = f.read()
def broken_fstringify_by_line(*args, **kwargs):
return "a = 42", 42
monkeypatch.setattr(api, "fstringify_code_by_line", broken_fstringify_by_line)
modified, _, _, _ = _fstringify_file(formattable_file, True, 1000)
with open(formattable_file) as f:
content_after = f.read()
assert not modified
assert content_after == content_before
def test_dry_run(formattable_file, monkeypatch):
monkeypatch.setattr(state, "dry_run", True)
with open(formattable_file) as f:
content_before = f.read()
modified, _, _, _ = _fstringify_file(formattable_file, True, 1000)
with open(formattable_file) as f:
content_after = f.read()
assert modified
assert content_after == content_before
def test_mixed_line_endings(mixed_line_endings_file):
modified, _, _, _ = _fstringify_file(mixed_line_endings_file, True, 1000)
with open(mixed_line_endings_file, "rb") as f:
content_after = f.read()
assert modified
assert content_after == mixed_line_endings_after
@pytest.fixture()
def bom_file(tmpdir):
folder = os.path.dirname(__file__)
source_path = os.path.join(folder, "samples_in", "bom.py")
tmp_path = os.path.join(tmpdir, "input.py")
shutil.copy2(source_path, tmp_path)
yield tmp_path
def test_bom(bom_file):
"""Test on a file with Byte order mark https://en.wikipedia.org/wiki/Byte_order_mark
It's possible to verify that a file has bom using `file` unix utility. """
modified, _, _, _ = _fstringify_file(bom_file, True, 1000)
assert modified
|
1666977
|
import os
try:
from ufoLib.glifLib import GlyphSet
except ImportError:
from robofab.glifLib import GlyphSet
import pkg_resources
DATADIR = pkg_resources.resource_filename('cu2qu.test', 'data')
CUBIC_GLYPHS = GlyphSet(os.path.join(DATADIR, 'cubic'))
QUAD_GLYPHS = GlyphSet(os.path.join(DATADIR, 'quadratic'))
import unittest
# Python 3 renamed 'assertRaisesRegexp' to 'assertRaisesRegex', and fires
# deprecation warnings if a program uses the old name.
if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
|
1667008
|
import ujson
import redis
import falcon
from celery.result import AsyncResult
from app.tasks import invoke_predict
from app.logic.pipeline import verify_input
INFO_FILE = './app/assets/info.txt'
class InfoResource(object):
def on_get(self, req, resp):
"""Handles GET requests"""
resp.status = falcon.HTTP_200
with open(INFO_FILE) as info_file:
info = ''.join(info_file)
resp.body = info
class StatusResource(object):
def on_get(self, req, resp, task_id):
"""Handles GET requests"""
task_result = AsyncResult(task_id)
result = {'status': task_result.status, 'result': task_result.result}
resp.status = falcon.HTTP_200
resp.body = ujson.dumps(result)
class PredictResource(object):
def __init__(self):
self._redis = redis.Redis(host='redis')
def on_post(self, req, resp):
"""Handles POST requests"""
# Read request
try:
raw_json = req.stream.read()
except Exception as ex:
raise falcon.HTTPError(falcon.HTTP_400,
'Stream read error',
str(ex))
# Parse to json
try:
json_data = ujson.loads(raw_json.decode())
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400,
'Malformed JSON',
'Could not decode the request body.')
# Verify json fields
try:
json_data = verify_input(json_data)
except Exception as ex:
raise falcon.HTTPError(falcon.HTTP_400,
'Bad input format',
str(ex))
try:
resp.status = falcon.HTTP_200
sync_response_timeout = self._respond_synchronically()
task = invoke_predict.delay(json_data, sync=bool(sync_response_timeout))
if sync_response_timeout: # Sync response
result = task.get(timeout=sync_response_timeout)
resp.body = ujson.dumps(result)
else: # Async response
resp.body = ujson.dumps({
'status': 'success',
'data': {
'task_id': task.id
}})
except Exception as ex:
raise falcon.HTTPError(falcon.HTTP_400,
'Error invoking predict',
str(ex))
def _respond_synchronically(self) -> float:
""" Checks the configuration for the type of response (sync/async)
If timeout == 0.0, respond asynchronically, if > 0.0 respond synchronically with the value as timeout """
timeout = self._redis.get('synchronous_timeout')
if not timeout:
return 5.0 # Default sync value
return float(timeout)
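# Note on configuration (illustrative): the sync/async switch is read from Redis on every
# request, so e.g. redis.Redis(host='redis').set('synchronous_timeout', 0) forces
# asynchronous responses, while setting it to 10 makes /predict wait up to 10 seconds.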
# Never change this.
app = falcon.API()
# Create resources
info = InfoResource()
predict = PredictResource()
status = StatusResource()
# Routing
app.add_route('/info', info)
app.add_route('/predict', predict)
app.add_route('/status/{task_id}', status)
|
1667052
|
from hathor.conf import HathorSettings
from hathor.graphviz import GraphvizVisualizer
from hathor.simulator import FakeConnection
from tests import unittest
from tests.simulation.base import SimulatorTestCase
from tests.utils import add_custom_tx, gen_new_tx
settings = HathorSettings()
class BaseSoftVoidedTestCase(SimulatorTestCase):
seed_config = 5988775361793628169
def assertNoParentsAreSoftVoided(self, tx):
for h in tx.parents:
tx2 = tx.storage.get_transaction(h)
tx2_meta = tx2.get_metadata()
tx2_voided_by = tx2_meta.voided_by or set()
self.assertNotIn(settings.SOFT_VOIDED_ID, tx2_voided_by)
def test_soft_voided(self):
txA_hash = bytes.fromhex('4586c5428e8d666ea59684c1cd9286d2b9d9e89b4939207db47412eeaabc48b2')
soft_voided_tx_ids = set([
txA_hash,
])
manager1 = self.create_peer(soft_voided_tx_ids=soft_voided_tx_ids)
manager1.allow_mining_without_peers()
miner1 = self.simulator.create_miner(manager1, hashpower=5e6)
miner1.start()
self.simulator.run(60)
gen_tx1 = self.simulator.create_tx_generator(manager1, rate=3 / 60., hashpower=1e6, ignore_no_funds=True)
gen_tx1.start()
self.simulator.run(300)
manager2 = self.create_peer(soft_voided_tx_ids=soft_voided_tx_ids)
manager2.soft_voided_tx_ids = soft_voided_tx_ids
graphviz = GraphvizVisualizer(manager2.tx_storage, include_verifications=True, include_funds=True)
conn12 = FakeConnection(manager1, manager2, latency=0.001)
self.simulator.add_connection(conn12)
miner2 = self.simulator.create_miner(manager2, hashpower=10e6)
miner2.start()
gen_tx2 = self.simulator.create_tx_generator(manager2, rate=10 / 60., hashpower=1e6, ignore_no_funds=True)
gen_tx2.start()
self.simulator.run(900)
txA = manager2.tx_storage.get_transaction(txA_hash)
metaA = txA.get_metadata()
self.assertEqual({settings.SOFT_VOIDED_ID, txA.hash}, metaA.voided_by)
graphviz.labels[txA.hash] = 'txA'
txB = add_custom_tx(manager2, [(txA, 0)])
metaB = txB.get_metadata()
self.assertEqual({txA.hash}, metaB.voided_by)
graphviz.labels[txB.hash] = 'txB'
txD1 = add_custom_tx(manager2, [(txB, 0)])
metaD1 = txD1.get_metadata()
self.assertEqual({txA.hash}, metaD1.voided_by)
graphviz.labels[txD1.hash] = 'txD1'
txD2 = add_custom_tx(manager2, [(txB, 0)])
metaD2 = txD2.get_metadata()
self.assertEqual({txA.hash, txD2.hash}, metaD2.voided_by)
graphviz.labels[txD2.hash] = 'txD2'
metaD1 = txD1.get_metadata()
self.assertEqual({txA.hash, txD1.hash}, metaD1.voided_by)
address = manager2.wallet.get_unused_address(mark_as_used=False)
value = 1
txC = gen_new_tx(manager2, address, value)
txC.parents[0] = txA.hash
txC.timestamp = max(txC.timestamp, txA.timestamp + 1)
txC.weight = 25
txC.update_hash()
self.assertTrue(manager2.propagate_tx(txC, fails_silently=False))
metaC = txC.get_metadata()
self.assertIsNone(metaC.voided_by)
graphviz.labels[txC.hash] = 'txC'
blk1 = manager2.generate_mining_block()
self.assertNoParentsAreSoftVoided(blk1)
blk1.parents[1] = txA.hash
blk1.nonce = self.rng.getrandbits(32)
blk1.update_hash()
self.assertTrue(manager2.propagate_tx(blk1, fails_silently=False))
blk1meta = blk1.get_metadata()
self.assertIsNone(blk1meta.voided_by)
graphviz.labels[blk1.hash] = 'b1'
blk2 = manager2.generate_mining_block()
self.assertNoParentsAreSoftVoided(blk2)
if txD1.hash not in blk2.parents:
blk2.parents[1] = txD1.hash
blk2.nonce = self.rng.getrandbits(32)
blk2.update_hash()
self.assertTrue(manager2.propagate_tx(blk2, fails_silently=False))
blk2meta = blk2.get_metadata()
self.assertIsNone(blk2meta.voided_by)
graphviz.labels[blk2.hash] = 'b2'
blk3 = manager2.generate_mining_block()
self.assertNoParentsAreSoftVoided(blk3)
blk3.parents[1] = txD2.hash
blk3.nonce = self.rng.getrandbits(32)
blk3.update_hash()
self.assertTrue(manager2.propagate_tx(blk3, fails_silently=False))
blk3meta = blk3.get_metadata()
self.assertIsNone(blk3meta.voided_by)
graphviz.labels[blk3.hash] = 'b3'
for tx in manager1.tx_storage.get_all_transactions():
meta = tx.get_metadata()
voided_by = meta.voided_by or set()
if settings.SOFT_VOIDED_ID in voided_by:
self.assertTrue({settings.SOFT_VOIDED_ID, tx.hash}.issubset(voided_by))
# Uncomment lines below to visualize the DAG and the blockchain.
# dot = graphviz.dot()
# dot.render('dot0')
class SyncV1SoftVoidedTestCase(unittest.SyncV1Params, BaseSoftVoidedTestCase):
__test__ = True
class SyncV2SoftVoidedTestCase(unittest.SyncV2Params, BaseSoftVoidedTestCase):
__test__ = True
# sync-bridge should behave like sync-v2
class SyncBridgeSoftVoidedTestCase(unittest.SyncBridgeParams, SyncV2SoftVoidedTestCase):
__test__ = True
|
1667072
|
import datetime
from codecs import utf_8_decode
from codecs import utf_8_encode
import hashlib
import os
import time
from wsgiref.handlers import _monthname # Locale-independent, RFC-2616
from wsgiref.handlers import _weekdayname # Locale-independent, RFC-2616
try:
from urllib.parse import urlencode, parse_qsl
except ImportError:
from urllib import urlencode
from urlparse import parse_qsl
from zope.interface import implementer
from repoze.who.interfaces import IIdentifier
from repoze.who.interfaces import IAuthenticator
from repoze.who._compat import get_cookies
import repoze.who._auth_tkt as auth_tkt
from repoze.who._compat import STRING_TYPES
_UTCNOW = None # unit tests can replace
def _utcnow(): #pragma NO COVERAGE
if _UTCNOW is not None:
return _UTCNOW
return datetime.datetime.utcnow()
@implementer(IIdentifier, IAuthenticator)
class AuthTktCookiePlugin(object):
userid_typename = 'userid_type'
userid_type_decoders = {'int': int,
'unicode': lambda x: utf_8_decode(x)[0],
}
userid_type_encoders = {int: ('int', str),
}
try:
userid_type_encoders[long] = ('int', str)
except NameError: #pragma NO COVER Python >= 3.0
pass
try:
userid_type_encoders[unicode] = ('unicode',
lambda x: utf_8_encode(x)[0])
except NameError: #pragma NO COVER Python >= 3.0
pass
def __init__(self, secret, cookie_name='auth_tkt',
secure=False, include_ip=False,
timeout=None, reissue_time=None, userid_checker=None,
digest_algo=auth_tkt.DEFAULT_DIGEST):
self.secret = secret
self.cookie_name = cookie_name
self.include_ip = include_ip
self.secure = secure
if timeout and ( (not reissue_time) or (reissue_time > timeout) ):
raise ValueError('When timeout is specified, reissue_time must '
'be set to a lower value')
self.timeout = timeout
self.reissue_time = reissue_time
self.userid_checker = userid_checker
self.digest_algo = digest_algo
# IIdentifier
def identify(self, environ):
cookies = get_cookies(environ)
cookie = cookies.get(self.cookie_name)
if cookie is None or not cookie.value:
return None
if self.include_ip:
remote_addr = environ['REMOTE_ADDR']
else:
remote_addr = '0.0.0.0'
try:
timestamp, userid, tokens, user_data = auth_tkt.parse_ticket(
self.secret, cookie.value, remote_addr, self.digest_algo)
except auth_tkt.BadTicket:
return None
if self.timeout and ( (timestamp + self.timeout) < time.time() ):
return None
user_data_dict = dict(parse_qsl(user_data))
userid_type = user_data_dict.get(self.userid_typename)
if userid_type:
decoder = self.userid_type_decoders.get(userid_type)
if decoder:
userid = decoder(userid)
environ['REMOTE_USER_TOKENS'] = tokens
environ['REMOTE_USER_DATA'] = user_data
environ['AUTH_TYPE'] = 'cookie'
identity = {}
identity['timestamp'] = timestamp
identity['repoze.who.plugins.auth_tkt.userid'] = userid
identity['tokens'] = tokens
identity['userdata'] = user_data_dict
return identity
# IIdentifier
def forget(self, environ, identity):
# return a set of expires Set-Cookie headers
return self._get_cookies(environ, 'INVALID', 0)
# IIdentifier
def remember(self, environ, identity):
if self.include_ip:
remote_addr = environ['REMOTE_ADDR']
else:
remote_addr = '0.0.0.0'
cookies = get_cookies(environ)
existing = cookies.get(self.cookie_name)
old_cookie_value = getattr(existing, 'value', None)
max_age = identity.get('max_age', None)
timestamp, userid, tokens, userdata = None, '', (), ''
if old_cookie_value:
try:
timestamp,userid,tokens,userdata = auth_tkt.parse_ticket(
self.secret, old_cookie_value, remote_addr,
self.digest_algo)
except auth_tkt.BadTicket:
pass
tokens = tuple(tokens)
who_userid = identity['repoze.who.userid']
who_tokens = tuple(identity.get('tokens', ()))
who_userdata_dict = identity.get('userdata', {})
encoding_data = self.userid_type_encoders.get(type(who_userid))
if encoding_data:
encoding, encoder = encoding_data
who_userid = encoder(who_userid)
who_userdata_dict[self.userid_typename] = encoding
who_userdata = urlencode(who_userdata_dict)
old_data = (userid, tokens, userdata)
new_data = (who_userid, who_tokens, who_userdata)
if old_data != new_data or (self.reissue_time and
( (timestamp + self.reissue_time) < time.time() )):
ticket = auth_tkt.AuthTicket(
self.secret,
who_userid,
remote_addr,
tokens=who_tokens,
user_data=who_userdata,
cookie_name=self.cookie_name,
secure=self.secure,
digest_algo=self.digest_algo)
new_cookie_value = ticket.cookie_value()
if old_cookie_value != new_cookie_value:
# return a set of Set-Cookie headers
return self._get_cookies(environ, new_cookie_value, max_age)
# IAuthenticator
def authenticate(self, environ, identity):
userid = identity.get('repoze.who.plugins.auth_tkt.userid')
if userid is None:
return None
if self.userid_checker and not self.userid_checker(userid):
return None
identity['repoze.who.userid'] = userid
return userid
def _get_cookies(self, environ, value, max_age=None):
if max_age is not None:
max_age = int(max_age)
later = _utcnow() + datetime.timedelta(seconds=max_age)
# Wdy, DD-Mon-YY HH:MM:SS GMT
expires = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
_weekdayname[later.weekday()],
later.day,
_monthname[later.month],
later.year,
later.hour,
later.minute,
later.second,
)
# the Expires header is *required* at least for IE7 (IE7 does
# not respect Max-Age)
max_age = "; Max-Age=%s; Expires=%s" % (max_age, expires)
else:
max_age = ''
secure = ''
if self.secure:
secure = '; secure; HttpOnly'
cur_domain = environ.get('HTTP_HOST', environ.get('SERVER_NAME'))
cur_domain = cur_domain.split(':')[0] # drop port
wild_domain = '.' + cur_domain
cookies = [
('Set-Cookie', '%s="%s"; Path=/%s%s' % (
self.cookie_name, value, max_age, secure)),
('Set-Cookie', '%s="%s"; Path=/; Domain=%s%s%s' % (
self.cookie_name, value, cur_domain, max_age, secure)),
('Set-Cookie', '%s="%s"; Path=/; Domain=%s%s%s' % (
self.cookie_name, value, wild_domain, max_age, secure))
]
return cookies
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
id(self)) #pragma NO COVERAGE
def _bool(value):
if isinstance(value, STRING_TYPES):
return value.lower() in ('yes', 'true', '1')
return value
def make_plugin(secret=None,
secretfile=None,
cookie_name='auth_tkt',
secure=False,
include_ip=False,
timeout=None,
reissue_time=None,
userid_checker=None,
digest_algo=auth_tkt.DEFAULT_DIGEST,
):
from repoze.who.utils import resolveDotted
if (secret is None and secretfile is None):
raise ValueError("One of 'secret' or 'secretfile' must not be None.")
if (secret is not None and secretfile is not None):
raise ValueError("Specify only one of 'secret' or 'secretfile'.")
if secretfile:
secretfile = os.path.abspath(os.path.expanduser(secretfile))
if not os.path.exists(secretfile):
raise ValueError("No such 'secretfile': %s" % secretfile)
with open(secretfile) as f:
secret = f.read().strip()
if timeout:
timeout = int(timeout)
if reissue_time:
reissue_time = int(reissue_time)
if userid_checker is not None:
userid_checker = resolveDotted(userid_checker)
if isinstance(digest_algo, str):
try:
digest_algo = getattr(hashlib, digest_algo)
except AttributeError:
raise ValueError("No such 'digest_algo': %s" % digest_algo)
plugin = AuthTktCookiePlugin(secret,
cookie_name,
_bool(secure),
_bool(include_ip),
timeout,
reissue_time,
userid_checker,
digest_algo,
)
return plugin
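# A typical who.ini entry wiring up this factory might look like the following
# (illustrative; the dotted path depends on where this module is installed):
#   [plugin:auth_tkt]
#   use = repoze.who.plugins.auth_tkt:make_plugin
#   secret = s33kr1t
#   cookie_name = auth_tkt
#   secure = True
#   include_ip = True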
|
1667105
|
import numpy as np
import logging
logger = logging.getLogger(__name__)
def get_n_unique_rows(edge):
new = [tuple(row) for row in edge]
edge_unique = np.unique(new)
edge_size = edge_unique.shape[0]
return edge_size
def get_unique_rows(edge):
edge_unique = np.unique(edge, axis=0)
return edge_unique
def get_depth_of_nested_list(l):
depth = lambda L: isinstance(L, list) and max(map(depth, L)) + 1
return depth(l)
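# Example: get_depth_of_nested_list([1, [2, [3]]]) returns 3 (three levels of nesting),
# while a non-list argument such as get_depth_of_nested_list(1) returns False.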
|
1667166
|
import pandas as pd
def check_gtf_composition(gtf_file, annotation=None, feature_type='gene'):
"""
annotation can be either HAVANA or ENSEMBL
feature_type can be gene, transcript, exon
"""
# loading the gtf file
mtx = []
with open(gtf_file) as f:
for line in f:
if line[0] != '#':
mtx.append(line.split('\t'))
mtx_df = pd.DataFrame(mtx)
if annotation != None:
is_true = mtx_df[1]==annotation
mtx_df2 = mtx_df[is_true]
else:
mtx_df2 = mtx_df.copy()
is_true = mtx_df2[2]==feature_type
mtx_df3 = mtx_df2[is_true]
list_genetype = [x.split(';')[1].split('"')[1] for x in mtx_df3[8].tolist()]
count_list = {}
for key in list(set(list_genetype)):
count_list[key] = 0
for n in list_genetype:
count_list[n] += 1
return(count_list)
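# Hypothetical usage on a GENCODE-style GTF, counting how many genes of each biotype
# were annotated by HAVANA (the returned dict maps biotype -> count):
#   counts = check_gtf_composition('annotation.gtf', annotation='HAVANA', feature_type='gene')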
|
1667170
|
from django.conf.urls import url
from . import duo_auth
urlpatterns = [
url(r'^accounts/duo_login', duo_auth.login),
url(r'^accounts/duo_logout/$', duo_auth.logout),
]
|
1667195
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from bc4py.database.builder import *
"""
database object
====
warning: do not import bc4py.* in this file
"""
tables: 'Tables' = None
chain_builder: 'ChainBuilder' = None
tx_builder: 'TransactionBuilder' = None
account_builder: 'AccountBuilder' = None
__all__ = [
"tables",
"chain_builder",
"tx_builder",
"account_builder",
]
|
1667222
|
from enum import Enum
from grapheme.grapheme_property_group import GraphemePropertyGroup as G
from grapheme.grapheme_property_group import get_group
class FSM:
@classmethod
def default(cls, n):
if n is G.OTHER:
return True, cls.default
if n is G.CR:
return True, cls.cr
if n in [G.LF, G.CONTROL]:
return True, cls.lf_or_control
if n in [G.EXTEND, G.SPACING_MARK, G.ZWJ]:
return False, cls.default
if n is G.EXTENDED_PICTOGRAPHIC:
return True, cls.emoji
if n is G.REGIONAL_INDICATOR:
return True, cls.ri
if n is G.L:
return True, cls.hangul_l
if n in [G.LV, G.V]:
return True, cls.hangul_lv_or_v
if n in [G.LVT, G.T]:
return True, cls.hangul_lvt_or_t
if n is G.PREPEND:
return True, cls.prepend
return True, cls.default
@classmethod
def default_next_state(cls, n, should_break):
_, next_state = cls.default(n)
return should_break, next_state
@classmethod
def cr(cls, n):
if n is G.LF:
return False, cls.lf_or_control
return cls.default_next_state(n, should_break=True)
@classmethod
def lf_or_control(cls, n):
return cls.default_next_state(n, should_break=True)
@classmethod
def prepend(cls, n):
if n in [G.CONTROL, G.LF]:
return True, cls.default
if n is G.CR:
return True, cls.cr
return cls.default_next_state(n, should_break=False)
# Hanguls
@classmethod
def hangul_l(cls, n):
if n in [G.V, G.LV]:
return False, cls.hangul_lv_or_v
if n is G.LVT:
return False, cls.hangul_lvt_or_t
if n is G.L:
return False, cls.hangul_l
return cls.default(n)
@classmethod
def hangul_lv_or_v(cls, n):
if n is G.V:
return False, cls.hangul_lv_or_v
if n is G.T:
return False, cls.hangul_lvt_or_t
return cls.default(n)
@classmethod
def hangul_lvt_or_t(cls, n):
if n is G.T:
return False, cls.hangul_lvt_or_t
return cls.default(n)
# Emojis
@classmethod
def emoji(cls, n):
if n is G.EXTEND:
return False, cls.emoji
if n is G.ZWJ:
return False, cls.emoji_zjw
return cls.default(n)
@classmethod
def emoji_zjw(cls, n):
if n is G.EXTENDED_PICTOGRAPHIC:
return False, cls.emoji
return cls.default(n)
# Regional indication (flag)
@classmethod
def ri(cls, n):
if n is G.REGIONAL_INDICATOR:
return False, cls.default
return cls.default(n)
class BreakPossibility(Enum):
CERTAIN = "certain"
POSSIBLE = "possible"
NO_BREAK = "nobreak"
def get_break_possibility(a, b):
# Probably most common, included as short circuit before checking all else
if a is G.OTHER and b is G.OTHER:
return BreakPossibility.CERTAIN
assert isinstance(a, G)
assert isinstance(b, G)
# Only break if preceded by an uneven number of REGIONAL_INDICATORS
# sot (RI RI)* RI × RI
# [^RI] (RI RI)* RI × RI
if a is G.REGIONAL_INDICATOR and b is G.REGIONAL_INDICATOR:
return BreakPossibility.POSSIBLE
# (Control | CR | LF) ÷
# ÷ (Control | CR | LF)
if a in [G.CONTROL, G.CR, G.LF] or b in [G.CONTROL, G.CR, G.LF]:
# CR × LF
if a is G.CR and b is G.LF:
return BreakPossibility.NO_BREAK
else:
return BreakPossibility.CERTAIN
# L × (L | V | LV | LVT)
if a is G.L and b in [G.L, G.V, G.LV, G.LVT]:
return BreakPossibility.NO_BREAK
# (LV | V) × (V | T)
if a in [G.LV, G.V] and b in [G.V, G.T]:
return BreakPossibility.NO_BREAK
# (LVT | T) × T
if a in [G.LVT, G.T] and b is G.T:
return BreakPossibility.NO_BREAK
# × (Extend | ZWJ)
# × SpacingMark
# Prepend ×
if b in [G.EXTEND, G.ZWJ, G.SPACING_MARK] or a is G.PREPEND:
return BreakPossibility.NO_BREAK
# \p{Extended_Pictographic} Extend* ZWJ × \p{Extended_Pictographic}
if a is G.ZWJ and b is G.EXTENDED_PICTOGRAPHIC:
return BreakPossibility.POSSIBLE
# everything else, assumes all other rules are included above
return BreakPossibility.CERTAIN
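# A couple of concrete cases of the rules above (derived directly from this function):
#   get_break_possibility(G.CR, G.LF) is BreakPossibility.NO_BREAK
#   get_break_possibility(G.L, G.V) is BreakPossibility.NO_BREAK
#   get_break_possibility(G.REGIONAL_INDICATOR, G.REGIONAL_INDICATOR) is BreakPossibility.POSSIBLE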
def get_last_certain_break_index(string, index):
if index >= len(string):
return len(string)
prev = get_group(string[index])
while True:
if index <= 0:
return 0
index -= 1
cur = get_group(string[index])
if get_break_possibility(cur, prev) == BreakPossibility.CERTAIN:
return index + 1
prev = cur
class GraphemeIterator:
def __init__(self, string):
self.str_iter = iter(string)
try:
self.buffer = next(self.str_iter)
except StopIteration:
self.buffer = None
else:
_, state = FSM.default(get_group(self.buffer))
self.state = state
def __iter__(self):
return self
def __next__(self):
for codepoint in self.str_iter:
should_break, state = self.state(get_group(codepoint))
self.state = state
if should_break:
return self._break(codepoint)
self.buffer += codepoint
if self.buffer:
return self._break(None)
raise StopIteration()
def _break(self, new):
old_buffer = self.buffer
self.buffer = new
return old_buffer
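# Illustrative example: 'e' followed by U+0301 (combining acute accent) forms a single
# grapheme cluster, since EXTEND never starts a new cluster:
#   list(GraphemeIterator('e\u0301')) == ['e\u0301']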
|
1667258
|
from .molecule import Molecule, might_be_variant, molecule_to_random_primer_dict
from .iterator import MoleculeIterator, ReadIterator
from .taps import *
from .chic import *
from .featureannotatedmolecule import *
from .nlaIII import *
from .rna import *
from .consensus import *
from .fourthiouridine import *
from .filter import molecule_iterator_filter
from .scartrace import *
|
1667301
|
from __future__ import print_function
__author__ = "<NAME>, <NAME> and <NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class VehicleData():
"""
Class describing the vehicle data corresponding to the timestamp
of the color image
Attributes:
velocity (float): Velocity of the vehicle in m/s
yaw_rate (float): Yaw Rate of the vehicle in rad/s,
counterclockwise
longitude (float): GPS Longitude in Degree
latitude (float): GPS latitude in Degree
"""
def __init__(self):
self.velocity = 0.
self.yaw_rate = 0.
self.longitude = 0.
self.latitude = 0.
def parse_vehicle_data_dict(self, image_dict: dict):
"""
Method loading vehicle data from a yaml file dict
Args:
image_dict (dict): vehicle data dict read from yml
"""
self.velocity = image_dict['velocity']
self.yaw_rate = image_dict['yaw_rate']
self.longitude = image_dict['longitude']
self.latitude = image_dict['latitude']
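# Illustrative (hypothetical) dict as read from the yaml file:
#   image_dict = {'velocity': 8.3, 'yaw_rate': 0.02, 'longitude': 9.17, 'latitude': 48.78}
#   vehicle_data = VehicleData()
#   vehicle_data.parse_vehicle_data_dict(image_dict)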
|
1667334
|
from kubernetes.client.rest import ApiException
import kube_vars as globalvars
import kube_factory as factory
import kube_pod
import re
def CheckContainerLog(Client,PodName,ContainerName,NameSpace,Message,Possization=0):
print("Run CheckContainerLog in kube_log model")
_CoreV1Api = Client
if _CoreV1Api == None:
_CoreV1Api = globalvars.get_value('KubCoreV1Api')
isMatch,matchStr= checkContainerLog(_CoreV1Api, PodName, ContainerName, NameSpace,Message,Possization)
if isMatch:
return True,matchStr
return False,None
def checkContainerLog(Client,PodName,ContainerName,NameSpace,Message,Possization):
if NameSpace == None:
_namespace="default"
else:
_namespace=NameSpace
_message=prehandleMessage(Message)
#detect target pod and find the random name given by Kubernetes
matchPods=detectPodName(Client,NameSpace,PodName)
if len(matchPods) !=0 :
_matchedPod=matchPods[0]
print("find target pod %s" %(_matchedPod))
else:
return False,None
try:
if ContainerName != None:
api_response = Client.read_namespaced_pod_log(_matchedPod, _namespace, container=ContainerName,pretty=True)
else:
api_response = Client.read_namespaced_pod_log(_matchedPod, _namespace,pretty=True)
print(api_response)
isBingo, matchStr= bingoTarget(_message,api_response,Possization)
if isBingo:
return True,matchStr
except ApiException as e:
print("Exception when calling CoreV1Api->read_namespaced_pod_log: %s\n" % e)
return False,None
def prehandleMessage(message):
return message.replace("\'","").replace("\"","")
def detectPodName(Client,NameSpace,PodNamePattern):
return kube_pod.ListPods(Client,NameSpace,PodNamePattern)
def bingoTarget(target,source,posization):
pattern = re.compile(target)
print(target)
match = re.match(pattern, source.replace("\n", ""))
if match is not None:
print("Found pattern %s " %(match.group(posization)))
return True, match.group(posization)
print("did not find target pattern %s" %(target))
return False,None
|
1667343
|
from django.db import models
import requests
class Location(models.Model):
zip_code = models.IntegerField()
latitude = models.DecimalField(blank=True, max_digits=9, decimal_places=6)
longitude = models.DecimalField(blank=True, max_digits=9, decimal_places=6)
def save(self, *args, **kwargs):
r = requests.get(f'https://public.opendatasoft.com/api/records/1.0/search/?dataset=us-zip-code-latitude-and-longitude&q={self.zip_code}&facet=state&facet=timezone&facet=dst')
self.latitude = r.json()['records'][0]['fields']['latitude']
self.longitude = r.json()['records'][0]['fields']['longitude']
super().save(*args, **kwargs)
def __str__(self):
return str(self.zip_code)
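# Illustrative usage (hypothetical ZIP code): saving a Location triggers the geocoding
# lookup, so Location.objects.create(zip_code=90210) stores the latitude/longitude the
# opendatasoft API returns for that ZIP code.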
|
1667372
|
import neural_network_lyapunov.train_utils as train_utils
import unittest
import torch
import numpy as np
def setup_relu(relu_layer_width, params):
assert (isinstance(relu_layer_width, tuple))
dtype = torch.float64
def set_param(linear, param_count):
linear.weight.data = params[param_count:param_count +
linear.in_features *
linear.out_features].clone().reshape(
(linear.out_features,
linear.in_features))
param_count += linear.in_features * linear.out_features
linear.bias.data = params[param_count:param_count +
linear.out_features].clone()
param_count += linear.out_features
return param_count
linear_layers = [None] * len(relu_layer_width)
param_count = 0
for i in range(len(relu_layer_width)):
next_layer_width = relu_layer_width[i+1] if \
i < len(relu_layer_width)-1 else 1
linear_layers[i] = torch.nn.Linear(relu_layer_width[i],
next_layer_width).type(dtype)
if params is None:
pass
else:
param_count = set_param(linear_layers[i], param_count)
layers = [None] * (len(relu_layer_width) * 2 - 1)
for i in range(len(relu_layer_width) - 1):
layers[2 * i] = linear_layers[i]
layers[2 * i + 1] = torch.nn.LeakyReLU(0.2)
layers[-1] = linear_layers[-1]
relu = torch.nn.Sequential(*layers)
return relu
def test_project_gradient(relu, loss1, loss2, mode):
for p in relu.parameters():
if p.grad is not None:
p.grad.data.zero_()
loss1.backward(retain_graph=True)
n1 = torch.cat([p.grad.clone().reshape((-1, )) for p in relu.parameters()])
for p in relu.parameters():
if p.grad is not None:
p.grad.data.zero_()
loss2.backward(retain_graph=True)
n2 = torch.cat([p.grad.clone().reshape((-1, )) for p in relu.parameters()])
for p in relu.parameters():
if p.grad is not None:
p.grad.data.zero_()
need_projection, n1, n2 = train_utils.project_gradient(relu,
loss1,
loss2,
mode,
retain_graph=True)
grad = torch.cat(
[p.grad.clone().reshape((-1, )) for p in relu.parameters()])
if n1 @ n2 < 0:
np.testing.assert_equal(need_projection, True)
n1_perp = n1 - n1 @ n2 / (n2 @ n2) * n2
n2_perp = n2 - n1 @ n2 / (n1 @ n1) * n1
if mode == train_utils.ProjectGradientMode.LOSS1:
np.testing.assert_almost_equal((grad @ n2).item(), 0)
np.testing.assert_allclose((n1 - grad), n1 @ n2 / (n2 @ n2) * n2)
np.testing.assert_allclose(grad, n1_perp)
elif mode == train_utils.ProjectGradientMode.LOSS2:
np.testing.assert_almost_equal((grad @ n1).item(), 0)
np.testing.assert_allclose((n2 - grad), n1 @ n2 / (n1 @ n1) * n1)
np.testing.assert_allclose(grad, n2_perp)
elif mode == train_utils.ProjectGradientMode.BOTH:
np.testing.assert_almost_equal(grad @ n1, n1_perp @ n1_perp)
np.testing.assert_almost_equal(grad @ n2, n2_perp @ n2_perp)
np.testing.assert_allclose(grad, n1_perp + n2_perp)
elif mode == train_utils.ProjectGradientMode.EMPHASIZE_LOSS1:
np.testing.assert_allclose(grad, n1 + n2_perp)
elif mode == train_utils.ProjectGradientMode.EMPHASIZE_LOSS2:
np.testing.assert_allclose(grad, n2 + n1_perp)
else:
raise Exception()
else:
np.testing.assert_equal(need_projection, False)
np.testing.assert_allclose(grad, n1 + n2)
class TestProjectGradient(unittest.TestCase):
def test1(self):
dtype = torch.float64
relu1 = setup_relu((2, 3),
torch.tensor([
0.1, 0.2, 0.3, -0.1, 2.1, 3.2, 0.5, -0.2, 4.5,
1.4, 0.5, 2.5, -2.3
],
dtype=dtype))
relu2 = setup_relu((2, 4),
torch.tensor([
0.1, 0.2, 0.3, -0.1, 2.1, 3.2, 0.5, -0.2, 4.5,
1.4, 0.5, 2.5, -2.3, 4.2, 0.3, 1.5, -0.3
],
dtype=dtype))
x = torch.tensor([2.0, 1.5], dtype=dtype)
for relu in (relu1, relu2):
y = relu(x)
loss1 = y * y
loss2 = y - y * y
loss3 = y + y * y
for mode in list(train_utils.ProjectGradientMode):
# The gradient of loss 1 and loss 2 should have angle > 90
# degrees.
test_project_gradient(relu, loss1, loss2, mode)
# The gradient of loss 1 and loss 3 should have angle < 90
# degrees.
test_project_gradient(relu, loss1, loss3, mode)
test_project_gradient(relu, loss2, loss3, mode)
for mode in (train_utils.ProjectGradientMode.BOTH,
train_utils.ProjectGradientMode.LOSS1,
train_utils.ProjectGradientMode.LOSS2):
# Now project the gradient of loss1 and -loss1, they have
# exact opposite gradient, so the projected gradient is 0.
train_utils.project_gradient(relu,
loss1,
-loss1,
mode,
retain_graph=True)
grad = torch.cat(
[p.grad.reshape((-1, )) for p in relu.parameters()])
np.testing.assert_allclose(grad.detach().numpy(),
np.zeros(grad.shape),
atol=3e-13)
if __name__ == "__main__":
unittest.main()
|
1667377
|
import warnings as test_warnings
from unittest.mock import patch
import pytest
import requests
from rotkehlchen.assets.asset import WORLD_TO_GEMINI
from rotkehlchen.assets.converters import UNSUPPORTED_GEMINI_ASSETS
from rotkehlchen.constants.assets import A_BCH, A_BTC, A_ETH, A_LINK, A_LTC, A_USD
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.errors.asset import UnknownAsset, UnprocessableTradePair, UnsupportedAsset
from rotkehlchen.exchanges.data_structures import AssetMovement, Trade, TradeType
from rotkehlchen.exchanges.gemini import gemini_symbol_to_base_quote
from rotkehlchen.fval import FVal
from rotkehlchen.tests.fixtures.exchanges.gemini import (
SANDBOX_GEMINI_WP_API_KEY,
SANDBOX_GEMINI_WP_API_SECRET,
)
from rotkehlchen.tests.utils.constants import A_PAXG, A_ZEC
from rotkehlchen.tests.utils.mock import MockResponse
from rotkehlchen.types import AssetMovementCategory, Location, Timestamp
from rotkehlchen.utils.misc import ts_now
def test_gemini_validate_key(sandbox_gemini):
"""Test that validate api key works for a correct api key
Uses the Gemini sandbox
"""
result, msg = sandbox_gemini.validate_api_key()
assert result is True
assert msg == ''
@pytest.mark.parametrize('gemini_sandbox_api_secret', [b'<KEY>'])
def test_gemini_wrong_secret(sandbox_gemini):
"""Test that giving wrong api secret is detected
Uses the Gemini sandbox
"""
result, _ = sandbox_gemini.validate_api_key()
assert not result
balances, msg = sandbox_gemini.query_balances()
assert balances is None
assert 'Invalid API Key or API secret' in msg
@pytest.mark.parametrize('gemini_sandbox_api_key', ['fddad'])
def test_gemini_wrong_key(sandbox_gemini):
"""Test that giving wrong api key is detected
Uses the Gemini sandbox
"""
result, _ = sandbox_gemini.validate_api_key()
assert not result
balances, msg = sandbox_gemini.query_balances()
assert balances is None
assert 'Invalid API Key or API secret' in msg
@pytest.mark.parametrize('gemini_test_base_uri', ['https://api.gemini.com'])
def test_gemini_all_symbols_are_known(sandbox_gemini):
"""Test that the gemini trade pairs are all supported by rotki
Use the real gemini API
"""
unsupported_assets = set(UNSUPPORTED_GEMINI_ASSETS)
common_items = unsupported_assets.intersection(set(WORLD_TO_GEMINI.values()))
assert not common_items, f'Gemini assets {common_items} should not be unsupported'
symbols = sandbox_gemini._public_api_query('symbols')
    for symbol in symbols:
        try:
            base, quote = gemini_symbol_to_base_quote(symbol)
        except UnprocessableTradePair as e:
            test_warnings.warn(UserWarning(
                f'UnprocessableTradePair in Gemini. {e}',
            ))
        except UnknownAsset as e:
            test_warnings.warn(UserWarning(
                f'Unknown Gemini asset detected. {e} Symbol: {symbol}',
            ))
        except UnsupportedAsset as e:
            assert str(e).split(' ')[2] in UNSUPPORTED_GEMINI_ASSETS
        else:
            # Only check the pair when decoding succeeded; the warning branches
            # above leave base/quote unset for this symbol.
            assert base is not None
            assert quote is not None
@pytest.mark.parametrize('gemini_sandbox_api_key', [SANDBOX_GEMINI_WP_API_KEY])
@pytest.mark.parametrize('gemini_sandbox_api_secret', [SANDBOX_GEMINI_WP_API_SECRET])
def test_gemini_wrong_key_permissions(sandbox_gemini):
"""Test that using a gemini key that does not have the auditor permission is detected"""
result, _ = sandbox_gemini.validate_api_key()
assert not result
@pytest.mark.parametrize('should_mock_current_price_queries', [False])
def test_gemini_query_balances(sandbox_gemini):
"""Test that querying the balances endpoint works correctly
Uses the Gemini sandbox
"""
balances, msg = sandbox_gemini.query_balances()
assert msg == ''
assert len(balances) == 6
assert balances[A_USD].amount == FVal('723384.71365986583339')
assert balances[A_USD].usd_value == balances[A_USD].amount
assert balances[A_ETH].amount == FVal('59985.07921584')
assert balances[A_ETH].usd_value > ZERO
assert balances[A_LTC].amount == FVal('60000')
assert balances[A_LTC].usd_value > ZERO
assert balances[A_BTC].amount == FVal('2888.7177526197')
assert balances[A_BTC].usd_value > ZERO
assert balances[A_ZEC].amount == FVal('60000')
assert balances[A_ZEC].usd_value > ZERO
assert balances[A_BCH].amount == FVal('60000')
assert balances[A_BCH].usd_value > ZERO
def test_gemini_query_trades(sandbox_gemini):
"""Test that querying the trades endpoint works correctly
Uses the Gemini sandbox
"""
trades = sandbox_gemini.query_trade_history(
start_ts=0,
end_ts=Timestamp(1584881354),
only_cache=False,
)
assert len(trades) == 2
assert trades[0] == Trade(
timestamp=Timestamp(1584720549),
location=Location.GEMINI,
base_asset=A_BTC,
quote_asset=A_USD,
trade_type=TradeType.BUY,
amount=FVal('0.5'),
rate=FVal('6622.63'),
fee=FVal('33.11315'),
fee_currency=A_USD,
link='560627330',
notes='',
)
assert trades[1] == Trade(
timestamp=Timestamp(1584721109),
location=Location.GEMINI,
base_asset=A_ETH,
quote_asset=A_USD,
trade_type=TradeType.SELL,
amount=FVal('1.0'),
rate=FVal('20.0'),
fee=FVal('0.2'),
fee_currency=A_USD,
link='560628883',
notes='',
)
def test_gemini_query_all_trades_pagination(sandbox_gemini):
"""Test that querying the trades endpoint works correctly including
combining results from multiple requests
Uses the Gemini sandbox at which we've made quite a few test trades
"""
trades = sandbox_gemini.query_trade_history(start_ts=0, end_ts=ts_now(), only_cache=False)
identifiers = set()
for trade in trades:
assert trade.link not in identifiers, 'trade included multiple times in the results'
identifiers.add(trade.link)
assert len(trades) == 591
# Taken from the API docs
TRANSFERS_RESPONSE = """[
{
"type":"Deposit",
"status":"Advanced",
"timestampms":1507913541275,
"eid":320013281,
"currency":"USD",
"amount":"36.00",
"method":"ACH"
},
{
"type":"Deposit",
"status":"Advanced",
"timestampms":1499990797452,
"eid":309356152,
"currency":"ETH",
"amount":"100",
"txHash":"605c5fa8bf99458d24d61e09941bc443ddc44839d9aaa508b14b296c0c8269b2"
},
{
"type":"Deposit",
"status":"Complete",
"timestampms":1495550176562,
"eid":298112782,
"currency":"BTC",
"amount":"1500",
"txHash":"163eeee4741f8962b748289832dd7f27f754d892f5d23bf3ea6fba6e350d9ce3",
"outputIdx":0
},
{
"type":"Deposit",
"status":"Advanced",
"timestampms":1458862076082,
"eid":265799530,
"currency":"USD",
"amount":"500.00",
"method":"ACH"
},
{
"type":"Withdrawal",
"status":"Complete",
"timestampms":1450403787001,
"eid":82897811,
"currency":"BTC",
"amount":"5",
"txHash":"c458b86955b80db0718cfcadbff3df3734a906367982c6eb191e61117b810bbb",
"outputIdx":0,
"destination":"mqjvCtt4TJfQaC7nUgLMvHwuDPXMTEUGqx"
},
{
"type": "Withdrawal",
"status": "Complete",
"timestampms": 1535451930431,
"eid": 341167014,
"currency": "USD",
"amount": "1.00",
"txHash": "7bffd85893ee8e72e31061a84d25c45f2c4537c2f765a1e79feb06a7294445c3",
"destination": "0xd24400ae8BfEBb18cA49Be86258a3C749cf46853"
}
]"""
def mock_gemini_transfers(gemini, original_requests_request):
def mock_requests_requests(method, url, *args, **kwargs):
if 'transfers' not in url:
return original_requests_request(method, url, *args, **kwargs)
return MockResponse(200, TRANSFERS_RESPONSE)
return patch.object(gemini.session, 'request', wraps=mock_requests_requests)
def test_gemini_query_deposits_withdrawals(sandbox_gemini):
"""Test that querying the asset movements endpoint works correctly
Since Gemini sandbox does not support transfers, this uses a mocked call.
"""
transfers_patch = mock_gemini_transfers(sandbox_gemini, requests.post)
with transfers_patch:
movements = sandbox_gemini.query_deposits_withdrawals(
start_ts=0,
end_ts=Timestamp(1584881354),
only_cache=False,
)
assert len(movements) == 6
expected_movements = [AssetMovement(
location=Location.GEMINI,
category=AssetMovementCategory.DEPOSIT,
timestamp=Timestamp(1507913541),
address=None,
transaction_id=None,
asset=A_USD,
amount=FVal('36'),
fee_asset=A_USD,
fee=ZERO,
link='320013281',
), AssetMovement(
location=Location.GEMINI,
category=AssetMovementCategory.DEPOSIT,
address=None,
transaction_id='605c5fa8bf99458d24d61e09941bc443ddc44839d9aaa508b14b296c0c8269b2',
timestamp=Timestamp(1499990797),
asset=A_ETH,
amount=FVal('100'),
fee_asset=A_ETH,
fee=ZERO,
link='309356152',
), AssetMovement(
location=Location.GEMINI,
category=AssetMovementCategory.DEPOSIT,
address=None,
transaction_id='163eeee4741f8962b748289832dd7f27f754d892f5d23bf3ea6fba6e350d9ce3',
timestamp=Timestamp(1495550176),
asset=A_BTC,
amount=FVal('1500'),
fee_asset=A_BTC,
fee=ZERO,
link='298112782',
), AssetMovement(
location=Location.GEMINI,
category=AssetMovementCategory.DEPOSIT,
address=None,
transaction_id=None,
timestamp=Timestamp(1458862076),
asset=A_USD,
amount=FVal('500'),
fee_asset=A_USD,
fee=ZERO,
link='265799530',
), AssetMovement(
location=Location.GEMINI,
category=AssetMovementCategory.WITHDRAWAL,
address='mqjvCtt4TJfQaC7nUgLMvHwuDPXMTEUGqx',
transaction_id='c458b86955b80db0718cfcadbff3df3734a906367982c6eb191e61117b810bbb',
timestamp=Timestamp(1450403787),
asset=A_BTC,
amount=FVal('5'),
fee_asset=A_BTC,
fee=ZERO,
link='82897811',
), AssetMovement(
location=Location.GEMINI,
category=AssetMovementCategory.WITHDRAWAL,
address='0xd24400ae8BfEBb18cA49Be86258a3C749cf46853',
transaction_id='7bffd85893ee8e72e31061a84d25c45f2c4537c2f765a1e79feb06a7294445c3',
timestamp=Timestamp(1535451930),
asset=A_USD,
amount=FVal('1'),
fee_asset=A_USD,
fee=ZERO,
link='341167014',
)]
# The deposits should be returned with the oldest first (so given list is reversed)
assert movements == expected_movements[::-1]
def test_gemini_symbol_to_base_quote():
"""Test edge cases and not yet existing cases of gemini symbol to pair"""
assert gemini_symbol_to_base_quote('btclink') == (A_BTC, A_LINK)
assert gemini_symbol_to_base_quote('linkbtc') == (A_LINK, A_BTC)
assert gemini_symbol_to_base_quote('linkpaxg') == (A_LINK, A_PAXG)
assert gemini_symbol_to_base_quote('paxglink') == (A_PAXG, A_LINK)
with pytest.raises(UnprocessableTradePair):
gemini_symbol_to_base_quote('btclinkxyz')
with pytest.raises(UnprocessableTradePair):
gemini_symbol_to_base_quote('xyzbtclink')
with pytest.raises(UnknownAsset):
gemini_symbol_to_base_quote('zzzbtc')
with pytest.raises(UnknownAsset):
gemini_symbol_to_base_quote('linkzzz')
with pytest.raises(UnknownAsset):
gemini_symbol_to_base_quote('zzzlink')
with pytest.raises(UnknownAsset):
gemini_symbol_to_base_quote('zzzzlink')
with pytest.raises(UnknownAsset):
gemini_symbol_to_base_quote('linkzzzz')
|
1667402
|
import os
import sys
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
if len(sys.argv) != 2:
    print("Usage: python generateResults.py total_nodes")
sys.exit()
# Example Usage: python generateResults.py 100 biscottiParsedResults/ BiscottiLogs fedSysParsedResults/ FedSysLogs
total_nodes = sys.argv[1]
# biscotti_output_file_dir = sys.argv[2]
# biscotti_input_file_dir = sys.argv[3]
# fedsys_output_file_dir = sys.argv[4]
# fedsys_input_file_dir = sys.argv[5]
def parse_logs(numRuns, input_file_directory, output_file_directory):
for i in range(0, numRuns):
fname = input_file_directory + str(i) + "/log_0_" + str(total_nodes) + ".log"
lines = [line.rstrip('\n') for line in open(fname)]
if not os.path.exists(output_file_directory):
os.makedirs(output_file_directory)
outfile = open(output_file_directory + "data" + str(i), "w")
iteration = 0
for line in lines:
idx = line.find("Train Error")
if idx != -1:
timestamp = line[7:20]
outfile.write(str(iteration))
outfile.write(",")
outfile.write(line[(idx + 15):(idx + 22)])
outfile.write(",")
outfile.write(timestamp)
outfile.write("\n")
iteration = iteration + 1
outfile.close()
def get_completion_time(startTime, endTime):
startTime = datetime.strptime(startTime, "%H:%M:%S.%f")
endTime = datetime.strptime(endTime, "%H:%M:%S.%f")
if endTime < startTime:
endTime += timedelta(days=1)
completionTime = endTime - startTime
return str(completionTime.seconds)
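# Worked example (illustrative values): an interval that crosses midnight is
# handled by pushing the end time one day forward before subtracting, so
# get_completion_time("23:59:30.000", "00:00:30.000") returns "60".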
def get_highest_id(id_list):
    # Compare ids numerically; a lexicographic string comparison would rank "9" above "10".
    highest = -1
    for number in id_list:
        if str(number).strip().isdigit() and int(number) > highest:
            highest = int(number)
    return highest
def parse_all_noise(input_file_directory, output_file_directory, numFiles):
for i in range(0, numFiles):
parse_noise(input_file_directory + str(i), output_file_directory, i)
def parse_noise(input_file_directory, output_file_directory, i):
fname = input_file_directory + "/log_0_" + str(total_nodes) + ".log"
lines = [line.rstrip('\n') for line in open(fname)]
if not os.path.exists(output_file_directory):
os.makedirs(output_file_directory)
outfile = open(output_file_directory + "data" + str(i), "w")
noisingNumber = 0
for i in range(0, len(lines)):
line = lines[i]
idx = line.find("Getting noise from")
if idx != -1:
startTime = line[7:20]
for j in range(i, len(lines)):
line2 = lines[j]
if line2.find("Sending update to verifiers") != -1:
endTime = line2[7:20]
completionTime = get_completion_time(startTime, endTime)
outfile.write(str(noisingNumber))
outfile.write(",")
outfile.write(completionTime)
outfile.write("\n")
noisingNumber = noisingNumber + 1
break
outfile.close()
def parse_all_verif(input_file_directory, output_file_directory, numFiles):
for i in range(0, numFiles):
parse_verif(input_file_directory + str(i), output_file_directory, i)
def parse_verif(input_file_directory, output_file_directory, i):
fname = input_file_directory + "/log_0_" + str(total_nodes) + ".log"
lines = [line.rstrip('\n') for line in open(fname)]
if not os.path.exists(output_file_directory):
os.makedirs(output_file_directory)
outfile = open(output_file_directory + "data" + str(i), "w")
verificationNumber = 0
for i in range(0, len(lines)):
line = lines[i]
idx = line.find("Sending update to verifiers")
if idx != -1:
startTime = line[7:20]
for j in range(i, len(lines)):
line2 = lines[j]
if line2.find("Couldn't get enough signatures") != -1 or line2.find("Sending update to miners") != -1:
endTime = line2[7:20]
completionTime = get_completion_time(startTime, endTime)
outfile.write(str(verificationNumber))
outfile.write(",")
outfile.write(completionTime)
outfile.write("\n")
verificationNumber = verificationNumber + 1
break
outfile.close()
def parse_aggr_for_iteration(input_file_directory, iteration, lead_miner):
fname = input_file_directory + "/log_" + str(lead_miner) + "_" + str(total_nodes) + ".log"
lines = [line.rstrip('\n') for line in open(fname)]
for i in range(0, len(lines)):
line = lines[i]
idx = line.find("Got share for " + str(iteration) + ", I am at " + str(iteration))
if idx != -1:
startTime = line[7:20]
for j in range(i, len(lines)):
line2 = lines[j]
if line2.find("Sending block of iteration: " + str(iteration)) != -1:
endTime = line2[7:20]
completionTime = get_completion_time(startTime, endTime)
return completionTime
def parse_all_aggr(input_file_directory, output_file_directory, numFiles):
for i in range(0, numFiles):
parse_aggr(input_file_directory + str(i), output_file_directory, i)
def parse_aggr(input_file_directory, output_file_directory, i):
fname = input_file_directory + "/log_0_" + str(total_nodes) + ".log"
lines = [line.rstrip('\n') for line in open(fname)]
if not os.path.exists(output_file_directory):
os.makedirs(output_file_directory)
outfile = open(output_file_directory + "data" + str(i), "w")
iteration = 0
for i in range(0, len(lines)):
line = lines[i]
idx = line.find("Miners are")
if idx != -1:
miners = line[48:len(line) - 1]
miners = miners.split(" ")
leadMiner = get_highest_id(miners)
completionTime = parse_aggr_for_iteration(input_file_directory, iteration, leadMiner)
outfile.write(str(iteration))
outfile.write(",")
outfile.write(str(completionTime))
outfile.write("\n")
iteration = iteration + 1
outfile.close()
def getAvgTotalTime(parsed_files_directory):
completionTimes = np.zeros(3)
for i in range(0, 3):
df = pd.read_csv((parsed_files_directory + 'data' + str(i)), header=None)
startTime = datetime.strptime(df[2].values[0], "%H:%M:%S.%f")
endTime = datetime.strptime(df[2].values[101], "%H:%M:%S.%f")
if endTime < startTime:
endTime += timedelta(days=1)
timeToComplete = endTime - startTime
completionTimes[i] = timeToComplete.seconds
totalAvg = np.mean(completionTimes, axis=0)
return totalAvg
def getAvg(parsed_files_directory):
completionTime = [[], [], []]
for i in range(0, 3):
df = pd.read_csv((parsed_files_directory + 'data' + str(i)), header=None)
completionTime[i] = np.sum(df[1].values)
totalAvg = np.mean(completionTime)
return totalAvg
if __name__ == '__main__':
parse_logs(3, "./performance-breakdown/40Nodes/", "./performance-breakdown/40Nodes/parsedLogs/")
parse_all_aggr("./performance-breakdown/40Nodes/", "./performance-breakdown/40Nodes/parsedAggr/", 3)
parse_all_verif("./performance-breakdown/40Nodes/", "./performance-breakdown/40Nodes/parsedVerif/", 3)
parse_all_noise("./performance-breakdown/40Nodes/", "./performance-breakdown/40Nodes/parsedNoising/", 3)
aggrAvg100 = getAvg("./performance-breakdown/100Nodes/parsedAggr/") / 100
verifAvg100 = getAvg("./performance-breakdown/100Nodes/parsedVerif/") / 100
noisingAvg100 = getAvg("./performance-breakdown/100Nodes/parsedNoising/") / 100
totalTime100 = getAvgTotalTime("./performance-breakdown/100Nodes/parsedLogs/") / 100
floodingTime100 = totalTime100 - aggrAvg100 - verifAvg100 - noisingAvg100
print("Avg Aggr 100 Nodes: " + str(aggrAvg100))
print("Avg Verif 100 Nodes: " + str(verifAvg100))
print("Avg Noising 100 Nodes: " + str(noisingAvg100))
print("Avg Flooding 100 Nodes: " + str(floodingTime100))
print("Avg total time: " + str(totalTime100))
print("")
aggrAvg80 = getAvg("./performance-breakdown/80Nodes/parsedAggr/") / 100
verifAvg80 = getAvg("./performance-breakdown/80Nodes/parsedVerif/") / 100
noisingAvg80 = getAvg("./performance-breakdown/80Nodes/parsedNoising/") / 100
totalTime80 = getAvgTotalTime("./performance-breakdown/80Nodes/parsedLogs/") / 100
floodingTime80 = totalTime80 - aggrAvg80 - verifAvg80 - noisingAvg80
print("Avg Aggr 80 Nodes: " + str(aggrAvg80))
print("Avg Verif 80 Nodes: " + str(verifAvg80))
print("Avg Noising 80 Nodes: " + str(noisingAvg80))
print("Avg Flooding 80 Nodes: " + str(floodingTime80))
print("Avg total time: " + str(totalTime80))
print("")
aggrAvg60 = getAvg("./performance-breakdown/60Nodes/parsedAggr/") / 100
verifAvg60 = getAvg("./performance-breakdown/60Nodes/parsedVerif/") / 100
noisingAvg60 = getAvg("./performance-breakdown/60Nodes/parsedNoising/") / 100
totalTime60 = getAvgTotalTime("./performance-breakdown/60Nodes/parsedLogs/") / 100
floodingTime60 = totalTime60 - aggrAvg60 - verifAvg60 - noisingAvg60
print("Avg Aggr 60 Nodes: " + str(aggrAvg60))
print("Avg Verif 60 Nodes: " + str(verifAvg60))
print("Avg Noising 60 Nodes: " + str(noisingAvg60))
print("Avg Flooding 60 Nodes: " + str(floodingTime60))
print("Avg total time: " + str(totalTime60))
print("")
aggrAvg40 = getAvg("./performance-breakdown/40Nodes/parsedAggr/") / 100
verifAvg40 = getAvg("./performance-breakdown/40Nodes/parsedVerif/") / 100
noisingAvg40 = getAvg("./performance-breakdown/40Nodes/parsedNoising/") / 100
totalTime40 = getAvgTotalTime("./performance-breakdown/40Nodes/parsedLogs/") / 100
floodingTime40 = totalTime40 - aggrAvg40 - verifAvg40 - noisingAvg40
print("Avg Aggr 40 Nodes: " + str(aggrAvg40))
print("Avg Verif 40 Nodes: " + str(verifAvg40))
print("Avg Noising 40 Nodes: " + str(noisingAvg40))
print("Avg Flooding 40 Nodes: " + str(floodingTime40))
print("Avg total time: " + str(totalTime40))
|
1667405
|
import azure.batch.models as batch_models
from azure.batch.models import BatchErrorException
from aztk.error import AztkError
def clean_up_cluster(spark_client, id):
try:
cluster = spark_client.cluster.get(id)
nodes = [node for node in cluster.nodes]
if not any([
node.state in [batch_models.ComputeNodeState.unusable, batch_models.ComputeNodeState.start_task_failed]
for node in nodes
]):
spark_client.cluster.delete(id=id)
except (BatchErrorException, AztkError) as e:
# pass in the event that the cluster does not exist
print(str(e))
acceptable_failures = [
"The specified job has been marked for deletion and is being garbage collected.",
"The specified pool has been marked for deletion and is being reclaimed."
]
if any(item in str(e) for item in acceptable_failures):
pass
else:
raise e
|
1667418
|
import unittest
import sys
try:
from django.conf import settings
settings.configure(DEBUG=True, TEMPLATE_DEBUG=True)
except ImportError:
pass
from hamlpy.template.loaders import get_haml_loader, TemplateDoesNotExist
class DummyLoader(object):
"""
A dummy template loader that only loads templates from self.templates
"""
templates = {
"in_dict.txt" : "in_dict content",
"loader_test.hamlpy" : "loader_test content",
}
def __init__(self, *args, **kwargs):
self.Loader = self.__class__
def load_template_source(self, template_name, *args, **kwargs):
try:
return (self.templates[template_name], "test:%s" % template_name)
except KeyError:
raise TemplateDoesNotExist(template_name)
class LoaderTest(unittest.TestCase):
"""
Tests for the django template loader.
A dummy template loader is used that loads only from a dictionary of templates.
"""
def setUp(self):
dummy_loader = DummyLoader()
hamlpy_loader_class = get_haml_loader(dummy_loader)
self.hamlpy_loader = hamlpy_loader_class()
def _test_assert_exception(self, template_name):
try:
self.hamlpy_loader.load_template_source(template_name)
except TemplateDoesNotExist:
self.assertTrue(True)
else:
            self.assertTrue(False, '\'%s\' should not be loaded by the hamlpy template loader.' % template_name)
def test_file_not_in_dict(self):
        # not_in_dict.hamlpy doesn't exist, so we're expecting an exception
self._test_assert_exception('not_in_dict.hamlpy')
def test_file_in_dict(self):
        # in_dict.txt is in the dict, but with an extension not supported by
# the loader, so we expect an exception
self._test_assert_exception('in_dict.txt')
def test_file_should_load(self):
# loader_test.hamlpy is in the dict, so it should load fine
try:
self.hamlpy_loader.load_template_source('loader_test.hamlpy')
except TemplateDoesNotExist:
            self.assertTrue(False, '\'loader_test.hamlpy\' should be loaded by the hamlpy template loader, but it was not.')
else:
self.assertTrue(True)
def test_file_different_extension(self):
# loader_test.hamlpy is in dict, but we're going to try
# to load loader_test.txt
# we expect an exception since the extension is not supported by
# the loader
self._test_assert_exception('loader_test.txt')
|
1667452
|
import numpy as np
def topHatFilter(blueMovie,uvMovie,mask,topHat=300):
# Mask (spatial), resize, and rotate
# mask = np.array(Image.open('mask.tif').resize(downsampledSize, Image.BILINEAR).rotate(rotationAngle,Image.NEAREST,True))
rotatedSize3D = blueMovie.shape
# Reshape
blueMovie = blueMovie.reshape((blueMovie.shape[0]*blueMovie.shape[1], blueMovie.shape[2]))
uvMovie = uvMovie.reshape((uvMovie.shape[0]*uvMovie.shape[1], uvMovie.shape[2]))
mask = mask.reshape((mask.shape[0]*mask.shape[1]))
mask = mask>0
mask_indices = np.squeeze(np.argwhere(mask))
# Creating time padding (invert time)
bluePadding = np.concatenate([-blueMovie[mask,topHat:0:-1]+2*blueMovie[mask,0][:,np.newaxis], blueMovie[mask,:]],axis=1)
uvPadding = np.concatenate([-uvMovie[mask,topHat:0:-1]+2*uvMovie[mask,0][:,np.newaxis], uvMovie[mask,:]],axis=1)
# from skimage.morphology import white_tophat
import skimage.morphology
se = skimage.morphology.rectangle(1,topHat) #(1, x) shape important!
blueFiltered = np.empty((mask.sum(), rotatedSize3D[2]+topHat))
uvFiltered = np.empty((mask.sum(), rotatedSize3D[2]+topHat))
for i in range(mask.sum()):
blueFiltered[i,np.newaxis] = skimage.morphology.white_tophat(bluePadding[i,np.newaxis],se)
uvFiltered[i,np.newaxis] = skimage.morphology.white_tophat(uvPadding[i,np.newaxis],se)
blueMovieFiltered = np.zeros(blueMovie.shape)
uvMovieFiltered = np.zeros(uvMovie.shape)
blueMovieFiltered[mask_indices,:] = blueFiltered[:,topHat:]
uvMovieFiltered[mask_indices,:] = uvFiltered[:,topHat:]
blueMovieFiltered = blueMovieFiltered.reshape(rotatedSize3D)
uvMovieFiltered = uvMovieFiltered.reshape(rotatedSize3D)
return blueMovieFiltered,uvMovieFiltered
def twoWavelengthRegression(blueMovieFiltered,uvMovieFiltered,blueMovie,uvMovie,mask):
from scipy import linalg
mask = mask.reshape((mask.shape[0]*mask.shape[1]))
mask = mask>0
mask_indices = np.squeeze(np.argwhere(mask))
rotatedSize3D = blueMovie.shape
blueMovie = blueMovie.reshape((blueMovie.shape[0]*blueMovie.shape[1], blueMovie.shape[2]))
uvMovie = uvMovie.reshape((uvMovie.shape[0]*uvMovie.shape[1], uvMovie.shape[2]))
blueMovieFiltered = blueMovieFiltered.reshape((blueMovieFiltered.shape[0]*blueMovieFiltered.shape[1], blueMovieFiltered.shape[2]))
uvMovieFiltered = uvMovieFiltered.reshape((uvMovieFiltered.shape[0]*uvMovieFiltered.shape[1], uvMovieFiltered.shape[2]))
blueBase = blueMovie - blueMovieFiltered
uvBase = uvMovie - uvMovieFiltered
blueRec = blueMovieFiltered + np.tile(blueBase.mean(axis=1)[:,np.newaxis],(1,rotatedSize3D[2]))
uvRec = uvMovieFiltered + np.tile(uvBase.mean(axis=1)[:,np.newaxis],(1,rotatedSize3D[2]))
beta = np.zeros((len(mask_indices)))
blueReg = np.zeros(blueBase.shape)
for i in range(mask.sum()):
beta[i] = linalg.lstsq(uvRec[mask_indices[i],:][:,np.newaxis], blueRec[mask_indices[i],:][:,np.newaxis])[0][0][0]
blueReg[mask_indices[i],:] = blueMovieFiltered[mask_indices[i],:] - beta[i]*uvMovieFiltered[mask_indices[i],:]
return blueReg
def dFF(blueMovie,uvMovieFiltered,blueReg,mask,topHat=300):
rotatedSize3D = blueMovie.shape
mask = mask.reshape((mask.shape[0]*mask.shape[1]))
mask = mask>0
blueMovie = blueMovie.reshape((blueMovie.shape[0]*blueMovie.shape[1], blueMovie.shape[2]))
uvMovieFiltered = uvMovieFiltered.reshape((uvMovieFiltered.shape[0]*uvMovieFiltered.shape[1], uvMovieFiltered.shape[2]))
blueF = blueMovie[mask,topHat:].mean(axis=1)
blueDFF = np.zeros(blueMovie.shape)
blueDFF[mask,:] = np.divide(blueReg[mask,:],np.tile(blueF[:,np.newaxis],(1,rotatedSize3D[2])))
#uv
uvF = uvMovieFiltered[mask,topHat:].mean(axis=1)
uvDFF = np.zeros(uvMovieFiltered.shape)
uvDFF[mask,:] = np.divide(uvMovieFiltered[mask,:],np.tile(uvF[:,np.newaxis],(1,rotatedSize3D[2])))
return blueDFF,uvDFF
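# Minimal usage sketch (not part of the original pipeline; the tiny synthetic
# random movies, the full-ones mask and topHat=50 below are assumptions used
# purely for illustration).
if __name__ == "__main__":
    h, w, t = 4, 4, 200
    rng = np.random.default_rng(0)
    blue = rng.random((h, w, t)) + 10.0
    uv = rng.random((h, w, t)) + 10.0
    full_mask = np.ones((h, w))
    # Detrend both channels with the rolling white top-hat filter.
    blue_f, uv_f = topHatFilter(blue, uv, full_mask, topHat=50)
    # Regress the UV signal out of the blue signal pixel by pixel.
    blue_reg = twoWavelengthRegression(blue_f, uv_f, blue, uv, full_mask)
    # Normalize to dF/F.
    blue_dff, uv_dff = dFF(blue, uv_f, blue_reg, full_mask, topHat=50)
    print(blue_dff.shape, uv_dff.shape)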
|
1667453
|
import numpy as np
from copy import copy, deepcopy
from itertools import product
from envs.env import DeterministicEnv, Direction
class TrainState(object):
'''
state of the environment; describes positions of all objects in the env.
'''
def __init__(self, agent_pos, vase_states, train_pos, train_intact):
"""
agent_pos: (x, y) tuple for the agent's location
vase_states: Dictionary mapping (x, y) tuples to booleans, where True
means that the vase is intact
"""
self.agent_pos = agent_pos
self.vase_states = vase_states
self.train_pos = train_pos
self.train_intact = train_intact
def is_valid(self):
pos = self.agent_pos
# Can't be standing on the vase and have the vase intact
if pos in self.vase_states and self.vase_states[pos]:
return False
# Can't be standing on the train and have the train intact
if pos == self.train_pos and self.train_intact:
return False
return True
def __eq__(self, other):
return isinstance(other, TrainState) and \
self.agent_pos == other.agent_pos and \
self.vase_states == other.vase_states and \
self.train_pos == other.train_pos and \
self.train_intact == other.train_intact
def __hash__(self):
def get_vals(dictionary):
return tuple([dictionary[loc] for loc in sorted(dictionary.keys())])
return hash(self.agent_pos + get_vals(self.vase_states) + self.train_pos + (self.train_intact,))
class TrainEnv(DeterministicEnv):
def __init__(self, spec, compute_transitions=True):
"""
height: Integer, height of the grid. Y coordinates are in [0, height).
width: Integer, width of the grid. X coordinates are in [0, width).
init_state: TrainState, initial state of the environment
vase_locations: List of (x, y) tuples, locations of vases
num_vases: Integer, number of vases
carpet_locations: Set of (x, y) tuples, locations of carpets
feature_locations: List of (x, y) tuples, locations of features
s: TrainState, Current state
nA: Integer, number of actions
"""
self.height = spec.height
self.width = spec.width
self.init_state = deepcopy(spec.init_state)
self.vase_locations = list(self.init_state.vase_states.keys())
self.num_vases = len(self.vase_locations)
self.carpet_locations = set(spec.carpet_locations)
self.feature_locations = list(spec.feature_locations)
self.train_transition = spec.train_transition
self.train_locations = list(self.train_transition.keys())
assert set(self.train_locations) == set(self.train_transition.values())
self.default_action = Direction.get_number_from_direction(Direction.STAY)
self.nA = 5
self.num_features = len(self.s_to_f(self.init_state))
self.reset()
if compute_transitions:
states = self.enumerate_states()
self.make_transition_matrices(
states, range(self.nA), self.nS, self.nA)
self.make_f_matrix(self.nS, self.num_features)
def enumerate_states(self):
all_agent_positions = product(range(self.width), range(self.height))
all_vase_states = map(
lambda vase_vals: dict(zip(self.vase_locations, vase_vals)),
product([True, False], repeat=self.num_vases))
all_states = map(
lambda x: TrainState(*x),
product(all_agent_positions, all_vase_states, self.train_locations, [True, False]))
all_states = filter(lambda state: state.is_valid(), all_states)
state_num = {}
for state in all_states:
if state not in state_num:
state_num[state] = len(state_num)
self.state_num = state_num
self.num_state = {v: k for k, v in self.state_num.items()}
self.nS = len(state_num)
return state_num.keys()
def get_num_from_state(self, state):
return self.state_num[state]
def get_state_from_num(self, num):
return self.num_state[num]
def s_to_f(self, s):
        '''
        Returns features of the state:
        - Number of broken vases
        - Whether the agent is on a carpet
        - Whether the train is broken
        - For each train location, whether the train is at that location
        - For each feature location, whether the agent is on that location
        '''
num_broken_vases = list(s.vase_states.values()).count(False)
carpet_feature = int(s.agent_pos in self.carpet_locations)
train_intact_feature = int(not s.train_intact)
train_pos_features = [int(s.train_pos == pos) for pos in self.train_locations]
loc_features = [int(s.agent_pos == fpos) for fpos in self.feature_locations]
features = train_pos_features + loc_features
features = [num_broken_vases, carpet_feature, train_intact_feature] + features
return np.array(features)
def get_next_state(self, state, action):
'''returns the next state given a state and an action'''
action = int(action)
new_x, new_y = Direction.move_in_direction_number(state.agent_pos, action)
# New position is still in bounds:
if not (0 <= new_x < self.width and 0 <= new_y < self.height):
new_x, new_y = state.agent_pos
new_agent_pos = new_x, new_y
new_vase_states = deepcopy(state.vase_states)
new_train_pos, new_train_intact = state.train_pos, state.train_intact
if state.train_intact:
new_train_pos = self.train_transition[state.train_pos]
# Break the vase and train if appropriate
if new_agent_pos in new_vase_states:
new_vase_states[new_agent_pos] = False
if new_agent_pos == new_train_pos:
new_train_intact = False
return TrainState(new_agent_pos, new_vase_states, new_train_pos, new_train_intact)
def print_state(self, state):
'''Renders the state.'''
h, w = self.height, self.width
canvas = np.zeros(tuple([2*h-1, 3*w+1]), dtype='int8')
# cell borders
for y in range(1, canvas.shape[0], 2):
canvas[y, :] = 1
for x in range(0, canvas.shape[1], 3):
canvas[:, x] = 2
# vases
for x, y in self.vase_locations:
if state.vase_states[(x, y)]:
canvas[2*y, 3*x+1] = 4
else:
canvas[2*y, 3*x+1] = 6
# agent
x, y = state.agent_pos
canvas[2*y, 3*x + 2] = 3
# train
x, y = state.train_pos
if state.train_intact:
canvas[2*y, 3*x + 1] = 5
else:
canvas[2*y, 3*x + 1] = 6
black_color = '\x1b[0m'
purple_background_color = '\x1b[0;35;85m'
for line in canvas:
for char_num in line:
if char_num==0:
print('\u2003', end='')
elif char_num==1:
print('─', end='')
elif char_num==2:
print('│', end='')
elif char_num==3:
print('\x1b[0;33;85m█'+black_color, end='')
elif char_num==4:
print('\x1b[0;32;85m█'+black_color , end='')
elif char_num==5:
print(purple_background_color+'█'+black_color, end='')
elif char_num==6:
print('\033[91m█'+black_color, end='')
print('')
|
1667459
|
from Crypto.PublicKey import RSA
key = RSA.generate(2048)
print("CREDENTIAL_STORAGE_PRIVATE_KEY = " + key.exportKey("DER").__repr__())
print("CREDENTIAL_STORAGE_PUBLIC_KEY = " + key.publickey().exportKey("DER").__repr__())
|
1667460
|
from __future__ import print_function
import json
import os
import uuid
import boto3
import decimal
import sympy
# Converts DynamoDB items to JSON
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
if o % 1 > 0:
return float(o)
else:
return int(o)
return super(DecimalEncoder, self).default(o)
# Writes to this dynamodb
dynamodb = boto3.resource('dynamodb')
TABLE_NAME = os.environ['TABLE_NAME']
def handler(event, context):
#TODO - process the message and perform the business logic
# Write to table
table = dynamodb.Table(TABLE_NAME)
response = table.put_item(
Item={
'job_id': str(uuid.uuid4())
}
)
print("PutItem succeeded:")
print(json.dumps(response, indent=4, cls=DecimalEncoder))
return ''
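# Local smoke test (sketch; assumes AWS credentials and the TABLE_NAME
# environment variable are configured, and writes a real item to the table):
#   if __name__ == "__main__":
#       print(handler({}, None))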
|
1667490
|
import time
import json
import boto3
REGION = ''
AWS_ACCOUNT = ''
DYNAMODB_TABLE = ''
STEP_FUNCTION_ARN = f'arn:aws:states:{REGION}:{AWS_ACCOUNT}:stateMachine:otter-state'
# Example Payload
payload = {
"assets": [
{
"hostname": "panos01.example.com",
"common_name": "panos01.example.com",
"certificate_validation": "True",
"task_definition": "otter-panos-9x-lets-encrypt",
"dns": "xxx (Route53 Hosted Zone ID)"
},
{
"hostname": "f501.example.com",
"common_name": "f501.example.com",
"certificate_validation": "False",
"task_definition": "otter-f5-14x-lets-encrypt",
"dns": "xxx (Route53 Hosted Zone ID)"
}
],
"region": REGION,
"table": DYNAMODB_TABLE
}
string_payload = json.dumps(payload)
if __name__ == "__main__":
sfn_client = boto3.client('stepfunctions')
output = sfn_client.start_execution(
stateMachineArn=STEP_FUNCTION_ARN,
name='otter_{0}'.format(time.time()),
input=string_payload
)
|
1667523
|
import jsonschema
def test_if_schema_is_valid_schema():
# The input needs to be nonempty lists of strings for it to be a valid schema
schema = {}
jsonschema.Draft7Validator.check_schema(schema)
assert False
|
1667594
|
import numpy as np, pandas as pd, random
import tensorflow as tf
from tqdm import tqdm
from matplotlib import pyplot as plt
from agent import BrawlAgent
from env.brawlstars import Brawlstars
from utilities.utilities import log_histogram, log_scalars, variable_summaries, PressKey, ReleaseKey
from utilities.directkeys import B
from keras.backend import set_session
import time, math
EPISODE = 500 # Episode limitation
TRAIN_EVERY_STEPS = 256
BATCH_SIZE = 128 # size of minibatch
# reproducible
random.seed(1992)
np.random.seed(1992)
tf.set_random_seed(1992)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# config.log_device_placement = True
# Reset the graph
tf.reset_default_graph()
sess = tf.InteractiveSession(config=config)
set_session(sess)
def main(isLoad=False):
env = Brawlstars()
agent = BrawlAgent(env)
for i in tqdm(range(EPISODE)):
agent.is_updated_target_net = False
state = agent.env.reset() # To start the process
done = False
agent.replay_buffer.clear()
avg_reward_list = []
attack_list = []
movement_list = []
        previous_reward = -1  # avoids printing the same reward value repeatedly
PressKey(B)
time.sleep(0.3)
ReleaseKey(B)
while done is False:
action = agent.act(state) # Return Format: [movementArray, actionArray]
state, reward, done = agent.env.step(action) # No longer needs action to be passed in
if math.isnan(reward):
continue
# if reward != previous_reward:
# previous_reward = reward
# print(reward)
# actions_list.append(action)
movement_list.append(action[0])
attack_list.append(action[1])
avg_reward_list.append(reward)
if done is False:
next_state = agent.env._getObservation() # Get the next state
agent.perceive(state, action, reward, next_state, done)
if agent.replay_buffer.size() > BATCH_SIZE and env.time_step % TRAIN_EVERY_STEPS == 0:
agent.train_dqn_network(i, batch_size=BATCH_SIZE)
# Update epsilon after every episode
if agent.epsilon > agent.final_epsilon:
agent.epsilon -= (1 - agent.final_epsilon) / (EPISODE/1.2)
# print('[{0}] Average Reward: {1}'.format(i+1, np.mean(avg_reward_list)))
log_histogram(agent.summary_writer, 'reward_dist', avg_reward_list, i)
log_histogram(agent.summary_writer, 'movement_dist', movement_list, i)
log_histogram(agent.summary_writer, 'attack_dist', attack_list, i)
log_scalars(agent.summary_writer, 'avg_reward', np.mean(avg_reward_list), i)
def test():
# Reset the graph
tf.reset_default_graph()
env = Brawlstars()
agent = BrawlAgent(env)
agent.isTest = True
state = agent.env.reset() # To start the process
done = False
while done is False:
action = agent.act(state)
state, reward, done = agent.env.step(action)
if __name__ == '__main__':
main()
# test()
|
1667607
|
import asyncio
import asynctnt
from asynctnt import Response, PushIterator
from asynctnt._testbase import ensure_version
from asynctnt.exceptions import TarantoolDatabaseError, TarantoolNotConnectedError
from tests import BaseTarantoolTestCase
class PushTestCase(BaseTarantoolTestCase):
@ensure_version(min=(1, 10))
async def test__push_invalid_future(self):
with self.assertRaises(ValueError) as e:
PushIterator(asyncio.Future())
self.assertEqual(str(e.exception),
'Future is invalid. Make sure to call with '
'a future returned from a method with '
'push_subscribe=True flag')
@ensure_version(min=(1, 10))
async def test__push_invalid_future_no_flag(self):
res = self.conn.call('async_action')
with self.assertRaises(ValueError) as e:
PushIterator(res)
self.assertEqual(str(e.exception),
'Future is invalid. Make sure to call with '
'a future returned from a method with '
'push_subscribe=True flag')
@ensure_version(min=(1, 10))
async def test__push_correct_res(self):
fut = self.conn.call('async_action', push_subscribe=True)
self.assertEqual(type(fut), asyncio.Future)
try:
it = PushIterator(fut)
self.assertIsInstance(it.response, asynctnt.Response)
except Exception as e:
self.fail(e)
@ensure_version(min=(1, 10))
async def test__push_call_iter(self):
fut = self.conn.call('async_action', push_subscribe=True)
with self.assertRaises(RuntimeError) as e:
for _ in PushIterator(fut):
pass
self.assertEqual(str(e.exception),
'Cannot use iter with PushIterator - use aiter')
@ensure_version(min=(1, 10))
async def test__push_read_all(self):
fut = self.conn.call('async_action', push_subscribe=True)
it = PushIterator(fut)
self.assertFalse(it.response.done(), 'response not done')
result = []
async for entry in it:
result.append(entry[0])
self.assertTrue(it.response.done(), 'response is done')
self.assertListEqual(result, [
'hello_1',
'hello_2',
'hello_3',
'hello_4',
'hello_5'
], 'push values ok')
fut_res = await fut
self.assertIsInstance(fut_res, Response, 'got response')
self.assertEqual(fut_res.code, 0, 'code ok')
self.assertEqual(fut_res.sync, it.response.sync, 'sync ok')
self.assertEqual(fut_res.return_code, 0, 'return code ok')
self.assertEqual(fut_res.body, ['ret'], 'return value ok')
self.assertTrue(fut_res.done(), 'response done')
@ensure_version(min=(1, 10))
async def test__push_read_in_parts(self):
fut = self.conn.call('async_action', push_subscribe=True)
it = PushIterator(fut)
result = []
i = 0
async for entry in it:
if len(entry) == 0:
self.fail("got 0 length for entry #{}".format(i))
result.append(entry[0])
i += 1
if i == 2:
break
async for entry in it:
if len(entry) == 0:
self.fail("got 0 length for entry #{}".format(i))
result.append(entry[0])
i += 1
self.assertListEqual(result, [
'hello_1',
'hello_2',
'hello_3',
'hello_4',
'hello_5'
], 'push values ok')
fut_res = await fut
self.assertIsInstance(fut_res, Response, 'got response')
self.assertEqual(fut_res.code, 0, 'code ok')
self.assertEqual(fut_res.sync, it.response.sync, 'sync ok')
self.assertEqual(fut_res.return_code, 0, 'return code ok')
self.assertEqual(fut_res.body, ['ret'], 'return value ok')
@ensure_version(min=(1, 10))
async def test__push_read_all_eval(self):
fut = self.conn.eval("""
for i = 1, 5 do
box.session.push('hello_' .. tostring(i))
require'fiber'.sleep(0.01)
end
return 'ret'
""", push_subscribe=True)
it = PushIterator(fut)
result = []
i = 0
async for entry in it:
if len(entry) == 0:
self.fail("got 0 length for entry #{}".format(i))
result.append(entry[0])
i += 1
self.assertListEqual(result, [
'hello_1',
'hello_2',
'hello_3',
'hello_4',
'hello_5'
], 'push values ok')
fut_res = await fut
self.assertIsInstance(fut_res, Response, 'got response')
self.assertEqual(fut_res.code, 0, 'code ok')
self.assertEqual(fut_res.sync, it.response.sync, 'sync ok')
self.assertEqual(fut_res.return_code, 0, 'return code ok')
self.assertEqual(fut_res.body, ['ret'], 'return value ok')
@ensure_version(min=(1, 10))
async def test__push_read_all_various_sleep(self):
fut = self.conn.eval("""
box.session.push('hello_1')
require'fiber'.sleep(0.01)
box.session.push('hello_2')
require'fiber'.sleep(1)
box.session.push('hello_3')
return 'ret'
""", push_subscribe=True)
it = PushIterator(fut)
result = []
i = 0
async for entry in it:
result.append(entry[0])
i += 1
self.assertListEqual(result, [
'hello_1',
'hello_2',
'hello_3'
], 'push values ok')
fut_res = await fut
self.assertIsInstance(fut_res, Response, 'got response')
self.assertEqual(fut_res.code, 0, 'code ok')
self.assertEqual(fut_res.sync, it.response.sync, 'sync ok')
self.assertEqual(fut_res.return_code, 0, 'return code ok')
self.assertEqual(fut_res.body, ['ret'], 'return value ok')
@ensure_version(min=(1, 10))
async def test__push_read_all_error(self):
fut = self.conn.eval("""
for i = 1, 5 do
box.session.push('hello_' .. tostring(i))
require'fiber'.sleep(0.01)
end
return 'ret'
""", push_subscribe=True)
it = PushIterator(fut)
# iter once
await it.__anext__()
# drop tarantool
self.tnt.stop()
try:
with self.assertRaises(TarantoolNotConnectedError):
await asyncio.wait_for(it.__anext__(), timeout=5)
finally:
self.tnt.start()
@ensure_version(min=(1, 10))
async def test__push_read_all_disconnect(self):
fut = self.conn.eval("error('some error')", push_subscribe=True)
it = PushIterator(fut)
with self.assertRaises(TarantoolDatabaseError):
await it.__anext__()
with self.assertRaises(TarantoolDatabaseError):
await fut
@ensure_version(min=(1, 10))
async def test__push_read_all_multiple_iterators(self):
fut = self.conn.eval("box.session.push(1);"
"box.session.push(2);"
"box.session.push(3);", push_subscribe=True)
it1 = PushIterator(fut)
it2 = PushIterator(fut)
async def f(it):
results = []
async for entry in it:
results.append(entry[0])
return results
res1, res2 = await asyncio.gather(f(it1), f(it2))
res1.extend(res2)
res1.sort()
self.assertListEqual(res1, [1, 2, 3])
@ensure_version(min=(1, 10))
async def test__push_read_all_one_iterator(self):
fut = self.conn.eval("box.session.push('hello_1');"
"box.session.push('hello_2');"
"box.session.push('hello_3');",
push_subscribe=True)
it = PushIterator(fut)
results = []
async for entry in it:
results.append(entry[0])
self.assertListEqual(results, [
'hello_1',
'hello_2',
'hello_3',
], 'push ok')
|
1667620
|
import pytest
from .speculos import SpeculosContainer
import base64
import msgpack
from algosdk import encoding, transaction
import algomsgpack
@pytest.fixture(scope='session')
def app(pytestconfig):
return pytestconfig.option.app
@pytest.fixture(scope='session')
def apdu_port(pytestconfig):
return pytestconfig.option.apdu_port
@pytest.fixture(scope='session')
def speculos(app, apdu_port):
speculos = SpeculosContainer(app=app, apdu_port=apdu_port)
speculos.start()
print("Started container")
yield speculos
print("Stopping container")
speculos.stop()
@pytest.fixture(scope='session')
def dongle(speculos, pytestconfig):
dongle = speculos.connect(debug=pytestconfig.option.verbose > 0)
print("Connected dongle")
yield dongle
print("Disconnecting dongle")
dongle.close()
def pytest_addoption(parser, pluginmanager):
parser.addoption("--app", dest="app")
parser.addoption("--apdu_port", dest="apdu_port", type=int, default=9999)
def genTxns():
yield transaction.PaymentTxn(
sender="<KEY>",
receiver="<KEY>",
fee=0.001,
flat_fee=True,
amt=1000000,
first=5667360,
last=5668360,
note="Hello World".encode(),
gen="testnet-v1.0",
gh="SGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiI="
)
def genTxnPayload(txns):
for txn in txns:
        if isinstance(txn, transaction.Transaction):
txn = {"txn": txn.dictify()}
yield base64.b64decode(encoding.msgpack_encode(txn))
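# Sketch of how the helpers above can be combined in a test (hypothetical usage;
# the APDU exchange with the dongle fixture is elided):
#   for payload in genTxnPayload(genTxns()):
#       ...  # send `payload` to the app under test and check the returned signature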
|
1667628
|
import logging
import click
from diana.utils.gateways import suppress_urllib_debug
from diana_cli import __version__
from diana import __version__ as diana_version
from diana_cli.cli import cmds as cli_cmds
from .ssde import ssde
from .classify import classify
@click.group(name="diana-plus")
@click.option('--verbose/--no-verbose', default=False)
@click.version_option(version=(__version__, diana_version),
prog_name=("diana-plus", "python-diana"))
def cli(verbose):
"""Run diana and diana-plus packages using a command-line interface."""
if verbose:
logging.basicConfig(level=logging.DEBUG)
suppress_urllib_debug()
click.echo('Verbose mode is %s' % ('on' if verbose else 'off'))
else:
logging.basicConfig(level=logging.WARNING)
        suppress_urllib_debug()
cmds = [
ssde,
classify,
]
for c in cmds + cli_cmds:
cli.add_command(c)
# Indirection to set envar prefix from setuptools entry pt
def main():
cli(auto_envvar_prefix='DIANA', obj={})
if __name__ == "__main__":
main()
|
1667641
|
ent_linker_out = open('predictions/entity_linker.txt', 'w', encoding='utf-8')
ent_emb_avg_out = open('predictions/entity_emb_avg.txt', 'w')
cosine_distance_out = open('predictions/cos_out.txt', 'w')
predicted_reln = open('predictions/pred_relations.txt', 'w')
predicted_reln_top3 = open('predictions/pred_relations_top3.txt', 'w')
all_ent_emb_avg = open('predictions/ent_avg_all.txt', 'w')
all_kg_cos = open('predictions/kg_cosine_all.txt', 'w')
cand_rank = open('predictions/cand_rank.txt', 'w')
g_amb_f = open('predictions/g_amb.txt', 'w')
all_ent_score_out = open('predictions/all_ent_cand_out.txt', 'w')
predicted_e_spans = open('predictions/predicted_spans.txt', 'w')
|
1667642
|
def test_reusable_fixture(test_client):
_, response = test_client.get("/")
assert response.json == 3
_, response = test_client.get("/")
assert response.json == 4
_, response = test_client.get("/")
assert response.json == 5
|
1667775
|
from django.db import models
class Quote(object):
def __init__(self, character, line, sketch):
self.character = character
self.line = line
self.sketch = sketch
class Snippet(models.Model):
title = models.CharField(max_length=80)
code = models.TextField()
linenos = models.BooleanField(default=False)
language = models.CharField(max_length=80, default='python')
class Meta:
app_label = 'test_app'
class Explosive(object):
def __init__(self, safe, boom):
self.safe = safe
self.boom = boom
|
1667782
|
import torch
from reconstruction.model.model import *
from reconstruction.utils.inference_utils import CropParameters, IntensityRescaler, ImageFilter, ImageWriter, UnsharpMaskFilter
from reconstruction.utils.event_tensor_utils import EventPreprocessor
from reconstruction.utils.image_display_utils import ImageDisplay
from reconstruction.utils.inference_utils import upsample_color_image, merge_channels_into_color_image # for color reconstruction
from reconstruction.utils.timers import CudaTimer, cuda_timers
class ImageReconstructor:
def __init__(self, model, height, width, num_bins, options):
self.model = model
self.use_gpu = options.use_gpu
self.gpu_id = options.gpu_id
self.device = torch.device(self.gpu_id) if self.use_gpu else torch.device('cpu')
self.height = height
self.width = width
self.num_bins = num_bins
self.options = options
self.events = None
self.reconstructed_image = None
self.initialize(self.height, self.width, self.options)
def initialize(self, height, width, options):
if self.options.verbose:
print('== Image reconstruction == ')
print('Image size: {}x{}'.format(self.height, self.width))
self.last_stamp = None
self.no_recurrent = options.no_recurrent
if self.no_recurrent:
print('!!Recurrent connection disabled!!')
self.perform_color_reconstruction = options.color # whether to perform color reconstruction (only use this with the DAVIS346color)
if self.perform_color_reconstruction:
if options.auto_hdr:
print('!!Warning: disabling auto HDR for color reconstruction!!')
options.auto_hdr = False # disable auto_hdr for color reconstruction (otherwise, each channel will be normalized independently)
self.crop = CropParameters(self.width, self.height, self.model.num_encoders)
self.last_states_for_each_channel = {'grayscale': None}
if self.perform_color_reconstruction:
self.crop_halfres = CropParameters(int(width / 2), int(height / 2),
self.model.num_encoders)
for channel in ['R', 'G', 'B', 'W']:
self.last_states_for_each_channel[channel] = None
self.event_preprocessor = EventPreprocessor(options)
self.intensity_rescaler = IntensityRescaler(options)
self.image_filter = ImageFilter(options)
self.unsharp_mask_filter = UnsharpMaskFilter(options, device=self.device)
self.image_writer = ImageWriter(options)
self.image_display = ImageDisplay(options)
def update_reconstruction(self, event_tensor, event_tensor_id=None, stamp=None):
# max duration without events before we reinitialize
self.max_duration_before_reinit_s = 5.0
# we reinitialize if stamp < last_stamp, or if stamp > last_stamp + max_duration_before_reinit_s
if stamp is not None and self.last_stamp is not None:
if stamp < self.last_stamp or stamp > self.last_stamp + self.max_duration_before_reinit_s:
if self.options.verbose:
print('Reinitialization detected!')
self.initialize(self.height, self.width, self.options)
self.last_stamp = stamp
with torch.no_grad():
with CudaTimer('Reconstruction'):
with CudaTimer('NumPy (CPU) -> Tensor (GPU)'):
events = event_tensor.unsqueeze(dim=0)
events = events.to(self.device)
if self.options.use_fp16:
events = events.half()
self.events = self.event_preprocessor(events)
# Resize tensor to [1 x C x crop_size x crop_size] by applying zero padding
events_for_each_channel = {'grayscale': self.crop.pad(events)}
reconstructions_for_each_channel = {}
if self.perform_color_reconstruction:
                    events_for_each_channel['R'] = self.crop_halfres.pad(self.events[:, :, 0::2, 0::2])
                    events_for_each_channel['G'] = self.crop_halfres.pad(self.events[:, :, 0::2, 1::2])
                    events_for_each_channel['W'] = self.crop_halfres.pad(self.events[:, :, 1::2, 0::2])
                    events_for_each_channel['B'] = self.crop_halfres.pad(self.events[:, :, 1::2, 1::2])
# Reconstruct new intensity image for each channel (grayscale + RGBW if color reconstruction is enabled)
for channel in events_for_each_channel.keys():
with CudaTimer('Inference'):
new_predicted_frame, states = self.model(events_for_each_channel[channel],
self.last_states_for_each_channel[channel])
if self.no_recurrent:
self.last_states_for_each_channel[channel] = None
else:
self.last_states_for_each_channel[channel] = states
# Output reconstructed image
crop = self.crop if channel == 'grayscale' else self.crop_halfres
# Unsharp mask (on GPU)
new_predicted_frame = self.unsharp_mask_filter(new_predicted_frame)
# Intensity rescaler (on GPU)
new_predicted_frame = self.intensity_rescaler(new_predicted_frame)
with CudaTimer('Tensor (GPU) -> NumPy (CPU)'):
reconstructions_for_each_channel[channel] = new_predicted_frame[0, 0, crop.iy0:crop.iy1,
crop.ix0:crop.ix1].cpu().numpy()
if self.perform_color_reconstruction:
out = merge_channels_into_color_image(reconstructions_for_each_channel)
else:
out = reconstructions_for_each_channel['grayscale']
# Post-processing, e.g bilateral filter (on CPU)
self.reconstructed_image = self.image_filter(out)
self.image_display(self.reconstructed_image, self.events)
def save_reconstruction(self, event_tensor_id):
self.image_writer(self.reconstructed_image, event_tensor_id, events=self.events)
|
1667804
|
from agpy import azimuthalAverage
from pylab import *
yy,xx = indices([10,10])
rr1 = hypot(xx-5,yy-5)
rr2 = hypot(xx-4.5,yy-4.5)
rr3 = hypot(xx-4.43,yy-4.53)
exp1 = exp(-(rr1**2)/(2.0*5**2))
exp2 = exp(-(rr2**2)/(2.0*5**2))
exp3 = exp(-(rr3**2)/(2.0*5**2))
exp1 /= exp1.max()
exp2 /= exp2.max()
exp3 /= exp3.max()
azr1,azav1 = azimuthalAverage(exp1,center=[5,5],binsize=1.0,returnradii=True)
azr2,azav2 = azimuthalAverage(exp2,center=[4.5,4.5],binsize=1.0,returnradii=True)
azr3,azav3 = azimuthalAverage(exp3,center=[4.43,4.53],binsize=1.0,returnradii=True)
azr1b,azav1b = azimuthalAverage(exp1,center=[5,5],binsize=0.5,returnradii=True)
azr2b,azav2b = azimuthalAverage(exp2,center=[4.5,4.5],binsize=0.5,returnradii=True)
azr3b,azav3b = azimuthalAverage(exp3,center=[4.43,4.53],binsize=0.5,returnradii=True)
figure(2)
subplot(231)
plot(azr1,azav1,'x')
title("Center 5,5, binsize 1")
subplot(234)
plot(azr1b,azav1b,'x')
title("Center 5,5, binsize 0.5")
subplot(232)
plot(azr2,azav2,'x')
title("Center 4.5,4.5, binsize 1")
subplot(235)
plot(azr2b,azav2b,'x')
title("Center 4.5,4.5, binsize 0.5")
subplot(233)
plot(azr3,azav3,'x')
title("Center 4.43,4.53, binsize 1")
subplot(236)
plot(azr3b,azav3b,'x')
title("Center 4.43,4.53, binsize 0.5")
savefig("azimuthalaverage_test_small.png")
yy,xx = indices([100,100])
rr1 = hypot(xx-50,yy-50)
rr2 = hypot(xx-49.5,yy-49.5)
rr3 = hypot(xx-49.43,yy-49.53)
exp1 = exp(-(rr1**2)/(2.0*50**2))
exp2 = exp(-(rr2**2)/(2.0*50**2))
exp3 = exp(-(rr3**2)/(2.0*50**2))
exp1 /= exp1.max()
exp2 /= exp2.max()
exp3 /= exp3.max()
azr1,azav1 = azimuthalAverage(exp1,center=[50,50],binsize=1.0,returnradii=True)
azr2,azav2 = azimuthalAverage(exp2,center=[49.5,49.5],binsize=1.0,returnradii=True)
azr3,azav3 = azimuthalAverage(exp3,center=[49.43,49.53],binsize=1.0,returnradii=True)
azr1b,azav1b = azimuthalAverage(exp1,center=[50,50],binsize=0.5,returnradii=True)
azr2b,azav2b = azimuthalAverage(exp2,center=[49.5,49.5],binsize=0.5,returnradii=True)
azr3b,azav3b = azimuthalAverage(exp3,center=[49.43,49.53],binsize=0.5,returnradii=True)
figure(1)
subplot(231)
plot(azr1,azav1,'x')
title("Center 50,50, binsize 1")
subplot(234)
plot(azr1b,azav1b,'x')
title("Center 50,50, binsize 0.5")
subplot(232)
plot(azr2,azav2,'x')
title("Center 49.5,49.5, binsize 1")
subplot(235)
plot(azr2b,azav2b,'x')
title("Center 49.5,49.5, binsize 0.5")
subplot(233)
plot(azr3,azav3,'x')
title("Center 49.43,49.53, binsize 1")
subplot(236)
plot(azr3b,azav3b,'x')
title("Center 49.43,49.53, binsize 0.5")
savefig("azimuthalaverage_test.png")
azr1,azav1 = azimuthalAverage(exp1,center=[50,50],binsize=1.0,steps=True)
azr2,azav2 = azimuthalAverage(exp2,center=[49.5,49.5],binsize=1.0,steps=True)
azr3,azav3 = azimuthalAverage(exp3,center=[49.43,49.53],binsize=1.0,steps=True)
azr1b,azav1b = azimuthalAverage(exp1,center=[50,50],binsize=0.5,steps=True)
azr2b,azav2b = azimuthalAverage(exp2,center=[49.5,49.5],binsize=0.5,steps=True)
azr3b,azav3b = azimuthalAverage(exp3,center=[49.43,49.53],binsize=0.5,steps=True)
figure(3)
subplot(231)
plot(azr1,azav1)
title("Center 50,50, binsize 1")
subplot(234)
plot(azr1b,azav1b)
title("Center 50,50, binsize 0.5")
subplot(232)
plot(azr2,azav2)
title("Center 49.5,49.5, binsize 1")
subplot(235)
plot(azr2b,azav2b)
title("Center 49.5,49.5, binsize 0.5")
subplot(233)
plot(azr3,azav3)
title("Center 49.43,49.53, binsize 1")
subplot(236)
plot(azr3b,azav3b)
title("Center 49.43,49.53, binsize 0.5")
savefig("azimuthalaverage_test_steps.png")
#import pdb; pdb.set_trace()
|
1667837
|
from rest_framework import routers
from .api.views import AccountViewSet, ReporterViewSet, ArticleViewSet, \
TagViewSet, ReversedArticleViewSet, ReversedReporterViewSet, \
ReversedTagViewSet, ReversedAccountViewSet
# API
router = routers.DefaultRouter()
router.register(r'accounts', AccountViewSet, base_name='account')
router.register(r'reporters', ReporterViewSet, base_name='reporter')
router.register(r'articles', ArticleViewSet, base_name='article')
router.register(r'tags', TagViewSet, base_name='tag')
router.register(r'reversed/tags', ReversedTagViewSet, base_name='reversedtag')
router.register(r'reversed/articles', ReversedArticleViewSet, base_name='reversedarticle')
router.register(r'reversed/reporters', ReversedReporterViewSet, base_name='reversedreporter')
router.register(r'reversed/accounts', ReversedAccountViewSet, base_name='reversedaccount')
urlpatterns = router.urls
|
1667845
|
import os
import operator
import hashlib
import sys
import random
import requests
import randomcolor
import numpy
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
from scipy.stats import gaussian_kde
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def getcolor(s):
random.seed(int(hashlib.sha256(s.encode('utf-8')).hexdigest(), 16) % 10**8)
return randomcolor.RandomColor().generate()[0]
def plot_data(url,path_to_data):
r = requests.get(url)
if 'data' not in r.json():
raise Exception("problem getting url")
locationSensors = {}
for d in r.json()['data']:
if 'l' not in d or d['l'] == '':
continue
loc = d['l']
if loc not in locationSensors:
locationSensors[loc] = {}
for s in d['s']:
for mac in d['s'][s]:
sensorName = s+'-'+mac
if sensorName not in locationSensors[loc]:
locationSensors[loc][sensorName] = []
locationSensors[loc][sensorName].append(d['s'][s][mac])
# find largest variance
sensorIndex = []
locationIndex = []
for location in locationSensors:
locationIndex.append(location)
for sensorID in locationSensors[location]:
if sensorID not in sensorIndex:
sensorIndex.append(sensorID)
num_locations = len(locationIndex)
num_sensors = len(sensorIndex)
X = numpy.zeros([len(sensorIndex),len(locationSensors)])
for i,location in enumerate(locationIndex):
for j,sensorID in enumerate(sensorIndex):
if sensorID not in locationSensors[location]:
continue
X[j,i] = numpy.median((locationSensors[location][sensorID]))
varianceOfSensorID = {}
for i,row in enumerate(X):
data = []
for v in row:
if v == 0:
continue
data.append(v)
varianceOfSensorID[sensorIndex[i]] = numpy.var(data)
# collect sensor ids that are most meaningful
sensorIDs = []
for i, data in enumerate(
sorted(varianceOfSensorID.items(), key=operator.itemgetter(1),reverse=True)):
if data[1] == 0:
continue
sensorIDs.append(data[0])
if len(sensorIDs) == 10:
break
bins = numpy.linspace(-100, 0, 100)
for location in locationSensors:
pyplot.figure(figsize=(10,4))
for sensorID in sensorIDs:
if sensorID not in locationSensors[location]:
continue
try:
density = gaussian_kde(locationSensors[location][sensorID])
except Exception as e:
continue
density.covariance_factor = lambda : .5
density._compute_covariance()
pyplot.fill(bins,density(bins),alpha=0.2,label=sensorID,facecolor=getcolor(sensorID))
# pyplot.hist(
# locationSensors[location][sensorID],
# bins,
# alpha=0.5,
# label=sensorID)
pyplot.title(location)
pyplot.legend(loc='upper right')
pyplot.savefig(os.path.join(path_to_data,location + ".png"))
pyplot.close()
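# Example call (URL and output directory are hypothetical):
# plot_data('http://localhost:8003/api/v1/data/somefamily', '/tmp/plots')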
|
1667865
|
import threading
thread_data = threading.local()
def set_thread_data(job_id):
if job_id and type(job_id) == int:
thread_data.job_id = job_id
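# Example (hypothetical usage): data stored on `thread_data` is visible only
# to the thread that set it, e.g.
# set_thread_data(42)
# job_id = getattr(thread_data, 'job_id', None)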
|
1667866
|
import math
import unittest
import sycomore
from sycomore.units import *
class TestModel(unittest.TestCase):
def test_pulse(self):
model = sycomore.como.Model(
sycomore.Species(1*s, 0.1*s),
sycomore.Magnetization(0, 0, 1),
[["dummy", sycomore.TimeInterval(0*s)]])
model.apply_pulse(sycomore.Pulse(41*deg, 27*deg))
grid = model.magnetization()
for index, _ in sycomore.GridScanner(grid.origin(), grid.shape()):
if index == sycomore.Index(0):
self.assertAlmostEqual(
grid[index].p , 0.210607912662250-0.413341301933443j)
self.assertAlmostEqual(grid[index].z, 0.754709580222772)
self.assertAlmostEqual(
grid[index].m, 0.210607912662250+0.413341301933443j)
else:
self.assertEqual(grid[index].p, 0)
self.assertAlmostEqual(grid[index].z, 0)
self.assertAlmostEqual(grid[index].m, 0)
def test_time_interval(self):
model = sycomore.como.Model(
sycomore.Species(math.log(2)*Hz, math.log(2)*Hz),
sycomore.Magnetization(0, 0, 1), [
["foo", sycomore.TimeInterval(1*s)],
["bar", sycomore.TimeInterval(1*s)]])
model.apply_pulse(sycomore.Pulse(45*deg, 90*deg))
model.apply_time_interval("foo")
grid = model.magnetization()
for index, _ in sycomore.GridScanner(grid.origin(), grid.shape()):
if index == sycomore.Index(-1, 0):
self.assertEqual(grid[index].p, 0)
self.assertEqual(grid[index].z, 0)
self.assertAlmostEqual(grid[index].m, 0.25)
elif index == sycomore.Index(0, 0):
self.assertEqual(grid[index].p, 0)
self.assertEqual(grid[index].z, 0.5*(1+math.sqrt(2)/2))
self.assertEqual(grid[index].m, 0)
elif index == sycomore.Index(1, 0):
self.assertAlmostEqual(grid[index].p, 0.25)
self.assertEqual(grid[index].z, 0)
self.assertEqual(grid[index].m, 0)
else:
self.assertEqual(grid[index].p , 0)
self.assertAlmostEqual(grid[index].z, 0)
self.assertAlmostEqual(grid[index].m, 0)
model.apply_time_interval("bar")
grid = model.magnetization()
for index, _ in sycomore.GridScanner(grid.origin(), grid.shape()):
if index == sycomore.Index(-1, -1):
self.assertEqual(grid[index].p, 0)
self.assertEqual(grid[index].z, 0)
self.assertAlmostEqual(grid[index].m, 0.125)
elif index == sycomore.Index(0, 0):
self.assertEqual(grid[index].p, 0)
self.assertEqual(grid[index].z, 0.5+0.25*(1+math.sqrt(2)/2))
self.assertEqual(grid[index].m, 0)
elif index == sycomore.Index(1, 1):
self.assertAlmostEqual(grid[index].p, 0.125)
self.assertEqual(grid[index].z, 0)
self.assertEqual(grid[index].m, 0)
else:
self.assertEqual(grid[index].p , 0)
self.assertAlmostEqual(grid[index].z, 0)
self.assertAlmostEqual(grid[index].m, 0)
isochromat = model.isochromat()
self.assertAlmostEqual(isochromat[0], 0.125*math.sqrt(2))
self.assertAlmostEqual(isochromat[1], 0)
self.assertAlmostEqual(isochromat[2], 0.5+0.25*(1+math.sqrt(2)/2))
isochromat = model.isochromat(
{sycomore.Index(0,0), sycomore.Index(-1, -1)})
self.assertAlmostEqual(isochromat[0], 0.125*math.sqrt(2)/2)
self.assertAlmostEqual(isochromat[1], 0)
self.assertAlmostEqual(isochromat[2], 0.5+0.25*(1+math.sqrt(2)/2))
def test_diffusion(self):
model = sycomore.como.Model(
sycomore.Species(0*Hz, 0*Hz, 1*um*um/ms),
sycomore.Magnetization(0, 0, 1), [
["foo", sycomore.TimeInterval(500*ms, 0.1*rad/um)]])
model.apply_pulse(sycomore.Pulse(40*deg, 0*deg))
model.apply_time_interval("foo")
grid = model.magnetization()
for index, _ in sycomore.GridScanner(grid.origin(), grid.shape()):
if index == sycomore.Index(-1):
self.assertEqual(grid[index].p, 0)
self.assertEqual(grid[index].z, 0)
self.assertAlmostEqual(grid[index].m, 0+0.003062528150606j)
elif index == sycomore.Index(0):
self.assertEqual(grid[index].p, 0)
self.assertAlmostEqual(grid[index].z, 0.766044443118978)
self.assertEqual(grid[index].m, 0)
elif index == sycomore.Index(1):
self.assertAlmostEqual(grid[index].p, 0-0.003062528150606j)
self.assertEqual(grid[index].z, 0)
self.assertEqual(grid[index].m, 0)
else:
self.assertEqual(grid[index].p , 0)
self.assertAlmostEqual(grid[index].z, 0)
self.assertAlmostEqual(grid[index].m, 0)
if __name__ == "__main__":
unittest.main()
|
1667875
|
from chalicelib.core import users
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
def update(tenant_id, user_id, role_id, changes):
admin = users.get(user_id=user_id, tenant_id=tenant_id)
if not admin["admin"] and not admin["superAdmin"]:
return {"errors": ["unauthorized"]}
if len(changes.keys()) == 0:
return None
ALLOW_EDIT = ["name", "description", "permissions"]
sub_query = []
for key in changes.keys():
if key in ALLOW_EDIT:
sub_query.append(f"{helper.key_to_snake_case(key)} = %({key})s")
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(f"""\
UPDATE public.roles
SET {" ,".join(sub_query)}
WHERE role_id = %(role_id)s
AND tenant_id = %(tenant_id)s
AND deleted_at ISNULL
AND protected = FALSE
RETURNING *;""",
{"tenant_id": tenant_id, "role_id": role_id, **changes})
)
        row = cur.fetchone()
        if row is None:
            # nothing updated: the role is protected, deleted, or does not exist
            return {"errors": ["role not found or protected"]}
        row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
        return helper.dict_to_camel_case(row)
def create(tenant_id, user_id, name, description, permissions):
admin = users.get(user_id=user_id, tenant_id=tenant_id)
if not admin["admin"] and not admin["superAdmin"]:
return {"errors": ["unauthorized"]}
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""INSERT INTO roles(tenant_id, name, description, permissions)
VALUES (%(tenant_id)s, %(name)s, %(description)s, %(permissions)s::text[])
RETURNING *;""",
{"tenant_id": tenant_id, "name": name, "description": description, "permissions": permissions})
)
row = cur.fetchone()
row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
return helper.dict_to_camel_case(row)
def get_roles(tenant_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""SELECT *
FROM public.roles
where tenant_id =%(tenant_id)s
AND deleted_at IS NULL
ORDER BY role_id;""",
{"tenant_id": tenant_id})
)
rows = cur.fetchall()
for r in rows:
r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
return helper.list_to_camel_case(rows)
def get_role_by_name(tenant_id, name):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""SELECT *
FROM public.roles
where tenant_id =%(tenant_id)s
AND deleted_at IS NULL
AND name ILIKE %(name)s
;""",
{"tenant_id": tenant_id, "name": name})
)
row = cur.fetchone()
if row is not None:
row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
return helper.dict_to_camel_case(row)
def delete(tenant_id, user_id, role_id):
admin = users.get(user_id=user_id, tenant_id=tenant_id)
if not admin["admin"] and not admin["superAdmin"]:
return {"errors": ["unauthorized"]}
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""SELECT 1
FROM public.roles
WHERE role_id = %(role_id)s
AND tenant_id = %(tenant_id)s
AND protected = TRUE
LIMIT 1;""",
{"tenant_id": tenant_id, "role_id": role_id})
)
if cur.fetchone() is not None:
return {"errors": ["this role is protected"]}
cur.execute(
cur.mogrify("""SELECT 1
FROM public.users
WHERE role_id = %(role_id)s
AND tenant_id = %(tenant_id)s
LIMIT 1;""",
{"tenant_id": tenant_id, "role_id": role_id})
)
if cur.fetchone() is not None:
return {"errors": ["this role is already attached to other user(s)"]}
cur.execute(
cur.mogrify("""UPDATE public.roles
SET deleted_at = timezone('utc'::text, now())
WHERE role_id = %(role_id)s
AND tenant_id = %(tenant_id)s
AND protected = FALSE;""",
{"tenant_id": tenant_id, "role_id": role_id})
)
return get_roles(tenant_id=tenant_id)
|
1667876
|
import argparse
import filecmp
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Directory comparison")
parser.add_argument("--recurse", "-r", action="store_true", default=False)
parser.add_argument('dirs', nargs=2)
options = parser.parse_args()
dd = filecmp.dircmp(options.dirs[0], options.dirs[1])
if options.recurse:
dd.report_full_closure()
else:
dd.report()
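# Example invocation (assuming this script is saved as dircmp.py):
# python dircmp.py -r /path/to/dirA /path/to/dirB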
|
1667892
|
from model import DeepJIT
from utils import mini_batches_test
from sklearn.metrics import roc_auc_score
import torch
from tqdm import tqdm
def eval(labels, predicts, thresh=0.5):
TP, FN, FP, TN = 0, 0, 0, 0
    for label, predict in zip(labels, predicts):
        # print(predict)
        if predict >= thresh and label == 1:
            TP += 1
        if predict >= thresh and label == 0:
            FP += 1
        if predict < thresh and label == 1:
            FN += 1
        if predict < thresh and label == 0:
            TN += 1
# print(TP)
P = TP/(TP+FP)
R = TP/(TP+FN)
A = (TP+TN)/len(labels)
    E = FP/(TP+FP)  # false discovery rate: fraction of positive predictions that are wrong
    print('Test data at Threshold %.2f -- Accuracy: %.2f, False Discovery Rate: %.2f, Precision: %.2f, Recall: %.2f' % (thresh, A, E, P, R))
def save_result(ids, labels, predicts, path):
results = []
    for _id, label, predict in zip(ids, labels, predicts):
        results.append('{}\t{}\n'.format(label, predict))
with open(path, 'w', encoding='utf-8') as f:
f.writelines(results)
def evaluation_model(data, params):
ids, pad_msg, pad_code, labels, dict_msg, dict_code = data
batches = mini_batches_test(ids=ids, X_msg=pad_msg, X_code=pad_code, Y=labels)
params.vocab_msg, params.vocab_code = len(dict_msg), len(dict_code)
if len(labels.shape) == 1:
params.class_num = 1
else:
params.class_num = labels.shape[1]
# set up parameters
params.cuda = (not params.no_cuda) and torch.cuda.is_available()
del params.no_cuda
params.filter_sizes = [int(k) for k in params.filter_sizes.split(',')]
model = DeepJIT(args=params)
if torch.cuda.is_available():
model = model.cuda()
model.load_state_dict(torch.load(params.load_model))
model.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
all_id, all_predict, all_label = list(), list(), list()
with torch.no_grad():
for i, (batch) in enumerate(tqdm(batches)):
_id, pad_msg, pad_code, label = batch
if torch.cuda.is_available():
                pad_msg, pad_code, label = torch.tensor(pad_msg).cuda(), torch.tensor(
                    pad_code).cuda(), torch.cuda.FloatTensor(label)
else:
pad_msg, pad_code, label = torch.tensor(pad_msg).long(), torch.tensor(pad_code).long(), torch.tensor(
label).float()
if torch.cuda.is_available():
predict = model.forward(pad_msg, pad_code)
predict = predict.cpu().detach().numpy().tolist()
else:
predict = model.forward(pad_msg, pad_code)
predict = predict.detach().numpy().tolist()
all_predict += predict
all_label += label.tolist()
all_id += _id
# with open('result.txt', 'w', encoding='utf-8') as f:
# results = ['{}, {}\n'.format(label, predict) for label, predict in zip(all_label, all_predict)]
# f.writelines(results)
for thresh in [i/10 for i in range(1,10)]:
try:
eval(all_label, all_predict, thresh=thresh)
except Exception as identifier:
print("No predict larger than %f" % (thresh))
save_result(all_id, all_label, all_predict, params.load_model+'.result')
auc_score = roc_auc_score(y_true=all_label, y_score=all_predict)
print('Test data -- AUC score:', auc_score)
|
1667950
|
import gzip
import sys
import os
import argparse
import subprocess
import config_parser
import re
from binner_analysis import *
from genfunc import *
from checkm_analysis import *
from novel_analysis import *
from kraken_analysis import *
# default: look for the polished contig; if it does not exist, use the unpolished one.
# if the user wants to always use the unpolished contig, specify flag --unpolished SHOULD WE ADD THAT ?
#For problem due to gz file that are uncompress before running opera-ms
def get_long_read_file(read_file):
res_file = read_file
if not os.path.isfile(res_file):
res_file = res_file + ".gz"
if not os.path.isfile(res_file):
exit("Long read file not found : " + read_file + " or " + res_file)
return res_file
#def run_hybrid_binning(contig_file, short_read1, short_read2, assembly_dir, sample_name, nb_thread):
def download_utils_db(db_type):
try:
cmd = "mkdir {}/../utils_db".format(util_dir)
run_exe(cmd, True)
    except Exception:
        pass
#mash_db = "/home/bertrandd/PROJECT_LINK/OPERA_LG/META_GENOMIC_HYBRID_ASSEMBLY/OPERA-MS-DEV/OPERA-MS/genomeDB_Sketch.msh";
if db_type == "read-concordance":
if not os.path.exists(("{}/../utils_db/kraken_db/hash.k2d".format(util_dir))):
cmd = "wget -nc -P {}/../utils_db/ ftp://ftp.ccb.jhu.edu/pub/data/kraken2_dbs/minikraken_8GB_202003.tgz".format(util_dir)
run_exe(cmd, True)
cmd = "tar -xvzf {}/../utils_db/minikraken_8GB_202003.tgz -C {}/../utils_db".format(util_dir, util_dir)
run_exe(cmd, True)
cmd = cmd = "mv {}/../utils_db/minikraken_8GB_20200312 {}/../utils_db/kraken_db".format(util_dir, util_dir)
run_exe(cmd, True)
cmd = "rm {}/../utils_db/minikraken_8GB_202003.tgz".format(util_dir)
run_exe(cmd, True)
if db_type == "novel-species":
cmd = "wget --no-check-certificate -nc -O {}/../utils_db/GTDB.msh https://ndownloader.figshare.com/files/22471499".format(util_dir)
run_exe(cmd, True)
cmd = "wget --no-check-certificate -nc -O {}/../utils_db/MAG.msh https://ndownloader.figshare.com/files/22471505".format(util_dir)
run_exe(cmd, True)
def read_taxonomy_file(genomes_dir, taxonomy, db_genome_dir, genome_list, genome_length):
OUT_LIST = open(genome_list, "w")
OUT_LENGTH = open(genome_length, "w")
FILE = open(taxonomy, "r")
line_list = {}
tax_info = {}
genome = ""
species_name = ""
DIRECTORY_MAX_NB_GENOME = 4000
dir_id = 1
current_db_genomes_dir = db_genome_dir + "_" + str(dir_id)
create_dir(current_db_genomes_dir)
nb_genomes_in_dir = 0
for line in FILE:
#print line
line_list = (line.rstrip('\n')).split("\t")
genome = line_list[0]
tax_info = line_list[1].split(";")
species_name = ""
for t in tax_info:
current_tax = t.split("__")
if current_tax[0] == "s":
species_name = current_tax[1].replace(" ", "_")
if species_name == "":
exit("Malformed taxonomy file: " + taxonomy + "\n" + line)
else:
#
if nb_genomes_in_dir == DIRECTORY_MAX_NB_GENOME:
dir_id += 1
current_db_genomes_dir = db_genome_dir + "_" + str(dir_id)
nb_genomes_in_dir = 0
create_dir(current_db_genomes_dir)
#copy the file in the opera-ms-db directory
novel_genome_name = current_db_genomes_dir + "/" + species_name + "__" + genome #Need to fix this !!!
#Check for gzip file
run_exe("cp {}/{} {}".format(genomes_dir, genome, novel_genome_name), True)
OUT_LIST.write(novel_genome_name + "\n")
#Write the length
OUT_LENGTH.write("{}\t{}\n".format(novel_genome_name, "\t".join([str(x) for x in compute_genome_length(novel_genome_name)])))
nb_genomes_in_dir += 1
OUT_LENGTH.close()
OUT_LIST.close()
FILE.close()
def compute_genome_length(genome):
res = [0, 0]
with gzip.open(genome, "r") as FILE:
for line in FILE:
if not (line[0] == ">"):
res[1] += (len(line)-1)
else:
res[0] += 1
return res
def create_mash_sketch(genome_list, out_file, nb_thread):
run_exe("{}/mash sketch -o {} -p {} -l {}".format(util_dir, out_file, nb_thread, genome_list), True) #other potential parameters -k -s
def opera_ms_db(genomes_dir, taxonomy, db_name, nb_thread):
#Create the output database directory
create_dir(db_name)
genome_db = db_name + "/genomes"
#Read the taxonomy file
#The genome name will be renamed according to the taxonomy file: SPECIES_NAME__GENOME_NAME
genome_list = db_name + "/genomes_list.txt"
genome_size = db_name + "/genomes_length.txt"
read_taxonomy_file(genomes_dir, taxonomy, genome_db, genome_list, genome_size)
#Create mash sketch
create_mash_sketch(genome_list, db_name+"/genomes.msh", nb_thread)
#Create 8Gb kraken db
def run_circular_sequence_identification(assembly_dir):
scaffold_file = assembly_dir + "/intermediate_files/opera_long_read/scaffolds.scaf"
edge_files_dir = assembly_dir + "/intermediate_files/read_mapping/"
ana_dir = assembly_dir + "/circular_sequence"
contig_info_file = assembly_dir + "/contig_info.txt"
contig_file = get_contig_file(assembly_dir)
create_dir(ana_dir)
run_exe(util_dir + "/../bin/detect_circular_scaffold.pl " + contig_file + " " + scaffold_file + " " + edge_files_dir + " " + contig_info_file + " " + ana_dir, True)
def check_software(cmd, tool):
try:
run_exe(cmd, False)
print("{} functioning".format(tool))
except Exception as e:
print("{} *** NOT FUNCTIONING ***".format(tool))
def check_installation():
cmd = "{}/metabat -h 2> /dev/null".format(util_dir)
check_software(cmd, "MetaBAT2")
cmd = "{}/kraken2/kraken2 -v > /dev/null ".format(util_dir)
check_software(cmd, "Kraken2")
cmd = "{}/checkm -h > /dev/null".format(util_dir)
check_software(cmd, "CheckM")
cmd = "perl {}/maxbin2/run_MaxBin.pl -h > /dev/null".format(util_dir)
check_software(cmd, "MaxBin2")
def main(args):
command = args.command
nb_thread = 0
if command == "read-concordance" or command == "circular-sequence" or command == "binning" or command == "bin-evaluation" or command == "mash" :
#Parse the config file
config_dict = read_opera_ms_config_file(args.config)
#Set the number of thread
nb_thread = args.thread
if command == "binning":
bin_method = args.binner
if args.sample_name == None:
sample_name = os.path.basename(os.path.normpath(config_dict["OUTPUT_DIR"]))
else:
sample_name = args.sample_name
#sample_name = config_dict["OUTPUT_DIR"].split("/")[-1]
run_binner(bin_method, sample_name, config_dict["OUTPUT_DIR"], config_dict["ILLUMINA_READ_1"], config_dict["ILLUMINA_READ_2"], nb_thread)
elif command == "bin-evaluation":
checkm_analysis(config_dict["OUTPUT_DIR"], args.binner, nb_thread, args.high_qual_mags, args.medium_qual_mags)
elif command == "read-concordance":
abundance_threshold = args.abundance_threshold
run_kraken2(config_dict["OUTPUT_DIR"], config_dict["ILLUMINA_READ_1"], config_dict["ILLUMINA_READ_2"], get_long_read_file(config_dict["LONG_READ"]), nb_thread, float(abundance_threshold))
elif command == "circular-sequence":
run_circular_sequence_identification(config_dict["OUTPUT_DIR"])
#Command without config file
else:
if command == "novel-species":
run_novel_species_analysis(args.known_species, args.taxonomy_database, args.thread, args.configs, args.binner, args.mags_qual, args.out)
if command == "opera-ms-db":
opera_ms_db(args.genomes_dir, args.taxonomy, args.db_name, args.thread)
elif command == "utils-db":
download_utils_db(args.dbtype)
elif command == "check-dependency":
check_installation()
def run_binner(binner, sample_name, assembly_dir, short_read1, short_read2, nb_thread):
contig_file = get_contig_file(assembly_dir)
if binner == "maxbin2":
run_maxbin2(contig_file, short_read1, assembly_dir, sample_name, nb_thread)
if binner == "metabat2":
run_metabat2(contig_file, short_read1, short_read2, assembly_dir, sample_name, nb_thread)
if binner == "hybrid":
run_hybrid_binning(contig_file, short_read1, short_read2, assembly_dir, sample_name, nb_thread)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
#group = parser.add_mutually_exclusive_group()
#The type of software
subparsers = parser.add_subparsers(help='commands', dest='command')
#this
mandatory = parser.add_argument_group("mandatory arguments")
#opera-db
    opera_db_parser = subparsers.add_parser('opera-ms-db', help='Generate a custom OPERA-MS genome database')
mandatory = opera_db_parser.add_argument_group("mandatory arguments")
opera_db_parser._action_groups[-1].add_argument("-g", "--genomes-dir", required=True, help='Directory that contains genome files')
opera_db_parser._action_groups[-1].add_argument("-x", "--taxonomy", required=True, help='Species-level taxonomy of genomes')
opera_db_parser._action_groups[-1].add_argument("-d", "--db-name", required=True, help='Database name')
opera_db_parser.add_argument("-t", "--thread", help='Number of threads [default: 2]', default=2, type = int)
#kraken
kraken_parser = subparsers.add_parser('read-concordance', parents=[config_parser.parser], help='Compute the abundance profile correlation between long and short-reads')
kraken_parser.add_argument("-a", "--abundance-threshold", default=0.1, help="Lower percentage abundance threshold [default: 0.1]", type=float)
kraken_parser.add_argument("-t", "--thread", help='Number of threads [default: 2]', default=2, type = int)
#binner
binner_parser = subparsers.add_parser('binning', parents=[config_parser.parser], help='Streamline binning of OPERA-MS assembled contigs using MetaBAT2 or MaxBin2')
binner_parser.add_argument("-b", "--binner", required=False, default = "metabat2", choices=["maxbin2", "metabat2"], help='binning method [default: metabat2]' )
binner_parser.add_argument("-s", "--sample-name", required=False, help='Sample name [default: OPERA-MS output folder]')
binner_parser.add_argument("-t", "--thread", help='Number of threads [default: 2]', default=2, type = int)
#checkm
checkm_parser = subparsers.add_parser('bin-evaluation', parents=[config_parser.parser], help='Streamline bin evaluation using CheckM')
checkm_parser.add_argument("-b", "--binner", required=False, default = "metabat2", choices=["maxbin2", "metabat2", "opera_ms_clusters"], help = "Bins for evaluation [default: MetaBat2]")
checkm_parser.add_argument("-H", "--high-qual-mags", default="90,5", help = 'Completeness and contamination thresholds for high quality bins [default: 90,5]', type=str)
checkm_parser.add_argument("-M", "--medium-qual-mags", default="50,10", help = 'Completeness and contamination thresholds for medium quality bins [default: 50,10]', type=str)
checkm_parser.add_argument("-t", "--thread", help='Number of threads [default: 2]', default=2, type = int)
#circular identification
#circular_sequence_parser = subparsers.add_parser('circular-sequence', parents=[config_parser.parser], help='Identify circular sequences')
#novel species
    novel_species_parser = subparsers.add_parser('novel-species', help='Identify the species most closely related to OPERA-MS MAGs and flag novel species')
mandatory = novel_species_parser.add_argument_group("mandatory arguments")
novel_species_parser._action_groups[-1].add_argument("-o", "--out", required=True, help='Output directory')
#
novel_species_parser.add_argument("-k", "--known-species", required=False, default = "{}/../utils_db/MAG.msh".format(util_dir), help=argparse.SUPPRESS)#'Mash sketch of known species reference genomes (default utils_db/small_newgut_segata.msh)')
novel_species_parser.add_argument("-x", "--taxonomy-database", required=False, default = "{}/../utils_db/GTDB.msh".format(util_dir), help=argparse.SUPPRESS) #'Mash sketch of reference genomes with taxonomy info (default utils_db/genomes)')
novel_species_parser.add_argument("-b", "--binner", required=False, default = "metabat2", choices=["maxbin2", "metabat2", "opera_ms_clusters"], help='Bins used for the analysis [default: MetaBat2]')
novel_species_parser.add_argument('configs', metavar='C', nargs='+', help='Path to OPERA-MS configuration file(s)')
#
novel_species_parser.add_argument("-q", "--mags-qual", help='Quality of the MAGS used [default: high]', choices=["high", "medium"], default="high")
novel_species_parser.add_argument("-c", "--cluster-threshold", help='Maximum distance at which 2 genomes are considered to be from the same species [default: 0.05]', default=0.05, type = float)
#
novel_species_parser.add_argument("-t", "--thread", help='Number of threads [default: 2]', default=2, type = int)
#utils-db
    utils_db_parser = subparsers.add_parser('utils-db', help='Download the database required by the utils command')
mandatory = utils_db_parser.add_argument_group("mandatory arguments")
utils_db_parser._action_groups[-1].add_argument("-db", "--dbtype", choices = ["read-concordance", "novel-species"], required=True, help='read-concordance or novel species analysis database')
    #check if the utils software is functional on the current system
    check_install_parser = subparsers.add_parser('check-dependency', help='Check which OPERA-MS-UTILS software is functional on the current system')
args=parser.parse_args()
#print(args.checkm)#print(args.metabat2)
main(args)
|
1667957
|
from pso import pso
from optitestfuns import ackley
import unittest
from numpy import isclose, array
'''Tests for the nD PSO implementation.
To run it please execute the following command in your terminal or cmd
python -m unittest test_pso.py
'''
class PSOfunctionMethodTests(unittest.TestCase):
def test_pso1D(self):
intVar = []
result = pso(ackley, [-5], [5], intVar)
theo_min = array([0])
print(result.exit)
print('x_opt: {}'.format(result.xopt))
print('FO: {:2e}'.format(result.FO))
assert isclose(result.xopt[0], theo_min, atol=1e-3), "ERROR: variable didn't converged to 0"
def test_pso1Dinteger(self):
intVar = [0]
result = pso(ackley, [-5], [5], intVar)
theo_min = array([0])
print(result.exit)
print('x_opt: {}'.format(result.xopt))
print('FO: {:2e}'.format(result.FO))
assert isclose(result.xopt[0], theo_min, atol=1e-3), "ERROR: variable didn't converged to 0"
assert float(result.xopt[0]).is_integer(), "ERROR: variable obtained wasn't an integer"
def test_pso2D(self):
intVar = []
result = pso(ackley, [-5,-5], [5,5], intVar)
theo_min = array([0])
print(result.exit)
print('x_opt: {}'.format(result.xopt))
print('FO: {:2e}'.format(result.FO))
assert isclose(result.xopt[0], theo_min, atol=1e-3), "ERROR: first variable didn't converged to 0"
assert isclose(result.xopt[1], theo_min, atol=1e-3), "ERROR: second variable didn't converged to 0"
# def test_pso2Dinteger(self):
# intVar = [0,1]
# result = pso(ackley, [-5, -5], [5, 5], intVar)
#
# print(result.exit)
# print('x_opt: {}'.format(result.xopt))
# print('FO: {:2e}'.format(result.FO))
#
# assert math.isclose(result.xopt[0], 0, abs_tol=1e-3), "ERROR: first variable didn't converged to 0"
# assert math.isclose(result.xopt[1], 0, abs_tol=1e-3), "ERROR: second variable didn't converged to 0"
# assert float(result.xopt[0]).is_integer(), "ERROR: first variable obtained wasn't an integer"
# assert float(result.xopt[1]).is_integer(), "ERROR: second variable obtained wasn't an integer"
if __name__ == '__main__':
unittest.main()
|
1667965
|
import sys
import unittest
import module_utils.helpers as helpers
sys.modules['ansible.module_utils.helpers'] = helpers
from library.cartridge_get_disabled_instances import count_disabled_instances
def call_count_disabled_instances(
instances_info,
play_hosts=None,
ignore_split_brain=False,
):
module_hostvars = {
instance_name: {
'instance_info': {
'disabled_instances': instance_info['disabled_instances'],
'topology_checksum': instance_info.get('checksum', 1234567890),
},
'disabled': instance_info.get('disabled', False),
}
for instance_name, instance_info
in instances_info.items()
}
if play_hosts is None:
play_hosts = module_hostvars.keys()
return count_disabled_instances({
'module_hostvars': module_hostvars,
'play_hosts': play_hosts,
'ignore_split_brain': ignore_split_brain,
})
class TestCountDisabledInstances(unittest.TestCase):
def setUp(self):
self.maxDiff = None
def test_count_disabled_instances(self):
# Healthy cluster
helpers.WARNINGS = []
res = call_count_disabled_instances({
'instance-1': {'disabled_instances': []},
'instance-2': {'disabled_instances': []},
'instance-3': {'disabled_instances': []},
})
self.assertFalse(res.failed)
self.assertEqual(res.kwargs['cluster'], [])
self.assertEqual(res.kwargs['inventory'], [])
self.assertEqual(helpers.WARNINGS, [])
helpers.WARNINGS = []
res = call_count_disabled_instances({
'instance-1': {'disabled_instances': ['instance-2', 'instance-3'], 'checksum': 2},
'instance-2': {'disabled_instances': [], 'checksum': 1, 'disabled': True},
'instance-3': {'disabled_instances': [], 'checksum': 1},
})
self.assertFalse(res.failed)
self.assertEqual(res.kwargs['cluster'], ['instance-2', 'instance-3'])
self.assertEqual(res.kwargs['inventory'], ['instance-2'])
self.assertEqual(helpers.WARNINGS, [])
helpers.WARNINGS = []
res = call_count_disabled_instances({
'instance-1': {'disabled_instances': None, 'checksum': None, 'disabled': True},
'instance-2': {'disabled_instances': [], 'checksum': 1},
'instance-3': {'disabled_instances': ['instance-2'], 'checksum': 2},
})
self.assertFalse(res.failed)
self.assertEqual(res.kwargs['cluster'], ['instance-2'])
self.assertEqual(res.kwargs['inventory'], ['instance-1'])
self.assertEqual(helpers.WARNINGS, [])
# Split brain
res = call_count_disabled_instances({
'instance-1': {'disabled_instances': ['instance-2', 'instance-3'], 'disabled': True},
'instance-2': {'disabled_instances': []},
'instance-3': {'disabled_instances': []},
}, ignore_split_brain=False)
self.assertTrue(res.failed)
self.assertEqual(
res.msg,
"It seems that you have split brain in your cluster. "
"Set 'cartridge_ignore_split_brain' flag to ignore this error."
)
helpers.WARNINGS = []
res = call_count_disabled_instances({
'instance-1': {'disabled_instances': ['instance-2', 'instance-3'], 'disabled': True},
'instance-2': {'disabled_instances': []},
'instance-3': {'disabled_instances': []},
}, ignore_split_brain=True)
self.assertFalse(res.failed)
self.assertEqual(res.kwargs['cluster'], [])
self.assertEqual(res.kwargs['inventory'], ['instance-1'])
self.assertEqual(helpers.WARNINGS, ["It seems that you have split brain in your cluster."])
helpers.WARNINGS = []
res = call_count_disabled_instances({
'instance-1': {'disabled_instances': ['instance-2', 'instance-3'], 'disabled': True},
'instance-2': {'disabled_instances': ['instance-1']},
'instance-3': {'disabled_instances': ['instance-1']},
}, ignore_split_brain=True)
self.assertFalse(res.failed)
self.assertEqual(res.kwargs['cluster'], ['instance-1'])
self.assertEqual(res.kwargs['inventory'], ['instance-1'])
self.assertEqual(helpers.WARNINGS, ["It seems that you have split brain in your cluster."])
helpers.WARNINGS = []
res = call_count_disabled_instances({
'instance-1': {'disabled_instances': ['instance-3', 'instance-4']},
'instance-2': {'disabled_instances': ['instance-3', 'instance-4']},
'instance-3': {'disabled_instances': ['instance-1', 'instance-2']},
'instance-4': {'disabled_instances': ['instance-1', 'instance-2']},
}, ignore_split_brain=True)
self.assertFalse(res.failed)
self.assertEqual(res.kwargs['cluster'], ['instance-1', 'instance-2', 'instance-3', 'instance-4'])
self.assertEqual(res.kwargs['inventory'], [])
self.assertEqual(helpers.WARNINGS, ["It seems that you have split brain in your cluster."])
# No correct topology config
res = call_count_disabled_instances({
'instance-1': {'disabled_instances': None, 'checksum': None},
'instance-2': {'disabled_instances': [], 'checksum': 1},
'instance-3': {'disabled_instances': [], 'checksum': 2},
})
self.assertTrue(res.failed)
self.assertEqual(res.msg, 'All instances in cluster has different topology configs')
|
1667970
|
import os
from mlstabilitytest.stability.StabilityAnalysis import StabilityAnalysis, EdAnalysis
from shutil import copyfile
here = os.path.abspath(os.path.dirname(__file__))
def main():
models = ['ElFrac', 'Meredig', 'Magpie', 'AutoMat', 'ElemNet', 'Roost',
'CGCNN']
experiments = ['LiMnTMO', 'allMP', 'smact',
'random1', 'random2', 'random3',
'classifier']
training_props = ['Ef', 'Ed']
path_to_ml_data = os.path.join(here, 'ml_data')
for training_prop in training_props:
print('\n____ models trained on %s ____\n' % training_prop)
for experiment in experiments:
print('\n ~~~ %s ~~~\n' % experiment)
experiment_dir = os.path.join(path_to_ml_data, training_prop, experiment)
if (('random' not in experiment) and (training_prop == 'Ed')) or ((experiment != 'classifier') and (training_prop == 'Ef')):
if not os.path.exists(experiment_dir):
os.mkdir(experiment_dir)
for model in models:
print('\n %s ' % model)
process(training_prop, model, experiment, path_to_ml_data)
def process(training_prop, model, experiment, path_to_ml_data):
"""
Args:
training_prop (str) - 'Ef' if models trained on formation energies; 'Ed' if decomposition energies
model (str) - ML model
experiment (str) - 'allMP', 'LiMnTMO', or 'smact'
path_to_ml_data (os.PathLike) - path to ml_data directory in .../TestStabilityML/mlstabilitytest/ml_data
Returns:
Runs all relevant analyses
Prints a summary
"""
if (model == 'CGCNN') and (experiment == 'smact'):
print('CGCNN cannot be applied directly to the SMACT problem because the structures are not known')
return
if ('random' in experiment) and (training_prop == 'Ed'):
print('Random perturbations only apply to models trained on Ef as written')
return
if (experiment == 'classifier') and (training_prop == 'Ef'):
print('Classifier experiment only applies to training on Ed')
return
if (model == 'CGCNN') and (training_prop == 'Ed'):
print('CGCNN not trained on Ed')
return
data_dir = os.path.join(path_to_ml_data, training_prop, experiment, model)
if not os.path.exists(data_dir):
os.mkdir(data_dir)
data_file = 'ml_input.json'
finput = os.path.join(data_dir, data_file)
if 'random' in experiment:
src = finput.replace(experiment, 'allMP')
copyfile(src, finput)
if not os.path.exists(finput):
print('missing data for %s-%s' % (model, experiment))
return
if training_prop == 'Ef':
nprocs = 'all'
obj = StabilityAnalysis(data_dir,
data_file,
experiment,
nprocs=nprocs)
elif training_prop == 'Ed':
obj = EdAnalysis(data_dir,
data_file,
experiment)
else:
raise NotImplementedError
obj.results_summary
print('got results')
return
if __name__ == '__main__':
main()
|
1668095
|
import copy
import dlib
import os
import bz2
import random
from tqdm.notebook import tqdm
import shutil
from utils import image_to_array, load_image, download_data
from utils.face_detection import crop_face, get_face_keypoints_detecting_function
from mask_utils.mask_utils import mask_image
class DataGenerator:
def __init__(self, configuration):
self.configuration = configuration
self.path_to_data = configuration.get('input_images_path')
self.path_to_patterns = configuration.get('path_to_patterns')
self.minimal_confidence = configuration.get('minimal_confidence')
self.hyp_ratio = configuration.get('hyp_ratio')
self.coordinates_range = configuration.get('coordinates_range')
self.test_image_count = configuration.get('test_image_count')
self.train_image_count = configuration.get('train_image_count')
self.train_data_path = configuration.get('train_data_path')
self.test_data_path = configuration.get('test_data_path')
self.predictor_path = configuration.get('landmarks_predictor_path')
self.check_predictor()
self.valid_image_extensions = ('png', 'jpg', 'jpeg')
self.face_keypoints_detecting_fun = get_face_keypoints_detecting_function(self.minimal_confidence)
def check_predictor(self):
""" Check if predictor exists. If not downloads it. """
if not os.path.exists(self.predictor_path):
print('Downloading missing predictor.')
url = self.configuration.get('landmarks_predictor_download_url')
download_data(url, self.predictor_path + '.bz2', 64040097)
print(f'Decompressing downloaded file into {self.predictor_path}')
with bz2.BZ2File(self.predictor_path + '.bz2') as fr, open(self.predictor_path, 'wb') as fw:
shutil.copyfileobj(fr, fw)
def get_face_landmarks(self, image):
"""Compute 68 facial landmarks"""
landmarks = []
image_array = image_to_array(image)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(self.predictor_path)
face_rectangles = detector(image_array)
if len(face_rectangles) < 1:
return None
dlib_shape = predictor(image_array, face_rectangles[0])
for i in range(0, dlib_shape.num_parts):
landmarks.append([dlib_shape.part(i).x, dlib_shape.part(i).y])
return landmarks
def get_files_faces(self):
"""Get path of all images in dataset"""
image_files = []
for dirpath, dirs, files in os.walk(self.path_to_data):
for filename in files:
fname = os.path.join(dirpath, filename)
if fname.endswith(self.valid_image_extensions):
image_files.append(fname)
return image_files
def generate_images(self, image_size=None, test_image_count=None, train_image_count=None):
"""Generate test and train data (images with and without the mask)"""
if image_size is None:
image_size = self.configuration.get('image_size')
if test_image_count is None:
test_image_count = self.test_image_count
if train_image_count is None:
train_image_count = self.train_image_count
if not os.path.exists(self.train_data_path):
os.mkdir(self.train_data_path)
os.mkdir(os.path.join(self.train_data_path, 'inputs'))
os.mkdir(os.path.join(self.train_data_path, 'outputs'))
if not os.path.exists(self.test_data_path):
os.mkdir(self.test_data_path)
os.mkdir(os.path.join(self.test_data_path, 'inputs'))
os.mkdir(os.path.join(self.test_data_path, 'outputs'))
print('Generating testing data')
self.generate_data(test_image_count,
image_size=image_size,
save_to=self.test_data_path)
print('Generating training data')
self.generate_data(train_image_count,
image_size=image_size,
save_to=self.train_data_path)
def generate_data(self, number_of_images, image_size=None, save_to=None):
""" Add masks on `number_of_images` images
if save_to is valid path to folder images are saved there otherwise generated data are just returned in list
"""
inputs = []
outputs = []
if image_size is None:
image_size = self.configuration.get('image_size')
for i, file in tqdm(enumerate(random.sample(self.get_files_faces(), number_of_images)), total=number_of_images):
# Load images
image = load_image(file)
# Detect keypoints and landmarks on face
face_landmarks = self.get_face_landmarks(image)
if face_landmarks is None:
continue
keypoints = self.face_keypoints_detecting_fun(image)
# Generate mask
image_with_mask = mask_image(copy.deepcopy(image), face_landmarks, self.configuration)
# Crop images
cropped_image = crop_face(image_with_mask, keypoints)
cropped_original = crop_face(image, keypoints)
# Resize all images to NN input size
res_image = cropped_image.resize(image_size)
res_original = cropped_original.resize(image_size)
# Save generated data to lists or to folder
if save_to is None:
inputs.append(res_image)
outputs.append(res_original)
else:
res_image.save(os.path.join(save_to, 'inputs', f"{i:06d}.png"))
res_original.save(os.path.join(save_to, 'outputs', f"{i:06d}.png"))
if save_to is None:
return inputs, outputs
def get_dataset_examples(self, n=10, test_dataset=False):
"""
        Returns `n` random images from the dataset. If the `test_dataset` parameter
        is omitted or False, images come from the training part of the dataset.
        If `test_dataset` is True, images come from the testing part of the dataset.
"""
if test_dataset:
data_path = self.test_data_path
else:
data_path = self.train_data_path
images = os.listdir(os.path.join(data_path, 'inputs'))
images = random.sample(images, n)
inputs = [os.path.join(data_path, 'inputs', img) for img in images]
outputs = [os.path.join(data_path, 'outputs', img) for img in images]
return inputs, outputs
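# Minimal usage sketch (the configuration keys are those read in __init__;
# the concrete call values below are hypothetical):
# generator = DataGenerator(configuration)
# generator.generate_images()
# inputs, outputs = generator.get_dataset_examples(n=4, test_dataset=True)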
|
1668138
|
from typing import List
from pygls.lsp.types import Model
class LanguageServerConfiguration(Model): # type: ignore
enable_lint_on_save: bool
enable_code_action: bool
lint_targets: List[str]
format_targets: List[str]
@classmethod
def default(cls) -> "LanguageServerConfiguration":
return cls(
enable_lint_on_save=True,
enable_code_action=True,
lint_targets=["lint"],
format_targets=["format", "lint"],
)
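# Example (hypothetical usage): fall back to the defaults when the client
# sends no initialization options.
# config = LanguageServerConfiguration.default()
# assert config.lint_targets == ["lint"]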
|
1668186
|
import json
from .stitch.stitch import Stitch
import psutil
import traceback
# -------------------------------------------
# Pandoc JSON AST filter
# -------------------------------------------
def safe_spawn(func):
"""
Safely run function: if func spawns child processes they are closed even on python error.
It can be useful when calling Stitch from Atom. For some reason RTerm.exe does not close
and Node.js isn't aware of it. So spawned Node.js process cannot exit.
"""
# noinspection PyBroadException
try:
func()
except Exception:
traceback.print_exc()
procs = psutil.Process().children(recursive=True)
for p in procs:
p.terminate()
gone, still_alive = psutil.wait_procs(procs, timeout=50)
for p in still_alive:
p.kill()
print("Killed process that was still alive after 'timeout=50' from 'terminate()' command.")
def knitty_pandoc_filter(json_ast: str, name: str, filter_to: str, standalone: bool, self_contained: bool,
pandoc_format: str, pandoc_extra_args: list) -> str:
"""
Changes Pandoc JSON AST string
"""
ast = json.loads(json_ast)
stitcher = Stitch(name=name, filter_to=filter_to, standalone=standalone, self_contained=self_contained,
pandoc_format=pandoc_format, pandoc_extra_args=pandoc_extra_args)
def work():
nonlocal ast
ast = stitcher.stitch_ast(ast)
safe_spawn(work)
return json.dumps(ast)
|
1668216
|
import common as c
from config import ssl_dir, os_name
import sys
import xml.etree.ElementTree as ET
c.print('>> Downloading ssl for Qt for {}'.format(os_name))
if os_name == 'linux':
os_url = 'linux_x64'
tool_name = 'tools_openssl_x64'
root_path = 'Tools/OpenSSL/binary'
elif os_name == 'win32':
os_url = 'windows_x86'
tool_name = 'tools_openssl_x86'
root_path = 'Tools/OpenSSL/Win_x86'
elif os_name == 'win64':
os_url = 'windows_x86'
tool_name = 'tools_openssl_x64'
root_path = 'Tools/OpenSSL/Win_x64'
elif os_name == 'macos':
    exit(0)
else:
    c.print('>> Unsupported os_name: {}'.format(os_name))
    exit(1)
base_url = 'https://download.qt.io/online/qtsdkrepository/{}/desktop/{}' \
.format(os_url, tool_name)
updates_file = 'Updates-{}-{}.xml'.format(tool_name, os_name)
c.download(base_url + '/Updates.xml', updates_file)
updates = ET.parse(updates_file)
updates_root = updates.getroot()
url = ''
file_name = ''
for i in updates_root.iter('PackageUpdate'):
name = i.find('Name').text
if not 'qt.tools.openssl' in name:
continue
archives = i.find('DownloadableArchives')
if archives.text is None:
continue
version = i.find('Version').text
url = base_url + '/' + name + '/' + version + archives.text
file_name = archives.text
if len(url) == 0:
c.print('>> No ssl url found')
exit(1)
c.download(url, file_name)
c.extract(file_name, '.')
c.symlink(root_path, ssl_dir)
|
1668239
|
from .settings import ( # noqa
SECRET_KEY,
MIDDLEWARE_CLASSES,
INSTALLED_APPS,
ROOT_URLCONF,
MEDIA_ROOT,
BROKER_URL,
CELERY_RESULT_BACKEND,
WQ_DEFAULT_REPORT_STATUS,
)
SWAP = True
INSTALLED_APPS += ("tests.swap_app",)
WQ_SITE_MODEL = "swap_app.Site"
WQ_RESULT_MODEL = "swap_app.Result"
WQ_EVENTRESULT_MODEL = "swap_app.EventResult"
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'vera_swap_test',
'USER': 'postgres',
}
}
|
1668241
|
from datetime import timedelta
class Config(object):
DEBUG = False
TESTING = False
SQLALCHEMY_DATABASE_URI = ''
APP_NAME = 'ApplicationName'
SECRET_KEY = 'add_secret'
JWT_EXPIRATION_DELTA = timedelta(days=30)
JWT_AUTH_URL_RULE = '/api/v1/auth'
SECURITY_REGISTERABLE = True
SECURITY_RECOVERABLE = True
SECURITY_TRACKABLE = True
SECURITY_PASSWORD_HASH = '<PASSWORD>'
SECURITY_PASSWORD_SALT = '<PASSWORD>'
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = 'mysql://username:password@localhost/db'
class DevelopmentConfig(Config):
SQLALCHEMY_DATABASE_URI = 'sqlite:///data.sqlite'
DEBUG = True
class TestingConfig(Config):
SQLALCHEMY_DATABASE_URI = 'sqlite://'
TESTING = True
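# Example (assuming a standard Flask app object named `app`):
# app.config.from_object(DevelopmentConfig)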
|
1668245
|
import collections
class BasicBlock(object):
def __init__(self, st_instr_id, ed_instr_id, func_instr_id):
super(BasicBlock, self).__init__()
self.name = st_instr_id
self.st_instr_id = st_instr_id
self.ed_instr_id = ed_instr_id
self.func_instr_id = func_instr_id
class CFG(object):
'''
Abbreviations:
- bb: basic block
- bbn: basic block name
'''
def __init__(self):
super(CFG, self).__init__()
self.bbns_of_func = collections.defaultdict(set)
self.bbs = dict()
self.edges = collections.defaultdict(list)
self.predecessors = collections.defaultdict(set)
self.successors = collections.defaultdict(set)
def add_basic_block(self, bb):
self.bbs[bb.name] = bb
self.bbns_of_func[bb.func_instr_id].add(bb.name)
return self
def add_edge(self, src_bb, dst_bb):
'''
src_bb and dst_bb can be strings, which enables adding edges before relevant bbs are created!
'''
src_bbn = src_bb if not isinstance(src_bb, BasicBlock) else src_bb.name
dst_bbn = dst_bb if not isinstance(dst_bb, BasicBlock) else dst_bb.name
# if self.bbs[src_bbn].func_instr_id != self.bbs[dst_bbn].func_instr_id:
# return self
self.edges[src_bbn].append(dst_bbn)
self.predecessors[dst_bbn].add(src_bbn)
self.successors[src_bbn].add(dst_bbn)
return self
def _sort(self):
for func in self.bbns_of_func.keys():
self.bbns_of_func[func] = sorted(self.bbns_of_func[func])
for src_bbn in self.edges.keys():
self.edges[src_bbn] = sorted(self.edges[src_bbn])
return self
def display(self):
self._sort()
for func in sorted(self.bbns_of_func.keys()):
bbns = self.bbns_of_func[func]
            print('Function: %s\nBasic blocks: %s\nCFG:' % (func, ' '.join(map(str, bbns))))
for src_bbn in bbns:
dst_bbns = self.edges[src_bbn]
                print('%s ->%s' % (src_bbn, '' if len(dst_bbns) == 0 else (' ' + ' '.join(map(str, dst_bbns)))))
return self
def _strongconnect(self, bbn):
self.lowlink[bbn] = self.Tarjan_cnt
self.index[bbn] = self.Tarjan_cnt
self.Tarjan_cnt += 1
self.bbns_stack.append(bbn)
        for successor in self.successors[bbn]:
            if successor not in self.index:
                self._strongconnect(successor)
                self.lowlink[bbn] = min(self.lowlink[bbn], self.lowlink[successor])
            elif successor in self.bbns_stack:
                # successor is still on the stack, so it belongs to the current SCR
                self.lowlink[bbn] = min(self.lowlink[bbn], self.index[successor])
if self.index[bbn] == self.lowlink[bbn]:
while True:
w = self.bbns_stack.pop()
self.scrs[bbn].add(w)
if w == bbn:
break
def _Tarjan(self):
'''
Tarjan Algorithm
'''
self.scrs = collections.defaultdict(set)
self.bbns_stack = list()
self.lowlink = dict()
self.index = dict()
self.Tarjan_cnt = 0
for bbn in self.bbs:
if bbn not in self.index:
self._strongconnect(bbn)
for k in self.scrs:
self.scrs[k] = sorted(self.scrs[k])
def SCR_analysis(self):
'''
Perform Strongly Connected Region (SCR) analysis.
        Given a CFG G = (N, E, h), an SCR is a nonempty set of nodes S ⊆ N
        such that, for any q, r ∈ S, there exists a path from q to r and from r to q.
'''
self._Tarjan()
def _calc_heads_tails_of_func(self):
self.heads_of_func = collections.defaultdict(set)
self.tails_of_func = collections.defaultdict(set)
for func, bbns in self.bbns_of_func.items():
for bbn in bbns:
if len(self.predecessors[bbn]) == 0:
self.heads_of_func[func].add(bbn)
if len(self.successors[bbn]) == 0:
self.tails_of_func[func].add(bbn)
def construct(self):
self._calc_heads_tails_of_func()
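if __name__ == '__main__':
    # Minimal sketch: two mutually reachable basic blocks inside one function
    # form a single strongly connected region (all values here are hypothetical).
    cfg = CFG()
    cfg.add_basic_block(BasicBlock(0, 3, 'f'))
    cfg.add_basic_block(BasicBlock(4, 7, 'f'))
    cfg.add_edge(0, 4).add_edge(4, 0)
    cfg.display()
    cfg.SCR_analysis()  # with the on-stack check above, scrs groups 0 and 4 together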
|
1668256
|
class A:
def spam(self):
print('A.spam')
class B(A):
def spam(self):
print('B.spam')
super().spam() # Call parent spam()
if __name__ == '__main__':
b = B()
b.spam()
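    # -> prints 'B.spam' then 'A.spam': super() dispatches along the MRO (B -> A).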
|
1668262
|
from heybooster.helpers.database.mongodb import MongoDBHelper
NAME = "database_name"
URI = "database_uri"
"""
Usage 'with'
"""
with MongoDBHelper(uri=URI, database=NAME) as db:
result = db.find_one('test_collection', query={'email': '<EMAIL>'})
result = db.find('test_collection', query={'email': '<EMAIL>'})
db.insert('test_collection', query={'email': '<EMAIL>'})
db.insert('test_collection', query={'email': '<EMAIL>'})
    db.find_and_modify('test_collection', query={'email': '<EMAIL>'}, update={"$set": {'email': '<EMAIL>'}})
"""
Usage for connection manual closing
"""
db = MongoDBHelper(uri=URI, database=NAME)
result = db.find_one('test_collection', query={'email': '<EMAIL>'})
db.close()
print(result)
|
1668281
|
import pymortar
import sys
import os
import pandas as pd
def _query_and_qualify(sensor):
"""
Build query to return zone air temperature measurements and qualify
which site can run this application
Parameters
----------
sensor : sensor name type to evaluate e.g. Zone_Air_Temperature
Returns
-------
qualify_resp : Mortar QualifyResponse object
query : dictionary with query and sensor
"""
# connect to client
client = pymortar.Client()
# initialize container for query information
query = dict()
# define queries for input sensors and setpoints
sensor_query = """SELECT ?sensor WHERE {{
?sensor rdf:type/rdfs:subClassOf* brick:{0}_Sensor .
?sensor bf:isPointOf ?equip .
}};""".format(sensor)
setpoint_query = """SELECT ?sp ?equip WHERE {{
?sp rdf:type/rdfs:subClassOf* brick:{0}_Setpoint .
?sp bf:isPointOf ?equip .
}};""".format(sensor)
# find sites with input sensors and setpoints
qualify_resp = client.qualify([sensor_query, setpoint_query])
if qualify_resp.error != "":
print("ERROR: ", qualify_resp.error)
        sys.exit(1)
# save queries and sensor information
query['query'] = dict()
query['query']['sensor'] = sensor_query
query['query']['setpoint'] = setpoint_query
query['sensor'] = sensor
print("running on {0} sites".format(len(qualify_resp.sites)))
print(qualify_resp.sites)
return qualify_resp, query
def _fetch(qualify_resp, query, eval_start_time, eval_end_time, window=15):
"""
Build the fetch query and define the thermal comfort evaluation time.
Parameters
----------
qualify_resp : Mortar QualifyResponse object
query : dictionary with query and sensor
eval_start_time : start date and time in format (yyyy-mm-ddTHH:MM:SSZ) for the thermal
comfort evaluation period
eval_end_time : end date and time in format (yyyy-mm-ddTHH:MM:SSZ) for the thermal
comfort evaluation period
window : aggregation window in minutes to average the measurement data
Returns
-------
fetch_resp : Mortar FetchResponse object
"""
sensor = query['sensor']
sensor_query = query['query']['sensor']
setpoint_query = query['query']['setpoint']
# build the fetch request
request = pymortar.FetchRequest(
sites=qualify_resp.sites,
views=[
pymortar.View(
name="{}_sensors".format(sensor),
definition=sensor_query,
),
pymortar.View(
name="{}_sps".format(sensor),
definition=setpoint_query,
)
],
dataFrames=[
pymortar.DataFrame(
name="sensors",
aggregation=pymortar.MEAN,
window="{}m".format(window),
timeseries=[
pymortar.Timeseries(
view="{}_sensors".format(sensor),
dataVars=["?sensor"],
)
]
),
pymortar.DataFrame(
name="setpoints",
aggregation=pymortar.MEAN,
window="{}m".format(window),
timeseries=[
pymortar.Timeseries(
view="{}_sps".format(sensor),
dataVars=["?sp"],
)
]
)
],
time=pymortar.TimeParams(
start=eval_start_time,
end=eval_end_time,
)
)
# call the fetch api
client = pymortar.Client()
fetch_resp = client.fetch(request)
print(fetch_resp)
return fetch_resp
def _clean(sensor, fetch_resp):
"""
Clean data by deleting streams with zero values.
Parameters
----------
sensor : sensor name type to evaluate e.g. Zone_Air_Temperature
fetch_resp : Mortar FetchResponse object
Returns
-------
sensor_df : dataframe of nonzero sensor measurements
setpoint_df : dataframe of setpoint values
equipment : equipment related to the sensor measurement
"""
# get all the equipment we will run the analysis for. Equipment relates sensors and setpoints
equipment = [r[0] for r in fetch_resp.query("select distinct equip from {}_sensors".format(sensor))]
# find sensor measurements that aren't just all zeros
valid_sensor_cols = (fetch_resp['sensors'] > 0).any().where(lambda x: x).dropna().index
sensor_df = fetch_resp['sensors'][valid_sensor_cols]
setpoint_df = fetch_resp['setpoints']
return sensor_df, setpoint_df, equipment
def _analyze(query, fetch_resp, th_type='abs', th_diff=0.25, th_time=15, window=15):
"""
Parameters
----------
query : dictionary with query and sensor
fetch_resp : Mortar FetchResponse object
th_type : Type of comparison performed when evaluating sensor measurement against the setpoint value.
Available options are (any input value within list is valid):
['under', 'u', '-', 'neg', '<'] = return sensors that are under setpoint by th_diff for th_time
['over', 'o', '+', 'pos', '>'] = return sensors that are over setpoint by th_diff for th_time
['outbound', 'outbounds', 'ob', '><'] = return sensors that are either under minimum setpoint value by th_diff
or over maximum setpoint value by th_diff for th_time
['bounded', 'inbounds','inbound', 'ib', '<>'] = return sensors that are within minimum setpoint value + th_diff
and maximum setpoint value - th_diff
['abs', ''] (default type) = return sensors that are +/- th_diff of setpoint value.
    th_diff: threshold allowance for determining if a sensor measurement is not adhering to the setpoint,
             in the same units as the selected sensor e.g. if 'over' is selected for th_type and 2 for
             th_diff then 'bad sensors' will return whenever sensor measurement is setpoint + 2.
    th_time : Amount of time in minutes that a sensor measurement needs to meet the selected criteria in order to qualify as 'bad'.
Must be greater or equal and a multiple of the data aggregation window.
window : aggregation window in minutes that the data from sensors and setpoint are in
Returns
-------
None
The app produces a CSV file called `<sensor>_measure_vs_setpoint_<type of analysis>.csv` when run
where '<sensor>' states the sensor type and '<analysis>' states the type of analysis performed.
"""
sensor = query['sensor']
sensor_df, setpoint_df, equipment = _clean(sensor, fetch_resp)
records = []
for idx, equip in enumerate(equipment):
# for each equipment, pull the UUID for the sensor and setpoint
q = """
SELECT sensor_uuid, sp_uuid, {1}_sps.equip, {1}_sps.site
FROM {1}_sensors
LEFT JOIN {1}_sps
ON {1}_sps.equip = {1}_sensors.equip
WHERE {1}_sensors.equip = "{0}";
""".format(equip, sensor)
res = fetch_resp.query(q)
if len(res) == 0:
continue
sensor_col = res[0][0]
setpoint_col = res[0][1]
if sensor_col is None or setpoint_col is None:
continue
if sensor_col not in sensor_df:
print('no sensor', sensor_col)
continue
if setpoint_col not in setpoint_df:
print('no sp', setpoint_col)
continue
# create the dataframe for this pair of sensor and setpoint
df = pd.DataFrame([sensor_df[sensor_col], setpoint_df[setpoint_col]]).T
df.columns = ["{}_sensors".format(sensor), "{}_sps".format(sensor)]
if th_type in ['under', 'u', '-', 'neg', '<']: # if measurement is under sp by th_diff
bad = (df["{}_sensors".format(sensor)]) < (df["{}_sps".format(sensor)] - th_diff)
str_th_type = 'Undershooting'
elif th_type in ['over', 'o', '+', 'pos', '>']: # if measurement is over sp by th_diff
bad = (df["{}_sensors".format(sensor)]) > (df["{}_sps".format(sensor)] + th_diff)
str_th_type = 'Overshooting'
elif th_type in ['outbound', 'outbounds', 'ob', '><']: # if measurement is either below min sp or above max sp by th_diff
max_sp = df["{}_sps".format(sensor)].max()
min_sp = df["{}_sps".format(sensor)].min()
bad_max = (df["{}_sensors".format(sensor)]) > (max_sp + th_diff)
bad_min = (df["{}_sensors".format(sensor)]) < (min_sp - th_diff)
bad = pd.DataFrame([bad_min, bad_max]).all()
str_th_type = 'Exceedance_of_min-max'
elif th_type in ['bounded', 'inbounds','inbound', 'ib', '<>']: # if measurement is either within min and max sp by th_diff
max_sp = df["{}_sps".format(sensor)].max()
min_sp = df["{}_sps".format(sensor)].min()
bad_max = (df["{}_sensors".format(sensor)]) < (max_sp - th_diff)
bad_min = (df["{}_sensors".format(sensor)]) > (min_sp + th_diff)
bad = pd.DataFrame([bad_min, bad_max]).all()
str_th_type = 'Within_min-max'
else:
bad = abs(df["{}_sensors".format(sensor)] - df["{}_sps".format(sensor)]) > th_diff
str_th_type = 'Not_within_setpoint'
if len(df[bad]) == 0: continue
df['same'] = bad.astype(int).diff(1).cumsum()
# this increments every time we get a new run of sensor being below the setpoint
# use this to group up those ranges
df['same2'] = bad.astype(int).diff().ne(0).cumsum()
lal = df[bad].groupby('same2')['same']
# grouped by ranges that meet the predicate (df.sensor + th_diff < df.setpoint)
for g in lal.groups:
idx = list(lal.groups[g])
if len(idx) < 2: continue
data = df[idx[0]:idx[-1]]
            if len(data) >= (th_time / window):  # number of aggregation windows spanning th_time minutes
fmt = {
'site': res[0][3],
'equipment': equip,
'hours': len(data) / (60/window),
'start': idx[0],
'end': idx[-1],
'sensor_val': (data["{}_sps".format(sensor)]).mean(),
'setpoint_val': (data["{}_sensors".format(sensor)]).mean(),
'diff': (data["{}_sps".format(sensor)] - data["{}_sensors".format(sensor)]).mean(),
}
records.append(fmt)
print("{str_th_type} {sensor} for {hours} hours From {start} to {end}, avg diff {diff:.2f}".format(**fmt,
sensor=sensor,
str_th_type=str_th_type))
r = pd.DataFrame(records)
print('##### Saving Results #####')
r.to_csv('{}_measure_vs_setpoint_{}.csv'.format(sensor, str_th_type), index=False)
def evaluate_sensors(sensor, eval_start_time, eval_end_time, th_type, th_diff, th_time, window):
"""
Compare sensor measurements against their respective setpoint values
Parameters
----------
sensor : sensor name type to evaluate e.g. Zone_Air_Temperature
eval_start_time : start date and time in format (yyyy-mm-ddTHH:MM:SSZ) for the thermal
comfort evaluation period
eval_end_time : end date and time in format (yyyy-mm-ddTHH:MM:SSZ) for the thermal
comfort evaluation period
th_type : Type of comparison performed when evaluating sensor measurement against the setpoint value.
Available options are (any input value within list is valid):
['under', 'u', '-', 'neg', '<'] = return sensors that are under setpoint by th_diff for th_time
['over', 'o', '+', 'pos', '>'] = return sensors that are over setpoint by th_diff for th_time
['outbound', 'outbounds', 'ob', '><'] = return sensors that are either under minimum setpoint value by th_diff
or over maximum setpoint value by th_diff for th_time
['bounded', 'inbounds','inbound', 'ib', '<>'] = return sensors that are within minimum setpoint value + th_diff
and maximum setpoint value - th_diff
['abs', ''] (default type) = return sensors that are +/- th_diff of setpoint value.
th_diff: threshold allowance for determining if the sensor measurement is not adhering to the setpoint,
in the same units as the selected sensor e.g. if 'over' is selected for th_type and 2 for
th_diff then 'bad sensors' are reported whenever the sensor measurement is above setpoint + 2.
th_time : Amount of time in minutes that a sensor measurement needs to meet the selected criteria in order to qualify as 'bad'.
Must be greater than or equal to, and a multiple of, the data aggregation window.
window : aggregation window in minutes that the data from sensors and setpoint are in
Returns
-------
None
The app produces a CSV file called `<sensor>_measure_vs_setpoint_<type of analysis>.csv` when run,
where `<sensor>` is the sensor type and `<type of analysis>` is the type of analysis performed.
"""
# build query and determine which sites have the point to do this analysis
qualify_resp, query = _query_and_qualify(sensor)
# find sites with these sensors and setpoints or else exit
if qualify_resp.error != "":
print("ERROR: ", qualify_resp.error)
sys.exit(1)
# build the request to fetch data for qualified sites
fetch_resp = _fetch(qualify_resp, query, eval_start_time, eval_end_time, window)
# analyze and print out measurements/sensors that are not meeting its setpoints
_analyze(query, fetch_resp, th_type, th_diff, th_time, window)
print('##### App has finished evaluating sensors #####')
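# Illustrative sketch (not part of the original app): how the default 'abs'
# threshold check behaves on a tiny synthetic frame. Assumes pandas is already
# imported as `pd` at the top of this module, as used in _analyze().
def _example_abs_threshold(th_diff=2):
    sensors = pd.Series([70.0, 71.0, 75.0, 70.5], name="Zone_Air_Temperature_sensors")
    setpoints = pd.Series([72.0, 72.0, 72.0, 72.0], name="Zone_Air_Temperature_sps")
    df = pd.DataFrame([sensors, setpoints]).T
    # Same predicate as the 'abs' branch in _analyze(): flag rows where the
    # measurement deviates from the setpoint by more than th_diff.
    bad = abs(df[sensors.name] - df[setpoints.name]) > th_diff
    return df[bad]  # only the 75.0 reading (|75 - 72| = 3 > 2) is flagged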
if __name__ == '__main__':
# define input values
sensor = "Zone_Air_Temperature"
eval_start_time = "2018-03-01T00:00:00Z"
eval_end_time = "2018-07-31T00:00:00Z"
th_diff = 2
th_time = 30
th_type = 'abs'
window = 15
# Run the app
evaluate_sensors(sensor, eval_start_time, eval_end_time, th_type, th_diff, th_time, window)
|
1668296
|
import unittest
import json
from discord_webhooks import DiscordWebhooks
class BaseTest(unittest.TestCase):
def test_standard_message(self):
"""
Tests a standard message payload with nothing but content.
"""
webhook = DiscordWebhooks('webhook_url')
webhook.set_content(content='Montezuma')
expected_payload = {
'content': 'Montezuma',
'embeds': [
{
'fields': [],
'image': {},
'author': {},
'thumbnail': {},
'footer': {},
}
]
}
self.assertEqual(webhook.format_payload(), expected_payload)
def test_generic_embed_message(self):
"""
Tests a generic message payload.
"""
webhook = DiscordWebhooks('webhook_url')
webhook.set_content(content='Montezuma', title='Best Cat Ever', description='Seriously', \
url='http://github.com/JamesIves', color=0xF58CBA, timestamp='2018-11-09T04:10:42.039Z')
expected_payload = \
{
'content': 'Montezuma',
'embeds': [
{
'title': 'Best Cat Ever',
'description': 'Seriously',
'url': 'http://github.com/JamesIves',
'color': 16092346,
'timestamp': '2018-11-09T04:10:42.039Z',
'fields': [],
'image': {},
'author': {},
'thumbnail': {},
'footer': {},
}
]
}
self.assertEqual(webhook.format_payload(), expected_payload)
def test_set_image(self):
"""
Tests the set_image method and ensures the data gets added to the payload.
"""
webhook = DiscordWebhooks('webhook_url')
webhook.set_content(content='Montezuma')
webhook.set_image(url='https://avatars1.githubusercontent.com/u/10888441?s=460&v=4')
expected_payload = \
{
'content': 'Montezuma',
'embeds': [
{
'fields': [],
'image': {
'url': 'https://avatars1.githubusercontent.com/u/10888441?s=460&v=4'
},
'author': {},
'thumbnail': {},
'footer': {},
}
]
}
self.assertEqual(webhook.format_payload(), expected_payload)
def test_set_thumbnail(self):
"""
Tests the set_thumbnail method and ensures the data gets added to the payload.
"""
webhook = DiscordWebhooks('webhook_url')
webhook.set_content(content='Montezuma')
webhook.set_thumbnail(url='https://avatars1.githubusercontent.com/u/10888441?s=460&v=4')
expected_payload = \
{
'content': 'Montezuma',
'embeds': [
{
'fields': [],
'image': {},
'author': {},
'thumbnail': {
'url': 'https://avatars1.githubusercontent.com/u/10888441?s=460&v=4'
},
'footer': {},
}
]
}
self.assertEqual(webhook.format_payload(), expected_payload)
def test_set_author(self):
"""
Tests the set_author method and ensures the data gets added to the payload.
"""
webhook = DiscordWebhooks('webhook_url')
webhook.set_content(content='Montezuma')
webhook.set_author(name='<NAME>', url='https://jamesiv.es', icon_url='https://avatars1.githubusercontent.com/u/10888441?s=460&v=4')
expected_payload = \
{
'content': 'Montezuma',
'embeds': [
{
'fields': [],
'image': {},
'author': {
'name': '<NAME>',
'url': 'https://jamesiv.es',
'icon_url': 'https://avatars1.githubusercontent.com/u/10888441?s=460&v=4'
},
'thumbnail': {},
'footer': {},
}
]
}
self.assertEqual(webhook.format_payload(), expected_payload)
def test_set_footer(self):
"""
Tests the set_footer method and ensures the data gets added to the payload.
"""
webhook = DiscordWebhooks('webhook_url')
webhook.set_footer(text='Footer', icon_url='https://avatars1.githubusercontent.com/u/10888441?s=460&v=4')
expected_payload = \
{
'embeds': [
{
'fields': [],
'image': {},
'author': {},
'thumbnail': {},
'footer': {
'text': 'Footer',
'icon_url': 'https://avatars1.githubusercontent.com/u/10888441?s=460&v=4'
},
}
]
}
self.assertEqual(webhook.format_payload(), expected_payload)
def test_add_field(self):
"""
Tests the set_field method and ensures the data gets added to the payload.
"""
webhook = DiscordWebhooks('webhook_url')
webhook.add_field(name='Field1', value='Value1', inline=True)
webhook.add_field(name='Field2', value='Value2', inline=True)
webhook.add_field(name='Field3', value='Value3', inline=False)
# Inline should default to false
webhook.add_field(name='Field4', value='Value4')
expected_payload = \
{
'embeds': [
{
'fields': [
{
'name': 'Field1',
'value': 'Value1',
'inline': True
},
{
'name': 'Field2',
'value': 'Value2',
'inline': True
},
{
'name': 'Field3',
'value': 'Value3',
'inline': False
},
{
'name': 'Field4',
'value': 'Value4',
'inline': False
},
],
'image': {},
'author': {},
'thumbnail': {},
'footer': {},
}
]
}
self.assertEqual(webhook.format_payload(), expected_payload)
def test_complex_embed(self):
"""
Tests a combination of all methods to form a complex payload object.
"""
webhook = DiscordWebhooks('webhook_url')
webhook.set_content(content='Montezuma', title='Best Cat Ever', description='Seriously', \
url='http://github.com/JamesIves', color=0xF58CBA, timestamp='2018-11-09T04:10:42.039Z')
webhook.set_image(url='https://avatars1.githubusercontent.com/u/10888441?s=460&v=4')
webhook.set_thumbnail(url='https://avatars1.githubusercontent.com/u/10888441?s=460&v=4')
webhook.set_author(name='<NAME>', url='https://jamesiv.es', icon_url='https://avatars1.githubusercontent.com/u/10888441?s=460&v=4')
webhook.set_footer(text='Footer', icon_url='https://avatars1.githubusercontent.com/u/10888441?s=460&v=4')
webhook.add_field(name='Field', value='Value!')
self.maxDiff = None
expected_payload = \
{
'content': 'Montezuma',
'embeds': [
{
'title': 'Best Cat Ever',
'description': 'Seriously',
'url': 'http://github.com/JamesIves',
'color': 16092346,
'timestamp': '2018-11-09T04:10:42.039Z',
'fields': [
{
'name': 'Field',
'value': 'Value!',
'inline': False
}
],
'image': {
'url': 'https://avatars1.githubusercontent.com/u/10888441?s=460&v=4'
},
'author': {
'name': '<NAME>',
'url': 'https://jamesiv.es',
'icon_url': 'https://avatars1.githubusercontent.com/u/10888441?s=460&v=4'
},
'thumbnail': {
'url': 'https://avatars1.githubusercontent.com/u/10888441?s=460&v=4'
},
'footer': {
'text': 'Footer',
'icon_url': 'https://avatars1.githubusercontent.com/u/10888441?s=460&v=4'
},
}
]
}
self.assertEqual(webhook.format_payload(), expected_payload)
if __name__ == '__main__':
unittest.main()
|
1668312
|
import re
import os
from pathlib import Path
import gdgen
from gdgen import common
from gdgen import methods
from gdgen import gdtypes
class TemplateWriter:
src = ''
dest = ''
def __init__(self, src, dest):
self.src = src
self.dest = dest
def write_out(self, template={}):
with open(self.src, 'r') as src_template:
text = src_template.read()
for placeholder, value in template.items():
text = text.replace(placeholder, value)
with open(self.dest, 'w') as dest_file:
dest_file.write(text)
class FileWriter:
dest = ''
def __init__(self, dest, enc="utf-8"):
self.dest = dest
self.f = open(dest, "w", encoding=enc)
def write_line(self, line="", ident_count=0):
self.f.write("\t" * ident_count + line + "\n")
def close(self):
self.f.close()
def configure(module):
gdtypes.update(module)
def make_config(module):
config_dest = os.path.join(module.path, "config.py")
config = FileWriter(config_dest)
config.write_line()
ver = module.get_engine_version(False)
if ver['major'] >= 3:
# 3.0 vs 3.1: https://github.com/godotengine/godot/pull/19275
if ver['minor'] >= 1:
config.write_line("def can_build(env, platform):")
else:
config.write_line("def can_build(env):")
config.write_line("return True", 1)
config.write_line()
config.write_line("def configure(env):")
config.write_line("pass", 1)
config.write_line()
if module.get_docs_path():
config.write_line("def get_doc_path():")
config.write_line("return \"" + module.get_docs_path() + "\"", 1)
config.write_line()
config.write_line("def get_doc_classes():")
config.write_line("return [", 1)
for c in module.get_classes():
name = c['name']
if not name:
name = module.get_default_class_name()
config.write_line('\"' + name + '\"' + ',', 2)
config.write_line("]", 1)
config.write_line()
if module.get_icons_path():
config.write_line("def get_icons_path():")
config.write_line("return \"" + module.get_icons_path() + "\"", 1)
config.write_line()
config.close()
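# Illustrative output (assumption: a hypothetical module named "summator" with a
# single class "Summator", docs under "doc_classes", targeting Godot >= 3.1);
# make_config() above would then emit a config.py roughly like:
#
#     def can_build(env, platform):
#         return True
#
#     def configure(env):
#         pass
#
#     def get_doc_path():
#         return "doc_classes"
#
#     def get_doc_classes():
#         return [
#             "Summator",
#         ]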
def make_readme(module):
readme_dest = os.path.join(module.path, "README.md")
readme = FileWriter(readme_dest)
readme.write_line("#" + " " + module.get_name())
readme.write_line()
ver = module.get_engine_version()
if ver == common.engine_latest_version:
ver = "latest"
readme.write_line("## Installation")
readme.write_line()
readme.write_line("Before installing, you must be able to")
readme.write_line("[compile Godot Engine](https://docs.godotengine.org/en/" + ver + "/development/compiling/)")
readme.write_line("from source.")
readme.write_line()
readme.write_line("```bash")
readme.write_line("# Copy the module under directory named " + module.get_internal_name() + " (must be exactly that)")
readme.write_line("cp " + module.get_internal_name() + " <godot_path>/modules/" + module.get_internal_name() + " && cd <godot_path>")
readme.write_line("# Compile the engine manually, for instance:")
readme.write_line("scons platform=linux target=release_debug bits=64")
readme.write_line("```")
readme.close()
def make_license(module, author):
license_src = os.path.join(gdgen.get_path(), common.licenses_path, module.get_license()) + ".txt"
license_dest = os.path.join(module.path, "LICENSE.txt")
import datetime
license_template = {
"__YEAR__" : str(datetime.datetime.now().year),
"__AUTHOR__" : author,
}
license_text = TemplateWriter(license_src, license_dest)
license_text.write_out(license_template)
def make_register_types(module):
# Header
reg_types_header_dest = os.path.join(module.path, "register_types.h")
header = FileWriter(reg_types_header_dest)
header.write_line("void register_" + module.get_internal_name() + "_types();")
header.write_line("void unregister_" + module.get_internal_name() + "_types();")
header.close()
# Source
reg_types_source_dest = os.path.join(module.path, "register_types.cpp")
source = FileWriter(reg_types_source_dest)
source.write_line("#include \"register_types.h\"")
source.write_line()
for c in module.get_classes():
name = methods.to_snake_case(c['name'])
if not name:
name = module.get_default_class_underscore_name()
source.write_line("#include " + '\"' + name + ".h" + '\"')
source.write_line()
source.write_line("void register_" + module.get_internal_name() + "_types() {")
source.write_line()
for c in module.get_classes():
name = c['name']
if not name:
name = module.get_default_class_name()
source.write_line("ClassDB::register_class<" + name + ">();", 1)
source.write_line("}")
source.write_line()
source.write_line("void unregister_" + module.get_internal_name() + "_types() {")
source.write_line()
source.write_line("// nothing to do here", 1)
source.write_line("}")
source.close()
def make_scsub(module):
scsub_dest = os.path.join(module.path, "SCsub")
scsub = FileWriter(scsub_dest)
scsub.write_line("#!/usr/bin/env python")
scsub.write_line()
scsub.write_line("Import('env')")
scsub.write_line("Import('env_modules')")
scsub.write_line()
env_module = "env_" + module.get_internal_name()
scsub.write_line(env_module + " = env_modules.Clone()")
scsub.write_line()
if module.get_thirdparty_path():
scsub.write_line("# Thirdparty source files")
scsub.write_line("thirdparty_dir = '" + module.get_thirdparty_path() + "/'")
scsub.write_line("thirdparty_sources = []")
scsub.write_line("thirdparty_sources += Glob(thirdparty_dir + '**/*.cpp')")
scsub.write_line("thirdparty_sources += Glob(thirdparty_dir + '**/*.c')")
scsub.write_line()
scsub.write_line(env_module + ".Prepend(CPPPATH=[thirdparty_dir])")
scsub.write_line()
scsub.write_line("env_thirdparty = " + env_module + ".Clone()")
scsub.write_line("env_thirdparty.add_source_files(env.modules_sources, thirdparty_sources)")
scsub.write_line("env_thirdparty.disable_warnings()")
scsub.write_line()
ver = module.get_engine_version(False)
cpp_ver = module.get_cpp_version()
if ver['major'] >= 3:
if ver['minor'] >= 2 and cpp_ver == "c++11":
pass # 3.2+ enables C++11 on the whole codebase by default
# https://github.com/godotengine/godot/commit/5dae2ea777da5395cf1b1e9a8bc6abc93f6ae6bb
else:
scsub.write_line("if (not env.msvc):")
scsub.write_line(env_module + ".Prepend(CXXFLAGS=['-std=" + cpp_ver + "'])", 1)
scsub.write_line()
scsub.write_line("# Module source files")
scsub.write_line("source_dirs = [")
for path in module.get_source_dirs():
if not path:
continue
scsub.write_line("\"" + path + "/" + "\"" + ",", 1)
scsub.write_line("]")
scsub.write_line(env_module + ".Prepend(CPPPATH=source_dirs)")
scsub.write_line("sources = [Glob(d + \"*.cpp\") for d in source_dirs]")
scsub.write_line()
scsub.write_line(env_module + ".add_source_files(env.modules_sources, sources)")
scsub.close()
def make_classes(module):
already_got_default = False
for c_data in module.get_classes():
name = c_data['name']
if not name: # get default
if already_got_default:
continue
class_name = module.get_default_class_name()
underscore_name = module.get_internal_name()
already_got_default = True
else:
class_name = name
underscore_name = methods.to_snake_case(class_name)
inherits = c_data['inherits']
class_dir = os.path.join(module.path, c_data['path'])
if not os.path.exists(class_dir):
os.makedirs(class_dir)
# Header
header_dest = os.path.join(class_dir, underscore_name + '.h')
write_class_header(header_dest, class_name, underscore_name, inherits)
# Source
source_dest = os.path.join(class_dir, underscore_name + '.cpp')
write_class_source(source_dest, class_name, underscore_name, inherits)
def write_class_header(header_dest, name, underscore_name, inherits):
header = FileWriter(header_dest)
HEADER_GUARD = underscore_name.upper()
header.write_line("#ifndef " + HEADER_GUARD + "_H")
header.write_line("#define " + HEADER_GUARD + "_H")
header.write_line()
header.write_line(gdtypes.get_include(inherits))
header.write_line()
header.write_line("class " + name + " : " + "public " + inherits + " {")
header.write_line("GDCLASS(" + name + ", " + inherits + ");", 1)
header.write_line()
header.write_line("protected:")
header.write_line("static void _bind_methods();", 1)
header.write_line("};")
header.write_line()
header.write_line("#endif " + "// " + HEADER_GUARD + "_H")
header.close()
def write_class_source(source_dest, name, underscore_name, inherits):
source = FileWriter(source_dest)
source.write_line("#include " + '\"' + underscore_name + ".h" + '\"')
source.write_line()
source.write_line("void " + name + "::_bind_methods() {")
source.write_line()
source.write_line("}")
source.close()
def make_gdignore(module):
gdignore_dest = os.path.join(module.path, ".gdignore")
Path(gdignore_dest).touch()
|
1668342
|
from copy import deepcopy
import numpy as np
def complete_mol(self, labels):
"""
Take a cell and complete certain molecules
The objective is to end up with a unit cell where the molecules of interest
are complete. The rest of the atoms of the cell must remain intact. Note that
the input atoms are transformed and are the same as are present in the
output.
Parameters
----------
labels : int or list of ints
The number of the atoms from which the molecules are generated
Returns
-------
new_mol : Mol object
The now complete molecule
new_cell : Mol object
The cell with the completed molecule
"""
new_mol, scattered_mol = self.per_select(labels, old_pos=True)
new_cell_atoms = deepcopy(
[a for a in self.atoms if a not in scattered_mol])
new_cell = self.copy()
new_cell.atoms = new_cell_atoms
for atom in new_mol:
new_cell.append(atom.copy())
return new_mol, new_cell
def complete_cell(self):
"""
Return a cell where atoms have been translated to complete all molecules of
the cell
Returns
-------
out_cell : Mol object
The new untruncated cell
full_mol_l : list of Mol objects
Each molecule in the untruncated cell
"""
full_mol_l = []
remaining = self.copy()
while len(remaining) != 0:
full_mol, cell = remaining.complete_mol(0)
full_mol_l.append(full_mol)
remaining = cell
for atom in full_mol:
if atom in remaining:
remaining.remove(atom)
# Conveniently, remaining is now an empty Mol
out_cell = remaining
for mol in full_mol_l:
out_cell.extend(mol)
return out_cell, full_mol_l
def supercell(self, trans):
"""
Return a supercell of I x J x K
Parameters
----------
trans : array-like of length 3
Multiplications of the primitive cell
Returns
-------
supercell : Mol object
New supercell with adjusted lattice vectors
"""
import fromage.utils.mol as mol_init
# make the input into a np array
trans = np.array(trans)
new_cell = self.empty_mol()
for a_mult in range(trans[0]):
for b_mult in range(trans[1]):
for c_mult in range(trans[2]):
vector = a_mult * \
self.vectors[0] + b_mult * \
self.vectors[1] + c_mult * self.vectors[2]
new_atoms = mol_init.Mol([i.v_translated(vector)
for i in self.atoms])
new_cell += new_atoms
out_vec = (self.vectors.T * trans.transpose()).T
new_cell.vectors = out_vec
return new_cell
def centered_supercell(self, trans, from_origin=False):
"""
Make a bigger supercell out of an input cell.
The cell is multiplied positively and negatively through each lattice
vector so that the supercluster ends up being
(1+2*trans[0])*(1+2*trans[1])*(1+2*trans[2]) times larger. For example if the
input is 1,1,1 for a cubic unit cell, the output will be the original unit
cell surrounded by 26 other unit cells forming a total 3x3x3 cube.
Alternatively, the multiplication can be centered around the origin, a corner of the
unit cell, instead of the centre. In that case the supercluster ends up being
only (2*trans[0])*(2*trans[1])*(2*trans[2])
Parameters
----------
trans : numpy array of length 3
Multiplications of the primitive cell
from_origin : bool
Determines the kind of multiplication. True is corner of the cell as
the center, False is middle of the cell.
Returns
-------
mega_cell : Mol object
The resulting supercell
"""
import fromage.utils.mol as mol_init
trans_series = [0, 0, 0]
for i, tra in enumerate(trans):
if from_origin:
trans_series[i] = list(range(-tra, tra))
else:
trans_series[i] = list(range(-tra, tra + 1))
trans_series = np.array(trans_series)
new_cell = self.empty_mol()
for a_mult in trans_series[0]:
for b_mult in trans_series[1]:
for c_mult in trans_series[2]:
vector = a_mult * \
self.vectors[0] + b_mult * \
self.vectors[1] + c_mult * self.vectors[2]
new_atoms = mol_init.Mol([i.v_translated(vector)
for i in self.atoms])
new_cell += new_atoms
out_vec = (self.vectors.T * trans.transpose()).T
new_cell.vectors = out_vec
return new_cell
def trans_from_rad(self, clust_rad):
"""
Generate the translations necessary to encapsulate a sphere of given rad
Parameters
----------
clust_rad : float
Radius defining a sphere
Returns
-------
trans_count : 3 x 1 numpy array
The translations required for the unit cell to contain the sphere
"""
# vectors normal to faces
a_perp = np.cross(self.vectors[1], self.vectors[2])
b_perp = np.cross(self.vectors[2], self.vectors[0])
c_perp = np.cross(self.vectors[0], self.vectors[1])
# the three normalised unit vectors
perp = np.array([a_perp / np.linalg.norm(a_perp), b_perp /
np.linalg.norm(b_perp), c_perp / np.linalg.norm(c_perp)])
# dimensions of the final supercell (translations)
trans_count = np.array([0, 0, 0])
# lattice vectors of the quadrant supercell
supercell_vectors = np.zeros((3,3))
# distances from origin to each face
distances = np.array([0.0, 0.0, 0.0])
# loop over lattice vectors
for comp in range(3):
while distances[comp] <= clust_rad:
trans_count[comp] += 1
supercell_vectors[comp] = trans_count[comp] * self.vectors[comp]
distances[comp] = np.dot(supercell_vectors[comp], perp[comp])
return trans_count
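def _example_trans_from_rad_cubic():
    """Illustrative sketch (hypothetical numbers): the geometry behind
    trans_from_rad() for a 10 Angstrom cubic cell and a 25 Angstrom sphere,
    reproduced with plain numpy instead of a Mol object."""
    vectors = np.eye(3) * 10.0
    clust_rad = 25.0
    a_perp = np.cross(vectors[1], vectors[2])
    perp_unit = a_perp / np.linalg.norm(a_perp)  # unit normal of the bc face
    trans = 0
    distance = 0.0
    while distance <= clust_rad:
        trans += 1
        distance = np.dot(trans * vectors[0], perp_unit)
    # Three translations are needed: faces at 10 and 20 Angstrom still fall
    # inside the 25 Angstrom sphere, 30 Angstrom is the first face beyond it.
    assert trans == 3
    return trans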
def supercell_for_cluster(self, clust_rad, mode='exc', central_mol=None):
"""
Make a supercell which will be used to make a cluster
Parameters
----------
clust_rad : float
The radius of the cluster
mode : str
'exc' is for exclusive clusters, whereas 'inc' is for inclusive clusters
which will need an extra layer of unit cells
central_mol : Mol
Molecule which serves as a center for the cluster (optional)
Returns
-------
out_supercell : Mol
The supercell from which the cluster will be taken
"""
# if there is a central mol, account for nearest neighbour molecules
# bleeding out of the original radius
if central_mol:
central_rad = 0
for atom in central_mol:
dis = atom.v_dist([0, 0, 0])
if dis > central_rad:
central_rad = dis
# get the translations of the unit cell necessary to enclose the required mols
trans = self.trans_from_rad(clust_rad + central_rad)
else:
trans = self.trans_from_rad(clust_rad)
# if the cluster is inclusive, then extra mols might be required from
# an additional layer of the supercell
if mode == 'inc':
trans += np.array([1, 1, 1]) # one buffer cell layer
# make a supercell which includes the desired cluster
out_supercell = self.centered_supercell(trans, from_origin=True)
return out_supercell
def gen_exclusive_clust(self, seed_atoms):
"""
Remove all non complete molecules
This only works if the input contains at least one full molecule
Parameters
----------
seed_atoms : Mol
Aggregate of molecules, not necessarily all complete
Returns
-------
out_clust : Mol
The full molecules of seed_atoms
"""
import fromage.utils.mol as mol_init
max_mol_len = 0
while len(seed_atoms) > 0:
# pick out a molecule from the seed atoms
mol = seed_atoms.select(0)
# if the molecule is the biggest so far
if len(mol) > max_mol_len:
# molecules are supposed to be this long now
max_mol_len = len(mol)
out_clust = mol_init.Mol([])
# if the molecule is the right size
if len(mol) == max_mol_len:
# include this molecule
out_clust += mol
# discard the molecule from seed atoms
for atom in mol:
seed_atoms.remove(atom)
return out_clust
def gen_inclusive_clust(self, seed_atoms, supercell):
"""
Select all complete molecules of supercell which contain seed atoms
This only works if the input contains at least one full molecule
Parameters
----------
seed_atoms : Mol
Aggregate of molecules, not necessarily all complete
supercell : Mol
Supercell which contains all seed atoms. It should have at least one
buffer layer of unit cells around the seed atoms
Returns
-------
out_clust : Mol
The full molecules of supercell which contain seed atoms
"""
import fromage.utils.mol as mol_init
max_mol_len = 0
out_clust = mol_init.Mol([])
# here, the molecule with the atom seed_atoms[0] is necessarily complete
# in supercell
max_mol_len = len(supercell.select(supercell.index(seed_atoms[0])))
while len(seed_atoms) > 0:
# the part of the mol detected in seed_atoms
mol_tmp = seed_atoms.select(0)
if len(mol_tmp) < max_mol_len:
# The whole mol, which could potentially include even more
# seed_atoms
mol = supercell.select(supercell.index(seed_atoms[0]))
else:
mol = mol_tmp
out_clust += mol
for atom in mol_tmp:
seed_atoms.remove(atom)
for atom in mol:
supercell.remove(atom)
# remove all atoms of the mol which are part of seed_atoms
try:
seed_atoms.remove(atom)
except ValueError:
pass
return out_clust
def make_cluster(self, clust_rad, mode='exc', central_mol=None):
"""
Generate a cluster of molecules from a primitive cell
This first makes a supercell of the correct size which will contain with
one additional buffer shell. Then the sphere is generated from this new
supercell by connectivity.
A central molecule can also be supplied which will turn the spheres
defining the clusters into the union of spheres stemming from each atom
of the central molecule.
This algorithm is designed for crystals where one same molecule does not
extend into more than two unit cells in the case of inclusive clusters.
Parameters
----------
clust_rad : float
Radius for generating the cluster. If no central molecule is specified,
this will generate seed atoms in a sphere from the radius
mode : str
Switches between inclusive and exclusive selecting. Inclusive,
'inc', selects all molecules which have atoms within the radius.
Exclusive, 'exc', selects all molecules fully in the radius.
Default: 'exc'
central_mol : Mol
If this is supplied, the central molecule will act as a kernel for the
cluster which will end up being of the appropriate shape. (optional)
Returns
-------
cluster : Mol object
Cluster of molecules from their crystal positions
"""
import fromage.utils.mol as mol_init
# generate a supercell which will include the cluster.
# inclusive clusters will have an extra layer of supercell.
# if a central mol is supplied, the supercell will include the whole
# molecule and the supplied radius.
supercell = self.supercell_for_cluster(clust_rad, mode=mode, central_mol=central_mol)
# seed_atoms will initialise the cluster
seed_atoms = mol_init.Mol([])
# conserve the bonding properties of the original cell
seed_atoms.bonding = supercell.bonding
seed_atoms.thresh = supercell.thresh
# get seed atoms in the shape of the central mol if pertinent
if central_mol:
for atom_i in supercell:
for atom_j in central_mol:
if atom_i.dist(atom_j) < clust_rad:
seed_atoms.append(atom_i)
break
# get spherical seedatoms otherwise
else:
for atom in supercell:
if atom.v_dist([0, 0, 0]) < clust_rad:
seed_atoms.append(atom)
# remove incomplete molecules
if mode == 'exc':
clust_atoms = self.gen_exclusive_clust(seed_atoms)
# complete incomplete molecules
elif mode == 'inc':
clust_atoms = self.gen_inclusive_clust(seed_atoms, supercell)
else:
raise ValueError("Invalid cluster generation mode. Use 'exc' or 'inc'")
return clust_atoms
def centered_mols(self, labels, return_trans=False):
"""
Return the molecules translated at the origin with a corresponding cell
Parameters
----------
labels : int or list of ints
The labels of the atoms to select
print_centro : bool
Print the translation vector which was detected as -centroid
Returns
-------
mol : Mol object
The selected molecules with their centroid at the origin
mod_cell : Mol object
The new confined cell corresponding to the now translated molecules
"""
mol, mod_cell = self.complete_mol(labels)
centro = mol.centroid()
mol.translate(-centro)
mod_cell.translate(-centro)
mod_cell = mod_cell.confined()
if return_trans:
return mol, mod_cell, -centro
else:
return mol, mod_cell
def confined(self):
"""Move all atoms to fit inside the primitive cell"""
frac_mol = self.dir_to_frac_pos()
out_mol = frac_mol.frac_to_dir_pos()
return out_mol
|
1668350
|
import setuptools
import codecs
import os.path
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError("Unable to find version string.")
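# Example (illustrative): for a knockpy/__init__.py containing a line such as
#     __version__ = "1.2.3"
# get_version() splits on the double quote and returns "1.2.3"; the delim check
# above makes single-quoted version strings work the same way.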
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="knockpy",
version=get_version('knockpy/__init__.py'),
author="<NAME>",
author_email="<EMAIL>",
description="Knockoffs for variable selection",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/amspector100/knockpy",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=[
"numpy>=1.17.4",
"scipy>=1.5.2",
"cvxpy>=1.0.25",
"scikit_learn>=0.22",
"networkx>=2.4",
"tqdm>=4.36.1",
"group_lasso",
"pyglmnet"
],
extras_require={
"kpytorch":["torch>=1.4.0"],
"fast":["cython>=0.29.14", "choldate", "scikit-dsdp"]
}
)
|
1668365
|
import os, sys, json, re, uuid, time
import logging, argparse
import requests
logging.basicConfig(
filename='publish_to_marketplace.log',
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S'
)
headers = {'content-type': 'application/json', 'Accept': 'application/json'}
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def help_parser():
parser = argparse.ArgumentParser(
description='Standard Arguments for talking to vCenter or ESXi')
parser.add_argument('--pc',
required=True,
action='store',
help='vSphere service to connect to')
parser.add_argument('--port',
type=int,
default=9440,
action='store',
help='Port to connect on')
parser.add_argument('--user',
required=True,
action='store',
help='User name to use when connecting to pc')
parser.add_argument('--password',
required=True,
action='store',
help='Password to use when connecting to pc')
parser.add_argument('--blueprint_name',
required=True,
action='store',
help='Blueprint name to be published')
parser.add_argument('-v', '--version',
required=True,
action='store',
help='Marketplace app version')
parser.add_argument('-n', '--name',
required=True,
action='store',
help='Marketplace app Name')
parser.add_argument('-p', '--project',
required=True,
action='store',
help='Projects for marketplace blueprint (used for approving blueprint)')
parser.add_argument('-i', '--icon',
required=True,
action='store',
help='Marketplace app Icon')
parser.add_argument('-d', '--description',
required=True,
action='store',
help='Marketplace app description')
parser.add_argument("--with_secrets", type=str2bool, nargs='?',
const=True, default=False,
help="Publish with secrets")
parser.add_argument("--publish_to_marketplace", type=str2bool, nargs='?',
const=True, default=False,
help="Publish to Marketplace")
parser.add_argument("--auto_approve", type=str2bool, nargs='?',
const=True, default=False,
help="Approve from Marketplace manager")
parser.add_argument("--existing_markeplace_bp", type=str2bool, nargs='?',
const=True, default=False,
help="Existing marketplace app")
return parser
### --------------------------------------------------------------------------------- ###
def get_blueprint_uuid(base_url, auth, blueprint_name):
method = 'POST'
url = base_url + "/blueprints/list"
resp = None
blueprint_uuid = ""
payload = {
"length":100,
"offset":0,
"filter":"name=={}".format(blueprint_name)
}
try:
resp = requests.request(
method,
url,
data=json.dumps(payload),
headers=headers,
auth=(auth["username"], auth["password"]),
verify=False
)
except requests.exceptions.ConnectionError as e:
logging.error("Failed to connect to PC: {}".format(e))
sys.exit(-1)
finally:
if resp.ok:
json_resp = resp.json()
if json_resp['metadata']['total_matches'] > 0:
for bp in json_resp['entities']:
if bp["metadata"]["name"] == blueprint_name:
return bp["metadata"]["uuid"]
else:
logging.error("Not able to find blueprint {}".format(blueprint_name))
sys.exit(-1)
else:
logging.error("Request failed")
logging.error("Headers: {}".format(headers))
logging.error('Status code: {}'.format(resp.status_code))
logging.error('Response: {}'.format(json.dumps(json.loads(resp.content), indent=4)))
sys.exit(-1)
### --------------------------------------------------------------------------------- ###
def get_blueprint(base_url, auth, blueprint_uuid):
method = 'GET'
url = base_url + "/blueprints/{}/export_json?keep_secrets=true".format(blueprint_uuid)
resp = None
try:
resp = requests.request(
method,
url,
headers=headers,
auth=(auth["username"], auth["password"]),
verify=False
)
except requests.exceptions.ConnectionError as e:
logging.error("Failed to connect to PC: {}".format(e))
sys.exit(-1)
finally:
if resp.ok:
return resp.json()
else:
logging.error("Request failed")
logging.error("Headers: {}".format(headers))
logging.error('Status code: {}'.format(resp.status_code))
logging.error('Response: {}'.format(json.dumps(json.loads(resp.content), indent=4)))
sys.exit(-1)
### --------------------------------------------------------------------------------- ###
def get_icon_uuid(base_url, auth, icon_name):
method = 'POST'
url = base_url + "/app_icons/list"
app_icon_uuid = None
payload = {
"length":100,
"offset":0
}
try:
resp = requests.request(
method,
url,
data=json.dumps(payload),
headers=headers,
auth=(auth["username"], auth["password"]),
verify=False
)
except requests.exceptions.ConnectionError as e:
logging.error("Failed to connect to PC: {}".format(e))
sys.exit(-1)
finally:
if resp.ok:
json_resp = resp.json()
if json_resp['metadata']['total_matches'] > 0:
for icon in json_resp['entities']:
if icon["metadata"]["name"] == icon_name:
app_icon_uuid = icon["metadata"]["uuid"]
else:
logging.error("Not able to find icon {}".format(blueprint_name))
sys.exit(-1)
return app_icon_uuid
else:
logging.error("Request failed")
logging.error("Headers: {}".format(headers))
logging.error('Status code: {}'.format(resp.status_code))
logging.error('Response: {}'.format(json.dumps(json.loads(resp.content), indent=4)))
sys.exit(-1)
### --------------------------------------------------------------------------------- ###
def remove_platform_data(bp_spec):
for substrate in bp_spec["resources"]["substrate_definition_list"]:
if substrate['type'] == "VMWARE_VM":
substrate["create_spec"]["cluster"] = ""
substrate["create_spec"]["storage_pod"] = ""
substrate["create_spec"]["host"] = ""
substrate["create_spec"]["datastore"] = ""
for nic in substrate["create_spec"]["resources"]["nic_list"]:
nic["net_name"] = ""
nic["nic_type"] = ""
return bp_spec
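def _example_remove_platform_data():
    # Illustrative sketch (hypothetical field values): remove_platform_data()
    # blanks the VMware placement details and NIC bindings so the blueprint can
    # be published without environment-specific data.
    spec = {
        "resources": {
            "substrate_definition_list": [
                {
                    "type": "VMWARE_VM",
                    "create_spec": {
                        "cluster": "cluster-1",
                        "storage_pod": "pod-1",
                        "host": "host-1",
                        "datastore": "ds-1",
                        "resources": {
                            "nic_list": [{"net_name": "vlan10", "nic_type": "e1000"}]
                        },
                    },
                }
            ]
        }
    }
    cleaned = remove_platform_data(spec)
    substrate = cleaned["resources"]["substrate_definition_list"][0]
    assert substrate["create_spec"]["cluster"] == ""
    assert substrate["create_spec"]["resources"]["nic_list"][0]["net_name"] == ""
    return cleaned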
### --------------------------------------------------------------------------------- ###
def publish_bp_to_marketplace_manager(
bp_json,
marketplace_bp_name,
version,
description="",
app_group_uuid=None,
icon_name=None,
icon_file=None,
):
bp_data = bp_json
bp_status = bp_data["status"]["state"]
if bp_status != "ACTIVE":
logging.error("Blueprint is in {} state. Unable to publish it to marketplace manager".format(bp_status))
sys.exit(-1)
bp_template = {
"spec": {
"name": marketplace_bp_name,
"description": description,
"resources": {
"app_attribute_list": ["FEATURED"],
"icon_reference_list": [],
"author": "admin",
"version": version,
"app_group_uuid": app_group_uuid or str(uuid.uuid4()),
"app_blueprint_template": {
"status": bp_data["status"],
"spec": bp_data["spec"],
},
},
},
"api_version": "3.0",
"metadata": {"kind": "marketplace_item"},
}
if icon_name:
app_icon_uuid = get_icon_uuid(base_url, auth, icon_name)
bp_template["spec"]["resources"]["icon_reference_list"] = [
{
"icon_type": "ICON",
"icon_reference": {"kind": "file_item", "uuid": app_icon_uuid},
}
]
method = 'POST'
url = base_url + "/calm_marketplace_items"
resp = None
try:
resp = requests.request(
method,
url,
data=json.dumps(bp_template),
headers=headers,
auth=(auth["username"], auth["password"]),
verify=False
)
except requests.exceptions.ConnectionError as e:
logging.error("Failed to connect to PC: {}".format(e))
sys.exit(-1)
finally:
if resp.ok:
json_resp = resp.json()
if json_resp["spec"]["resources"]["app_state"] != "PENDING":
logging.info("Failed to publish blueprint to Marketplace")
sys.exit(-1)
else:
return json_resp
else:
logging.error("Request failed")
logging.error("Headers: {}".format(headers))
logging.error('Status code: {}'.format(resp.status_code))
logging.error('Response: {}'.format(json.dumps(json.loads(resp.content), indent=4)))
sys.exit(-1)
logging.info("Marketplace Blueprint is published to marketplace manager successfully")
### --------------------------------------------------------------------------------- ###
def get_project_uuid(base_url, auth, project_name):
method = 'POST'
url = base_url + "/projects/list"
payload = {
"length":100,
"offset":0,
"filter":"name=={0}".format(project_name)
}
resp = requests.request(
method,
url,
data=json.dumps(payload),
headers=headers,
auth=(auth["username"], auth["password"]),
verify=False
)
if resp.ok:
json_resp = resp.json()
if json_resp['metadata']['total_matches'] > 0:
project = json_resp['entities'][0]
project_uuid = project["metadata"]["uuid"]
return project_uuid
else:
logging.error("Could not find project")
sys.exit(-1)
else:
logging.error("Request failed")
logging.error("Headers: {}".format(headers))
logging.error('Status code: {}'.format(resp.status_code))
logging.error('Response: {}'.format(json.dumps(json.loads(resp.content), indent=4)))
sys.exit(-1)
### --------------------------------------------------------------------------------- ###
def approve_marketplace_bp(marketplace_json, projects=[], category=None):
method = 'PUT'
url = base_url + "/calm_marketplace_items/{}".format(marketplace_json["metadata"]["uuid"])
resp = None
marketplace_json["spec"]["resources"]["app_state"] = "ACCEPTED"
marketplace_json["spec"]["resources"]["project_reference_list"] = projects
del marketplace_json["status"]
try:
resp = requests.request(
method,
url,
data=json.dumps(marketplace_json),
headers=headers,
auth=(auth["username"], auth["password"]),
verify=False
)
except requests.exceptions.ConnectionError as e:
logging.error("Failed to connect to PC: {}".format(e))
sys.exit(-1)
finally:
if resp.ok:
json_resp = resp.json()
if json_resp["spec"]["resources"]["app_state"] != "ACCEPTED":
logging.info("Failed to approve Marketplace Application")
sys.exit(-1)
else:
return json_resp
else:
logging.error("Request failed")
logging.error("Headers: {}".format(headers))
logging.error('Status code: {}'.format(resp.status_code))
logging.error('Response: {}'.format(json.dumps(json.loads(resp.content), indent=4)))
sys.exit(-1)
### --------------------------------------------------------------------------------- ###
def publish_marketplace_bp(marketplace_json):
method = 'PUT'
url = base_url + "/calm_marketplace_items/{}".format(marketplace_json["metadata"]["uuid"])
resp = None
marketplace_json["spec"]["resources"]["app_state"] = "PUBLISHED"
marketplace_json["metadata"]["spec_version"] = 1
try:
resp = requests.request(
method,
url,
data=json.dumps(marketplace_json),
headers=headers,
auth=(auth["username"], auth["password"]),
verify=False
)
except requests.exceptions.ConnectionError as e:
logging.error("Failed to connect to PC: {}".format(e))
sys.exit(-1)
finally:
if resp.ok:
json_resp = resp.json()
if json_resp["spec"]["resources"]["app_state"] != "PUBLISHED":
logging.info("Failed to Publish Marketplace Application")
sys.exit(-1)
else:
return json_resp
else:
logging.error("Request failed")
logging.error("Headers: {}".format(headers))
logging.error('Status code: {}'.format(resp.status_code))
logging.error('Response: {}'.format(json.dumps(json.loads(resp.content), indent=4)))
sys.exit(-1)
### --------------------------------------------------------------------------------- ###
def get_app_group_uuid(marketplace_bp_name):
method = 'POST'
url = base_url + "/groups"
app_group_uuid = None
group = None
payload = {
"filter_criteria": "marketplace_item_type_list==APP;app_source==LOCAL;name=={}.*".format(marketplace_bp_name),
"entity_type": "marketplace_item",
"group_member_offset": 0,
"group_member_count": 1,
"group_count": 64,
"grouping_attribute": "app_group_uuid",
"group_member_attributes": [
{
"attribute": "name"
},
{
"attribute": "app_group_uuid"
}
],
"group_member_sort_attribute": "name",
"group_member_sort_order": "DESCENDING"
}
try:
resp = requests.request(
method,
url,
data=json.dumps(payload),
headers=headers,
auth=(auth["username"], auth["password"]),
verify=False
)
except requests.exceptions.ConnectionError as e:
logging.error("Failed to connect to PC: {}".format(e))
sys.exit(-1)
finally:
if resp.ok:
json_resp = resp.json()
if json_resp["filtered_group_count"] != 0:
for result in json_resp["group_results"]:
for data in result["entity_results"][0]["data"]:
if data["name"] == "name" and data["values"][0]["values"][0] == marketplace_bp_name:
group = result
else:
logging.info("Failed to Find exiting Marketplace Application")
if group != None:
for data in group["entity_results"][0]["data"]:
if data["name"] == "app_group_uuid":
app_group_uuid = data["values"][0]["values"][0]
return app_group_uuid
else:
logging.info("Failed to Find exiting Marketplace Application")
return app_group_uuid
else:
logging.error("Request failed")
logging.error("Headers: {}".format(headers))
logging.error('Status code: {}'.format(resp.status_code))
logging.error('Response: {}'.format(json.dumps(json.loads(resp.content), indent=4)))
sys.exit(-1)
### --------------------------------------------------------------------------------- ###
def publish_bp_as_existing_marketplace_bp(
bp_json,
marketplace_bp_name,
version,
description="",
publish_to_marketplace=False,
auto_approve=False,
projects=[],
category=None,
icon_name=None
):
app_group_uuid = get_app_group_uuid(marketplace_bp_name)
published_json = publish_bp_to_marketplace_manager(bp_json, marketplace_bp_name, version,
description=description, app_group_uuid=app_group_uuid, icon_name=icon_name)
if publish_to_marketplace or auto_approve:
project_reference_list = []
for project in projects:
project_uuid = get_project_uuid(base_url, auth, project)
project_reference = {"name": project, "kind": "project", "uuid": project_uuid}
project_reference_list.append(project_reference)
approved_json = approve_marketplace_bp(
published_json,
projects=project_reference_list,
category=category
)
if publish_to_marketplace:
publish_marketplace_bp(published_json)
if __name__ == "__main__":
parser = help_parser().parse_args()
pc_ip = parser.pc
pc_port = parser.port
blueprint_name = parser.blueprint_name
marketplace_bp_name = parser.name
version = parser.version
description = parser.description
project = parser.project
icon = parser.icon
auto_approve = parser.auto_approve
publish_to_marketplace = parser.publish_to_marketplace
with_secrets = parser.with_secrets
existing_markeplace_bp = parser.existing_markeplace_bp
base_url = "https://{}:{}/api/nutanix/v3".format(pc_ip,str(pc_port))
auth = { "username": parser.user, "password": <PASSWORD>}
blueprint_uuid = get_blueprint_uuid(base_url, auth, blueprint_name)
blueprint_json = get_blueprint(base_url, auth, blueprint_uuid)
blueprint_json["spec"] = remove_platform_data(blueprint_json["spec"])
blueprint_json["status"] = remove_platform_data(blueprint_json["status"])
publish_bp_as_existing_marketplace_bp(blueprint_json, marketplace_bp_name,
version, description=description, publish_to_marketplace=publish_to_marketplace,
auto_approve=auto_approve, projects=project.split(','), icon_name=icon)
|
1668382
|
import FWCore.ParameterSet.Config as cms
from CommonTools.ParticleFlow.pfNoPileUpIso_cff import pfPileUpIso, pfNoPileUpIso, pfNoPileUpIsoTask
from RecoEgamma.EgammaIsolationAlgos.egmIsoConeDefinitions_cfi import IsoConeDefinitions as _IsoConeDefinitions
from RecoEgamma.EgammaIsolationAlgos.egmIsolationDefinitions_cff import pfNoPileUpCandidates
import PhysicsTools.IsolationAlgos.CITKPFIsolationSumProducer_cfi as _mod
egmPhotonIsolation = _mod.CITKPFIsolationSumProducer.clone(
srcToIsolate = "gedPhotons",
srcForIsolationCone = 'pfNoPileUpCandidates',
isolationConeDefinitions = _IsoConeDefinitions
)
egmPhotonIsolationAODTask = cms.Task(pfNoPileUpIsoTask,
pfNoPileUpCandidates,
egmPhotonIsolation)
egmPhotonIsolationAODSequence = cms.Sequence(egmPhotonIsolationAODTask)
|
1668435
|
from wx.lib.agw.aui import AuiNotebook
from wx.lib.agw.aui import AUI_NB_CLOSE_ON_ACTIVE_TAB, AUI_NB_MIDDLE_CLICK_CLOSE, \
AUI_NB_TAB_MOVE, AUI_NB_TAB_EXTERNAL_MOVE, AUI_NB_TAB_SPLIT, AUI_NB_CLOSE_BUTTON
from kurier.interfaces import IStateRestorable
from kurier.widgets.request.headers import RequestHeadersTab
from kurier.widgets.request.properties import RequestPropertiesTab
from kurier.widgets.request.data import RequestDataTab
class RequestNotebook(IStateRestorable, AuiNotebook):
def __init__(self, *args, **kwargs):
super(RequestNotebook, self).__init__(*args, **kwargs)
self.SetAGWWindowStyleFlag(
self.GetAGWWindowStyleFlag()
& ~AUI_NB_CLOSE_ON_ACTIVE_TAB
& ~AUI_NB_MIDDLE_CLICK_CLOSE
& ~AUI_NB_TAB_MOVE
& ~AUI_NB_TAB_EXTERNAL_MOVE
& ~AUI_NB_TAB_SPLIT
& ~AUI_NB_CLOSE_BUTTON
)
self.properties_tab = None
self.headers_tab = None
self.data_tab = None
self.InitUI()
def InitUI(self):
self.properties_tab = RequestPropertiesTab(self)
self.AddPage(self.properties_tab, "Properties", select=True)
self.headers_tab = RequestHeadersTab(self)
self.AddPage(self.headers_tab, "Headers", select=False)
self.data_tab = RequestDataTab(self)
self.AddPage(self.data_tab, "Message body", select=False)
def InitFromState(self, **state):
self.properties_tab.InitFromState(**state)
self.headers_tab.InitFromState(**state)
self.data_tab.InitFromState(**state)
def GetRequestProperties(self):
return self.properties_tab.GetProperties()
def GetRequestHeaders(self):
return self.headers_tab.GetHeaders()
def GetRequestData(self):
return self.data_tab.GetData()
|
1668454
|
class Node:
def __init__(self):
self.left = None
self.right = None
def count_nodes(root, lspine=0, rspine=0):
if not root:
return 0
if not lspine:
node = root
while node:
node = node.left
lspine += 1
if not rspine:
node = root
while node:
node = node.right
rspine += 1
if lspine == rspine:
return 2**lspine - 1
return 1 + \
count_nodes(root.left, lspine=lspine-1) + \
count_nodes(root.right, rspine=rspine-1)
# Tests
a = Node()
b = Node()
c = Node()
a.left = b
a.right = c
assert count_nodes(a) == 3
d = Node()
b.left = d
assert count_nodes(a) == 4
e = Node()
b.right = e
assert count_nodes(a) == 5
f = Node()
c.left = f
assert count_nodes(a) == 6
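# Additional illustrative check (added): a perfect tree of depth 3 is counted
# by the 2**lspine - 1 shortcut alone, without recursing below the root.
g = Node()
g.left, g.right = Node(), Node()
g.left.left, g.left.right = Node(), Node()
g.right.left, g.right.right = Node(), Node()
assert count_nodes(g) == 7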
|
1668478
|
import os
import time
from copy import deepcopy
from pathlib import Path
from typing import List, Optional, cast
import hydra
import jax
import numpy as np
import ptvsd
import pytorch_lightning as pl
import wandb
from hydra.utils import instantiate
from omegaconf import OmegaConf
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.plugins import DDPPlugin
from typer import Argument, Typer
from fourierflow.utils import (delete_old_results, get_experiment_id,
import_string, upload_code_to_wandb)
app = Typer()
@app.callback(invoke_without_command=True)
def main(config_path: Path,
overrides: Optional[List[str]] = Argument(None),
force: bool = False,
resume: bool = False,
checkpoint_id: Optional[str] = None,
trial: int = 0,
debug: bool = False,
no_logging: bool = False):
"""Train a Pytorch Lightning experiment."""
config_dir = config_path.parent
config_name = config_path.stem
hydra.initialize(config_path=Path('../..') /
config_dir, version_base='1.2')
config = hydra.compose(config_name, overrides=overrides)
OmegaConf.set_struct(config, False)
# This debug mode is for those who use VS Code's internal debugger.
if debug:
ptvsd.enable_attach(address=('0.0.0.0', 5678))
ptvsd.wait_for_attach()
# ptvsd doesn't play well with multiple processes.
config.builder.num_workers = 0
jax.config.update('jax_disable_jit', True)
# jax.config.update("jax_debug_nans", True)
# Set up directories to save experimental outputs.
delete_old_results(config_dir, force, trial, resume)
# Set seed for reproducibility.
rs = np.random.RandomState(7231 + trial)
seed = config.get('seed', rs.randint(1000, 1000000))
pl.seed_everything(seed, workers=True)
config.seed = seed
wandb_id = get_experiment_id(checkpoint_id, trial, config_dir, resume)
config.trial = trial
if 'seed' in config.trainer:
config.trainer.seed = seed
# Initialize the dataset and experiment modules.
builder = instantiate(config.builder)
routine = instantiate(config.routine)
# Support fine-tuning mode if a pretrained model path is supplied.
pretrained_path = config.get('pretrained_path', None)
if pretrained_path:
routine.load_lightning_model_state(pretrained_path)
# Resume from last checkpoint. We assume that the checkpoint file is from
# the end of the previous epoch. The trainer will start the next epoch.
# Resuming from the middle of an epoch is not yet supported. See:
# https://github.com/PyTorchLightning/pytorch-lightning/issues/5325
chkpt_path = Path(config_dir) / 'checkpoints' / wandb_id / 'last.ckpt' \
if resume else None
# Initialize the main trainer.
callbacks = [instantiate(p) for p in config.get('callbacks', [])]
multi_gpus = config.trainer.get('gpus', 0) > 1
plugins = DDPPlugin(find_unused_parameters=False) if multi_gpus else None
if no_logging:
logger = False
enable_checkpointing = False
callbacks = []
else:
# We use Weights & Biases to track our experiments.
config.wandb.name = f"{config.wandb.group}/{trial}"
wandb_opts = cast(dict, OmegaConf.to_container(config.wandb))
logger = WandbLogger(save_dir=str(config_dir),
mode=os.environ.get('WANDB_MODE', 'offline'),
config=deepcopy(OmegaConf.to_container(config)),
id=wandb_id,
**wandb_opts)
upload_code_to_wandb(Path(config_dir) / 'config.yaml', logger)
enable_checkpointing = True
c = wandb.wandb_sdk.wandb_artifacts.get_artifacts_cache()
c.cleanup(wandb.util.from_human_size("100GB"))
Trainer = import_string(config.trainer.pop(
'_target_', 'pytorch_lightning.Trainer'))
trainer = Trainer(logger=logger,
enable_checkpointing=enable_checkpointing,
callbacks=callbacks,
plugins=plugins,
weights_save_path=config_dir,
resume_from_checkpoint=chkpt_path,
enable_model_summary=False,
**OmegaConf.to_container(config.trainer))
# Tuning only has an effect when either auto_scale_batch_size or
# auto_lr_find is set to true.
trainer.tune(routine, datamodule=builder)
trainer.fit(routine, datamodule=builder)
# Load best checkpoint before testing.
chkpt_dir = Path(config_dir) / 'checkpoints'
paths = list(chkpt_dir.glob(f'trial-{trial}-*/epoch*.ckpt'))
assert len(paths) == 1
checkpoint_path = paths[0]
routine.load_lightning_model_state(str(checkpoint_path))
trainer.test(routine, datamodule=builder)
# Compute inference time
if logger:
batch = builder.inference_data()
T = batch['data'].shape[-1]
n_steps = routine.n_steps or (T - 1)
routine = routine.cuda()
batch = routine.convert_data(batch)
routine.warmup()
start = time.time()
routine.infer(batch)
elapsed = time.time() - start
elapsed /= len(batch['data'])
elapsed /= routine.step_size * n_steps
logger.experiment.log({'inference_time': elapsed})
if __name__ == "__main__":
app()
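# Example invocation (illustrative; the config path and override syntax are
# assumptions based on the Hydra/Typer setup above, not taken from project docs):
#
#     python train.py experiments/my_experiment/config.yaml --trial 0 --no-logging
#
# Extra Hydra-style overrides can be appended after the config path, e.g.
#     python train.py experiments/my_experiment/config.yaml trainer.gpus=1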
|
1668502
|
import math
import cv2
import numpy as np
from dtld_parsing.calibration import CalibrationData
from typing import Tuple
__author__ = "<NAME>, <NAME> and <NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class ThreeDPosition(object):
"""
Three dimensional position with respect to a defined frame_id.
"""
def __init__(self, x: float, y: float, z: float, frame_id: str = "stereo_left"):
self._x = x
self._y = y
self._z = z
self._frame_id = frame_id
def set_pos(self, x: float, y: float, z: float):
self._x = x
self._y = y
self._z = z
def move_pos(self, x: float, y: float, z: float):
self._x += x
self._y += y
self._z += z
def get_pos(self) -> Tuple[float, float, float]:
return self._x, self._y, self._z
class ThreeDimensionalPosition(object):
def __init__(
self,
calibration_left: CalibrationData,
calibration_right: CalibrationData,
binning_x: int = 0,
binning_y: int = 0,
roi_offset_x: int = 0,
roi_offset_y: int = 0,
):
"""
Class determining the 3D position of objects from disparity images.
Args:
calibration_left(CalibrationData): calibration for left camera
calibration_right(CalibrationData): calibration for right camera
binning_x(int): binning between original camera and disparity image in x direction
binning_y(int): binning between original camera and disparity image in y direction
roi_offset_x(int): RoI offset in x
roi_offset_y(int): RoI offset in y
"""
self._calibration_left = calibration_left
self._calibration_right = calibration_right
self._binning_x = binning_x
self._binning_y = binning_y
self._roi_offset_x = roi_offset_x
self._roi_offset_y = roi_offset_y
def unrectify_rectangle(self, x: int, y: int, width: int, height: int):
"""
Converts raw (unrectified) image coordinates to rectified coordinates using the calibration matrices.
Args:
x(int): upper left corner of bbox
y(int): upper left corner of bbox
width(int): width of bbox
height(int): height of bbox
Returns:
x, y, width, height in unrectified coordinates
"""
# not rectified coordinates
pt_distorted = np.array([[float(x), float(y)], [float(x + width), float(y + height),],])
pt_distorted = pt_distorted[:, np.newaxis, :]
# rectify points
pt_undistorted = cv2.undistortPoints(
pt_distorted,
self._calibration_left.intrinsic_calibration.intrinsic_matrix,
self._calibration_left.distortion_calibration.distortion_matrix,
R=self._calibration_left.rectification_matrix.rectification_matrix,
P=self._calibration_left.projection_matrix.projection_matrix,
)
# get new coords
x_out = pt_undistorted[0][0][0]
y_out = pt_undistorted[0][0][1]
w_out = pt_undistorted[1][0][0] - pt_undistorted[0][0][0]
h_out = pt_undistorted[1][0][1] - pt_undistorted[0][0][1]
# binning in x and y (camera images were binned before
# disparity calculation)
return (
int(round(x_out / float(self._binning_x))),
int(round(y_out / float(self._binning_y))),
int(round(w_out / float(self._binning_x))),
int(round(h_out / float(self._binning_y))),
)
def determine_disparity(self, x: int, y: int, width: int, height: int, disparity_image: np.ndarray) -> float:
"""
Calculates disparity from unrectified coordinates using calibration matrices and disparity image input.
Args:
x(int): upper left corner of bbox
y(int): upper left corner of bbox
width(int): width of bbox
height(int): height of bbox
disparity_image(np.ndarray): disparity image
Returns:
float: median disparity in RoI
"""
disparity_crop = disparity_image[y : y + height, x : x + width]
# image = cv2.rectangle(
# disparity_image, (int(x), int(y)), (int(x) + int(width), int(y) + int(height)), (255, 255, 255), 1,
# )
# cv2.imwrite("/home/muelju3/disp.png", image)
return np.nanmedian(disparity_crop)
def determine_three_dimensional_position(
self, x: int, y: int, width: int, height: int, disparity_image: np.ndarray
) -> ThreeDPosition:
"""
Calculates 3d position from rectified coordinates using calibration matrices and disparity image input.
Args:
x(int): upper left corner of bbox
y(int): upper left corner of bbox
width(int): width of bbox
height(int): height of bbox
disparity_image(np.ndarray): disparity image
Returns:
ThreeDPosition: ThreeDPosition
"""
x_u, y_u, width_u, height_u = self.unrectify_rectangle(x=x, y=y, width=width, height=height)
disparity = self.determine_disparity(
x=x_u - int(round(self._roi_offset_x / self._binning_x)),
y=y_u - int(round(self._roi_offset_y / self._binning_y)),
width=width_u,
height=height_u,
disparity_image=disparity_image,
)
# all values inside bbox are nan --> no depth
if disparity == 0.0 or math.isnan(disparity):
return ThreeDPosition(x=-1.0, y=-1.0, z=-1.0, frame_id="stereo_left")
return self.twod_point_to_threed_from_disparity(x=x + width / 2.0, y=y + height / 2.0, disparity=disparity)
def twod_point_to_threed_from_disparity(self, x, y, disparity):
# get calibration values
left_fx = self._calibration_left.intrinsic_calibration.fx
left_fy = self._calibration_left.intrinsic_calibration.fy
left_cx = self._calibration_left.intrinsic_calibration.cx
left_cy = self._calibration_left.intrinsic_calibration.cy
tx = -1.0 * self._calibration_right.projection_matrix.baseline
# determine 3d pos
x_world = left_fy * tx * x - left_fy * left_cx * tx
y_world = left_fx * tx * y - left_fx * left_cy * tx
z_world = left_fx * left_fy * tx
# normalize
w = -1.0 * self._binning_x * left_fy * disparity
return ThreeDPosition(x=x_world / w, y=y_world / w, z=z_world / w, frame_id="stereo_left")
def twod_point_to_threed_from_depth(self, x: int, y: int, depth: float) -> ThreeDPosition:
disparity = self.depth_to_disparity(depth)
return self.twod_point_to_threed_from_disparity(x, y, disparity)
def disparity_to_depth(self, disparity: float) -> float:
"""
Converts disparity to depth.
Args:
disparity(float): Disparity in pixels
Returns:
float: depth value in meters
"""
tx = -1.0 * self._calibration_right.projection_matrix.tx
return tx / (disparity * self._binning_x)
def depth_to_disparity(self, depth: float) -> float:
"""
Converts depth to disparity.
Args:
depth(float): Depth in meters
Returns:
float: disparity in pixels
"""
tx = -1.0 * self._calibration_right.projection_matrix.tx
return tx / (depth * self._binning_x)
def twod_from_threed(self, x: float, y: float, z: float):
"""
Calculates hypothesis size in pixels based on depth of object.
Args:
x(float): 3D position x coordinate
y(float): 3D position z coordinate
z(float): 3D position y coordinate
Returns:
int, int: 2d pos
"""
# translation = depth
t_vec = np.array([0.0, 0.0, 0.0])
r_vec = np.array([0.0, 0.0, 0.0])
# world corner points of object (float object assumption)
world_points = np.array([[x, y, z],])
# project world points on image plane
image_points = cv2.projectPoints(
world_points,
r_vec,
t_vec,
self._calibration_left.intrinsic_calibration.intrinsic_matrix,
distCoeffs=self._calibration_left.distortion_calibration.distortion_matrix,
)[0].tolist()
# determine box width and height
return image_points[0][0][0], image_points[0][0][1]
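# A minimal worked example of the disparity <-> depth relationship used above
# (a sketch with assumed calibration values, not numbers from a real rig):
# if the right camera's projection matrix follows the usual ROS convention,
# projection_matrix.tx = -fx * baseline, e.g. fx = 1000 px and baseline = 0.25 m
# give tx = -1.0 * (-250.0) = 250.0. With binning 1, a disparity of 50 px then
# maps to a depth of 250.0 / (50 * 1) = 5.0 m, and depth_to_disparity(5.0)
# recovers the 50 px disparity.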
|
1668546
|
from koursaros.gnes_addons import Flow
flow = (
Flow(check_version=True)
.add_client(name='postgres', yaml_path='clients/postgres/wikititles.yml')
.add_preprocessor(name='sentsplit', replicas=2,
yaml_path='services/preprocessors/sentsplit/jsonmode.yml')
.add_encoder(name='textbyte', recv_from='sentsplit', replicas=2,
yaml_path='services/encoders/textbyte/max256.yml')
.add_indexer(name='keyword', replicas=2,
yaml_path='services/indexers/keyword/base.yml')
.add_indexer(name='lvdb', replicas=2, yaml_path='services/indexers/lvdb/base.yml')
.add_encoder(name='robertainfer', replicas=2,
yaml_path='services/encoders/robertainfer/dim64.yml')
.add_router(name='reduce', num_part=2, yaml_path='BaseReduceRouter')
)
# check out what the flow looks like (...and post it on Twitter, but hey what do I know about promoting OSS)
# funny!
|
1668569
|
import struct
import numpy as np
import rospy
from sensor_msgs.msg import PointCloud2, PointField
from nclt2ros.extractor.base_raw_data import BaseRawData
from nclt2ros.converter.base_convert import BaseConvert
class VelodyneData(BaseRawData, BaseConvert):
"""Class to convert the velodyne binary file to ROS PointCloud2 messages
USAGE:
VelodyneData('2013-01-10')
"""
def __init__(self, date):
# init base class
BaseRawData.__init__(self, date=date)
BaseConvert.__init__(self, date=date)
# load velodyne_binary file
if self.velodyne_data_flag:
self.f_bin_velodyne = open(self.velodyne_data_dir + '/%s/velodyne_hits.bin' % self.date, 'rb')
else:
raise ValueError('velodyne_data directory not exists')
def verify_magic(self, s):
"""verifies the magic bytes at the start of a packet
:param s: the 8 bytes read from the binary file
:return: True if the magic bytes are correct, False otherwise
"""
magic = 44444
m = struct.unpack('<HHHH', s)
return (len(m) >= 3) and (m[0] == magic) and (m[1] == magic) and (m[2] == magic) and (m[3] == magic)
def convert_velodyne(self, x_s, y_s, z_s):
"""converts the velodyne binary data to corrected values, check out the paper http://robots.engin.umich.edu/nclt/nclt.pdf
:param x_s: x value from binary file
:param y_s: y value from binary file
:param z_s: z value from binary file
:return: converted x, y, z values
"""
scaling = 0.005
offset = -100.0
x = x_s * scaling + offset
y = y_s * scaling + offset
z = z_s * scaling + offset
return x, y, z
def read_next_velodyne_packet(self):
"""reads the velodyne binary file
:return: utime:
data:
num_hits:
"""
try:
magic = self.f_bin_velodyne.read(8)
if magic == b'':  # EOF reached
    return -1, None
if not self.verify_magic(magic):
    print("Could not verify magic")
    return -1, None
num_hits = struct.unpack('<I', self.f_bin_velodyne.read(4))[0]
utime = struct.unpack('<Q', self.f_bin_velodyne.read(8))[0]
self.f_bin_velodyne.read(4) # padding
data = []
for i in range(num_hits):
x = struct.unpack('<H', self.f_bin_velodyne.read(2))[0]
y = struct.unpack('<H', self.f_bin_velodyne.read(2))[0]
z = struct.unpack('<H', self.f_bin_velodyne.read(2))[0]
intensity = struct.unpack('B', self.f_bin_velodyne.read(1))[0]
laser_id = struct.unpack('B', self.f_bin_velodyne.read(1))[0]
x, y, z = self.convert_velodyne(x, y, z)
data += [x, y, z, float(intensity), float(laser_id)]
return utime, data, num_hits
except Exception as e:
print(e)
def xyzil_array_to_pointcloud2(self, utime, hits):
"""reads the x, y, z, intensity, laser_id list and convert it into a pointcloud2 message
:param utime: timestamp in microseconds from the binary file
:param hits: list, containing the x, y, z intensity, laser_id
:return: timestamp: ros time object,
pc2_msg: pointcloud2 message
"""
timestamp = rospy.Time.from_sec(utime / 1e6)
points = np.array(hits)
pc2_msg = PointCloud2()
pc2_msg.header.stamp = timestamp
pc2_msg.header.frame_id = self.velodyne_frame
num_values = points.shape[0]
assert(num_values > 0)
NUM_FIELDS = 5
assert(np.mod(num_values, NUM_FIELDS) == 0)
num_points = num_values // NUM_FIELDS
assert(len(points.shape) == 1)
pc2_msg.height = 1
FLOAT_SIZE_BYTES = 4
pc2_msg.width = num_values * FLOAT_SIZE_BYTES
pc2_msg.fields = [
PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1),
PointField('i', 12, PointField.FLOAT32, 1),
PointField('l', 16, PointField.FLOAT32, 1)
]
pc2_msg.is_bigendian = False
pc2_msg.point_step = NUM_FIELDS * FLOAT_SIZE_BYTES
pc2_msg.row_step = pc2_msg.point_step * num_points
pc2_msg.is_dense = False
pc2_msg.width = num_points
pc2_msg.data = np.asarray(points, np.float32).tostring()
return timestamp, pc2_msg
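# Hedged usage sketch (assumes the 2013-01-10 NCLT download is in place and a
# ROS node has been initialised elsewhere; the publisher name is illustrative):
#
#   vel = VelodyneData('2013-01-10')
#   pub = rospy.Publisher('velodyne_points', PointCloud2, queue_size=10)
#   while not rospy.is_shutdown():
#       packet = vel.read_next_velodyne_packet()
#       if packet[0] == -1:  # EOF or corrupt magic
#           break
#       utime, hits = packet[0], packet[1]
#       _, pc2_msg = vel.xyzil_array_to_pointcloud2(utime, hits)
#       pub.publish(pc2_msg)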
|
1668572
|
import os
import unittest
import pprint
from google.protobuf.json_format import MessageToDict
from spaceone.core import utils, pygrpc
from spaceone.core.unittest.runner import RichTestRunner
class TestEndpoint(unittest.TestCase):
config = utils.load_yaml_from_file(
os.environ.get('SPACEONE_TEST_CONFIG_FILE', './config.yml'))
pp = pprint.PrettyPrinter(indent=4)
identity_v1 = None
domain = None
domain_owner = None
owner_id = None
owner_pw = None
owner_token = None
@classmethod
def setUpClass(cls):
super(TestEndpoint, cls).setUpClass()
endpoints = cls.config.get('ENDPOINTS', {})
cls.identity_v1 = pygrpc.client(endpoint=endpoints.get('identity', {}).get('v1'), version='v1')
cls._create_domain()
cls._create_domain_owner()
cls._issue_owner_token()
@classmethod
def tearDownClass(cls):
super(TestEndpoint, cls).tearDownClass()
cls.identity_v1.DomainOwner.delete(
{
'domain_id': cls.domain.domain_id,
'owner_id': cls.owner_id
},
metadata=(('token', cls.owner_token),)
)
print(f'>> delete domain owner: {cls.owner_id}')
if cls.domain:
cls.identity_v1.Domain.delete(
{
'domain_id': cls.domain.domain_id
},
metadata=(('token', cls.owner_token),)
)
print(f'>> delete domain: {cls.domain.name} ({cls.domain.domain_id})')
@classmethod
def _create_domain(cls):
name = utils.random_string()
params = {
'name': name
}
cls.domain = cls.identity_v1.Domain.create(params)
print(f'domain_id: {cls.domain.domain_id}')
print(f'domain_name: {cls.domain.name}')
@classmethod
def _create_domain_owner(cls):
cls.owner_id = utils.random_string()
cls.owner_pw = utils.generate_password()
owner = cls.identity_v1.DomainOwner.create({
'owner_id': cls.owner_id,
'password': cls.owner_pw,
'domain_id': cls.domain.domain_id
})
cls.domain_owner = owner
print(f'owner_id: {cls.owner_id}')
print(f'owner_pw: {cls.owner_pw}')
@classmethod
def _issue_owner_token(cls):
token_params = {
'user_type': 'DOMAIN_OWNER',
'user_id': cls.owner_id,
'credentials': {
'password': cls.owner_pw
},
'domain_id': cls.domain.domain_id
}
issue_token = cls.identity_v1.Token.issue(token_params)
cls.owner_token = issue_token.access_token
def setUp(self):
pass
def tearDown(self):
pass
def _print_data(self, message, description=None):
print()
if description:
print(f'[ {description} ]')
self.pp.pprint(MessageToDict(message, preserving_proto_field_name=True))
def test_list_endpoints(self):
params = {}
result = self.identity_v1.Endpoint.list(
params, metadata=(('token', self.owner_token),))
self._print_data(result, 'test_list_endpoints')
if __name__ == '__main__':
unittest.main(testRunner=RichTestRunner)
|
1668600
|
import json
import pytest
import yaml
from satosa.backends.base import BackendModule
from satosa.exception import SATOSAConfigurationError
from satosa.frontends.base import FrontendModule
from satosa.micro_services.base import RequestMicroService, ResponseMicroService
from satosa.plugin_loader import backend_filter, frontend_filter, _request_micro_service_filter, _response_micro_service_filter, _load_plugin_config
class TestFilters(object):
class BackendTestPluginModule(BackendModule):
pass
class FrontendTestPluginModule(FrontendModule):
pass
class RequestTestMicroService(RequestMicroService):
pass
class ResponseTestMicroService(ResponseMicroService):
pass
def test_backend_filter_rejects_base_class(self):
assert not backend_filter(BackendModule)
def test_backend_filter_rejects_frontend_plugin(self):
assert not backend_filter(TestFilters.FrontendTestPluginModule)
def test_backend_filter_accepts_backend_plugin(self):
assert backend_filter(TestFilters.BackendTestPluginModule)
def test_frontend_filter_rejects_base_class(self):
assert not frontend_filter(FrontendModule)
def test_frontend_filter_rejects_backend_plugin(self):
assert not frontend_filter(TestFilters.BackendTestPluginModule)
def test_frontend_filter_accepts_backend_plugin(self):
assert frontend_filter(TestFilters.FrontendTestPluginModule)
def test_request_micro_service_filter_rejects_base_class(self):
assert not _request_micro_service_filter(RequestMicroService)
def test_request_micro_service_filter_rejects_response_micro_service(self):
assert not _request_micro_service_filter(TestFilters.ResponseTestMicroService)
def test_request_micro_service_filter_accepts_request_micro_service(self):
assert _request_micro_service_filter(TestFilters.RequestTestMicroService)
def test_response_micro_service_filter_rejects_base_class(self):
assert not _response_micro_service_filter(ResponseMicroService)
def test_response_micro_service_filter_rejects_request_micro_service(self):
assert not _response_micro_service_filter(TestFilters.RequestTestMicroService)
def test_response_micro_service_filter_accepts_response_micro_service(self):
assert _response_micro_service_filter(TestFilters.ResponseTestMicroService)
class TestLoadPluginConfig(object):
def test_load_json(self):
data = {"foo": "bar"}
config = _load_plugin_config(json.dumps(data))
assert config == data
def test_can_load_yaml(self):
data = {"foo": "bar"}
config = _load_plugin_config(yaml.dump(data, default_flow_style=False))
assert config == data
def test_handles_malformed_data(self):
data = """{foo: bar""" # missing closing bracket
with pytest.raises(SATOSAConfigurationError):
_load_plugin_config(data)
|
1668678
|
import cv2
import numpy
import sys
import os
if len(sys.argv) == 2:
folder_path = str(sys.argv[1])
else:
print('## USAGE ## \n python readTiff_folder.py path_to_folder. \n Space Bar for next image. Any other key to exit. \n##')
sys.exit(1)
dirs = os.listdir(folder_path)
cv2.namedWindow(folder_path)
for imagePath in dirs:
image = cv2.imread(os.path.join(folder_path,imagePath), -1)
img_scaled = cv2.normalize(image, dst=None, alpha=0, beta=65535, norm_type=cv2.NORM_MINMAX)
smallest = numpy.amin(image)
biggest = numpy.amax(image)
print('Min: {} - Max: {}'.format(smallest, biggest))
print(image)
print(imagePath)
cv2.imshow(folder_path, img_scaled)
if cv2.waitKey() == 32:
continue
else:
break
cv2.destroyAllWindows()
|
1668684
|
from dtcwt.coeffs import biort, qshift
from pytest import raises
def test_antonini():
h0o, g0o, h1o, g1o = biort('antonini')
assert h0o.shape[0] == 9
assert g0o.shape[0] == 7
assert h1o.shape[0] == 7
assert g1o.shape[0] == 9
def test_legall():
h0o, g0o, h1o, g1o = biort('legall')
assert h0o.shape[0] == 5
assert g0o.shape[0] == 3
assert h1o.shape[0] == 3
assert g1o.shape[0] == 5
def test_near_sym_a():
h0o, g0o, h1o, g1o = biort('near_sym_a')
assert h0o.shape[0] == 5
assert g0o.shape[0] == 7
assert h1o.shape[0] == 7
assert g1o.shape[0] == 5
def test_near_sym_b():
h0o, g0o, h1o, g1o = biort('near_sym_b')
assert h0o.shape[0] == 13
assert g0o.shape[0] == 19
assert h1o.shape[0] == 19
assert g1o.shape[0] == 13
def test_qshift_06():
coeffs = qshift('qshift_06')
assert len(coeffs) == 8
for v in coeffs:
assert v.shape[0] == 10
def test_qshift_a():
coeffs = qshift('qshift_a')
assert len(coeffs) == 8
for v in coeffs:
assert v.shape[0] == 10
def test_qshift_b():
coeffs = qshift('qshift_b')
assert len(coeffs) == 8
for v in coeffs:
assert v.shape[0] == 14
def test_qshift_c():
coeffs = qshift('qshift_c')
assert len(coeffs) == 8
for v in coeffs:
assert v.shape[0] == 16
def test_qshift_d():
coeffs = qshift('qshift_d')
assert len(coeffs) == 8
for v in coeffs:
assert v.shape[0] == 18
def test_non_exist_biort():
with raises(IOError):
biort('this-does-not-exist')
def test_non_exist_qshift():
with raises(IOError):
qshift('this-does-not-exist')
def test_wrong_type_a():
with raises(ValueError):
biort('qshift_06')
def test_wrong_type_b():
with raises(ValueError):
qshift('antonini')
# vim:sw=4:sts=4:et
|
1668713
|
import numpy as np
from itertools import product
from analysis.utils import one_hot_to_int
def get_oq_keys(X_i, task, to_int=True):
"""extract obs/query keys from the input matrix, for one sample
Parameters
----------
X_i : np array
a sample from SequenceLearning task
task : object
the SequenceLearning task that generated X_i
to_int : bool
whether convert to integer representation
Returns
-------
list, list, list
observation keys, query keys, observation values
"""
# get the observation / query keys
o_key = X_i[:, :task.k_dim]
q_key = X_i[:, -task.k_dim:]
o_val = X_i[:, task.k_dim:task.k_dim + task.v_dim]
# convert to integer representation
if to_int:
o_key = [one_hot_to_int(o_key[t]) for t in range(len(o_key))]
q_key = [one_hot_to_int(q_key[t]) for t in range(len(q_key))]
o_val = [one_hot_to_int(o_val[t]) for t in range(len(o_val))]
return o_key, q_key, o_val
def set_nanadd(input_set, new_element):
"""set.add a new element, don't add np.nan
Parameters
----------
input_set : set
a set of int
new_element : int
a new element to be added to the set
Returns
-------
set
the set updated by the new element
"""
if not np.isnan(new_element):
input_set.add(new_element)
return input_set
def _compute_true_dk(o_key, q_key, o_val, task):
"""compute ground truth uncertainty for a trial
Parameters
----------
o_key : list of int
    the observation keys over time
q_key : list of int
    the query keys over time
o_val : list of int
    the observation values over time
task : obj
    the SL task
Returns
-------
1d np array of bool
    whether the ground truth at each time point is "don't know"
"""
assert task.n_parts == 2, 'this function only works for 2-part seq'
assert len(o_key) == len(q_key), 'obs seq length must match query seq'
T_total_ = len(o_key)
# T_part_ = T_total_ // task.n_parts
# prealloc
o_key_up_to_t, q_key_up_to_t = set(), set()
dk = np.ones(T_total_, dtype=bool)
# compute uncertainty info over time
for t in range(T_total_):
q_key_up_to_t = set_nanadd(q_key_up_to_t, q_key[t])
# if the observation is not nan (removed), consider it as an observed key
if not np.isnan(o_val[t]):
# if the key is not nan (due to delay), add it as an observed key
o_key_up_to_t = set_nanadd(o_key_up_to_t, o_key[t])
# if the query is in the observed key up to time t
if q_key[t] in o_key_up_to_t:
# shouldn't say don't know
dk[t] = False
# log info
# t_relative = np.mod(t, T_part_)
# print(f'time = {t}, {t_relative} / {T_total_} | dk = {dk[t]}')
# print(o_key_up_to_t)
# print(q_key_up_to_t)
return dk
def compute_true_dk(X_i, task):
"""compute objective uncertainty w/ or w/o EM (EM vs. WM), where ...
- with EM == no flushing, which applies to the RM condition
- WM == w/o EM == EM flushed, which applies to the NM and DM
Parameters
----------
X_i : np array
a sample from SequenceLearning task
task : object
the SequenceLearning task that generated X_i
Returns
-------
dict
ground truth / objective uncertainty
"""
assert task.n_parts == 2, 'this function only works for 2-part seq'
o_key, q_key, o_val = get_oq_keys(X_i, task, to_int=True)
T_total_ = len(o_key)
T_part_ = T_total_ // task.n_parts
dk = {}
dk['EM'] = _compute_true_dk(o_key, q_key, o_val, task)
dk['WM'] = _compute_true_dk(
o_key[T_part_:], q_key[T_part_:], o_val[T_part_:], task
)
return dk
def batch_compute_true_dk(X, task, dtype=bool):
"""compute the uncertainty ground truth for a sample/batch of data
- a wrapper for `compute_true_dk()`
Parameters
----------
X : 3d array
a sample from the SL task
task : obj
the SL task
Returns
-------
2d array, 2d array
uncertainty w/ w/o episodic flush
"""
n_samples = len(X)
dk_wm = np.zeros((n_samples, task.n_param), dtype=dtype)
dk_em = np.zeros((n_samples, task.n_param * task.n_parts), dtype=dtype)
# dk = [compute_true_dk(X[i], task) for i in range(n_samples)]
# pred_time_mask = [None] * n_samples
for i in range(n_samples):
T_total_i = np.shape(X[i])[0]
T_part_i, pad_len_i, _, _ = task.get_time_param(T_total_i)
pred_time_mask_i = task.get_pred_time_mask(
T_total_i, T_part_i, pad_len_i)
# compute objective uncertainty, w/ or w/o EM
dk_i = compute_true_dk(X[i], task)
dk_wm[i] = dk_i['WM'][pred_time_mask_i[T_part_i:]]
dk_em[i] = dk_i['EM'][pred_time_mask_i]
return dk_wm, dk_em
def compute_event_similarity_matrix(Y, normalize=False):
"""compute the inter-event similarity matrix of a batch of data
e.g.
task = SequenceLearning(n_param, n_branch, n_parts=1)
X, Y = task.sample(n_samples)
similarity_matrix = compute_event_similarity_matrix(Y, normalize=False)
Parameters
----------
Y : 3d array (n_examples, _, _) or 2d array (n_examples, _)
the target values
normalize : bool
whether to normalize by vector dim
Returns
-------
2d array (n_examples, n_examples)
the inter-event similarity matrix
"""
if len(np.shape(Y)) == 3:
Y_int = np.argmax(Y, axis=-1)
elif len(np.shape(Y)) == 2:
Y_int = Y
else:
raise ValueError('Invalid Y shape')
# prealloc
n_samples = np.shape(Y)[0]
similarity_matrix = np.zeros((n_samples, n_samples))
for i, j in product(range(n_samples), range(n_samples)):
similarity_matrix[i, j] = compute_event_similarity(
Y_int[i], Y_int[j], normalize=normalize)
return similarity_matrix
def compute_event_similarity(event_i, event_j, normalize=True):
"""compute the #shared elements for two arrays
e.g.
event_i = np.argmax(q_vals_vec[i], axis=-1)
event_j = np.argmax(q_vals_vec[j], axis=-1)
sim_ij = compute_event_similarity(event_i, event_j, normalize=True)
Parameters
----------
event_i/j : 1d np array
event representation
normalize : bool
whether to normalize by vector dim
Returns
-------
float
similarity
"""
assert np.shape(event_i) == np.shape(event_j)
similarity = np.sum(event_i == event_j)
if normalize:
return similarity / len(event_i)
return similarity
#
# def remove_identical_events(Ys, n_param):
# """remove events that are identical
#
# Parameters
# ----------
# Ys : 3d array
# number of examples x number of time points x feature dim
# n_param : int
# indicate max(number of shared parameters)
#
# Returns
# -------
# Ys : 3d array
# number of examples' x number of time points x feature dim
#
# """
# event_similarity_matrix = compute_event_similarity(Ys, tril_k=-1)
# repeated_id = np.where(event_similarity_matrix == n_param)
# rm_axis = 0
# Ys_ = np.delete(Ys, repeated_id[rm_axis], axis=rm_axis)
# return Ys_
|
1668754
|
from collections import Counter
from collections import defaultdict
from dataclasses import dataclass
from itertools import product
from math import ceil
from math import exp
from math import floor
from math import isclose
from math import log2
from math import sqrt
from typing import List
from typing import Tuple
import altair as alt
import pandas as pd
from rich.text import Text
from scipy.special import gammaincc
from scipy.special import hyp1f1
from scipy.stats import chisquare
from coinflip._randtests.common.collections import defaultlist
from coinflip._randtests.common.core import *
from coinflip._randtests.common.pprint import pretty_subseq
from coinflip._randtests.common.result import MultiTestResult
from coinflip._randtests.common.result import SubTestResult
from coinflip._randtests.common.result import TestResult
from coinflip._randtests.common.result import make_chisquare_table
from coinflip._randtests.common.result import make_testvars_table
from coinflip._randtests.common.result import plot_chi2_dist
from coinflip._randtests.common.testutils import blocks
from coinflip._randtests.common.testutils import rawblocks
from coinflip._randtests.common.testutils import slider
from coinflip._randtests.common.typing import Face
from coinflip._randtests.common.typing import Float
from coinflip._randtests.common.typing import Integer
__all__ = ["non_overlapping_template_matching", "overlapping_template_matching"]
# ------------------------------------------------------------------------------
# Non-overlapping Template Matching Test
@randtest()
def non_overlapping_template_matching(
series,
heads,
tails,
ctx,
template_size=None,
blocksize=None,
):
n = len(series)
if not blocksize:
blocksize = max(ceil(0.01 * n), 6)
if blocksize % 2 != 0:
blocksize -= 1
nblocks = n // blocksize
if not template_size:
template_size = max(min(blocksize // 3, 9), 2)
nblocks_sub = blocksize // template_size
set_task_total(ctx, 1 + nblocks * (nblocks_sub + 1) + 1)
failures = check_recommendations(
ctx,
{
"n ≥ 100": n >= 100,
"template_size = 9 or 10": template_size == 9 or template_size == 10,
"blocksize > 0.01 * n": blocksize > 0.01 * n,
"nblocks ≤ 100": nblocks <= 100, # TODO same thing as above?
"nblocks = ⌊n / blocksize⌋": nblocks == n // blocksize,
},
)
matches_expect = (blocksize - template_size + 1) / 2 ** template_size
variance = blocksize * (
(1 / 2 ** template_size) - ((2 * template_size - 1)) / 2 ** (2 * template_size)
)
advance_task(ctx)
template_block_matches = defaultdict(lambda: defaultlist(int))
for i, block in enumerate(blocks(series, blocksize)):
matches = defaultdict(int)
for window_tup in rawblocks(block, template_size):
matches[window_tup] += 1
advance_task(ctx)
for template, matches in matches.items():
template_block_matches[template][i] = matches
advance_task(ctx)
results = {}
for template in product([heads, tails], repeat=template_size):
block_matches = template_block_matches[template][:nblocks]
match_diffs = [matches - matches_expect for matches in block_matches]
statistic = sum(diff ** 2 / variance for diff in match_diffs)
p = gammaincc(nblocks / 2, statistic / 2)
results[template] = NonOverlappingTemplateMatchingSubTestResult(
statistic,
p,
template,
block_matches,
match_diffs,
)
advance_task(ctx)
return NonOverlappingTemplateMatchingMultiTestResult(
heads,
tails,
failures,
results,
template_size,
blocksize,
nblocks,
matches_expect,
variance,
)
@dataclass(unsafe_hash=True)
class NonOverlappingTemplateMatchingSubTestResult(SubTestResult):
template: Tuple[Face, ...]
block_matches: List[Integer]
match_diffs: List[Float]
@dataclass(unsafe_hash=True)
class NonOverlappingTemplateMatchingMultiTestResult(MultiTestResult):
template_size: Integer
blocksize: Integer
nblocks: Integer
matches_expect: Float
variance: Float
def _pretty_feature(self, result: NonOverlappingTemplateMatchingSubTestResult):
f_template = pretty_subseq(result.template, self.heads, self.tails)
return f_template
# TODO q value
def _render(self):
yield self._pretty_inputs(
("blocksize", self.blocksize),
("nblocks", self.nblocks),
)
yield self._results_table("template", "χ²")
def _render_sub(self, result: NonOverlappingTemplateMatchingSubTestResult):
yield result._pretty_result("chi-square")
title = Text("matches of ")
title.append(pretty_subseq(result.template, self.heads, self.tails))
title.append(" per block")
f_matches_expect = round(self.matches_expect, 1)
caption = f"expected {f_matches_expect} matches"
matches_count = Counter(result.block_matches)
table = sorted(matches_count.items())
f_table = make_testvars_table(
"matches", "nblocks", title=title, caption=caption
)
for matches, nblocks in table:
f_table.add_row(str(matches), str(nblocks))
yield f_table
# ------------------------------------------------------------------------------
# Overlapping Template Matching Test
# TODO Review paper "Correction of OTM Test Included in the NIST Randomness Test Suite"
@randtest()  # TODO appropriate min input
def overlapping_template_matching(
series,
heads,
tails,
ctx,
template_size=None,
blocksize=None,
matches_ceil=None,
):
n = len(series)
if matches_ceil is None:
matches_ceil = 5
df = matches_ceil # there are matches_ceil + 1 independent scores
if not blocksize:
blocksize = floor(sqrt(n))
nblocks = n // blocksize
if not template_size:
template_size = min(max(floor(sqrt(blocksize)), 2), 12)
template = [heads for _ in range(template_size)]
lambda_ = (blocksize - template_size + 1) / 2 ** template_size
eta = lambda_ / 2
first_prob = exp(-eta)
probabilities = [first_prob]
for matches in range(1, matches_ceil):
prob = ((eta * exp(-2 * eta)) / 2 ** matches) * hyp1f1(matches + 1, 2, eta)
probabilities.append(prob)
last_prob = 1 - sum(probabilities)
probabilities.append(last_prob)
set_task_total(ctx, 1 + nblocks + 2)
expected_tallies = [prob * nblocks for prob in probabilities]
failures = check_recommendations(
ctx,
{
"n ≥ 288": n >= 288,
"n ≥ nblocks * blocksize": n >= nblocks * blocksize,
"min(expected_tallies) > df": min(expected_tallies) > df,
"λ ≈ 2": isclose(lambda_, 2),
"len(template) ≈ log2(nblocks)": isclose(template_size, log2(nblocks)),
"df ≈ 2 * λ": isclose(template_size, 2 * lambda_),
},
)
advance_task(ctx)
block_matches = []
for block in blocks(series, blocksize):
matches = 0
for window_tup in slider(block, template_size):
if all(x == y for x, y in zip(window_tup, template)):
matches += 1
advance_task(ctx)
block_matches.append(matches)
tallies = [0 for _ in range(matches_ceil + 1)]
for matches in block_matches:
i = min(matches, matches_ceil)
tallies[i] += 1
advance_task(ctx)
statistic, p = chisquare(tallies, expected_tallies)
advance_task(ctx)
return OverlappingTemplateMatchingTestResult(
heads,
tails,
failures,
statistic,
p,
template_size,
blocksize,
matches_ceil,
nblocks,
lambda_,
template,
expected_tallies,
tallies,
)
@dataclass
class OverlappingTemplateMatchingTestResult(TestResult):
template_size: Integer
blocksize: Integer
matches_ceil: Integer
nblocks: Integer
lambda_: Float
template: Tuple[Face, ...]
expected_tallies: List[Integer]
tallies: List[Integer]
def _fmt_matches(self):
f_matches = [str(x) for x in range(self.matches_ceil + 1)]
f_matches[-1] = f"{f_matches[-1]}+"
return f_matches
def _render(self):
yield self._pretty_result("chi-square")
yield TestResult._pretty_inputs(
("template size", self.template_size),
("blocksize", self.blocksize),
("nblocks", self.nblocks),
)
title = Text("matches of ")
title.append(pretty_subseq(self.template, self.heads, self.tails))
title.append(" per block")
f_matches = self._fmt_matches()
table = make_chisquare_table(
title,
"matches",
f_matches,
self.expected_tallies,
self.tallies,
)
yield table
def plot_template_matches(self):
df = pd.DataFrame(
{
"matches": self._fmt_matches(),
"expected": self.expected_tallies,
"observed": self.tallies,
}
)
df = df.melt("matches", var_name="type", value_name="nblocks")
f_template = pretty_subseq(self.template, self.heads, self.tails)
chart = (
alt.Chart(df)
.mark_bar()
.encode(
alt.X("matches", title="Matches"),
alt.Y(
"nblocks:Q",
title="Number of blocks",
),
column=alt.Column(
"type:N",
title=None,
),
)
.properties(title=f"Overlapping matches of {f_template} per block")
)
return chart
def plot_refdist(self):
return plot_chi2_dist(self.statistic, self.matches_ceil)
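# Worked example of the non-overlapping test's expectations (illustrative
# numbers, not values produced by this module): for blocksize M = 968 and
# template_size m = 9, matches_expect = (M - m + 1) / 2**m = 960 / 512 = 1.875
# and variance = M * (1 / 2**m - (2*m - 1) / 2**(2*m))
# = 968 * (1/512 - 17/262144) ≈ 1.828, the per-block mean and variance that the
# chi-square statistic above is normalised against.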
|
1668770
|
import json
import logging
from datetime import datetime
from typing import List, cast
from chaos_genius.connectors import (
get_schema_names,
get_sqla_db_conn,
get_table_info,
get_table_list,
)
from chaos_genius.controllers.data_source_controller import get_datasource_data_from_id
from chaos_genius.databases.models.data_source_metadata_model import DataSourceMetadata
from chaos_genius.databases.models.data_source_model import DataSource
from chaos_genius.utils.metadata_api_config import NON_THIRD_PARTY_DATASOURCES
logger = logging.getLogger(__name__)
def fetch_schema_list(data_source_id: int, as_obj: bool = False):
"""Fetch the schema list from the metadata of the given data source."""
schema_list = []
data_source_metadata: DataSourceMetadata = (
DataSourceMetadata.query.filter(
DataSourceMetadata.data_source_id == data_source_id,
DataSourceMetadata.metadata_type == "schema_list",
)
.order_by(DataSourceMetadata.created_at.desc())
.first()
)
if data_source_metadata:
schema_list: List[str] = data_source_metadata.metadata_info.get("schema_list", [])
if as_obj:
return data_source_metadata
else:
return schema_list
def fetch_table_list(data_source_id: int, schema: str, as_obj: bool=False):
"""Fetch the table list from the metadata of the given data source and schema."""
table_list = []
data_source_metadata: DataSourceMetadata = (
DataSourceMetadata.query.filter(
DataSourceMetadata.data_source_id == data_source_id,
DataSourceMetadata.metadata_type == "table_list",
DataSourceMetadata.metadata_param == get_metadata_param_str([schema]),
)
.order_by(DataSourceMetadata.created_at.desc())
.first()
)
if data_source_metadata:
table_list = data_source_metadata.metadata_info.get("table_list", [])
if as_obj:
return data_source_metadata
else:
return table_list
def delete_table_list(data_source_id: int, schema: str):
"""Delete the table list from the metadata of the given data source and schema."""
data_source_metadata: DataSourceMetadata = (
DataSourceMetadata.query.filter(
DataSourceMetadata.data_source_id == data_source_id,
DataSourceMetadata.metadata_type == "table_list",
DataSourceMetadata.metadata_param == get_metadata_param_str([schema]),
)
.order_by(DataSourceMetadata.created_at.desc())
.first()
)
if data_source_metadata:
data_source_metadata.delete(commit=True)
def fetch_table_info(data_source_id: int, schema: str, table: str, as_obj: bool=False):
"""Fetch the table info from the metadata of the given data source and table."""
table_info = {}
data_source_metadata: DataSourceMetadata = (
DataSourceMetadata.query.filter(
DataSourceMetadata.data_source_id == data_source_id,
DataSourceMetadata.metadata_type == "table_info",
DataSourceMetadata.metadata_param
== get_metadata_param_str([schema, table]),
)
.order_by(DataSourceMetadata.created_at.desc())
.first()
)
if data_source_metadata:
table_info = data_source_metadata.metadata_info
if as_obj:
return data_source_metadata
else:
return table_info
def delete_table_info(data_source_id: int, schema: str, table: str):
"""Delete the table info from the metadata of the given data source and table"""
data_source_metadata: DataSourceMetadata = (
DataSourceMetadata.query.filter(
DataSourceMetadata.data_source_id == data_source_id,
DataSourceMetadata.metadata_type == "table_info",
DataSourceMetadata.metadata_param
== get_metadata_param_str([schema, table]),
)
.order_by(DataSourceMetadata.created_at.desc())
.first()
)
if data_source_metadata:
data_source_metadata.delete(commit=True)
def run_metadata_prefetch(data_source_id: int):
"""Fetch the metadata of the given data source."""
data_source_obj = cast(DataSource, get_datasource_data_from_id(data_source_id, as_obj=True))
sync_error = False
if data_source_obj.connection_type not in NON_THIRD_PARTY_DATASOURCES:
logger.warning(
f"Datasource with id: {data_source_id} is a third-party datasource"
)
return False
if data_source_obj.sync_status == "In Progress":
logger.warning(
f"Datasource with id: {data_source_id} already in Progress, skipping.."
)
return True
try:
data_source_obj.sync_status = "In Progress"
data_source_obj.update(commit=True)
db_connection = get_sqla_db_conn(data_source_obj.as_dict)
schema_list, old_schemas_list = scan_db_and_save_schema_list(
data_source_id, db_connection
)
for schema in schema_list:
table_list, old_tables_list = scan_db_and_save_table_list(
data_source_id, db_connection, schema
)
for table in table_list:
_ = scan_db_and_save_table_info(
data_source_id, db_connection, schema, table
)
table_to_delete = list(set(old_tables_list) - set(table_list))
for table in table_to_delete:
delete_table_info(data_source_id, schema, table)
schema_to_delete = list(set(old_schemas_list) - set(schema_list))
for schema in schema_to_delete:
delete_table_list(data_source_id, schema)
except Exception as err:
sync_error = True
logger.error("Error in metadata prefetch.", exc_info=err)
data_source_obj = cast(DataSource, get_datasource_data_from_id(data_source_id, as_obj=True))
data_source_obj.sync_status = "Completed" if not sync_error else "Error"
data_source_obj.last_sync = datetime.now()
data_source_obj.update(commit=True)
return not sync_error
def scan_db_and_save_schema_list(data_source_id, db_connection):
"""Scan the database for schema list."""
schema_list = get_schema_names({}, from_db_conn=True, db_conn=db_connection)
old_schemas = fetch_schema_list(data_source_id, as_obj=True)
data_source_metadata = DataSourceMetadata(
data_source_id=data_source_id,
metadata_type="schema_list",
metadata_param=get_metadata_param_str(),
metadata_info={"schema_list": schema_list},
)
data_source_metadata.save(commit=True)
old_schemas_list = []
if old_schemas:
old_schemas_list: List[str] = old_schemas.metadata_info.get("schema_list", [])
old_schemas.delete(commit=True)
return schema_list, old_schemas_list
def scan_db_and_save_table_list(data_source_id, db_connection, schema):
"""Scan the database for table list."""
table_list = get_table_list({}, schema, from_db_conn=True, db_conn=db_connection)
old_tables = fetch_table_list(data_source_id, schema, as_obj=True)
data_source_metadata = DataSourceMetadata(
data_source_id=data_source_id,
metadata_type="table_list",
metadata_param=get_metadata_param_str([schema]),
metadata_info={"table_list": table_list},
)
data_source_metadata.save(commit=True)
old_tables_list = []
if old_tables:
old_tables_list = old_tables.metadata_info.get("table_list", [])
old_tables.delete(commit=True)
return table_list, old_tables_list
def scan_db_and_save_table_info(data_source_id, db_connection, schema, table):
"""Scan the database for table info."""
table_info = get_table_info(
{}, schema, table, from_db_conn=True, db_conn=db_connection
)
old_table_info = fetch_table_info(data_source_id, schema, table, as_obj=True)
data_source_metadata = DataSourceMetadata(
data_source_id=data_source_id,
metadata_type="table_info",
metadata_param=get_metadata_param_str([schema, table]),
metadata_info=table_info,
)
data_source_metadata.save(commit=True)
if old_table_info:
old_table_info.delete(commit=True)
return table_info
def get_metadata_param_str(list_of_params=[]):
"""Get the metadata param string."""
return json.dumps(list_of_params)
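# Illustration (hedged, not part of the module API): metadata rows are keyed by
# the JSON-encoded parameter list, e.g. get_metadata_param_str() -> '[]' and
# get_metadata_param_str(["public", "users"]) -> '["public", "users"]', which is
# the string the metadata_param filters above compare against.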
|
1668775
|
from httmock import HTTMock, with_httmock
from xml.dom.minidom import parseString
from django.test import TestCase
from authorizenet.models import CustomerProfile
from .utils import create_user, xml_to_dict
from .mocks import cim_url_match, customer_profile_success, delete_success
from .test_data import create_empty_profile_success, delete_profile_success
class RequestError(Exception):
pass
def error_on_request(url, request):
raise RequestError("CIM Request")
class CustomerProfileModelTests(TestCase):
"""Tests for CustomerProfile model"""
def setUp(self):
self.user = create_user(id=42, username='billy', password='password')
def create_profile(self):
return CustomerProfile.objects.create(
customer=self.user, profile_id='6666', sync=False)
def test_create_sync_no_data(self):
@cim_url_match
def request_handler(url, request):
request_xml = parseString(request.body)
self.assertEqual(xml_to_dict(request_xml),
create_empty_profile_success)
return customer_profile_success.format(
'createCustomerProfileResponse')
profile = CustomerProfile(customer=self.user)
with HTTMock(error_on_request):
self.assertRaises(RequestError, profile.save)
self.assertEqual(profile.profile_id, '')
with HTTMock(request_handler):
profile.save(sync=True)
self.assertEqual(profile.profile_id, '6666')
@with_httmock(error_on_request)
def test_create_no_sync(self):
profile = CustomerProfile(customer=self.user)
profile.save(sync=False)
self.assertEqual(profile.profile_id, '')
@with_httmock(error_on_request)
def test_edit(self):
profile = self.create_profile()
self.assertEqual(profile.profile_id, '6666')
profile.profile_id = '7777'
profile.save()
self.assertEqual(profile.profile_id, '7777')
profile.profile_id = '8888'
profile.save(sync=True)
self.assertEqual(profile.profile_id, '8888')
profile.profile_id = '9999'
profile.save(sync=False)
self.assertEqual(profile.profile_id, '9999')
def test_delete(self):
@cim_url_match
def request_handler(url, request):
request_xml = parseString(request.body)
self.assertEqual(xml_to_dict(request_xml),
delete_profile_success)
return delete_success.format(
'deleteCustomerProfileResponse')
profile = self.create_profile()
with HTTMock(request_handler):
profile.delete()
self.assertEqual(profile.__class__.objects.count(), 0)
|
1668811
|
from chainer.backends import cuda
from chainerkfac.optimizers.fisher_block import compute_pi
from chainerkfac.optimizers.fisher_block import FisherBlock
class FisherBlockConnection(FisherBlock):
def __init__(self, *args, **kwargs):
self._A = None
self._G = None
super(FisherBlockConnection, self).__init__(*args, **kwargs)
@property
def cov_forward(self):
return self.covs[0]
@property
def cov_backward(self):
return self.covs[1]
@property
def inv_forward(self):
return self.invs[0]
@property
def inv_backward(self):
return self.invs[1]
def is_mine(self, func, in_data, out_grad_data=None):
if not isinstance(func, self.funcclass):
return False
if in_data[1] is not self.link.W.data:
return False
return True
def forward_postprocess(self, func, in_data):
self._A = self.compute_A(in_data)
self.covs = [self._A, self._G]
def backward_preprocess(self, func, in_data, out_grad_data):
self._G = self.compute_G(in_data, out_grad_data)
self.covs = [self._A, self._G]
def compute_A(self, in_data):
raise NotImplementedError
def compute_G(self, in_data, out_grad_data):
raise NotImplementedError
def update_kfgrads(self):
self.check_attr('invs')
W = self.link.W
b = self.link.b
invs = self.invs
kfgrads = self.compute_kfgrads(W, b, invs)
if b is not None:
W.kfgrad = kfgrads[:, :-1].reshape(W.shape)
b.kfgrad = kfgrads[:, -1].reshape(b.shape)
else:
W.kfgrad = kfgrads.reshape(W.shape)
def compute_kfgrads(self, W, b, invs):
raise NotImplementedError
def get_diagvals(self):
A, G = self.cov_emas
xp = cuda.get_array_module(A)
rW = self.get_diagval('W') ** 0.5
diagvalsA = rW * xp.ones(A.shape[0])
diagvalsG = rW * xp.ones(G.shape[0])
if self.link.b is not None:
diagvalsA[-1] = rW
pi = compute_pi(A, G, self._pi_type)
setattr(self, 'diag_val_forward', pi * rW)
setattr(self, 'diag_val_backward', (1 / pi) * rW)
return [pi * diagvalsA, (1 / pi) * diagvalsG]
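# Hedged note (an illustration, not chainerkfac's actual implementation): for a
# fully-connected layer a concrete subclass would typically estimate the two
# Kronecker factors as A = E[a a^T] over the (bias-augmented) layer inputs and
# G = E[g g^T] over the gradients w.r.t. the layer outputs, so compute_kfgrads
# would return roughly inv(G) @ grad([W, b]) @ inv(A), the K-FAC preconditioned
# gradient that update_kfgrads() splits back into W.kfgrad and b.kfgrad.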
|
1668817
|
import abc
import os
import SimpleITK as sitk
import numpy as np
import pymia.data.conversion as conversion
import common.evalutation.numpyfunctions as np_fn
import common.utils.labelhelper as lh
import rechun.eval.helper as helper
import rechun.eval.evaldata as evdata
import rechun.directories as dirs
class Loader:
class Params:
def __init__(self, misc_entry='probabilities', need_target=True, need_prediction=True, need_t2_mask=False,
need_prediction_dist_and_boarder=False, need_gt_dist_and_boarder=False, images_needed: list=None,
need_img_props=False) -> None:
super().__init__()
self.misc_entry = misc_entry
self.need_target = need_target
self.need_prediction = need_prediction
self.need_t2_mask = need_t2_mask
self.need_gt_dist_and_boarder = need_gt_dist_and_boarder
self.need_prediction_dist_and_boarder = need_prediction_dist_and_boarder
self.images_needed = images_needed
self.need_img_props = need_img_props
def __init__(self) -> None:
super().__init__()
self.cached_entries = {}
self.cached_subject_id = None
def get_data(self, subject_file, params: Params):
if subject_file.subject != self.cached_subject_id:
self.cached_entries.clear()
self.cached_subject_id = subject_file.subject
to_eval = {}
misc_np, props = self._get_misc_entry(subject_file, params.misc_entry, 'img_properties')
to_eval[params.misc_entry] = misc_np
if params.need_img_props:
to_eval['img_properties'] = props
if params.need_target:
to_eval['target'] = self._get_target(subject_file, 'target')
if params.need_prediction:
to_eval['prediction'] = self._get_prediction(subject_file, 'prediction')
if params.need_gt_dist_and_boarder:
mask, distance = self._get_dist_and_boarder(subject_file, 'target_boarder', 'target_distance',
'target')
to_eval['target_boarder'] = mask
to_eval['target_distance'] = distance
if params.need_prediction_dist_and_boarder:
mask, distance = self._get_dist_and_boarder(subject_file, 'prediction_boarder', 'prediction_distance',
'prediction')
to_eval['prediction_boarder'] = mask
to_eval['prediction_distance'] = distance
if params.need_t2_mask:
to_eval['mask'] = self._get_t2_mask(subject_file, 'mask')
if params.images_needed:
for image_type in params.images_needed:
to_eval[image_type] = self._get_image(subject_file, image_type)
return to_eval
def _get_misc_entry(self, subject_file, entry: str, property_entry: str):
if entry in self.cached_entries:
return self.cached_entries[entry].copy(), self.cached_entries[property_entry]  # copy just to be sure that it hasn't been modified
file_path = subject_file.categories['misc'].entries[entry]
np_misc, props = conversion.SimpleITKNumpyImageBridge.convert(sitk.ReadImage(file_path))
self.cached_entries[entry] = np_misc
self.cached_entries[property_entry] = props
return self.cached_entries[entry], self.cached_entries[property_entry]
def _get_target(self, subject_file, entry):
if entry in self.cached_entries:
return self.cached_entries[entry].copy()  # copy just to be sure that it hasn't been modified
file_path = subject_file.categories['labels'].entries['gt']
target_np = sitk.GetArrayFromImage(sitk.ReadImage(file_path, sitk.sitkUInt8))
target_np[target_np > 0] = 1 # the labels are 0 to 4 but we only do 0 and 1
self.cached_entries[entry] = target_np
return target_np
def _get_image(self, subject_file, entry):
if entry in self.cached_entries:
return self.cached_entries[entry].copy()  # copy just to be sure that it hasn't been modified
file_path = subject_file.categories['images'].entries[entry]
image_np = sitk.GetArrayFromImage(sitk.ReadImage(file_path))
self.cached_entries[entry] = image_np
return image_np
def _get_prediction(self, subject_file, entry):
if entry in self.cached_entries:
return self.cached_entries[entry].copy()
file_path = subject_file.categories['labels'].entries[entry]
prediction_np = sitk.GetArrayFromImage(sitk.ReadImage(file_path, sitk.sitkUInt8))
self.cached_entries[entry] = prediction_np
return prediction_np
def _get_dist_and_boarder(self, subject_file, boarder_entry, dist_entry, prediction_entry):
if boarder_entry in self.cached_entries and dist_entry in self.cached_entries:
return self.cached_entries[boarder_entry].copy(), self.cached_entries[dist_entry].copy()
prediction_np = self._get_prediction(subject_file, prediction_entry)
distance, mask = lh.boarder_mask(prediction_np.astype(np.bool), distance_in=1, distance_out=1)
self.cached_entries[boarder_entry] = mask
self.cached_entries[dist_entry] = distance
return mask, distance
def _get_t2_mask(self, subject_file, entry):
if entry in self.cached_entries:
return self.cached_entries[entry].copy()  # copy just to be sure that it hasn't been modified
file_path = subject_file.categories['images'].entries['t2']
t2_np = sitk.GetArrayFromImage(sitk.ReadImage(file_path))
mask_np = t2_np > 0
self.cached_entries[entry] = mask_np
return mask_np
class PrepareData(abc.ABC):
@abc.abstractmethod
def __call__(self, to_eval: dict) -> dict:
pass
class ComposePreparation(PrepareData):
def __init__(self, prepare_data_list: list) -> None:
super().__init__()
self.prepare_data_list = prepare_data_list
def __call__(self, to_eval: dict) -> dict:
for prepare_data in self.prepare_data_list:
to_eval = prepare_data(to_eval)
return to_eval
class AddBackgroundProbabilities(PrepareData):
def __call__(self, to_eval: dict) -> dict:
to_eval['probabilities'] = helper.add_background_probability(to_eval['probabilities'])
return to_eval
class RescaleLinear(PrepareData):
def __init__(self, entry: str, min_: float, max_: float, epsilon=1e-5) -> None:
self.entry = entry
self.min = min_
self.max = max_
self.epsilon = epsilon # epsilon is used to have probs != 0 or 1
def __call__(self, to_eval: dict) -> dict:
prob_np = helper.rescale_uncertainties(to_eval[self.entry], self.min, self.max, self.epsilon)
to_eval[self.entry] = prob_np
return to_eval
class RescaleSubjectMinMax(PrepareData):
def __init__(self, entry: str, epsilon=1e-5) -> None:
self.entry = entry
self.epsilon = epsilon # epsilon is used to have probs != 0 or 1
def __call__(self, to_eval: dict) -> dict:
entry_np = to_eval[self.entry]
prob_np = helper.rescale_uncertainties(entry_np, entry_np.min(), entry_np.max(), self.epsilon)
to_eval[self.entry] = prob_np
return to_eval
class ToForegroundProbabilities(PrepareData):
def __call__(self, to_eval: dict) -> dict:
prob_np = helper.uncertainty_to_foreground_probabilities(to_eval['probabilities'], to_eval['prediction'])
to_eval['probabilities'] = prob_np
return to_eval
class ToEntropy(PrepareData):
def __init__(self, entropy_entry='uncertainty') -> None:
super().__init__()
self.nb_classes = 2 # everything is binary until now
self.entropy_entry = entropy_entry
def __call__(self, to_eval: dict) -> dict:
prob_np = to_eval['probabilities']
if prob_np.shape[-1] != self.nb_classes:
raise ValueError('last dimension of probability array ({}) must be equal to nb_classes ({})'
.format(prob_np.shape, self.nb_classes))
to_eval[self.entropy_entry] = np_fn.entropy(prob_np) / np.log(self.nb_classes)
helper.check_min_max(to_eval[self.entropy_entry], only_warn=True)
return to_eval
class MoveEntry(PrepareData):
def __init__(self, from_entry: str, to_entry: str) -> None:
super().__init__()
self.from_entry = from_entry
self.to_entry = to_entry
def __call__(self, to_eval: dict) -> dict:
to_eval[self.to_entry] = to_eval[self.from_entry]
return to_eval
def get_probability_preparation(eval_data: evdata.EvalData, rescale_confidence='subject', rescale_sigma='subject',
min_max_dir: str = None):
prepare = []
if eval_data.confidence_entry == 'probabilities':
prepare.append(AddBackgroundProbabilities())
return ComposePreparation(prepare), eval_data.id_
if eval_data.confidence_entry == 'confidence':
id_ = eval_data.id_
prep, prep_id = _get_rescale_prep_and_idstr(eval_data, rescale_confidence, min_max_dir)
if prep is not None:
prepare.append(prep)
id_ += prep_id
prepare.extend([
MoveEntry(eval_data.confidence_entry, 'probabilities'),
ToForegroundProbabilities(),
AddBackgroundProbabilities()
])
return ComposePreparation(prepare), id_
# if sigma or log-var
id_ = eval_data.id_
prep, prep_id = _get_rescale_prep_and_idstr(eval_data, rescale_sigma, min_max_dir)
if prep is not None:
prepare.append(prep)
id_ += prep_id
prepare.extend([
MoveEntry(eval_data.confidence_entry, 'probabilities'),
ToForegroundProbabilities(),
AddBackgroundProbabilities()
])
return ComposePreparation(prepare), id_
def get_uncertainty_preparation(eval_data: evdata.EvalData, rescale_confidence='', rescale_sigma='global',
min_max_dir: str = None):
prepare = []
if eval_data.confidence_entry == 'probabilities':
prepare.append(AddBackgroundProbabilities())
prepare.append(ToEntropy())
return ComposePreparation(prepare), eval_data.id_
if eval_data.confidence_entry == 'confidence':
id_ = eval_data.id_
prep, prep_id = _get_rescale_prep_and_idstr(eval_data, rescale_confidence, min_max_dir)
if prep is not None:
prepare.append(prep)
id_ += prep_id
prepare.append(MoveEntry(eval_data.confidence_entry, 'uncertainty'))
return ComposePreparation(prepare), id_
# sigma or log-var
id_ = eval_data.id_
prep, prep_id = _get_rescale_prep_and_idstr(eval_data, rescale_sigma, min_max_dir)
if prep is not None:
prepare.append(prep)
id_ += prep_id
prepare.append(MoveEntry(eval_data.confidence_entry, 'uncertainty'))
return ComposePreparation(prepare), id_
def _get_rescale_prep_and_idstr(eval_data: evdata.EvalData, rescale_type: str, min_max_dir: str = None):
if rescale_type == 'global':
min_max_path = os.path.join(min_max_dir, dirs.MINMAX_PLACEHOLDER.format(eval_data.id_))
min_, max_ = helper.read_min_max(min_max_path)
return RescaleLinear(eval_data.confidence_entry, min_, max_), '_globalrescale'
elif rescale_type == 'subject':
return RescaleSubjectMinMax(eval_data.confidence_entry), '_rescale'
else:
return None, ''
def get_confidence_entry_preparation(eval_data: evdata.EvalData, to_entry):
if eval_data.confidence_entry == 'probabilities':
return MoveEntry('probabilities', to_entry), eval_data.id_
if eval_data.confidence_entry == 'confidence':
return MoveEntry(eval_data.confidence_entry, to_entry), eval_data.id_
# sigma or log-var
return MoveEntry(eval_data.confidence_entry, to_entry), eval_data.id_
|
1668822
|
from astropy.tests.helper import remote_data
from astropy.table import Table
from astropy.io import fits
from beast.tools.convert_hdf5_to_fits import st_file
from beast.tests.helpers import download_rename, compare_tables
@remote_data
def test_convert_hd5_to_fits():
# Pick some random .hd5 file to convert
data_fname = download_rename("M31-B09-EAST_tinychunk.phot.hdf5")
data_fname_cache = download_rename("M31-B09-EAST_tinychunk.st.fits")
# Convert the file
st_file(data_fname)
# Compare the contents of the new file to the cached version
data = Table(fits.getdata(data_fname.replace("phot.hdf5", "st.fits")))
data_cache = Table(fits.getdata(data_fname_cache))
compare_tables(data_cache, data)
|
1668874
|
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
from torch.autograd import Variable
from layers.slice_pool_layer.slice_pool_layer import *
from layers.slice_unpool_layer.slice_unpool_layer import *
class RSNet(nn.Module):
def __init__(self, pool_type, num_slice=[None, None, None]):
super(RSNet, self).__init__()
# input: B, 1, N, 3
#-- conv block 1
self.conv_1 = nn.Conv2d( 1, 64, kernel_size=(1,9), stride=(1,1) )
self.bn_1 = nn.BatchNorm2d(64)
self.conv_2 = nn.Conv2d( 64, 64, kernel_size=(1,1), stride=(1,1) )
self.bn_2 = nn.BatchNorm2d(64)
self.conv_3 = nn.Conv2d( 64, 64, kernel_size=(1,1), stride=(1,1) )
self.bn_3 = nn.BatchNorm2d(64)
#-- RNN block
num_slice_x, num_slice_y, num_slice_z = num_slice
self.pool_x = SP(pool_type, num_slice_x)
self.pool_y = SP(pool_type, num_slice_y)
self.pool_z = SP(pool_type, num_slice_z)
self.rnn_type = 'GRU'
self.rnn_hidden_sz_list = [256, 128, 64, 64, 128, 256]
self.rnn_x_1 = nn.GRU(64, self.rnn_hidden_sz_list[0], 1, bidirectional=True)
self.rnn_x_2 = nn.GRU(512, self.rnn_hidden_sz_list[1], 1, bidirectional=True)
self.rnn_x_3 = nn.GRU(256, self.rnn_hidden_sz_list[2], 1, bidirectional=True)
self.rnn_x_4 = nn.GRU(128, self.rnn_hidden_sz_list[3], 1, bidirectional=True)
self.rnn_x_5 = nn.GRU(128, self.rnn_hidden_sz_list[4], 1, bidirectional=True)
self.rnn_x_6 = nn.GRU(256, self.rnn_hidden_sz_list[5], 1, bidirectional=True)
self.rnn_y_1 = nn.GRU(64, self.rnn_hidden_sz_list[0], 1, bidirectional=True)
self.rnn_y_2 = nn.GRU(512, self.rnn_hidden_sz_list[1], 1, bidirectional=True)
self.rnn_y_3 = nn.GRU(256, self.rnn_hidden_sz_list[2], 1, bidirectional=True)
self.rnn_y_4 = nn.GRU(128, self.rnn_hidden_sz_list[3], 1, bidirectional=True)
self.rnn_y_5 = nn.GRU(128, self.rnn_hidden_sz_list[4], 1, bidirectional=True)
self.rnn_y_6 = nn.GRU(256, self.rnn_hidden_sz_list[5], 1, bidirectional=True)
self.rnn_z_1 = nn.GRU(64, self.rnn_hidden_sz_list[0], 1, bidirectional=True)
self.rnn_z_2 = nn.GRU(512, self.rnn_hidden_sz_list[1], 1, bidirectional=True)
self.rnn_z_3 = nn.GRU(256, self.rnn_hidden_sz_list[2], 1, bidirectional=True)
self.rnn_z_4 = nn.GRU(128, self.rnn_hidden_sz_list[3], 1, bidirectional=True)
self.rnn_z_5 = nn.GRU(128, self.rnn_hidden_sz_list[4], 1, bidirectional=True)
self.rnn_z_6 = nn.GRU(256, self.rnn_hidden_sz_list[5], 1, bidirectional=True)
#-- conv block 3
self.un_pool_x = SU()
self.un_pool_y = SU()
self.un_pool_z = SU()
self.conv_6 = nn.Conv2d( 512, 512, kernel_size=(1,1), stride=(1,1) )
self.bn_6 = nn.BatchNorm2d(512)
self.conv_7 = nn.Conv2d( 512, 256, kernel_size=(1,1), stride=(1,1) )
self.bn_7 = nn.BatchNorm2d(256)
self.dp = nn.Dropout(p=0.3)
self.conv_8 = nn.Conv2d( 256, 13, kernel_size=(1,1), stride=(1,1) )
self.relu = nn.ReLU(inplace=True)
self._initialize_weights()
def forward(self, x, x_slice_idx, y_slice_idx, z_slice_idx, hidden_list):
num_batch, _, num_points, _ = x.size()
x_hidden_1, x_hidden_2, x_hidden_3, x_hidden_4, x_hidden_5, x_hidden_6, y_hidden_1, y_hidden_2, y_hidden_3, y_hidden_4, y_hidden_5, y_hidden_6, z_hidden_1, z_hidden_2, z_hidden_3, z_hidden_4, z_hidden_5, z_hidden_6 = hidden_list
#-- conv block 1
conv_1 = self.relu( self.bn_1( self.conv_1(x) ) ) # num_batch, 64, num_points, 1
conv_2 = self.relu( self.bn_2( self.conv_2(conv_1) ) ) # num_batch, 64, num_points, 1
conv_3 = self.relu( self.bn_3( self.conv_3(conv_2) ) ) # num_batch, 64, num_points, 1
#-- RNN block
x_pooled = self.pool_x( conv_3, x_slice_idx ) # num_batch, 64, numSlices, 1
y_pooled = self.pool_y( conv_3, y_slice_idx )
z_pooled = self.pool_z( conv_3, z_slice_idx )
x_pooled = x_pooled[:,:,:,0].permute( 2, 0, 1 ).contiguous()
y_pooled = y_pooled[:,:,:,0].permute( 2, 0, 1 ).contiguous()
z_pooled = z_pooled[:,:,:,0].permute( 2, 0, 1 ).contiguous()
x_rnn_1, _ = self.rnn_x_1( x_pooled, x_hidden_1 )
x_rnn_2, _ = self.rnn_x_2( x_rnn_1, x_hidden_2 )
x_rnn_3, _ = self.rnn_x_3( x_rnn_2, x_hidden_3 )
x_rnn_4, _ = self.rnn_x_4( x_rnn_3, x_hidden_4 )
x_rnn_5, _ = self.rnn_x_5( x_rnn_4, x_hidden_5 )
x_rnn_6, _ = self.rnn_x_6( x_rnn_5, x_hidden_6 )
y_rnn_1, _ = self.rnn_y_1( y_pooled, y_hidden_1 )
y_rnn_2, _ = self.rnn_y_2( y_rnn_1, y_hidden_2 )
y_rnn_3, _ = self.rnn_y_3( y_rnn_2, y_hidden_3 )
y_rnn_4, _ = self.rnn_y_4( y_rnn_3, y_hidden_4 )
y_rnn_5, _ = self.rnn_y_5( y_rnn_4, y_hidden_5 )
y_rnn_6, _ = self.rnn_y_6( y_rnn_5, y_hidden_6 )
z_rnn_1, _ = self.rnn_z_1( z_pooled, z_hidden_1 )
z_rnn_2, _ = self.rnn_z_2( z_rnn_1, z_hidden_2 )
z_rnn_3, _ = self.rnn_z_3( z_rnn_2, z_hidden_3 )
z_rnn_4, _ = self.rnn_z_4( z_rnn_3, z_hidden_4 )
z_rnn_5, _ = self.rnn_z_5( z_rnn_4, z_hidden_5 )
z_rnn_6, _ = self.rnn_z_6( z_rnn_5, z_hidden_6 )
#-- uppooling
x_rnn_6 = x_rnn_6.permute( 1, 2, 0 ).contiguous()
x_rnn_6 = x_rnn_6.view( x_rnn_6.size(0), x_rnn_6.size(1), x_rnn_6.size(2), 1 )
y_rnn_6 = y_rnn_6.permute( 1, 2, 0 ).contiguous()
y_rnn_6 = y_rnn_6.view( y_rnn_6.size(0), y_rnn_6.size(1), y_rnn_6.size(2), 1 )
z_rnn_6 = z_rnn_6.permute( 1, 2, 0 ).contiguous()
z_rnn_6 = z_rnn_6.view( z_rnn_6.size(0), z_rnn_6.size(1), z_rnn_6.size(2), 1 )
x_rnn_6 = self.un_pool_x( x_rnn_6, x_slice_idx )
y_rnn_6 = self.un_pool_y( y_rnn_6, y_slice_idx )
z_rnn_6 = self.un_pool_z( z_rnn_6, z_slice_idx )
#-- conv block 3
rnn = x_rnn_6 + y_rnn_6 + z_rnn_6
conv_6 = self.relu( self.bn_6( self.conv_6(rnn) ) ) # num_batch, 512, num_points, 1
conv_7 = self.relu( self.bn_7( self.conv_7(conv_6) ) ) # num_batch, 256, num_points, 1
droped = self.dp(conv_7)
conv_8 = self.conv_8(droped)
return conv_8
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def init_hidden(self, bsz = 1):
weight = next(self.parameters()).data
hidden_list = [ ]
for i in range(3):
for hid_sz in self.rnn_hidden_sz_list:
if self.rnn_type == 'LSTM':
hidden_list.append( (Variable(weight.new(2, bsz, hid_sz).zero_()),
Variable(weight.new(2, bsz, hid_sz).zero_())) )
else:
hidden_list.append( Variable(weight.new(2, bsz, hid_sz).zero_()) )
return hidden_list
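# Hedged usage sketch (shapes inferred from the layer definitions above; SP/SU
# are the custom slice pooling/unpooling extensions from this repo, so this
# only runs once they are built; pool_type='max' is an assumed value):
#
#   net = RSNet(pool_type='max', num_slice=[n_x, n_y, n_z])
#   hidden = net.init_hidden(bsz=1)
#   # x: (B, 1, N, 9) per-point features; *_slice_idx assign each of the N
#   # points to a slice along x/y/z as expected by the SP/SU layers
#   logits = net(x, x_slice_idx, y_slice_idx, z_slice_idx, hidden)  # (B, 13, N, 1)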
|
1668884
|
from filename_database.models import ExperimentType, ChargerDriveProfile, Category, SubCategory, ValidMetadata
import re
import datetime
import itertools
def guess_exp_type(file, root):
"""
This function takes a file as input and guesses what experiment type it is.
:param file:
:param root:
:return: the experiment type
"""
lowercase_file = file.lower()
fileList = re.split(r'-|_|\.|\s', lowercase_file)
# We handle cycling, formation and fra; maccor is the only exception
cat_match = {
'cycling': r'(^cyc$)|(^cycling$)',
'formation': r'(^form$)|(^fm$)',
'impedance': r'^fra$',
'rpt': r'^rpt$',
}
cat = None
broken = False
for k in cat_match.keys():
if broken:
break
for elem in fileList:
if re.match(cat_match[k], elem):
cat = Category.objects.get(name=k)
broken = True
break
if cat is not None:
# try to match subcategory
sub_match = {
'neware':r'(^neware$)|(^nw$)',
'moli':r'^mo$',
'uhpc':r'^uhpc$',
'novonix':r'(^novonix$)|(^nx$)',
}
sub = None
broken = False
for k in sub_match.keys():
if broken:
break
for elem in fileList[1:]:
if re.match(sub_match[k], elem):
sub = SubCategory.objects.get(name=k)
broken = True
break
if sub is None:
if 'NEWARE' in root:
sub = SubCategory.objects.get(name='neware')
else:
sub = SubCategory.objects.get(name='maccor')
exp_type = ExperimentType.objects.get(category=cat, subcategory=sub)
#TODO: add a field on ExperimentType holding the valid regexp for file extensions.
if sub.name=='neware':
if lowercase_file.split('.')[-1] != 'txt':
return None
return exp_type
#handle the rest
match = [
('gas', 'insitu', r'(^insitugas$)|(^insitu$)|(^gasinsitu$)'),
('impedance', 'eis', r'^eis$'),
('impedance', 'symmetric', r'(^sym$)|(^symmetric$)'),
('thermal', 'arc', r'^arc$'),
('thermal', 'microcalorimetry', r'^tam$'),
('storage', 'smart', r'smart'),
('storage', 'dumb', r'dumb'),
('electrolyte', 'gcms', r'^gcms$'),
('electrolyte', 'ldta', r'^ldta$'),
('electrode', 'xps', r'^xps$'),
]
for c, s, p in match:
for elem in fileList:
if re.search(p, elem):
cat = Category.objects.get(name=c)
sub = SubCategory.objects.get(name=s)
if cat.name == 'impedance' and sub.name == 'eis':
if 'MACCOR' in root:
sub = SubCategory.objects.get(name='maccor')
exp_type = ExperimentType.objects.get(category=cat, subcategory=sub)
return exp_type
return None
##============================================================================================##
# META-DATA EXTRACTOR FUNCTION #
##============================================================================================##
def get_date_obj(date_str):
"""
parse date string
:param date_str:
:return:
"""
mat1 = re.match(r'20(\d{2,2})(\d{2,2})(\d{2,2})', date_str)
mat2 = re.match(r'(\d{2,2})(\d{2,2})(\d{2,2})', date_str)
if mat1:
mat = mat1
elif mat2:
mat = mat2
else:
return None
year = 2000 + int(mat.group(1))
month = int(mat.group(2))
day = int(mat.group(3))
try :
return datetime.date(year,month,day)
except ValueError:
return None
# Function Definition
# Takes in name of file and experiment type as arguments
def deterministic_parser(filename, exp_type):
"""
given a filename and an experiment type,
parse as much metadata as possible
and return a valid_metadata object (None means no parsing; valid metadata with gaps in it means partial parsing.)
:param filename:
:param exp_type:
:return:
"""
lowercase_file = filename.lower()
fileList = re.split(r'-|_|\.|\s', lowercase_file)
def get_charID(fileList):
max_look = min(3, len(fileList)-1)
for elem in fileList[:max_look]:
if re.match(r'^[a-z]{2,5}$', elem) and not (
re.search(
r'(cyc)|(gcms)|(rpt)|(eis)|(fra)|(sym)|(arc)|(tam)|(xps)|(fm)|(mo)|(nw)|(nx)',
elem)):
return elem
return None
def get_possible_cell_ids(fileList):
possible_cell_ids = []
max_look = min(5, len(fileList) - 1)
for elem in fileList[:max_look]:
if (not re.match(r'200[8-9]0[1-9][0-3][0-9]$|'
r'200[8-9]1[0-2][0-3][0-9]$|'
r'20[1-2][0-9]0[1-9][0-2][0-9]$|'
r'20[1-2][0-9]1[0-1][0-2][0-9]$|'
r'20[1-2][0-9]0[1-9][0-3][0-1]$|'
r'20[1-2][0-9]1[0-1][0-3][0-1]$|'
r'0[8-9]0[1-9][0-3][0-9]$|'
r'0[8-9]1[0-2][0-3][0-9]$|'
r'[1-2][0-9]0[1-9][0-2][0-9]$|'
r'[1-2][0-9]1[0-2][0-2][0-9]$|'
r'[1-2][0-9]0[1-9][0-3][0-1]$|'
r'[1-2][0-9]1[0-2][0-3][0-1]$',
elem)) and (re.match(r'^(\d{5,6})$|^(0\d{5,5})$', elem)) and elem.isdigit():
possible_cell_ids.append( int(elem))
return possible_cell_ids
def get_start_cycle(fileList, avoid=None):
max_look = min(7, len(fileList) - 1)
for elem in fileList[: max_look]:
match = re.match(r'^c(\d{1,4})$', elem)
if match:
if avoid is not None and avoid == int(match.group(1)):
avoid = None
continue
return int(match.group(1))
return None
def get_temperature(fileList):
for elem in fileList:
match = re.match(r'^(\d{2})c$', elem)
if match:
return int(match.group(1))
return None
def get_voltage(fileList):
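# "43v" -> 4.3 V: the first captured digit is the integer part and the
# remaining digits are decimals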
for elem in fileList:
match = re.match(r'^(\d{1,3})v$', elem)
if match:
str_voltage = match.group(1)
n = len(str_voltage)
divider = 10.**(float(n)-1)
return float(str_voltage)/divider
return None
def get_possible_dates(fileList):
possible_dates = []
for elem in fileList:
if re.match(r'^[0-9]{6,8}$', elem):
date = get_date_obj(elem)
if date is not None:
possible_dates.append(date)
return possible_dates
def get_version_number(fileList):
for field in fileList:
match = re.match(r'v(\d)', field)
if match:
return int(match.group(1))
def get_ac_increment(fileList):
for i in range(len(fileList) - 1):
match1 = re.match(r'^sym$', fileList[i])
matchA = re.match(r'^a(\d{1,3})$', fileList[i + 1])
matchC = re.match(r'^c(\d{1,3})$', fileList[i + 1])
if match1 and matchA:
return ValidMetadata.ANODE, int(matchA.group(1))
elif match1 and matchC:
return ValidMetadata.CATHODE, int(matchC.group(1))
return None, None
def get_ac(fileList):
for i in range(len(fileList) - 1):
match1 = re.match(r'^xps$', fileList[i])
matchA = re.match(r'^a$', fileList[i + 1])
matchC = re.match(r'^c$', fileList[i + 1])
if match1 and matchA:
return ValidMetadata.ANODE
elif match1 and matchC:
return ValidMetadata.CATHODE
return None
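# each entry is (pattern, profile, X-flag[, Y-flag]): a True flag means the
# captured digits are the rate denominator (e.g. "c2" -> C/2), False means
# they are the numerator (e.g. "2c" -> 2C)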
drive_profile_match_dict = {
'cxcy': (r'^c(\d{1,2})c(\d{1,2})$', ChargerDriveProfile.objects.get(drive_profile='CXCY'), True, True),
'xcyc': (r'^(\d{1,2})c(\d{1,2})c$', ChargerDriveProfile.objects.get(drive_profile='CXCY'), False, False),
'xccy': (r'^(\d{1,2})cc(\d{1,2})$', ChargerDriveProfile.objects.get(drive_profile='CXCY'), False, True),
'cxcyc': (r'^c(\d{1,2})c(\d{1,2})c$', ChargerDriveProfile.objects.get(drive_profile='CXCYc'), True, True),
'xcycc': (r'^(\d{1,2})c(\d{1,2})cc$', ChargerDriveProfile.objects.get(drive_profile='CXCYc'), False, False),
'xccyc': (r'^(\d{1,2})cc(\d{1,2})c$', ChargerDriveProfile.objects.get(drive_profile='CXCYc'), False, True),
'cxrc': (r'^c(\d{1,2})rc$', ChargerDriveProfile.objects.get(drive_profile='CXrc'), True),
'xcrc': (r'^(\d{1,2})crc$', ChargerDriveProfile.objects.get(drive_profile='CXrc'), False),
'cxcyb': (r'^c(\d{1,2})c(\d{1,2})b$', ChargerDriveProfile.objects.get(drive_profile='CXCYb'), True, True),
'xcycb': (r'^(\d{1,2})c(\d{1,2})cb$', ChargerDriveProfile.objects.get(drive_profile='CXCYb'), False, False),
'xccyb': (r'^(\d{1,2})cc(\d{1,2})b$', ChargerDriveProfile.objects.get(drive_profile='CXCYb'), False, True),
'cxsz': (r'^c(\d{1,2})s(\d{2,3})$', ChargerDriveProfile.objects.get(drive_profile='CXsZZZ'), True),
'xcsz': (r'^(\d{1,2})cs(\d{2,3})$', ChargerDriveProfile.objects.get(drive_profile='CXsZZZ'), False),
'cx': (r'^c(\d{1,2})$', ChargerDriveProfile.objects.get(drive_profile='CX'), True),
'xc': (r'^(\d{1,2})c$', ChargerDriveProfile.objects.get(drive_profile='CX'), False),
}
def get_possible_drive_profiles(fileList):
possible_drive_profiles = []
if len(fileList) < 4:
return possible_drive_profiles
for elem in fileList[3:]:
if re.match(r'(^0c$)|(^20c$)|(^40c$)|(^55c$)|(^c0$)|(^c1$)', elem):
continue
for k in drive_profile_match_dict.keys():
m = re.match(drive_profile_match_dict[k][0], elem)
if m:
#special cases
my_dp = {'drive_profile': drive_profile_match_dict[k][1]}
if drive_profile_match_dict[k][2]:
my_dp['drive_profile_x_numerator'] = 1
my_dp['drive_profile_x_denominator'] = int(m.group(1))
else:
my_dp['drive_profile_x_numerator'] = int(m.group(1))
my_dp['drive_profile_x_denominator'] = 1
if ((drive_profile_match_dict[k][1].drive_profile=='CXCY') and
(drive_profile_match_dict[k][2] == drive_profile_match_dict[k][3]) and
(m.group(1) == m.group(2))):
# CXCX
my_dp['drive_profile'] = ChargerDriveProfile.objects.get(drive_profile='CXCX')
elif drive_profile_match_dict[k][1].drive_profile=='CXsZZZ':
# CXsZZZ
n = len(m.group(2))
my_dp['drive_profile_z'] = float(m.group(2))/(10.**(float(n)-1))
else:
if len(drive_profile_match_dict[k]) == 4:
if drive_profile_match_dict[k][3]:
my_dp['drive_profile_y_numerator'] = 1
my_dp['drive_profile_y_denominator'] = int(m.group(2))
else:
my_dp['drive_profile_y_numerator'] = int(m.group(2))
my_dp['drive_profile_y_denominator'] = 1
possible_drive_profiles.append(my_dp)
break
return possible_drive_profiles
# TODO: once you have a date, you must prevent cell_id from being that date.
# TODO: for now, if multiple alternatives show up, take first one and print.
metadata = ValidMetadata(experiment_type=exp_type)
valid = True
charID = get_charID(fileList)
if charID is None:
valid = False
else:
metadata.charID = charID
dates = get_possible_dates(fileList)
if len(dates) == 0:
valid = False
else:
# one or more candidate dates: take the first (see TODO above)
metadata.date = dates[0]
if exp_type.cell_id_active:
cell_ids = get_possible_cell_ids(fileList)
if len(cell_ids) == 0:
valid = False
else:
if metadata.date is None:
if len(cell_ids) > 1:
valid = False
else:
metadata.cell_id = cell_ids[0]
else:
valid_cell_ids = []
for cell_id in cell_ids:
date_pieces = [metadata.date.year % 100, metadata.date.month, metadata.date.day]
all_perms = list(itertools.permutations(date_pieces))
cell_id_ok = True
for p in all_perms:
if cell_id == p[0] + p[1]*100 + p[2]*10000:
cell_id_ok = False
break
if cell_id_ok:
valid_cell_ids.append(cell_id)
if len(valid_cell_ids) > 1 or len(valid_cell_ids) == 0:
valid = False
else:
metadata.cell_id = valid_cell_ids[0]
if exp_type.AC_active and exp_type.AC_increment_active:
ac, increment = get_ac_increment(fileList)
if ac is None:
valid = False
else:
metadata.AC = ac
metadata.AC_increment = increment
if exp_type.AC_active and not exp_type.AC_increment_active:
ac = get_ac(fileList)
if ac is None:
valid = False
else:
metadata.AC = ac
if exp_type.start_cycle_active:
avoid = None
if metadata.AC is not None and metadata.AC == ValidMetadata.CATHODE and metadata.AC_increment is not None:
avoid = metadata.AC_increment
start_cycle = get_start_cycle(fileList, avoid)
if start_cycle is None:
valid = False
else:
metadata.start_cycle = start_cycle
if exp_type.voltage_active:
voltage = get_voltage(fileList)
if voltage is None:
valid = False
else:
metadata.voltage = voltage
if exp_type.temperature_active:
temperature = get_temperature(fileList)
if temperature is None:
valid = False
else:
metadata.temperature = temperature
if exp_type.drive_profile_active:
drive_profiles = get_possible_drive_profiles(fileList)
if not len(drive_profiles) == 0:
if not exp_type.start_cycle_active or metadata.start_cycle is None:
dp = drive_profiles[0]
for key in dp.keys():
setattr(metadata, key, dp[key])
else:
for dp in drive_profiles:
if dp['drive_profile'].drive_profile == 'CX' and dp['drive_profile_x_denominator'] == metadata.start_cycle:
continue
for key in dp.keys():
setattr(metadata, key, dp[key])
break
if exp_type.version_number_active:
version_number = get_version_number(fileList)
if version_number is None:
valid = False
else:
metadata.version_number = version_number
print("\t\tEXTRACTED METADATA: {}".format(metadata))
return metadata, valid
|
1668887
|
from rest_framework import exceptions
from pyplan.pyplan.common.baseService import BaseService
from .models import DashboardStyle
class DashboardStyleService(BaseService):
def getByStyleType(self, style_type):
"""Get By Style Type"""
if style_type and isinstance(style_type, int):
return DashboardStyle.objects.filter(owner_id=self.client_session.userCompanyId, style_type=int(style_type))
return DashboardStyle.objects.filter(owner_id=self.client_session.userCompanyId)
def getById(self, id):
"""Get By Style Type"""
return DashboardStyle.objects.filter(
owner_id=self.client_session.userCompanyId, pk=id)
|
1668896
|
import torch.nn.functional as F
from torch import nn
from networks.layers.basic import DropPath, GroupNorm1D, GNActDWConv2d, seq_to_2d
from networks.layers.attention import MultiheadAttention, MultiheadLocalAttentionV2, MultiheadLocalAttentionV3
def _get_norm(indim, type='ln', groups=8):
if type == 'gn':
return GroupNorm1D(indim, groups)
else:
return nn.LayerNorm(indim)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(
F"activation should be relu/gele/glu, not {activation}.")
class LongShortTermTransformer(nn.Module):
def __init__(self,
num_layers=2,
d_model=256,
self_nhead=8,
att_nhead=8,
dim_feedforward=1024,
emb_dropout=0.,
droppath=0.1,
lt_dropout=0.,
st_dropout=0.,
droppath_lst=False,
droppath_scaling=False,
activation="gelu",
return_intermediate=False,
intermediate_norm=True,
final_norm=True):
super().__init__()
self.intermediate_norm = intermediate_norm
self.final_norm = final_norm
self.num_layers = num_layers
self.return_intermediate = return_intermediate
self.emb_dropout = nn.Dropout(emb_dropout, True)
layers = []
for idx in range(num_layers):
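# with droppath_scaling, the drop-path rate grows linearly from 0 at the
# first layer to `droppath` at the last layer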
if droppath_scaling:
if num_layers == 1:
droppath_rate = 0
else:
droppath_rate = droppath * idx / (num_layers - 1)
else:
droppath_rate = droppath
layers.append(
LongShortTermTransformerBlock(d_model, self_nhead, att_nhead,
dim_feedforward, droppath_rate,
lt_dropout, st_dropout,
droppath_lst, activation))
self.layers = nn.ModuleList(layers)
num_norms = num_layers - 1 if intermediate_norm else 0
if final_norm:
num_norms += 1
self.decoder_norms = [
_get_norm(d_model, type='ln') for _ in range(num_norms)
] if num_norms > 0 else None
if self.decoder_norms is not None:
self.decoder_norms = nn.ModuleList(self.decoder_norms)
def forward(self,
tgt,
long_term_memories,
short_term_memories,
curr_id_emb=None,
self_pos=None,
size_2d=None):
output = self.emb_dropout(tgt)
intermediate = []
intermediate_memories = []
for idx, layer in enumerate(self.layers):
output, memories = layer(output,
long_term_memories[idx] if
long_term_memories is not None else None,
short_term_memories[idx] if
short_term_memories is not None else None,
curr_id_emb=curr_id_emb,
self_pos=self_pos,
size_2d=size_2d)
if self.return_intermediate:
intermediate.append(output)
intermediate_memories.append(memories)
if self.decoder_norms is not None:
if self.final_norm:
output = self.decoder_norms[-1](output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.intermediate_norm:
for idx in range(len(intermediate) - 1):
intermediate[idx] = self.decoder_norms[idx](
intermediate[idx])
if self.return_intermediate:
return intermediate, intermediate_memories
return output, memories
class LongShortTermTransformerBlock(nn.Module):
def __init__(self,
d_model,
self_nhead,
att_nhead,
dim_feedforward=1024,
droppath=0.1,
lt_dropout=0.,
st_dropout=0.,
droppath_lst=False,
activation="gelu",
local_dilation=1,
enable_corr=True):
super().__init__()
# Self-attention
self.norm1 = _get_norm(d_model)
self.self_attn = MultiheadAttention(d_model, self_nhead)
# Long Short-Term Attention
self.norm2 = _get_norm(d_model)
self.linear_Q = nn.Linear(d_model, d_model)
self.linear_V = nn.Linear(d_model, d_model)
self.long_term_attn = MultiheadAttention(d_model,
att_nhead,
use_linear=False,
dropout=lt_dropout)
if enable_corr:
try:
import spatial_correlation_sampler
MultiheadLocalAttention = MultiheadLocalAttentionV2
except Exception as inst:
print(inst)
print(
"Failed to import PyTorch Correlation. For better efficiency, please install it."
)
MultiheadLocalAttention = MultiheadLocalAttentionV3
else:
MultiheadLocalAttention = MultiheadLocalAttentionV3
self.short_term_attn = MultiheadLocalAttention(d_model,
att_nhead,
dilation=local_dilation,
use_linear=False,
dropout=st_dropout)
self.droppath_lst = droppath_lst
# Feed-forward
self.norm3 = _get_norm(d_model)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.activation = GNActDWConv2d(dim_feedforward)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.droppath = DropPath(droppath, batch_dim=1)
self._init_weight()
def with_pos_embed(self, tensor, pos=None):
size = tensor.size()
if len(size) == 4 and pos is not None:
n, c, h, w = size
pos = pos.view(h, w, n, c).permute(2, 3, 0, 1)
return tensor if pos is None else tensor + pos
def forward(self,
tgt,
long_term_memory=None,
short_term_memory=None,
curr_id_emb=None,
self_pos=None,
size_2d=(30, 30)):
# Self-attention
_tgt = self.norm1(tgt)
q = k = self.with_pos_embed(_tgt, self_pos)
v = _tgt
tgt2 = self.self_attn(q, k, v)[0]
tgt = tgt + self.droppath(tgt2)
# Long Short-Term Attention
_tgt = self.norm2(tgt)
curr_Q = self.linear_Q(_tgt)
curr_K = curr_Q
curr_V = _tgt
local_Q = seq_to_2d(curr_Q, size_2d)
if curr_id_emb is not None:
global_K = curr_K
global_V = self.linear_V(curr_V + curr_id_emb)
local_K = seq_to_2d(global_K, size_2d)
local_V = seq_to_2d(global_V, size_2d)
else:
global_K, global_V = long_term_memory
local_K, local_V = short_term_memory
tgt2 = self.long_term_attn(curr_Q, global_K, global_V)[0]
tgt3 = self.short_term_attn(local_Q, local_K, local_V)[0]
if self.droppath_lst:
tgt = tgt + self.droppath(tgt2 + tgt3)
else:
tgt = tgt + tgt2 + tgt3
# Feed-forward
_tgt = self.norm3(tgt)
tgt2 = self.linear2(self.activation(self.linear1(_tgt), size_2d))
tgt = tgt + self.droppath(tgt2)
return tgt, [[curr_K, curr_V], [global_K, global_V],
[local_K, local_V]]
def _init_weight(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
|
1668911
|
from flasgger import swag_from
from flask import current_app, request, jsonify
from flask_jwt_extended import jwt_refresh_token_required
from app.doc.account.auth import AUTH_POST, REFRESH_POST
from app.model import StudentModel, TokenModel
from app.util.json_schema import json_type_validate, AUTH_POST_JSON
from app.view.base_resource import AccountResource
class Auth(AccountResource):
@json_type_validate(AUTH_POST_JSON)
@swag_from(AUTH_POST)
def post(self):
student = StudentModel.login(request.json['id'], request.json['password'])
user_agent = request.headers.get('user-agent')
token = TokenModel.create_new_token(student.id, user_agent)
return jsonify(token)
class Refresh(AccountResource):
@swag_from(REFRESH_POST)
@jwt_refresh_token_required
def post(self):
user_agent = request.headers.get('user-agent')
authorization = request.headers.get(current_app.config['JWT_HEADER_NAME'])
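# drop the scheme prefix (7 chars, assuming the default "Bearer " header type)
# to isolate the raw refresh token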
refresh_token = authorization[7:]
token = TokenModel.create_refresh_token(refresh_token, user_agent)
return jsonify(token)
|
1668912
|
import unittest
import gzip
import logging
import json
import os
import random
from multiprocessing.pool import ThreadPool
from SolrClient import SolrClient, IndexQ
from SolrClient.exceptions import *
from .test_config import test_config
from .RandomTestData import RandomTestData
import shutil
from functools import partial
from datetime import datetime as dt
from time import sleep
test_config['indexqbase'] = os.getcwd()
logging.disable(logging.CRITICAL)
class TestIndexQ(unittest.TestCase):
@classmethod
def setUpClass(self):
self.rand_docs = RandomTestData()
self.docs = self.rand_docs.get_docs(50)
self.devel = False
if self.devel:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s [%(levelname)s] (%(process)d) (%(threadName)-10s) [%(name)s] %(message)s')
try:
shutil.rmtree(test_config['indexqbase'] + os.sep + 'testq')
except:
pass
@classmethod
def tearDownClass(self):
try:
shutil.rmtree(test_config['indexqbase'] + os.sep + 'testq')
except:
pass
def setUp(self):
index = IndexQ(test_config['indexqbase'], 'testq')
for dir in ['_todo_dir', '_done_dir']:
[os.remove(x) for x in index.get_all_as_list(dir=dir)]
def check_file_contents(self, file_path, real_data):
if os.path.isfile(file_path):
if file_path.endswith('.gz'):
f = gzip.open(file_path, 'rt', encoding='utf-8')
f_data = json.load(f)
else:
f = open(file_path)
f_data = json.load(f)
f.close()
[self.assertEqual(f_data[x], real_data[x]) for x in range(len(real_data))]
def test_add_bad_list(self):
index = IndexQ(test_config['indexqbase'], 'testq')
with self.assertRaises(ValueError):
index.add([{}, {}, [], {}])
def test_add_string(self):
index = IndexQ(test_config['indexqbase'], 'testq')
string_test = 'asd'
doc = index.add(string_test)
with open(doc) as f:
doc_data = f.read()
self.assertEqual(string_test, doc_data)
def test_add_int(self):
index = IndexQ(test_config['indexqbase'], 'testq')
with self.assertRaises(ValueError):
index.add(1)
def test_add_good_dict_zero_size(self):
index = IndexQ(test_config['indexqbase'], 'testq')
doc = index.add(self.docs[0])
# Sending docs as list because that is how json is stored
self.check_file_contents(doc, [self.docs[0]])
def test_add_good_list_zero_size(self):
index = IndexQ(test_config['indexqbase'], 'testq')
doc = index.add(self.docs[0:20])
self.check_file_contents(doc, self.docs[0:20])
def test_add_good_list_zero_size_compressed(self):
index = IndexQ(test_config['indexqbase'], 'testq', compress=True)
doc = index.add(self.docs[0:20])
self.check_file_contents(doc, self.docs[0:20])
def test_add_good_dict_zero_size_compressed(self):
index = IndexQ(test_config['indexqbase'], 'testq', compress=True)
doc = index.add(self.docs[0])
# Sending docs as list because that is how json is stored
self.check_file_contents(doc, [self.docs[0]])
def test_buffer_list_1m(self):
size = 1
index = IndexQ(test_config['indexqbase'], 'testq', size=size)
buff = []
while True:
doc = index.add(self.docs)
[buff.append(x) for x in self.docs]
if type(doc) is str:
break
self.check_file_contents(doc, buff)
self.assertLessEqual(os.path.getsize(doc), size * 1000000)
self.assertGreaterEqual(os.path.getsize(doc), size * 1000000 * .90)
os.remove(doc)
def test_buffer_dict_1m(self):
size = 1
index = IndexQ(test_config['indexqbase'], 'testq', size=size)
buff = []
while True:
item = random.choice(self.docs)
doc = index.add(item)
buff.append(item)
if type(doc) is str:
break
self.check_file_contents(doc, buff)
self.assertLessEqual(os.path.getsize(doc), size * 1000000)
self.assertGreaterEqual(os.path.getsize(doc), size * 1000000 * .90)
os.remove(doc)
def test_buffer_dict_25m(self):
size = 25
index = IndexQ(test_config['indexqbase'], 'testq', size=size)
buff = []
while True:
item = random.choice(self.docs)
doc = index.add(item)
buff.append(item)
if type(doc) is str:
break
self.check_file_contents(doc, buff)
self.assertLessEqual(os.path.getsize(doc), size * 1000000)
self.assertGreaterEqual(os.path.getsize(doc), size * 1000000 * .90)
os.remove(doc)
def test_buffer_list_25m(self):
size = 25
index = IndexQ(test_config['indexqbase'], 'testq', size=size)
buff = []
while True:
doc = index.add(self.docs)
[buff.append(x) for x in self.docs]
if type(doc) is str:
break
self.check_file_contents(doc, buff)
self.assertLessEqual(os.path.getsize(doc), size * 1000000)
self.assertGreaterEqual(os.path.getsize(doc), size * 1000000 * .90)
os.remove(doc)
def test_buffer_dict_75m(self):
size = 75
index = IndexQ(test_config['indexqbase'], 'testq', size=size)
buff = []
while True:
item = random.choice(self.docs)
doc = index.add(item)
buff.append(item)
if type(doc) is str:
break
self.check_file_contents(doc, buff)
self.assertLessEqual(os.path.getsize(doc), size * 1000000)
self.assertGreaterEqual(os.path.getsize(doc), size * 1000000 * .90)
os.remove(doc)
def test_buffer_list_75m(self):
size = 75
index = IndexQ(test_config['indexqbase'], 'testq', size=size)
buff = []
while True:
doc = index.add(self.docs)
[buff.append(x) for x in self.docs]
if type(doc) is str:
break
self.check_file_contents(doc, buff)
self.assertLessEqual(os.path.getsize(doc), size * 1000000)
self.assertGreaterEqual(os.path.getsize(doc), size * 1000000 * .90)
os.remove(doc)
def test_buffer_list_75m_dump_early(self):
size = 75
index = IndexQ(test_config['indexqbase'], 'testq', size=size)
buff = []
while True:
doc = index.add(self.docs)
[buff.append(x) for x in self.docs]
if doc > 40000000:
doc = index.add(finalize=True)
if type(doc) is str:
break
self.check_file_contents(doc, buff)
os.remove(doc)
def test_by_get_all_compressed(self):
size = 1
files = 2
index = IndexQ(test_config['indexqbase'], 'testq', size=size, compress=True)
buff = []
docs = []
for _ in range(files):
doc = index.add(self.docs, finalize=True)
docs.append(doc)
sleep(1)
index = IndexQ(test_config['indexqbase'], 'testq')
indexdocs = index.get_all_as_list()
self.assertEqual(docs, indexdocs)
[os.remove(doc) for doc in docs]
def test_by_get_all_no_compressed(self):
size = 1
files = 2
index = IndexQ(test_config['indexqbase'], 'testq', size=size, compress=False)
buff = []
docs = []
for _ in range(files):
doc = index.add(self.docs, finalize=True)
docs.append(doc)
sleep(1)
index = IndexQ(test_config['indexqbase'], 'testq', mode='out')
indexdocs = index.get_all_as_list()
self.assertEqual(docs, indexdocs)
[os.remove(doc) for doc in docs]
def test_by_get_all_default_compression(self):
size = 1
files = 2
index = IndexQ(test_config['indexqbase'], 'testq', size=size)
buff = []
docs = []
for _ in range(files):
doc = index.add(self.docs, finalize=True)
docs.append(doc)
sleep(1)
index = IndexQ(test_config['indexqbase'], 'testq', mode='out')
indexdocs = index.get_all_as_list()
self.assertEqual(docs, indexdocs)
[os.remove(doc) for doc in docs]
def test_dequeue(self):
size = 1
files = 2
index = IndexQ(test_config['indexqbase'], 'testq', size=size)
buff = []
docs = []
for _ in range(files):
doc = index.add(self.docs, finalize=True)
docs.append(doc)
sleep(1)
index = IndexQ(test_config['indexqbase'], 'testq')
indexdocs = []
for x in index.get_todo_items():
indexdocs.append(x)
self.assertEqual(docs, indexdocs)
[os.remove(doc) for doc in docs]
def test_dequeue_100(self):
size = 1
files = 100
rdocs = self.rand_docs.get_docs(500)
index = IndexQ(test_config['indexqbase'], 'testq', size=size)
buff = []
docs = []
for dir in ['_todo_dir', '_done_dir']:
[os.remove(x) for x in index.get_all_as_list(dir=dir)]
for _ in range(files):
doc = index.add(rdocs, finalize=True)
docs.append(doc)
sleep(1)
index = IndexQ(test_config['indexqbase'], 'testq')
indexdocs = []
for x in index.get_todo_items():
indexdocs.append(x)
self.assertEqual(docs, indexdocs)
[os.remove(doc) for doc in docs]
def test_dequeue_and_complete_no_compression_5(self):
size = 1
files = 5
index = IndexQ(test_config['indexqbase'], 'testq', size=size)
buff = []
docs = []
for _ in range(files):
doc = index.add(self.docs, finalize=True)
docs.append(doc)
sleep(1)
index = IndexQ(test_config['indexqbase'], 'testq', compress=False)
indexdocs = []
for x in index.get_todo_items():
indexdocs.append(x)
index.complete(x)
self.assertEqual(docs, indexdocs)
finaldocnames = [os.path.split(x)[-1] for x in indexdocs]
donefilepaths = [os.path.join(index._done_dir, x) for x in finaldocnames]
for x in donefilepaths:
self.assertTrue(os.path.exists(x))
[os.remove(doc) for doc in donefilepaths]
def test_locking(self):
'''
Working on this one, it doesn't lock properly
'''
files = 5
index = IndexQ(test_config['indexqbase'], 'testq')
buff = []
docs = []
for _ in range(files):
doc = index.add(self.docs, finalize=True)
docs.append(doc)
sleep(1)
index = IndexQ(test_config['indexqbase'], 'testq', mode='out', devel=True)
x = index.get_todo_items()
self.assertTrue(os.path.exists(index._lck))
with self.assertRaises(RuntimeError) as a:
new_index = IndexQ(test_config['indexqbase'], 'testq', mode='out')
y = new_index.get_todo_items()
[index.complete(i) for i in x]
self.assertFalse(os.path.exists(index._lck))
def test_index(self):
index = IndexQ(test_config['indexqbase'], 'testq')
solr = SolrClient(test_config['SOLR_SERVER'],
devel=True,
auth=test_config['SOLR_CREDENTIALS'])
solr.delete_doc_by_id(test_config['SOLR_COLLECTION'], '*')
buff = []
files = []
for doc in self.docs:
files.append(index.add(doc, finalize=True))
index.index(solr, test_config['SOLR_COLLECTION'])
solr.commit(test_config['SOLR_COLLECTION'], openSearcher=True)
for doc in self.docs:
res = solr.query(test_config['SOLR_COLLECTION'], {'q': 'id:{}'.format(doc['id'])})
self.assertTrue(res.get_results_count() == 1)
solr.delete_doc_by_id(test_config['SOLR_COLLECTION'], '*')
def test_index_multiproc(self):
index = IndexQ(test_config['indexqbase'], 'testq')
solr = SolrClient(test_config['SOLR_SERVER'],
devel=True,
auth=test_config['SOLR_CREDENTIALS'])
solr.delete_doc_by_id(test_config['SOLR_COLLECTION'], '*')
buff = []
files = []
for doc in self.docs:
files.append(index.add(doc, finalize=True))
index.index(solr, test_config['SOLR_COLLECTION'], threads=10)
solr.commit(test_config['SOLR_COLLECTION'], openSearcher=True)
for doc in self.docs:
res = solr.query(test_config['SOLR_COLLECTION'],
{'q': 'id:{}'.format(doc['id'])})
self.assertTrue(res.get_results_count() == 1)
def test_index_bad_send_method(self):
index = IndexQ(test_config['indexqbase'], 'testq')
solr = SolrClient(test_config['SOLR_SERVER'],
devel=True, auth=test_config['SOLR_CREDENTIALS'])
with self.assertRaises(AttributeError):
index.index(solr,
test_config['SOLR_COLLECTION'],
send_method='Doesnt exist')
def test_index_bad_data(self):
index = IndexQ(test_config['indexqbase'], 'testq')
solr = SolrClient(test_config['SOLR_SERVER'],
devel=True,
auth=test_config['SOLR_CREDENTIALS'])
if index._is_locked():
index._unlock()
self.assertEqual(index.get_all_as_list(), [])
solr.delete_doc_by_id(test_config['SOLR_COLLECTION'], '*')
todo_file = index.add({'date': 'asd'}, finalize=True)
self.assertEqual(index.get_all_as_list()[0], todo_file)
with self.assertRaises(SolrError):
index.index(solr, test_config['SOLR_COLLECTION'])
self.assertEqual(index.get_all_as_list()[0], todo_file)
self.assertFalse(index._is_locked())
def test_index_dynamic_collections_basic_1(self):
index = IndexQ(test_config['indexqbase'], 'testq')
solr = SolrClient(test_config['SOLR_SERVER'],
devel=True,
auth=test_config['SOLR_CREDENTIALS'])
if index._is_locked():
index._unlock()
self.assertEqual(index.get_all_as_list(), [])
# Set up mock for indexing
temp = {}
def mock(temp, coll, docs):
temp[coll] = docs
return True
todo_file = index.add([{'type': '1', 'data': '1'},
{'type': '1', 'data': '2'},
{'type': '1', 'data': '3'},
{'type': '2', 'data': '4'},
{'type': '3', 'data': '5'},
], finalize=True)
runner_wrap = index._wrap_dynamic(partial(mock, temp),
lambda x: x['type'],
todo_file)
self.assertTrue(runner_wrap)
self.assertEqual(json.loads(temp['3']), [{"data": "5", "type": "3"}])
self.assertEqual(json.loads(temp['2']), [{'type': '2', 'data': '4'}])
self.assertEqual(sorted(json.loads(temp['1']), key=lambda x: x['data']),
sorted([{'type': '1', 'data': '1'},
{'type': '1', 'data': '2'},
{'type': '1', 'data': '3'}],
key=lambda x: x['data']))
self.assertFalse(index.get_all_as_list()) # Make sure item is completed
def test_index_dynamic_collections_func_basic_error_1(self):
index = IndexQ(test_config['indexqbase'], 'testq')
solr = SolrClient(test_config['SOLR_SERVER'],
devel=True,
auth=test_config['SOLR_CREDENTIALS'])
if index._is_locked():
index._unlock()
self.assertEqual(index.get_all_as_list(), [])
# Set up mock for indexing
temp = {}
def mock(temp, coll, docs):
temp[coll] = docs
todo_file = index.add([{'type': '1', 'data': '1'},
{'type': '1', 'data': '2'},
{'type': '1', 'data': '3'},
{'type': '2', 'data': '4'},
{'type': '3', 'data': '5'},
], finalize=True)
with self.assertRaises(KeyError):
index._wrap_dynamic(partial(mock, temp),
lambda x: x['asdasdasd'],
todo_file)
def test_index_dynamic_collections_indexing_error(self):
index = IndexQ(test_config['indexqbase'], 'testq')
solr = SolrClient(test_config['SOLR_SERVER'],
devel=True,
auth=test_config['SOLR_CREDENTIALS'])
if index._is_locked():
index._unlock()
self.assertEqual(index.get_all_as_list(), [])
# Set up mock for indexing
temp = {}
def mock(temp, coll, docs):
raise KeyError()
todo_file = index.add([{'type': '1', 'data': '1'},
{'type': '1', 'data': '2'},
{'type': '1', 'data': '3'},
{'type': '2', 'data': '4'},
{'type': '3', 'data': '5'},
], finalize=True)
runner_wrap = index._wrap_dynamic(partial(mock, temp),
lambda x: x['type'],
todo_file)
self.assertFalse(runner_wrap)
def test_index_dynamic_collections_indexing_error_partial(self):
index = IndexQ(test_config['indexqbase'], 'testq')
solr = SolrClient(test_config['SOLR_SERVER'],
devel=True,
auth=test_config['SOLR_CREDENTIALS'])
if index._is_locked():
index._unlock()
self.assertEqual(index.get_all_as_list(), [])
# Set up mock for indexing
temp = {}
def mock(temp, coll, docs):
if json.loads(docs)[0]['type'] == '1':
raise KeyError()
else:
temp[coll] = docs
return True
todo_file = index.add([{'type': '1', 'data': '1'},
{'type': '1', 'data': '2'},
{'type': '1', 'data': '3'},
{'type': '2', 'data': '4'},
{'type': '3', 'data': '5'},
], finalize=True)
runner_wrap = index._wrap_dynamic(partial(mock, temp),
lambda x: x['type'],
todo_file)
self.assertFalse(runner_wrap)
def test_thread_pool_low(self):
'''
Index data using multiple threads.
Verify that all documents added across the threads are written out.
'''
docs = self.rand_docs.get_docs(5)
threads = 5
index = IndexQ(test_config['indexqbase'], 'testq', size=1)
with ThreadPool(threads) as p:
p.map(index.add, docs)
self.check_file_contents(index.add(finalize=True), docs)
def test_thread_pool_mid(self):
'''
Index data using multiple threads.
Verify that all documents added across the threads are written out.
'''
docs = self.rand_docs.get_docs(5000)
threads = 5
index = IndexQ(test_config['indexqbase'], 'testq', size=1)
with ThreadPool(threads) as p:
p.map(index.add, docs)
index.add(finalize=True)
d = index.get_all_json_from_indexq()
self.assertEqual(sorted(d, key=lambda x: x['id']), sorted(docs, key=lambda x: x['id']))
def test_thread_pool_high(self):
'''
Index data using multiple threads.
Verify that all documents added across the threads are written out.
'''
docs = self.rand_docs.get_docs(25000)
index = IndexQ(test_config['indexqbase'],
'testq',
size=.1,
devel=True)
for dir in ['_todo_dir', '_done_dir']:
[os.remove(x) for x in index.get_all_as_list(dir=dir)]
threads = 25
with ThreadPool(threads) as p:
p.map(index.add, docs)
index.add(finalize=True)
d = index.get_all_json_from_indexq()
self.assertEqual(len(d), len(docs))
self.assertEqual(sorted(d, key=lambda x: x['id']),
sorted(docs, key=lambda x: x['id']))
def test_add_callback_no_size(self):
docs = self.rand_docs.get_docs(5)
index = IndexQ(test_config['indexqbase'], 'testq')
temp = []
def cb(path):
temp.append(path)
t = index.add(docs[0], callback=cb)
self.assertTrue(t in temp)
def test_add_callback_with_size(self):
docs = self.rand_docs.get_docs(5)
index = IndexQ(test_config['indexqbase'], 'testq', size=1)
temp = []
def cb(path):
temp.append(path)
t = index.add(docs[0], callback=cb)
t = index.add(docs[1], callback=cb, finalize=True)
self.assertTrue(t in temp)
def test_get_multi_q1(self):
docs = self.rand_docs.get_docs(5000)
log = logging.getLogger()
index = IndexQ(test_config['indexqbase'], 'testq', size=1, log=log)
q = index.get_multi_q()
for item in docs:
q.put(item)
q.put('STOP')
index.join_indexer()
self.assertEqual(docs, index.get_all_json_from_indexq())
def test_get_multi_q2(self):
log = logging.getLogger()
index = IndexQ(test_config['indexqbase'], 'testq', size=1, log=log)
q = index.get_multi_q()
docs = self.rand_docs.get_docs(50000)
for item in docs:
q.put(item)
q.put('STOP')
index.join_indexer()
self.assertEqual(docs, index.get_all_json_from_indexq())
def test_get_multi_q3(self):
log = logging.getLogger()
index = IndexQ(test_config['indexqbase'], 'testq', size=1, log=log)
q = index.get_multi_q()
docs = self.rand_docs.get_docs(5000)
docs2 = self.rand_docs.get_docs(5000)
for item in docs + ['STOP'] + docs2:
q.put(item)
index.join_indexer()
self.assertEqual(docs + docs2, index.get_all_json_from_indexq())
def test_get_multi_with_sentinel(self):
log = logging.getLogger()
index = IndexQ(test_config['indexqbase'], 'testq', size=1, log=log)
q = index.get_multi_q(sentinel='BLAH')
docs = self.rand_docs.get_docs(5000)
docs2 = self.rand_docs.get_docs(5000)
for item in docs + ['BLAH'] + docs2:
q.put(item)
index.join_indexer()
self.assertEqual(docs + docs2, index.get_all_json_from_indexq())
def test_complete_dir_rotate(self):
log = logging.getLogger()
rotate_func = lambda: '{}/{}/{}'.format(dt.now().year, dt.now().month, dt.now().day)
index = IndexQ(test_config['indexqbase'], 'testq', size=1, log=log,
rotate_complete=rotate_func)
dir_set = rotate_func()
docs = self.rand_docs.get_docs(69)
for item in self.docs[1:10]:
index.add(item, finalize=True)
files = []
for item in index.get_all_as_list():
files.append(index.complete(item))
[self.assertTrue(os.path.exists(x)) for x in files]
def test_complete_compress_basic(self):
log = logging.getLogger()
index = IndexQ(test_config['indexqbase'], 'testq', size=1, log=log,
compress=True)
for item in self.docs[1:10]:
index.add(item, finalize=True)
files = []
for item in index.get_all_as_list():
files.append(index.complete(item))
[self.assertTrue(os.path.exists(x)) for x in files]
def test_complete_compress_basic_re_indexing(self):
log = logging.getLogger()
solr = SolrClient(test_config['SOLR_SERVER'],
devel=True,
auth=test_config['SOLR_CREDENTIALS'])
index = IndexQ(test_config['indexqbase'], 'testq', size=1, log=log,
compress=True)
solr.delete_doc_by_id(test_config['SOLR_COLLECTION'], '*')
for item in self.docs[1:10]:
index.add(item, finalize=True)
index.index(solr, test_config['SOLR_COLLECTION'])
# At this point items are indexed and are moved into the done directory
# Lets re-index them to make sure all json got properly encoded
files = index.get_all_as_list('_done_dir')
for f in index.get_all_as_list('_done_dir'):
shutil.move(f, index._todo_dir)
index.index(solr, test_config['SOLR_COLLECTION'])
self.assertEqual(files, index.get_all_as_list('_done_dir'))
|
1668970
|
import logging
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from django.core.cache import cache
from django.db.models.signals import post_save
from django.dispatch import Signal, receiver
from redis.lock import LockError
from influxdb_metrics.loader import log_metric
from apps.shipments.models import Shipment
from .models import AsyncJob, Message, MessageType, JobState
# pylint:disable=invalid-name
job_update = Signal(providing_args=["message", "shipment"])
channel_layer = get_channel_layer()
LOG = logging.getLogger('transmission')
@receiver(post_save, sender=AsyncJob, dispatch_uid='asyncjob_post_save')
def asyncjob_post_save(sender, instance, **kwargs):
# Notify websockets of any AsyncJob create/updates
async_to_sync(channel_layer.group_send)(instance.shipment.owner_id,
{"type": "jobs.update", "async_job_id": instance.id})
@receiver(post_save, sender=Message, dispatch_uid='message_post_save')
def message_post_save(sender, instance, **kwargs):
LOG.debug(f'Message post save with message {instance.id}.')
log_metric('transmission.info', tags={'method': 'jobs.message_post_save', 'module': __name__})
try:
wallet_lock = cache.lock(instance.async_job.parameters['signing_wallet_id'])
wallet_lock.local.token = instance.async_job.wallet_lock_token
wallet_lock.release()
except LockError:
LOG.warning(f'Wallet {instance.async_job.parameters["signing_wallet_id"]} was not locked when '
f'job {instance.async_job.id} received message {instance.id}')
if instance.type == MessageType.ERROR:
# Generic error handling
LOG.error(f"Transaction failure for AsyncJob {instance.async_job.id}: {instance.body}")
instance.async_job.state = JobState.FAILED
instance.async_job.save()
# Update has been received, send signal to listener
LOG.debug(f'Update has been received, and signal sent to listener {instance.id}.')
job_update.send(sender=Shipment, message=instance, shipment=instance.async_job.shipment)
|
1668974
|
from .base_interpreter import BaseInterpreter
from .lexer import AstError, Lexer
from .parser import Parser, TopNode
class ValidationError(AstError):
pass
class ValidationWarning(AstError):
pass
class ValidationResult:
def __init__(self, is_valid=True, error=None, warning=None, tree=None):
self.is_valid = is_valid
self.error = error
self.warning = warning
self.tree = tree
def __str__(self):
return self.error or self.warning or ''
class Validator(BaseInterpreter):
def __init__(self, expression, known_vars):
self.expression = expression
self.known_vars = known_vars
self.known_funcs = BaseInterpreter.known_funcs()
self.root = None
def validate(self, force_raise=True):
try:
parser = Parser(Lexer(self.expression))
self.root = parser.parse()
self.evaluate(self.root)
return ValidationResult(is_valid=True, tree=self.root)
except ValidationWarning as e:
return ValidationResult(is_valid=True, warning=str(e), tree=self.root)
except AstError as e:
if force_raise:
raise e
else:
return ValidationResult(is_valid=False, error=str(e), tree=self.root)
def evaluate_no_op_node(self, node):
return True
def evaluate_const_node(self, node):
return True
def evaluate_var_node(self, node):
if node.name in self.known_vars:
return True
else:
msg = f"unknown variable '{node.name}' at position {node.position()}"
if node.name.startswith('$'):
raise ValidationWarning(msg)
else:
raise ValidationError(msg)
def evaluate_var_def_node(self, node):
if node.name in self.known_vars:
msg = f"var definition '{node.name}' at position {node.position()} conflicts with existing variable"
raise ValidationError(msg)
else:
if isinstance(self.known_vars, set):
self.known_vars.add(node.name)
else:
self.known_vars.append(node.name)
return True
def evaluate_binary_op_node(self, node):
return self.evaluate(node.left) and self.evaluate(node.right)
def evaluate_unary_op_node(self, node):
return self.evaluate(node.node)
def evaluate_func_node(self, node):
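# known_funcs maps a function name to its allowed argument counts, either a
# collection of ints or a range (an assumption based on the checks below)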
if node.func_name in self.known_funcs:
args_count = len(node.arg_nodes)
if args_count in self.known_funcs[node.func_name]:
return all(map(lambda node: self.evaluate(node), node.arg_nodes))
else:
expected_args_count = self.known_funcs[node.func_name]
if isinstance(expected_args_count, range):
expected = f"from {expected_args_count.start} to {expected_args_count.stop - 1}"
else:
counts = list(map(str, expected_args_count))
if len(counts) > 1:
counts = counts[0:-2] + [counts[-2] + " or " + counts[-1]]
expected = ", ".join(counts)
raise ValidationError(
f"invalid arguments count on '{node.func_name}' function, expected {expected} got {args_count} at position {node.position()}"
)
else:
raise ValidationError(f"unknown function '{node.func_name}' at position {node.position()}")
def evaluate_tuple_node(self, node):
return all(map(lambda node: self.evaluate(node), node.item_nodes))
def evaluate_top_node(self, node):
if not isinstance(self.root, TopNode):
raise ValidationError(f"top or bottom expression cannot be used as an argument or operand")
if node.with_node and not node.group_node:
raise ValidationError(
f"with experession at position {node.with_node.position()} can only be used with per expression"
)
return all(map(lambda n: self.evaluate(n), node.child_nodes()))
def evaluate_with_node(self, node):
return all(map(lambda n: self.evaluate(n), node.with_item_nodes))
def evaluate_with_item_node(self, node):
nodes = [node.source_node]
if node.alias_node:
nodes.append(node.alias_node)
return all(map(lambda n: self.evaluate(n), nodes))
|
1669002
|
from django_dynamic_fixture import G
from tests.base import BaseTestCase
from tests.models import IdGenerateTestModel, TestIds
class TableStrategyTestCase(BaseTestCase):
def test_id_generation(self):
dummy_one = G(IdGenerateTestModel)
dummy_two = G(IdGenerateTestModel)
self.assertEqual(dummy_one.id, 1)
self.assertEqual(dummy_two.id, 2)
id_generation_row_count = TestIds.objects.count()
total_row_count = IdGenerateTestModel.objects.count()
self.assertEqual(id_generation_row_count, total_row_count)
|
1669036
|
import platform
from .location import Location
from .robot import Robot
from .sikulpy import unofficial
from ..version import VERSION
class Env(object):
@staticmethod
def addHotkey(key, modifiers, handler):
raise NotImplementedError(
"Env.addHotKey(%r, %r, %r) not implemented" % (key, modifiers, handler)
) # FIXME
@staticmethod
def removeHotkey(key, modifiers):
raise NotImplementedError(
"Env.removeHotKey(%r, %r) not implemented" % (key, modifiers)
) # FIXME
@staticmethod
def getOS() -> str:
# FIXME: check that this matches sikuli's OS names
return platform.system()
@staticmethod
def getOSVersion() -> str:
raise NotImplementedError("Env.getOSVersion() not implemented") # FIXME
@staticmethod
def getSikuliVersion() -> str:
return "sikulpy %s" % VERSION
@staticmethod
def getClipboard() -> str:
return Robot.getClipboard()
@staticmethod
@unofficial
def putClipboard(text: str) -> None:
return Robot.putClipboard(text)
@staticmethod
def isLockOn(key: str) -> bool:
return Robot.isLockOn(key)
@staticmethod
def getMouseLocation() -> Location:
x, y = Robot.getMouseLocation()
return Location(x, y)
|
1669065
|
import math
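# Golden-section search for a 1-D maximum: shrink the bracket [xL, xU] by the
# golden ratio r ~ 0.61803 each step so that one interior point (and its f
# value) can be reused, costing only one new function evaluation per iteration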
def D(xU, xL):
r = 0.61803
return r * (xU - xL)
def func(x):
return 2 * math.sin(x) - (x * x) / 10.0
def GoldenSection(xL, xU, f):
d = D(xU, xL)
x1 = xL + d
x2 = xU - d
f1 = f(x1)
f2 = f(x2)
while abs(x1 - x2) > 3E-8:
if (f1 > f2):
xL = x2
x2 = x1
f2 = f1
d = D(xU, xL)
x1 = xL + d
f1 = f(x1)
else:
xU = x1
x1 = x2
f1 = f2
d = D(xU, xL)
x2 = xU - d
f2 = f(x2)
print(x1, f(x1))
GoldenSection(0, 3, func)
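# for f(x) = 2 sin(x) - x^2/10 on [0, 3] this should converge to the maximum
# near x ~ 1.4276 (f ~ 1.7757)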
|
1669076
|
import datetime as dt
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import backref
from server.api.database import db
from server.api.database.mixins import (
Column,
Model,
SurrogatePK,
reference_col,
relationship,
)
class Kilometer(SurrogatePK, Model):
"""daily report of distances in km"""
__tablename__ = "kilometers"
start_of_day = Column(db.Float, nullable=False)
end_of_day = Column(db.Float, nullable=False)
personal = Column(db.Float, default=0, nullable=False)
date = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
teacher_id = reference_col("teachers", nullable=False)
teacher = relationship("Teacher", backref=backref("kilometers", lazy="dynamic"))
car_id = reference_col("cars", nullable=False)
car = relationship("Car", backref=backref("kilometers", lazy="dynamic"))
def __init__(self, **kwargs):
"""Create instance."""
db.Model.__init__(self, **kwargs)
@hybrid_property
def total_work_km(self) -> float:
return float(self.end_of_day) - float(self.start_of_day) - float(self.personal)
def to_dict(self) -> dict:
return {
"date": self.date,
"car": self.car.to_dict(),
"total_work_km": self.total_work_km,
"start_of_day": self.start_of_day,
"end_of_day": self.end_of_day,
"personal": self.personal,
}
|
1669087
|
import numpy as np
import xarray as xr
def compute_dataset(ds, period='1W', incl_stdev=False):
if incl_stdev:
resample_obj = ds.resample(time=period)
ds_mean = resample_obj.mean(dim='time')
ds_std = resample_obj.std(dim='time').rename(name_dict={name: f"{name}_stdev" for name in ds.data_vars})
ds_merged = xr.merge([ds_mean, ds_std])
ds_merged.attrs.update(ds.attrs)
return ds_merged
else:
return ds.resample(time=period).mean(dim='time')
def compute_variables(ds, factor_chl, factor_tsm):
chl_tsm_sum = factor_chl * ds.conc_chl + factor_tsm * ds.conc_tsm
chl_tsm_sum.attrs.update(dict(units='-',
long_name='Weighted sum of CHL and TSM concentrations',
description='Nonsense variable, for demo purpose only'))
chl_category = _categorize_chl(ds.conc_chl)
chl_category.attrs.update(dict(units='-',
long_name='Categorized CHL',
description='0: 0<=CHL<3, 1: 3<=CHL<4, 2: CHL>4 mg/m^3'))
return xr.Dataset(dict(chl_tsm_sum=chl_tsm_sum,
chl_category=chl_category))
def _categorize_chl(chl):
return xr.where(chl >= 4., 2,
xr.where(chl >= 3.0, 1,
xr.where(chl >= 0.0, 0,
np.nan)))
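# minimal usage sketch (assumed inputs): given a dataset ds with conc_chl and
# conc_tsm data variables,
#   ds_out = compute_variables(ds, factor_chl=1.0, factor_tsm=0.5)
# yields a new dataset with chl_tsm_sum and chl_category variables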
|
1669089
|
from rnn_cell import RNNCell
from gru_cell import GRUCell
from cif_lstm_cell import CifLSTMCell
from lstm_cell import LSTMCell
|
1669105
|
import argparse
import atexit
import sys
from py4j.java_gateway import JavaGateway
from py4j.protocol import Py4JError
from pykukulcan.repl import PyKukulcanRepl
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="PyKukulcan REPL.")
parser.add_argument("--classpath", type=str, help="Classpath for the Kukulcan libs for the Java Gateway.")
options = parser.parse_args()
sys.ps1 = "@ "
gateway = JavaGateway.launch_gateway(
classpath=options.classpath,
redirect_stdout=sys.stdout,
redirect_stderr=sys.stderr)
def close_gateway():
gateway.shutdown()
print("Bye!")
atexit.register(close_gateway)
try:
gateway.jvm.com.github.mmolimar.kukulcan.repl.KukulcanRepl.printBanner()
kukulcan = PyKukulcanRepl(gateway=gateway)
except Py4JError:
gateway.shutdown()
sys.exit(1)
|
1669193
|
from typing import Any, Collection, Dict
from ..constants.types import basic_types, key_value_types, named_types, sequence_types
from ..models.heap_object import HeapObject
from ..models.options import Options
from .base_heap_object_factory import HeapObjectFactory
from .basic_heap_object_factory import BasicHeapObjectFactory
from .class_heap_object_factory import ClassHeapObjectFactory
from .instance_heap_object_factory import InstanceHeapObjectFactory
from .kvp_heap_object_factory import KvpHeapObjectFactory
from .named_heap_object_factory import NamedHeapObjectFactory
from .sequence_heap_object_factory import SequenceHeapObjectFactory
from .unknown_heap_object_factory import UnknownHeapObjectFactory
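# resolution order matters: concrete value categories (basic, sequence,
# key-value, named) are tested before the generic class/instance checks,
# with UnknownHeapObjectFactory as the final fallback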
def resolve_heap_object_factory(obj: Any, options: Options = None) -> HeapObjectFactory:
if isinstance(obj, basic_types):
return BasicHeapObjectFactory(obj, options)
if isinstance(obj, sequence_types):
return SequenceHeapObjectFactory(obj, options)
if isinstance(obj, key_value_types):
return KvpHeapObjectFactory(obj, options)
if isinstance(obj, named_types):
return NamedHeapObjectFactory(obj, options)
if isinstance(obj, type) and hasattr(obj, '__dict__'):
return ClassHeapObjectFactory(obj, options)
if isinstance(type(obj), type) and hasattr(obj, '__dict__'):
return InstanceHeapObjectFactory(obj, options)
return UnknownHeapObjectFactory(obj, options)
def create_heap_object(obj: Any, options: Options = None) -> HeapObject:
return resolve_heap_object_factory(obj, options).create()
def reduce_heap_objects(objects: Collection[Any], heap: Dict[str, HeapObject], options: Options = None) -> None:
for obj in objects:
obj_id = HeapObjectFactory.get_object_id(obj)
if heap.get(obj_id, None) is not None:
continue
factory = resolve_heap_object_factory(obj, options)
heap[obj_id] = factory.create()
objects_to_reduce = factory.get_objects_to_reduce()
if objects_to_reduce is not None and len(objects_to_reduce) > 0:
reduce_heap_objects(objects_to_reduce, heap, options)
def create_heap_objects(objects: Collection[Any], options: Options = None) -> Dict[str, HeapObject]:
heap: Dict[str, HeapObject] = dict()
reduce_heap_objects(objects, heap, options)
return heap
|