11498706
|
import math
import time
# TODO add Color() and simplify code (create rainbow once, then move it)
class RainbowAnimation:
def __init__(self,
led_strip,
brightness=1,
loop_limit=None,
duration_ms=1000,
pause_ms=None):
self.led_strip = led_strip
self.brightness_max = brightness
self.loop_limit = loop_limit
self.duration_ms = duration_ms
self.pause_ms = pause_ms
self.time_passed_ms = 0
def set_time_passed_ms(self):
if self.led_strip.debug:
print('RainbowAnimation().set_time_passed_ms()')
# if duration 1000 ms = 17 loops * 60 ms
# if duration 500 ms = 8.5 loops * 120 ms
# if duration 250 ms = 4.25 loops * 240 ms
# if duration 100 ms = 1.7 loops * 600 ms
full_duration = 1000
default_rate = 60
added_ms = (full_duration/self.duration_ms)*default_rate
self.time_passed_ms += added_ms
def set_brightness(self, counter, max_counter):
if self.led_strip.debug:
print('RainbowAnimation().set_brightness(counter={},max_counter={})'.format(
counter, max_counter))
# if counter == 0 => brightness 0.3
# if counter == 1 => brightness 0.6
# if counter == max_counter-2 => brightness 0.6
# if counter == max_counter-1 => brightness 0.3
if self.duration_ms and self.pause_ms and counter == 0:
self.brightness = 0.3*self.brightness_max
elif self.duration_ms and self.pause_ms and counter == 1:
self.brightness = 0.6*self.brightness_max
elif self.duration_ms and self.pause_ms and counter == (max_counter-2):
self.brightness = 0.6*self.brightness_max
elif self.duration_ms and self.pause_ms and counter == (max_counter-1):
self.brightness = 0.3*self.brightness_max
else:
self.brightness = 1*self.brightness_max
def get_max_counter(self):
if self.led_strip.debug:
print('RainbowAnimation().get_max_counter()')
# if duration 1000 ms = 17 loops * 60 ms
# if duration 500 ms = 8.5 loops * 120 ms
# if duration 250 ms = 4.25 loops * 240 ms
# if duration 100 ms = 1.7 loops * 600 ms
full_duration = 1000
loops = 17
return round((self.duration_ms/full_duration)*loops)
def glow(self):
if self.led_strip.debug:
print('RainbowAnimation().glow()')
print('Rainbow:')
try:
# if duration, need to adapt time_passed to make one full color loop (and then pause if pause set)
# turn LEDs rainbow
counter = 0
loops = 0
max_counter = self.get_max_counter()
while True:
if loops < 10:
self.brightness = (0.1+(loops*0.1))*self.brightness_max
else:
self.set_brightness(counter, max_counter)
# turn LEDs black (off) for duration of pause
if counter == max_counter:
if self.duration_ms and self.pause_ms:
self.led_strip.off()
time.sleep((self.pause_ms-10)/1000)
counter = 0
loops += 1
if self.loop_limit and loops >= self.loop_limit:  # >= : loops counts frames, not cycles, so an exact match can be skipped
print()
break
else:
self.set_time_passed_ms()
for i in range(self.led_strip.strip_length):
led_index = self.led_strip.get_led(i)  # map logical index to physical LED
color = self.rainbow_color(self.time_passed_ms, led_index,
self.brightness)
self.led_strip.leds[led_index] = color
self.led_strip.write()
counter += 1
loops += 1
except KeyboardInterrupt:
self.led_strip.fadeout()
import sys
print()
sys.exit(0)
def rainbow_color(self, t, i, brightness):
if self.led_strip.debug:
print('RainbowAnimation().rainbow_color(t={},i={},brightness={})'.format(
t, i, brightness))
t = t/1000
k = t + 0.05 * i
r = 0.5 + 0.5 * math.cos(6.28318 * (1.0 * k + 0.00))
g = 0.5 + 0.5 * math.cos(6.28318 * (1.0 * k + 0.33))
b = 0.5 + 0.5 * math.cos(6.28318 * (1.0 * k + 0.67))
r = int(255.0 * r * brightness)
g = int(255.0 * g * brightness)
b = int(255.0 * b * brightness)
r = max(0, min(255, r))
g = max(0, min(255, g))
b = max(0, min(255, b))
return (r, g, b)
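# Hedged usage sketch (not in the original source): rainbow_color only reads
# led_strip.debug, so a minimal stand-in strip is enough to try it out.
if __name__ == "__main__":
    class _Strip:
        debug = False
    anim = RainbowAnimation(_Strip())
    for ms in (0, 250, 500, 750):
        print(ms, anim.rainbow_color(ms, 0, 1.0))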
|
11498721
|
class Scale(object):
# Dictionary mapping the digit values 10-15 to their letter representations
dic = {'10': 'A',
'11': 'B',
'12': 'C',
'13': 'D',
'14': 'E',
'15': 'F'}
# Compute the decimal value contributed by one digit of a base-`scale` number
@staticmethod
def place_value(n_value, scale, digits):
# positional weight, initially 1
weight = 1
for i in range(1, digits + 1):
weight = scale * weight
return n_value * weight
# Convert value_ in base `scale` to its decimal equivalent
@staticmethod
def n_2_decimal(value_, scale):
sum_ = 0
# number of digits in the value
n = len(str(value_))
for i in range(1, n + 1):
sum_ = sum_ + Scale.place_value(int(str(value_)[i - 1]), scale, n - i)
return sum_
# Convert a decimal value_ to its base-`scale` representation
@staticmethod
def decimal_2_n(value_, scale):
arr = []
i = 0
while value_ != 0:  # compare by value; `is not 0` tested identity
rem = value_ % scale
if rem >= 16:
rem = "*" + str(rem) + "*"
elif 10 <= rem <= 15:
rem = Scale.dic[str(rem)]
value_ = value_ // scale
arr.append(rem)
i += 1
return arr
@staticmethod
def any_scale(scale1_, value_, scale2_):
mid_value = Scale.n_2_decimal(value_, scale1_)
fin_value = Scale.decimal_2_n(mid_value, scale2_)
fin_value.reverse()
fin_value = ''.join([str(x) for x in fin_value])
return fin_value
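# Hedged usage example (not in the original source). Note that n_2_decimal
# parses str(value_) digit by digit, so the input value must be written with
# decimal digits only; letters appear in the output via Scale.dic.
if __name__ == '__main__':
    print(Scale.any_scale(10, 255, 16))  # -> 'FF'
    print(Scale.any_scale(2, 1010, 10))  # -> '10'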
|
11498732
|
import tensorflow as tf
import numpy as np
import math
from tensorflow.python.util import nest
tf.set_random_seed(20160408)
# log probability
def probability(min_embed, max_embed):
# min_embed: batchsize * embed_size
# max_embed: batchsize * embed_size
# log_prob: batch_size
# numerically stable log probability of a uniform hypercube measure:
log_prob = tf.reduce_sum(tf.log((max_embed - min_embed) + 1e-8), axis=1)
return log_prob
def batch_log_upper_bound(join_min, join_max, a, b, c, d):
# join_min: batchsize * embed_size
# join_max: batchsize * embed_size
# log_prob: batch_size
join_log_prob = probability(join_min, join_max)
join_log_prob_new = tf.reduce_logsumexp(tf.stack([tf.fill([tf.shape(join_log_prob)[0]], tf.log(0.1)), join_log_prob], axis = 1), axis = 1)
x_log_prob = probability(a, b) # batchsize
y_log_prob = probability(c, d) # batchsize
log_xy = tf.reduce_logsumexp(tf.stack([x_log_prob, y_log_prob], axis = 1), axis = 1)
log_upper_bound = join_log_prob_new + log1mexp(join_log_prob_new - log_xy)
return log_upper_bound
def calc_join_and_meet(t1_min_embed, t1_max_embed, t2_min_embed, t2_max_embed):
# two word embeddings are a, b, c, d
# join is min value of (a, c), max value of (b, d)
join_min = tf.minimum(t1_min_embed, t2_min_embed)
join_max = tf.maximum(t1_max_embed, t2_max_embed)
# find meet is calculate the max value of (a,c), min value of (b,d)
meet_min = tf.maximum(t1_min_embed, t2_min_embed) #batchsize * embed_size
meet_max = tf.minimum(t1_max_embed, t2_max_embed) #batchsize * embed_size
# The overlap cube's max value have to be bigger than min value in every dimension to form a valid cube
# if it's not, then two concepts are disjoint, return none
cond = tf.cast(tf.less_equal(meet_max, meet_min), tf.float32) # batchsize * embed_size
# cond = tf.reduce_sum(cond, axis = 1)
cond = tf.cast(tf.reduce_sum(cond, axis = 1), tf.bool) # batchsize. If disjoint, cond > 0; else, cond = 0
return join_min, join_max, meet_min, meet_max, cond
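# Worked example (per dimension): boxes [0.2, 0.6] and [0.4, 0.9] give
# join = [0.2, 0.9] and meet = [0.4, 0.6]; for disjoint boxes such as
# [0.2, 0.3] and [0.4, 0.9], meet_max < meet_min holds and cond flags the pair.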
"""for positive examples"""
# this is for positive examples, slicing where function
def lambda_batch_log_upper_bound(join_min, join_max, meet_min, meet_max, a, b, c, d):
# this function returns the upper bound log(p(a join b) + 0.1 - p(a) - p(b)) for positive examples if they are disjoint,
# minus the log probability of the conditioning term
# we want to minimize the return value too
joint_log = batch_log_upper_bound(join_min, join_max, a, b, c, d)
domi_log = probability(a, b) # batch_size
cond_log = joint_log - domi_log # (batch_size)
return -cond_log
def lambda_batch_log_cube_measure(join_min, join_max, meet_min, meet_max, a, b, c, d):
# this function returns the negative conditional log probability of positive examples if they have overlap.
# we want to minimize the return value -log(p(a|b))
joint_log = probability(meet_min, meet_max)
domi_log = probability(a, b) # batch_size
cond_log = joint_log - domi_log # (batch_size)
smooth_log_prob = smooth_prob(cond_log)
cliped_smooth_log_prob = tf.clip_by_value(smooth_log_prob, np.log(1e-8), np.log(1.0))
return cliped_smooth_log_prob
"""for negative examples"""
def lambda_batch_log_cond_cube_measure(join_min, join_max, meet_min, meet_max, a, b, c, d):
# this function return the log(1-p(a, b))
# we want to minimize this value
neg_smooth_log_prob = -lambda_batch_log_cube_measure(join_min, join_max, meet_min, meet_max, a, b, c, d)
# because input to log1mexp is positive
cliped_neg_smooth_log_prob = tf.clip_by_value(neg_smooth_log_prob, 1e-8, neg_smooth_log_prob)
onemp_smooth_log_prob = log1mexp(cliped_neg_smooth_log_prob)
return onemp_smooth_log_prob
def test(join_min, join_max, meet_min, meet_max, a, b, c, d):
# this function return the log(1-p(a, b))
# we want to minimize this value
neg_smooth_log_prob = -lambda_batch_log_cube_measure(join_min, join_max, meet_min, meet_max, a, b, c, d)
# because input to log1mexp is positive
cliped_neg_smooth_log_prob = tf.clip_by_value(neg_smooth_log_prob, 1e-8, neg_smooth_log_prob)
onemp_smooth_log_prob = log1mexp(cliped_neg_smooth_log_prob)
return cliped_neg_smooth_log_prob, neg_smooth_log_prob
def lambda_zero_log_upper_bound(join_min, join_max, meet_min, meet_max, a, b, c, d):
# this function return 0 because in this case, two negative examples are already disjoint to each other
result = tf.zeros_like(tf.reduce_sum(join_min, axis = 1))
return result
"""for test examples"""
def lambda_batch_test_joint_cube_measure(join_min, join_max, meet_min, meet_max, a, b, c, d):
# this function returns the negative conditional log probability of positive examples if they have overlap.
# we want to minimize the return value -log(p(a|b))
joint_log = probability(meet_min, meet_max)
smooth_log_prob = smooth_prob(joint_log)
cliped_smooth_log_prob = tf.clip_by_value(smooth_log_prob, np.log(1e-8), np.log(1.0))
return cliped_smooth_log_prob
# if there is no meet, the two events are disjoint and the joint probability is 0
# if there is a meet, the joint probability is the measure of the meet cube
def test_joint_probability_log(join_min, join_max, meet_min, meet_max, a, b, c, d, not_have_meet):
result = slicing_where(condition = not_have_meet,
full_input = ([join_min, join_max, meet_min, meet_max, a, b, c, d]),
true_branch = lambda x: lambda_zero_log_upper_bound(*x),
false_branch = lambda x: lambda_batch_test_joint_cube_measure(*x))
return result
def test_cond_probability_log(join_min, join_max, meet_min, meet_max, a, b, c, d, not_have_meet):
result = slicing_where(condition = not_have_meet,
full_input = ([join_min, join_max, meet_min, meet_max, a, b, c, d]),
true_branch = lambda x: lambda_zero_log_upper_bound(*x),
false_branch = lambda x: lambda_batch_log_cube_measure(*x))
return result
"""helper function"""
def smooth_prob(input_prob):
lambda_value = 1e-6
pos_prob1 = tf.log(1-lambda_value) + input_prob # (batch_size)
pos_prob2 = tf.stack([pos_prob1, tf.zeros_like(input_prob) + tf.log(lambda_value) + tf.log(0.5)], axis = 1) #(batch_size, 2)
pos_prob = tf.reduce_logsumexp(pos_prob2, axis = 1) #(batch_size)
return pos_prob
# # log vector is a vector of negative log probabilities
def create_log_distribution(logits, batch_size):
log_1_minus = log1mexp(-logits)
# log_1 = tf.log(tf.ones([batch_size]))
# log_1_minus = logits + tf.log(tf.exp(log_1 - logits) - tf.ones([batch_size]))
return tf.concat([tf.expand_dims(logits, 1), tf.expand_dims(log_1_minus, 1)], 1)
# # vector is a tensor of (gold) probabilities
def create_distribution(probs, batch_size):
one_minus = tf.ones([batch_size]) - probs
return tf.concat([tf.expand_dims(probs, 1), tf.expand_dims(one_minus, 1)], 1)
def log1mexp(input_a):
# input_a: positive
# return the same shape as input
result = slicing_where(condition = tf.less_equal(input_a, tf.log(2.0)),
full_input = -input_a,
true_branch = lambda x: tf.log(-tf.expm1(x)),
false_branch = lambda x: tf.log1p(-tf.exp(x)))
return result
"""helper function. took from stackoverflow."""
def slicing_where(condition, full_input, true_branch, false_branch):
"""Split 'full_input' between 'true_branch' and 'false_branch' on 'condition'.
Args:
condition: A boolean Tensor with shape [B_1, ..., B_N].
full_input: A Tensor or nested tuple of Tensors of any dtype, each with
shape [B_1, ..., B_N, ...], to be split between 'true_branch' and
'false_branch' based on 'condition'.
true_branch: A function taking a single argument, that argument having the
same structure and number of batch dimensions as 'full_input'. Receives
slices of 'full_input' corresponding to the True entries of
'condition'. Returns a Tensor or nested tuple of Tensors, each with batch
dimensions matching its inputs.
false_branch: Like 'true_branch', but receives inputs corresponding to the
false elements of 'condition'. Returns a Tensor or nested tuple of Tensors
(with the same structure as the return value of 'true_branch'), but with
batch dimensions matching its inputs.
Returns:
Interleaved outputs from 'true_branch' and 'false_branch', each Tensor
having shape [B_1, ..., B_N, ...].
"""
full_input_flat = nest.flatten(full_input)
true_indices = tf.where(condition)
false_indices = tf.where(tf.logical_not(condition))
true_branch_inputs = nest.pack_sequence_as(
structure=full_input,
flat_sequence=[tf.gather_nd(params=input_tensor, indices=true_indices) for input_tensor in full_input_flat])
false_branch_inputs = nest.pack_sequence_as(
structure=full_input,
flat_sequence=[tf.gather_nd(params=input_tensor, indices=false_indices) for input_tensor in full_input_flat])
true_outputs = true_branch(true_branch_inputs)
false_outputs = false_branch(false_branch_inputs)
nest.assert_same_structure(true_outputs, false_outputs)
def scatter_outputs(true_output, false_output):
batch_shape = tf.shape(condition)
scattered_shape = tf.concat([batch_shape, tf.shape(true_output)[tf.rank(batch_shape):]], 0)
true_scatter = tf.scatter_nd(
indices=tf.cast(true_indices, tf.int32),
updates=true_output,
shape=scattered_shape)
false_scatter = tf.scatter_nd(
indices=tf.cast(false_indices, tf.int32),
updates=false_output,
shape=scattered_shape)
return true_scatter + false_scatter
result = nest.pack_sequence_as(
structure=true_outputs,
flat_sequence=[scatter_outputs(true_single_output, false_single_output) for true_single_output, false_single_output in zip(nest.flatten(true_outputs), nest.flatten(false_outputs))])
return result
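# Hedged sanity check (not in the original source): a NumPy reference for
# log1mexp above, splitting at log(2) the same way. Assumes positive input.
def log1mexp_np(a):
    a = np.asarray(a, dtype=np.float64)
    return np.where(a <= np.log(2.0),
                    np.log(-np.expm1(-a)),   # small a: expm1 is more stable
                    np.log1p(-np.exp(-a)))   # large a: log1p is more stable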
|
11498736
|
from seamless.highlevel import Context, Cell
ctx = Context()
ctx.v = "test"
ctx.v_schema = Cell()
ctx.v_schema.celltype = "plain"
ctx.translate()
ctx.link(ctx.v.schema, ctx.v_schema)
ctx.translate()
ctx.v_schema.set({'type': 'integer'})
ctx.compute()
print(ctx.v.schema)
print("*" * 50)
print(ctx.v.exception)
print("*" * 50)
ctx.v.schema.set({})
ctx.compute() # this is needed, else the 1.2 below might take effect first,
# and then be overwritten by this. Seamless is async!!
print(ctx.v.schema)
print(ctx.v_schema.value)
ctx.v.example.set(1.2)
ctx.compute()
print("value:", ctx.v.value)
print("data:", ctx.v.data)
print("buffered:", ctx.v.buffered)
print(ctx.v_schema.value)
print("*" * 50)
print(ctx.v.exception)
print("*" * 50)
ctx.v_schema.set({"type": "string"})
ctx.compute()
print(ctx.v_schema.value)
print(ctx.v.schema)
print("value:", ctx.v.value)
|
11498746
|
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.models.vgg import vgg19
from nets.srgan import Discriminator, Generator
from utils.dataloader import SRGAN_dataset_collate, SRGANDataset
from utils.utils_fit import fit_one_epoch
if __name__ == "__main__":
#-------------------------------#
#   Whether to use CUDA
#   Set to False if no GPU is available
#-------------------------------#
Cuda = True
#-----------------------------------#
#   Perform 4x upsampling
#-----------------------------------#
scale_factor = 4
#-----------------------------------#
#   Shapes of the input and output images
#-----------------------------------#
lr_shape = [96, 96]
hr_shape = [lr_shape[0] * scale_factor, lr_shape[1] * scale_factor]
#--------------------------------------------------------------------------#
#   To resume training from a checkpoint, set model_path to a weight file
#   already trained under the logs folder.
#   When model_path = '', no model weights are loaded.
#
#   The weights of the whole model are used here, so they are loaded in train.py.
#   To train the model from scratch, set model_path = ''.
#--------------------------------------------------------------------------#
G_model_path = ""
D_model_path = ""
#------------------------------#
#   Training hyperparameters
#------------------------------#
Init_epoch = 0
Epoch = 200
batch_size = 4
lr = 0.0002
#------------------------------#
#   Save sample images every 50 steps
#------------------------------#
save_interval = 50
#------------------------------#
#   Path of the image annotation file
#------------------------------#
annotation_path = "train_lines.txt"
#---------------------------#
#   Generator and discriminator networks
#---------------------------#
G_model = Generator(scale_factor)
D_model = Discriminator()
#-----------------------------------#
#   Build the VGG model used for feature extraction
#-----------------------------------#
VGG_model = vgg19(pretrained=True)
VGG_feature_model = nn.Sequential(*list(VGG_model.features)[:-1]).eval()
for param in VGG_feature_model.parameters():
param.requires_grad = False
#------------------------------------------#
#   Reload previously trained weights
#------------------------------------------#
if G_model_path != '':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_dict = G_model.state_dict()
pretrained_dict = torch.load(G_model_path, map_location=device)
pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
model_dict.update(pretrained_dict)
G_model.load_state_dict(model_dict)
if D_model_path != '':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_dict = D_model.state_dict()
pretrained_dict = torch.load(D_model_path, map_location=device)
pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
model_dict.update(pretrained_dict)
D_model.load_state_dict(model_dict)
G_model_train = G_model.train()
D_model_train = D_model.train()
if Cuda:
cudnn.benchmark = True
G_model_train = torch.nn.DataParallel(G_model)
G_model_train = G_model_train.cuda()
D_model_train = torch.nn.DataParallel(D_model)
D_model_train = D_model_train.cuda()
VGG_feature_model = torch.nn.DataParallel(VGG_feature_model)
VGG_feature_model = VGG_feature_model.cuda()
# Binary Cross Entropy loss
BCE_loss = nn.BCELoss()
MSE_loss = nn.MSELoss()
with open(annotation_path) as f:
lines = f.readlines()
num_train = len(lines)
#------------------------------------------------------#
#   Init_epoch is the starting epoch
#   Epoch is the total number of training epochs
#------------------------------------------------------#
if True:
epoch_step = min(num_train // batch_size, 2000)
if epoch_step == 0:
raise ValueError("The dataset is too small to train on; please enlarge it.")
#------------------------------#
# Adam optimizer
#------------------------------#
G_optimizer = optim.Adam(G_model_train.parameters(), lr=lr, betas=(0.9, 0.999))
D_optimizer = optim.Adam(D_model_train.parameters(), lr=lr, betas=(0.9, 0.999))
G_lr_scheduler = optim.lr_scheduler.StepLR(G_optimizer,step_size=1,gamma=0.98)
D_lr_scheduler = optim.lr_scheduler.StepLR(D_optimizer,step_size=1,gamma=0.98)
train_dataset = SRGANDataset(lines, lr_shape, hr_shape)
gen = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4, pin_memory=True,
drop_last=True, collate_fn=SRGAN_dataset_collate)
for epoch in range(Init_epoch, Epoch):
fit_one_epoch(G_model_train, D_model_train, G_model, D_model, VGG_feature_model, G_optimizer, D_optimizer, BCE_loss, MSE_loss, epoch, epoch_step, gen, Epoch, Cuda, batch_size, save_interval)
G_lr_scheduler.step()
D_lr_scheduler.step()
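#------------------------------------------------------------------#
#   Hedged sketch (not part of the original script): the loss terms
#   that fit_one_epoch presumably combines, given the criteria built
#   above. Variable names lr_imgs / hr_imgs are illustrative only.
#
#   fake_hr     = G_model_train(lr_imgs)
#   pred_fake   = D_model_train(fake_hr)
#   perceptual  = MSE_loss(VGG_feature_model(fake_hr),
#                          VGG_feature_model(hr_imgs))
#   adversarial = BCE_loss(pred_fake, torch.ones_like(pred_fake))
#------------------------------------------------------------------#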
|
11498789
|
import numpy as np
try:
from utils.darts_utils import compute_latency_ms_tensorrt as compute_latency
print("use TensorRT for latency test")
except Exception:  # fall back if the TensorRT-based helper cannot be imported
from utils.darts_utils import compute_latency_ms_pytorch as compute_latency
print("use PyTorch for latency test")
import torch
import torch.nn as nn
import os.path as osp
latency_lookup_table = {}
# table_file_name = "latency_lookup_table.npy"
# if osp.isfile(table_file_name):
# latency_lookup_table = np.load(table_file_name).item()
import torch.nn.functional as F
from collections import OrderedDict
from layers import NaiveSyncBatchNorm
from operations import ConvNorm
from att_sa import Self_Attn
BatchNorm2d = NaiveSyncBatchNorm
class ConvBnRelu(nn.Module):
def __init__(self, in_planes, out_planes, ksize, stride, pad, dilation=1,
groups=1, has_bn=True, norm_layer=nn.BatchNorm2d, bn_eps=1e-5,
has_relu=True, inplace=True, has_bias=False):
super(ConvBnRelu, self).__init__()
groups = 1
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=ksize,
stride=stride, padding=pad,
dilation=dilation, groups=groups, bias=has_bias)
self.has_bn = has_bn
if self.has_bn:
self.bn = norm_layer(out_planes, eps=bn_eps)
self.has_relu = has_relu
if self.has_relu:
self.relu = nn.ReLU(inplace=inplace)
def forward(self, x):
x = self.conv(x)
if self.has_bn:
x = self.bn(x)
if self.has_relu:
x = self.relu(x)
return x
class SeparableConvBnRelu(nn.Module):
def __init__(self, in_channels, out_channels,
kernel_size=1, stride=1, padding=0, dilation=1,
has_relu=True, norm_layer=nn.BatchNorm2d):
super(SeparableConvBnRelu, self).__init__()
self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride,
padding, dilation, groups=in_channels,
bias=False)
self.bn = norm_layer(in_channels)
self.point_wise_cbr = ConvBnRelu(in_channels, out_channels, 1, 1, 0,
has_bn=True, norm_layer=norm_layer,
has_relu=has_relu, has_bias=False)
def forward(self, x):
x = self.conv1(x)
x = self.bn(x)
x = self.point_wise_cbr(x)
return x
class GlobalAvgPool2d(nn.Module):
def __init__(self):
"""Global average pooling over the input's spatial dimensions"""
super(GlobalAvgPool2d, self).__init__()
def forward(self, inputs):
in_size = inputs.size()
inputs = inputs.view((in_size[0], in_size[1], -1)).mean(dim=2)
inputs = inputs.view(in_size[0], in_size[1], 1, 1)
return inputs
class SELayer(nn.Module):
def __init__(self, in_planes, out_planes, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(in_planes, out_planes // reduction),
nn.ReLU(inplace=True),
nn.Linear(out_planes // reduction, out_planes),
nn.Sigmoid()
)
self.out_planes = out_planes
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, self.out_planes, 1, 1)
return y
# For DFN
class ChannelAttention(nn.Module):
def __init__(self, in_planes, out_planes, reduction):
super(ChannelAttention, self).__init__()
self.channel_attention = SELayer(in_planes, out_planes, reduction)
def forward(self, x1, x2):
fm = torch.cat([x1, x2], 1)
channel_attention = self.channel_attention(fm)
fm = x1 * channel_attention + x2
return fm
class BNRefine(nn.Module):
def __init__(self, in_planes, out_planes, ksize, has_bias=False,
has_relu=False, norm_layer=nn.BatchNorm2d, bn_eps=1e-5):
super(BNRefine, self).__init__()
self.conv_bn_relu = ConvBnRelu(in_planes, out_planes, ksize, 1,
ksize // 2, has_bias=has_bias,
norm_layer=norm_layer, bn_eps=bn_eps)
self.conv_refine = nn.Conv2d(out_planes, out_planes, kernel_size=ksize,
stride=1, padding=ksize // 2, dilation=1,
bias=has_bias)
self.has_relu = has_relu
if self.has_relu:
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
t = self.conv_bn_relu(x)
t = self.conv_refine(t)
if self.has_relu:
return self.relu(t + x)
return t + x
class RefineResidual(nn.Module):
def __init__(self, in_planes, out_planes, ksize, has_bias=False,
has_relu=False, norm_layer=nn.BatchNorm2d, bn_eps=1e-5):
super(RefineResidual, self).__init__()
self.conv_1x1 = nn.Conv2d(in_planes, out_planes, kernel_size=1,
stride=1, padding=0, dilation=1,
bias=has_bias)
self.cbr = ConvBnRelu(out_planes, out_planes, ksize, 1,
ksize // 2, has_bias=has_bias,
norm_layer=norm_layer, bn_eps=bn_eps)
self.conv_refine = nn.Conv2d(out_planes, out_planes, kernel_size=ksize,
stride=1, padding=ksize // 2, dilation=1,
bias=has_bias)
self.has_relu = has_relu
if self.has_relu:
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x = self.conv_1x1(x)
t = self.cbr(x)
t = self.conv_refine(t)
if self.has_relu:
return self.relu(t + x)
return t + x
# For BiSeNet
class AttentionRefinement(nn.Module):
def __init__(self, in_planes, out_planes,
norm_layer=nn.BatchNorm2d):
super(AttentionRefinement, self).__init__()
self.conv_3x3 = ConvBnRelu(in_planes, out_planes, 3, 1, 1,
has_bn=True, norm_layer=norm_layer,
has_relu=True, has_bias=False)
self.channel_attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
ConvBnRelu(out_planes, out_planes, 1, 1, 0,
has_bn=True, norm_layer=norm_layer,
has_relu=False, has_bias=False),
nn.Sigmoid()
)
def forward(self, x):
fm = self.conv_3x3(x)
fm_se = self.channel_attention(fm)
fm = fm * fm_se
return fm
class FeatureFusion(nn.Module):
def __init__(self, in_planes, out_planes, reduction=1, Fch=16, scale=4, branch=2, norm_layer=nn.BatchNorm2d):
super(FeatureFusion, self).__init__()
self.conv_1x1 = ConvBnRelu(in_planes, out_planes, 1, 1, 0,
has_bn=True, norm_layer=norm_layer,
has_relu=True, has_bias=False)
# self.channel_attention = nn.Sequential(
# nn.AdaptiveAvgPool2d(1),
# ConvBnRelu(out_planes, out_planes // reduction, 1, 1, 0,
# has_bn=False, norm_layer=norm_layer,
# has_relu=True, has_bias=False),
# ConvBnRelu(out_planes // reduction, out_planes, 1, 1, 0,
# has_bn=False, norm_layer=norm_layer,
# has_relu=False, has_bias=False),
# nn.Sigmoid()
# )
self._Fch = Fch
self._scale = scale
self._branch = branch
@staticmethod
def _latency(h, w, C_in, C_out):
layer = FeatureFusion(C_in, C_out)
latency = compute_latency(layer, (1, C_in, h, w))
return latency
def forward_latency(self, size):
name = "ff_H%d_W%d_C%d"%(size[1], size[2], size[0])
if name in latency_lookup_table:
latency = latency_lookup_table[name]
return latency, size
else:
print("not found in latency_lookup_table:", name)
latency = FeatureFusion._latency(size[1], size[2], self._scale*self._Fch*self._branch, self._scale*self._Fch*self._branch)
latency_lookup_table[name] = latency
np.save("latency_lookup_table.npy", latency_lookup_table)
return latency, size
def forward(self, fm):
# fm is already a concatenation of multiple scales
fm = self.conv_1x1(fm)
return fm
# fm_se = self.channel_attention(fm)
# output = fm + fm * fm_se
# return output
class Head(nn.Module):
def __init__(self, in_planes, out_planes=19, Fch=16, scale=4, branch=2, is_aux=False, norm_layer=nn.BatchNorm2d, fmap_size=(128, 256)):
super(Head, self).__init__()
if in_planes <= 64:
mid_planes = in_planes
elif in_planes <= 256:
if is_aux:
mid_planes = in_planes
else:
mid_planes = in_planes
else:
# in_planes > 256:
if is_aux:
mid_planes = in_planes // 2
else:
mid_planes = in_planes // 2
self.att_sa = Self_Attn(dim=in_planes, fmap_size=fmap_size, dim_out=mid_planes, proj_factor=4, downsample=False)
# self.conv_3x3 = ConvBnRelu(in_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
self.conv_1x1 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, stride=1, padding=0)
self._in_planes = in_planes
self._out_planes = out_planes
self._Fch = Fch
self._scale = scale
self._branch = branch
@staticmethod
def _latency(h, w, C_in, C_out=19):
layer = Head(C_in, C_out)
latency = compute_latency(layer, (1, C_in, h, w))
return latency
def forward_latency(self, size):
assert size[0] == self._in_planes, "size[0] %d, self._in_planes %d"%(size[0], self._in_planes)
name = "head_H%d_W%d_Cin%d_Cout%d"%(size[1], size[2], size[0], self._out_planes)
if name in latency_lookup_table:
latency = latency_lookup_table[name]
return latency, (self._out_planes, size[1], size[2])
else:
print("not found in latency_lookup_table:", name)
latency = Head._latency(size[1], size[2], self._scale*self._Fch*self._branch, self._out_planes)
latency_lookup_table[name] = latency
np.save("latency_lookup_table.npy", latency_lookup_table)
return latency, (self._out_planes, size[1], size[2])
def forward(self, x):
# fm = self.conv_3x3(x)
fm = self.att_sa(x)
output = self.conv_1x1(fm)
return output
class Decoder(nn.Module):
def __init__(self, in_planes, low_level_inplanes, out_planes=19, Fch=16, scale=4, branch=2, is_aux=False, norm_layer=nn.BatchNorm2d, fmap_size=(128, 256)):
super(Decoder, self).__init__()
C_low = 48
self.feature_projection = ConvNorm(low_level_inplanes, C_low, kernel_size=1, stride=1, padding=0, bias=False, groups=1, slimmable=False)
# in_planes = in_planes + C_low
if in_planes <= 64:
mid_planes = in_planes
elif in_planes <= 256:
if is_aux:
mid_planes = in_planes
else:
mid_planes = in_planes
else:
# in_planes > 256:
if is_aux:
mid_planes = in_planes // 2
else:
mid_planes = in_planes // 2
self.att_sa = Self_Attn(dim=in_planes, fmap_size=fmap_size, dim_out=mid_planes, proj_factor=4, downsample=False)
self.conv_3x3 = ConvBnRelu(mid_planes + C_low, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
self.conv_1x1 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, stride=1, padding=0)
self._in_planes = in_planes
self._out_planes = out_planes
self._Fch = Fch
self._scale = scale
self._branch = branch
@staticmethod
def _latency(h, w, C_in, C_out=19):
layer = Head(C_in, C_out)
latency = compute_latency(layer, (1, C_in, h, w))
return latency
def forward_latency(self, size):
assert size[0] == self._in_planes, "size[0] %d, self._in_planes %d"%(size[0], self._in_planes)
name = "head_H%d_W%d_Cin%d_Cout%d"%(size[1], size[2], size[0], self._out_planes)
if name in latency_lookup_table:
latency = latency_lookup_table[name]
return latency, (self._out_planes, size[1], size[2])
else:
print("not found in latency_lookup_table:", name)
latency = Head._latency(size[1], size[2], self._scale*self._Fch*self._branch, self._out_planes)
latency_lookup_table[name] = latency
np.save("latency_lookup_table.npy", latency_lookup_table)
return latency, (self._out_planes, size[1], size[2])
def forward(self, x, low_level_feat):
low_level_feat = self.feature_projection(low_level_feat)
x = self.att_sa(x)
x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear', align_corners=False)
x = torch.cat((x, low_level_feat), dim=1)
# x = self.att_sa(x)
x = self.conv_3x3(x)
output = self.conv_1x1(x)
return output
class BasicResidual_downup_2x(nn.Module):
def __init__(self, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1):
super(BasicResidual_downup_2x, self).__init__()
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
groups = 1
self.C_in = C_in
self.C_out = C_out
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.groups = groups
assert stride in [1, 2]
if self.stride == 2: self.dilation = 1
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(C_in, C_out, 3, 1, padding=dilation, dilation=dilation, groups=groups, bias=False)
# self.bn1 = nn.BatchNorm2d(C_out)
self.bn1 = BatchNorm2d(C_out)
self.conv2 = nn.Conv2d(C_out, C_out, 3, 1, padding=dilation, dilation=dilation, groups=groups, bias=False)
# self.bn2 = nn.BatchNorm2d(C_out)
self.bn2 = BatchNorm2d(C_out)
if self.stride==1:
self.downsample = nn.Sequential(
nn.Conv2d(C_in, C_out, 1, 1, padding=0, dilation=dilation, groups=groups, bias=False),
BatchNorm2d(C_out)
)
def forward(self, x):
out = F.interpolate(x, size=(int(x.size(2))//2, int(x.size(3))//2), mode='bilinear', align_corners=False)
out = self.conv1(out)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.stride == 1:
out = F.interpolate(out, size=(int(x.size(2)), int(x.size(3))), mode='bilinear', align_corners=False)
out = out + self.downsample(x)
out = self.relu(out)
return out
class PanopticHead(nn.Module):
def __init__(self, in_planes, out_planes=19, Fch=16, scale=4, branch=2, is_aux=False, norm_layer=nn.BatchNorm2d, fmap_size=(128, 256)):
super(PanopticHead, self).__init__()
if in_planes <= 64:
mid_planes = in_planes
elif in_planes <= 256:
if is_aux:
mid_planes = in_planes
else:
mid_planes = in_planes
else:
# in_planes > 256:
if is_aux:
mid_planes = in_planes // 2
else:
mid_planes = in_planes // 2
decoder2_planes = mid_planes // 2
self.att_sa = Self_Attn(dim=in_planes, fmap_size=(128, 256), dim_out=in_planes, proj_factor=4, downsample=False)
self.decoder1 = BasicResidual_downup_2x(in_planes, mid_planes, 3, 1, 1)
self.conv_3x3 = ConvBnRelu(mid_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
self.conv_1x1 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, stride=1, padding=0)
self._in_planes = in_planes
self._out_planes = out_planes
self._Fch = Fch
self._scale = scale
self._branch = branch
# self.att_sa2 = Self_Attn(dim=in_planes, fmap_size=(128, 256), dim_out=mid_planes, proj_factor=4, downsample=False)
self.decoder2 = BasicResidual_downup_2x(in_planes, decoder2_planes, 3, 1, 1)
self.center_conv_3x3 = ConvBnRelu(decoder2_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
self.center_conv_1x1 = nn.Conv2d(mid_planes, 1, kernel_size=1, stride=1, padding=0)
self.offset_conv_3x3 = ConvBnRelu(decoder2_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
self.offset_conv_1x1 = nn.Conv2d(mid_planes, 2, kernel_size=1, stride=1, padding=0)
@staticmethod
def _latency(h, w, C_in, C_out=19):
layer = PanopticHead(C_in, C_out)
latency = compute_latency(layer, (1, C_in, h, w))
return latency
def forward_latency(self, size):
assert size[0] == self._in_planes, "size[0] %d, self._in_planes %d"%(size[0], self._in_planes)
name = "panoptichead%d_W%d_Cin%d_Cout%d"%(size[1], size[2], size[0], self._out_planes)
if name in latency_lookup_table:
latency = latency_lookup_table[name]
return latency, (self._out_planes, size[1], size[2])
else:
print("not found in latency_lookup_table:", name)
latency = Head._latency(size[1], size[2], self._scale*self._Fch*self._branch, self._out_planes)
latency_lookup_table[name] = latency
np.save("latency_lookup_table.npy", latency_lookup_table)
return latency, (self._out_planes, size[1], size[2])
def forward(self, x):
output_dict = OrderedDict()
xs = self.att_sa(x)
# semantic = self.att_sa1(x)
semantic = self.decoder1(xs)
semantic = self.conv_3x3(semantic)
semantic = self.conv_1x1(semantic)
# other = self.att_sa2(x)
other = self.decoder2(x)
center = self.center_conv_3x3(other)
center = self.center_conv_1x1(center)
offset = self.offset_conv_3x3(other)
offset = self.offset_conv_1x1(offset)
output_dict['semantic'] = semantic
output_dict['center'] = center
output_dict['offset'] = offset
return output_dict
class PanopticHeadDecoder(nn.Module):
def __init__(self, in_planes, low_level_inplanes, out_planes=19, Fch=16, scale=4, branch=2, is_aux=False, norm_layer=nn.BatchNorm2d, fmap_size=(128, 256)):
super(PanopticHeadDecoder, self).__init__()
C_low = 48
self.feature_projection = ConvNorm(low_level_inplanes, C_low, kernel_size=1, stride=1, padding=0, bias=False, groups=1, slimmable=False)
self.feature_projection_sem = ConvNorm(low_level_inplanes, C_low, kernel_size=1, stride=1, padding=0, bias=False, groups=1, slimmable=False)
# in_planes = in_planes + C_low
if in_planes <= 64:
mid_planes = in_planes
elif in_planes <= 256:
if is_aux:
mid_planes = in_planes
else:
mid_planes = in_planes
else:
# in_planes > 256:
if is_aux:
mid_planes = in_planes // 2
else:
mid_planes = in_planes // 2
decoder2_planes = mid_planes // 2
self.att_sa = Self_Attn(dim=in_planes, fmap_size=fmap_size, dim_out=in_planes, proj_factor=4, downsample=False)
# self.att_sa1 = Self_Attn(dim=in_planes, fmap_size=(128, 256), dim_out=mid_planes, proj_factor=4, downsample=False)
self.decoder1 = BasicResidual_downup_2x(in_planes+C_low, mid_planes, 3, 1, 1)
self.conv_3x3 = ConvBnRelu(mid_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
self.conv_1x1 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, stride=1, padding=0)
self._in_planes = in_planes
self._out_planes = out_planes
self._Fch = Fch
self._scale = scale
self._branch = branch
# self.att_sa2 = Self_Attn(dim=in_planes, fmap_size=(128, 256), dim_out=mid_planes, proj_factor=4, downsample=False)
self.decoder2 = BasicResidual_downup_2x(in_planes+C_low, decoder2_planes, 3, 1, 1)
self.center_conv_3x3 = ConvBnRelu(decoder2_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
self.center_conv_1x1 = nn.Conv2d(mid_planes, 1, kernel_size=1, stride=1, padding=0)
self.offset_conv_3x3 = ConvBnRelu(decoder2_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
self.offset_conv_1x1 = nn.Conv2d(mid_planes, 2, kernel_size=1, stride=1, padding=0)
@staticmethod
def _latency(h, w, C_in, C_out=19):
layer = PanopticHead(C_in, C_out)
latency = compute_latency(layer, (1, C_in, h, w))
return latency
def forward_latency(self, size):
assert size[0] == self._in_planes, "size[0] %d, self._in_planes %d"%(size[0], self._in_planes)
name = "panopticheaddecoder%d_W%d_Cin%d_Cout%d"%(size[1], size[2], size[0], self._out_planes)
if name in latency_lookup_table:
latency = latency_lookup_table[name]
return latency, (self._out_planes, size[1], size[2])
else:
print("not found in latency_lookup_table:", name)
latency = Head._latency(size[1], size[2], self._scale*self._Fch*self._branch, self._out_planes)
latency_lookup_table[name] = latency
np.save("latency_lookup_table.npy", latency_lookup_table)
return latency, (self._out_planes, size[1], size[2])
def forward(self, x, low_level_feat):
output_dict = OrderedDict()
xs = self.att_sa(x)
low_level_feat_sem = self.feature_projection_sem(low_level_feat)
xs = F.interpolate(xs, size=low_level_feat_sem.size()[2:], mode='bilinear', align_corners=False)
xs = torch.cat((xs, low_level_feat_sem), dim=1)
semantic = self.decoder1(xs)
semantic = self.conv_3x3(semantic)
semantic = self.conv_1x1(semantic)
low_level_feat = self.feature_projection(low_level_feat)
x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear', align_corners=False)
x = torch.cat((x, low_level_feat), dim=1)
other = self.decoder2(x)
center = self.center_conv_3x3(other)
center = self.center_conv_1x1(center)
offset = self.offset_conv_3x3(other)
offset = self.offset_conv_1x1(offset)
output_dict['semantic'] = semantic
output_dict['center'] = center
output_dict['offset'] = offset
return output_dict
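# Hedged smoke test (not in the original source): exercises ConvBnRelu, the
# only block above with no project-local dependencies. Shapes are illustrative
# and assume the module-level imports (layers, operations, att_sa) resolve.
if __name__ == "__main__":
    x = torch.randn(2, 64, 32, 64)
    block = ConvBnRelu(64, 128, ksize=3, stride=1, pad=1)
    print(block(x).shape)  # expected: torch.Size([2, 128, 32, 64])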
|
11498856
|
import math
from functools import reduce
from internal import utility as utility
from internal.terrain.cloud import Cloud
class Weather:
def __init__(self, cells, parent):
self.cells = cells
self.water_cells = []
self.clouds = []
self.cells_x = utility.S_WIDTH // utility.CELL_SIZE
self.cells_y = utility.S_HEIGHT // utility.CELL_SIZE
self.setup(parent)
def setup(self, parent):
self.wind_vectors = self.calculate_wind_vectors()
self.setup_clouds()
self.water_cells = self.get_water_cells()
def get_water_cells(self):
res = []
for row in self.cells:
for cell in row:
if cell.terrain.is_water():
res.append(cell)
return res
def setup_clouds(self):
for x, row in enumerate(self.cells):
self.clouds.append([])
for y, cell in enumerate(row):
self.clouds[-1].append([])
def handle_evaporation(self):
for i, cell in enumerate(self.water_cells):
# utility.show_bar(i, self.water_cells, message='Handling evaporation: ')
amount = cell.get_evaporation() * 4
if amount <= 0:
continue
if len(self.clouds[cell.x][cell.y]) > 0:
self.clouds[cell.x][cell.y][0].water += amount
else:
self.clouds[cell.x][cell.y].append(Cloud(cell.x, cell.y, amount))
def handle_clouds(self):
for x, row in enumerate(self.clouds):
for y, cell in enumerate(row):
for cloud in list(cell):  # iterate over a copy: clouds may be removed below
cloud.processed = False
cloud.age += 1
if not self.cells[x][y].terrain.is_water():
self.cells[x][y].terrain.moisture += cloud.precipitate()
if cloud.water <= 0:
cell.remove(cloud)
def calculate_wind_vectors(self):
wind_vectors = []
for x, row in enumerate(self.cells):
wind_vectors.append([])
for y, cell in enumerate(row):
dx, dy = 0.0, 0.0
for neighbor in cell.neighbors():
cdx, cdy = cell.x - neighbor.x, cell.y - neighbor.y
cdx = cdx * (cell.get_temperature() - neighbor.get_temperature()) / cell.get_temperature()
cdy = cdy * (cell.get_temperature() - neighbor.get_temperature()) / cell.get_temperature()
dx += cdx
dy += cdy
mag = math.sqrt(dx ** 2 + dy ** 2)
if mag > 0:  # guard: mag is 0 when all neighbors share this cell's temperature
    dx, dy = dx / mag * 5, dy / mag * 5
dx += 1.5 # Wind goes west to east
wind_vectors[-1].append((dx, dy))
return wind_vectors
def handle_wind(self):
for x, row in enumerate(self.clouds):
for y, cell in enumerate(row):
for cloud in list(cell):  # iterate over a copy: clouds are moved out below
if not cloud.processed:
cell.remove(cloud)
cloud.processed = True
dx, dy = self.wind_vectors[x][y]
cloud.x += dx
cloud.y += dy
if cloud.x >= self.cells_x:
cloud.x = 0
elif cloud.x < 0:
cloud.x = self.cells_x - 1
if cloud.y >= self.cells_y:
cloud.y = 0
elif cloud.y < 0:
cloud.y = self.cells_y - 1
self.clouds[int(cloud.x)][int(cloud.y)].append(cloud)
def step(self):
self.handle_wind()
self.handle_clouds()
self.handle_evaporation()
def normalize_moistures(self):
print('Normalizing moistures.')
moistures = reduce(lambda a, b: a + b,
map(lambda row: list(map(lambda cell: cell.terrain.moisture, row)), self.cells))
max_amount = max(moistures)
if max_amount == 0:
    return  # avoid division by zero when no moisture accumulated
for row in self.cells:
for cell in row:
cell.terrain.moisture /= max_amount
def run(self, steps):
for step in range(steps + 1):
utility.show_bar(step, steps + 1, message='Generating rainfall patterns: ', number_limit=True)
self.step()
data = reduce(lambda a, b: a + b, map(lambda row: list(map(lambda cell: cell.terrain.moisture, row)), self.cells))
max_amount = max(data)
if max_amount != 0:
data = list(map(lambda i: i / max_amount, data))
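# Hedged note (not in the original source): Weather duck-types its grid.
# Each cell must expose x, y, neighbors(), get_evaporation(), and
# get_temperature() (non-zero, since the wind calculation divides by it),
# plus a terrain object with is_water() and a mutable moisture attribute.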
|
11498862
|
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __eq__(self, other):
return self.val == other.val and self.next == other.next
def __repr__(self):
return '<Node {} {}>'.format(self.val, self.next)
class LinkedList(ListNode):
def __init__(self, arr):
nodes = [ListNode(v) for v in arr]
for i in range(1, len(nodes)):
nodes[i-1].next = nodes[i]
head = nodes[0]
self.val = head.val
self.next = head.next
def reverse_sublist(succeeding_tail, sublist_len):
if succeeding_tail.next:
sublist_head, sublist_tail, rest = reverse_linked_list(succeeding_tail.next, sublist_len)
# Relink the reversed sublist into the surrounding list
succeeding_tail.next = sublist_head
sublist_tail.next = rest
def reverse_linked_list_positional(head, m, n):
"""
Reverse a linked list from position m to n.
"""
cursor = head
if m == 1:
dummy = ListNode("Dummy")
dummy.next = cursor
reverse_sublist(dummy, n + 1 - m)
return dummy.next
pos = 1
while cursor:
pos += 1
if pos < m:
cursor = cursor.next
else:
break
reverse_sublist(cursor, n + 1 - m)
return head
def reverse_linked_list(head, max_node_count):
"""
Reverse linked list but only up to max_node_count.
Return the new head and tail of the linked list.
"""
if not head:
return None, None, None
count = 1
first_node = head
second_node = head.next
head.next = None # Make sure list has no cycle
while second_node and count < max_node_count:
third_node = second_node.next
second_node.next = first_node
first_node = second_node
second_node = third_node
count += 1
rest = second_node
tail = head # Old head is the new tail
head = first_node # Head is now the last node
return head, tail, rest
def test_reverse_linked_list():
a = LinkedList([5, 4, 3, 2, 1])
head, tail, rest = reverse_linked_list(a, 5)
assert head == LinkedList([1, 2, 3, 4, 5])
assert tail == ListNode(5)
assert rest is None
def test_reverse_linked_list_partially():
a = LinkedList([7, 6, 5, 4, 3, 2, 1])
head, tail, rest = reverse_linked_list(a, 4)
assert head == LinkedList([4, 5, 6, 7])
assert tail == ListNode(7)
assert rest == LinkedList([3, 2, 1])
def test_reverse_linked_list_positional():
a = LinkedList([1, 2, 3, 4, 5])
head = reverse_linked_list_positional(a, m=2, n=4)
assert head == LinkedList([1, 4, 3, 2, 5])
def test_reverse_linked_list_short():
a = LinkedList([1])
head = reverse_linked_list_positional(a, m=1, n=1)
assert head == LinkedList([1])
b = LinkedList([1, 2])
head = reverse_linked_list_positional(b, m=1, n=2)
assert head == LinkedList([2, 1])
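if __name__ == '__main__':
    # Convenience runner (not in the original source) so the tests can be
    # executed directly without pytest.
    test_reverse_linked_list()
    test_reverse_linked_list_partially()
    test_reverse_linked_list_positional()
    test_reverse_linked_list_short()
    print('All tests passed.')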
|
11498886
|
from matplotlib import gridspec
from matplotlib import pyplot
import skimage.transform
import numpy
def create_pascal_label_colormap():
"""Creates a label colormap used in PASCAL VOC segmentation benchmark.
Returns:
A Colormap for visualizing segmentation results.
"""
colormap = numpy.zeros((256, 3), dtype=int)
ind = numpy.arange(256, dtype=int)
for shift in reversed(range(8)):
for channel in range(3):
colormap[:, channel] |= ((ind >> channel) & 1) << shift
ind >>= 3
return colormap
def label_to_color_image(label):
"""Adds color defined by the dataset colormap to the label.
Args:
label: A 2D array with integer type, storing the segmentation label.
Returns:
result: A 2D array with floating type. The element of the array
is the color indexed by the corresponding element in the input label
to the PASCAL color map.
Raises:
ValueError: If label is not of rank 2 or its value is larger than color
map maximum entry.
"""
if label.ndim != 2:
raise ValueError('Expect 2-D input label')
colormap = create_pascal_label_colormap()
if numpy.max(label) >= len(colormap):
raise ValueError('label value too large.')
return colormap[label]
LABEL_NAMES = numpy.asarray([
'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tv'
])
FULL_LABEL_MAP = numpy.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
def vis_segmentation(image, deeplab_seg_map, icnet_seg_map):
"""Visualizes inumpyut image, segmentation map and overlay view."""
pyplot.figure(figsize=(15, 5))
grid_spec = gridspec.GridSpec(1, 4, width_ratios=[4, 4, 4, 4])
pyplot.subplot(grid_spec[0])
pyplot.imshow(image)
pyplot.axis('off')
pyplot.title('Input Image')
pyplot.subplot(grid_spec[1])
seg_image = label_to_color_image(deeplab_seg_map).astype(numpy.uint8)
pyplot.imshow(seg_image)
pyplot.axis('off')
pyplot.title('Deeplab v3 Segmentation')
pyplot.subplot(grid_spec[2])
# resize icnet mask
icnet_seg_map = skimage.transform.resize(
icnet_seg_map[0, :, :],
deeplab_seg_map.shape,
preserve_range=True,
anti_aliasing=False,
order=0).astype('int')
seg_image = label_to_color_image(icnet_seg_map).astype(numpy.uint8)
pyplot.imshow(seg_image)
pyplot.axis('off')
pyplot.title('Fritz Segmentation')
pyplot.subplot(grid_spec[3])
pyplot.imshow(image)
pyplot.imshow(seg_image, alpha=0.7)
pyplot.axis('off')
pyplot.title('Fritz Segmentation Overlay')
pyplot.grid(False)
pyplot.show()
def multiple_vis(results):
fig = pyplot.figure(figsize=(15, 3 * len(results)))
grid_spec = gridspec.GridSpec(len(results), 4, width_ratios=[4, 4, 4, 4])
i = 0
for image, deeplab_seg_map, icnet_seg_map in results:
pyplot.subplot(grid_spec[i])
pyplot.imshow(image)
# pyplot.axis('off')
i += 1
pyplot.subplot(grid_spec[i])
seg_image = label_to_color_image(deeplab_seg_map).astype(numpy.uint8)
pyplot.imshow(seg_image)
pyplot.axis('off')
pyplot.title('Deeplab v3 Segmentation')
i += 1
pyplot.subplot(grid_spec[i])
# resize icnet mask
icnet_seg_map = skimage.transform.resize(
icnet_seg_map[0, :, :],
deeplab_seg_map.shape,
preserve_range=True,
anti_aliasing=False,
order=0).astype('int')
seg_image = label_to_color_image(icnet_seg_map).astype(numpy.uint8)
pyplot.imshow(seg_image)
pyplot.axis('off')
pyplot.title('Fritz Segmentation')
i += 1
pyplot.subplot(grid_spec[i])
pyplot.imshow(image)
pyplot.imshow(seg_image, alpha=0.7)
pyplot.axis('off')
pyplot.title('Fritz Segmentation Overlay')
i += 1
pyplot.grid(False)
return fig
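# Hedged usage sketch (not in the original source): synthetic inputs in place
# of real model outputs, just to exercise the plotting code above.
if __name__ == '__main__':
    image = numpy.random.rand(128, 128, 3)
    deeplab_map = numpy.random.randint(0, len(LABEL_NAMES), (128, 128))
    icnet_map = numpy.random.randint(0, len(LABEL_NAMES), (1, 32, 32))
    vis_segmentation(image, deeplab_map, icnet_map)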
|
11498907
|
import unittest
import tempfile
import os
import caffe
class ElemwiseScalarLayer(caffe.Layer):
"""A layer that just multiplies by ten"""
def setup(self, bottom, top):
self.layer_params_ = eval(self.param_str_)
self.value_ = self.layer_params_['value']
if self.layer_params_['op'].lower() == 'add':
self._forward = self._forward_add
self._backward = self._backward_add
elif self.layer_params_['op'].lower() == 'mul':
self._forward = self._forward_mul
self._backward = self._backward_mul
else:
raise ValueError("Unknown operation type: '%s'"
% self.layer_params_['op'].lower())
def _forward_add(self, bottom, top):
top[0].data[...] = bottom[0].data + self.value_
def _backward_add(self, bottom, propagate_down, top):
bottom[0].diff[...] = top[0].diff
def _forward_mul(self, bottom, top):
top[0].data[...] = bottom[0].data * self.value_
def _backward_mul(self, bottom, propagate_down, top):
bottom[0].diff[...] = top[0].diff * self.value_
def reshape(self, bottom, top):
top[0].reshape(bottom[0].num, bottom[0].channels, bottom[0].height,
bottom[0].width)
def forward(self, bottom, top):
self._forward(bottom, top)
def backward(self, top, propagate_down, bottom):
self._backward(bottom, propagate_down, top)
def python_net_file():
f = tempfile.NamedTemporaryFile(mode='w', delete=False)  # text mode: we write a str
f.write(r"""name: 'pythonnet' force_backward: true
input: 'data' input_dim: 10 input_dim: 9 input_dim: 8 input_dim: 7
layer { type: 'Python' name: 'one' bottom: 'data' top: 'one'
python_param {
module: 'test_python_layer_with_param_str' layer: 'ElemwiseScalarLayer'
param_str: "{'op': 'add', 'value': 2}" } }
layer { type: 'Python' name: 'two' bottom: 'one' top: 'two'
python_param {
module: 'test_python_layer_with_param_str' layer: 'ElemwiseScalarLayer'
param_str: "{'op': 'mul', 'value': 3}" } }
layer { type: 'Python' name: 'three' bottom: 'two' top: 'three'
python_param {
module: 'test_python_layer_with_param_str' layer: 'ElemwiseScalarLayer'
param_str: "{'op': 'add', 'value': 10}" } }""")
f.close()
return f.name
class TestLayerWithParam(unittest.TestCase):
def setUp(self):
net_file = python_net_file()
self.net = caffe.Net(net_file, caffe.TRAIN)
os.remove(net_file)
def test_forward(self):
x = 8
self.net.blobs['data'].data[...] = x
self.net.forward()
for y in self.net.blobs['three'].data.flat:
self.assertEqual(y, (x + 2) * 3 + 10)
def test_backward(self):
x = 7
self.net.blobs['three'].diff[...] = x
self.net.backward()
for y in self.net.blobs['data'].diff.flat:
self.assertEqual(y, 3 * x)
def test_reshape(self):
s = 4
self.net.blobs['data'].reshape(s, s, s, s)
self.net.forward()
for blob in self.net.blobs.values():
for d in blob.data.shape:
self.assertEqual(s, d)
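if __name__ == '__main__':
    # Standard unittest entry point so this file can be run directly.
    unittest.main()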
|
11498922
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from builtins import NotImplementedError
from mmcv.cnn import kaiming_init, normal_init, trunc_normal_init
from ..utils import accuracy, accuracy_mixup
from ..registry import HEADS
from ..builder import build_loss
@HEADS.register_module
class ClsMixupHead(nn.Module):
"""Simplest classifier head, with only one fc layer.
*** Mixup and multi-label classification are supported ***
V1218, IP89, fix 'neg_weight' and 'eta_weight' usages
Args:
with_avg_pool (bool): Whether to use GAP before this head.
loss (dict): Config of classification loss.
in_channels (int): Number of channels in the input feature map.
num_classes (int): Number of categories excluding the background category.
multi_label (bool): Whether to use one_hot like labels (requiring the
multi-label classification loss). Note that a single-label cls task
may also use the multi-label cls loss.
two_hot (bool): Whether to use multi-hot label (two hot).
two_hot_scale (float): Rescale the sum of labels, in (0, 1]. The sum of
softmax labels is 1, while that of the two-hot labels is 2. This scalar
is used to rescale the sum of labels to (0, 2].
lam_scale_mode (str): The mode of rescaling two-hot or soft mixup labels,
in {'pow', 'exp', 'none'}. If mode!='none', rescaling the labels with
lam_thr and lam_idx. Default: "none".
lam_thr (float): Rescale threshold for two-hot labels, in [0,1].
lam_idx (float): Rescale factor for the exp or power function.
eta_weight (dict): The lam threshold that determines whether to use the eta weights. It
contains 'eta_weight=dict(eta=1, mode="both", thr=1)', where 'eta' denotes
the basic rescale factor of each lam term and 'mode' is the selection method.
If eta_weight['mode']=="both", add the eta_weight for the both lam term.
If eta_weight['mode']=="less", add the eta_weight for lam < thr.
If eta_weight['mode']=="more", add the eta_weight for lam > thr.
Default: dict(eta=1, mode="both", thr=0).
neg_weight (bool or float): Whether to remove (or reweight) the negative
part of loss according to gt_label (should be BCE multi-label loss).
Default: 1 (True).
frozen (bool): Whether to freeze the parameters.
"""
def __init__(self,
with_avg_pool=False,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
in_channels=2048,
num_classes=1000,
multi_label=False,
two_hot=False,
two_hot_scale=1,
lam_scale_mode='none',
lam_thr=1,
lam_idx=1,
eta_weight=dict(eta=0, mode="both", thr=0.5),
neg_weight=1,
frozen=False):
super(ClsMixupHead, self).__init__()
self.with_avg_pool = bool(with_avg_pool)
self.in_channels = int(in_channels)
self.num_classes = int(num_classes)
self.multi_label = bool(multi_label)
self.two_hot = bool(two_hot)
self.two_hot_scale = float(two_hot_scale)
self.lam_scale_mode = str(lam_scale_mode)
self.lam_thr = float(lam_thr)
self.lam_idx = float(lam_idx)
self.eta_weight = eta_weight
self.neg_weight = float(neg_weight) if float(neg_weight) != 1 else 1
assert lam_scale_mode in ['none', 'pow', 'exp']
assert eta_weight["mode"] in ['more', 'less', 'both'] and \
0 <= eta_weight["thr"] <= 1 and eta_weight["eta"] < 100
assert 0 < lam_thr <= 1 and -100 < lam_idx < 100
assert 0 < two_hot_scale <= 1 and 0 <= neg_weight <= 1
# loss
if loss is not None:
assert isinstance(loss, dict)
self.criterion = build_loss(loss)
else:
assert multi_label == False
loss = dict(type='CrossEntropyLoss', loss_weight=1.0)
self.criterion = build_loss(loss)
if self.neg_weight != 1:
assert 0 <= self.neg_weight <= 1, "the weight of the negative parts should not be \
larger than the positive part."
assert multi_label == True and loss['type'] == 'CrossEntropyLoss'
# fc layer
self.fc = nn.Linear(in_channels, num_classes)
if frozen:
self.frozen()
def frozen(self):
self.fc.eval()
for param in self.fc.parameters():
param.requires_grad = False
def init_weights(self, init_linear='normal', std=0.01, bias=0.):
assert init_linear in ['normal', 'kaiming', 'trunc_normal'], \
"Undefined init_linear: {}".format(init_linear)
for m in self.modules():
if isinstance(m, nn.Linear):
if init_linear == 'normal':
normal_init(m, std=std, bias=bias)
elif init_linear == 'kaiming':
kaiming_init(m, mode='fan_in', nonlinearity='relu')
elif init_linear == 'trunc_normal':
trunc_normal_init(m, std=std, bias=bias)
def forward(self, x):
assert isinstance(x, (tuple, list)) and len(x) == 1
x = x[0]
if self.with_avg_pool:
if x.dim() == 3:
x = F.adaptive_avg_pool1d(x, 1).view(x.size(0), -1)
elif x.dim() == 4:
x = F.adaptive_avg_pool2d(x, 1).view(x.size(0), -1)
else:
assert x.dim() in [2, 3, 4], \
"Tensor must has 2, 3 or 4 dims, got: {}".format(x.dim())
return [self.fc(x)]
def lambda_adjust(self, lam, mode="pow", thr=1, idx=1):
""" rescale lambda for two-hot label mixup classification
Args:
lam (float): The original lambda in [0,1].
mode (str): The rescale function, {'pow', 'exp'}.
thr (float): If lam < threshold, do rescale; else
lam=1. Threshold in (0,1].
idx (float): The index for power or exp functions.
"""
if lam >= thr:
lam = 1
else:
if mode == "pow":
lam = (thr ** (-abs(idx))) * (lam ** abs(idx))
elif mode == "exp":
b = (abs(idx)** (-thr*2)) * 1
k = 1 / (1 - b)
lam = ((abs(idx)** (lam - thr*2)) * (abs(idx) ** lam) - b) * k
else:
raise NotImplementedError
return lam
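# Worked example (illustrative): with mode="pow", thr=0.8, idx=2,
# lam=0.4 gives (0.8 ** -2) * 0.4 ** 2 = 0.25, while any lam >= 0.8
# is mapped straight to 1.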
def loss(self, cls_score, labels, label_mask=None, **kwargs):
r"""" mixup classification loss forward
Args:
cls_score (list): Score should be [tensor] of [N, d].
labels (tuple or tensor): Labels should be tensor [N, \*] by default.
If labels as tuple, it's used for CE mixup, (gt_a, gt_b, lambda).
label_mask (tensor): Mask (N,1) to indicate whether this idx is a
ground truth or pseudo label.
"""
single_label = False
losses = dict()
assert isinstance(cls_score, (tuple, list)) and len(cls_score) == 1
# 1. original one-hot classification
if not isinstance(labels, tuple):
# whether is the single label cls [N,] or multi-label cls [N,C]
single_label = \
labels.dim() == 1 or (labels.dim() == 2 and labels.shape[1] == 1)
# Notice: we allow the single-label cls using multi-label loss, thus
# * For single-label cls, loss = loss.sum() / N
# * For multi-label cls, loss = loss.sum() or loss.mean()
avg_factor = labels.size(0) if single_label else None
target = labels.clone()
if self.multi_label:
# convert to onehot labels
if single_label:
target = F.one_hot(target, num_classes=self.num_classes)
# default onehot cls
losses['loss'] = self.criterion(
cls_score[0], target, avg_factor=avg_factor, **kwargs)
# compute accuracy
losses['acc'] = accuracy(cls_score[0], labels)
# 2. mixup classification
else:
y_a, y_b, lam = labels
if isinstance(lam, torch.Tensor): # lam is scalar or tensor [N,1]
lam = lam.unsqueeze(-1)
# whether is the single label cls [N,] or multi-label cls [N,C]
single_label = \
y_a.dim() == 1 or (y_a.dim() == 2 and y_a.shape[1] == 1)
# Notice: we allow the single-label cls using multi-label loss, thus
# * For single-label cls, loss = loss.sum() / N
# * For multi-label cls, loss = loss.sum() or loss.mean()
avg_factor = y_a.size(0) if single_label else None
# 2.1 mixup (hard ce) cls (using softmax)
if not self.multi_label:
                assert self.two_hot is False
losses['loss'] = \
self.criterion(cls_score[0], y_a, avg_factor=avg_factor, **kwargs) * lam + \
self.criterion(cls_score[0], y_b, avg_factor=avg_factor, **kwargs) * (1 - lam)
else:
# convert to onehot (binary) for multi-label mixup cls
if single_label:
y_a = F.one_hot(y_a, num_classes=self.num_classes)
y_b = F.one_hot(y_b, num_classes=self.num_classes)
                # basic mixup labels: summed to 1
y_mixed = lam * y_a + (1 - lam) * y_b
use_eta_weight = None
class_weight = None
                # 2.2 mixup (sigmoid) multi-label summed to 2 (using two-hot loss)
if self.two_hot:
if self.lam_scale_mode != 'none':
lam_a = self.lambda_adjust(
lam, mode=self.lam_scale_mode, thr=self.lam_thr, idx=self.lam_idx)
lam_b = self.lambda_adjust(
1-lam, mode=self.lam_scale_mode, thr=self.lam_thr, idx=self.lam_idx)
if label_mask is not None:
lam_a = lam_a if label_mask[0] else lam
lam_b = lam_b if label_mask[1] else 1-lam
y_mixed = lam_a * y_a + lam_b * y_b
else:
y_mixed = y_a + y_b
                # 2.3 mixup (soft) single-label summed to 1 (using softmax)
else:
if self.eta_weight["eta"] != 0:
# whether to use eta
below_thr = lam < self.eta_weight["thr"]
if self.eta_weight["mode"] == 'less':
use_eta_weight = [lam, 0] if below_thr else [0, 1-lam]
elif self.eta_weight["mode"] == 'more':
use_eta_weight = [lam, 0] if not below_thr else [0, 1-lam]
else:
use_eta_weight = [lam, 1-lam] # 'both'
# eta rescale by lam
for i in range(len(use_eta_weight)):
if use_eta_weight[i] > 0:
if self.lam_scale_mode != 'none':
use_eta_weight[i] = self.eta_weight["eta"] * \
self.lambda_adjust(
use_eta_weight[i], mode=self.lam_scale_mode,
thr=self.lam_thr, idx=self.lam_idx)
else:
use_eta_weight[i] = self.eta_weight["eta"]
assert use_eta_weight[0] > 0 or use_eta_weight[1] > 0, \
"one of eta should be non-zero, lam={}, lam_={}".format(lam, 1-lam)
# rescale the sum of labels, each hot <= 1
if self.two_hot_scale < 1:
y_mixed = (y_mixed * self.two_hot_scale).clamp(max=1)
# remove neg in BCE loss
if self.neg_weight < 1:
class_weight = (y_mixed > 0).type(torch.float)
class_weight = class_weight.clamp(min=self.neg_weight)
losses['loss'] = self.criterion(
cls_score[0], y_mixed,
avg_factor=avg_factor, class_weight_override=class_weight,
eta_weight=use_eta_weight, **kwargs)
# compute accuracy
losses['acc'] = accuracy(cls_score[0], labels[0])
losses['acc_mix'] = accuracy_mixup(cls_score[0], labels)
return losses
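# Illustrative, standalone sketch (not part of the original module): the 'pow'
# branch of lambda_adjust above computes lam' = thr**(-|idx|) * lam**|idx| for
# lam < thr and returns 1 otherwise; thr/idx below are arbitrary example values.
if __name__ == '__main__':
    def _lambda_pow(lam, thr=0.7, idx=2.0):
        return 1.0 if lam >= thr else (thr ** (-abs(idx))) * (lam ** abs(idx))
    for _lam in (0.1, 0.35, 0.7, 0.9):
        print(_lam, '->', round(_lambda_pow(_lam), 4))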
|
11498927
|
import logging
from uuid import UUID
from typing import List, Tuple
from sqlalchemy import select, func, desc, update
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload
from acapy_client.api.revocation_api import RevocationApi
from acapy_client.model.revoke_request import RevokeRequest
from api.endpoints.models.v1.governance import TemplateStatusType
from api.services.v1 import tenant_service
from acapy_client.api.issue_credential_v1_0_api import IssueCredentialV10Api
from api.db.models.v1.contact import Contact
from api.db.models.v1.governance import CredentialTemplate
from api.db.models.v1.issuer import IssuerCredential, IssuerCredentialTimeline
from api.endpoints.models.credentials import CredentialStateType
from api.endpoints.models.v1.errors import (
IdNotMatchError,
IncorrectStatusError,
)
from api.endpoints.models.v1.issuer import (
IssuerCredentialListParameters,
IssuerCredentialItem,
IssuerCredentialContact,
IssuerCredentialAcapy,
IssuerCredentialTemplate,
OfferNewCredentialPayload,
IssuerCredentialStatusType,
IssuerCredentialTimelineItem,
UpdateIssuerCredentialPayload,
RevokeCredentialPayload,
)
from api.api_client_utils import get_api_client
issue_cred_v10_api = IssueCredentialV10Api(api_client=get_api_client())
revoc_api = RevocationApi(api_client=get_api_client())
logger = logging.getLogger(__name__)
def issuer_credential_to_item(
db_item: IssuerCredential, acapy: bool | None = False
) -> IssuerCredentialItem:
"""IssuerCredential to IssuerCredentialItem.
    Transform an IssuerCredential table record into an IssuerCredentialItem object.
Args:
db_item: The Traction database IssuerCredential
acapy: When True, populate the IssuerCredentialItem acapy field.
Returns: The Traction IssuerCredentialItem
"""
credential_template = IssuerCredentialTemplate(
credential_template_id=db_item.credential_template.credential_template_id,
name=db_item.credential_template.name,
cred_def_id=db_item.credential_template.cred_def_id,
revocation_enabled=db_item.credential_template.revocation_enabled,
)
contact = IssuerCredentialContact(
contact_id=db_item.contact.contact_id,
alias=db_item.contact.alias,
external_reference_id=db_item.contact.external_reference_id,
)
item = IssuerCredentialItem(
**db_item.dict(),
credential_template=credential_template,
contact=contact,
)
if acapy:
item.acapy = IssuerCredentialAcapy(
credential_exchange_id=db_item.credential_exchange_id,
revoc_reg_id=db_item.revoc_reg_id,
revocation_id=db_item.revocation_id,
)
return item
async def list_issuer_credentials(
db: AsyncSession,
tenant_id: UUID,
wallet_id: UUID,
parameters: IssuerCredentialListParameters,
) -> Tuple[List[IssuerCredentialItem], int]:
"""List Issuer Credentials.
Return a page of issuer credentials filtered by given parameters.
Args:
db: database session
tenant_id: Traction ID of tenant making the call
wallet_id: AcaPy Wallet ID for tenant
parameters: filters for Items
Returns:
items: The page of items
total_count: Total number of items matching criteria
"""
limit = parameters.page_size
skip = (parameters.page_num - 1) * limit
filters = [
IssuerCredential.tenant_id == tenant_id,
IssuerCredential.deleted == parameters.deleted,
]
if parameters.status:
filters.append(IssuerCredential.status == parameters.status)
if parameters.state:
filters.append(IssuerCredential.state == parameters.state)
if parameters.contact_id:
filters.append(IssuerCredential.contact_id == parameters.contact_id)
if parameters.cred_def_id:
filters.append(IssuerCredential.cred_def_id == parameters.cred_def_id)
if parameters.credential_template_id:
filters.append(
IssuerCredential.credential_template_id == parameters.credential_template_id
)
if parameters.external_reference_id:
filters.append(
IssuerCredential.external_reference_id == parameters.external_reference_id
)
if parameters.tags:
_filter_tags = [x.strip() for x in parameters.tags.split(",")]
filters.append(IssuerCredential.tags.comparator.contains(_filter_tags))
# build out a base query with all filters
base_q = select(IssuerCredential).filter(*filters)
# get a count of ALL records matching our base query
count_q = select([func.count()]).select_from(base_q)
count_q_rec = await db.execute(count_q)
total_count = count_q_rec.scalar()
# TODO: should we raise an exception if paging is invalid?
# ie. is negative, or starts after available records
# add in our paging and ordering to get the result set
results_q = (
base_q.limit(limit)
.offset(skip)
.options(
selectinload(IssuerCredential.contact),
selectinload(IssuerCredential.credential_template),
)
.order_by(desc(IssuerCredential.updated_at))
)
results_q_recs = await db.execute(results_q)
db_items = results_q_recs.scalars()
items = []
for db_item in db_items:
item = issuer_credential_to_item(db_item, parameters.acapy)
items.append(item)
return items, total_count
async def offer_new_credential(
db: AsyncSession,
tenant_id: UUID,
wallet_id: UUID,
payload: OfferNewCredentialPayload,
save_in_traction: bool | None = False,
) -> IssuerCredentialItem:
"""Offer new Credential.
    Create a Credential and offer it.
Args:
db: database session
tenant_id: Traction ID of tenant making the call
wallet_id: AcaPy Wallet ID for tenant
payload: Credential offer payload
save_in_traction: when True, store credential data in Traction
Returns:
item: The Traction Issuer Credential
Raises:
"""
# see if we are an issuer...
await tenant_service.is_issuer(tenant_id, wallet_id, True)
# need to find the contact/connection
# need to find the credential template/cred def
db_contact = None
db_credential_template = None
if payload.contact_id:
db_contact = await Contact.get_by_id(db, tenant_id, payload.contact_id)
elif payload.connection_id:
db_contact = await Contact.get_by_connection_id(
db, tenant_id, payload.connection_id
)
if payload.credential_template_id:
db_credential_template = await CredentialTemplate.get_by_id(
db, tenant_id, payload.credential_template_id
)
elif payload.cred_def_id:
db_credential_template = await CredentialTemplate.get_by_cred_def_id(
db, tenant_id, payload.cred_def_id
)
if db_credential_template.status != TemplateStatusType.active:
raise IncorrectStatusError(
code="issuer_credential.template.not-active",
title="Issuer Credential - Template not active",
detail=f"Cannot offer credential unless template status is {TemplateStatusType.active}.", # noqa: E501
)
# convert list of name/value tuples to an object
attributes = {}
credential_preview = {"attributes": []}
for attr in payload.attributes:
attributes[attr.name] = attr.value
credential_preview["attributes"].append(
{"name": attr.name, "value": attr.value}
)
# TODO: verify attributes match the cred def
# create a new "issued" credential record
db_item = IssuerCredential(
tenant_id=tenant_id,
credential_template_id=db_credential_template.credential_template_id,
cred_def_id=db_credential_template.cred_def_id,
contact_id=db_contact.contact_id,
status=IssuerCredentialStatusType.pending,
state=CredentialStateType.pending,
external_reference_id=payload.external_reference_id,
tags=payload.tags,
comment=payload.comment,
preview_persisted=save_in_traction,
credential_preview=credential_preview,
)
db.add(db_item)
await db.commit()
db_item = await IssuerCredential.get_by_id(
db, tenant_id, db_item.issuer_credential_id
)
item = issuer_credential_to_item(db_item, True)
return item
async def get_issuer_credential(
db: AsyncSession,
tenant_id: UUID,
wallet_id: UUID,
issuer_credential_id: UUID,
acapy: bool | None = False,
deleted: bool | None = False,
) -> IssuerCredentialItem:
"""Get Issuer Credential.
Find and return a Traction Issuer Credential by ID.
Args:
db: database session
tenant_id: Traction ID of tenant making the call
wallet_id: AcaPy Wallet ID for tenant
issuer_credential_id: Traction ID of Issuer Credential
acapy: When True, populate the Issuer Credential acapy field
deleted: When True, return Issuer Credential if marked as deleted
Returns: The Traction Issuer Credential
Raises:
NotFoundError: if the item cannot be found by ID and deleted flag
"""
db_item = await IssuerCredential.get_by_id(
db, tenant_id, issuer_credential_id, deleted
)
item = issuer_credential_to_item(db_item, acapy)
return item
async def get_issuer_credential_timeline(
db: AsyncSession,
issuer_credential_id: UUID,
) -> List[IssuerCredentialTimelineItem]:
"""Get Issuer Credential Timeline items.
Find and return the Traction Issuer Credential Timeline items. Timeline items
represent history of changes to Status and/or State. They will be sorted in
descending order of creation (newest first).
Args:
db: database session
issuer_credential_id: Traction ID of Issuer Credential
Returns: List of Issuer Credential Timeline items
"""
db_items = await IssuerCredentialTimeline.list_by_issuer_credential_id(
db, issuer_credential_id
)
results = []
for db_item in db_items:
results.append(IssuerCredentialTimeline(**db_item.dict()))
return results
async def update_issuer_credential(
db: AsyncSession,
tenant_id: UUID,
wallet_id: UUID,
issuer_credential_id: UUID,
payload: UpdateIssuerCredentialPayload,
) -> IssuerCredentialItem:
"""Update Issuer Credential.
Update a Traction Issuer Credential.
Note that not all fields can be modified. If they are present in the payload, they
will be ignored.
Args:
db: database session
tenant_id: Traction ID of tenant making the call
wallet_id: AcaPy Wallet ID for tenant
issuer_credential_id: Traction ID of item
payload: data fields to update.
Returns: The Traction IssuerCredentialItem
Raises:
NotFoundError: if the item cannot be found by ID and deleted flag
IdNotMatchError: if the item id parameter and in payload do not match
"""
# verify this item exists and is not deleted...
await IssuerCredential.get_by_id(db, tenant_id, issuer_credential_id, False)
# payload id must match parameter
if issuer_credential_id != payload.issuer_credential_id:
raise IdNotMatchError(
code="issuer_credential.update.id-not-match",
title="Issuer Credential ID mismatch",
detail=f"Issuer Credential ID in payload <{payload.issuer_credential_id}> does not match Issuer Credential ID requested <{issuer_credential_id}>", # noqa: E501
)
payload_dict = payload.dict()
# payload isn't the same as the db... move fields around
del payload_dict["issuer_credential_id"]
if not payload.status:
del payload_dict["status"]
q = (
update(IssuerCredential)
.where(IssuerCredential.tenant_id == tenant_id)
.where(IssuerCredential.issuer_credential_id == issuer_credential_id)
.values(payload_dict)
)
await db.execute(q)
await db.commit()
return await get_issuer_credential(db, tenant_id, wallet_id, issuer_credential_id)
async def delete_issuer_credential(
db: AsyncSession,
tenant_id: UUID,
wallet_id: UUID,
issuer_credential_id: UUID,
) -> IssuerCredentialItem:
"""Delete Issuer Credential.
Delete a Traction Issuer Credential.
Note that deletes are "soft" in Traction.
Args:
db: database session
tenant_id: Traction ID of tenant making the call
wallet_id: AcaPy Wallet ID for tenant
issuer_credential_id: Traction ID of item
Returns: The Traction IssuerCredentialItem
Raises:
NotFoundError: if the item cannot be found by ID and deleted flag
"""
q = (
update(IssuerCredential)
.where(IssuerCredential.tenant_id == tenant_id)
.where(IssuerCredential.issuer_credential_id == issuer_credential_id)
.values(
deleted=True,
status=IssuerCredentialStatusType.deleted,
state=CredentialStateType.abandoned,
)
)
await db.execute(q)
await db.commit()
return await get_issuer_credential(
db, tenant_id, wallet_id, issuer_credential_id, acapy=False, deleted=True
)
async def revoke_issuer_credential(
db: AsyncSession,
tenant_id: UUID,
wallet_id: UUID,
issuer_credential_id: UUID,
payload: RevokeCredentialPayload,
) -> IssuerCredentialItem:
# TODO: need to check permissions and status
# verify this item exists and is not deleted...
# payload id must match parameter
if issuer_credential_id != payload.issuer_credential_id:
raise IdNotMatchError(
code="issuer_credential.revoke.id-not-match",
title="Issuer Credential ID mismatch",
detail=f"Issuer Credential ID in payload <{payload.issuer_credential_id}> does not match Issuer Credential ID requested <{issuer_credential_id}>", # noqa: E501
)
db_item = await IssuerCredential.get_by_id(
db, tenant_id, issuer_credential_id, False
)
if db_item.status != IssuerCredentialStatusType.issued:
raise IncorrectStatusError(
code="issuer_credential.revoke.not-issued",
title="Issuer Credential not issued",
detail=f"Issuer Credential cannot be revoked unless status is {IssuerCredentialStatusType.issued}.", # noqa: E501
)
# no fancy workflow stuff, just revoke
rev_req = RevokeRequest(
comment=payload.comment if payload.comment else "",
connection_id=str(db_item.contact.connection_id),
rev_reg_id=db_item.revoc_reg_id,
cred_rev_id=db_item.revocation_id,
publish=True,
notify=True,
)
data = {"body": rev_req}
revoc_api.revocation_revoke_post(**data)
# update our status
q = (
update(IssuerCredential)
.where(IssuerCredential.tenant_id == tenant_id)
.where(IssuerCredential.issuer_credential_id == issuer_credential_id)
.values(
revoked=True,
status=IssuerCredentialStatusType.revoked,
state=CredentialStateType.credential_revoked,
revocation_comment=payload.comment,
)
)
await db.execute(q)
await db.commit()
return await get_issuer_credential(
db, tenant_id, wallet_id, issuer_credential_id, acapy=False
)
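# Hypothetical caller sketch (not part of this module; placeholders only). The
# services above are plain async callables, invoked from a router roughly like:
#
#   async def example(db: AsyncSession, tenant_id: UUID, wallet_id: UUID):
#       params = IssuerCredentialListParameters(page_num=1, page_size=10)
#       items, total = await list_issuer_credentials(db, tenant_id, wallet_id, params)
#       return {"items": items, "total": total}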
|
11498936
|
from typing import Optional, Sequence, TypeVar, Union, cast
import numpy as np
from rpcq.messages import ParameterAref
from pyquil.api._qam import QAM, QAMExecutionResult, QuantumExecutable
T = TypeVar("T")
class StatefulQAM(QAM[T]):
_loaded_executable: Optional[QuantumExecutable]
_result: Optional[QAMExecutionResult]
@classmethod
def wrap(cls, qam: QAM[T]) -> None:
"""
Mutate the provided QAM to add methods and data for backwards compatibility,
by dynamically mixing in this wrapper class.
"""
if not isinstance(qam, StatefulQAM):
qam.__class__ = type(str(qam.__class__.__name__), (StatefulQAM, qam.__class__), {})
qam = cast(StatefulQAM[T], qam)
qam.reset()
def load(self, executable: QuantumExecutable) -> "StatefulQAM[T]":
self._loaded_executable = executable.copy() # copy here because calls to self.write_memory() will mutate it
return self
def read_memory(self, region_name: str) -> Optional[np.ndarray]:
assert self._result is not None, "QAM#run must be called before QAM#read_memory"
data = self._result.readout_data.get(region_name)
return data
def reset(self) -> "StatefulQAM[T]":
self._loaded_executable = None
self._result = None
return self
def run(self) -> "StatefulQAM[T]": # type: ignore
assert self._loaded_executable is not None
self._result = super().run(self._loaded_executable)
return self
def wait(self) -> "StatefulQAM[T]":
return self
def write_memory(
self,
*,
region_name: str,
value: Union[int, float, Sequence[int], Sequence[float]],
offset: Optional[int] = None,
) -> "StatefulQAM[T]":
assert self._loaded_executable is not None, "Executable has not been loaded yet. Call QAM#load first"
parameter_aref = ParameterAref(name=region_name, index=offset or 0)
self._loaded_executable._memory._write_value(parameter=parameter_aref, value=value)
return self
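# Standalone illustration (toy classes, not pyquil) of the dynamic mix-in trick
# used by StatefulQAM.wrap above: the instance keeps its identity while its
# class is replaced by a new type deriving from both wrapper and original.
if __name__ == '__main__':
    class _Base:
        def run(self) -> str:
            return 'base-run'

    class _Wrapper:
        def load(self, executable: str) -> '_Wrapper':
            self._loaded = executable
            return self

    obj = _Base()
    if not isinstance(obj, _Wrapper):
        obj.__class__ = type(_Base.__name__, (_Wrapper, _Base), {})
    print(isinstance(obj, _Wrapper), obj.load('exe')._loaded, obj.run())
    # -> True exe base-run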
|
11498942
|
import sys
import binascii
import socket
import struct
def ip_to_hex(ip):
return binascii.hexlify(socket.inet_aton(ip)).decode('utf-8')
def carry_around_add(a, b):
c = a + b
return (c & 0xffff) + (c >> 16)
def compute_checksum(data):
    # data is a bytes object; fold 16-bit little-endian words with a carry-around add
    s = 0
    for i in range(0, len(data), 2):
        w = data[i] + (data[i+1] << 8)
        s = carry_around_add(s, w)
    return ~s & 0xffff
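# Example with illustrative bytes: for b"\x45\x00\x00\x1c" the little-endian
# 16-bit words are 0x0045 and 0x1c00, their carry-around sum is 0x1c45, and
# compute_checksum returns ~0x1c45 & 0xffff == 0xe3ba.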
def format_data(data):
    # split the hex string into byte pairs and pack them as raw bytes
    pairs = [data[i:i+2] for i in range(0, len(data), 2)]
    values = [int(x, 16) for x in pairs]
    return struct.pack("%dB" % len(values), *values)
def main():
with open(sys.argv[1], 'r') as test_cases:
for test in test_cases:
test = test.strip().split(' ')
src = ip_to_hex(test[0])
dst = ip_to_hex(test[1])
package = ''.join(test[2:])
            ihl = int(package[1], 16)  # IHL is a single hex digit
header_length = 48 if (ihl > 5) else 40
header = package[:header_length]
new_header = header[:20] + '0000' + src + dst + header[40:]
checksum = format(compute_checksum(format_data(new_header)), 'x').zfill(4)
new_header = new_header[:20] + checksum[2:4] + checksum[0:2] + new_header[24:]
print(' '.join([new_header[i:i+2] for i in range(0, len(new_header), 2)]))
if __name__ == '__main__':
main()
|
11498951
|
c = get_config()
c.ServePostProcessor.ip = u'*'
c.ServePostProcessor.port = 8000
c.ServePostProcessor.open_in_browser = False
|
11498976
|
import numpy as np
friends_details_dtypes = {
"contributors_enabled": np.int8,
"created_at": np.datetime64,
"default_profile": np.int8,
"default_profile_image": np.int8,
"description": str,
"entities_description_urls": str,
"entities_url_urls": str,
"favourites_count": np.int64,
"follow_request_sent": np.int8,
"followers_count": np.int64,
"following": np.int8,
"friends_count": np.int64,
"geo_enabled": np.int8,
"has_extended_profile": np.int8,
"id": np.int64,
"id_str": str,
"is_translation_enabled": np.int8,
"is_translator": np.int8,
"lang": str,
"listed_count": np.int64,
"location": str,
"name": str,
"needs_phone_verification": np.int8,
"notifications": np.int8,
"profile_background_color": str,
"profile_background_image_url": str,
"profile_background_image_url_https": str,
"profile_background_tile": np.int8,
"profile_banner_url": str,
"profile_image_url": str,
"profile_image_url_https": str,
"profile_link_color": str,
"profile_sidebar_border_color": str,
"profile_sidebar_fill_color": str,
"profile_text_color": str,
"profile_use_background_image": np.int8,
"protected": np.int8,
"screen_name": str,
"status_contributors": str,
"status_coordinates": str,
"status_coordinates_coordinates": str,
"status_coordinates_type": str,
"status_created_at": np.datetime64,
"status_entities_hashtags": str,
"status_entities_media": str,
"status_entities_symbols": str,
"status_entities_urls": str,
"status_entities_user_mentions": str,
"status_extended_entities_media": str,
"status_favorite_count": np.int64,
"status_favorited": np.int8,
"status_geo": str,
"status_geo_coordinates": str,
"status_geo_type": str,
"status_id": np.int64,
"status_id_str": str,
"status_in_reply_to_screen_name": str,
"status_in_reply_to_status_id": np.int64,
"status_in_reply_to_status_id_str": str,
"status_in_reply_to_user_id": np.int64,
"status_in_reply_to_user_id_str": str,
"status_is_quote_status": np.int8,
"status_lang": str,
"status_place": str,
"status_place_bounding_box_coordinates": str,
"status_place_bounding_box_type": str,
"status_place_contained_within": str,
"status_place_country": str,
"status_place_country_code": str,
"status_place_full_name": str,
"status_place_id": str,
"status_place_name": str,
"status_place_place_type": str,
"status_place_url": str,
"status_possibly_sensitive": np.int8,
"status_quoted_status_id": np.int64,
"status_quoted_status_id_str": str,
"status_retweet_count": np.int64,
"status_retweeted": np.int8,
"status_retweeted_status_contributors": str,
"status_retweeted_status_coordinates": str,
"status_retweeted_status_created_at": np.datetime64,
"status_retweeted_status_entities_hashtags": str,
"status_retweeted_status_entities_media": str,
"status_retweeted_status_entities_symbols": str,
"status_retweeted_status_entities_urls": str,
"status_retweeted_status_entities_user_mentions": str,
"status_retweeted_status_extended_entities_media": str,
"status_retweeted_status_favorite_count": np.int64,
"status_retweeted_status_favorited": np.int8,
"status_retweeted_status_geo": str,
"status_retweeted_status_id": np.int64,
"status_retweeted_status_id_str": str,
"status_retweeted_status_in_reply_to_screen_name": str,
"status_retweeted_status_in_reply_to_status_id": np.int64,
"status_retweeted_status_in_reply_to_status_id_str": str,
"status_retweeted_status_in_reply_to_user_id": np.int64,
"status_retweeted_status_in_reply_to_user_id_str": str,
"status_retweeted_status_is_quote_status": np.int8,
"status_retweeted_status_lang": str,
"status_retweeted_status_place": str,
"status_retweeted_status_possibly_sensitive": np.int8,
"status_retweeted_status_quoted_status_id": np.int64,
"status_retweeted_status_quoted_status_id_str": str,
"status_retweeted_status_retweet_count": np.int64,
"status_retweeted_status_retweeted": np.int8,
"status_retweeted_status_source": str,
"status_retweeted_status_full_text": str,
"status_retweeted_status_truncated": np.int8,
"status_source": str,
"status_full_text": str,
"status_truncated": np.int8,
"statuses_count": np.int64,
"suspended": np.int8,
"time_zone": str,
"translator_type": str,
"url": str,
"verified": np.int8,
"utc_offset": str
}
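# Illustrative use (an assumption about intent, not from the original module):
# a few of the numeric fields above assembled into a numpy structured dtype.
if __name__ == '__main__':
    demo_dtype = np.dtype([
        ('id', np.int64),
        ('followers_count', np.int64),
        ('verified', np.int8),
    ])
    row = np.array([(12345, 678, 1)], dtype=demo_dtype)
    print(row['followers_count'][0])  # -> 678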
|
11498983
|
from server import db, bcrypt
from server.models.orm import TeacherModel
from server.config import RestErrors
import email_validator
from datetime import datetime
import os
from werkzeug.datastructures import FileStorage
from server.parsing import parser
from server.parsing.utils import create_chat_df, create_students_df
# The class is responsible for validating different inputs to the API
class Validators:
def __init__(self, supported_chat_file_ext, supported_student_files_ext, max_students_in_class,
invalid_username_chars="", min_password_len=0, required_password_chars=[]):
#TODO: documentation
self._supported_chat_file_ext = supported_chat_file_ext
self._supported_student_files_ext = supported_student_files_ext
self._max_students_in_class = max_students_in_class
self._invalid_username_chars = invalid_username_chars
self._min_password_len = min_password_len
self._required_password_chars = required_password_chars
@classmethod
def from_object(cls, config):
"""
        The function will init an instance from a config object
"""
return cls(
config.CHAT_FILE_EXT,
config.STUDENTS_FILE_EXT,
config.MAX_STUDENTS_IN_CLASS,
config.INVALID_USERNAME_CHARS,
config.MIN_PASSWORD_LEN,
config.REQUIRED_PASSWORD_CHARS
)
def username(self, value):
"""
        The function will validate a username (make sure it is unique and doesn't contain illegal chars)
:param value: the username value to check (str)
:return: username if valid (str)
"""
value = str(value)
if TeacherModel.query.filter_by(username=value).first():
raise ValueError(RestErrors.USERNAME_TAKEN)
if Validators.any_of_chars_match(self._invalid_username_chars, value):
raise ValueError(RestErrors.ILLEGAL_USERNAME_CHARS)
return value
def email(self, value):
"""
The function will make sure the email is in the correct format and unique
:param value: the email value to check (str)
:return: email if valid (str)
"""
value = str(value)
if TeacherModel.query.filter_by(email=value).first():
raise ValueError(RestErrors.EMAIL_TAKEN)
return email_validator.validate_email(value).email
def password(self, value):
"""
        The function will check if the password is valid (contains at least one char from each required group and meets the min length)
:param value: the password to check (str)
:return: hashed password to store in the db (str)
"""
value = str(value)
if len(value) < self._min_password_len:
raise ValueError(RestErrors.PASSWORD_TO_SHORT)
for chars_list in self._required_password_chars:
if not Validators.any_of_chars_match(chars_list, value):
raise ValueError(RestErrors.PASSWORD_MUST_CONTAIN)
return bcrypt.generate_password_hash(value).decode('utf-8')
def date(self, value):
"""
The function will check that the date is given in the correct format
:param value: the input unix timestamp (integer)
:return value: the date (datetime)
"""
try:
value = int(value)
return datetime.fromtimestamp(value)
        except (TypeError, ValueError, OverflowError, OSError):
raise ValueError(RestErrors.INVALID_TIME_STAMP)
def students_file(self, value):
"""
        The function will make sure the student file has the right extension
:param value: the student file (FileStorage)
:return: all the students from the file (Pandas DataFrame)
"""
ext = Validators.check_ext(value.filename, self._supported_student_files_ext)
if not ext:
raise ValueError(RestErrors.INVALID_STUDENTS_FILE)
students_df = create_students_df(ext, value.stream)
if students_df.shape[0] > self._max_students_in_class:
raise ValueError(RestErrors.TO_MANY_RECORDS)
if students_df.empty:
raise ValueError(RestErrors.INVALID_STUDENTS_FILE)
return parser.parse_df(students_df)
def chat_file(self, value):
"""
        The function will make sure the chat file has the right extension
:param value: the chat file (FileStorage)
:return: The chat as dataframe (Pandas Dataframe)
"""
if not Validators.check_ext(value.filename, self._supported_chat_file_ext):
raise ValueError(RestErrors.INVALID_CHAT_FILE)
chat_file = value.stream.read().decode("utf-8").split("\n") #TODO: check this in test
chat_df = create_chat_df(chat_file)
if chat_df.empty:
raise ValueError(RestErrors.INVALID_CHAT_FILE)
return chat_df
@staticmethod
def check_ext(file_name, extensions):
"""
        The function will check if the filename ends with one of the given extensions
        :param file_name: the name of the file (str)
        :param extensions: list of extensions to check (list)
        :return: the matching extension, or None if none matches (str)
"""
file_name = file_name.replace('"', "") # Because of weird quotes postman adds to the filename
for ext in extensions:
if file_name.endswith(ext):
return ext
@staticmethod
def any_of_chars_match(chars, string):
"""
The function will check if any of the chars in one string is in the second string
:param chars: list of chars to check (iterator of chars)
:param string: string to check in (str)
:return: the answer to the boolean question (bool)
"""
return any([c in string for c in chars])
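# Quick standalone check of the two static helpers (illustrative values only;
# running this still requires the module's server imports to resolve):
if __name__ == '__main__':
    print(Validators.check_ext('chat_log.txt', ['.txt', '.csv']))  # -> '.txt'
    print(Validators.any_of_chars_match('!@#', 'user@example'))    # -> True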
|
11498996
|
import copy
import time
import joblib
from functools import wraps
import cv2
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.cluster import KMeans
# from sklearn.externals import joblib as skjoblib
from sklearn.preprocessing import LabelEncoder
from skimage import measure
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
import tensorflow as tf
from consts import (
FONT_SIZE,
FONT_FP,
LE_FP,
UNICODE_MAP_FP
)
font = ImageFont.truetype(
FONT_FP, FONT_SIZE, encoding="utf-8"
)
# load label encoder
with open(LE_FP, "rb") as f:
le = joblib.load(f)
# load unicode mapping
with open(UNICODE_MAP_FP, "rb") as f:
unicode_map = joblib.load(f)
def norm_mean_std(img):
img = img / 255
img = img.astype('float32')
mean = np.mean(img, axis=(0, 1, 2))
std = np.std(img, axis=(0, 1, 2))
img = (img - mean) / std
return img
def load_image(img,
img_size=(512, 512),
expand=False,
return_hw=False):
if isinstance(img, str):
img = cv2.imread(img)[:, :, ::-1]
h, w, _ = img.shape
img = norm_mean_std(img)
img = cv2.resize(img, img_size)
if expand:
img = np.expand_dims(img, axis=0)
if return_hw:
return img, h, w
return img
def get_mask(img, labels):
"""Reference
"""
mask = np.zeros((img.shape[0], img.shape[1], 2), dtype='float32')
if isinstance(labels, str):
labels = np.array(labels.split(' ')).reshape(-1, 5)
for char, x, y, w, h in labels:
x, y, w, h = int(x), int(y), int(w), int(h)
if x + w >= img.shape[1] or y + h >= img.shape[0]:
continue
mask[y: y + h, x: x + w, 0] = 1
radius = 6
mask[y + h // 2 - radius: y + h // 2 + radius + 1, x +
w // 2 - radius: x + w // 2 + radius + 1, 1] = 1
return mask
def load_mask(img, label):
try:
mask = get_mask(img, label)
mask = mask.astype(np.float32)
except Exception:
mask = None
return mask
def resize_padding(img, desired_size=640):
"""https://jdhao.github.io/2017/11/06/resize-image-to-square-with-padding/
"""
ratio = float(desired_size) / max(img.shape)
new_size = tuple([int(dim * ratio) for dim in img.shape[:2]])
# resize img
rimg = cv2.resize(img, (new_size[1], new_size[0]))
delta_w = desired_size - new_size[1]
delta_h = desired_size - new_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
# make padding
color = [0, 0, 0]
rimg = cv2.copyMakeBorder(rimg, top, bottom, left,
right, cv2.BORDER_CONSTANT, value=color)
return rimg
def deunicode(char):
"""
e.g: U+770C --> 県
"""
return chr(int(char[2:], 16))
def minmax_scaler(img):
img = ((img - img.min()) * (1 / (img.max() - img.min()) * 255)).astype('uint8') # noqa
return img
def show_arrs(imgs, rows=4, cols=5):
fig = plt.figure(figsize=(16, 16))
for i in range(1, cols * rows + 1):
img = imgs[i - 1]
fig.add_subplot(rows, cols, i)
plt.imshow(img)
plt.show()
def show_imgs(img_fps,
net=None,
font=None,
norm=False,
pad=False,
img_size=64,
rows=4,
cols=5):
fig = plt.figure(figsize=(16, 16))
for i in range(1, cols * rows + 1):
img = mpimg.imread(img_fps[i - 1])
        # for some unknown reason it cannot plot on CPU and raises ValueError :D
# ValueError: Floating point image RGB values must be in the 0..1 range. # noqa
if norm:
img = norm_mean_std(img)
if pad:
img = resize_padding(img, img_size)
else:
img = cv2.resize(img, (img_size, img_size))
ax = fig.add_subplot(rows, cols, i)
# TODO: add font size
if font is not None:
ax.title.set_font_properties(font)
if net is not None:
pimg = np.expand_dims(img, axis=0)
y_pred = net.predict(pimg)[0]
y_argmax = np.argmax(y_pred)
pred_label_unicode = le.classes_[y_argmax]
pred_label = deunicode(pred_label_unicode)
ax.title.set_text(pred_label)
if not tf.test.is_gpu_available():
img = minmax_scaler(img)
plt.imshow(img)
plt.show()
return ax
def timer(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
print(">>> Function {} tooks {}'s".format(func.__name__, end - start))
return result
return wrapper
def prepare_targets(y_train, y_test):
le = LabelEncoder()
le.fit(y_train)
y_train_enc = le.transform(y_train)
y_test_enc = le.transform(y_test)
return le, y_train_enc, y_test_enc
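# e.g. (illustrative labels):
#   le, y_tr, y_te = prepare_targets(['cat', 'dog', 'cat'], ['dog', 'cat'])
#   # y_tr -> [0, 1, 0], y_te -> [1, 0]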
@timer
def visualize_training_data(image_fn,
labels,
width=3,
y_first=False):
# Convert annotation string to array
labels = np.array(labels.split(' ')).reshape(-1, 5)
# Read image
imsource = Image.open(image_fn).convert('RGBA')
bbox_canvas = Image.new('RGBA', imsource.size)
char_canvas = Image.new('RGBA', imsource.size)
# Separate canvases for boxes and chars
bbox_draw = ImageDraw.Draw(bbox_canvas)
char_draw = ImageDraw.Draw(char_canvas)
for codepoint, *args in labels: # noqa
if y_first:
y, x, h, w = args
else:
x, y, w, h = args
x, y, w, h = int(x), int(y), int(w), int(h)
try:
# Convert codepoint to actual unicode character
char = unicode_map[codepoint]
except KeyError:
            # some codepoints do not exist in unicode_map :/
print(codepoint)
continue
# Draw bounding box around character, and unicode character next to it
bbox_draw.rectangle(
(x, y, x + w, y + h), fill=(255, 255, 255, 0),
outline=(255, 0, 0, 255), width=width
)
char_draw.text(
(x + w + FONT_SIZE / 4, y + h / 2 - FONT_SIZE),
char, fill=(0, 0, 255, 255),
font=font
)
imsource = Image.alpha_composite(
Image.alpha_composite(imsource, bbox_canvas), char_canvas)
# Remove alpha for saving in jpg format.
imsource = imsource.convert("RGB")
return np.asarray(imsource)
# utils
def get_centers(mask):
"""find center points by using contour method
:return: [(y1, x1), (y2, x2), ...]
"""
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
centers = []
for cnt in contours:
M = cv2.moments(cnt)
if M['m00'] > 0:
cx = M['m10'] / M['m00']
cy = M['m01'] / M['m00']
else:
cx, cy = cnt[0][0]
cy = int(np.round(cy))
cx = int(np.round(cx))
centers.append([cy, cx])
centers = np.array(centers)
return centers
def get_labels(center_coords,
pred_bbox):
kmeans = KMeans(len(center_coords), init=center_coords)
kmeans.fit(center_coords) # noqa
x, y = np.where(pred_bbox > 0)
pred_cluster = kmeans.predict(list(zip(x, y)))
pred_bbox_ = copy.deepcopy(pred_bbox)
pred_bbox_[x, y] = pred_cluster
return pred_bbox_
def draw_rects(center_coords,
bbox_cluster,
o_img):
img = copy.deepcopy(o_img)
for cluster_index in range(len(center_coords))[1:]:
char_pixel = (bbox_cluster == cluster_index).astype(np.float32)
horizontal_indicies = np.where(np.any(char_pixel, axis=0))[0]
vertical_indicies = np.where(np.any(char_pixel, axis=1))[0]
x_min, x_max = horizontal_indicies[[0, -1]]
y_min, y_max = vertical_indicies[[0, -1]]
cv2.rectangle(img, (x_min, y_min), (x_max, y_max), (0, 255, 0), 1)
return img
@timer
def get_prediction(model,
img_fp,
bbox_thres=0.01,
center_thres=0.02,
show=True):
print(img_fp)
o_img = load_image(img_fp, expand=False)
o_img = np.expand_dims(o_img, axis=0)
# predict
start = time.time()
pred_mask = model.predict(o_img)
print(">>> Inference time: {}'s".format(time.time() - start))
pred_bbox, pred_center = pred_mask[0][:, :, 0], pred_mask[0][:, :, 1]
pred_bbox = (pred_bbox > bbox_thres).astype(np.float32)
pred_center = (pred_center > center_thres).astype(np.float32)
assert pred_bbox.shape == pred_center.shape
center_coords = get_centers(pred_center.astype(np.uint8))
no_center_points = len(center_coords)
print(">>> N.o center points: {}".format(no_center_points))
if len(center_coords) == 0:
print(">>> Non-text")
plt.imshow(np.squeeze(o_img))
return
bbox_cluster = get_labels(center_coords, pred_bbox)
plt_img = draw_rects(center_coords, bbox_cluster, np.squeeze(o_img))
return center_coords, o_img[0], plt_img, pred_bbox, pred_center, plt_img, bbox_cluster # noqa
@timer
def visual_pred_gt(model,
img_fp,
img_labels,
bbox_thres=0.01,
center_thres=0.02):
# test_id = img_fp.split("/")[-1][:-4]
# img_labels = df_train[df_train["image_id"].isin(
# [test_id])]["labels"].values[0]
char_labels = np.array(img_labels.split(' ')).reshape(-1, 5)
# visual gt
img = visualize_training_data(img_fp, img_labels, width=5)
# visual pred
oimg, oh, ow = load_image(img_fp, return_hw=True)
oimg = np.expand_dims(oimg, axis=0)
start = time.time()
pred_mask = model.predict(oimg)
print(">>> Inference time: {}'s".format(time.time() - start))
pred_bbox, pred_center = pred_mask[0][:, :, 0], pred_mask[0][:, :, 1]
pred_bbox = (pred_bbox > bbox_thres).astype(np.float32)
pred_center = (pred_center > center_thres).astype(np.float32)
assert pred_bbox.shape == pred_center.shape
center_coords = get_centers(pred_center.astype(np.uint8))
no_center_points = len(center_coords)
print(">>> no predicted center: {}".format(no_center_points))
print(">>> Gt no center points: {}".format(len(char_labels)))
if len(center_coords) == 0:
print(">>> Non-text")
return img
y_ratio = oh / 512
x_ratio = ow / 512
print(y_ratio, x_ratio)
# draw centers
print(center_coords.shape)
for y, x in center_coords:
x = int(x * x_ratio)
y = int(y * y_ratio)
cv2.circle(img, (x, y), 3, (0, 255, 0), 5)
if no_center_points > 0:
bbox_cluster = get_labels(center_coords, pred_bbox)
for cluster_index in range(len(center_coords))[1:]:
char_pixel = (bbox_cluster == cluster_index).astype(np.float32)
try:
horizontal_indicies = np.where(np.any(char_pixel, axis=0))[0]
vertical_indicies = np.where(np.any(char_pixel, axis=1))[0]
x_min, x_max = horizontal_indicies[[0, -1]]
y_min, y_max = vertical_indicies[[0, -1]]
except IndexError:
continue
x = x_min
y = y_min
w = x_max - x_min
h = y_max - y_min
# resize to origin yx
x = int(x * x_ratio)
w = int(w * x_ratio)
y = int(y * y_ratio)
h = int(h * y_ratio)
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 3)
return img
def make_contours(masks, flatten=True):
"""
    flatten: follow COCO's API convention (flattened [x1, y1, x2, y2, ...] lists)
"""
if masks.ndim == 2:
masks = np.expand_dims(masks, axis=-1)
masks = masks.transpose((2, 0, 1))
segment_objs = []
for mask in masks:
contours = measure.find_contours(mask, 0.5)
for contour in contours:
contour = np.flip(contour, axis=1)
if flatten:
segmentation = contour.ravel().tolist()
else:
segmentation = contour.tolist()
segment_objs.append(segmentation)
return segment_objs
def filter_polygons_points_intersection(polygon_contours, center_coords):
"""https://github.com/huyhoang17/machine-learning-snippets/blob/master/filter_polygons_points_intersection.py
"""
# checking if polygon contains point
final_cons = []
for con in polygon_contours:
polygon = Polygon(zip(con[::2], con[1::2]))
for center in center_coords:
point = Point(center[1], center[0])
if polygon.contains(point):
final_cons.append(con)
break
return final_cons
def vis_pred_bbox_polygon(pred_bbox, cons):
"""
pred_bbox: 1st mask
    cons: list of contours returned from the `make_contours` method
"""
mask_ = Image.new('1', (512, 512))
mask_draw = ImageDraw.ImageDraw(mask_, '1')
for contour in cons:
mask_draw.polygon(contour, fill=1)
mask_ = np.array(mask_).astype(np.uint8)
return mask_ * 255
def vis_pred_center(center_points, rad=2, img_size=(512, 512)):
# center_points = get_centers(pred_center.astype(np.uint8))
    img = np.zeros(img_size)
pil_img = Image.fromarray(img).convert('RGBA')
center_canvas = Image.new('RGBA', pil_img.size)
center_draw = ImageDraw.Draw(center_canvas)
for point in center_points:
y, x = point
# x1, y1, x2, y2
center_draw.ellipse(
(x - rad, y - rad, x + rad, y + rad), fill='blue', outline='blue'
)
res_img = Image.alpha_composite(pil_img, center_canvas)
res_img = res_img.convert("RGB")
res_img = np.asarray(res_img)
return res_img
def vis_pred_bbox(pred_bbox, center_coords, width=6):
"""
pred_bbox: 1st mask
    center_coords: list of center point coordinates [[y1, x1], [y2, x2], ...]
"""
bbox_cluster = get_labels(center_coords, pred_bbox)
img = np.zeros((512, 512))
pil_img = Image.fromarray(img).convert('RGBA')
bbox_canvas = Image.new('RGBA', pil_img.size)
bbox_draw = ImageDraw.Draw(bbox_canvas)
# center_canvas = Image.new('RGBA', pil_img.size)
# center_draw = ImageDraw.Draw(center_canvas)
# exclude background index
for cluster_index in range(len(center_coords))[1:]:
char_pixel = (bbox_cluster == cluster_index).astype(np.float32)
horizontal_indicies = np.where(np.any(char_pixel, axis=0))[0]
vertical_indicies = np.where(np.any(char_pixel, axis=1))[0]
x_min, x_max = horizontal_indicies[[0, -1]]
y_min, y_max = vertical_indicies[[0, -1]]
# draw polygon
bbox_draw.rectangle(
(x_min, y_min, x_max, y_max), fill=(255, 255, 255, 0),
outline=(255, 0, 0, 255), width=width
)
# draw center
res_img = Image.alpha_composite(pil_img, bbox_canvas)
res_img = res_img.convert("RGB")
res_img = np.asarray(res_img)
# normalize image
res_img = res_img / 255
res_img = res_img.astype(np.float32)
return res_img
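# Minimal self-contained check of get_centers on a synthetic mask (assumes the
# two-value OpenCV 4.x findContours signature used in the function above).
if __name__ == '__main__':
    demo_mask = np.zeros((32, 32), dtype=np.uint8)
    demo_mask[4:10, 4:10] = 1    # one square blob
    demo_mask[20:26, 18:24] = 1  # a second blob
    print(get_centers(demo_mask))  # approx. (y, x) centers of the two blobs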
|
11499006
|
import numpy as np
'''
See paper: Sensors 2018, 18(4), 1055; https://doi.org/10.3390/s18041055
"Divide and Conquer-Based 1D CNN Human Activity Recognition Using Test Data Sharpening"
by <NAME> & <NAME>
This code checks the data size & activity label constitution of the
Opportunity UCI Dataset data used in the experiment.
NOTE:
[Label] [Activity]
1 - Locomotion - Stand
2 - Locomotion - Walk
4 - Locomotion - Sit
5 - Locomotion - Lie
'''
dir_path = '../data/OpportunityUCIDataset/processed/'
print "-------------------------------------------------"
print " LOWER BODY SENSORS DATA"
print "-------------------------------------------------"
X_train = np.load(dir_path + "lower_train_X.npy")
y_train = np.load(dir_path + "lower_train_y.npy")
X_valid = np.load(dir_path + "lower_valid_X.npy")
y_valid = np.load(dir_path + "lower_valid_y.npy")
X_test = np.load(dir_path + "lower_test_X.npy")
y_test = np.load(dir_path + "lower_test_y.npy")
print "lower_train_X: ", X_train.shape
print "lower_valid_X: ", X_valid.shape
print "lower_test_X: ", X_test.shape
unique, counts = np.unique(y_train, return_counts=True)
print "--- lower_train_y: label freq ---"
print np.asarray((unique, counts)).T, '\n'
unique, counts = np.unique(y_valid, return_counts=True)
print "--- lower_valid_y: label freq ---"
print np.asarray((unique, counts)).T, '\n'
unique, counts = np.unique(y_test, return_counts=True)
print "--- lower_test_y: label freq ---"
print np.asarray((unique, counts)).T, '\n'
print "-------------------------------------------------"
print " UPPER BODY SENSORS DATA"
print "-------------------------------------------------"
X_train = np.load(dir_path + "upper_train_X.npy")
y_train = np.load(dir_path + "upper_train_y.npy")
X_valid = np.load(dir_path + "upper_valid_X.npy")
y_valid = np.load(dir_path + "upper_valid_y.npy")
X_test = np.load(dir_path + "upper_test_X.npy")
y_test = np.load(dir_path + "upper_test_y.npy")
print "upper_train_X: ", X_train.shape
print "upper_valid_X: ", X_valid.shape
print "upper_test_X: ", X_test.shape
unique, counts = np.unique(y_train, return_counts=True)
print "--- upper_train_y: label freq ---"
print np.asarray((unique, counts)).T, '\n'
unique, counts = np.unique(y_valid, return_counts=True)
print "--- upper_valid_y: label freq ---"
print np.asarray((unique, counts)).T, '\n'
unique, counts = np.unique(y_test, return_counts=True)
print "--- upper_test_y: label freq ---"
print np.asarray((unique, counts)).T, '\n'
print "-------------------------------------------------"
print " RAW BODY SENSORS DATA"
print "-------------------------------------------------"
X_train = np.load(dir_path + "raw_train_X.npy")
y_train = np.load(dir_path + "raw_train_y.npy")
X_valid = np.load(dir_path + "raw_valid_X.npy")
y_valid = np.load(dir_path + "raw_valid_y.npy")
X_test = np.load(dir_path + "raw_test_X.npy")
y_test = np.load(dir_path + "raw_test_y.npy")
print "raw_train_X: ", X_train.shape
print "raw_valid_X: ", X_valid.shape
print "raw_test_X: ", X_test.shape
unique, counts = np.unique(y_train, return_counts=True)
print "--- raw_train_y: label freq ---"
print np.asarray((unique, counts)).T, '\n'
unique, counts = np.unique(y_valid, return_counts=True)
print "--- raw_valid_y: label freq ---"
print np.asarray((unique, counts)).T, '\n'
unique, counts = np.unique(y_test, return_counts=True)
print "--- raw_test_y: label freq ---"
print np.asarray((unique, counts)).T, '\n'
'''
/usr/bin/python2.7 /home/hcilab/Documents/OSS/sensors2018cnnhar/opp/ref_opp_data_statistics.py
-------------------------------------------------
LOWER BODY SENSORS DATA
-------------------------------------------------
lower_train_X: (28938, 156)
lower_valid_X: (13609, 156)
lower_test_X: (13464, 156)
--- lower_train_y: label freq ---
[[ 1 13250]
[ 2 7403]
[ 4 6874]
[ 5 1411]]
--- lower_valid_y: label freq ---
[[ 1 5964]
[ 2 3216]
[ 4 3766]
[ 5 663]]
--- lower_test_y: label freq ---
[[ 1 5326]
[ 2 3885]
[ 4 3460]
[ 5 793]]
-------------------------------------------------
UPPER BODY SENSORS DATA
-------------------------------------------------
upper_train_X: (28938, 216)
upper_valid_X: (13609, 216)
upper_test_X: (13464, 216)
--- upper_train_y: label freq ---
[[ 1 13250]
[ 2 7403]
[ 4 6874]
[ 5 1411]]
--- upper_valid_y: label freq ---
[[ 1 5964]
[ 2 3216]
[ 4 3766]
[ 5 663]]
--- upper_test_y: label freq ---
[[ 1 5326]
[ 2 3885]
[ 4 3460]
[ 5 793]]
-------------------------------------------------
RAW BODY SENSORS DATA
-------------------------------------------------
raw_train_X: (28938, 585)
raw_valid_X: (13609, 585)
raw_test_X: (13464, 585)
--- raw_train_y: label freq ---
[[ 1 13250]
[ 2 7403]
[ 4 6874]
[ 5 1411]]
--- raw_valid_y: label freq ---
[[ 1 5964]
[ 2 3216]
[ 4 3766]
[ 5 663]]
--- raw_test_y: label freq ---
[[ 1 5326]
[ 2 3885]
[ 4 3460]
[ 5 793]]
Process finished with exit code 0
'''
|
11499007
|
from wolframclient.evaluation import WolframLanguageSession
import logging
# set the root level to INFO
logging.basicConfig(level=logging.INFO)
try:
session = WolframLanguageSession()
# this will trigger some log messages with the process ID, the sockets
# address and the startup timer.
session.start()
# Warning: Infinite expression Power[0, -1] encountered.
res = session.evaluate('1/0')
finally:
session.terminate()
|
11499022
|
from mock import Mock, patch, call
from sqlalchemy import Column, String, Integer, Table, MetaData
from monitorrent.db import DBSession
from monitorrent.upgrade_manager import core_upgrade, upgrade, _operation_factory
from tests import UpgradeTestCase
class CoreUpgradeTest(UpgradeTestCase):
def upgrade_func(self, engine, operation_factory):
pass
def _upgrade(self):
core_upgrade(self.operation_factory)
def test_empty_db_test(self):
self._test_empty_db_test()
def test_not_existing_core_upgrade(self):
core_upgrade(self.operation_factory)
def test_empty_core_upgrade(self):
m = MetaData()
Table('plugin_versions', m,
Column('plugin', String, nullable=False, primary_key=True),
Column('version', Integer, nullable=False))
m.create_all(self.engine)
self._upgrade()
def test_filled_core_upgrade(self):
m = MetaData()
versions = Table('plugin_versions', m,
Column('plugin', String, nullable=False, primary_key=True),
Column('version', Integer, nullable=False))
m.create_all(self.engine)
with DBSession() as db:
db.execute(versions.insert(), {'plugin': 'lostfilm', 'version': -1})
db.execute(versions.insert(), {'plugin': 'rutor', 'version': 2})
self._upgrade()
class UpgradeTest(UpgradeTestCase):
def upgrade_func(self, engine, operation_factory):
pass
def _upgrade(self):
upgrade()
def test_upgrade(self):
core_upgrade_mock = Mock()
call_ugprades_mock = Mock()
upgrades_mock = ['test']
with patch("monitorrent.upgrade_manager.core_upgrade", core_upgrade_mock), \
patch("monitorrent.upgrade_manager.call_ugprades", call_ugprades_mock), \
patch("monitorrent.upgrade_manager.upgrades", upgrades_mock):
self._upgrade()
core_upgrade_mock.assert_called_once_with(_operation_factory)
call_ugprades_mock.assert_called_once_with(upgrades_mock)
|
11499025
|
from chalice import Blueprint
from chalicelib import _overrides
from chalicelib.blueprints import bp_authorizers
from chalicelib.utils import assist_helper
app = Blueprint(__name__)
_overrides.chalice_app(app)
@app.route('/v1/assist/credentials', methods=['GET'], authorizer=bp_authorizers.api_key_authorizer)
def get_assist_credentials(context):
credentials = assist_helper.get_temporary_credentials()
if "errors" in credentials:
return credentials
return {"data": credentials}
|
11499027
|
import pytest
from tinkoff.dolyame import Dolyame
pytestmark = [pytest.mark.django_db]
@pytest.fixture(autouse=True)
def _credentials(settings):
settings.DOLYAME_LOGIN = 'root'
settings.DOLYAME_PASSWORD = '<PASSWORD>'
settings.DOLYAME_CERTIFICATE_PATH = 'tinkoff/tests/.fixtures/testing.pem'
@pytest.fixture(autouse=True)
def _absolute_host(settings):
settings.ABSOLUTE_HOST = 'https://tst.hst'
@pytest.fixture
def dolyame(order):
return Dolyame(order=order)
|
11499050
|
REGION = 'us-east-1'
SEARCH_CHECKPOINT_TABLE_NAME = 'Checkpoint'
SEARCH_TEXT = '#searchtext'
TWEET_PROCESSOR_FUNCTION_NAME = 'TweetProcessor'
BATCH_SIZE = '2'
STREAM_MODE_ENABLED = 'false'
SSM_PARAMETER_PREFIX = 'ssm_prefix'
CONSUMER_KEY = 'key'
CONSUMER_SECRET = 'secret'
ACCESS_TOKEN = 'token'
ACCESS_TOKEN_SECRET = 'token_secret'
|
11499085
|
import sys,os,argparse
import torch
# Arguments
parser=argparse.ArgumentParser(description='Renaming script (runs over preprocessed files)')
parser.add_argument('--dataset',type=str,required=True,help='(default=%(default)s)',
choices=['vctk','librispeech','nsynthp'])
parser.add_argument('--path',default='',type=str,required=True,help='(default=%(default)s)')
parser.add_argument('--extension',default='.pt',type=str,required=False,help='(default=%(default)s)')
parser.add_argument('--execute',action='store_true')
args=parser.parse_args()
if args.execute:
input('Are you sure you want to rename? ')
# Get all filenames
all_fn=[]
for dirpath,dirnames,filenames in os.walk(args.path):
for filename in filenames:
if filename.endswith(args.extension):
fn=os.path.join(dirpath,filename)
all_fn.append(fn)
# Process datasets
if args.dataset=='vctk':
# ----- VCTK -----
# Load fn2ut
utterances,fn2ut=torch.load(os.path.join('utils','vctk_utterance_files_map.pt'))
i=0
for ut in utterances:
utterances[ut]=i
i+=1
# Loop files
for i,fn_old in enumerate(all_fn):
folder,fn=os.path.split(fn_old[:-len(args.extension)])
spk=fn.split('_')[0]
try:
ut='{:05d}'.format(utterances[fn2ut[fn]])
fn_new=os.path.join(folder,spk+'_'+ut+args.extension)
print(i+1,'\t'+fn_old+' ---> '+fn_new)
if args.execute:
os.rename(fn_old,fn_new)
        except KeyError:
print('Not found correspondence for '+fn_old+' - Deleting')
if args.execute:
os.remove(fn_old)
else:
input()
elif args.dataset=='librispeech':
# --- LibriSpeech ---
# Loop files
for i,fn_old in enumerate(all_fn):
folder,fn=os.path.split(fn_old[:-len(args.extension)])
fields=fn.split('_')
spk,chap=fields[0],fields[0]+'-'+fields[1]
fn_new=os.path.join(folder,spk+'_'+chap+'_'+'-'.join(fields[2:])+args.extension)
print(i+1,'\t'+fn_old+' ---> '+fn_new)
if args.execute:
os.rename(fn_old,fn_new)
elif args.dataset=='nsynthp':
# --- NSynth (pitch) ---
# Loop files
for i,fn_old in enumerate(all_fn):
folder,fn=os.path.split(fn_old[:-len(args.extension)])
instr,pitch,vel=fn.split('-')
instr=instr.replace('_','')
fn_new=os.path.join(folder,pitch+'_'+instr+'-'+vel+args.extension)
print(i+1,'\t'+fn_old+' ---> '+fn_new)
if args.execute:
os.rename(fn_old,fn_new)
# *** Add other options here ***
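# Template for a new dataset branch (hypothetical name; it must also be added
# to the --dataset choices above). Derive fn_new from fn_old, then rename:
#
# elif args.dataset=='mydataset':
#     for i,fn_old in enumerate(all_fn):
#         folder,fn=os.path.split(fn_old[:-len(args.extension)])
#         fn_new=os.path.join(folder,fn+args.extension)  # apply the renaming rule here
#         print(i+1,'\t'+fn_old+' ---> '+fn_new)
#         if args.execute:
#             os.rename(fn_old,fn_new)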
|
11499115
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.bert_things.pytorch_pretrained_bert import BertConfig, BertModel, BertPreTrainedModel
import numpy as np
class BertTextModel(BertPreTrainedModel):
def __init__(self, config):
super(BertTextModel, self).__init__(config)
self.bert = BertModel(config)
def forward(self, data, attention_mask, output_all_encoded_layers=False):
sequence_output, pooled_output = self.bert(data, attention_mask=attention_mask, output_all_encoded_layers=output_all_encoded_layers)
return sequence_output, pooled_output
    def init_from_state_dict(self, state_dict):
        # keep only the entries matching this model's parameter names, then
        # load them without requiring the state dict to be complete
        own_keys = {k for k, _ in self.named_parameters()}
        td = {k: v for k, v in state_dict.items() if k in own_keys}
        self.load_state_dict(td, strict=False)
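# Hypothetical usage sketch (illustrative values; assumes the old
# pytorch_pretrained_bert config API imported above):
#   config = BertConfig(vocab_size_or_config_json_file=30522)
#   model = BertTextModel(config)
#   seq_out, pooled = model(input_ids, attention_mask=attention_mask)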
|
11499142
|
import cli_parser
import json
from file_operations import load_bookmarks_file
from file_operations import write_to_file
from deduplicator_utils import deduplicate
from deduplicator_utils import directory_merge
from fetch import get_bookmarks_from_toolbar
from fetch import get_bookmarks_from_menu
from fetch import get_bookmarks_from_other
def deduplicate_and_merge_all(bookmarks_object):
deduplicate_and_merge(get_bookmarks_from_toolbar(bookmarks_object))
deduplicate_and_merge(get_bookmarks_from_menu(bookmarks_object))
deduplicate_and_merge(get_bookmarks_from_other(bookmarks_object))
def deduplicate_and_merge(bookmarks):
deduplicate(bookmarks)
directory_merge(bookmarks)
def main():
args = cli_parser.parse_arguments()
bookmarks_object = load_bookmarks_file(args.file)
deduplicate_and_merge_all(bookmarks_object)
write_to_file(args.output, json.dumps(bookmarks_object, indent=4))
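# Assumed entry-point guard for running the deduplicator from the command line:
if __name__ == '__main__':
    main()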
|
11499145
|
from urllib.parse import quote
from django.conf import settings
from django.conf.urls import include, url
from django.shortcuts import render
from django.urls.conf import path
from django.views.static import serve
def seo(request, su, huasu=None, im=None):
return pangboo(
request, status=200,
title='{} - iTaigi 愛台語'.format(su),
image='https://www.moedict.tw/{}.png?font=wt064'.format(
quote(su)
),
)
urlpatterns = [
url(r'^accounts/', include('allauth.urls')),
url(r'^', include('臺灣言語平臺.網址')),
url(r'^影音檔案/(?P<path>.*)$', serve, {
'document_root': settings.MEDIA_ROOT,
}),
path('k/<su>/', seo),
path('t/<su>/', seo),
#
# url: `https://itaigi.tw/${Iah}/${Su}`,
# title: `${Su} - iTaigi 愛台語`,
# image: `https://www.moedict.tw/${encodeURI(Su)}.png?font=wt064`,
path('t/<huasu>/<su>/<im>', seo),
path('k/<huasu>/<su>/<im>', seo),
# url: `https://itaigi.tw/${Iah}/${HuaSu}/${TaiSu}/${Im}`,
# title: `${TaiSu} - iTaigi 愛台語`,
# image: `https://www.moedict.tw/${encodeURI(TaiSu)}.png?font=wt064`,
]
def pangboo(request, status, title, image):
return render(request, 'index.html', status=status, context={
'url': request.build_absolute_uri(),
'title': title,
'image': image,
})
def kithann(request, exception=None):
return pangboo(
request, status=404,
title='iTaigi 愛台語',
image='https://g0v.github.io/itaigi/design/logo_og.png',
)
handler404 = kithann
|
11499243
|
from py_tests_common import *
def CharLiteral_Test0():
c_program_text= """
// short-form literal
static_assert( "str"[1u] == "t"c8 );
"""
tests_lib.build_program( c_program_text )
def CharLiteral_Test1():
c_program_text= """
// short-form literal, type is char16
var char16 constexpr c= "Ё"c16;
static_assert( c == 1025c16 );
"""
tests_lib.build_program( c_program_text )
def CharLiteral_Test2():
c_program_text= """
// long-form literal
var char32 constexpr c= "Ⴅ"char32;
static_assert( c == 4261c32 );
"""
tests_lib.build_program( c_program_text )
def CharLiteral_Test3():
c_program_text= """
fn constexpr GetCharSize( char8 c ) : i32 { return 1; }
fn constexpr GetCharSize( char16 c ) : i32 { return 2; }
fn constexpr GetCharSize( char32 c ) : i32 { return 4; }
static_assert( GetCharSize( "R"c8 ) == 1 );
static_assert( GetCharSize( "R"char8 ) == 1 );
static_assert( GetCharSize( "R"c16 ) == 2 );
static_assert( GetCharSize( "R"char16 ) == 2 );
static_assert( GetCharSize( "R"c32 ) == 4 );
static_assert( GetCharSize( "R"char32 ) == 4 );
"""
tests_lib.build_program( c_program_text )
def CharLiteralIsConstantValue_Test0():
c_program_text= """
fn Bar( char16 &mut c ) {}
fn Foo()
{
Bar( "Ö"c16 );
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "CouldNotSelectOverloadedFunction" )
assert( errors_list[0].src_loc.line == 5 )
def InvalidSizeForCharLiteral_Test0():
c_program_text= """
fn Foo()
{
"try mupltiple symbols"c8;
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "InvalidSizeForCharLiteral" )
assert( errors_list[0].src_loc.line == 4 )
def InvalidSizeForCharLiteral_Test1():
c_program_text= """
fn Foo()
{
""c16; // zero symbols
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "InvalidSizeForCharLiteral" )
assert( errors_list[0].src_loc.line == 4 )
def InvalidSizeForCharLiteral_Test2():
c_program_text= """
fn Foo()
{
"Ü"c8; // Error, c8 literals may represent only symbols with codes 0-127.
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "InvalidSizeForCharLiteral" )
assert( errors_list[0].src_loc.line == 4 )
def InvalidSizeForCharLiteral_Test3():
c_program_text= """
fn Foo()
{
"😀"c16; // Symbol does not fit into single utf-16 char.
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "InvalidSizeForCharLiteral" )
assert( errors_list[0].src_loc.line == 4 )
def InvalidSizeForCharLiteral_Test4():
c_program_text= """
fn Foo()
{
"wtf"c32; // Too much symbols
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "InvalidSizeForCharLiteral" )
assert( errors_list[0].src_loc.line == 4 )
|
11499294
|
from spartan import expr, util
from spartan.examples.als import als
import test_common
from test_common import millis
from datetime import datetime
# <EMAIL>
#def test_als(ctx):
def benchmark_als(ctx, timer):
print "#worker:", ctx.num_workers
#USER_SIZE = 100 * ctx.num_workers
USER_SIZE = 320
#USER_SIZE = 200 * 64
MOVIE_SIZE = 12800
num_features = 20
num_iter = 2
#A = expr.randint(USER_SIZE, MOVIE_SIZE, low=0, high=5, tile_hint=(USER_SIZE, util.divup(MOVIE_SIZE, ctx.num_workers)))
A = expr.randint(USER_SIZE, MOVIE_SIZE, low=0, high=5)
util.log_warn('begin als!')
t1 = datetime.now()
U, M = als(A, implicit_feedback=True, num_features=num_features, num_iter=num_iter)
U.optimized()
M.optimized()
t2 = datetime.now()
cost_time = millis(t1,t2)
print "total cost time:%s ms, per iter cost time:%s ms" % (cost_time, cost_time/num_iter)
if __name__ == '__main__':
test_common.run(__file__)
|
11499299
|
import json
import os
import re
import sys
from typing import List, Dict
from collections import OrderedDict, defaultdict
from rapidstream.BE.Device import U250
class TimingReportParser:
def __init__(self, direction: str, timing_report_path: str) -> None:
"""
direction: Literal['to_anchor', 'from_anchor']
"""
self.timing_report_path = timing_report_path
# each timing path is a list of lines
self.slack_sections = self.splitReportIntoSlackSections()
self.direction = direction # whether the timing paths in the report are to anchors or from anchors
        self.end_cell_role = 'source' if self.direction == 'to_anchor' else 'sink'
def getAnchorConnection(self, filename='') -> Dict[str, Dict[str, List[Dict]]]:
"""
anchor -> [ {timing_path_source_site, LUT_count, ...}, ... ]
"""
anchor_connections = defaultdict(list)
for slack_section in self.slack_sections:
anchor = self.getAnchorFromSlackSection(slack_section)
timing_path = self.getDataTimingPathOfSlackSection(slack_section)
if self.direction == 'to_anchor':
end_cell_site = timing_path[0]
else:
end_cell_site = timing_path[-1]
lut_count = self.getLUTCountInSlackSection(slack_section)
anchor_connections[anchor].append(
{
'src_or_sink' : self.end_cell_role,
'end_cell_name': self.getEndCellName(slack_section),
'end_cell_site': end_cell_site,
'num_lut_on_path' : lut_count,
'normalized_coordinate' : U250.getCalibratedCoordinatesFromSiteName(end_cell_site),
'setup_slack': self.getSetupSlackOfSlackSection(slack_section)
}
)
if filename:
open(filename, 'w').write(json.dumps(anchor_connections, indent=2))
return anchor_connections
def splitReportIntoSlackSections(self) -> List[List[str]]:
"""
partition the original report into local groups of lines
        each group corresponds to one slack section
"""
report = open(self.timing_report_path)
slack_sections = []
curr = []
for line in report:
if line.startswith('Slack'):
slack_sections.append(curr)
curr = [line]
else:
curr.append(line)
slack_sections.append(curr)
        # note that the first entry is the headings of the report
return slack_sections[1:]
def getAnchorFromSlackSection(self, slack_section: List[str]) -> str:
"""
extract which anchor is in this timing path
"""
if self.direction == 'to_anchor':
for line in slack_section:
if 'Destination:' in line:
assert '_q0_reg' in line
# example:
# "Destination: PE_wrapper247_U0_fifo_cout_drain_out_V_write_pass_0_q0_reg/D"
return re.search(' ([^/ ]+)/', line).group(1)
elif self.direction == 'from_anchor':
for line in slack_section:
if 'Source:' in line:
assert '_q0_reg' in line
                    # example:
                    # "Source: PE_wrapper247_U0_fifo_cout_drain_out_V_write_pass_0_q0_reg/D"
return re.search(' ([^/ ]+)/', line).group(1)
else:
assert False
def getLUTCountInSlackSection(self, slack_section: List[str]) -> int:
"""
count how many LUTs are there in the path from/to the anchor
Example:
------------------------------------------------------------------- -------------------
SLICE_X54Y111 FDRE (Prop_DFF_SLICEL_C_Q)
0.079 3.871 r CR_X0Y0_To_CR_X1Y1_ctrl_U0/ap_done_Boundary_X0Y2_To_X2Y2_q_reg/Q
net (fo=2, estimated) 0.146 4.017 CR_X0Y0_To_CR_X1Y1_ctrl_U0/ap_done_Boundary_X0Y2_To_X2Y2_q
SLICE_X54Y111 r CR_X0Y0_To_CR_X1Y1_ctrl_U0/ap_done_Boundary_X2Y0_To_X2Y2_INST_0/I0
SLICE_X54Y111 LUT2 (Prop_H6LUT_SLICEL_I0_O)
0.051 4.068 r CR_X0Y0_To_CR_X1Y1_ctrl_U0/ap_done_Boundary_X2Y0_To_X2Y2_INST_0/O
net (fo=1, estimated) 0.385 4.453 ap_done_Boundary_X2Y0_To_X2Y2_out
SLICE_X57Y111 FDRE r ap_done_Boundary_X2Y0_To_X2Y2_q0_reg/D
------------------------------------------------------------------- -------------------
        It suffices to count how many lines contain the ' LUT' pattern.
"""
return len([line for line in slack_section if ' LUT' in line])
def getSetupSlackOfSlackSection(self, slack_section: List[str]) -> float:
"""
extract setup slack. Examples:
Slack (MET) : 1.347ns (required time - arrival time)
Slack (VIOLATED) : -1.347ns (required time - arrival time)
"""
for line in slack_section:
if re.search('^Slack', line):
return float(re.search(' ([-]*[ ]*[0-9.]+)ns', line).group(1))
assert False
def getEndCellName(self, slack_section: List[str]) -> str:
"""
get the name of the cell that connects to the anchor FF
"""
for line in slack_section:
if '_q0_reg' not in line:
if 'Source:' in line or 'Destination:' in line:
# the last section after "/" will be the pin name. We do not want that part
match = re.search(r'(Source:|Destination:)[ ]*([^ ]*)/[^/]+', line)
return match.group(2)
assert False
def getDataTimingPathOfSlackSection(self, slack_section: List[str]) -> List[str]:
"""
get all elements from the last/next sequential element to the anchor register
a sample slack section. The data signal path is the 2nd section divided by '---------'
Slack (MET) : 0.208ns (required time - arrival time)
Source: CR_X4Y4_To_CR_X5Y5_ctrl_U0/CR_X4Y4_To_CR_X5Y5_routing_U0/CR_X4Y4_To_CR_X5Y5_U0/cout_drain_IO_L1_out_wrapper441_U0/grp_cout_drain_IO_L1_out_fu_28/local_cout_V_U/kernel0_cout_drain_IO_L1_out_boundary_wrapper367_local_cout_V_ram_U/ram_reg/CLKARDCLK
(rising edge-triggered cell RAMB36E2 clocked by ap_clk {rise@0.000ns fall@1.250ns period=2.500ns})
Destination: cout_drain_IO_L1_out_wrapper441_U0_fifo_cout_drain_out_V_V_din_pass_0_q0_reg[42]/D
(rising edge-triggered cell FDRE clocked by ap_clk {rise@0.000ns fall@1.250ns period=2.500ns})
......
......
Location Delay type Incr(ns) Path(ns) Netlist Resource(s)
------------------------------------------------------------------- -------------------
(clock ap_clk rise edge) 0.000 0.000 r
BUFGCE_X0Y194 BUFGCE 0.000 0.000 r test_bufg/O
X4Y4 (CLOCK_ROOT) net (fo=22739, estimated) 2.677 2.677 CR_X4Y4_To_CR_X5Y5_ctrl_U0/CR_X4Y4_To_CR_X5Y5_routing_U0/CR_X4Y4_To_CR_X5Y5_U0/cout_drain_IO_L1_out_wrapper441_U0/grp_cout_drain_IO_L1_out_fu_28/local_cout_V_U/kernel0_cout_drain_IO_L1_out_boundary_wrapper367_local_cout_V_ram_U/ap_clk
SLR Crossing[2->1]
RAMB36_X10Y61 RAMB36E2 r CR_X4Y4_To_CR_X5Y5_ctrl_U0/CR_X4Y4_To_CR_X5Y5_routing_U0/CR_X4Y4_To_CR_X5Y5_U0/cout_drain_IO_L1_out_wrapper441_U0/grp_cout_drain_IO_L1_out_fu_28/local_cout_V_U/kernel0_cout_drain_IO_L1_out_boundary_wrapper367_local_cout_V_ram_U/ram_reg/CLKARDCLK
------------------------------------------------------------------- -------------------
RAMB36_X10Y61 RAMB36E2 (Prop_RAMB36E2_RAMB36_CLKARDCLK_DOUTBDOUT[10])
0.830 3.507 r CR_X4Y4_To_CR_X5Y5_ctrl_U0/CR_X4Y4_To_CR_X5Y5_routing_U0/CR_X4Y4_To_CR_X5Y5_U0/cout_drain_IO_L1_out_wrapper441_U0/grp_cout_drain_IO_L1_out_fu_28/local_cout_V_U/kernel0_cout_drain_IO_L1_out_boundary_wrapper367_local_cout_V_ram_U/ram_reg/DOUTBDOUT[10]
net (fo=2, estimated) 0.478 3.985 CR_X4Y4_To_CR_X5Y5_ctrl_U0/CR_X4Y4_To_CR_X5Y5_routing_U0/CR_X4Y4_To_CR_X5Y5_U0/cout_drain_IO_L1_out_wrapper441_U0/grp_cout_drain_IO_L1_out_fu_28/local_cout_V_U/kernel0_cout_drain_IO_L1_out_boundary_wrapper367_local_cout_V_ram_U/local_cout_V_q0[42]
SLICE_X156Y305 r CR_X4Y4_To_CR_X5Y5_ctrl_U0/CR_X4Y4_To_CR_X5Y5_routing_U0/CR_X4Y4_To_CR_X5Y5_U0/cout_drain_IO_L1_out_wrapper441_U0/grp_cout_drain_IO_L1_out_fu_28/local_cout_V_U/kernel0_cout_drain_IO_L1_out_boundary_wrapper367_local_cout_V_ram_U/fifo_cout_drain_out_V_V_din[42]_INST_0/I0
SLICE_X156Y305 LUT5 (Prop_E6LUT_SLICEM_I0_O)
0.124 4.109 r CR_X4Y4_To_CR_X5Y5_ctrl_U0/CR_X4Y4_To_CR_X5Y5_routing_U0/CR_X4Y4_To_CR_X5Y5_U0/cout_drain_IO_L1_out_wrapper441_U0/grp_cout_drain_IO_L1_out_fu_28/local_cout_V_U/kernel0_cout_drain_IO_L1_out_boundary_wrapper367_local_cout_V_ram_U/fifo_cout_drain_out_V_V_din[42]_INST_0/O
net (fo=1, estimated) 0.572 4.681 cout_drain_IO_L1_out_wrapper441_U0_fifo_cout_drain_out_V_V_din_pass_0_out[42]
SLICE_X175Y292 FDRE r cout_drain_IO_L1_out_wrapper441_U0_fifo_cout_drain_out_V_V_din_pass_0_q0_reg[42]/D
------------------------------------------------------------------- -------------------
(clock ap_clk rise edge) 2.500 2.500 r
BUFGCE_X0Y194 BUFGCE 0.000 2.500 r test_bufg/O
X4Y4 (CLOCK_ROOT) net (fo=22739, estimated) 2.268 4.768 ap_clk
SLR Crossing[2->1]
SLICE_X175Y292 FDRE r cout_drain_IO_L1_out_wrapper441_U0_fifo_cout_drain_out_V_V_din_pass_0_q0_reg[42]/C
clock pessimism 0.131 4.899
clock uncertainty -0.035 4.864
SLICE_X175Y292 FDRE (Setup_GFF2_SLICEM_C_D)
0.025 4.889 cout_drain_IO_L1_out_wrapper441_U0_fifo_cout_drain_out_V_V_din_pass_0_q0_reg[42]
-------------------------------------------------------------------
required time 4.889
arrival time -4.681
-------------------------------------------------------------------
slack 0.208
"""
dividing_line_indices = []
for i in range(len(slack_section)):
if '-----' in slack_section[i]:
dividing_line_indices.append(i)
data_signal_path_begin = dividing_line_indices[1] + 1 # inclusive
data_signal_path_end = dividing_line_indices[2] # exclusive
data_signal_path_part = slack_section[data_signal_path_begin : data_signal_path_end]
data_signal_path = []
for line in data_signal_path_part:
            match = re.search(r' ([^ ]*_X\d+Y\d+) ', line)
if match:
data_signal_path.append(match.group(1))
# remove repetitions
data_signal_path = list(OrderedDict.fromkeys(data_signal_path))
return data_signal_path
if __name__ == '__main__':
curr_dir = os.getcwd()
assert len(sys.argv) == 2
report_prefix = sys.argv[1]
from_anchor_report = f'{curr_dir}/{report_prefix}_timing_path_from_anchor.txt'
to_anchor_report = f'{curr_dir}/{report_prefix}_timing_path_to_anchor.txt'
assert os.path.isfile(from_anchor_report), from_anchor_report
assert os.path.isfile(to_anchor_report), to_anchor_report
parser_from_anchor = TimingReportParser('from_anchor', from_anchor_report)
parser_to_anchor = TimingReportParser('to_anchor', to_anchor_report)
connection_from_anchor = parser_from_anchor.getAnchorConnection()
connection_to_anchor = parser_to_anchor.getAnchorConnection()
anchor_connections = {**connection_from_anchor, **connection_to_anchor}
    # check that no anchor exists in both reports
assert len(anchor_connections) == len(connection_from_anchor) + len(connection_to_anchor)
open(f'{report_prefix}_anchor_connections.json', 'w').write(json.dumps(anchor_connections, indent=2))
open(f'{report_prefix}_anchor_connections_source.json', 'w').write(json.dumps(connection_to_anchor, indent=2))
open(f'{report_prefix}_anchor_connections_sink.json', 'w').write(json.dumps(connection_from_anchor, indent=2))
open(f'{report_prefix}_anchor_connections.json.done.flag', 'w').write(' ')
|
11499381
|
from pysoa.common.transport.redis_gateway.constants import REDIS_BACKEND_TYPE_STANDARD
SOA_SERVER_SETTINGS = {
'heartbeat_file': '/srv/echo_service-{{fid}}.heartbeat',
'middleware': [], # TODO
'transport': {
'path': 'pysoa.common.transport.redis_gateway.server:RedisServerTransport',
'kwargs': {
'backend_layer_kwargs': {
'hosts': [
('standalone.redis5.pysoa', 6379),
('standalone.redis6.pysoa', 6379),
],
},
'backend_type': REDIS_BACKEND_TYPE_STANDARD,
},
},
'harakiri': {
'timeout': 7,
'shutdown_grace': 3,
},
}
|
11499384
|
import requests, re, json
from colorama import init, Fore, Back, Style
from terminaltables import SingleTable
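init()  # initialise colorama so the ANSI colour codes also render on Windows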
warning = "["+Fore.RED+"!"+Fore.RESET+"]"
question = "["+Fore.YELLOW+"?"+Fore.RESET+"]"
found = "["+Fore.GREEN+"+"+Fore.RESET+"]"
wait = "["+Fore.MAGENTA+"*"+Fore.RESET+"]"
def bssidFinder():
bssid = input(" MAC/Bssid: ")
print("\n"+wait+" Locating '%s'..." % (bssid))
url = "https://api.mylnikov.org/wifi?v=1.1&bssid=%s"
response = requests.get(url % (bssid))
data = response.content.decode('utf-8')
values = json.loads(data)
code = str(values['result'])
if code == "404":
print("\n[%s]\n" % (bssid))
print(warning+" Localisation Not Found")
else:
pass
try:
localisation = str(values['data']['lat']) + ','+str(values['data']['lon'])
print("\n[ %s ]" % (bssid))
print(found+" Localisation: " + localisation)
print(found+" Maps: https://www.google.fr/maps?q=%s" % (localisation))
except:
pass
|
11499385
|
import json
import json.decoder
import logging
import os
from copy import deepcopy
from functools import wraps
from typing import Callable, Dict, Final, Optional
import click
import docker
from alembic import __version__ as __alembic_version__
from alembic.config import Config as AlembicConfig
from .utils import build_url
from .utils_migration import create_basic_config
DISCOVERED_CACHE: Final[str] = os.path.expanduser(
"~/.simcore_postgres_database_cache.json"
)
log = logging.getLogger("root")
def _safe(if_fails_return=False):
def decorator(func: Callable):
@wraps(func)
def wrapper(*args, **kargs):
try:
res = func(*args, **kargs)
return res
except RuntimeError as err:
log.info(
"%s failed: %s",
func.__name__,
str(err),
exc_info=True,
stack_info=True,
)
except Exception: # pylint: disable=broad-except
log.info(
"%s failed unexpectedly",
func.__name__,
exc_info=True,
stack_info=True,
)
return deepcopy(if_fails_return) # avoid issues with default mutables
return wrapper
return decorator
@_safe(if_fails_return=None)
def get_service_published_port(service_name: str) -> int:
client = docker.from_env()
services = [
s for s in client.services.list() if service_name in getattr(s, "name", "")
]
if not services:
raise RuntimeError(
"Cannot find published port for service '%s'. Probably services still not up"
% service_name
)
service_endpoint = services[0].attrs["Endpoint"]
if "Ports" not in service_endpoint or not service_endpoint["Ports"]:
raise RuntimeError(
"Cannot find published port for service '%s' in endpoint. Probably services still not up"
% service_name
)
published_port = service_endpoint["Ports"][0]["PublishedPort"]
return int(published_port)
def load_cache(*, raise_if_error=False) -> Dict:
try:
with open(DISCOVERED_CACHE) as fh:
cfg = json.load(fh)
except (FileNotFoundError, json.decoder.JSONDecodeError):
if raise_if_error:
raise
return {}
return cfg
def reset_cache():
if os.path.exists(DISCOVERED_CACHE):
os.remove(DISCOVERED_CACHE)
click.echo("Removed %s" % DISCOVERED_CACHE)
def get_alembic_config_from_cache(
force_cfg: Optional[Dict] = None,
) -> Optional[AlembicConfig]:
"""
Creates alembic config from cfg or cache
    Returns None if the url cannot be built (e.g. if the user requires a cache that does not exist)
"""
# build url
try:
if force_cfg:
cfg = force_cfg
else:
cfg = load_cache(raise_if_error=True)
url = build_url(**cfg)
except Exception: # pylint: disable=broad-except
log.debug(
"Cannot open cache or cannot build URL", exc_info=True, stack_info=True
)
click.echo("Invalid database config, please run discover first", err=True)
reset_cache()
return None
# build config
config = create_basic_config()
config.set_main_option("sqlalchemy.url", str(url))
return config
|
11499405
|
# `accessed` and `sys_prop` are assumed to be provided by the surrounding
# module (usage tracking and the system-property store, respectively).
def get_sp(key, default_value=None):
accessed(key)
property_value = sys_prop.get(key)
return default_value if property_value is None else property_value
|
11499435
|
from __future__ import division
from __future__ import print_function
from past.utils import old_div
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator as glm
# In this test, I will check and make sure the scoring history metrics of GLM without Lambda Search
# will contain the following: iterations, objective, negative_log_likelihood, training/validation logloss,
# training/validation classification error, and train/test deviance.
def test_glm_scoring_history_multinomial():
col_list_compare = ["iterations", "objective", "negative_log_likelihood", "training_logloss", "validation_logloss",
"training_classification_error", "validation_classification_error", "deviance_train",
"deviance_test"]
print("Preparing dataset....")
h2o_data = h2o.import_file(
pyunit_utils.locate("smalldata/glm_test/multinomial_10_classes_10_cols_10000_Rows_train.csv"))
h2o_data["C1"] = h2o_data["C1"].asfactor()
h2o_data["C2"] = h2o_data["C2"].asfactor()
h2o_data["C3"] = h2o_data["C3"].asfactor()
h2o_data["C4"] = h2o_data["C4"].asfactor()
h2o_data["C5"] = h2o_data["C5"].asfactor()
h2o_data["C11"] = h2o_data["C11"].asfactor()
splits_frames = h2o_data.split_frame(ratios=[.8], seed=1234)
train = splits_frames[0]
valid = splits_frames[1]
print("Building model with score_each_iteration turned on.")
h2o_model_score_each = glm(family="multinomial", score_each_iteration=True, generate_scoring_history=True)
h2o_model_score_each.train(x=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], y="C11", training_frame=train,
validation_frame=valid)
print("Building model with score_interval=1. Should generate same model as score_each_iteration turned on.")
h2o_model = glm(family="multinomial", score_iteration_interval=1, generate_scoring_history=True)
h2o_model.train(x=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], y="C11", training_frame=train,
validation_frame=valid)
# scoring history from h2o_model_score_each and h2o_model should be the same
pyunit_utils.assert_equal_scoring_history(h2o_model_score_each, h2o_model, col_list_compare)
print("Building model with score_each_iteration turned on and cross-validaton on.")
h2o_model_score_each_cv = glm(family="multinomial", score_each_iteration=True, nfolds = 2, seed=1234,
fold_assignment="modulo", generate_scoring_history=True)
h2o_model_score_each_cv.train(x=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], y="C11", training_frame=train,
validation_frame=valid)
print("Building model with score_interval=1 and cross-validation on. Should generate same model as "
"score_each_iteration and cv turned on.")
h2o_model_cv = glm(family="multinomial", score_iteration_interval=1, nfolds = 2, fold_assignment="modulo",
seed=1234, generate_scoring_history=True)
h2o_model_cv.train(x=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], y="C11", training_frame=train, validation_frame=valid)
# scoring history from h2o_model_score_each_cv and h2o_model_cv should be the same
pyunit_utils.assert_equal_scoring_history(h2o_model_score_each_cv, h2o_model_cv, col_list_compare)
    # check that if score_iteration_interval is set to 3, the output should be the same at every scored iteration
    print("Building model with score_iteration_interval=3 and cross-validation on. Should generate same model as "
          "other models and same scoring history at the scored iterations.")
h2o_model_cv_4th = glm(family="multinomial", score_iteration_interval=3, nfolds = 2, fold_assignment="modulo",
seed=1234, generate_scoring_history=True)
h2o_model_cv_4th.train(x=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], y="C11", training_frame=train, validation_frame=valid)
pyunit_utils.assertEqualScoringHistoryIteration(h2o_model_cv, h2o_model_cv_4th, col_list_compare)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_glm_scoring_history_multinomial)
else:
test_glm_scoring_history_multinomial()
|
11499461
|
import peewee
import sqlite3
import Peewee
import PeeweeLocation
import PeeweeEpisode
import pprint
import ClaseCharacter
import ClaseLocation
import ClaseEpisode
def get_personajes():
char = Peewee.CharacterP.select(Peewee.CharacterP.id,Peewee.CharacterP.name,Peewee.CharacterP.status,Peewee.CharacterP.species,Peewee.CharacterP.type,Peewee.CharacterP.gender,Peewee.CharacterP.origin_name,Peewee.CharacterP.origin_url,Peewee.CharacterP.location_name,Peewee.CharacterP.location_url,Peewee.CharacterP.image,Peewee.CharacterP.episode,Peewee.CharacterP.url,Peewee.CharacterP.created)
lista = []
for c in char:
objetoper = ClaseCharacter.Personaje(c.id,c.name,c.status,c.species,c.type,c.gender,c.origin_name,c.origin_url,c.location_name,c.location_url,c.image,c.episode,c.url,c.created)
lista.append(objetoper)
return lista
## Build the character index HTML
def CrearHTMLIndex():
Ch = []
Ch = get_personajes()
v = ''
for nini in range(0,len(Ch)):
#print(nini)
character = Ch[nini]
content='''<p>Id: {}</p>
<p>Name: <a href='file:///C:/Users/Eduardo-PC/Desktop/OrdinarioDAS/staticHtml/Characters/CharactersAccess/Personaje_{}.html'>{}</a></p>
'''.format(character._id,nini+1, character._nameCh)
v += content
#print(character)
    fill = open('staticHtml/Characters/Personajes.html','w',encoding='utf-8') # open the file for writing
fill.write(v)
fill.close()
############################################
def get_locations():
loc = PeeweeLocation.LocationP.select(PeeweeLocation.LocationP,PeeweeLocation.LocationP.name,PeeweeLocation.LocationP.type,PeeweeLocation.LocationP.dimension,PeeweeLocation.LocationP.residents,PeeweeLocation.LocationP.url,PeeweeLocation.LocationP.created)
listaL = []
for l in loc:
objetoloc = ClaseLocation.Locacion(l.id,l.name,l.type,l.dimension,l.residents,l.url,l.created)
listaL.append(objetoloc)
#print(listaL)
return listaL
#print(len(listaL))
## Build the locations index HTML
def CrearHTMLIndexLocation():
Liist = []
Liist = get_locations()
v = ''
for nil in range(0,len(Liist)):
#print(nini)
location = Liist[nil]
content='''<p>Id: {}</p>
<p>Name: <a href="file:///C:/Users/Eduardo-PC/Desktop/OrdinarioDAS/staticHtml/Locations/LocationsAccess/Location_{}.html">{}</a></p>
'''.format(location._id,nil+1, location._nameLoc)
#print(character)
v+=content
    fill = open('staticHtml/Locations/Locations.html','w',encoding='utf-8') # open the file for writing
fill.write(v)
fill.close()
#print(len(Liist))
##########################################################
def get_episodes():
ep = PeeweeEpisode.EpisodeP.select(PeeweeEpisode.EpisodeP.id,PeeweeEpisode.EpisodeP.name,PeeweeEpisode.EpisodeP.airdate,PeeweeEpisode.EpisodeP.episode,PeeweeEpisode.EpisodeP.characters,PeeweeEpisode.EpisodeP.url,PeeweeEpisode.EpisodeP.created)
listaE = []
for e in ep:
objetoep = ClaseEpisode.Episodio(e.id,e.name,e.airdate,e.episode,e.characters,e.url,e.created)
listaE.append(objetoep)
#print(listaE)
return listaE
## Build the episodes index HTML
def CrearHTMLEpisodioIndex():
Liste = []
Liste = get_episodes()
v = ''
for nile in range(0,len(Liste)):
#print(nini)
episode = Liste[nile]
content='''<p>Id: {}</p>
<p>Name: <a href="file:///C:/Users/Eduardo-PC/Desktop/OrdinarioDAS/staticHtml/Episodes/EpisodesAccess/Episode_{}.html">{}</a></p>
'''.format(episode._id,nile+1, episode._nameEp)
v += content
#print(character)
    fill = open('staticHtml/Episodes/Episodes.html','w',encoding='utf-8') # open the file for writing
fill.write(v)
fill.close()
#CrearHTMLIndex()
#CrearHTMLIndexLocation()
CrearHTMLEpisodioIndex()
|
11499473
|
from espnet2.torch_utils.pytorch_version import pytorch_cudnn_version
def test_pytorch_cudnn_version():
print(pytorch_cudnn_version())
|
11499479
|
from multiprocessing import Process, freeze_support
from time import sleep
import webview
from server import serve
from http.client import HTTPConnection
# Constants
HOST = 'localhost'
PORT = 37128
URL = f'http://{HOST}:{PORT}'
# Server
server = Process(target=serve)
# Frontend
webview.create_window(
'Junction',
    URL,
min_size=(600, 400),
text_select=True,
)
def server_running():
'''Helper to see if server is running'''
try:
conn = HTTPConnection(HOST, PORT)
conn.request('GET', '/')
r = conn.getresponse()
return r.status == 200
    except OSError:
print('Server not started')
return False
if __name__ == '__main__':
# Required for pyinstaller + multiprocessing: https://stackoverflow.com/a/32677108/2542016
    freeze_support()
# Run server process
server.start()
# Wait for server to start
print('Starting server')
while not server_running():
sleep(1)
print('Server started')
# Run frontend
webview.start(http_server=True)
|
11499480
|
import argparse
import math
import random
import sys
import os
import json
import numpy as np
import time
import operator
scatter_word_list = ['scatter', "'scatter'", '"scatter"', 'scatter_kws', "'o'", "'bo'", "'r+'", '"o"', '"bo"', '"r+"']
hist_word_list = ['hist', "'hist'", '"hist"', 'bar', "'bar'", '"bar"', 'countplot', 'barplot']
pie_word_list = ['pie', "'pie'", '"pie"']
scatter_plot_word_list = ['lmplot', 'regplot']
hist_plot_word_list = ['distplot', 'kdeplot', 'contour']
normal_plot_word_list = ['plot']
reserved_words = scatter_word_list + hist_word_list + pie_word_list + scatter_plot_word_list + hist_plot_word_list + normal_plot_word_list
arg_parser = argparse.ArgumentParser(description='JuiCe plot data extraction')
arg_parser.add_argument('--data_folder', type=str, default='../data',
help="the folder where the datasets downloaded from the original JuiCe repo are stored. We will retrieve 'train.jsonl', 'dev.jsonl' and 'test.jsonl' here.")
arg_parser.add_argument('--init_train_data_name', type=str, default='train.jsonl',
help="the filename of the original training data.")
arg_parser.add_argument('--init_dev_data_name', type=str, default='dev.jsonl',
help="the filename of the original dev data.")
arg_parser.add_argument('--init_test_data_name', type=str, default='test.jsonl',
help="the filename of the original test data.")
arg_parser.add_argument('--prep_train_data_name', type=str, default='train_plot.json',
help="the filename of the preprocessed training data. When set to None, it means that the training data is not preprocessed (this file is the most time-consuming for preprocessing).")
arg_parser.add_argument('--prep_dev_data_name', type=str, default='dev_plot.json',
help="the filename of the preprocessed dev data. When set to None, it means that the dev data is not preprocessed.")
arg_parser.add_argument('--prep_test_data_name', type=str, default='test_plot.json',
help="the filename of the preprocessed test data. When set to None, it means that the test data is not preprocessed.")
arg_parser.add_argument('--prep_dev_hard_data_name', type=str, default='dev_plot_hard.json',
help="the filename of the preprocessed hard split of the dev data. When set to None, it means that the dev data is not preprocessed.")
arg_parser.add_argument('--prep_test_hard_data_name', type=str, default='test_plot_hard.json',
help="the filename of the preprocessed hard split of the test data. When set to None, it means that the test data is not preprocessed.")
arg_parser.add_argument('--build_vocab', action='store_true', default=True,
help="set the flag to be true, so as to build the natural language word and code vocabs from the training set.")
arg_parser.add_argument('--nl_freq_file', type=str, default='nl_freq.json',
help='the file that stores the frequency of each natural language word.')
arg_parser.add_argument('--code_freq_file', type=str, default='code_freq.json',
help='the file that stores the frequency of each code token.')
arg_parser.add_argument('--nl_vocab', type=str, default='nl_vocab.json',
help='the file that stores the natural language word vocabulary.')
arg_parser.add_argument('--code_vocab', type=str, default='code_vocab.json',
help='the file that stores the code token vocabulary.')
arg_parser.add_argument('--min_nl_freq', type=int, default=15,
help='Words with a smaller number of occurrences in the training data than this threshold are excluded from the nl word vocab.')
arg_parser.add_argument('--min_code_freq', type=int, default=1000,
help='Code tokens with a smaller number of occurrences in the training data than this threshold are excluded from the code token vocab.')
args = arg_parser.parse_args()
def preprocess(data_folder, init_data_name, prep_data_name, prep_hard_data_name=None, additional_samples=None, is_train=True):
    additional_samples = additional_samples if additional_samples is not None else []
    plot_samples = []
clean_samples = []
init_data_name = os.path.join(data_folder, init_data_name)
with open(init_data_name) as fin:
for i, line in enumerate(fin):
sample = json.loads(line)
# extract code sequence without comments and empty strings
init_code_seq = sample['code_tokens']
code_seq = []
for tok in init_code_seq:
if len(tok) == 0 or tok[0] == '#':
continue
code_seq.append(tok)
            # keep only samples that actually call a plt method (a 'plt' token followed by '.')
while 'plt' in code_seq:
pos = code_seq.index('plt')
if pos < len(code_seq) - 1 and code_seq[pos + 1] == '.':
break
code_seq = code_seq[pos + 1:]
if not ('plt' in code_seq):
continue
plot_calls = []
api_seq = sample['api_sequence']
for api in api_seq:
if api == 'subplot':
continue
if api[-4:] == 'plot' and not ('_' in api):
plot_calls.append(api)
exist_plot_calls = False
for code_idx, tok in enumerate(code_seq):
if not (tok in reserved_words + plot_calls):
continue
if code_idx == len(code_seq) - 1 or code_seq[code_idx + 1] != '(':
continue
exist_plot_calls = True
break
if not exist_plot_calls:
continue
url = sample['metadata']['path']
if 'solution' in url.lower() or 'assignment' in url.lower():
clean_samples.append(sample)
if not is_train:
plot_samples.append(sample)
else:
plot_samples.append(sample)
print('number of samples in the original partition: ', len(plot_samples))
print('number of course-related samples in the partition: ', len(clean_samples))
json.dump(plot_samples, open(os.path.join(data_folder, prep_data_name), 'w'))
if len(additional_samples) > 0:
print('number of samples in the hard partition: ', len(additional_samples))
json.dump(additional_samples, open(os.path.join(data_folder, prep_hard_data_name), 'w'))
return plot_samples, clean_samples
def add_token_to_dict(seq, vocab_dict, is_code=False):
for tok in seq:
if len(tok) == 0:
continue
if is_code and tok[0] == '#':
continue
if tok in vocab_dict:
vocab_dict[tok] += 1
else:
vocab_dict[tok] = 1
return vocab_dict
def build_vocab(samples):
# Compute the frequency of each nl and code token
code_dict = {}
word_dict = {}
for sample in samples:
context = sample['context']
for cell in context:
if not 'code_tokens' in cell:
continue
code_context = cell['code_tokens']
if type(code_context) != list:
continue
code_dict = add_token_to_dict(code_context, code_dict, is_code=True)
code_dict = add_token_to_dict(sample['code_tokens'], code_dict, is_code=True)
word_dict = add_token_to_dict(sample['nl'] + sample['comments'], word_dict, is_code=False)
sorted_word_list = sorted(word_dict.items(), key=operator.itemgetter(1), reverse=True)
sorted_code_list = sorted(code_dict.items(), key=operator.itemgetter(1), reverse=True)
print('Total number of nl tokens (before filtering): ', len(sorted_word_list))
print('Total number of code tokens (before filtering): ', len(sorted_code_list))
json.dump(sorted_word_list, open(os.path.join(args.data_folder, args.nl_freq_file), 'w'))
json.dump(sorted_code_list, open(os.path.join(args.data_folder, args.code_freq_file), 'w'))
# filter out rare tokens
code_vocab = {}
word_vocab = {}
for i, item in enumerate(sorted_word_list):
if item[1] < args.min_nl_freq:
break
word_vocab[item[0]] = i
for i, item in enumerate(sorted_code_list):
if item[1] < args.min_code_freq:
break
code_vocab[item[0]] = i
print('Total number of nl tokens (after filtering): ', len(word_vocab))
print('Total number of code tokens (after filtering): ', len(code_vocab))
json.dump(word_vocab, open(os.path.join(args.data_folder, args.nl_vocab), 'w'))
json.dump(code_vocab, open(os.path.join(args.data_folder, args.code_vocab), 'w'))
if not os.path.exists(args.data_folder):
os.makedirs(args.data_folder)
# data preprocessing
if args.prep_train_data_name:
print('preprocessing training data:')
train_plot_samples, train_plot_clean_samples = preprocess(args.data_folder, args.init_train_data_name, args.prep_train_data_name, is_train=True)
cnt_train_clean_samples = len(train_plot_clean_samples)
if args.prep_dev_data_name:
print('preprocessing dev data:')
dev_plot_samples, dev_plot_clean_samples = preprocess(args.data_folder, args.init_dev_data_name, args.prep_dev_data_name,
prep_hard_data_name=args.prep_dev_hard_data_name, additional_samples=train_plot_clean_samples[:cnt_train_clean_samples // 2], is_train=False)
if args.prep_test_data_name:
print('preprocessing test data:')
test_plot_samples, test_plot_clean_samples = preprocess(args.data_folder, args.init_test_data_name, args.prep_test_data_name,
prep_hard_data_name=args.prep_test_hard_data_name, additional_samples=train_plot_clean_samples[cnt_train_clean_samples // 2:], is_train=False)
# build natural language word and code vocabularies
if args.build_vocab:
    assert args.prep_train_data_name is not None  # the vocab is built from the preprocessed training samples
build_vocab(train_plot_samples)
|
11499501
|
import numpy as np
from prediction_flow.transformers.column import LogTransformer
def test_normal():
log_transformer = LogTransformer()
x = np.array([100, 10, 32])
log_transformer.fit(x)
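    # expected values equal np.log1p(x): log(101), log(11), log(33)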
np.testing.assert_array_almost_equal(
log_transformer.transform(x), np.array([4.615121, 2.397895, 3.496508]))
|
11499509
|
import pymongo
import json
import sys
# Local Files
sys.path.append("..")
from scripts import settings
client = pymongo.MongoClient(settings.mongo_server, settings.mongo_id)
db = client[settings.mongo_client]
db.authenticate(settings.mongo_user, settings.mongo_pass)
def save(account, datatype, data):
d = db.positions.find_one({'account': account})
if d is not None:
d[datatype] = json.dumps(data)
db.positions.save(d)
else:
db.positions.insert_one({'account': account, datatype: json.dumps(data)})
def load(account, datatype):
d = db.positions.find_one({'account': account})
return json.loads(d[datatype])
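# Usage sketch (hypothetical account/datatype values):
#   save('acct-123', 'equities', [{'symbol': 'AAPL', 'qty': 10}])
#   load('acct-123', 'equities')  # -> [{'symbol': 'AAPL', 'qty': 10}]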
|
11499520
|
import io
import ujson
from .log import logger
class Buffer:
def __init__(self, buffer_limit=5000):
self.buffer_limit = buffer_limit
self.buffer = io.BytesIO()
self.counter = 0
self.full = False
def __len__(self):
return self.counter
def prepare(self):
self.buffer.seek(0)
def append(self, rec):
self.buffer.write((rec + '\n').encode())
self.counter += 1
if self.counter >= self.buffer_limit:
self.full = True
class WriterContext:
def __init__(self, ch, table, dump_json=True, ensure_ascii=False, buffer_limit=5000):
self.ch = ch
self.ensure_ascii = ensure_ascii
self.dump_json = dump_json
self.buffer_limit = buffer_limit
self.table = table
self.set_buffer()
def flush(self):
self.buffer.prepare()
buff = self.buffer
self.set_buffer()
return self.ch._flush(self.table, buff)
def set_buffer(self):
self.buffer = Buffer(buffer_limit=self.buffer_limit)
def push(self, *docs):
try:
for doc in docs:
                if self.dump_json:
                    doc = ujson.dumps(doc, ensure_ascii=self.ensure_ascii)
self.buffer.append(doc)
if self.buffer.full:
self.flush()
except Exception as e:
logger.exception('exc during push')
raise e
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not exc_value:
self.flush()
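# A minimal usage sketch (assumed API: `ch` is an object exposing
# _flush(table, buffer); the names below are hypothetical):
#
#   with WriterContext(ch, 'events', buffer_limit=1000) as w:
#       w.push({'event': 'click'}, {'event': 'view'})
#   # on a clean exit, any rows still buffered are flushed via ch._flush('events', ...)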
|
11499524
|
import spacy
# load word embedding model (the 'en' shortcut only works on spaCy < 3;
# newer versions use a full package name such as 'en_core_web_md')
nlp = spacy.load('en')
# define word embedding vectors
happy_vec = nlp('happy').vector
print(happy_vec)
sad_vec = nlp('sad').vector
print(sad_vec)
angry_vec = nlp('angry').vector
print(angry_vec)
# embedding dimensionality (number of components per vector)
vector_length = len(happy_vec)
print(vector_length)
|
11499559
|
from PIL import Image
from src.models.cc0 import patcher
import numpy as np
import skimage.io as io
from src.utils.imgproc import *
from skimage.color import rgb2hsv, hsv2rgb
class patcher(patcher):
def __init__(self, body='./body/body_sakurana.png', **options):
try:
options = options['options']
        except KeyError:
pass
options['is_4k'] = False
super().__init__(options=options)
self.name = 'サクラナ'
self.body = Image.open(body)
self.body_size = self.body.size
self.pantie_position = [3433, 1782]
try:
self.with_bra = options['with_bra']
        except KeyError:
self.with_bra = self.ask(question='With bra?', default=True)
if self.with_bra:
self.bra_position = [2017, 1491]
self.bra = np.float32(io.imread('./mask/bra_sakurana.png') / 255)
self.bra_center = np.float32(io.imread('./mask/bra_sakurana_center.png') / 255)
self.bra_shade = np.float32(io.imread('./material/bra_sakurana_shade.png') / 255)
self.bra_frill = np.float32(io.imread('./material/bra_sakurana_frill.png') / 255)
self.bra_shade_alpha = self.bra_shade[:, :, -1]
self.bra_frill_mask = self.bra_frill[:, :, -1] > 0.5
def gen_bra(self, image):
def pick_color(arr):
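            # average over rows then columns -> mean RGB colour of the region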
return np.mean(np.mean(arr, axis=0), axis=0)
pantie = np.array(image)
# pickup colors
front = pantie[20:100, 30:80, :3] / 255.0
front_shade = pantie[130:150, 0:40, :3] / 255.0
front_color = pick_color(front)
front_shade_color = pick_color(front_shade)
front_shade_color = rgb2hsv(front_shade_color[None, None])
front_shade_color[0, 0, 1] *= front_shade_color[0, 0, 2] / 0.3
if front_shade_color[0, 0, 1] > 0.7:
front_shade_color[0, 0, 1] *= 0.7
front_shade_color[0, 0, 2] *= front_shade_color[0, 0, 2] / 0.4
front_shade_color = np.clip(hsv2rgb(front_shade_color)[0, 0], 0, 1)
ribbon = pantie[24:32, 15:27, :3] / 255.0
ribbon_color = pick_color(ribbon)
# making a center texture
center = pantie[20:170, -200:-15, :3][:, ::-1]
center = resize(center, [2.3, 2.5])
bra_center = np.copy(self.bra_center)
bra_center[:center.shape[0], :center.shape[1], :3] = center * np.float32(bra_center[:center.shape[0], :center.shape[1], :3] > 0)
bra = self.bra[:, :, :3] * front_color
bra_shade = (self.bra_shade[:, :, -1])[:, :, None] * front_shade_color
bra_frill = self.bra_frill[:, :, :3] * ribbon_color
# overlaying layers
bra = alpha_brend(bra_center[:, :, :3], bra[:, :, :3], bra_center[:, :, 0] > 0.1)
bra = alpha_brend(bra_frill, bra, self.bra_frill_mask)
bra = alpha_brend(bra_shade, bra, self.bra_shade_alpha)
bra = np.dstack((bra, self.bra[:, :, 0] > 0))
return Image.fromarray(np.uint8(np.clip(bra, 0, 1) * 255))
def patch(self, image, transparent=False):
pantie = self.convert(image)
if transparent:
patched = Image.new("RGBA", (4096, 4096))
else:
patched = self.body.copy()
pantie = pantie.resize((int(pantie.width * .75), int(pantie.height * .77)), resample=Image.BICUBIC)
pantie = pantie.rotate(-90, expand=True)
patched = self.paste(patched, pantie, self.pantie_position)
if self.with_bra:
patched = self.paste(patched, self.gen_bra(image), self.bra_position)
return patched
|
11499577
|
from abc import ABC, abstractmethod
import asyncio
from typing import Any, Awaitable, Callable, Type, cast
import trio
from lahja import AsyncioEndpoint, EndpointAPI, TrioEndpoint
from lahja.base import EventAPI
Driver = Callable[["EngineAPI"], Awaitable[None]]
class EngineAPI(ABC):
endpoint_class: Type[EndpointAPI]
Event: Type[EventAPI]
@abstractmethod
def run_drivers(self, *drivers: Driver) -> Awaitable[None]:
"""
        Performs the actual *running* of the drivers, executing them in a
        manner appropriate for the individual endpoint implementation.
"""
...
@abstractmethod
async def run_with_timeout(
self, coro: Callable[..., Awaitable[Any]], *args: Any, timeout: int
) -> None:
"""
        Runs a coroutine with the specified positional ``args``, with a timeout.
**must** raise the built-in ``TimeoutError`` when a timeout occurs.
"""
...
@abstractmethod
async def sleep(self, seconds: float) -> None:
"""
        Sleep for the provided number of seconds in a manner appropriate for the
individual endpoint implementation.
"""
...
class AsyncioEngine(EngineAPI):
endpoint_class = AsyncioEndpoint
Event = cast(Type[EventAPI], asyncio.Event)
async def run_drivers(self, *drivers: Driver) -> None:
await asyncio.gather(*(driver(self) for driver in drivers))
async def run_with_timeout(
self, coro: Callable[..., Awaitable[Any]], *args: Any, timeout: int
) -> None:
try:
await asyncio.wait_for(coro(*args), timeout=timeout)
except asyncio.TimeoutError as err:
raise TimeoutError from err
async def sleep(self, seconds: float) -> None:
await asyncio.sleep(seconds)
class TrioEngine(EngineAPI):
endpoint_class = TrioEndpoint
Event = cast(Type[EventAPI], trio.Event)
async def run_drivers(self, *drivers: Driver) -> None:
async with trio.open_nursery() as nursery:
for driver in drivers:
nursery.start_soon(driver, self)
async def run_with_timeout(
self, coro: Callable[..., Awaitable[Any]], *args: Any, timeout: int
) -> None:
try:
with trio.fail_after(timeout):
await coro(*args)
except trio.TooSlowError as err:
raise TimeoutError from err
async def sleep(self, seconds: float) -> None:
await trio.sleep(seconds)
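# A minimal sketch (assumed usage, not part of this module): a "driver" is any
# async callable that accepts the engine, so the same body can run under both
# event loops.
#
#   async def ping_driver(engine: EngineAPI) -> None:
#       await engine.sleep(0.01)
#
#   # asyncio.run(AsyncioEngine().run_drivers(ping_driver))
#   # trio.run(TrioEngine().run_drivers, ping_driver)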
|
11499665
|
import datetime
import logging
import random
from unittest.mock import call
import pytest
from bloop.exceptions import ShardIteratorExpired
from bloop.stream.shard import (
CALLS_TO_REACH_HEAD,
Shard,
last_iterator,
reformat_record,
unpack_shards,
)
from . import (
build_get_records_responses,
build_shards,
dynamodb_record_with,
stream_description,
)
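# DynamoDB stream records carry ApproximateCreationDateTime with whole-second
# resolution, so tests truncate datetimes to seconds before injecting them.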
def drop_milliseconds(dt):
return datetime.datetime.fromtimestamp(int(dt.timestamp()))
def now_with_offset(seconds=0):
return datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(seconds=seconds)
def expected_get_calls(chain):
"""Returns the expected number of get_records calls including catch-up logic, to exhaust the chain.
For example, [3, 0, 1, 0, 0, 0, 1] will take 3 calls:
- call 1 stops after page 1 (3 records) (no empty responses)
- call 2 stops after page 3 (1 record) (1 empty response) <- catch-up applied
- call 3 stops after page 7 (1 record) (4 empty responses) <- catch-up applied
Or, [0, 0, 0, 1, 0, 0, 0, 1] will take 4 calls:
- call 1 stops after page 4 (1 record) (3 empty responses)
    - call 2 stops after page 6 (0 records) (5 empty responses) <- stopped due to catch-up limit
    - call 3 stops after page 7 (0 records) (6 empty responses) <- only 1 try since catch-up reached
- call 4 stops after page 8 (1 record) (6 empty responses)
"""
count = 0
# 0) Every non-empty page is another Shard.get_records() call
empty_pages = chain.count(0)
non_empty = len(chain) - empty_pages
count += non_empty
# 1) If calls_to_reach_head is 5, every empty page after the 4th
# is another Shard.get_records() call
has_free_empty_pages = True
if empty_pages >= CALLS_TO_REACH_HEAD:
has_free_empty_pages = False
count += empty_pages - (CALLS_TO_REACH_HEAD - 1)
# 2) If the last page is empty and we're out of free empty pages,
# then the last page has already been counted.
#
# But if we still have free pages, the last empty page needs to be
# explicitly counted, since we had to call Shard.get_records()
# to learn that there were no more pages, and it wasn't really free.
if chain[-1] == 0 and has_free_empty_pages:
count += 1
return count
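# Sanity check against the docstring walkthroughs (assuming CALLS_TO_REACH_HEAD == 5):
#   expected_get_calls([3, 0, 1, 0, 0, 0, 1]) == 3
#   expected_get_calls([0, 0, 0, 1, 0, 0, 0, 1]) == 4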
@pytest.mark.parametrize("expected, kwargs", [
("<Shard[exhausted, id='shard-id']>", {"iterator_id": last_iterator}),
("<Shard[at_seq='sequence', id='shard-id']>",
{"sequence_number": "sequence", "iterator_type": "at_sequence"}),
("<Shard[after_seq='sequence', id='shard-id']>",
{"sequence_number": "sequence", "iterator_type": "after_sequence"}),
("<Shard[latest, id='shard-id']>", {"iterator_type": "latest"}),
("<Shard[trim_horizon, id='shard-id']>", {"iterator_type": "trim_horizon"}),
("<Shard[id='shard-id']>", {}),
])
def test_repr(expected, kwargs):
shard = Shard(stream_arn="stream-arn", shard_id="shard-id", **kwargs)
assert repr(shard) == expected
@pytest.mark.parametrize("iterator_type", ["latest", "trim_horizon"])
def test_next_raises_expired_without_sequence(iterator_type, shard, session):
"""If the iterator expires and didn't have a sequence_number, there's no way to safely re-create it."""
shard.sequence_number = None
shard.iterator_type = iterator_type
shard.iterator_id = "iterator-id"
exception = session.get_stream_records.side_effect = ShardIteratorExpired()
with pytest.raises(ShardIteratorExpired) as excinfo:
next(shard)
# Exception is raised directly
assert excinfo.value is exception
session.get_stream_records.assert_called_once_with("iterator-id")
# Didn't try to get a new iterator
session.get_shard_iterator.assert_not_called()
@pytest.mark.parametrize("iterator_type", ["at_sequence", "after_sequence"])
def test_next_refreshes_expired_with_sequence(iterator_type, shard, session):
"""If the iterator expires and has a sequence_number, it will try to refresh."""
shard.stream_arn = "stream-arn"
shard.shard_id = "shard-id"
shard.sequence_number = "sequence-number"
shard.iterator_type = iterator_type
shard.iterator_id = "expired-iterator-id"
# Single response with 4 records
[response] = build_get_records_responses(4)
session.get_stream_records.side_effect = [ShardIteratorExpired(), response]
session.get_shard_iterator.return_value = "new-iterator-id"
records = next(shard)
# Don't need to deep validate here; that's covered by get_records and reformat_record tests.
assert len(records) == len(response["Records"])
# Only jumped once
session.get_shard_iterator.assert_called_once_with(
stream_arn=shard.stream_arn, shard_id=shard.shard_id,
iterator_type=iterator_type, sequence_number="sequence-number"
)
# First call raised Expired, second call returned records
session.get_stream_records.assert_has_calls([
call("expired-iterator-id"),
call("new-iterator-id")
])
@pytest.mark.parametrize("attr", [
"stream_arn", "shard_id", "iterator_id", "iterator_type",
"sequence_number", "parent"])
def test_eq_not_set_or_different(attr):
parent = Shard(stream_arn="parent-arn", shard_id="parent-id")
children = [Shard(stream_arn="child-arn", shard_id="child-id") for _ in range(2)]
kwargs = {
"stream_arn": "stream-arn",
"shard_id": "shard-id",
"iterator_id": "iterator-id",
"iterator_type": "iterator-type",
"sequence_number": "sequence-number",
"parent": parent
}
shard = Shard(**kwargs)
other = Shard(**kwargs)
# Initially equal
assert shard == other
assert other == shard
shard.children.extend(children)
assert not shard == other
assert not other == shard
# Compare equal regardless of order
other.children.extend(children[::-1])
assert shard == other
assert other == shard
setattr(other, attr, "something else")
assert not shard == other
assert not other == shard
def test_iter(shard):
assert iter(shard) is shard
def test_exhausted(shard):
assert shard.iterator_id is None
assert not shard.exhausted
shard.iterator_id = last_iterator
assert shard.exhausted
shard.iterator_id = None
assert not shard.exhausted
def test_token(caplog):
parent = Shard(stream_arn="parent-stream-arn", shard_id="parent-id")
shard = Shard(stream_arn="stream-arn", shard_id="shard-id",
iterator_id="iterator-id", iterator_type="at_sequence",
sequence_number="sequence-number", parent=parent)
expected = {
"stream_arn": "stream-arn",
"shard_id": "shard-id",
"iterator_type": "at_sequence",
"sequence_number": "sequence-number",
"parent": "parent-id"
}
assert shard.token == expected
# Removing parent omits it from the token entirely
shard.parent = None
expected.pop("parent")
assert shard.token == expected
assert not caplog.records
shard.iterator_type = "trim_horizon"
getattr(shard, "token")
assert caplog.record_tuples == [
("bloop.stream", logging.WARNING, "creating shard token at non-exact location \"trim_horizon\"")
]
def test_walk_tree():
shards = build_shards(10, {
0: 1,
1: [2, 3],
2: [4, 5, 6],
3: [7, 8],
4: 9
})
shard_ids = [shard.shard_id for shard in shards]
root = shards[0]
walked_shard_ids = [shard.shard_id for shard in root.walk_tree()]
assert set(shard_ids) == set(walked_shard_ids)
def test_jump_to(shard, session):
shard.empty_responses = 3
shard.shard_id = "shard-id"
shard.iterator_id = "iterator-id"
shard.iterator_type = "iterator-type"
shard.sequence_number = "sequence-number"
shard.stream_arn = "stream-arn"
session.get_shard_iterator.return_value = "new-shard-id"
shard.jump_to(iterator_type="latest", sequence_number="different-sequence-number")
assert shard.iterator_id == "new-shard-id"
assert shard.iterator_type == "latest"
assert shard.sequence_number == "different-sequence-number"
assert shard.empty_responses == 0
session.get_shard_iterator.assert_called_once_with(
stream_arn="stream-arn",
shard_id="shard-id",
iterator_type="latest",
sequence_number="different-sequence-number")
def test_seek_exhausted(shard, session):
"""Shard is exhausted before finding the target time"""
position = now_with_offset(-120)
session.get_shard_iterator.return_value = "new-iterator-id"
session.get_stream_records.side_effect = build_get_records_responses(0)
records = shard.seek_to(position)
assert not records
assert shard.exhausted
session.get_stream_records.assert_called_once_with("new-iterator-id")
def test_seek_catches_head(shard, session):
"""Shard is still open, and seek stops after catching up to head"""
position = now_with_offset(3600)
session.get_shard_iterator.return_value = "new-iterator-id"
session.get_stream_records.side_effect = build_get_records_responses(*([0] * (CALLS_TO_REACH_HEAD + 1)))
shard.seek_to(position)
# Not exhausted; just gave up after the number of empty responses required to reach head.
# The shard is probably still open, and the target time may be in the future.
assert not shard.exhausted
assert session.get_stream_records.call_count == CALLS_TO_REACH_HEAD
@pytest.mark.parametrize("time_offset", [0, -10])
@pytest.mark.parametrize("record_index", [0, 5, 9])
def test_seek_finds_position(time_offset, record_index, shard, session):
session.get_shard_iterator.return_value = "new-iterator-id"
# The value that will be inserted in the records, that we will find
exact_target = now_with_offset()
# The value we will use to find the exact_target with
with_offset = exact_target + datetime.timedelta(seconds=time_offset)
# Build a list of Records, then inject the appropriate spread of create times
[response] = build_get_records_responses(10)
records = response["Records"]
# Reverse the iterator so that the offset can increase
# as we move backwards from the target point on the left side
for offset, record in enumerate(reversed(records[:record_index])):
previous = exact_target - datetime.timedelta(hours=offset + 1)
record["dynamodb"]["ApproximateCreationDateTime"] = drop_milliseconds(previous)
# Same thing going forward for records after the target
for offset, record in enumerate(records[record_index + 1:]):
future = exact_target + datetime.timedelta(hours=offset + 1)
record["dynamodb"]["ApproximateCreationDateTime"] = drop_milliseconds(future)
# Set target record's exact value
records[record_index]["dynamodb"]["ApproximateCreationDateTime"] = drop_milliseconds(exact_target)
session.get_stream_records.return_value = response
results = shard.seek_to(with_offset)
assert len(results) == len(records[record_index:])
session.get_stream_records.assert_called_once_with("new-iterator-id")
def test_load_existing_children(session):
shards = build_shards(3, {0: [1, 2]}, session=session)
root = shards[0]
children = root.children[:]
root.load_children()
assert root.children == children
session.describe_stream.assert_not_called()
def test_load_children(session):
description = stream_description(5, {0: 1, 1: [2, 3]}, stream_arn="stream-arn")
session.describe_stream.return_value = description
# First shard in the description is unrelated to the root
root = Shard(
stream_arn="stream-arn",
shard_id=description["Shards"][0]["ShardId"],
session=session)
assert not root.children
# 0 -> 1 -> 2
# -> 3
# 4
child_id = description["Shards"][1]["ShardId"]
first_grandchild_id = description["Shards"][2]["ShardId"]
second_grandchild_id = description["Shards"][3]["ShardId"]
# Loading shouldn't rely on implicit ordering
random.shuffle(description["Shards"])
root.load_children()
assert set(s.shard_id for s in root.children) == {child_id}
assert root.children[0].shard_id == child_id
grandchild_ids = [s.shard_id for s in root.children[0].children]
assert set(grandchild_ids) == {first_grandchild_id, second_grandchild_id}
session.describe_stream.assert_called_once_with(stream_arn="stream-arn", first_shard=root.shard_id)
def test_get_records_exhausted(shard, session):
shard.iterator_id = last_iterator
records = shard.get_records()
assert not records
session.get_stream_records.assert_not_called()
def test_get_records_after_head(shard, session):
"""Once the shard has reached head, get_stream_records is called once per get_records."""
shard.empty_responses = CALLS_TO_REACH_HEAD
# Intentionally provide more than one page to ensure
# the call isn't stopping because there is only one page.
records = build_get_records_responses(1, 1)
session.get_stream_records.side_effect = records
    initial_iterator_id = shard.iterator_id
    returned_records = shard.get_records()
    assert len(returned_records) == 1
    assert returned_records[0]["meta"]["sequence_number"] == "0"
    session.get_stream_records.assert_called_once_with(initial_iterator_id)
assert shard.iterator_type == "at_sequence"
assert shard.sequence_number == "0"
@pytest.mark.parametrize("chain", [
# === 0 records on every page, from 1 - CALLS_TO_REACH_HEAD + 1 pages
*[[0] * i for i in range(1, CALLS_TO_REACH_HEAD + 1)],
# === 1 record on every page, from 1 - CALLS_TO_REACH_HEAD + 1 pages
*[[1] * i for i in range(1, CALLS_TO_REACH_HEAD + 1)],
# === 1 record, CALLS_TO_REACH_HEAD - 1 pages ===
*[([0] * i) + [1] + ([0] * (CALLS_TO_REACH_HEAD - 2 - i)) for i in range(CALLS_TO_REACH_HEAD - 1)],
# === 1 record, CALLS_TO_REACH_HEAD pages ===
*[([0] * i) + [1] + ([0] * (CALLS_TO_REACH_HEAD - 1 - i)) for i in range(CALLS_TO_REACH_HEAD)],
# === 1 record, CALLS_TO_REACH_HEAD + 1 pages ===
*[([0] * i) + [1] + ([0] * (CALLS_TO_REACH_HEAD - 0 - i)) for i in range(CALLS_TO_REACH_HEAD + 1)],
# Contains every permutation of 3-page runs:
# (first value is 00 to align the comment)
# 0, 0, 0
# 0, 0, 1
# 0, 1, 1
# 1, 1, 1
# 1, 1, 0
# 1, 0, 1
# 0, 1, 0
# 1, 0, 0
[00, 0, 0, 1, 1, 1, 0, 1, 0, 0],
])
def test_get_records_shard(chain, shard, session):
"""Catchup logic is applied until the CALLS_TO_REACH_HEAD limit, or shard is exhausted.
This holds even when there are non-empty calls in between. The catch
up logic will be applied on the next empty response."""
#
responses = build_get_records_responses(*chain)
session.get_stream_records.side_effect = responses
records = []
get_records_call_count = 0
while not shard.exhausted:
get_records_call_count += 1
records.extend(shard.get_records())
# Calls to shard.get_records() to exhaust the shard
assert get_records_call_count == expected_get_calls(chain)
# Call until exhausted, means we always reach the end of the chain
assert session.get_stream_records.call_count == len(chain)
assert len(records) == sum(chain)
@pytest.mark.parametrize("initial_sequence_number", ["11", "913"])
@pytest.mark.parametrize("record_count", [0, 1, 2])
def test_apply_records(initial_sequence_number, record_count, session):
# Temporarily ignoring that an iterator should never be "latest" and have a sequence_number..
shard = Shard(stream_arn="stream-arn", shard_id="shard-id", iterator_type="initial-iterator-type",
sequence_number=initial_sequence_number, session=session)
records = [dynamodb_record_with(key=True, sequence_number=i) for i in range(record_count)]
response = {
"Records": records,
"NextShardIterator": "next-iterator-id"
}
shard._apply_get_records_response(response)
session.get_stream_records.assert_not_called()
if records:
if initial_sequence_number:
# Don't overwrite; found records but already had a sequence_number
assert shard.iterator_type == "initial-iterator-type"
assert shard.sequence_number == initial_sequence_number
else:
# Remember first sequence_number; found records and no existing sequence_number
assert shard.iterator_type == "at_sequence"
assert shard.sequence_number == records[0]["dynamodb"]["SequenceNumber"] == 0
assert shard.empty_responses == 0
else:
# No records, no change
assert shard.iterator_type == "initial-iterator-type"
assert shard.sequence_number == initial_sequence_number
assert shard.empty_responses == 1
@pytest.mark.parametrize("include", [{"new"}, {"old"}, {"old", "new"}, {"key"}])
def test_reformat_record(include):
raw = dynamodb_record_with(**{field: True for field in include})
record = reformat_record(raw)
renames = {
"new": "NewImage",
"old": "OldImage",
"key": "Keys"
}
for field in {"new", "old", "key"}:
if field in include:
assert record[field] is raw["dynamodb"][renames[field]]
else:
assert record[field] is None
assert record["meta"]["created_at"] == raw["dynamodb"]["ApproximateCreationDateTime"]
assert record["meta"]["event"]["type"] == raw["eventName"].lower()
def test_unpack_empty_shards_list(session):
assert unpack_shards([], "stream-arn", session) == {}
def test_unpack_shards_from_token(session):
# multiple roots, 1:1 and 1:2 relations
shards = build_shards(5, {0: 1, 2: [3, 4]}, session, stream_arn="stream_arn")
by_id = {shard.shard_id: shard for shard in shards}
# unpacking shouldn't rely on ordering over the wire
tokens = [shard.token for shard in shards]
random.shuffle(tokens)
unpacked = unpack_shards(tokens, "stream_arn", session)
assert unpacked == by_id
def test_unpack_shards_from_describe_stream(session):
# multiple roots, 1:1 and 1:2 relations
shards = stream_description(5, {0: 1, 2: [3, 4]})["Shards"]
by_id = {shard["ShardId"]: shard for shard in shards}
# unpacking shouldn't rely on ordering over the wire
random.shuffle(shards)
unpacked = unpack_shards(shards, "stream_arn", session=session)
assert by_id.keys() == unpacked.keys()
for shard_id, shard in unpacked.items():
if shard.parent is None:
assert "ParentShardId" not in by_id[shard_id]
else:
assert shard.parent.shard_id == by_id[shard_id].get("ParentShardId")
def test_unpack_shards_with_deleted_parent_shard(session):
# multiple roots, 1:1 and 1:2 relations
shards = stream_description(6, {0: 1, 1: 2, 3: [4, 5]})["Shards"]
# removing old shard
shards = shards[1:]
by_id = {shard["ShardId"]: shard for shard in shards}
unpacked = unpack_shards(shards, "stream_arn", session=session)
assert by_id.keys() == unpacked.keys()
assert len([shard for shard in unpacked.values() if not shard.parent]) == 2
for shard_id, shard in unpacked.items():
if by_id[shard_id].get("ParentShardId") not in by_id:
assert shard.parent is None
else:
assert shard.parent.shard_id == by_id[shard_id].get("ParentShardId")
|
11499667
|
from io import BytesIO
import json
import os
import pymongo
import requests
from collections import deque
import zipfile
from datanator.util import mongo_util
import datanator.config.core
class TaxonTree(mongo_util.MongoUtil):
def __init__(self, cache_dirname, MongoDB, db, replicaSet=None,
verbose=False, max_entries=float('inf'), username = None,
                 password = None, authSource = 'admin'):
self.ENDPOINT_DOMAINS = {
'root': 'https://ftp.ncbi.nlm.nih.gov',
}
self.cache_dirname = cache_dirname
self.MongoDB = MongoDB
self.db = db
self.verbose = verbose
self.max_entries = max_entries
self.collection_str = 'taxon_tree'
self.path = os.path.join(self.cache_dirname, self.collection_str)
super(TaxonTree, self).__init__(cache_dirname=cache_dirname, MongoDB=MongoDB, replicaSet=replicaSet, db=db,
verbose=verbose, max_entries=max_entries, username = username,
password = password, authSource = authSource)
self.client, self.db, self.collection = self.con_db(self.collection_str)
self.repetition = 1 # how often verbose messages show
def load_content(self):
'''Load contents of several .dmp files into MongoDB
'''
self.download_dump()
self.parse_fullname_taxid() # taxidlineage.dmp fullnamelineage.dmp
if self.verbose:
print('Indexing tax_id ... \n')
self.collection.create_index( [("tax_id", pymongo.ASCENDING)] , background=False, sparse=True)
self.parse_nodes() # nodes.dmp
if self.verbose:
print('Indexing division_id and gene_code ... \n')
index1 = pymongo.IndexModel( [("division_id", pymongo.ASCENDING)] , background=False, sparse=True)
index2 = pymongo.IndexModel([("gene_code", pymongo.ASCENDING)] , background=False, sparse=True)
self.collection.create_indexes([index1, index2])
self.parse_division() # division.dmp
self.parse_names() # names.dmp
self.parse_gencode() # gencode.dmp
def download_dump(self):
os.makedirs(self.path, exist_ok=True)
cwd = '/pub/taxonomy/new_taxdump/'
noi = 'new_taxdump.zip'
database_url = self.ENDPOINT_DOMAINS['root'] + cwd + noi
local_filename = os.path.join(self.path, noi)
if self.verbose:
print ('\n Downloading taxdump zip file ...')
response = requests.get(database_url)
response.raise_for_status()
if self.verbose:
print (' ... Done!')
print ('Unzipping ...')
z = zipfile.ZipFile(BytesIO(response.content))
z.extractall(self.path)
if self.verbose:
print('... Done unzipping')
def parse_fullname_line(self, line):
        '''Parses a line in fullnamelineage.dmp and returns its elements as a list
        '''
a = [item.replace('\t', '') for item in line.split('|')[:-1]]
tax_id = a[0].strip()
tax_name = a[1].strip()
        lineage_fields = [item.split(';') for item in a]
        ancestor_name = [elem.lstrip() for elem in lineage_fields[2][:-1]]
return [tax_id, tax_name, ancestor_name]
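    # Illustrative sketch (added; not from the original source): for a typical
    # fullnamelineage.dmp line such as
    #   "9606\t|\tHomo sapiens\t|\tcellular organisms; Eukaryota; Homo; \t|\n"
    # this returns
    #   ['9606', 'Homo sapiens', ['cellular organisms', 'Eukaryota', 'Homo']]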
def parse_taxid_line(self, line):
        '''Parses a line in taxidlineage.dmp and returns the ancestor tax ids as a list.
        Each record in the file is terminated by "\t|\n" (tab, vertical bar,
        newline) and fields within a record are delimited by "\t|\t"
        (tab, vertical bar, tab) characters.
        '''
a = [item.replace('\t', '') for item in line.split('|')[:-1]]
return a[1].split(' ')[:-1]
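    # Illustrative sketch (added; not from the original source): for a
    # taxidlineage.dmp line such as "9606\t|\t131567 2759 9605 \t|\n"
    # this returns ['131567', '2759', '9605'] (the ancestor tax ids, root first).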
def count_line(self, file):
        '''Efficiently count the total number of lines in a given file
        '''
        i = -1
        with open(file) as f:
            for i, l in enumerate(f):
                pass
        return i + 1
def parse_fullname_taxid(self):
        '''Parse fullnamelineage.dmp and taxidlineage.dmp and store the results in MongoDB.
        Always run this first, before loading anything else
        (uses insert_one; the later parsers update the inserted documents).
        '''
full_name = os.path.join(self.path, 'fullnamelineage.dmp')
tax_id = os.path.join(self.path, 'taxidlineage.dmp')
i = 0
with open(full_name, 'r') as f1, open(tax_id, 'r') as f2:
count = min(self.max_entries, self.count_line(full_name))
for line_name, line_id in zip(f1, f2):
if i == self.max_entries:
break
if self.verbose and i % self.repetition == 0:
print ('Parsing lineage line {} of {}...'.format(i+1, count))
lineage_dict = {}
elem_name = self.parse_fullname_line(line_name)
elem_id = self.parse_taxid_line(line_id)
lineage_dict['tax_id'] = int(elem_name[0])
lineage_dict['tax_name'] = elem_name[1]
lineage_dict['anc_name'] = elem_name[2]
lineage_dict['anc_id'] = [int(item) for item in elem_id]
self.collection.insert_one( lineage_dict
)
i += 1
def parse_nodes_line(self, line):
'''Parse lines in nodes.dmp
'''
return [item.replace('\t', '') for item in line.split('|')[:-1]]
def parse_nodes(self):
'''nodes.dmp
'''
file_name = os.path.join(self.path, 'nodes.dmp')
i = 0
count = min(self.max_entries, self.count_line(file_name))
with open(file_name, 'r') as f:
for line in f:
if i == self.max_entries:
break
if self.verbose and i%self.repetition==0:
print ('Parsing nodes line {} of {} ...'.format(i+1, count))
node_dict = {}
elem = self.parse_nodes_line(line)
tax_id = int(elem[0])
node_dict['rank'] = elem[2]
node_dict['locus_name_prefix'] = elem[3]
node_dict['division_id'] = int(elem[4])
node_dict['gene_code'] = int(elem[6])
node_dict['comments'] = elem[-6]
node_dict['plastid_gene_code'] = elem[-5]
node_dict['hydrogenosome_gene_id'] = elem[-2]
self.collection.update_one( {'tax_id': tax_id},
{'$set': node_dict},
upsert = True
)
i += 1
def parse_division(self):
'''division.dmp
'''
file_name = os.path.join(self.path, 'division.dmp')
i = 0
count = min(self.max_entries, self.count_line(file_name))
with open(file_name, 'r') as f:
for line in f:
if i == self.max_entries:
break
if self.verbose and i % self.repetition == 0:
print ('Parsing division line {} of {} ...'.format(i+1, count))
name_dict = {}
elem = self.parse_nodes_line(line)
division_id = int(elem[0])
name_dict['division_cde'] = elem[1]
name_dict['division_name'] = elem[2]
name_dict['division_comments'] = elem[3]
self.collection.update_many( {'division_id': division_id},
{'$set': name_dict},
upsert = True
)
i += 1
def parse_names(self):
'''names.dmp
1 | all | | synonym |
1 | root | | scientific name |
2 | bacteria | bacteria <blast2> | blast name |
2 | Bacteria | Bacteria <prokaryotes> | scientific name |
2 | eubacteria | | genbank common name
'''
file_name = os.path.join(self.path, 'names.dmp')
i = 0
count = min(self.max_entries, self.count_line(file_name))
with open(file_name, 'r') as f:
for line in f:
if i == self.max_entries:
break
if self.verbose and i % self.repetition == 0:
print ('Parsing names line {} of {} ...'.format(i+1, count))
name_dict = {}
elem = self.parse_nodes_line(line)
tax_id = int(elem[0])
name_dict['name_txt'] = elem[1]
name_dict['unique_variant_name'] = elem[2]
name_dict['name_class'] = elem[3]
self.collection.update_one( {'tax_id': tax_id},
{'$addToSet': name_dict},
upsert = True
)
i += 1
def parse_gencode(self):
'''gencode.dmp
'''
file_name = os.path.join(self.path, 'gencode.dmp')
i = 0
count = min(self.max_entries, self.count_line(file_name))
with open(file_name, 'r') as f:
for line in f:
if i == self.max_entries:
break
if self.verbose and i % self.repetition == 0:
print ('Parsing gencode line {} of {} ...'.format(i+1, count))
gencode_dict = {}
elem = self.parse_nodes_line(line)
gene_code = int(elem[0])
gencode_dict['abbreviation'] = elem[1]
gencode_dict['gene_code_name'] = elem[2]
gencode_dict['gene_code_cde'] = elem[3]
gencode_dict['gene_code_starts'] = elem[4]
self.collection.update_many( {'gene_code': gene_code},
{'$set': gencode_dict},
upsert = True
)
i += 1
def insert_canon_anc(self, start=0):
"""Insert two arrays to each document, one is
canon_anc_id, the other is canon_anc_name
"""
query = {"canon_anc_ids": {"$exists": False}}
canon_info = {} # store info of canon to avoid multiple db queries
ids_ranked = deque()
canons = ['species', 'genus', 'family', 'order', 'class', 'phylum', 'kingdom', 'superkingdom']
count = self.collection.count_documents(query)
projection = {'anc_name': 1, 'anc_id': 1, 'tax_id': 1}
docs = self.collection.find(filter=query, skip=start, projection=projection,
no_cursor_timeout=True, batch_size=1000).sort('tax_id', 1).hint('tax_id_1')
for i, doc in enumerate(docs):
if i == self.max_entries:
break
if self.verbose and i % 100 == 0:
print('Processing doc {} out of {}'.format(i + start, count))
canon_anc_names = deque()
canon_anc_ids = deque()
anc_ids = doc['anc_id']
anc_names = doc['anc_name']
if len(anc_ids) == 0:
continue
for anc_id, anc_name in zip(anc_ids, anc_names):
if anc_id not in ids_ranked: # no need to look in canon_info dictionary
rank = self.collection.find_one({'tax_id': anc_id})['rank']
if rank in canons:
canon_anc_ids.append(anc_id)
canon_anc_names.append(anc_name)
canon_info[anc_id] = True
ids_ranked.append(anc_id)
else:
ids_ranked.append(anc_id)
else: # no need to perform db lookups
c = canon_info.get(anc_id, False)
if c:
canon_anc_ids.append(anc_id)
canon_anc_names.append(anc_name)
else:
continue
# update doc
self.collection.update_one({'_id': doc['_id']},
{'$set': {'canon_anc_ids': list(canon_anc_ids),
'canon_anc_names': list(canon_anc_names)}},
upsert=False)
def main():
db = 'datanator'
username = datanator.config.core.get_config()[
'datanator']['mongodb']['user']
    password = datanator.config.core.get_config(
    )['datanator']['mongodb']['password']
MongoDB = datanator.config.core.get_config(
)['datanator']['mongodb']['server']
cache_dirname = '/root/karr_lab/datanator/datanator/data_source/cache/taxon_tree'
manager = TaxonTree(cache_dirname=cache_dirname, MongoDB=MongoDB, replicaSet=None,
db=db, verbose=True, username=username, password=password)
# manager.load_content()
# manager.insert_canon_anc(start=250600)
manager.insert_canon_anc(start=0)
if __name__ == '__main__':
main()
|
11499704
|
import networkx as nx
from networkx import pagerank, pagerank_numpy, pagerank_scipy
import time, json, os
output_folder = 'comparison_selected'
'''
!!! EGO !!!
'''
# ego_path = os.path.join(os.getcwd(), '../../ego_networks/')
ego_path = '../../ego_networks/'
ego_output_path = './comparison_ego_graphs/'
# print(ego_path)
files = []
# r=root, d=directories, f = files
for r, d, f in os.walk(ego_path):
for file in f:
if '.csv' in file:
files.append(os.path.join(r, file))
for f in files:
for algo in [pagerank, pagerank_numpy, pagerank_scipy]:
ego_graph = nx.read_edgelist(f, create_using=nx.DiGraph())
tic = time.time()
pr = algo(ego_graph, alpha=0.85)
toc = time.time()
        duration = (toc-tic)*1000
        print("PageRank on graph with |V|=" + str(len(ego_graph.nodes)) + " and |E|=" + str(len(ego_graph.edges)) + " by " + algo.__name__ + " took " + str(duration) + " ms")
        with open(ego_output_path + algo.__name__ + '_ego_network_v_' + str(len(ego_graph.nodes)) + '_e_' + str(len(ego_graph.edges)) + '.json', 'w') as out_file:
            out_file.write(json.dumps(pr))
|
11499720
|
import datetime
import os
import subprocess
import numpy
from scipy.stats import norm
from . import romannumerals
# ToDo: Bring back scale bar
# ToDo: Add option for solid fill of vectors
def roundto(num, nearest):
"""
Rounds :param:`num` to the nearest increment of :param:`nearest`
"""
return int((num + (nearest / 2)) // nearest * nearest)
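# Quick examples (added for illustration):
#   roundto(7, 5)   -> 5
#   roundto(8, 5)   -> 10
#   roundto(15, 10) -> 20   (ties round up)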
def convert_chromosome_name(chrom_string, dialect='ucsc'):
"""
Try to auto-detect chromosome number and convert it to the specified "dialect".
Valid dialects are "ucsc", "ensembl" and "yeast".
:param chrom_string:
    :param dialect:
:return:
"""
try:
chrom_string = str(romannumerals.roman_to_int(chrom_string))
except ValueError:
pass
if dialect == 'ensembl':
if chrom_string == 'chrM':
            return 'dmel_mitochondrion_genome'
elif chrom_string[:3].lower() == 'chr':
return chrom_string[3:]
else:
return chrom_string
elif dialect == 'ucsc':
if chrom_string == 'dmel_mitochondrion_genome':
return 'chrM'
elif chrom_string[:3].lower() == 'chr':
return chrom_string
else:
return 'chr{}'.format(chrom_string)
elif dialect == 'yeast':
if chrom_string[:3].lower() == 'chr':
chrom_string = chrom_string[3:]
try:
return romannumerals.int_to_roman(int(chrom_string))
except ValueError:
return chrom_string
else:
raise ValueError('Unknown dialect {}'.format(dialect))
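# Illustrative examples for convert_chromosome_name (added, not in the
# original source):
#   convert_chromosome_name('chr2', dialect='ensembl') -> '2'
#   convert_chromosome_name('IV', dialect='ucsc')      -> 'chr4'
#   convert_chromosome_name('chr4', dialect='yeast')   -> 'IV'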
def binary_search_tag_file(tag_filename, search_target):
"""
Find the offset (in bytes) in :param:`tag_filename` that corresponds
to the start of the first tag that is equal to or greater than :param:`search_target`.
If none of the reads have a start position greater than :param:`search_target`,
return None.
Note that positions in tag files have a 1-based index.
"""
def get_read_start(file_offset):
tag_file.seek(file_offset)
if file_offset > 0:
_ = tag_file.readline() # read forward to get to a line start
this_line = tag_file.readline().strip()
if tag_file.tell() >= filesize:
# We've reached the end of the file and the reads are still upstream of the target
return None
else:
return int(this_line.split('\t')[1])
filesize = os.path.getsize(tag_filename)
search_window_start = 0
search_window_end = filesize - 1
guess_genomic_start = -1
guess = int((search_window_start + search_window_end) / 2)
with open(tag_filename, 'rt') as tag_file:
first_genomic_start = get_read_start(search_window_start)
# last_genomic_start = get_read_position(search_window_end)
if search_target < first_genomic_start:
return search_window_start
while search_window_end - search_window_start > 1:
guess = int((search_window_start + search_window_end) / 2)
guess_genomic_start = get_read_start(guess)
            if guess_genomic_start is None:
return None
# print(search_window_start, guess, search_window_end, guess_genomic_start)
if guess_genomic_start < search_target:
# print('\ttoo low!')
search_window_start = guess
elif guess_genomic_start > search_target:
search_window_end = guess
# print('\ttoo high!')
else:
# print('\tjust right!')
break
if guess_genomic_start == -1:
return None
if guess_genomic_start < search_target:
guess += 1
tag_file.seek(guess)
_ = tag_file.readline()
guess = tag_file.tell()
return guess
def bgzip_gff(gff3_fname, bgzipped_fname):
"""
Compress a GFF3 file in block-gzip format (requires that bgzip be accessible on the current path).
If :param gff3_fname: ends with '.gz' assumes that the file is gzipped, otherwise assumes it is uncompressed.
    :param gff3_fname:
:param bgzipped_fname:
:return:
"""
    if bgzipped_fname == gff3_fname:
        log_print('Destination and source file cannot have the same name!')
        return
cmd_line = '{} {} | sort -k1,1 -k4,4n | bgzip > {}'.format(('cat', 'zcat')[gff3_fname.endswith('.gz')], gff3_fname,
bgzipped_fname)
try:
assert os.path.isfile(gff3_fname) # needed since no error occurs otherwise
subprocess.check_call(cmd_line, shell=True)
except subprocess.CalledProcessError as cpe:
log_print('Unsuccessful. Got return code {}'.format(cpe.returncode))
except AssertionError:
log_print('{} not found!'.format(gff3_fname))
else:
log_print('Successfully generated block-gzipped file {} from {}'.format(bgzipped_fname, gff3_fname))
def generate_tabix_index(target_fname):
"""
Index :param target_fname: with tabix. Requires that the directory in which :param:target_fname: resides is
writeable.
:param target_fname:
:return:
"""
cmd_line = 'tabix -f -p gff {}'.format(target_fname)
try:
return_code = subprocess.check_call(cmd_line, shell=True)
except subprocess.CalledProcessError as cpe:
log_print('Unsuccessful. Got return code {}'.format(cpe.returncode))
else:
log_print('Successfully indexed block-gzipped file {}'.format(target_fname))
def pretty_now():
"""
Returns the current date/time in a nicely formatted string (without decimal seconds)
"""
return datetime.datetime.strftime(datetime.datetime.now(), '%Y-%b-%d %H:%M:%S')
def log_print(message, tabs=1):
"""
Print a chunk of text preceded by a timestamp and an optional number of tabs (default 1).
:param message:
:param tabs:
:return:
"""
print('{}{}{}'.format(pretty_now(), '\t' * tabs, message))
def gaussian_kernel(sd, sd_cutoff=3, normalize=False):
"""
Generate and return a numpy.Array whose elements are proportional to the PDF of a normal distribution
having standard deviation :param:`sd`.
:param sd:
:param sd_cutoff:
:param normalize:
:return:
"""
bw = sd_cutoff * sd * 2 + 1
midpoint = sd_cutoff * sd
kern = numpy.zeros(bw)
frozen_rv = norm(scale=sd)
for i in range(bw):
kern[i] = frozen_rv.pdf(i - midpoint)
if normalize:
kern = kern / kern.max()
return kern
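# Usage sketch (added for illustration): with sd=2 and the default sd_cutoff=3
# the kernel spans 2 * 3 * 2 + 1 = 13 elements and peaks at the midpoint.
#   kern = gaussian_kernel(2)
#   assert len(kern) == 13 and kern.argmax() == 6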
def add_label(ax, tick, tick_label, axis='x'):
"""
Updates the set of ticks and tick labels for the specified matplotlib.Axes object
and axis.
    If the tick already exists, its label will be updated. If not, it will be created and labeled
appropriately.
"""
if axis == 'y':
tick_getter, label_getter = ax.get_yticks, ax.get_yticklabels
tick_setter, label_setter = ax.set_yticks, ax.set_yticklabels
else:
tick_getter, label_getter = ax.get_xticks, ax.get_xticklabels
tick_setter, label_setter = ax.set_xticks, ax.set_xticklabels
labels = dict(zip(tick_getter(), label_getter()))
labels[tick] = tick_label
new_ticks, new_labels = zip(*sorted(labels.items()))
tick_setter(new_ticks)
label_setter(new_labels)
def adjust_limits(ax, new_position, axis='y', padding_fraction=0.1):
"""
If necessary adjusts the limits for the specified :param axis: on
    :param ax: to accommodate :param new_position: according to the
following scheme:
1. Assumes that the current limits are the
smallest and largest content item minus / plus a padding equal to
:param padding_fraction: * the span between the smallest
and largest content item.
2. If :param new_position: is beyond the inferred content limits,
adjust the padding to :param padding_fraction: * the new content
span, then adjust the plot limits to the new content limits
minus / plus the new padding.
"""
assert padding_fraction < 0.5, 'padding_fraction must be below 0.5!'
if axis == 'y':
limit_getter = ax.get_ylim
limit_setter = ax.set_ylim
else:
limit_getter = ax.get_xlim
limit_setter = ax.set_xlim
current_plot_min, current_plot_max = limit_getter()
current_plot_span = current_plot_max - current_plot_min
current_data_span = current_plot_span / (1 + 2 * padding_fraction)
current_pad = current_data_span * padding_fraction
current_data_min = current_plot_min + current_pad
current_data_max = current_plot_max - current_pad
# print(current_plot_min, current_plot_max, current_plot_span)
# print(current_data_min, current_data_max, current_data_span, current_pad)
if new_position > current_data_max:
new_data_min = current_data_min
new_data_max = new_position
elif new_position < current_data_min:
new_data_min = new_position
new_data_max = current_data_max
else:
# no changes needed
return
new_data_span = new_data_max - new_data_min
new_pad = new_data_span * padding_fraction
new_plot_min = new_data_min - new_pad
new_plot_max = new_data_max + new_pad
# print(new_data_min, new_data_max, new_data_span, new_pad)
# print(new_plot_min, new_plot_max)
limit_setter((new_plot_min, new_plot_max))
def diag_indices(n, k=0):
"""
Return the indices corresponding to the kth diagonal of an n X n array
in the form of a tuple of (x coords, y coords).
Created since numpy does not provide this functionality.
"""
if k <= 0:
x_coords = numpy.arange(-k, n)
y_coords = numpy.arange(0, n + k)
else:
x_coords = numpy.arange(0, n - k)
y_coords = numpy.arange(k, n)
return (x_coords, y_coords)
|
11499729
|
import colt
@colt.register("plugh", constructor="plugh_constructor")
class Plugh:
def __init__(self, x: str, y: str) -> None:
self.x = x
self.y = y
@classmethod
def plugh_constructor(cls, x: str, y: str) -> "Plugh":
x += "_x"
y += "_y"
return cls(x, y)
def test_colt_constructor():
config = {"@type": "plugh", "*": ["plugh"], "y": "plugh"}
obj = colt.build(config)
assert obj.x == "plugh_x"
assert obj.y == "plugh_y"
|
11499730
|
import numpy as np
from scipy.ndimage import distance_transform_edt
def visualize_masks(mask, mask_pred):
m = np.ones((256, 256, 3))
m[np.logical_and(mask, mask_pred)] = np.array([0.1, 0.5, 0.1])
m[np.logical_and(mask, np.logical_not(mask_pred))] = np.array([1, 0, 0])
m[np.logical_and(np.logical_not(mask), mask_pred)] = np.array([0, 0, 1])
return m
def compute_distance_transform(mask):
dist_out = distance_transform_edt(1 - mask)
dist_out = 2 * dist_out / max(mask.shape)
return dist_out
def rle_to_binary_mask(rle):
"""
rle should be coco format: {"counts": [], "size": []}
"""
if isinstance(rle, list):
return np.stack([rle_to_binary_mask(r) for r in rle])
counts = rle["counts"]
if isinstance(counts, str):
counts = list(map(int, counts.split(" ")))
mask = np.zeros(np.prod(rle["size"]), dtype=bool)
running_length = 0
for start, length in zip(counts[::2], counts[1::2]):
running_length += start
mask[running_length : running_length + length] = 1
running_length += length
return mask.reshape(rle["size"], order="F")
def binary_mask_to_rle(binary_mask):
counts = []
last_elem = 0
running_length = 0
for elem in binary_mask.ravel(order="F"):
if elem == last_elem:
pass
else:
counts.append(running_length)
running_length = 0
last_elem = elem
running_length += 1
counts.append(running_length)
rle = {"counts": " ".join(map(str, counts)), "size": list(binary_mask.shape)}
return rle
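# Minimal round-trip sketch (added for illustration; not part of the original
# module): encode a tiny mask and decode it back.
if __name__ == "__main__":
    demo_mask = np.array([[0, 1], [1, 0]], dtype=bool)
    demo_rle = binary_mask_to_rle(demo_mask)  # {'counts': '1 2 1', 'size': [2, 2]}
    assert (rle_to_binary_mask(demo_rle) == demo_mask).all()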
|
11499747
|
import os
from lingvo import model_registry
from lingvo.core import base_model_params
from lingvo.core import datasource
from lingvo.core import program
from lingvo.core import py_utils
from lingvo.core import schedule
from lingvo.core import tokenizers
from lingvo.tasks.asr import input_generator
from lingvo.tasks.asr import ctc_model
@model_registry.RegisterSingleTaskModel
class Peoplesspeech100Base(base_model_params.SingleTaskModelParams):
"""Base parameters for Peoplesspeech 100k hour task."""
def _CommonInputParams(self, is_eval):
"""Input generator params for Peoplesspeech."""
p = input_generator.AsrInput.Params()
# Insert path to the base directory where the data are stored here.
# Generated using scripts in lingvo/tasks/asr/tools.
p.file_datasource = datasource.PrefixedDataSource.Params()
p.file_datasource.file_type = 'tfrecord'
p.file_datasource.file_pattern_prefix = 'gs://the-peoples-speech-west-europe/PeoplesSpeech/v0.7.1/'
p.frame_size = 80
# Interesting. First I've heard of this.
p.append_eos_frame = False
p.pad_to_max_seq_length = True
p.file_random_seed = 0
p.file_buffer_size = 10000
# N1 standard 2 has only 2 vCPUs, so we may want a larger machine.
# https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types
p.file_parallelism = 16
if is_eval:
p.source_max_length = 3600
p.bucket_upper_bound = [639, 1062, 1275, 1377, 1449, 1506, 1563, 3600]
else:
p.source_max_length = 1710
p.bucket_upper_bound = [639, 1062, 1275, 1377, 1449, 1506, 1563, 1710]
p.bucket_batch_limit = [48] * 8
return p
def SetBucketSizes(self, params, bucket_upper_bound, bucket_batch_limit):
"""Sets bucket sizes for batches in params."""
params.bucket_upper_bound = bucket_upper_bound
params.bucket_batch_limit = bucket_batch_limit
return params
def Train(self):
p = self._CommonInputParams(is_eval=False)
p.file_datasource.file_pattern = 'train/train.tfrecords-*'
p.num_samples = 2292260
return p
def Dev(self):
p = self._CommonInputParams(is_eval=True)
p.file_datasource.file_pattern = (
'devtest/dev.tfrecords-00000-of-00001')
p.num_samples = 3000
return p
def Test(self):
p = self._CommonInputParams(is_eval=True)
p.file_datasource.file_pattern = (
'devtest/test.tfrecords-00000-of-00001')
p.num_samples = 3000
return p
def Task(self):
p = ctc_model.CTCModel.Params()
p.name = 'peoplesspeech'
# No default encoder params in this class.
tp = p.train
tp.learning_rate = 1e-4
tp.lr_schedule = schedule.ContinuousSchedule.Params().Set(
start_step=25_000, half_life_steps=5_000, min=1e-6)
tp.scale_gradients = False
tp.l2_regularizer_weight = None
# Setting p.eval.samples_per_summary to a large value ensures that dev,
# devother, test, testother are evaluated completely (since num_samples for
# each of these sets is less than 5000), while train summaries will be
# computed on 5000 examples.
p.eval.samples_per_summary = 2700
p.eval.decoder_samples_per_summary = 2700
return p
def ProgramSchedule(self):
return program.SimpleProgramScheduleForTask(
train_dataset_name='Train',
train_steps_per_loop=500,
eval_dataset_names=['Dev', 'Train'],
eval_steps_per_loop=50,
decode_steps_per_loop=0)
@model_registry.RegisterSingleTaskModel
class Peoplesspeech100Grapheme(Peoplesspeech100Base):
GRAPHEME_TARGET_SEQUENCE_LENGTH = 620
GRAPHEME_VOCAB_SIZE = 76
BLANK_IDX = 73
def InitializeTokenizer(self, params):
"""Initializes a grapheme tokenizer."""
params.tokenizer = tokenizers.AsciiTokenizer.Params()
tokp = params.tokenizer
tokp.vocab_size = self.GRAPHEME_VOCAB_SIZE
tokp.append_eos = False
tokp.target_unk_id = 0
tokp.target_sos_id = 1
tokp.target_eos_id = 2
params.target_max_length = self.GRAPHEME_TARGET_SEQUENCE_LENGTH
return params
def Train(self):
p = super().Train()
return self.InitializeTokenizer(params=p)
def Dev(self):
p = super().Dev()
return self.InitializeTokenizer(params=p)
def Test(self):
p = super().Test()
return self.InitializeTokenizer(params=p)
def Task(self):
p = super().Task()
p.vocab_size = self.GRAPHEME_VOCAB_SIZE
p.blank_index = self.BLANK_IDX
return p
@model_registry.RegisterSingleTaskModel
class Grphm_DO_SpecAug_ConvStk_6x512Bidi(Peoplesspeech100Grapheme):
def Task(self):
p = super().Task()
ep = p.encoder_v2
ep.use_specaugment = True
elp = p.encoder_v2.lstm_block
elp.dropout.keep_prob = 0.8
elp.lstm_cell_size = 512
elp.num_lstm_layers = 6
elp.lstm_type = 'bidi'
ep.stacking_subsampler = None
ecp = ep.conv_subsampler
ecp.input_shape = [None, None, 80, 1]
return p
@model_registry.RegisterSingleTaskModel
class Grphm_DO_SpecAug_ConvStk_6x512Bidi_40batchsize(Peoplesspeech100Grapheme):
def Train(self):
p = super().Train()
# OOM with 48
p.bucket_batch_limit = [40] * 8
return p
def Task(self):
p = super().Task()
ep = p.encoder_v2
ep.use_specaugment = True
elp = p.encoder_v2.lstm_block
elp.dropout.keep_prob = 0.8
elp.lstm_cell_size = 512
elp.num_lstm_layers = 6
elp.lstm_type = 'bidi'
ep.stacking_subsampler = None
ecp = ep.conv_subsampler
ecp.input_shape = [None, None, 80, 1]
return p
|
11499786
|
import asyncio
from .azip import azip
from .active_aiter import active_aiter
from .map_filter_aiter import map_filter_aiter
from .push_aiter import push_aiter
class gated_aiter:
"""
Returns an aiter that you can "push" integer values into.
When a number is pushed, that many items are allowed out through the gate.
This is kind of like a discrete version of an electronic transistor.
:type aiter: aiter
:param aiter: an async iterator
:return: an async iterator yielding the same values as the original aiter
:rtype: :class:`aiter.gated_aiter <gated_aiter>`
"""
def __init__(self, aiter):
self._gate = push_aiter()
self._open_aiter = active_aiter(azip(aiter, map_filter_aiter(range, self._gate))).__aiter__()
self._semaphore = asyncio.Semaphore()
def __aiter__(self):
return self
async def __anext__(self):
async with self._semaphore:
return (await self._open_aiter.__anext__())[0]
def push(self, count):
"""
        Allow ``count`` additional items out through the gate.
:type count: int
:param count: the number of items that can be allowed out the aiter. These are cumulative.
"""
if not self._gate.is_stopped():
self._gate.push(count)
def stop(self):
"""
After the previously authorized items (from `push`) are pulled out the aiter, the aiter will exit.
"""
self._gate.stop()
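# Usage sketch (added for illustration; assumes an async generator as input):
#
#   async def demo():
#       async def numbers():
#           for i in range(10):
#               yield i
#       gate = gated_aiter(numbers())
#       gate.push(2)   # let two items through the gate
#       gate.stop()    # end iteration once they are consumed
#       return [x async for x in gate]
#
#   # asyncio.run(demo()) would be expected to return [0, 1]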
|
11499791
|
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "sparselearning",
version = "1.0.0",
author = "<NAME>",
author_email = "<EMAIL>",
description = ("Sparse learning library including sparse momentum algorithm."),
license = "GNU",
keywords = "deep learning, sparse learning",
url = "http://packages.python.org/sparselearning",
packages=['sparselearning'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 1 - Alpha",
"Topic :: Machine Learning",
],
)
|
11499800
|
import torch
import argparse
import os
import numpy as np
from misc.utils import set_log, make_env, set_policy
from tensorboardX import SummaryWriter
def main(args):
# Create directories
if not os.path.exists("./logs"):
os.makedirs("./logs")
if not os.path.exists("./pytorch_models"):
os.makedirs("./pytorch_models")
# Set logs
tb_writer = SummaryWriter('./logs/tb_{0}'.format(args.log_name))
log = set_log(args)
# Create env
env = make_env(log, args)
# Set seeds
env.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# Initialize policy
agent = set_policy(env, tb_writer, log, args, name=args.algorithm)
if args.test:
from tester import test
test(agent=agent, env=env, log=log, tb_writer=tb_writer, args=args)
else:
from trainer import train
train(agent=agent, env=env, log=log, tb_writer=tb_writer, args=args)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="")
# Algorithm
parser.add_argument(
"--algorithm", type=str, choices=["DQN"], required=True,
help="Algorithm to train agent")
parser.add_argument(
"--tau", default=0.01, type=float,
help="Target network update rate")
parser.add_argument(
"--batch-size", default=128, type=int,
help="Batch size for both actor and critic")
parser.add_argument(
"--critic-lr", default=0.001, type=float,
help="Learning rate for critic")
parser.add_argument(
"--discount", default=0.99, type=float,
help="Discount factor")
# Env
parser.add_argument(
"--env-name", type=str, required=True,
help="OpenAI gym environment name")
parser.add_argument(
"--n-action", type=int, default=4,
help="# of possible actions")
# Misc
parser.add_argument(
"--test", action="store_true",
help="If True, perform test")
parser.add_argument(
"--prefix", default="", type=str,
help="Prefix for tb_writer and logging")
parser.add_argument(
"--seed", default=0, type=int,
help="Sets Gym, PyTorch and Numpy seeds")
args = parser.parse_args()
# Set log name
args.log_name = "env::%s_prefix::%s_log" % (args.env_name, args.prefix)
main(args=args)
|
11499803
|
import datetime
def ifnull(value, default):
    # Assumed helper (mirrors SQL IFNULL): fall back to a default when value is None.
    return default if value is None else value
def get_funds(json):
j = json
financial_org_name = j['name']
financial_org_permalink = j['permalink']
funds = j['funds']
fds = []
for f in funds:
fund_name = f['name']
funded_year = ifnull(f['funded_year'],1900)
funded_month = ifnull(f['funded_month'],1)
funded_day = ifnull(f['funded_day'],1)
funded_date = (str(funded_year) + "-" + str(funded_month) + "-" +
str(funded_day) + "T00:00:00Z")
raised_amount = f['raised_amount']
raised_currency_code = f['raised_currency_code']
extracted_at = datetime.datetime.today().strftime("%Y-%m-%dT%H:%M:%SZ")
fd = (fund_name,financial_org_permalink,financial_org_name,
funded_year,funded_month,funded_day,funded_date,raised_amount,
raised_currency_code,extracted_at)
fds.append(fd)
return fds
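# Illustrative input/output sketch (added; field names follow the usage above):
#   get_funds({'name': 'Acme Capital', 'permalink': 'acme-capital',
#              'funds': [{'name': 'Fund I', 'funded_year': 2010,
#                         'funded_month': None, 'funded_day': None,
#                         'raised_amount': 100000000,
#                         'raised_currency_code': 'USD'}]})
# returns one tuple per fund, with missing date parts defaulted
# (funded_date '2010-1-1T00:00:00Z') and an extraction timestamp appended.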
|
11499830
|
from Utils.Eval.Metrics import ComputeMetrics
class LGBMImportance:
def __init__(self, model=None, model_path=None):
if model_path is not None:
self.model = self.load_model(model_path)
elif model is not None:
self.model = model
def load_model(self, path):
from Models.GBM.LightGBM import LightGBM
model = LightGBM()
model.load_model(path)
return model
def fit(self, *params):
print("FIT PARAMS")
print(params)
def score(self, X_test, Y_test):
predictions = self.model.get_prediction(X_test)
cm = ComputeMetrics(predictions, Y_test.to_numpy())
# Evaluating
rce = cm.compute_rce()
print(rce)
return rce
|
11499858
|
from enum import Enum
from marshmallow import Schema, fields
from marshmallow_enum import EnumField
class SurfboardMetricModel:
def __init__(self, status, speed_in_mph, altitude_in_feet, water_temperature_in_f):
# We will automatically generate the new id
self.id = 0
self.status = status
self.speed_in_mph = speed_in_mph
self.altitude_in_feet = altitude_in_feet
self.water_temperature_in_f = water_temperature_in_f
class SurfboardMetricManager():
last_id = 0
def __init__(self):
self.metrics = {}
def insert_metric(self, metric):
self.__class__.last_id += 1
metric.id = self.__class__.last_id
self.metrics[self.__class__.last_id] = metric
def get_metric(self, id):
return self.metrics[id]
def delete_metric(self, id):
del self.metrics[id]
class SurferStatus(Enum):
IDLE = 0
PADDLING = 1
RIDING = 2
RIDE_FINISHED = 3
WIPED_OUT = 4
class SurfboardMetricSchema(Schema):
id = fields.Integer(dump_only=True)
status = EnumField(enum=SurferStatus, required=True)
speed_in_mph = fields.Integer(required=True)
altitude_in_feet = fields.Integer(required=True)
water_temperature_in_f = fields.Integer(required=True)
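# Usage sketch (added for illustration): round-trip a metric through the schema.
#   schema = SurfboardMetricSchema()
#   data = schema.load({'status': 'RIDING', 'speed_in_mph': 14,
#                       'altitude_in_feet': 0, 'water_temperature_in_f': 68})
#   # data['status'] == SurferStatus.RIDING; build the model from the result:
#   metric = SurfboardMetricModel(**data)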
|
11499881
|
import unittest
import os
class TestdriverDecline(unittest.TestCase):
def test_driver_decline(self):
from MaaSSim.simulators import simulate as simulator_driver_decl
from MaaSSim.traveller import travellerEvent
from MaaSSim.utils import get_config
from MaaSSim.decisions import dummy_False
from MaaSSim.decisions import f_decline
CONFIG_PATH = os.path.join(os.path.dirname(__file__), 'config_platform_choices.json')
params = get_config(CONFIG_PATH, root_path=os.path.dirname(__file__)) # load from .json file
        params.times.patience = 600  # traveller patience of 600 s
        params.nP = 10  # requests (and passengers)
params.nV = 10 # vehicles
params.simTime = 4
params.nD = 1
sim = simulator_driver_decl(params=params, f_driver_decline=f_decline)
del sim
del params
|
11499887
|
import numpy as np
from collections import OrderedDict
import matplotlib.pyplot as plt
import seaborn as sns
def getStats(name):
ff = open('{}.pol_scores'.format(name),'r')
scores = []
for line in ff.readlines():
scores.append(float(line))
ff.close()
print('\n=== Politeness Scores in {} === '.format(name))
print('max : {}'.format(np.max(scores)))
print('min : {}'.format(np.min(scores)))
print('mean : {}'.format(np.mean(scores)))
print('median : {}'.format(np.median(scores)))
print('std. dev. : {}'.format(np.std(scores)))
def getMinMaxMean(name):
ff = open('{}.pol_scores'.format(name),'r')
scores = []
for line in ff.readlines():
scores.append(float(line))
ff.close()
ff = open('../TokenisedResponses/{}.index'.format(name),'r')
idx = [int(ll) for ll in ff.readlines()]
ff.close()
mins = []
maxs = []
means = []
medians = []
pointer = 0
for ii in idx:
if ii != 0:
ss = scores[pointer:pointer+ii]
mins.append(np.min(ss))
maxs.append(np.max(ss))
means.append(np.mean(ss))
medians.append(np.median(ss))
pointer += ii
return mins, maxs, means, medians
def plotDist(names):
all_scores = OrderedDict()
minv = 9999
maxv = -9999
for nn in names:
mins, maxs, means, medians = getMinMaxMean(nn)
scores = maxs[:]
if np.min(scores) < minv: minv = np.min(scores)
if np.max(scores) > maxv: maxv = np.max(scores)
all_scores[nn] = scores
label_names = {'Increase':'iResp', 'Decrease':'dResp', 'NoChange':'kResp'}
line_type = {'Increase':'solid', 'Decrease':'dashed', 'NoChange':'dotted'}
colors = {'Increase':'red', 'Decrease':'blue', 'NoChange':'green'}
bins = np.linspace(0,10,50)
for nn in names:
norms = [(v-minv)*10./(maxv-minv) for v in all_scores[nn]]
all_scores[nn] = norms[:]
sns.kdeplot(all_scores[nn],alpha=1,label=label_names[nn],ls=line_type[nn],color=colors[nn])
#plt.hist(all_scores[nn],bins,alpha=0.3,label=nn,density=True)
font_size = 18
plt.rc('font',size=font_size)
plt.xlabel('plt_max scores',fontsize=font_size)
plt.ylabel('density of probability',fontsize=font_size)
plt.grid()
plt.legend()
plt.show()
if __name__ == '__main__':
names = ['Increase','Decrease','NoChange']
plotDist(names)
#for nn in names:
#getStats(nn)
|
11499901
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
from skimage.io import imread
from scipy.misc import imresize
from util import log
__IMAGENET_IMG_PATH__ = './datasets/tiny_imagenet/tiny-imagenet-200/'
__IMAGENET_LIST_PATH__ = './datasets/tiny_imagenet'
rs = np.random.RandomState(123)
class Dataset(object):
def __init__(self, ids, name='default',
max_examples=None, is_train=True):
self._ids = list(ids)
self.name = name
self.is_train = is_train
if max_examples is not None:
self._ids = self._ids[:max_examples]
file = os.path.join(__IMAGENET_IMG_PATH__, self._ids[0])
with open(os.path.join(__IMAGENET_IMG_PATH__, 'wnids.txt')) as f:
self.label_list = f.readlines()
self.label_list = [label.strip() for label in self.label_list]
with open(os.path.join(__IMAGENET_IMG_PATH__, 'val/val_annotations.txt')) as f:
self.val_label_list = f.readlines()
self.val_label_list = [label.split('\t')[1] for label in self.val_label_list]
try:
imread(file)
        except Exception:
            raise IOError('Dataset not found. Please make sure the dataset was downloaded.')
log.info("Reading Done: %s", file)
def load_image(self, id):
img = imread(
os.path.join(__IMAGENET_IMG_PATH__, id))/255.
img = imresize(img, [72, 72])
if len(img.shape) == 2:
img = np.stack([img, img, img], axis=-1)
y = np.random.randint(img.shape[0]-64)
x = np.random.randint(img.shape[1]-64)
img = img[y:y+64, x:x+64, :3]
l = np.zeros(200)
if id.split('/')[1] == 'train':
l[self.label_list.index(id.split('/')[-3])] = 1
elif id.split('/')[1] == 'val':
img_idx = int(id.split('/')[-1].split('_')[-1].split('.')[0])
l[self.label_list.index(self.val_label_list[img_idx])] = 1
return img, l
def get_data(self, id):
# preprocessing and data augmentation
m, l = self.load_image(id)
return m, l
@property
def ids(self):
return self._ids
def __len__(self):
return len(self.ids)
def __size__(self):
return 64, 64
def __repr__(self):
return 'Dataset (%s, %d examples)' % (
self.name,
len(self)
)
def create_default_splits(is_train=True, ratio=0.8):
id_train, id_test = all_ids()
dataset_train = Dataset(id_train, name='train', is_train=False)
dataset_test = Dataset(id_test, name='test', is_train=False)
return dataset_train, dataset_test
def all_ids():
id_train_path = os.path.join(__IMAGENET_LIST_PATH__, 'train_list.txt')
id_val_path = os.path.join(__IMAGENET_LIST_PATH__, 'val_list.txt')
try:
with open(id_train_path, 'r') as fp:
id_train = [s.strip() for s in fp.readlines() if s]
with open(id_val_path, 'r') as fp:
id_val = [s.strip() for s in fp.readlines() if s]
    except Exception:
        raise IOError('Dataset not found. Please make sure the dataset was downloaded.')
rs.shuffle(id_train)
rs.shuffle(id_val)
return id_train, id_val
|
11499933
|
import warnings
from collections import namedtuple
from functools import partial
import numpy
from scipy import stats
import pandas
import statsmodels.api as sm
from statsmodels.tools.decorators import cache_readonly
try:
from tqdm import tqdm
except ImportError: # pragma: no cover
tqdm = None
from wqio import utils
from wqio import bootstrap
from wqio.ros import ROS
from wqio import validate
from wqio.features import Location, Dataset
_Stat = namedtuple("_stat", ["stat", "pvalue"])
def _dist_compare(x, y, stat_comp_func):
if (len(x) == len(y)) and numpy.equal(x, y).all():
return _Stat(numpy.nan, numpy.nan)
return stat_comp_func(x, y, alternative="two-sided")
class DataCollection(object):
"""Generalized water quality comparison object.
Parameters
----------
dataframe : pandas.DataFrame
        Dataframe of all the data to analyze.
rescol, qualcol, stationcol, paramcol : string
Column labels for the results, qualifiers, stations (monitoring
locations), and parameters (pollutants), respectively.
.. note::
Non-detect results should be reported as the detection
limit of that observation.
    ndval : string or list of strings, optional
The values found in ``qualcol`` that indicates that a
result is a non-detect.
othergroups : list of strings, optional
The columns (besides ``stationcol`` and ``paramcol``) that
should be considered when grouping into subsets of data.
pairgroups : list of strings, optional
Other columns besides ``stationcol`` and ``paramcol`` that
        can be used to define a unique index on ``dataframe`` such that it
        can be "unstacked" (i.e., pivoted, cross-tabbed) to place the
``stationcol`` values into columns. Values of ``pairgroups``
may overlap with ``othergroups``.
useros : bool (default = True)
Toggles the use of regression-on-order statistics to estimate
non-detect values when computing statistics.
filterfxn : callable, optional
Function that will be passed to the ``filter`` method of a
``pandas.Groupby`` object to remove groups that should not be
analyzed (for whatever reason). If not provided, all groups
returned by ``dataframe.groupby(by=groupcols)`` will be used.
bsiter : int
Number of iterations the bootstrapper should use when estimating
confidence intervals around a statistic.
showpbar : bool (True)
When True and the `tqdm` module is available, this will toggle the
        appearance of progress bars in long-running group by-apply operations.
"""
    # column that stores the censorship status of an observation
cencol = "__censorship"
def __init__(
self,
dataframe,
rescol="res",
qualcol="qual",
stationcol="station",
paramcol="parameter",
ndval="ND",
othergroups=None,
pairgroups=None,
useros=True,
filterfxn=None,
bsiter=10000,
showpbar=True,
):
# cache for all of the properties
self._cache = {}
# basic input
self.raw_data = dataframe
self._raw_rescol = rescol
self.qualcol = qualcol
self.stationcol = stationcol
self.paramcol = paramcol
self.ndval = validate.at_least_empty_list(ndval)
self.othergroups = validate.at_least_empty_list(othergroups)
self.pairgroups = validate.at_least_empty_list(pairgroups)
self.useros = useros
self.filterfxn = filterfxn or utils.non_filter
self.bsiter = bsiter
self.showpbar = showpbar
# column that stores ROS'd values
self.roscol = "ros_" + rescol
# column stators "final" values
if self.useros:
self.rescol = self.roscol
else:
self.rescol = rescol
# columns to group by when ROS'd, doing general stats
self.groupcols = [self.stationcol, self.paramcol] + self.othergroups
self.groupcols_comparison = [self.paramcol] + self.othergroups
# columns to group and pivot by when doing paired stats
self.pairgroups = self.pairgroups + [self.stationcol, self.paramcol]
# final column list of the tidy dataframe
self.tidy_columns = self.groupcols + [self._raw_rescol, self.cencol]
# the "raw" data with the censorship column added
self.data = dataframe.assign(
**{self.cencol: dataframe[self.qualcol].isin(self.ndval)}
).reset_index()
@cache_readonly
def tidy(self):
if self.useros:
def fxn(g):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
rosdf = (
ROS(
df=g,
result=self._raw_rescol,
censorship=self.cencol,
as_array=False,
)
.rename(columns={"final": self.roscol})
.loc[:, [self._raw_rescol, self.roscol, self.cencol]]
)
return rosdf
else:
def fxn(g):
g[self.roscol] = numpy.nan
return g
if tqdm and self.showpbar:
def make_tidy(df):
tqdm.pandas(desc="Tidying the DataCollection")
return df.groupby(self.groupcols).progress_apply(fxn)
else:
def make_tidy(df):
return df.groupby(self.groupcols).apply(fxn)
keep_cols = self.tidy_columns + [self.roscol]
with warnings.catch_warnings():
warnings.simplefilter("once")
_tidy = (
self.data.reset_index()[self.tidy_columns]
.groupby(by=self.groupcols)
.filter(self.filterfxn)
.pipe(make_tidy)
.reset_index()
.sort_values(by=self.groupcols)
)
return _tidy[keep_cols]
@cache_readonly
def paired(self):
_pairs = (
self.data.reset_index()
.groupby(by=self.groupcols)
.filter(self.filterfxn)
.set_index(self.pairgroups)
.unstack(level=self.stationcol)
.rename_axis(["value", self.stationcol], axis="columns")
)[[self._raw_rescol, self.cencol]]
return _pairs
def generic_stat(
self,
statfxn,
use_bootstrap=True,
statname=None,
has_pvalue=False,
filterfxn=None,
**statopts
):
"""Generic function to estimate a statistic and its CIs.
Parameters
----------
statfxn : callable
            A function that takes a 1-D sequence and returns a scalar
            result. Its call signature should be in the form:
``statfxn(seq, **kwargs)``.
use_bootstrap : bool, optional
Toggles using a BCA bootstrapping method to estimate the
95% confidence interval around the statistic.
statname : string, optional
Name of the statistic. Included as a column name in the
final dataframe.
has_pvalue : bool, optional
Set to ``True`` if ``statfxn`` returns a tuple of the
            statistic and its p-value.
**statopts : optional kwargs
Additional keyword arguments that will be passed to
``statfxn``.
Returns
-------
stat_df : pandas.DataFrame
            A dataframe of all the results of the ``statfxn`` when applied
to ``self.tidy.groupby(self.groupcols)``.
Examples
--------
This actually demonstrates how ``DataCollection.mean`` is
implemented.
>>> import numpy
>>> import wqio
>>> from wqio.tests import helpers
>>> df = helpers.make_dc_data_complex()
>>> dc = DataCollection(df, rescol='res', qualcol='qual',
... stationcol='loc', paramcol='param',
... ndval='<')
>>> means = dc.generic_stat(numpy.mean, statname='Arith. Mean')
You can also use ``lambda`` objects
>>> pctl35 = dc.generic_stat(lambda x: numpy.percentile(x, 35),
... statname='pctl35', use_bootstrap=False)
"""
if statname is None:
statname = "stat"
if filterfxn is None:
filterfxn = utils.non_filter
def fxn(x):
data = x[self.rescol].values
if use_bootstrap:
stat = statfxn(data)
lci, uci = bootstrap.BCA(data, statfxn=statfxn)
values = [lci, stat, uci]
statnames = ["lower", statname, "upper"]
else:
values = validate.at_least_empty_list(statfxn(data, **statopts))
if hasattr(values, "_fields"): # nametuple
statnames = values._fields
else: # tuple
statnames = [statname]
if has_pvalue:
statnames.append("pvalue")
return pandas.Series(values, index=statnames)
groups = (
self.tidy.groupby(by=self.groupcols)
.filter(filterfxn)
.groupby(by=self.groupcols)
)
if tqdm and self.showpbar:
tqdm.pandas(desc="Computing stats")
vals = groups.progress_apply(fxn)
else:
vals = groups.apply(fxn)
results = (
vals.unstack(level=self.stationcol)
.pipe(utils.swap_column_levels, 0, 1)
.rename_axis(["station", "result"], axis="columns")
)
return results
@cache_readonly
def count(self):
return (
self.generic_stat(
lambda x: x.shape[0], use_bootstrap=False, statname="Count"
)
.fillna(0)
.astype(int)
)
@cache_readonly
def inventory(self):
counts = (
self.tidy.groupby(by=self.groupcols + [self.cencol])
.size()
.unstack(level=self.cencol)
.fillna(0)
.astype(int)
.rename_axis(None, axis="columns")
.rename(columns={False: "Detect", True: "Non-Detect"})
.assign(Count=lambda df: df.sum(axis="columns"))
)
if "Non-Detect" not in counts.columns:
counts["Non-Detect"] = 0
return counts[["Count", "Non-Detect"]]
@cache_readonly
def median(self):
return self.generic_stat(numpy.median, statname="median")
@cache_readonly
def mean(self):
return self.generic_stat(numpy.mean, statname="mean")
@cache_readonly
def std_dev(self):
return self.generic_stat(numpy.std, statname="std. dev.", use_bootstrap=False)
def percentile(self, percentile):
"""Return the percentiles (0 - 100) for the data."""
return self.generic_stat(
lambda x: numpy.percentile(x, percentile),
statname="pctl {}".format(percentile),
use_bootstrap=False,
)
@cache_readonly
def logmean(self):
return self.generic_stat(
lambda x, axis=0: numpy.mean(numpy.log(x), axis=axis), statname="Log-mean"
)
@cache_readonly
def logstd_dev(self):
return self.generic_stat(
lambda x, axis=0: numpy.std(numpy.log(x), axis=axis),
use_bootstrap=False,
statname="Log-std. dev.",
)
@cache_readonly
def geomean(self):
geomean = numpy.exp(self.logmean)
geomean.columns.names = ["station", "Geo-mean"]
return geomean
@cache_readonly
def geostd_dev(self):
geostd = numpy.exp(self.logstd_dev)
geostd.columns.names = ["station", "Geo-std. dev."]
return geostd
@cache_readonly
def shapiro(self):
return self.generic_stat(
stats.shapiro,
use_bootstrap=False,
has_pvalue=True,
statname="shapiro",
filterfxn=lambda x: x.shape[0] > 3,
)
@cache_readonly
def shapiro_log(self):
return self.generic_stat(
lambda x: stats.shapiro(numpy.log(x)),
use_bootstrap=False,
has_pvalue=True,
filterfxn=lambda x: x.shape[0] > 3,
statname="log-shapiro",
)
@cache_readonly
def lilliefors(self):
return self.generic_stat(
sm.stats.lilliefors,
use_bootstrap=False,
has_pvalue=True,
statname="lilliefors",
)
@cache_readonly
def lilliefors_log(self):
return self.generic_stat(
lambda x: sm.stats.lilliefors(numpy.log(x)),
use_bootstrap=False,
has_pvalue=True,
statname="log-lilliefors",
)
@cache_readonly
def anderson_darling(self):
raise NotImplementedError
return self.generic_stat(
utils.anderson_darling,
use_bootstrap=False,
has_pvalue=True,
statname="anderson-darling",
)
@cache_readonly
def anderson_darling_log(self):
raise NotImplementedError
return self.generic_stat(
lambda x: utils.anderson_darling(numpy.log(x)),
use_bootstrap=False,
has_pvalue=True,
statname="log-anderson-darling",
)
def comparison_stat(self, statfxn, statname=None, paired=False, **statopts):
"""Generic function to apply comparative hypothesis tests to
the groups of the ``DataCollection``.
Parameters
----------
statfxn : callable
            A function that takes a 1-D sequence and returns a scalar
            result. Its call signature should be in the form:
``statfxn(seq, **kwargs)``.
statname : string, optional
Name of the statistic. Included as a column name in the
final dataframe.
        paired : bool, optional
Set to ``True`` if ``statfxn`` requires paired data.
**statopts : optional kwargs
Additional keyword arguments that will be passed to
``statfxn``.
Returns
-------
stat_df : pandas.DataFrame
            A dataframe of all the results of the ``statfxn`` when applied
to ``self.tidy.groupby(self.groupcols)`` or
``self.paired.groupby(self.groupcols)`` when necessary.
Examples
--------
This actually demonstrates how ``DataCollection.mann_whitney``
is implemented.
>>> from scipy import stats
>>> import wqio
>>> from wqio.tests import helpers
>>> df = helpers.make_dc_data_complex()
>>> dc = DataCollection(df, rescol='res', qualcol='qual',
... stationcol='loc', paramcol='param',
... ndval='<')
>>> mwht = dc.comparison_stat(stats.mannwhitneyu,
... statname='mann_whitney',
... alternative='two-sided')
"""
if paired:
data = self.paired
generator = utils.numutils._paired_stat_generator
rescol = self._raw_rescol
else:
data = self.tidy
generator = utils.numutils._comp_stat_generator
rescol = self.rescol
station_columns = [self.stationcol + "_1", self.stationcol + "_2"]
meta_columns = self.groupcols_comparison
index_cols = meta_columns + station_columns
results = generator(
data,
meta_columns,
self.stationcol,
rescol,
statfxn,
statname=statname,
**statopts
)
return pandas.DataFrame.from_records(results).set_index(index_cols)
@cache_readonly
def mann_whitney(self):
return self.comparison_stat(
partial(_dist_compare, stat_comp_func=stats.mannwhitneyu),
statname="mann_whitney",
)
@cache_readonly
def ranksums(self):
return self.comparison_stat(stats.ranksums, statname="rank_sums")
@cache_readonly
def t_test(self):
return self.comparison_stat(stats.ttest_ind, statname="t_test", equal_var=False)
@cache_readonly
def levene(self):
return self.comparison_stat(stats.levene, statname="levene", center="median")
@cache_readonly
def wilcoxon(self):
return self.comparison_stat(
partial(_dist_compare, stat_comp_func=stats.wilcoxon),
statname="wilcoxon",
paired=True,
)
@cache_readonly
def kendall(self):
return self.comparison_stat(
stats.kendalltau, statname="kendalltau", paired=True
)
@cache_readonly
def spearman(self):
return self.comparison_stat(
stats.spearmanr, statname="spearmanrho", paired=True
)
@cache_readonly
def theilslopes(self, logs=False):
raise NotImplementedError
@cache_readonly
def locations(self):
_locations = []
groups = (
self.data.groupby(by=self.groupcols)
.filter(self.filterfxn)
.groupby(by=self.groupcols)
)
cols = [self._raw_rescol, self.qualcol]
for names, data in groups:
loc_dict = dict(zip(self.groupcols, names))
loc = (
data.set_index(self.pairgroups)[cols]
.reset_index(level=self.stationcol, drop=True)
.pipe(
Location,
station_type=loc_dict[self.stationcol].lower(),
rescol=self._raw_rescol,
qualcol=self.qualcol,
ndval=self.ndval,
bsiter=self.bsiter,
useros=self.useros,
)
)
loc.definition = loc_dict
_locations.append(loc)
return _locations
def datasets(self, loc1, loc2):
""" Generate ``Dataset`` objects from the raw data of the
``DataColletion``.
Data are first grouped by ``self.groupcols`` and
``self.stationcol``. Data frame each group are then queried
for into separate ``Lcoations`` from ``loc1`` and ``loc2``.
The resulting ``Locations`` are used to create a ``Dataset``.
Parameters
----------
loc1, loc2 : string
Values found in the ``self.stationcol`` property that will
be used to distinguish the two ``Location`` objects for the
``Datasets``.
Yields
------
``Dataset`` objects
"""
groupcols = list(filter(lambda g: g != self.stationcol, self.groupcols))
for names, data in self.data.groupby(by=groupcols):
ds_dict = dict(zip(groupcols, names))
ds_dict[self.stationcol] = loc1
infl = self.selectLocations(squeeze=True, **ds_dict)
ds_dict[self.stationcol] = loc2
effl = self.selectLocations(squeeze=True, **ds_dict)
ds_dict.pop(self.stationcol)
dsname = "_".join(names).replace(", ", "")
if effl:
ds = Dataset(infl, effl, useros=self.useros, name=dsname)
ds.definition = ds_dict
yield ds
@staticmethod
def _filter_collection(collection, squeeze, **kwargs):
items = list(collection)
for key, value in kwargs.items():
if numpy.isscalar(value):
items = [r for r in filter(lambda x: x.definition[key] == value, items)]
else:
items = [r for r in filter(lambda x: x.definition[key] in value, items)]
if squeeze:
if len(items) == 1:
items = items[0]
elif len(items) == 0:
items = None
return items
def selectLocations(self, squeeze=False, **conditions):
""" Select ``Location`` objects meeting specified criteria
        from the ``DataCollection``.
Parameters
----------
squeeze : bool, optional
When True and only one object is found, it returns the bare
object. Otherwise, a list is returned.
**conditions : optional parameters
The conditions to be applied to the definitions of the
``Locations`` to filter them out. If a scalar is provided
as the value, normal comparison (==) is used. If a sequence
is provided, the ``in`` operator is used.
Returns
-------
locations : list of ``wqio.Location`` objects
Example
-------
>>> from wqio.tests.helpers import make_dc_data_complex
>>> import wqio
>>> df = make_dc_data_complex()
>>> dc = wqio.DataCollection(df, rescol='res', qualcol='qual',
... stationcol='loc', paramcol='param',
... ndval='<', othergroups=None,
... pairgroups=['state', 'bmp'],
... useros=True, bsiter=10000)
>>> locs = dc.selectLocations(param=['A', 'B'], loc=['Inflow', 'Reference'])
>>> len(locs)
4
>>> locs[0].definition
{'loc': 'Inflow', 'param': 'A'}
"""
locations = self._filter_collection(
self.locations.copy(), squeeze=squeeze, **conditions
)
return locations
def selectDatasets(self, loc1, loc2, squeeze=False, **conditions):
""" Select ``Dataset`` objects meeting specified criteria
        from the ``DataCollection``.
Parameters
----------
loc1, loc2 : string
Values found in the ``self.stationcol`` property that will
be used to distinguish the two ``Location`` objects for the
``Datasets``.
squeeze : bool, optional
When True and only one object is found, it returns the bare
object. Otherwise, a list is returned.
**conditions : optional parameters
The conditions to be applied to the definitions of the
``Locations`` to filter them out. If a scalar is provided
as the value, normal comparison (==) is used. If a sequence
is provided, the ``in`` operator is used.
Returns
-------
locations : list of ``wqio.Location`` objects
Example
-------
>>> from wqio.tests.helpers import make_dc_data_complex
>>> import wqio
>>> df = make_dc_data_complex()
>>> dc = wqio.DataCollection(df, rescol='res', qualcol='qual',
... stationcol='loc', paramcol='param',
... ndval='<', othergroups=None,
... pairgroups=['state', 'bmp'],
... useros=True, bsiter=10000)
>>> dsets = dc.selectDatasets('Inflow', 'Outflow', squeeze=False,
... param=['A', 'B'])
>>> len(dsets)
2
>>> dsets[0].definition
{'param': 'A'}
"""
datasets = self._filter_collection(
self.datasets(loc1, loc2), squeeze=squeeze, **conditions
)
return datasets
def n_unique(self, column):
return (
self.data.loc[:, self.groupcols + [column]]
.drop_duplicates()
.groupby(self.groupcols)
.size()
.unstack(level=self.stationcol)
.pipe(utils.add_column_level, column, "result")
.swaplevel(axis="columns")
.fillna(0)
.astype(int)
)
def stat_summary(self, percentiles=None, groupcols=None, useros=True):
""" A generic, high-level summary of the data collection.
        Parameters
        ----------
        percentiles : sequence of floats, optional
            The percentiles to compute in the summary. Defaults to
            [0.1, 0.25, 0.5, 0.75, 0.9].
        groupcols : list of strings, optional
            The columns by which ``self.tidy`` will be grouped when
            computing the statistics.
        useros : bool, optional
            Toggles the use of the ROS'd (``True``) or raw
            (``False``) data.
Returns
-------
stat_df : pandas.DataFrame
"""
if useros:
col = self.roscol
else:
col = self.rescol
if groupcols is None:
groupcols = self.groupcols
else:
groupcols = validate.at_least_empty_list(groupcols)
ptiles = percentiles or [0.1, 0.25, 0.5, 0.75, 0.9]
summary = (
self.tidy.groupby(by=groupcols)
.apply(lambda g: g[col].describe(percentiles=ptiles).T)
.drop("count", axis="columns")
)
return self.inventory.join(summary).unstack(level=self.stationcol)
|
11499934
|
import sqlite3
class DBHandler:
def __init__(self, db_path, debug=False):
self._debug = debug
self._connect = sqlite3.connect(db_path)
self._cursor = self._connect.cursor()
def execute(self, sql):
if self._debug:
print("[Longan Debug]", end='\t')
print(sql)
self._cursor.execute(sql)
return self._cursor.fetchall()
def commit(self):
self._connect.commit()
def close(self):
self._cursor.close()
self._connect.close()
def desc(self):
return self._cursor.description
def affect(self):
return self._cursor.rowcount
def last_id(self):
return self._cursor.lastrowid
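# A minimal usage sketch, assuming only the methods defined above; the
# in-memory path ':memory:' is standard sqlite3, the table/values are made up:
if __name__ == '__main__':
    db = DBHandler(':memory:', debug=True)
    db.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)")
    db.execute("INSERT INTO users (name) VALUES ('alice')")
    print(db.last_id(), db.affect())  # -> 1 1
    db.commit()
    print(db.execute("SELECT id, name FROM users"))  # -> [(1, 'alice')]
    db.close()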
|
11499948
|
data_tidy = data.reset_index().melt(id_vars=["datetime"], var_name='station', value_name='no2')
data_tidy.head()
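# A self-contained sketch of the same wide-to-long reshape; the tiny frame and
# station names below are illustrative, not the real dataset:
import pandas as pd
wide = pd.DataFrame(
    {"station_a": [10.0, 12.0], "station_b": [8.0, 9.0]},
    index=pd.to_datetime(["2020-01-01", "2020-01-02"]).rename("datetime"),
)
tidy = wide.reset_index().melt(id_vars=["datetime"], var_name="station", value_name="no2")
# tidy now holds one row per (datetime, station) pair with its NO2 value.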
|
11499969
|
from metrics import calculate_metrics
import torch
from torch import nn
import sklearn
import sklearn.metrics
import numpy as np
from tqdm import tqdm
import wandb
import datetime
import pickle
from PIL import Image
import PIL
from collections import defaultdict
import mxnet as mx
from mxnet import ndarray as nd
#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu" if torch.cuda.is_available() else "cpu")
class MetricMonitor:
def __init__(self, float_precision=5):
self.float_precision = float_precision
self.reset()
def reset(self):
self.metrics = defaultdict(lambda: {"val": 0, "count": 0, "avg": 0})
def update(self, metric_name, val):
metric = self.metrics[metric_name]
metric["val"] += val
metric["count"] += 1
metric["avg"] = metric["val"] / metric["count"]
def __str__(self):
return " | ".join(
[
"{metric_name}: {avg:.{float_precision}f}".format(
metric_name=metric_name, avg=metric["avg"], float_precision=self.float_precision
)
for (metric_name, metric) in self.metrics.items()
]
)
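# A quick illustration of MetricMonitor's running averages (doctest-style,
# values made up):
# >>> monitor = MetricMonitor(float_precision=3)
# >>> monitor.update("loss", 1.0); monitor.update("loss", 0.5)
# >>> str(monitor)
# 'loss: 0.750'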
def train(net,trainloader,validationloader,n_epochs=10,lr=0.1):
MSE = torch.nn.MSELoss()
data_set = load_bin("faces_emore/lfw.bin", (112,112))
wandb.init(project='', entity='')
wandb.config.lr1 = 0.005
wandb.config.lr2 = 0.1
net.to("cuda:0")
net.train()
criterion = nn.CrossEntropyLoss()
param2 = list(net.module.model.parameters()) + list(net.module.fc.parameters()) + list(net.module.fc2.parameters())
optimizer2 = torch.optim.SGD(param2, lr=wandb.config.lr2,weight_decay=5e-4,momentum=0.9)
iteration = 0
best_score = 100
rate_decrease=1
patience = 1
for epoch in range(0,n_epochs):
metric_monitor = MetricMonitor()
stream = tqdm(trainloader)
for _, sample in enumerate(stream, 0):
net.train()
inputs = sample['image']
inputs_masked = sample['image_masked']
labels = sample['identity']
labels2 = sample['mask']
inputs,inputs_masked, labels,labels2 = inputs.to("cuda:0"),inputs_masked.to("cuda:0"), labels.to("cuda:0"),labels2.to("cuda:0")
optimizer2.zero_grad()
outputs,e1,e2,mask = net(inputs,label=labels)
loss = (criterion(outputs, labels)) + 0.1 * criterion(mask*0,labels2)
outputs,e1_,e2,mask = net(inputs_masked,label=labels)
loss += (criterion(outputs, labels)) + 0.1 * criterion(mask,labels2)
loss /= 2
loss += MSE(e1,e1_)/3
loss.backward()
optimizer2.step()
metric_monitor.update("Loss P", loss.item())
wandb.log({"Loss P":loss.item()})
iteration +=1
stream.set_description("Epoch: {epoch}. Train. {metric_monitor}".format(epoch=epoch, metric_monitor=metric_monitor))
fmr100 = validate(net,data_set,str(epoch))
if fmr100 < best_score:
best_score = fmr100
torch.save(net.module.state_dict(), "uai_batch" + str(epoch+1) +".mdl")
print("SAVED THE MODEL")
patience = 1
else:
if patience == 0:
patience = 1
rate_decrease /= 10
optimizer2 = torch.optim.SGD(param2, lr=wandb.config.lr2 * rate_decrease,weight_decay=5e-4,momentum=0.9)
print("New Learning Rate")
print(wandb.config.lr2 * rate_decrease)
else: patience -= 1
print('Finished Training')
def validate(net,data_set,epoch):
net.eval()
with torch.no_grad():
metrics = test(data_set, net, 128,epoch)
print("FMR100 = " + str(metrics[1]*100))
wandb.log({"FMR100":metrics[1]*100})
print("AUC = " + str(metrics[5]))
wandb.log({"AUC":metrics[5]})
wandb.log({"GMean":metrics[3]})
wandb.log({"IMean":metrics[4]})
return metrics[1]
masked_labels = []
@torch.no_grad()
def load_bin(path, image_size):
try:
with open(path, 'rb') as f:
bins, issame_list = pickle.load(f) # py2
except UnicodeDecodeError as e:
with open(path, 'rb') as f:
bins, issame_list = pickle.load(f, encoding='bytes') # py3
#print(len(issame_list))
data_list = []
for flip in [0, 1]:
data = torch.empty((len(issame_list) * 2, 3, image_size[0], image_size[1]))
data_list.append(data)
for idx in range(len(issame_list) * 2):
#pdb.set_trace()
#im = Image.fromarray(img.asnumpy())
#im.save("new_dataset/"+str(idx)+".jpg")
if idx % 2 == 0:
try:
im = Image.open("new_dataset_masked2/"+str(idx)+".jpg")
R, G, B = im.split()
im = PIL.Image.merge("RGB", (B, G, R))
img = mx.nd.array(np.array(im))
masked_labels.append(1)
            except OSError:  # masked crop missing or unreadable; fall back to the unmasked image
im = Image.open("new_dataset/"+str(idx)+".jpg")
R, G, B = im.split()
im = PIL.Image.merge("RGB", (B, G, R))
img = mx.nd.array(np.array(im))
masked_labels.append(0)
else:
#_bin = bins[idx]
#img = mx.image.imdecode(_bin)
im = Image.open("new_dataset/"+str(idx)+".jpg")
R, G, B = im.split()
im = PIL.Image.merge("RGB", (B, G, R))
img = mx.nd.array(np.array(im))
masked_labels.append(0)
#if img.shape[1] != image_size[0]:
# img = mx.image.resize_short(img, image_size[0])
img = nd.transpose(img, axes=(2, 0, 1))
for flip in [0, 1]:
if flip == 1:
img = mx.ndarray.flip(data=img, axis=2)
data_list[flip][idx][:] = torch.from_numpy(img.asnumpy())
if idx % 1000 == 0:
print('loading bin', idx)
print(data_list[0].shape)
return data_list, issame_list
@torch.no_grad()
def test(data_set, backbone, batch_size,epoch):
print('testing verification..')
data_list = data_set[0]
issame_list = data_set[1]
embeddings_list = []
time_consumed = 0.0
masked = []
for i in range(len(data_list)):
data = data_list[i]
embeddings = None
ba = 0
print(i)
while ba < data.shape[0]:
bb = min(ba + batch_size, data.shape[0])
count = bb - ba
_data = data[bb - batch_size: bb]
time0 = datetime.datetime.now()
img = ((_data / 255) - 0.5) / 0.5
img = img.to(device)
_,net_out,_,y2 = backbone(img,inference = True)
masked.append((i,y2.detach().cpu().numpy()))
del img
_embeddings = net_out.detach().cpu().numpy()
time_now = datetime.datetime.now()
diff = time_now - time0
time_consumed += diff.total_seconds()
if embeddings is None:
embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
embeddings[ba:bb, :] = _embeddings[(batch_size - count):, :]
ba = bb
embeddings_list.append(embeddings)
if i % 1 == 0:
print('loading bin', i)
print(time_consumed)
masked2 = []
i = 0
with open("mask_prediction.txt","w") as w:
for mask in masked:
label = mask[0]
for mask2 in mask[1]:
mask2=mask2.item()
w.write(str(label) + "," + str(masked_labels[i]) + "," + str(mask2) + "\n")
i+=1
_xnorm = 0.0
_xnorm_cnt = 0
print("Normalizing")
for embed in embeddings_list:
for i in range(embed.shape[0]):
_em = embed[i]
_norm = np.linalg.norm(_em)
_xnorm += _norm
_xnorm_cnt += 1
_xnorm /= _xnorm_cnt
embeddings = embeddings_list[0].copy()
embeddings = sklearn.preprocessing.normalize(embeddings)
embeddings = embeddings_list[0] + embeddings_list[1]
embeddings = sklearn.preprocessing.normalize(embeddings)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
positives = []
negatives = []
print(len(issame_list))
    for embedding1, embedding2, label in zip(embeddings1, embeddings2, issame_list):
        # Embeddings are L2-normalized, so the pairwise L2 distance lies in [0, 2];
        # 1 - dist/2 therefore maps it to a similarity score in [0, 1].
        dist = 1 - torch.cdist(torch.from_numpy(embedding1).view(1, -1), torch.from_numpy(embedding2).view(1, -1)) / 2
        if label == 1:
            positives.append(dist)
        else:
            negatives.append(dist)
return calculate_metrics(positives,negatives,epoch)
|
11499973
|
import pytest
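# Each pytest.main call returns an exit code (0 / ExitCode.OK when every test
# in that module passes), so the prints below show per-module pass/fail status.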
print(pytest.main(["-qqs", "pytests/test_factory.py"]))
print(pytest.main(["-qqs", "pytests/test_db.py"]))
print(pytest.main(["-qqs", "pytests/test_auth.py"]))
print(pytest.main(["-qqs", "pytests/test_blog.py"]))
print(pytest.main(["-qqs", "pytests/test_reply.py"]))
print(pytest.main(["-qqs", "pytests/test_reply_del.py"]))
print(pytest.main(["-qqs", "pytests/test_anonym.py"]))
print(pytest.main(["-qqs", "pytests/test_cli_del_post.py"]))
print(pytest.main(["-qqs", "pytests/test_cli_del_user.py"]))
|
11500003
|
import enum
import multiprocessing as mp
from typing import Dict
import numpy as np
import torch
from cid.rollouts.base import BaseRolloutGenerator
class ParallelRolloutGenerator(BaseRolloutGenerator):
"""Generator producing multiple environment rollouts in parallel
Parallel rollouts are generated in worker processes, where each process has
its own copy of the environment.
"""
def __init__(self, n_parallel_rollouts, env_fn, agent_fn,
rollout_generator_fn, seed, gin_conf=None):
assert n_parallel_rollouts > 0
self._processes = []
self._pipes = []
ctx = mp.get_context('spawn')
for idx in range(n_parallel_rollouts):
worker_seed = seed + idx
process, pipe = self._start_worker_process(ctx,
idx,
env_fn,
agent_fn,
rollout_generator_fn,
worker_seed,
gin_conf)
self._processes.append(process)
self._pipes.append(pipe)
@staticmethod
def _start_worker_process(ctx, worker_id, env_fn, agent_fn,
rollout_generator_fn, worker_seed,
gin_conf=None):
parent_pipe, worker_pipe = ctx.Pipe(duplex=True)
args = (worker_id,
worker_pipe,
parent_pipe,
CloudpickleWrapper(env_fn),
CloudpickleWrapper(agent_fn),
CloudpickleWrapper(rollout_generator_fn),
worker_seed,
gin_conf)
name = f'ParallelRolloutGenerator-Worker-{worker_id}'
process = ctx.Process(target=worker_main,
name=name,
args=args)
# If the main process crashes, we should not cause things to hang
process.daemon = True
process.start()
worker_pipe.close()
return process, parent_pipe
def rollout(self, agent, n_steps, evaluate=False, render=False):
for pipe in self._pipes:
pipe.send((Commands.SYNC_PARAMS, agent.get_state()))
for pipe in self._pipes:
pipe.recv()
for idx, pipe in enumerate(self._pipes):
# Render only in first worker process
render = render if idx == 0 else False
pipe.send((Commands.ROLLOUT, (n_steps, evaluate, render)))
# Receive tuple of (episode, stats) from each worker
results = [pipe.recv() for pipe in self._pipes]
episodes = {key: np.concatenate([res[0][key] for res in results])
for key in results[0][0]}
stats = {key: [res[1][key] for res in results]
for key in results[0][1]}
return episodes, stats
def reset(self):
for i, pipe in enumerate(self._pipes):
pipe.send((Commands.RESET, None))
obs = [pipe.recv() for pipe in self._pipes]
return np.array(obs)
def close(self):
for pipe in self._pipes:
pipe.send((Commands.CLOSE, None))
for pipe in self._pipes:
pipe.recv()
pipe.close()
for process in self._processes:
process.join()
@property
def example_transition(self) -> Dict[str, np.ndarray]:
self._pipes[0].send((Commands.EXAMPLE_TRANSITION, None))
return self._pipes[0].recv()
@property
def transition_help(self) -> Dict[str, str]:
self._pipes[0].send((Commands.TRANSITION_HELP, None))
return self._pipes[0].recv()
@property
def observation_space(self) -> 'gym.Space':
self._pipes[0].send((Commands.OBSERVATION_SPACE, None))
return self._pipes[0].recv()
@property
def action_space(self) -> 'gym.Space':
self._pipes[0].send((Commands.ACTION_SPACE, None))
return self._pipes[0].recv()
def worker_main(worker_id, pipe, parent_pipe, env_fn, agent_fn,
rollout_generator_fn, seed, gin_conf):
"""Entry-point of worker processes"""
parent_pipe.close()
if gin_conf is not None:
# At the moment, gin configs are not propagated to new processes
# automatically, so we have to reload the config here. Somewhat
# inconvenient.
import gin
gin.parse_config(gin_conf)
# Each process needs to be seeded individually
np.random.seed(seed)
torch.manual_seed(seed)
env = env_fn()
env.seed(seed)
rollout_gen = rollout_generator_fn(env)
agent = agent_fn(rollout_gen.observation_space, rollout_gen.action_space)
try:
while True:
cmd, data = pipe.recv()
if cmd == Commands.SYNC_PARAMS:
agent.set_state(data)
pipe.send(None)
elif cmd == Commands.ROLLOUT:
pipe.send(rollout_gen.rollout(agent, *data))
elif cmd == Commands.RESET:
pipe.send(rollout_gen.reset())
elif cmd == Commands.EXAMPLE_TRANSITION:
pipe.send(rollout_gen.example_transition)
elif cmd == Commands.TRANSITION_HELP:
pipe.send(rollout_gen.transition_help)
elif cmd == Commands.OBSERVATION_SPACE:
pipe.send(rollout_gen.observation_space)
elif cmd == Commands.ACTION_SPACE:
pipe.send(rollout_gen.action_space)
elif cmd == Commands.CLOSE:
pipe.send(None)
break
else:
raise NotImplementedError(f'Unknown command {cmd}')
except KeyboardInterrupt:
print('ParallelRolloutWorker: got KeyboardInterrupt')
finally:
rollout_gen.close()
class Commands(enum.Enum):
"""Commands for communication between parent and worker processes"""
ROLLOUT = 1
SYNC_PARAMS = 2
RESET = 3
CLOSE = 4
EXAMPLE_TRANSITION = 5
TRANSITION_HELP = 6
OBSERVATION_SPACE = 7
ACTION_SPACE = 8
class CloudpickleWrapper:
"""Wrapper to cloudpickle functions
Adapted from Gym
"""
def __init__(self, fn):
self.fn = fn
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.fn)
def __setstate__(self, ob):
import pickle
self.fn = pickle.loads(ob)
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
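# A minimal sketch of why CloudpickleWrapper exists: plain pickle cannot
# serialize lambdas/closures, but cloudpickle can (requires the cloudpickle
# package to be installed).
if __name__ == '__main__':
    import pickle
    wrapped = CloudpickleWrapper(lambda x: x * 2)
    restored = pickle.loads(pickle.dumps(wrapped))  # round-trip through pickle
    assert restored(21) == 42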
|
11500029
|
import logging
import json
import re
from pyenvisalink.dsc_envisalinkdefs import *
from pyenvisalink import AlarmState
_LOGGER = logging.getLogger(__name__)
loggingconfig = {'level': 'DEBUG',
'format': '%(asctime)s %(levelname)s <%(name)s %(module)s %(funcName)s> %(message)s',
'datefmt': '%a, %d %b %Y %H:%M:%S'}
logging.basicConfig(**loggingconfig)
evl_verboseTrouble = {
0 : 'Service is Required',
1 : 'AC Power Lost',
2 : 'Telephone Line Fault',
3 : 'Failure to communicate',
4 : 'Zone/Sensor Fault',
5 : 'Zone/Sensor Tamper',
6 : 'Zone/Sensor Low Battery',
7 : 'Loss of time'
}
alarmState = AlarmState.get_initial_alarm_state(64, 8)
def handle_keypad_update(code, data):
"""Handle general- non partition based info"""
if code == '849':
bits = "{0:016b}".format(int(data,16))
trouble_description = ""
ac_present = True
print(bits)
for i in range(0, 7):
if bits[15-i] == '1':
trouble_description += evl_verboseTrouble[i] + ', '
if i == 1:
ac_present = False
new_status = {'alpha':trouble_description.strip(', '), 'ac_present': ac_present}
else:
new_status = evl_ResponseTypes[code]['status']
for part in alarmState['partition']:
alarmState['partition'][part]['status'].update(new_status)
_LOGGER.debug(str.format("(All partitions) state has updated: {0}", json.dumps(new_status)))
_LOGGER.info('Alarm State before:')
print(alarmState['partition'])
handle_keypad_update('849','02')
_LOGGER.info('Alarm State after:')
print(alarmState['partition'])
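# Worked example of the call above: data '02' -> int('02', 16) == 2 ->
# bits '0000000000000010', so bit 1 is set -> trouble text 'AC Power Lost'
# and ac_present flips to False for every partition.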
|
11500030
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .rnn_cnn_listener import RNNCNNListener
class GRUCNNListener(RNNCNNListener):
def __init__(self,kwargs, obs_shape, vocab_size=100, max_sentence_length=10, agent_id='l0', logger=None):
"""
:param obs_shape: tuple defining the shape of the stimulus following `(nbr_distractors+1, nbr_stimulus, *stimulus_shape)`
where, by default, `nbr_distractors=1` and `nbr_stimulus=1` (static stimuli).
:param vocab_size: int defining the size of the vocabulary of the language.
:param max_sentence_length: int defining the maximal length of each sentence the speaker can utter.
:param agent_id: str defining the ID of the agent over the population.
        :param logger: None or some kind of logger able to accumulate statistics per agent.
"""
super(GRUCNNListener, self).__init__(
kwargs=kwargs,
obs_shape=obs_shape,
vocab_size=vocab_size,
max_sentence_length=max_sentence_length,
agent_id=agent_id,
logger=logger,
rnn_type='gru'
)
|
11500060
|
from pathlib import Path
import nltk
from nltk.stem import WordNetLemmatizer
from rake_nltk import Rake
from .common import load_data, run, GetWords
def main():
run(GetRakeWords(), 'rake')
class GetRakeWords(GetWords):
def __init__(self):
super(GetRakeWords, self).__init__()
self.rake = Rake()
def __call__(self, sent):
self.rake.extract_keywords_from_text(sent)
keywords = self.rake.get_ranked_phrases_with_scores()
res = []
        for v, k in keywords:
            ks = super().__call__(k)
            # Split the phrase score evenly across its constituent words.
            v = v // len(ks) if len(ks) > 0 else 0
            for w in ks:
                res.append((w, v))
return list(res)
if __name__ == '__main__':
main()
|
11500071
|
import random
import numpy as np
import torch
def set_all_random_seed(seed: int):
random.seed(seed)
np.random.seed(seed)
torch.random.manual_seed(seed)
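# A minimal usage sketch: call once at program start so Python, NumPy and
# PyTorch all draw from reproducible streams.
if __name__ == "__main__":
    set_all_random_seed(42)
    print(random.random(), np.random.rand(), torch.rand(1).item())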
|
11500080
|
from rest_framework.exceptions import APIException
# Note: this name shadows Python's builtin ``BaseException``.
class BaseException(APIException):
    pass
class FileException(BaseException):
pass
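# A hedged sketch of how concrete errors would typically be declared on top of
# these bases; ``FileTooLargeException`` and its attribute values are
# illustrative, but ``status_code``/``default_detail``/``default_code`` are
# part of DRF's real APIException contract.
class FileTooLargeException(FileException):
    status_code = 413
    default_detail = 'Uploaded file exceeds the allowed size.'
    default_code = 'file_too_large'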
|
11500137
|
import unittest
import tableauserverclient as TSC
class WorkbookModelTests(unittest.TestCase):
def test_invalid_project_id(self):
self.assertRaises(ValueError, TSC.WorkbookItem, None)
workbook = TSC.WorkbookItem("10")
with self.assertRaises(ValueError):
workbook.project_id = None
def test_invalid_show_tabs(self):
workbook = TSC.WorkbookItem("10")
with self.assertRaises(ValueError):
workbook.show_tabs = "Hello"
with self.assertRaises(ValueError):
workbook.show_tabs = None
|
11500152
|
from util.ffi import cimport, Struct
from ctypes.wintypes import DWORD, WCHAR
from ctypes import windll, byref, create_unicode_buffer, create_string_buffer
from ctypes import c_ushort, sizeof
from gui.native.win.winutil import WinStruct
from gui.textutil import default_font
from cgui import PyGetFontUnicodeRanges
# constants used in AddFontResourceEx function
FR_PRIVATE = 0x10
FR_NOT_ENUM = 0x20
def loadfont(fontpath, private = True, enumerable = False):
'''
Makes fonts located in file "fontpath" available to the font system.
private if True, other processes cannot see this font, and this font
will be unloaded when the process dies
enumerable if True, this font will appear when enumerating fonts
see http://msdn2.microsoft.com/en-us/library/ms533937.aspx
'''
if isinstance(fontpath, str):
pathbuf = create_string_buffer(fontpath)
AddFontResourceEx = windll.gdi32.AddFontResourceExA
elif isinstance(fontpath, unicode):
pathbuf = create_unicode_buffer(fontpath)
AddFontResourceEx = windll.gdi32.AddFontResourceExW
else:
raise TypeError('fontpath must be a str or unicode')
flags = (FR_PRIVATE if private else 0) | (FR_NOT_ENUM if not enumerable else 0)
numFontsAdded = AddFontResourceEx(byref(pathbuf), flags, 0)
return bool(numFontsAdded)
def unloadfont(fontpath, private = True, enumerable = False):
'''
Unloads the fonts in the specified file.
see http://msdn2.microsoft.com/en-us/library/ms533925.aspx
'''
if isinstance(fontpath, str):
pathbuf = create_string_buffer(fontpath)
RemoveFontResourceEx = windll.gdi32.RemoveFontResourceExA
elif isinstance(fontpath, unicode):
pathbuf = create_unicode_buffer(fontpath)
RemoveFontResourceEx = windll.gdi32.RemoveFontResourceExW
else:
raise TypeError('fontpath must be a str or unicode')
flags = (FR_PRIVATE if private else 0) | (FR_NOT_ENUM if not enumerable else 0)
return bool(RemoveFontResourceEx(byref(pathbuf), flags, 0))
_fontranges = {}
# TODO: bloom filters?
def MemoizedFontRanges(font):
key = hash(font.NativeFontInfoDesc)
if key in _fontranges:
return _fontranges[key]
else:
return _fontranges.setdefault(key, PyGetFontUnicodeRanges(font))
def font_has_char(font, char):
ranges = MemoizedFontRanges(font)
char = ord(unicode(char))
for start, len in ranges:
end = start + len
if start <= char < end:
return True
return False
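# A hedged usage sketch (Windows only; the .ttf path below is hypothetical):
#   if loadfont(u'C:\\fonts\\custom.ttf'):
#       ... render text with the private font ...
#       unloadfont(u'C:\\fonts\\custom.ttf')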
def main():
import wx
dc = wx.MemoryDC()
dc.SetFont(default_font())
size = GetFontUnicodeRanges(dc.GetHDC(), 0)
if not size: raise Exception(GFURerror)
numRanges = (size - sizeof(DWORD) * 4) / sizeof(WCRANGE)
class GLYPHSET(WinStruct):
_fields_ = [('cbThis', DWORD),
('flAccel', DWORD),
('cGlyphsSupported', DWORD),
('cRanges', DWORD),
('ranges', WCRANGE * numRanges),
]
    g = GLYPHSET(cbThis = size, ranges = [WCRANGE() for x in xrange(numRanges)])
    # NOTE: GetFontUnicodeRanges and WCRANGE are assumed to come from the
    # ffi/cgui helpers imported at the top of this module.
    if not GetFontUnicodeRanges(dc.GetHDC(), g.ptr):
        raise Exception(GFURerror)
    print g
GFURerror = 'GetFontUnicodeRanges failed, see http://msdn2.microsoft.com/en-us/library/ms533944(VS.85).aspx'
if __name__ == '__main__':
import wx
a = wx.PySimpleApp()
main()
|
11500202
|
import asyncio
import contextlib
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from http.cookies import Morsel # noqa
from pathlib import Path
from types import SimpleNamespace
from typing import (
Any,
Awaitable,
Dict,
Iterator,
List,
Mapping,
Optional,
Tuple,
TypeVar,
)
import aiohttp
import click
from rich.console import Console, PagerContext, RenderableType
from rich.pager import Pager
from rich.spinner import Spinner
from rich.status import Status as RichStatus
from rich.style import StyleType
from rich.text import Text as RichText
from neuro_sdk import Client, ConfigError, Factory, gen_trace_id
from neuro_sdk.config import _ConfigData, load_user_config
from .asyncio_utils import Runner
log = logging.getLogger(__name__)
TEXT_TYPE = ("application/json", "text", "application/x-www-form-urlencoded")
HEADER_TOKEN_PATTERN = re.compile(
r"(Bearer|Basic|Digest|Mutual)\s+(?P<token>[^ ]+\.[^ ]+\.[^ ]+)"
)
_T = TypeVar("_T")
class MaybePager(Pager):
"""Uses the pager installed on the system."""
def __init__(self, console: Console) -> None:
self._console = console
self._limit = console.size[1] * 2 / 3
def show(self, content: str) -> None:
"""Use the same pager used by pydoc."""
if self._console.is_terminal and len(content.splitlines()) > self._limit:
# Enforce ANSI sequence handling (colors etc.)
os.environ["LESS"] = "-R"
click.echo_via_pager(content)
else:
print(content, end="")
class Status(RichStatus):
# Patched version of library class, avoid spinner animation
# reset on updates that do not change spinner style
def update(
self,
status: Optional[RenderableType] = None,
*,
spinner: Optional[str] = None,
spinner_style: Optional[StyleType] = None,
speed: Optional[float] = None,
) -> None:
if status is not None:
self.status = status
if spinner is not None:
self.spinner = spinner
if spinner_style is not None:
self.spinner_style = spinner_style
if speed is not None:
self.speed = speed
if spinner is not None or spinner_style is not None or speed is not None:
self._spinner = Spinner(
self.spinner, style=self.spinner_style, speed=self.speed
)
self._live.update(self.renderable, refresh=True)
@dataclass
class Root:
color: bool
tty: bool
disable_pypi_version_check: bool
network_timeout: float
config_path: Path
trace: bool
force_trace_all: bool
verbosity: int
trace_hide_token: bool
command_path: str
command_params: List[Dict[str, Optional[str]]]
skip_gmp_stats: bool
show_traceback: bool
iso_datetime_format: bool
_client: Optional[Client] = None
_factory: Optional[Factory] = None
_runner: Runner = field(init=False)
console: Console = field(init=False)
def __post_init__(self) -> None:
self._runner = Runner(debug=self.verbosity >= 2)
self._runner.__enter__()
self.console = Console(
color_system="auto" if self.color else None,
force_terminal=self.tty,
markup=False,
emoji=False,
highlight=False,
log_path=False,
)
if not self.console.is_terminal or self.console.is_dumb_terminal:
# resize with wider width to prevent wrapping/cropping
self.console = Console(
color_system="auto" if self.color else None,
force_terminal=self.tty,
highlight=False,
log_path=False,
width=2048,
)
self.err_console = Console(
file=sys.stderr,
color_system="auto" if self.color else None,
force_terminal=self.tty,
markup=False,
emoji=False,
highlight=False,
log_path=False,
)
if not self.err_console.is_terminal or self.err_console.is_dumb_terminal:
# resize with wider width to prevent wrapping/cropping
self.err_console = Console(
file=sys.stderr,
color_system="auto" if self.color else None,
force_terminal=self.tty,
markup=False,
emoji=False,
highlight=False,
log_path=False,
width=2048,
)
def close(self) -> None:
if self._client is not None:
self.run(self._client.close())
try:
            # Suppress printing of unhandled exceptions
            # while the event loop is closing
sys.stderr = None # type: ignore
self._runner.__exit__(*sys.exc_info())
finally:
sys.stderr = sys.__stderr__
self.soft_reset_tty()
def run(self, main: Awaitable[_T]) -> _T:
return self._runner.run(main)
@property
def _config(self) -> _ConfigData:
assert self._client is not None
return self._client.config._config_data
@property
def quiet(self) -> bool:
return self.verbosity < 0
@property
def terminal_size(self) -> Tuple[int, int]:
return self.console.size
@property
def timeout(self) -> aiohttp.ClientTimeout:
return aiohttp.ClientTimeout(
None, None, self.network_timeout, self.network_timeout
)
@property
def client(self) -> Client:
assert self._client is not None
return self._client
@property
def factory(self) -> Factory:
if self._factory is None:
self._factory = Factory(
path=self.config_path,
trace_configs=[self._create_trace_config()],
trace_id=gen_trace_id(),
trace_sampled=True if self.force_trace_all else None,
)
return self._factory
async def init_client(self) -> Client:
if self._client is not None:
return self._client
client = await self.factory.get(timeout=self.timeout)
self._client = client
return self._client
async def get_user_config(self) -> Mapping[str, Any]:
try:
client = await self.init_client()
except ConfigError:
return load_user_config(self.config_path.expanduser())
else:
return await client.config.get_user_config()
def _create_trace_config(self) -> aiohttp.TraceConfig:
trace_config = aiohttp.TraceConfig()
trace_config.on_request_start.append(self._on_request_start)
trace_config.on_request_chunk_sent.append(self._on_request_chunk_sent)
trace_config.on_request_end.append(self._on_request_end)
trace_config.on_response_chunk_received.append(self._on_response_chunk_received)
return trace_config
def _print_trace(self, lines: List[str]) -> None:
for line in lines:
if self.trace:
self.print(line, style="dim", err=True)
log.debug(line)
def _process_chunk(self, chunk: bytes, printable: bool) -> List[str]:
if not chunk:
return []
if printable:
return chunk.decode(errors="replace").split("\n")
else:
return [f"[binary {len(chunk)} bytes]"]
async def _on_request_start(
self,
session: aiohttp.ClientSession,
context: SimpleNamespace,
data: aiohttp.TraceRequestStartParams,
) -> None:
path = data.url.raw_path
if data.url.raw_query_string:
path += "?" + data.url.raw_query_string
lines = [f"> {data.method} {path} HTTP/1.1"]
for key, val in data.headers.items():
if self.trace_hide_token:
val = self._sanitize_header_value(val)
lines.append(f"> {key}: {val}")
lines.append("> ")
self._print_trace(lines)
content_type = data.headers.get("Content-Type", "")
context.request_printable = content_type.startswith(TEXT_TYPE)
async def _on_request_chunk_sent(
self,
session: aiohttp.ClientSession,
context: SimpleNamespace,
data: aiohttp.TraceRequestChunkSentParams,
) -> None:
chunk = data.chunk
lines = [
"> " + line
for line in self._process_chunk(chunk, context.request_printable)
]
self._print_trace(lines)
async def _on_request_end(
self,
session: aiohttp.ClientSession,
context: SimpleNamespace,
data: aiohttp.TraceRequestEndParams,
) -> None:
lines = [f"< HTTP/1.1 {data.response.status} {data.response.reason}"]
for key, val in data.response.headers.items():
lines.append(f"< {key}: {val}")
self._print_trace(lines)
content_type = data.response.headers.get("Content-Type", "")
context.response_printable = content_type.startswith(TEXT_TYPE)
async def _on_response_chunk_received(
self,
session: aiohttp.ClientSession,
context: SimpleNamespace,
data: aiohttp.TraceResponseChunkReceivedParams,
) -> None:
chunk = data.chunk
lines = [
"< " + line
for line in self._process_chunk(chunk, context.response_printable)
]
self._print_trace(lines)
def _sanitize_header_value(self, text: str) -> str:
for token in self._find_all_tokens(text):
token_safe = self._sanitize_token(token)
text = text.replace(token, token_safe)
return text
def _sanitize_token(self, token: str) -> str:
tail_len: int = 5
        # at least a third of the token should stay hidden
if tail_len >= len(token) // 3:
return f"<hidden {len(token)} chars>"
hidden = f"<hidden {len(token) - tail_len * 2} chars>"
return token[:tail_len] + hidden + token[-tail_len:]
def _find_all_tokens(self, text: str) -> Iterator[str]:
for match in HEADER_TOKEN_PATTERN.finditer(text):
yield match.group("token")
async def cancel_with_logging(self, task: "asyncio.Task[Any]") -> None:
if not task.done():
task.cancel()
try:
with contextlib.suppress(asyncio.CancelledError):
await task
except Exception as exc:
if self.show_traceback:
log.exception(str(exc), stack_info=True)
else:
log.error(str(exc))
def soft_reset_tty(self) -> None:
if self.tty:
# Soft reset the terminal.
# For example, Midnight Commander often leaves
# scrolling margins (DECSTBM) aligned only
# to a part of the screen size
sys.stdout.write("\x1b[!p")
sys.stdout.flush()
@contextlib.contextmanager
def status(self, message: str) -> Iterator[Status]:
status = Status(
RichText.from_markup(message), console=self.console, spinner="dots"
)
if self.verbosity >= 0:
with status:
yield status
else:
yield status
def pager(self) -> PagerContext:
return self.console.pager(MaybePager(self.console), styles=True, links=True)
def print(self, *objects: Any, err: bool = False, **kwargs: Any) -> None:
console = self.err_console if err else self.console
console.print(*objects, **kwargs)
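# Worked example of Root._sanitize_token with tail_len=5: a 30-char token
# becomes 'abcde<hidden 20 chars>vwxyz', while tokens of 17 chars or fewer
# collapse entirely to '<hidden N chars>' so at least a third stays hidden.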
|
11500203
|
def main():
    year = int(input("Enter the year: "))
    # Leap years are divisible by 4, except century years not divisible by 400.
    if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
        print("It's a leap year")
    else:
        print("It's not a leap year")
main()
|
11500228
|
import os
import time
import numpy as np
import nibabel as nb
# import nilearn.image as image
# import nipype.interfaces.utility as util
# import nipype.pipeline.engine as pe
# import scipy as sp
# from nilearn import datasets
# from nilearn.image import resample_img
# from nilearn.image.image import mean_img
# from nilearn.input_data import NiftiMasker
# from nilearn.plotting import plot_roi, show
# from sklearn import cluster, datasets
# from sklearn.neighbors import kneighbors_graph
# from sklearn.preprocessing import StandardScaler
def test_timeseries_bootstrap():
"""
Tests the timeseries_bootstrap method of BASC workflow
"""
from PyBASC.utils import timeseries_bootstrap
np.random.seed(27)
random_state = np.random.RandomState(seed=27)
x = np.arange(50).reshape((5, 10)).T
actual, other = timeseries_bootstrap(x, 3, random_state=random_state)
desired = np.array([
[4, 14, 24, 34, 44],
[5, 15, 25, 35, 45],
[6, 16, 26, 36, 46],
[8, 18, 28, 38, 48],
[9, 19, 29, 39, 49],
[0, 10, 20, 30, 40],
[7, 17, 27, 37, 47],
[8, 18, 28, 38, 48],
[9, 19, 29, 39, 49],
[8, 18, 28, 38, 48],
])
np.testing.assert_equal(actual, desired)
def test_standard_bootstrap():
"""
Tests the standard_bootstrap method of BASC workflow
"""
from PyBASC.utils import standard_bootstrap
np.random.seed(27)
random_state = np.random.RandomState(seed=27)
x = np.arange(50).reshape((5,10)).T
actual = standard_bootstrap(x, random_state=random_state)
desired = np.array([
[3, 13, 23, 33, 43],
[8, 18, 28, 38, 48],
[8, 18, 28, 38, 48],
[8, 18, 28, 38, 48],
[0, 10, 20, 30, 40],
[5, 15, 25, 35, 45],
[8, 18, 28, 38, 48],
[1, 11, 21, 31, 41],
[2, 12, 22, 32, 42],
[1, 11, 21, 31, 41],
])
np.testing.assert_equal(actual, desired)
# def test_adjacency_matrix():
# """
# Tests the adjacency_matrix of BASC workflow
# """
# x = np.asarray([1, 2, 2, 3, 1])[:,np.newaxis]
# actual = adjacency_matrix(x).astype(int)
# desired = np.array([[1, 0, 0, 0, 1],
# [0, 1, 1, 0, 0],
# [0, 1, 1, 0, 0],
# [0, 0, 0, 1, 0],
# [1, 0, 0, 0, 1]])
# np.testing.assert_equal(actual, desired)
# def generate_blobs():
# np.random.seed(27)
# offset = np.random.randn(30)
# x1 = np.random.randn(200,30) + 2*offset
# x2 = np.random.randn(100,30) + 44*np.random.randn(30)
# x3 = np.random.randn(400,30)
# blobs = np.vstack((x1,x2,x3))
# return blobs
# def generate_simple_blobs(x):
# np.random.seed(x)
# offset = np.random.randn(30)
# x1 = np.random.randn(200,30) + 2*offset
# x2 = np.random.randn(100,30) + 44*np.random.randn(30)+ 2*offset
# blobs = np.vstack((x1,x2))
# return blobs
# def generate_blobs_3d():
# np.random.seed(27)
# x1 = np.random.randn(200,3) + np.array([1.4, 1.8, 22.2])
# x2 = np.random.randn(100,3) + np.array([4.7, 4.0, 9.6])
# x3 = np.random.randn(400,3) + np.array([100.7, 100.0, 100.8])
# blobs = np.vstack((x1,x2,x3))
# return blobs
# def test_cluster_timeseries():
# """
# Tests the cluster_timeseries method on three blobs in three dimensions (to make correlation possible)
# """
# roi_mask_nparray='empty'
# blobs = generate_blobs_3d()
# n_clusters=2
# y_predict = cluster_timeseries(blobs, roi_mask_nparray, n_clusters, similarity_metric = 'correlation', affinity_threshold=0.0, neighbors=10)
# def test_cross_cluster_timeseries():
# np.random.seed(30)
# offset = np.random.randn(30)
# x1 = np.random.randn(20,30) + 10*offset
# x2 = np.random.randn(10,30) + 44*np.random.randn(30)
# data1 = np.vstack((x1,x2))
# data2 = data1
# actual = cross_cluster_timeseries(data1, data2, n_clusters=2, similarity_metric='correlation', affinity_threshold=0.0)
# desired = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
# 1, 1, 1, 1, 1, 1, 1])
# np.testing.assert_equal(actual,desired)
# print('Correlation equals ', 1-sp.spatial.distance.correlation(actual,desired))
# def test_individual_stability_matrix():
# """
# Tests individual_stability_matrix method on three gaussian blobs.
# """
# import utils
# import numpy as np
# import scipy as sp
# desired = np.load(home + '/git_repo/PyBASC/tests/ism_test.npy')
# blobs = generate_blobs()
# ism = utils.individual_stability_matrix(blobs, 20, 3, similarity_metric='correlation')
# #how to use test here?
# # np.corrcoef(ism.flatten(),desired.flatten())
# # np.testing.assert_equal(ism,desired)
# #
# # corr=np.array(sp.spatial.distance.cdist(ism, desired, metric = 'correlation'))
# #
# assert False
# def test_cross_cluster_individual_stability_matrix():
# """
# Tests individual_stability_matrix method on three gaussian blobs.
# """
# blobs1 = generate_simple_blobs(27)
# blobs2 = generate_simple_blobs(27)
# blobs2 = blobs2[0:150,:]
# ism = individual_stability_matrix(blobs1, 10, 2, Y2 = blobs2, cross_cluster = True)
# return ism
# def test_expand_ism_options():
# import time
# import random
# import pandas as pd
# import numpy as np
# n=60
# k=55
# i=0
# vec1=[]
# for x in range(0, n):
# vec1.append(random.randint(0, k-1))
# temp=np.random.random((k,k))
# vec1=np.array(vec1)
# sizevec1=len(vec1)
# matshape=(sizevec1,sizevec1)
# target_mat=np.zeros(matshape)
# source_mat=temp*temp.T
# np.fill_diagonal(source_mat, 1)
# transform_mat=np.zeros((len(source_mat),len(target_mat)))
# #Slow Solution
# matrixtime = time.time()
# for row in range(0,target_mat.shape[0]):
# #print 'row is ', row
# for column in range(0,target_mat.shape[1]):
# #print 'column is', column
# if (row == column):
# target_mat[row,column]=1
# else:
# target_mat[row,column] = source_mat.item(int(vec1[row]), int(vec1[column]))
# print((time.time() - matrixtime))
# target_mat_slow=target_mat
# #XU MACKENZIE SOLUTION
# target_mat=np.zeros(matshape)
# matrixtime = time.time()
# for i in range(0,len(target_mat)):
# transform_mat[vec1[i],i]=1
# temp=np.dot(source_mat,transform_mat)
# target_mat=np.dot(temp.T,transform_mat)
# target_mat_XM=target_mat
# target_mat=np.zeros(matshape)
# XM_time= time.time() - matrixtime
# print((time.time() - matrixtime))
# #Older 'fast' solution
# matrixtime = time.time()
# for row in range(0,source_mat.shape[0]):
# #print('row is ', row)
# #for column in range(0, source_mat.shape[1]):
# for column in range(0, row):
# rowmatch = np.array([vec1==row])
# rowmatch = rowmatch*1
# colmatch = np.array([vec1==column])
# colmatch = colmatch*1
# match_matrix=rowmatch*colmatch.T
# target_mat=target_mat+(match_matrix*source_mat[row,column])
# print((time.time() - matrixtime))
# target_mat_fast=target_mat
# target_mat=np.zeros(matshape)
# target_mat_slow==target_mat_fast
# target_mat_fast==target_mat_XM
# target_mat_slow==target_mat_XM
# def test_data_compress_expand():
# import os
# import numpy as np
# import nibabel as nb
# import utils
# import pandas as pd
# import sklearn as sk
# #Setup
# subject_file = home + '/git_repo/PyBASC/sample_data/sub1/Func_Quarter_Res.nii.gz'
# roi_mask_file= home + '/git_repo/PyBASC/masks/LC_Quarter_Res.nii.gz'
# roi2_mask_file= home + '/git_repo/PyBASC/masks/RC_Quarter_Res.nii.gz'
# n_bootstraps=100
# n_clusters=10
# output_size=20
# cross_cluster=True
# cbb_block_size=None
# affinity_threshold=0.5
# print( 'Calculating individual stability matrix of:', subject_file)
# data = nb.load(subject_file).get_data().astype('float32')
# print( 'Data Loaded')
# if (roi2_mask_file != None):
# print( 'Setting up NIS')
# roi_mask_file_nb = nb.load(roi_mask_file)
# roi2_mask_file_nb= nb.load(roi2_mask_file)
# roi_mask_nparray = nb.load(roi_mask_file).get_data().astype('float32').astype('bool')
# roi2_mask_nparray = nb.load(roi2_mask_file).get_data().astype('float32').astype('bool')
# roi1data = data[roi_mask_nparray]
# roi2data = data[roi2_mask_nparray]
# #add code that uploads the roi1data and roi2data, divides by the mean and standard deviation of the timeseries
# roi1data=sk.preprocessing.normalize(roi1data, norm='l2')
# roi2data=sk.preprocessing.normalize(roi2data, norm='l2')
# print( 'Compressing data')
# data_dict1 = utils.data_compression(roi1data.T, roi_mask_file_nb, roi_mask_nparray, output_size)
# Y1_compressed = data_dict1['data']
# Y1_compressed = Y1_compressed.T
# Y1_labels = pd.DataFrame(data_dict1['labels'])
# Y1_labels=np.array(Y1_labels)
# print( 'Y1 compressed')
# print( 'Compressing Y2')
# data_dict2 = utils.data_compression(roi2data.T, roi2_mask_file_nb, roi2_mask_nparray, output_size)
# Y2_compressed = data_dict2['data']
# Y2_compressed=Y2_compressed.T
# Y2_labels = pd.DataFrame(data_dict2['labels'])
# print( 'Y2 compressed')
# print('Going into ism')
# ism = utils.individual_stability_matrix(Y1_compressed, n_bootstraps, n_clusters, Y2_compressed, cross_cluster, cbb_block_size, affinity_threshold)
# #ism=ism/n_bootstraps # was already done in ism
# print('Expanding ism')
# voxel_num=roi1data.shape[0]
# voxel_ism = utils.expand_ism(ism, Y1_labels)
# #voxel_ism=voxel_ism*100 # was already done in ism
# voxel_ism=voxel_ism.astype("uint8")
# def test_nifti_individual_stability():
# subject_file = home + '/git_repo/PyBASC/sample_data/sub1/Func_Quarter_Res.nii.gz'
# roi_mask_file= home + '/git_repo/PyBASC/masks/LC_Quarter_Res.nii.gz'
# roi2_mask_file= None#home + '/git_repo/PyBASC/masks/RC_Quarter_Res.nii.gz'
# n_bootstraps=100
# n_clusters=2
# output_size=20
# cross_cluster=False
# similarity_metric='correlation'
# cbb_block_size=None
# affinity_threshold=0.5
# nifti_individual_stability(subject_file, roi_mask_file, n_bootstraps, n_clusters, output_size, similarity_metric, cross_cluster, roi2_mask_file, cbb_block_size, affinity_threshold)
# def test_cluster_matrix_average():
# import utils
# import basc
# import matplotlib.pyplot as plt
# roi_mask_nparray='empty'
# blobs = generate_blobs()
# n_clusters=3
# similarity_metric='correlation'
# ism = utils.individual_stability_matrix(blobs, 100, n_clusters, similarity_metric)
# y_predict = utils.cluster_timeseries(blobs, roi_mask_nparray, n_clusters, similarity_metric, affinity_threshold=0.0, neighbors = 10)
# cluster_voxel_scores, K_mask = utils.cluster_matrix_average(ism, y_predict)
# plt.imshow(K_mask)
# #%% TEST BASC.PY
# #Remaining Tests to write:
# #Join_group_stability
# #cluster_selection
# #individual_group_clustered_maps
# #ndarray_to_vol
# def new_test_group_stability_matrix():
# """
# Tests group_stability_matrix method. This creates a dataset of blobs varying only by additive zero-mean gaussian
# noise and calculates the group stability matrix.
# """
# import utils
# import basc
# bootstrap=20
# blobs = generate_blobs()
# ism_dataset = np.zeros((5, blobs.shape[0], blobs.shape[0]))
# indiv_stability_list=[]
# for i in range(ism_dataset.shape[0]):
# ism_dataset[i] = utils.individual_stability_matrix(blobs + 0.2*np.random.randn(blobs.shape[0], blobs.shape[1]), bootstrap, 3, similarity_metric='correlation',affinity_threshold = 0.0)
# f = 'ism_dataset_%i.npy' % i
# indiv_stability_list.append(f)
# np.save(f, ism_dataset[i])
# #indiv_stability_list=ism_list
# n_bootstraps=10
# n_clusters=3
# G = basc.map_group_stability(indiv_stability_list, n_bootstraps, n_clusters)
# return G
# def test_group_stability_matrix():
# """
# Tests group_stability_matrix method. This creates a dataset of blobs varying only by additive zero-mean gaussian
# noise and calculates the group stability matrix.
# """
# #def map_group_stability(indiv_stability_list, n_clusters, bootstrap_list, stratification=None):
# blobs = generate_blobs()
# ism_dataset = np.zeros((5, blobs.shape[0], blobs.shape[0]))
# ism_list = []
# for i in range(ism_dataset.shape[0]):
# ism_dataset[i] = individual_stability_matrix(blobs + 0.2*np.random.randn(blobs.shape[0], blobs.shape[1]), 10, 3, affinity_threshold = 0.0)
# f = 'ism_dataset_%i.npy' % i
# ism_list.append(f)
# np.save(f, ism_dataset[i])
# G, cluster_G, cluster_voxel_scores = group_stability_matrix(ism_list, 10, 3, [0,1,1,1,0])
# return G, cluster_g, cluster_voxel_scores
# def test_individual_group_clustered_maps():
# # indiv_stability_list
# # clusters_G
# # roi_mask_file
# #
# # import utils
# # import basc
# #
# # bootstrap=20
# # blobs = generate_blobs()
# #
# # ism_dataset = np.zeros((5, blobs.shape[0], blobs.shape[0]))
# #
# # indiv_stability_list=[]
# #
# # for i in range(ism_dataset.shape[0]):
# # ism_dataset[i] = utils.individual_stability_matrix(blobs + 0.2*np.random.randn(blobs.shape[0], blobs.shape[1]), 10, 3, affinity_threshold = 0.0)
# # f = 'ism_dataset_%i.npy' % i
# # indiv_stability_list.append(f)
# # np.save(f, ism_dataset[i])
# #
# # G, cluster_G, cluster_voxel_scores = group_stability_matrix(ism_list, 10, 3, [0,1,1,1,0])
# import basc
# import utils
# subject_file_list= [home + '/git_repo/PyBASC/sample_data/sub1/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub2/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub3/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub1/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub2/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub1/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub2/Func_Quarter_Res.nii.gz']
# roi_mask_file= home + '/git_repo/PyBASC/masks/LC_Quarter_Res.nii.gz'
# dataset_bootstraps=5
# timeseries_bootstraps=5
# n_clusters=3
# output_size=10
# similarity_metric = 'correlation'
# bootstrap_list=list(range(0,dataset_bootstraps))
# cross_cluster=True
# blocklength=1
# roi2_mask_file= home + '/git_repo/PyBASC/masks/RC_Quarter_Res.nii.gz'
# cbb_block_size=None
# affinity_threshold= 0.5 #* len(subject_file_list)
# out_dir= home + '/PyBASC_outputs/IGCM_HowsItWork3'
# run=True
# indiv_stability_list=[]
# for i in range(0,len(subject_file_list)):
# temp = basc.nifti_individual_stability(subject_file_list[i], roi_mask_file, timeseries_bootstraps, n_clusters, output_size,similarity_metric, cross_cluster, roi2_mask_file, blocklength, cbb_block_size, affinity_threshold)
# #def nifti_individual_stability(subject_file, roi_mask_file, n_bootstraps, n_clusters, output_size, similarity_metric, cross_cluster=False, roi2_mask_file=None, blocklength=1, cbb_block_size=None, affinity_threshold=0.5):
# #temp=temp/timeseries_bootstraps
# indiv_stability_list.append(temp)
# G_file=[]
# for i in range(0,dataset_bootstraps):
# temp2= map_group_stability(indiv_stability_list, n_clusters, bootstrap_list, roi_mask_file)
# G_file.append(temp2)
# G, clusters_G, ism_gsm_corr, gsm_file, clusters_G_file, ism_gsm_corr_file= basc.join_group_stability(indiv_stability_list, G_file, dataset_bootstraps, n_clusters, roi_mask_file)
# #k_mask,k_mask_file, icvs, cluster_voxel_scores,
# for i in range(0,len(subject_file_list)):
# icvs_file, cluster_voxel_scores_file, k_mask_file, ind_group_cluster_stability_file, individualized_group_clusters_img_file =basc.individual_group_clustered_maps(indiv_stability_list[i], clusters_G, roi_mask_file)
# return icvs_file, cluster_voxel_scores_file, k_mask_file, ind_group_cluster_stability_file, individualized_group_clusters_img_file # G, clusters_G, cluster_voxel_scores, ism_gsm_corr, gsm_file, clusters_G_file, , ism_gsm_corr_file
# #output_names=['icvs_file',
# # 'cluster_voxel_scores_file',
# # 'k_mask_file',
# # 'ind_group_cluster_stability_file',
# # 'individualized_group_clusters_img_file'],
# def test_save_igcm_nifti(cluster_voxel_scores_file):
# import basc
# import utils
# subject_file_list= [home + '/git_repo/PyBASC/sample_data/sub1/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub2/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub3/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub1/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub2/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub1/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub2/Func_Quarter_Res.nii.gz']
# roi_mask_file= home + '/git_repo/PyBASC/masks/LC_Quarter_Res.nii.gz'
# dataset_bootstraps=5
# timeseries_bootstraps=5
# n_clusters=3
# output_size=10
# bootstrap_list=list(range(0,dataset_bootstraps))
# cross_cluster=True
# roi2_mask_file= home + '/git_repo/PyBASC/masks/RC_Quarter_Res.nii.gz'
# cbb_block_size=None
# affinity_threshold= 0.5 #* len(subject_file_list)
# out_dir= home + '/PyBASC_outputs/SaveIGCMDebug2'
# run=True
# indiv_stability_list=[]
# for i in range(0,len(subject_file_list)):
# temp = basc.nifti_individual_stability(subject_file_list[i], roi_mask_file, timeseries_bootstraps, n_clusters, output_size, cross_cluster, roi2_mask_file, cbb_block_size, affinity_threshold)
# #temp=temp/timeseries_bootstraps
# indiv_stability_list.append(temp)
# G_file=[]
# for i in range(0,dataset_bootstraps):
# temp2= map_group_stability(indiv_stability_list, n_clusters, bootstrap_list)
# G_file.append(temp2)
# G, clusters_G, ism_gsm_corr, gsm_file, clusters_G_file, ism_gsm_corr_file= basc.join_group_stability(indiv_stability_list, G_file, dataset_bootstraps, n_clusters)
# #k_mask,k_mask_file, icvs, cluster_voxel_scores,
# for i in range(0,len(subject_file_list)):
# icvs_file, cluster_voxel_scores_file, k_mask_file, ind_group_cluster_stability_file =basc.individual_group_clustered_maps(indiv_stability_list[i], clusters_G, roi_mask_file)
# basc.save_igcm_nifti(cluster_voxel_scores_file,clusters_G_file,roi_mask_file)
# #%% TEST BASC WORKFLOW
# def test_basc_workflow_runner():
# from basc_workflow_runner import run_basc_workflow
# #import utils
# subject_file_list= [home + '/git_repo/PyBASC/sample_data/sub1/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub2/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub3/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub1/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub2/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub1/Func_Quarter_Res.nii.gz',
# home + '/git_repo/PyBASC/sample_data/sub2/Func_Quarter_Res.nii.gz']
# roi_mask_file= home + '/git_repo/PyBASC/masks/LC_Quarter_Res.nii.gz'
# dataset_bootstraps=20
# timeseries_bootstraps=20
# n_clusters=4
# output_size=10
# blocklength=1
# bootstrap_list=list(range(0,dataset_bootstraps))
# cross_cluster=True
# similarity_metric='correlation'
# roi2_mask_file= home + '/git_repo/PyBASC/masks/RC_Quarter_Res.nii.gz'
# affinity_threshold= [0.0] * len(subject_file_list)
# out_dir= home + '/PyBASC_outputs/Testing_spatialconstraint'
# run=True
# basc_test= run_basc_workflow(subject_file_list, roi_mask_file, dataset_bootstraps, timeseries_bootstraps, n_clusters, output_size, bootstrap_list, proc_mem, similarity_metric, cross_cluster=cross_cluster, roi2_mask_file=roi2_mask_file, blocklength=blocklength, affinity_threshold=affinity_threshold, out_dir=out_dir, run=run)
# #PyBASC_test=run_basc_workflow(subject_file_list, roi_mask_file, dataset_bootstraps, timeseries_bootstraps, n_clusters, output_size, bootstrap_list, proc_mem, similarity_metric, cross_cluster=cross_cluster, roi2_mask_file=roi2_mask_file, blocklength=blocklength, affinity_threshold=affinity_threshold, out_dir=out_dir, run=run)
# #%%
# def heavy_test_basc_workflow_runner():
# #%%
# from basc_workflow_runner import run_basc_workflow
# import utils
# subject_file_list= ['/Users/aki.nikolaidis/Desktop/NKI_SampleData/A00060280/3mm_bandpassed_demeaned_filtered_antswarp.nii.gz',
# '/Users/aki.nikolaidis/Desktop/NKI_SampleData/A00060384/3mm_bandpassed_demeaned_filtered_antswarp.nii.gz']#,
# # '/Users/aki.nikolaidis/Desktop/NKI_SampleData/A00060429/3mm_bandpassed_demeaned_filtered_antswarp.nii.gz',
# # '/Users/aki.nikolaidis/Desktop/NKI_SampleData/A00060503/3mm_bandpassed_demeaned_filtered_antswarp.nii.gz',
# # '/Users/aki.nikolaidis/Desktop/NKI_SampleData/A00060603/3mm_bandpassed_demeaned_filtered_antswarp.nii.gz',
# # '/Users/aki.nikolaidis/Desktop/NKI_SampleData/A00060864/3mm_bandpassed_demeaned_filtered_antswarp.nii.gz']
# proc_mem= [3,6] #first is number of proc , second total number of mem
# roi_mask_file=home + '/git_repo/PyBASC/masks/Yeo7_3mmMasks/BilateralStriatumThalamus_3mm.nii.gz'
# dataset_bootstraps=2
# timeseries_bootstraps=100
# n_clusters=8
# output_size=500
# bootstrap_list=list(range(0,dataset_bootstraps))
# cross_cluster=True
# blocklength=2
# similarity_metric='correlation'
# roi2_mask_file=home + '/git_repo/PyBASC/masks/Yeo7_3mmMasks/YeoTest2.nii.gz'
# affinity_threshold= [0.0] * len(subject_file_list)
# out_dir= home + '/PyBASC_outputs/NewWOrkerTest'
# run=True
# basc_test= run_basc_workflow(subject_file_list, roi_mask_file, dataset_bootstraps, timeseries_bootstraps, n_clusters, output_size, bootstrap_list, proc_mem, similarity_metric, cross_cluster=cross_cluster, roi2_mask_file=roi2_mask_file, blocklength=blocklength, affinity_threshold=affinity_threshold, out_dir=out_dir, run=run)
# #%%
# def test_compare_stability_matrices():
# import utils
# import basc
# bootstrap=20
# blobs = generate_blobs()
# n_bootstraps=10
# n_clusters=5
# subjects=20
# ism_dataset = np.zeros((subjects, blobs.shape[0], blobs.shape[0]))
# ism_list = []
# for i in range(ism_dataset.shape[0]):
# ism_dataset[i] = utils.individual_stability_matrix(blobs + 0.2*np.random.randn(blobs.shape[0], blobs.shape[1]), n_bootstraps, n_clusters, affinity_threshold = 0.0)
# f = 'ism_dataset_%i.npy' % i
# ism_list.append(f)
# np.save(f, ism_dataset[i])
# indiv_stability_list=ism_list
# G = basc.map_group_stability(ism_list, n_bootstraps, n_clusters)
# gsm=np.load(G)
# gsm=gsm.astype("float64")
# corr= []
# corr= np.zeros((subjects,1))
# for j in range(ism_dataset.shape[0]):
# ism=ism_dataset[j].astype("float64")
# corr[j] = utils.compare_stability_matrices(gsm,ism)
# meandist5 = corr.mean()
# vardist5 = corr.var()
# sumdist5 = corr.cumsum()
# #%%
# def NED_heavy_basc_workflow_test():
# #%%
# from basc_workflow_runner import run_basc_workflow
# import utils
# import time
# matrixtime = time.time()
# # subject_file_list= ['/data/rockland_sample/A00060603/functional_mni/_scan_clg_2_rest_645/bandpassed_demeaned_filtered_antswarp.nii.gz',
# # '/data/rockland_sample/A00060503/functional_mni/_scan_clg_2_rest_645/bandpassed_demeaned_filtered_antswarp.nii.gz',
# # '/data/rockland_sample/A00060429/functional_mni/_scan_clg_2_rest_645/bandpassed_demeaned_filtered_antswarp.nii.gz',
# # '/data/rockland_sample/A00060384/functional_mni/_scan_clg_2_rest_645/bandpassed_demeaned_filtered_antswarp.nii.gz',
# # '/data/rockland_sample/A00060280/functional_mni/_scan_clg_2_rest_645/bandpassed_demeaned_filtered_antswarp.nii.gz',
# # '/data/rockland_sample/A00059935/functional_mni/_scan_dsc_2_rest_645/bandpassed_demeaned_filtered_antswarp.nii.gz',
# # '/data/rockland_sample/A00059875/functional_mni/_scan_dsc_2_rest_645/bandpassed_demeaned_filtered_antswarp.nii.gz',
# # '/data/rockland_sample/A00059734/functional_mni/_scan_clg_2_rest_645/bandpassed_demeaned_filtered_antswarp.nii.gz',
# # '/data/rockland_sample/A00059733/functional_mni/_scan_clg_2_rest_645/bandpassed_demeaned_filtered_antswarp.nii.gz']
# subject_file_list= ['/data/Projects/anikolai/rockland_downsampled/A00018030/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00027159/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00027167/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00027439/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00027443/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00030980/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00030981/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00031216/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00031219/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00031410/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00031411/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00031578/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00031881/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00032008/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00032817/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00033231/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00033714/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00034073/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00034074/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00034350/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00035291/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00035292/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00035364/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00035377/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00035869/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00035940/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00035941/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00035945/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00037125/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00037368/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00037458/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00037459/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00037483/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00038603/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00038706/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00039075/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00039159/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00039866/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00040342/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00040440/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00040556/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00040798/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00040800/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00040815/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00041503/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00043240/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00043282/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00043494/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00043740/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00043758/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00043788/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00044084/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00044171/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00050743/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00050847/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00051063/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00051603/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00051690/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00051691/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00051758/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00052069/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00052165/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00052183/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00052237/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00052461/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00052613/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00052614/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00053203/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00053320/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00053390/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00053490/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00053744/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00053873/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00054206/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00055693/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00056703/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00057405/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00057480/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00057725/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00057862/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00057863/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00057967/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00058004/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00058053/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00058060/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00058061/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00058215/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00058229/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00058516/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00058537/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00058570/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00058685/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00058951/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00059109/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00059325/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00059427/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00059733/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00059734/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00059865/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00059875/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00059935/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00060280/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00060384/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00060429/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00060503/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00060603/3mm_resampled.nii.gz',
# '/data/Projects/anikolai/rockland_downsampled/A00060846/3mm_resampled.nii.gz']
# roi_mask_file=home + '/git_repo/BASC/masks/BG_3mm.nii.gz'
# dataset_bootstraps=50
# timeseries_bootstraps=50
# n_clusters=3
# output_size=400
# cross_cluster=True
# bootstrap_list=list(range(0,dataset_bootstraps))
# proc_mem=[10,80]
# #roi2_mask_file=home + '/git_repo/BASC/masks/yeo_3mm.nii.gz'
# roi2_mask_file=home + '/git_repo/BASC/masks/yeo2_3mm.nii.gz'
# affinity_threshold= [0.5] * 107 #[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
# out_dir= '/data/Projects/anikolai/BASC_outputs/NKITest'
# run=True
# basc_test= run_basc_workflow(subject_file_list, roi_mask_file, dataset_bootstraps, timeseries_bootstraps, n_clusters, output_size, bootstrap_list, proc_mem, cross_cluster=cross_cluster, roi2_mask_file=roi2_mask_file, affinity_threshold=affinity_threshold, out_dir=out_dir, run=run)
# print((time.time() - matrixtime))
# #%% TESTS UNDER CONSTRUCTION
# # NDARRAY TO VOL
# def test_ndarray_to_vol():
# import basc
# import nibabel as nb
# subject_file = home + '/git_repo/PyBASC/sample_data/sub1/Func_Quarter_Res.nii.gz'
# subject_file = home + '/git_repo/PyBASC/sample_data/test.nii.gz'
# data = nb.load(subject_file).get_data().astype('float32')
# roi_mask_file= home + '/git_repo/PyBASC/masks/LC_Quarter_Res.nii.gz'
# print( 'Data Loaded')
# roi_mask_file_nb = nb.load(roi_mask_file)
# roi_mask_nparray = nb.load(roi_mask_file).get_data().astype('float32').astype('bool')
# roi1data = data[roi_mask_nparray]
# data_array=roi1data
# sample_file=subject_file
# filename=home + '/git_repo/PyBASC/sample_data/ndarray_to_vol_test.nii.gz'
# basc.ndarray_to_vol(data_array, roi_mask_file, roi_mask_file, filename)
# def test_co_clustering():
# import numpy as np
# import nibabel as nb
# from matplotlib import pyplot as plt
# import sklearn as sk
# from sklearn.datasets import make_biclusters
# from sklearn.datasets import samples_generator as sg
# from sklearn.cluster.bicluster import SpectralCoclustering
# from sklearn.metrics import consensus_score
# # REAL DATA
# subject_file= '/Users/aki.nikolaidis/Desktop/NKI_SampleData/A00060280/3mm_bandpassed_demeaned_filtered_antswarp.nii.gz'
# roi_mask_file=home + '/git_repo/basc/masks/BG_3mm.nii.gz'
# roi2_mask_file=home + '/git_repo/basc/masks/yeo2_3mm.nii.gz'
# data = nb.load(subject_file).get_data().astype('float32')
# print( 'Data Loaded')
# print( 'Setting up NIS')
# roi_mask_file_nb = nb.load(roi_mask_file)
# roi2_mask_file_nb= nb.load(roi2_mask_file)
# roi_mask_nparray = nb.load(roi_mask_file).get_data().astype('float32').astype('bool')
# roi2_mask_nparray = nb.load(roi2_mask_file).get_data().astype('float32').astype('bool')
# roi1data = data[roi_mask_nparray]
# roi2data = data[roi2_mask_nparray]
# #add code that uploads the roi1data and roi2data, divides by the mean and standard deviation of the timeseries
# roi1data=sk.preprocessing.normalize(roi1data, norm='l2')
# roi2data=sk.preprocessing.normalize(roi2data, norm='l2')
# dist_btwn_data_1_2 = np.array(sp.spatial.distance.cdist(roi1data, roi2data, metric = 'correlation'))
# sim_btwn_data_1_2=1-dist_btwn_data_1_2
# sim_btwn_data_1_2[np.isnan(sim_btwn_data_1_2)]=0
# sim_btwn_data_1_2[sim_btwn_data_1_2<0]=0
# sim_btwn_data_1_2=sim_btwn_data_1_2+(np.random.rand(len(sim_btwn_data_1_2),len(sim_btwn_data_1_2[1,:])))/100
# sim_btwn_data_1_2[sim_btwn_data_1_2>1]=1
# sum(sum(sim_btwn_data_1_2==np.inf))
# sum(sum(sim_btwn_data_1_2==np.nan))
# model = SpectralCoclustering(n_clusters=5, random_state=0, n_init=100)
# model.fit(sim_btwn_data_1_2)
# fit_data = sim_btwn_data_1_2[np.argsort(model.row_labels_)]
# fit_data = fit_data[:, np.argsort(model.column_labels_)]
# plt.matshow(fit_data, cmap=plt.cm.Blues)
# plt.title("After biclustering; rearranged to show biclusters")
# plt.show()
# #SIMULATION DATA
# import numpy as np
# from matplotlib import pyplot as plt
# from sklearn.datasets import make_biclusters
# from sklearn.datasets import samples_generator as sg
# from sklearn.cluster.bicluster import SpectralCoclustering
# from sklearn.metrics import consensus_score
# #Creating Simulated Data
# data, rows, columns = make_biclusters(
# shape=(300, 100), n_clusters=5, noise=5,
# shuffle=False, random_state=0)
# plt.matshow(data, cmap=plt.cm.Blues)
# plt.title("Original dataset")
# data, row_idx, col_idx = sg._shuffle(data, random_state=0)
# plt.matshow(data, cmap=plt.cm.Blues)
# plt.title("Shuffled dataset")
# #Creating Model
# model = SpectralCoclustering(n_clusters=5, random_state=0)
# model.fit(data)
# score = consensus_score(model.biclusters_,
# (rows[:, row_idx], columns[:, col_idx]))
# print("consensus score: {:.3f}".format(score))
# fit_data = data[np.argsort(model.row_labels_)]
# fit_data = fit_data[:, np.argsort(model.column_labels_)]
# plt.matshow(fit_data, cmap=plt.cm.Blues)
# plt.title("After biclustering; rearranged to show biclusters")
# plt.show()
# ####################################################################
# ####################################################################
# from sklearn import cluster
# import scipy as sp
# import time
# from sklearn import cluster, datasets
# import numpy as np
# from matplotlib import pyplot as plt
# from sklearn.datasets import make_biclusters
# from sklearn.datasets import samples_generator as sg
# from sklearn.cluster.bicluster import SpectralCoclustering
# from sklearn.metrics import consensus_score
# data1 = generate_simple_blobs(27)
# data2 = generate_simple_blobs(27)
# data2 = data2[0:150,:]
# print("Calculating Cross-clustering")
# print("Calculating pairwise distances between areas")
# dist_btwn_data_1_2 = np.array(sp.spatial.distance.cdist(roi1data, roi2data, metric = 'correlation'))
# sim_btwn_data_1_2=1-dist_btwn_data_1_2
# sim_btwn_data_1_2[sim_btwn_data_1_2<0]=0
# co_cluster=cluster.SpectralCoclustering()
# co_cluster.fit(sim_btwn_data_1_2)
# score = consensus_score(co_cluster.biclusters_,
# (rows[:, row_idx], columns[:, col_idx]))
# print("consensus score: {:.3f}".format(score))
# fit_data = data[np.argsort(co_cluster.row_labels_)]
# fit_data = fit_data[:, np.argsort(co_cluster.column_labels_)]
# plt.matshow(fit_data, cmap=plt.cm.Blues)
# plt.title("After biclustering; rearranged to show biclusters")
# plt.show()
|
11500253
|
from os.path import join, dirname
from setuptools import setup, find_packages
from version import get_version
setup(
name='git-lfs',
version=get_version(),
description='A lightweight Git Large File Storage fetcher',
author='Changaco',
author_email='<EMAIL>',
url='https://github.com/liberapay/git-lfs-fetch.py',
license='CC0',
packages=find_packages(exclude=['tests']),
long_description=open(join(dirname(__file__), 'README.rst')).read(),
long_description_content_type='text/x-rst',
keywords='git lfs',
)
|
11500263
|
import logging
import onnx
from onnx import numpy_helper
from onnx.helper import make_model, make_tensor_value_info, make_opsetid
from furiosa_sdk_quantizer.frontend.onnx.quantizer.utils import __PRODUCER__
from furiosa_sdk_quantizer.frontend.onnx import __DOMAIN__, __OPSET_VERSION__
logger = logging.getLogger("Furiosa-Quantizer")
logging.basicConfig(level=logging.INFO)
def name_nodes(model):
for idx, node in enumerate(model.graph.node):
node.name = "%s_%d" % (node.op_type, idx)
return model
def eliminate_unused_initializer(model):
model = _eliminate_unused_quantization_annotation(model)
node_input_names = [node_input for node in model.graph.node for node_input in node.input]
qtensor_names = [
qtensor_name.value
for annot in model.graph.quantization_annotation
for qtensor_name in annot.quant_parameter_tensor_names
]
unused_initializer = list()
for init in model.graph.initializer:
# Even if an init is not an input of a node, do not remove it if defined in graph.quantization_annotation.
if init.name not in node_input_names and init.name not in qtensor_names:
unused_initializer.append(init)
for unused in unused_initializer:
model.graph.initializer.remove(unused)
return model
def eliminate_unused_input(model):
node_input_names = [node_input for node in model.graph.node for node_input in node.input]
unused_input = list()
for input in model.graph.input:
if input.name not in node_input_names:
unused_input.append(input)
for unused in unused_input:
model.graph.input.remove(unused)
return model
def eliminate_unused_output(model):
node_output_names = [node_output for node in model.graph.node for node_output in node.output]
unused_output = list()
for output in model.graph.output:
if output.name not in node_output_names:
unused_output.append(output)
for unused in unused_output:
model.graph.output.remove(unused)
return model
def eliminate_unused_value_info(model):
node_output_names = [node_output for node in model.graph.node for node_output in node.output]
graph_output_names = [vi.name for vi in model.graph.output]
unused_value_info = list()
for value_info in model.graph.value_info:
if value_info.name not in node_output_names:
unused_value_info.append(value_info)
if value_info.name in graph_output_names:
unused_value_info.append(value_info)
for unused in unused_value_info:
model.graph.value_info.remove(unused)
return model
def _eliminate_unused_quantization_annotation(model):
node_input_names = [node_input for node in model.graph.node for node_input in node.input]
node_output_names = [node_output for node in model.graph.node for node_output in node.output]
unused_quant_annot = list()
for quant_annot in model.graph.quantization_annotation:
if quant_annot.tensor_name not in set(node_input_names + node_output_names):
unused_quant_annot.append(quant_annot)
for unused in unused_quant_annot:
model.graph.quantization_annotation.remove(unused)
return model
def eliminate_unused_protos(model):
funcs = [
eliminate_unused_initializer,
eliminate_unused_input,
eliminate_unused_output,
eliminate_unused_value_info,
]
for func in funcs:
model = func(model)
return model
def include_initializer_to_graph_input(model):
input_value_names = [inp.name for inp in model.graph.input]
for init in model.graph.initializer:
if init.name not in input_value_names:
dims = numpy_helper.to_array(init).shape
value_info = make_tensor_value_info(init.name, init.data_type, dims)
model.graph.input.append(value_info)
# do not append duplicated initializer to graph input
input_value_names.append(init.name)
return model
def rebuild_model(model, new_nodes, eliminate=True, renaming=True):
# remove all nodes and re-make model.graph based on newly given nodes.
model.graph.ClearField("node")
model.graph.node.extend(new_nodes)
default_opset = make_opsetid(__DOMAIN__, __OPSET_VERSION__)
model = make_model(model.graph, opset_imports=[default_opset])
# eliminate all unused protos such as initializer, input, output, and value_info.
if eliminate:
model = eliminate_unused_protos(model)
# include initializer to graph input
model = include_initializer_to_graph_input(model)
# rename node.name
if renaming:
model = name_nodes(model)
model.producer_name = __PRODUCER__
return model
def fix_batch_size_as_one(model):
"""
fix batch_size = 1 if dim_param is given.
"""
for input in model.graph.input:
try:
batch_dim = input.type.tensor_type.shape.dim[0]
except IndexError:
continue
if batch_dim.dim_param:
logger.info(
"Dynamic batch size is detected at input_name: {}. "
"Fix batch_size=1 for valid shape inference.".format(input.name)
)
input.type.tensor_type.shape.dim[0].dim_value = 1
return model
def make_conv_bias_name_unique(model):
# Renames Conv operators' biases, if necessary, to make their names
# unique so that the biases can be associated with different
# quantization scale parameters.
initializer = {init.name: init for init in model.graph.initializer}
seen = set()
for node in model.graph.node:
if node.op_type != "Conv" or len(node.input) < 3:
continue
bias = node.input[2]
if bias not in seen:
seen.add(bias)
continue
tensor = onnx.TensorProto()
tensor.CopyFrom(initializer[bias])
# HACK: This attempts to give the bias tensor a new unique name.
# Although it is unlikely, there is a possibility that the new
# name is already occupied by a tensor in the model.
tensor.name = f"{bias}_{node.output[0]}"
node.input[2] = tensor.name
model.graph.initializer.append(tensor)
return model
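if __name__ == "__main__":
    # Illustrative check of make_conv_bias_name_unique on a toy graph (this
    # demo is an added sketch, not part of the quantizer API): two Conv nodes
    # share one bias initializer, so the second occurrence must be renamed.
    import numpy as np
    from onnx.helper import make_graph, make_node
    x = make_tensor_value_info("x", onnx.TensorProto.FLOAT, [1, 1, 4, 4])
    y = make_tensor_value_info("y", onnx.TensorProto.FLOAT, [1, 1, 4, 4])
    w = numpy_helper.from_array(np.ones((1, 1, 1, 1), dtype=np.float32), "w")
    b = numpy_helper.from_array(np.zeros(1, dtype=np.float32), "b")
    conv0 = make_node("Conv", ["x", "w", "b"], ["h"], name="Conv_0")
    conv1 = make_node("Conv", ["h", "w", "b"], ["y"], name="Conv_1")
    toy = make_model(make_graph([conv0, conv1], "toy", [x], [y],
                                initializer=[w, b]))
    toy = make_conv_bias_name_unique(toy)
    assert toy.graph.node[1].input[2] == "b_y"  # renamed to f"{bias}_{output}"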
|
11500301
|
from urllib.parse import urlparse
from researchhub.settings import AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY
def get_s3_url(bucket, key, with_credentials=False):
s3 = 's3://'
if with_credentials is True:
return (
f'{s3}{AWS_ACCESS_KEY_ID}:{AWS_SECRET_ACCESS_KEY}@{bucket}{key}'
)
return f'{s3}{bucket}{key}'
def http_to_s3(url, with_credentials=False):
parsed = urlparse(url)
bucket = parsed.netloc.split('.', maxsplit=1)[0]
key = parsed.path
return get_s3_url(bucket, key, with_credentials=with_credentials)
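if __name__ == "__main__":
    # Illustrative conversion (bucket, key and URL are made up; running this
    # standalone assumes the researchhub settings module is importable).
    url = 'https://my-bucket.s3.amazonaws.com/papers/file.pdf'
    print(http_to_s3(url))  # -> s3://my-bucket/papers/file.pdf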
|
11500303
|
from elasticapm.conf.constants import TRANSACTION
from elasticapm.processors import for_events
@for_events(TRANSACTION)
def filter_processor(client, event):
return event
# TODO: Resolve key error
# event_url = event['context']['request']['url']['full']
# if ('ignore_apm' in event_url) or ('/health/' in event_url):
# return False
# return event
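def _filter_processor_safe(client, event):
    # Possible resolution of the TODO above (added sketch; the function name
    # is hypothetical and it is intentionally left unregistered): dict.get
    # keeps the event whenever the nested context/request/url keys are missing.
    event_url = (event.get('context', {}).get('request', {})
                 .get('url', {}).get('full', ''))
    if 'ignore_apm' in event_url or '/health/' in event_url:
        return False
    return event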
|
11500338
|
import datetime
from webuntis.utils.timetable_utils import table
from .. import WebUntisTestCase
class StubPeriod(object):
def __init__(self, start, end):
self.start = datetime.datetime.strptime(start, '%Y-%m-%d %H:%M')
self.end = datetime.datetime.strptime(end, '%Y-%m-%d %H:%M')
class BasicUsage(WebUntisTestCase):
def test_empty(self):
assert table([]) == []
def test_simple(self):
monday = [
StubPeriod('2012-05-03 08:01', '2012-05-03 09:00'),
StubPeriod('2012-05-03 09:01', '2012-05-03 10:00')
]
tuesday = [
StubPeriod('2012-06-03 08:01', '2012-06-03 09:00'),
StubPeriod('2012-06-03 09:01', '2012-06-03 10:00')
]
wednesday = [
StubPeriod('2012-07-03 08:01', '2012-07-03 09:00'),
StubPeriod('2012-07-03 09:01', '2012-07-03 10:00')
]
given_input = set(monday + tuesday + wednesday)
rows = table(given_input)
assert len(rows) == 2
assert all(len(row) == 3 for time, row in rows)
assert all(all(len(cell) == 1 for date, cell in row) for time, row in rows)
assert all(all(list(cell)[0] in given_input for date, cell in row) for time, row in rows)
|
11500366
|
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import stochastic_pooling
fX = theano.config.floatX
def test_stochastic_pool_2d_node_serialization():
tn.check_serialization(stochastic_pooling.StochasticPool2DNode("a"))
def test_stochastic_pool_2d_node1():
network = tn.SequentialNode(
"s",
[tn.InputNode("i", shape=(1, 1, 4, 4)),
stochastic_pooling.StochasticPool2DNode("m",
pool_size=(2, 2),
deterministic=True)]
).network()
fn = network.function(["i"], ["m"])
x = np.arange(16).astype(fX).reshape(1, 1, 4, 4)
pre_pool = np.array([[[[[0, 1, 4, 5], [2, 3, 6, 7]],
[[8, 9, 12, 13], [10, 11, 14, 15]]]]], dtype=fX)
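    # deterministic mode returns the activation-weighted average of each
    # pooling window: sum(a ** 2) / sum(a)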
ans = ((pre_pool ** 2) / pre_pool.sum(axis=-1)[..., None]).sum(axis=-1)
np.testing.assert_allclose(fn(x)[0],
ans,
rtol=1e-5)
nt.assert_equal(network["m"].get_vw("default").shape,
ans.shape)
def test_stochastic_pool_2d_node2():
# testing that stochastic version works
network = tn.SequentialNode(
"s",
[tn.InputNode("i", shape=(1, 1, 4, 4)),
stochastic_pooling.StochasticPool2DNode("m",
pool_size=(2, 2))]
).network()
fn = network.function(["i"], ["m"])
x = np.arange(16).astype(fX).reshape(1, 1, 4, 4)
fn(x)
|
11500372
|
import numpy as np
## Policies
def p_exposed_growth(params, substep, state_history, prev_state):
    total_population = (prev_state['susceptible'] + prev_state['exposed']
                        + prev_state['infected'] + prev_state['recovered'])
    exposed_population = (params['infection_rate'] * prev_state['infected']
                          * (prev_state['susceptible'] / total_population))
    return {'exposed_growth': np.ceil(exposed_population)}
def p_infected_growth(params, substep, state_history, prev_state):
    infected_population = (
        params['exposure_rate'] * prev_state['exposed']
        - (1 - params['death_rate']) * params['recovering_rate'] * prev_state['infected']
        - params['death_rate'] * params['death_proportion_rate'] * prev_state['infected'])
    return {'infected_growth': np.ceil(infected_population)}
def p_recovered_growth(params, substep, state_history, prev_state):
    recovered_population = ((1 - params['death_rate']) * params['recovering_rate']
                            * prev_state['infected'])
    return {'recovered_growth': np.ceil(recovered_population)}
def p_dead_growth(params, substep, state_history, prev_state):
    dead_population = (params['death_rate'] * params['death_proportion_rate']
                       * prev_state['infected'])
    return {'dead_growth': np.ceil(dead_population)}
## SUFs
def s_susceptible_population(params, substep, state_history, prev_state, policy_input):
updated_susceptible_population = prev_state['susceptible'] - policy_input['exposed_growth']
return ('susceptible', max(updated_susceptible_population, 0))
def s_exposed_population(params, substep, state_history, prev_state, policy_input):
updated_exposed_population = prev_state['exposed'] + policy_input['exposed_growth'] - policy_input['infected_growth']
return ('exposed', max(updated_exposed_population, 0))
def s_infected_population(params, substep, state_history, prev_state, policy_input):
updated_infected_population = prev_state['infected'] + policy_input['infected_growth'] - policy_input['recovered_growth']
return ('infected', max(updated_infected_population, 0))
def s_recovered_population(params, substep, state_history, prev_state, policy_input):
updated_recovered_population = prev_state['recovered'] + policy_input['recovered_growth']
return ('recovered', max(updated_recovered_population, 0))
def s_dead_population(params, substep, state_history, prev_state, policy_input):
updated_dead_population = prev_state['dead'] + policy_input['dead_growth']
return ('dead', max(updated_dead_population, 0))
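if __name__ == "__main__":
    # Standalone sketch of the update rules above. The parameter values and
    # the hand-rolled loop are illustrative only (not calibrated, and they
    # bypass whatever simulation framework these policy/SUF functions target).
    params = {'infection_rate': 0.4, 'exposure_rate': 0.2,
              'recovering_rate': 0.1, 'death_rate': 0.02,
              'death_proportion_rate': 0.1}
    state = {'susceptible': 10000.0, 'exposed': 10.0, 'infected': 1.0,
             'recovered': 0.0, 'dead': 0.0}
    for _ in range(100):
        policy_input = {}
        for policy in (p_exposed_growth, p_infected_growth,
                       p_recovered_growth, p_dead_growth):
            policy_input.update(policy(params, 0, None, state))
        state = dict(suf(params, 0, None, state, policy_input)
                     for suf in (s_susceptible_population, s_exposed_population,
                                 s_infected_population, s_recovered_population,
                                 s_dead_population))
    print(state)  # population counts after 100 steps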
|
11500385
|
import cake.system
from cake.test.framework import caketest
@caketest(fixture="scriptinclude")
def testScriptIncludeMissingTraceback(t):
out = t.runCake()
out.checkFailed()
out.checkHasLineMatching("Failed to include cake script missing\\.cake:")
out.checkHasLinesInOrder([
" from include2.cake",
" from include1.cake",
" from build.cake",
])
|
11500390
|
import torch
from torch import nn
class multilabel_cross_entropy(nn.Module):
def __init__(self):
super().__init__()
def forward(self,y_pred, y_true):
y_true = y_true.float()
y_pred = torch.mul((1.0 - torch.mul(y_true,2.0)),y_pred)
y_pred_neg = y_pred - torch.mul(y_true,1e12)
# y_pred_neg = y_pred_neg
y_pred_pos = y_pred - torch.mul(1.0 - y_true,1e12)
# y_pred_pos = y_pred_pos
zeros = torch.zeros_like(y_pred[..., :1])
# zeros = zeros
y_pred_neg = torch.cat([y_pred_neg, zeros], axis=-1)
y_pred_pos = torch.cat([y_pred_pos, zeros], axis=-1)
neg_loss = torch.logsumexp(y_pred_neg, axis=-1)
pos_loss = torch.logsumexp(y_pred_pos, axis=-1)
loss = torch.mean(neg_loss + pos_loss)
return loss
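if __name__ == "__main__":
    # Sanity sketch (made-up logits): predictions that agree with the labels
    # (large positive logits where y_true == 1, large negative elsewhere)
    # should give a loss near zero; anti-correlated logits a much larger one.
    loss_fn = multilabel_cross_entropy()
    y_true = torch.tensor([[1, 0, 1, 0]])
    good = torch.tensor([[10.0, -10.0, 10.0, -10.0]])
    print(loss_fn(good, y_true).item())   # ~0
    print(loss_fn(-good, y_true).item())  # ~21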
|
11500407
|
variables = {}
while True:
    line = input().split()
    if line == ["0"]:
        break
    if "=" in line:
        # assignment line: "<name> = <integer>"
        variables[line[0]] = int(line[2])
    else:
        # expression line: sum every known term, keep undefined names symbolic
        additions = [x for x in line if x != "+"]
        digits = [item for item in additions if item.isdigit()]
        undefined = [item for item in additions if item not in variables and item not in digits]
        defined = [item for item in additions if item in variables and item not in digits]
        calc = []
        if len(defined) > 0:
            calc = [sum(variables.get(item) for item in defined)]
            if len(digits) > 0:
                calc[0] += sum(int(x) for x in digits)
        else:
            if len(digits) > 0:
                calc = [sum(int(x) for x in digits)]
        print(" + ".join([str(s) for s in calc] + undefined))
|
11500411
|
import pygame
import math
import numpy as np
import random
import time
def checkpattern(col, row):
    if row % 2:
        if col % 2:  # col odd, row odd
            return True
        else:  # col even, row odd
            return False
    else:
        if col % 2:  # col odd, row even
            return False
        else:  # col even, row even
            return True
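# Note (added): checkpattern(col, row) is equivalent to (col + row) % 2 == 0,
# i.e. it is True exactly when col and row share the same parity.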
def drawSpirit(screen, col, row, type, myfont):
    """
    Draws a sprite at position (col, row). type is one of 'E' (empty),
    1-6 (count of adjacent mines) or 'flag'; bombs ('B') are never drawn here.
    """
if type == 'E':
if checkpattern(col,row):
c = (242, 244, 247)
else:
c = (247, 249, 252)
pygame.draw.rect(screen, c, pygame.Rect(col*SIZEOFSQ, row*SIZEOFSQ, SIZEOFSQ, SIZEOFSQ))
else:
drawSpirit(screen, col, row, 'E', myfont)
if type == 1:
text = myfont.render("1", 1, (0, 204, 0))
elif type == 2:
text = myfont.render("2", 1, (255, 204, 0))
elif type == 3:
text = myfont.render("3", 1, (204, 0, 0))
elif type == 4:
text = myfont.render("4", 1, (0, 51, 153))
elif type == 5:
text = myfont.render("5", 1, (255, 102, 0))
elif type == 6:
text = myfont.render("6", 1, (255, 102, 0))
elif type == 'flag':
text = myfont.render("F", 1, (255, 0, 0))
#Get the text rectangle and center it inside the rectangles
textRect = text.get_rect()
textRect.center = (col*SIZEOFSQ + int(0.5*SIZEOFSQ)),(row*SIZEOFSQ + int(0.5*SIZEOFSQ))
screen.blit(text, textRect)
def findNeighbors2(y, x, grid): #Taken online, y = col x = row, return [(row,col),(row,col)]
COLS = grid.shape[1]
ROWS = grid.shape[0]
neighbors = [(y2, x2) for x2 in range(x-1, x+2)
for y2 in range(y-1, y+2)
if (-1 < x < COLS and
-1 < y < ROWS and
(x != x2 or y != y2) and
(0 <= x2 < COLS) and
(0 <= y2 < ROWS))]
return neighbors
def findNeighbors(rowin, colin, grid):
""" Takes col, row and grid as input and returns as list of neighbors
"""
COLS = grid.shape[1]
ROWS = grid.shape[0]
neighbors = []
for col in range(colin-1, colin+2):
for row in range(rowin-1, rowin+2):
if (-1 < rowin < ROWS and
-1 < colin < COLS and
(rowin != row or colin != col) and
(0 <= col < COLS) and
(0 <= row < ROWS)):
neighbors.append((row,col))
return neighbors
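# Example (added; any grid with at least 2 rows and columns):
# findNeighbors(0, 0, grid) -> [(1, 0), (0, 1), (1, 1)],
# the in-bounds neighbors of the top-left cell.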
def sumMines(grid, col, row):
""" Finds amount of mines adjacent to a field.
"""
mines = 0
neighbors = findNeighbors(row, col, grid)
for n in neighbors:
if grid[n[0],n[1]] == 'B':
mines = mines + 1
return mines
def initBoard(screen, grid, startcol, startrow, mines):
""" Initializes the board
"""
#Randomly place bombs
COLS = grid.shape[1]
ROWS = grid.shape[0]
while mines > 0:
(row, col) = (random.randint(0, ROWS-1), random.randint(0, COLS-1))
#if (col,row) not in findNeighbors(startcol, startrow, grid) and grid[col][row] != 'B' and (col, row) not in (startcol, startrow):
if (row,col) not in findNeighbors(startrow, startcol, grid) and (row,col) != (startrow, startcol) and grid[row][col] != 'B':
grid[row][col] = 'B'
mines = mines - 1
#Get rest of board when bombs have been placed
for col in range(COLS):
for row in range(ROWS):
if grid[row][col] != 'B':
totMines = sumMines(grid, col, row)
if totMines > 0:
grid[row][col] = totMines
else:
grid[row][col] = 'E'
return grid
def printBoard(grid):
COLS = grid.shape[1]
ROWS = grid.shape[0]
for row in range(0,ROWS):
print(' ')
for col in range(0,COLS):
print(grid[row][col], end=' ')
def reveal(screen, grid, col, row, myfont, checked, press="LM"):
    """Reveal (col, row): a left click ("LM") flood-fills through empty
    fields via recursion; a right click ("RM") draws a flag."""
if press == "LM":
if checked[row][col] != 0:
return
checked[row][col] = checked[row][col] + 1
if grid[row][col] != 'B':
#print(grid[row][col])
drawSpirit(screen, col, row, grid[row][col], myfont)
#pygame.display.flip()
#time.sleep(0.2)
#print(checked)
#time.sleep(5)
if grid[row][col] == 'E':
neighbors = findNeighbors(row, col, grid)
for n in neighbors:
if not checked[n[0],n[1]]:
reveal(screen, grid, n[1], n[0], myfont, checked)
elif press == "RM":
drawSpirit(screen, col, row, "flag", myfont)
if __name__ == "__main__":
ROWS = 6
COLS = 6
SIZEOFSQ = 100
MINES = 6
grid = np.zeros((ROWS,COLS), dtype=object)
#color of squares
c1 = (4, 133, 223)
c2 = (4, 145, 223)
pygame.init()
pygame.font.init()
myfont = pygame.font.SysFont("monospace-bold", 100)
screen = pygame.display.set_mode((COLS * SIZEOFSQ, ROWS * SIZEOFSQ))
rects = []
#Initialize Game:
for row in range(ROWS):
for col in range(COLS):
if checkpattern(col, row):
c = c1
else:
c = c2
rects.append(pygame.draw.rect(screen, c, pygame.Rect(col*SIZEOFSQ, row*SIZEOFSQ, SIZEOFSQ, SIZEOFSQ)))
done = False
firstClick = True
while not done:
for event in pygame.event.get(): #If someone clicks or does something
#pygame.draw.rect(screen, (0, 128, 255), pygame.Rect(30, 30, 60, 60))
if event.type == pygame.QUIT:
done = True
if event.type == pygame.MOUSEBUTTONDOWN:
pos = pygame.mouse.get_pos()
for i, rect in enumerate(rects):
if rect.collidepoint(pos):
#print(i)
col = i % COLS
row = math.floor(i/COLS)
print(row, col)
if firstClick:
grid = initBoard(screen, grid, col, row, MINES)
firstClick = False
printBoard(grid)
if pygame.mouse.get_pressed() == (1, 0, 0):
reveal(screen, grid, col, row, myfont, np.zeros_like(grid))
elif pygame.mouse.get_pressed() == (0, 0, 1):
reveal(screen, grid, col, row, myfont, np.zeros_like(grid), press = "RM")
"""
neighbors = findNeighbors(col,row, grid)
for n in neighbors:
drawSpirit(screen, n[0], n[1], 'one', myfont)
"""
pygame.display.flip()
|
11500420
|
import os
from dataclasses import dataclass
from optimize_images_x.calcs import human
@dataclass
class Task:
filepath: str
status: int
original_filesize: int = 0
final_filesize: int = 0
@property
def filename(self):
return os.path.basename(self.filepath)
@property
def orig_file_size_h(self) -> str:
return human(self.original_filesize)
@property
def final_file_size_h(self) -> str:
if self.final_filesize != 0:
return human(self.final_filesize)
else:
return ''
@property
def bytes_saved(self) -> int:
if self.final_filesize == 0:
return 0
elif self.final_filesize == self.original_filesize:
return 0
else:
return self.original_filesize - self.final_filesize
@property
def percent_saved(self) -> float:
if self.bytes_saved == 0:
return 0.0
return self.bytes_saved / self.original_filesize * 100
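if __name__ == "__main__":
    # Usage sketch with made-up sizes (the status value 0 is a placeholder,
    # and running standalone assumes optimize_images_x is importable): a
    # 100 kB file optimized down to 60 kB saves 40 kB, i.e. 40%.
    t = Task(filepath='/tmp/example.jpg', status=0,
             original_filesize=100_000, final_filesize=60_000)
    assert t.bytes_saved == 40_000
    assert t.percent_saved == 40.0
    print(t.filename, t.orig_file_size_h, '->', t.final_file_size_h)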
|
11500450
|
from sklearn import linear_model
from sklearn.neighbors import NearestNeighbors
import pandas as pd
from dowhy.causal_estimator import CausalEstimate
from dowhy.causal_estimators.propensity_score_estimator import PropensityScoreEstimator
class PropensityScoreMatchingEstimator(PropensityScoreEstimator):
""" Estimate effect of treatment by finding matching treated and control
units based on propensity score.
Straightforward application of the back-door criterion.
For a list of standard args and kwargs, see documentation for
:class:`~dowhy.causal_estimator.CausalEstimator`.
Supports additional parameters as listed below.
"""
def __init__(
self,
*args,
propensity_score_model=None,
recalculate_propensity_score=True,
propensity_score_column="propensity_score",
**kwargs):
"""
:param propensity_score_model: Model used to compute propensity score.
Can be any classification model that supports fit() and
predict_proba() methods. If None, LogisticRegression is used.
:param recalculate_propensity_score: Whether the propensity score
should be estimated. To use pre-computed propensity scores,
set this value to False. Default=True.
:param propensity_score_column: Column name that stores the
propensity score. Default='propensity_score'
"""
super().__init__(
*args,
propensity_score_model=propensity_score_model,
recalculate_propensity_score=recalculate_propensity_score,
propensity_score_column=propensity_score_column,
**kwargs)
self.logger.info("INFO: Using Propensity Score Matching Estimator")
self.symbolic_estimator = self.construct_symbolic_estimator(self._target_estimand)
self.logger.info(self.symbolic_estimator)
def _estimate_effect(self):
if self.recalculate_propensity_score is True:
if self.propensity_score_model is None:
self.propensity_score_model = linear_model.LogisticRegression()
self.propensity_score_model.fit(self._observed_common_causes, self._treatment)
self._data[self.propensity_score_column] = self.propensity_score_model.predict_proba(self._observed_common_causes)[:, 1]
else:
# check if the user provides a propensity score column
if self.propensity_score_column not in self._data.columns:
raise ValueError(f"Propensity score column {self.propensity_score_column} does not exist. Please specify the column name that has your pre-computed propensity score.")
else:
self.logger.info(f"INFO: Using pre-computed propensity score in column {self.propensity_score_column}")
# this assumes a binary treatment regime
treated = self._data.loc[self._data[self._treatment_name[0]] == 1]
control = self._data.loc[self._data[self._treatment_name[0]] == 0]
# TODO remove neighbors that are more than a given radius apart
# estimate ATT on treated by summing over difference between matched neighbors
control_neighbors = (
NearestNeighbors(n_neighbors=1, algorithm='ball_tree')
.fit(control[self.propensity_score_column].values.reshape(-1, 1))
)
distances, indices = control_neighbors.kneighbors(treated[self.propensity_score_column].values.reshape(-1, 1))
self.logger.debug("distances:")
self.logger.debug(distances)
att = 0
numtreatedunits = treated.shape[0]
for i in range(numtreatedunits):
treated_outcome = treated.iloc[i][self._outcome_name].item()
control_outcome = control.iloc[indices[i]][self._outcome_name].item()
att += treated_outcome - control_outcome
att /= numtreatedunits
#Now computing ATC
treated_neighbors = (
NearestNeighbors(n_neighbors=1, algorithm='ball_tree')
.fit(treated[self.propensity_score_column].values.reshape(-1, 1))
)
distances, indices = treated_neighbors.kneighbors(control[self.propensity_score_column].values.reshape(-1, 1))
atc = 0
numcontrolunits = control.shape[0]
for i in range(numcontrolunits):
control_outcome = control.iloc[i][self._outcome_name].item()
treated_outcome = treated.iloc[indices[i]][self._outcome_name].item()
atc += treated_outcome - control_outcome
atc /= numcontrolunits
if self._target_units == "att":
est = att
elif self._target_units == "atc":
est = atc
elif self._target_units == "ate":
est = (att*numtreatedunits + atc*numcontrolunits)/(numtreatedunits+numcontrolunits)
else:
raise ValueError("Target units string value not supported")
estimate = CausalEstimate(estimate=est,
control_value=self._control_value,
treatment_value=self._treatment_value,
target_estimand=self._target_estimand,
realized_estimand_expr=self.symbolic_estimator,
propensity_scores=self._data[self.propensity_score_column])
return estimate
def construct_symbolic_estimator(self, estimand):
expr = "b: " + ", ".join(estimand.outcome_variable) + "~"
# TODO -- fix: we are actually conditioning on positive treatment (d=1)
var_list = estimand.treatment_variable + estimand.get_backdoor_variables()
expr += "+".join(var_list)
return expr
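if __name__ == "__main__":
    # Illustrative sketch of the matching step above on synthetic data. This
    # is not the dowhy entry point (estimators are normally driven through
    # CausalModel); it only shows the nearest-neighbor ATT computation.
    import numpy as np
    rng = np.random.RandomState(0)
    ps_treated = rng.uniform(0.4, 0.9, 50)    # hypothetical propensity scores
    ps_control = rng.uniform(0.1, 0.6, 80)
    y_treated = 2.0 + rng.normal(0, 0.1, 50)  # hypothetical outcomes
    y_control = 1.0 + rng.normal(0, 0.1, 80)
    nn = NearestNeighbors(n_neighbors=1, algorithm='ball_tree')
    nn.fit(ps_control.reshape(-1, 1))
    _, idx = nn.kneighbors(ps_treated.reshape(-1, 1))
    att = (y_treated - y_control[idx[:, 0]]).mean()
    print(f"ATT on synthetic data: {att:.2f}")  # close to the true effect, 1.0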
|
11500463
|
from typing import Tuple
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.utils.extmath import weighted_mode
from .math import sigmoid
def k_neighbors_classify(X_train: np.ndarray,
y_train: np.ndarray,
X_test: np.ndarray,
n_neighbors: int = 20,
similarity=cosine_similarity) -> Tuple[np.ndarray, np.ndarray]:
preds = np.zeros(len(X_test), dtype=int)
scores = np.zeros(len(X_test), dtype=float)
sim = sigmoid(similarity(X_test, X_train))
for i in range(len(X_test)):
candidates = np.argsort(sim[i])[-n_neighbors:]
labels = y_train[candidates]
weights_ = sim[i][candidates]
mode, score = weighted_mode(labels, weights_)
preds[i] = int(mode[0])
scores[i] = score[0]
return preds, scores
def k_neighbors_classify_scores(X_train: np.ndarray,
y_train: np.ndarray,
X_test: np.ndarray,
n_neighbors: int = 20,
similarity=cosine_similarity) -> Tuple[np.ndarray, np.ndarray]:
real_labels, y_train = np.unique(y_train, return_inverse=True)
scores = np.zeros((len(X_test), len(real_labels)), dtype=float)
sim = sigmoid(similarity(X_test, X_train))
for i in range(len(X_test)):
candidates = np.argsort(sim[i])[-n_neighbors:]
for it in candidates:
scores[i][y_train[it]] += sim[i][it]
return scores, real_labels
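if __name__ == "__main__":
    # Minimal usage sketch on synthetic, direction-separated clusters (data
    # is made up; because of the relative `.math` import this only runs as
    # part of the package, e.g. via `python -m`).
    rng = np.random.default_rng(0)
    X_train = np.vstack([rng.normal([1, 0, 0, 0], 0.05, (10, 4)),
                         rng.normal([0, 1, 0, 0], 0.05, (10, 4))])
    y_train = np.array([0] * 10 + [1] * 10)
    X_test = np.vstack([rng.normal([1, 0, 0, 0], 0.05, (2, 4)),
                        rng.normal([0, 1, 0, 0], 0.05, (2, 4))])
    preds, scores = k_neighbors_classify(X_train, y_train, X_test, n_neighbors=3)
    print(preds, scores)  # cosine similarity separates the clusters: [0 0 1 1]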
|
11500474
|
from unittest import TestCase
from altimeter.core.graph.field.util import camel_case_to_snake_case
class TestCamelCaseToSnakeCase(TestCase):
def test_snake_case_input(self):
test_str = "snake_case_input"
self.assertEqual(test_str, camel_case_to_snake_case(test_str))
def test_camel_case_input(self):
test_str = "CamelCaseInput"
expected_out = "camel_case_input"
self.assertEqual(expected_out, camel_case_to_snake_case(test_str))
|
11500480
|
from kines import date_util
from freezegun import freeze_time
import datetime as dt
@freeze_time("2020-11-01 7:00:00", tz_offset=+dt.timedelta(hours=5, minutes=30))
def test_to_iterator_timestamp():
print("now = ", dt.datetime.now())
assert dt.datetime(2020, 11, 1, 5, 55) == date_util.to_iterator_timestamp("1h5m")
assert dt.datetime(2020, 11, 1, 6, 55) == date_util.to_iterator_timestamp("5m")
assert dt.datetime(2020, 11, 1, 6, 00) == date_util.to_iterator_timestamp("1h")
assert "2016-04-04T19:58:46.480-00:00" == date_util.to_iterator_timestamp(
"2016-04-04T19:58:46.480-00:00"
)
|
11500541
|
import pandas as pd
from rebar import arrdict
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.distributions
import torch.testing
from torch import nn
import geotorch
from . import expectations, common
from logging import getLogger
log = getLogger(__name__)
μ0 = 0
σ0 = 10
def pairwise_indices(N):
j, k = torch.as_tensor(np.indices((N, N)).reshape(2, -1))
j, k = j[j != k], k[j != k]
return j, k
def pairwise_diffs(μ, Σ):
j, k = pairwise_indices(len(μ))
μd = μ[j] - μ[k]
σ2d = Σ[j, j] - Σ[j, k] - Σ[k, j] + Σ[k, k]
return μd, σ2d
def as_square(xd, fill=0.):
N = int((1 + (1 + 4*len(xd))**.5)/2)
j, k = pairwise_indices(N)
y = torch.full((N, N), fill).double()
y[j, k] = xd
return y
_cache = None
class ELBO(nn.Module):
def __init__(self, N, constrain=True):
super().__init__()
self.N = N
self.register_parameter('μ', nn.Parameter(torch.zeros((N,)).double()))
self.register_parameter('Σ', nn.Parameter(torch.eye(N).double()))
# Useful to be able to turn this off for testing
if constrain:
geotorch.positive_definite(self, 'Σ')
# This is expensive to construct, so let's cache it
global _cache
if _cache is None:
_cache = expectations.normal(lambda d: -np.log(1 + np.exp(-d)))
self.expectation = _cache
def expected_prior(self):
# Constant isn't strictly needed, but it does help with testing
const = -1/2*np.log(2*np.pi) - np.log(σ0)
return const - 1/(2*σ0**2)*((self.μ - μ0)**2 + torch.diag(self.Σ))
def expected_log_likelihood(self, n, w):
# Constant isn't strictly needed, but it does help with testing
const = torch.lgamma(n.double()+1) - torch.lgamma(w.double()+1) - torch.lgamma((n-w).double()+1)
μd, σ2d = pairwise_diffs(self.μ, self.Σ)
p = self.expectation(μd, σ2d)
q = self.expectation(-μd, σ2d)
p = as_square(p, -np.log(2))
q = as_square(q, -np.log(2))
return const + w*p + (n - w)*q
def cross_entropy(self, n, w):
return -self.expected_prior().sum() - self.expected_log_likelihood(n, w).sum()
def entropy(self):
if torch.isnan(torch.logdet(self.Σ)):
raise ValueError('Σ has become negdef')
return 1/2*(self.N*np.log(2*np.pi*np.e) + torch.logdet(self.Σ))
def forward(self, n, w):
return -self.cross_entropy(n, w) + self.entropy()
def _solve(n, w, soln=None, max_iter=100, tol=1e-9, **kwargs):
n = torch.as_tensor(n)
w = torch.as_tensor(w)
#TODO: Find a better way of converting everything to double
elbo = ELBO(n.size(0)).double()
if soln is not None:
elbo.μ.data[:] = torch.as_tensor(soln.μ)
elbo.Σ = torch.as_tensor(soln.Σ)
# The gradients around here can be a little explode-y; a line search is a bit slow but
# keeps us falling up any cliffs.
optim = torch.optim.LBFGS(
elbo.parameters(),
line_search_fn='strong_wolfe',
tolerance_change=tol,
max_iter=max_iter,
**kwargs)
trace = []
def closure():
l = -elbo(n, w)
if torch.isnan(l):
raise ValueError('Hit a nan.')
optim.zero_grad()
l.backward()
grads = [p.grad for p in elbo.parameters()]
paramnorm = torch.cat([p.data.flatten() for p in elbo.parameters()]).pow(2).mean().pow(.5)
gradnorm = torch.cat([g.flatten() for g in grads]).pow(2).mean().pow(.5)
relnorm = gradnorm/paramnorm
trace.append(arrdict.arrdict(
l=l,
gradnorm=gradnorm,
relnorm=relnorm,
Σ=elbo.Σ).detach().clone())
return l
try:
optim.step(closure)
closure()
except ValueError as e:
        log.warning(f'activelo did not converge: "{str(e)}"')
μd, σ2d = map(as_square, pairwise_diffs(elbo.μ, elbo.Σ))
return arrdict.arrdict(
n=n,
w=w,
μ=elbo.μ,
Σ=elbo.Σ,
μd=μd,
σd=σ2d**.5,
trace=arrdict.stack(trace)).detach().numpy()
def solve(n, w, **kwargs):
if isinstance(n, pd.DataFrame):
return arrdict.arrdict({k: common.pandify(v, n.index) for k, v in solve(n.values, w.values, **common.numpyify(kwargs)).items()})
return _solve(n, w, **kwargs)
def test_elbo():
elbo = ELBO(2, constrain=False).double()
elbo.μ[:] = torch.tensor([1., 2.])
elbo.Σ[:] = torch.tensor([[1., .5], [.5, 2.]])
approx = torch.distributions.MultivariateNormal(elbo.μ, elbo.Σ)
s = approx.sample((100000,))
# Test entropy
expected = -approx.log_prob(s).mean()
torch.testing.assert_allclose(expected, elbo.entropy(), rtol=.01, atol=.01)
# Test prior
prior = torch.distributions.MultivariateNormal(torch.full((2,), μ0), σ0**2 * torch.eye(2))
expected = prior.log_prob(s).mean()
torch.testing.assert_allclose(expected, elbo.expected_prior().sum(), rtol=.01, atol=.01)
# Test likelihood
n = torch.tensor([[0, 3], [3, 0]])
w = torch.tensor([[0, 1], [1, 0]])
s = torch.distributions.MultivariateNormal(elbo.μ, elbo.Σ).sample((100000,))
d = s[:, :, None] - s[:, None, :]
r = 1/(1 + torch.exp(-d))
log_likelihood = torch.distributions.Binomial(n, r).log_prob(w.float())
expected = log_likelihood.mean(0)
torch.testing.assert_allclose(expected, elbo.expected_log_likelihood(n, w), rtol=.01, atol=.01)
def test_solver():
#TODO: Should Monte-Carlo the posterior to a small problem and check the KL-div from
# it to the approx'd posterior
pass
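def test_pairwise_as_square():
    # Added sanity sketch (not part of the original suite): for N items,
    # pairwise_diffs yields the N*(N-1) off-diagonal entries, each with
    # variance Σ_jj - Σ_jk - Σ_kj + Σ_kk (= 2 under an identity covariance),
    # and as_square lays them back out on an NxN grid.
    μ = torch.arange(3).double()
    Σ = torch.eye(3).double()
    μd, σ2d = pairwise_diffs(μ, Σ)
    assert μd.shape == (6,)
    torch.testing.assert_allclose(σ2d, torch.full((6,), 2.).double())
    assert as_square(μd).shape == (3, 3)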
|
11500549
|
from nose.tools import eq_, ok_
from tornado.httpclient import HTTPRequest
import json
import mockito
from astral.api.client import TicketsAPI
from astral.api.tests import BaseTest
from astral.models import Ticket, Stream, Node
from astral.models.tests.factories import TicketFactory
class TicketHandlerTest(BaseTest):
def test_delete(self):
node = Node.me()
ticket = TicketFactory(destination=node)
self.http_client.fetch(HTTPRequest(
self.get_url(ticket.absolute_url()), 'DELETE'), self.stop)
response = self.wait()
eq_(response.code, 200)
eq_(Ticket.get_by(id=ticket.id), None)
ok_(Stream.get_by(slug=ticket.stream.slug))
def test_get(self):
node = Node.me()
ticket = TicketFactory(destination=node)
mockito.when(TicketsAPI).create(mockito.any(),
destination_uuid=mockito.any()).thenReturn(
{'source': ticket.destination.uuid,
'source_port': ticket.source_port,
'hops': ticket.hops})
response = self.fetch(ticket.absolute_url())
eq_(response.code, 200)
result = json.loads(response.body)
ok_('ticket' in result)
eq_(result['ticket']['stream'], ticket.stream.slug)
def test_confirm(self):
node = Node.me()
ticket = TicketFactory(destination=node, confirmed=False)
data = {'confirmed': True}
eq_(ticket.confirmed, False)
self.http_client.fetch(HTTPRequest(
self.get_url(ticket.absolute_url()), 'PUT', body=json.dumps(data)),
self.stop)
response = self.wait()
eq_(response.code, 200)
eq_(ticket.confirmed, True)
|
11500558
|
import re
import json
import fnmatch
import sys
config = json.load(open('config.json'))
test_error_set = set(config['TestErrorMap'].keys())
obsolete_disabled_tests = set()
all_tests = set()
failing_tests = set()
unimpl_tests = set()
disabled_tests = set()
passed_tests = set()
for line in sys.stdin:
    m = re.match(r'^(PASSED|UNIMPLEMENTED|FAILED|DISABLED) \((.*)\)$', line.strip())
if m:
status, name = m.groups()
if name in test_error_set:
test_error_set.remove(name)
all_tests.add(name)
if status == 'FAILED':
failing_tests.add(name)
elif status == 'UNIMPLEMENTED':
unimpl_tests.add(name)
elif status == 'DISABLED':
disabled_tests.add(name)
elif status == 'PASSED':
passed_tests.add(name)
if disabled_tests:
    for disabled_glob in sorted(config['DisabledTests'].keys()):
        tests_matching_glob = fnmatch.filter(disabled_tests, disabled_glob)
        if not tests_matching_glob:
            print('DisabledTests glob', disabled_glob, 'matches no tests')
else:
    print('(DisabledTests unchecked)')
print(len(all_tests), 'total tests')
print(len(passed_tests), 'passed')
print(len(failing_tests), 'tests failing')
print(len(unimpl_tests), 'tests not supported')
if test_error_set:
    print('unknown TestErrorMap keys', test_error_set)
|
11500563
|
import operator
import tempfile
import os
import inspect
import pytest
import ast_tools
import fault
from hwtypes import UIntVector
import magma as m
from magma.testing import check_files_equal
Register = m.Register
from ast_tools import SymbolTable
class DualClockRAM(m.Circuit):
io = m.IO(
RADDR=m.In(m.Bits[8]),
WADDR=m.In(m.Bits[8]),
WDATA=m.In(m.Bits[8]),
RDATA=m.Out(m.Bits[8]),
WE=m.In(m.Bit),
RCLK=m.In(m.Clock),
WCLK=m.In(m.Clock)
)
m.wire(m.bits(0, 8), io.RDATA)
def test_sequential2_basic():
@m.sequential2()
class Basic:
def __init__(self):
self.x = Register(m.Bits[4])()
self.y = Register(m.Bits[4])()
def __call__(self, I: m.Bits[4]) -> m.Bits[4]:
return self.y(self.x(I))
m.compile("build/TestSequential2Basic", Basic)
assert check_files_equal(__file__, f"build/TestSequential2Basic.v",
f"gold/TestSequential2Basic.v")
def test_sequential2_assign():
@m.sequential2()
class Basic:
def __init__(self):
self.x = Register(m.Bits[4])()
self.y = Register(m.Bits[4])()
def __call__(self, I: m.Bits[4]) -> m.Bits[4]:
O = self.y
self.y = self.x
self.x = I
return O
m.compile("build/TestSequential2Assign", Basic)
assert check_files_equal(__file__, f"build/TestSequential2Assign.v",
f"gold/TestSequential2Assign.v")
# should be the same as basic
assert check_files_equal(__file__, f"build/TestSequential2Assign.v",
f"gold/TestSequential2Basic.v")
def test_sequential2_hierarchy():
@m.sequential2()
class Foo:
def __init__(self):
self.x = Register(m.Bits[4])()
self.y = Register(m.Bits[4])()
def __call__(self, I: m.Bits[4]) -> m.Bits[4]:
return self.y(self.x(I))
@m.sequential2()
class Bar:
def __init__(self):
self.x = Foo()
self.y = Foo()
def __call__(self, I: m.Bits[4]) -> m.Bits[4]:
return self.y(self.x(I))
m.compile("build/TestSequential2Hierarchy", Bar)
assert check_files_equal(__file__, f"build/TestSequential2Hierarchy.v",
f"gold/TestSequential2Hierarchy.v")
def test_sequential2_pre_unroll(capsys):
with tempfile.TemporaryDirectory() as tmpdir:
l0 = inspect.currentframe().f_lineno + 1
@m.sequential2(pre_passes=[ast_tools.passes.loop_unroll()],
post_passes=[
ast_tools.passes.debug(dump_source_filename=True,
dump_source_lines=True)],
env=locals().update(y=2),
debug=True, path=tmpdir, file_name="foo.py")
class LoopUnroll:
def __init__(self):
self.regs = [[Register(m.Bits[4])() for _ in range(3)] for _ in range(2)]
def __call__(self, I: m.Bits[4]) -> m.Bits[4]:
O = self.regs[1][-1]
for i in ast_tools.macros.unroll(range(y)):
for j in ast_tools.macros.unroll(range(2)):
self.regs[1 - i][2 - j] = self.regs[1 - i][1 - j]
self.regs[1 - i][0] = self.regs[i][-1] if m.Bit(i == 0) else I
return O
with open(os.path.join(tmpdir, "foo.py"), "r") as output:
assert output.read() == """\
def __call__(self, I: m.Bits[4]) -> m.Bits[4]:
O_0 = self.regs[1][-1]
self.regs[1 - 0][2 - 0] = self.regs[1 - 0][1 - 0]
self.regs[1 - 0][2 - 1] = self.regs[1 - 0][1 - 1]
self.regs[1 - 0][0] = __phi(m.Bit(0 == 0), self.regs[0][-1], I)
self.regs[1 - 1][2 - 0] = self.regs[1 - 1][1 - 0]
self.regs[1 - 1][2 - 1] = self.regs[1 - 1][1 - 1]
self.regs[1 - 1][0] = __phi(m.Bit(1 == 0), self.regs[1][-1], I)
__0_return_0 = O_0
return __0_return_0
"""
assert capsys.readouterr().out == f"""\
BEGIN SOURCE_FILENAME
{os.path.abspath(__file__)}
END SOURCE_FILENAME
BEGIN SOURCE_LINES
{l0+11}: def __call__(self, I: m.Bits[4]) -> m.Bits[4]:
{l0+12}: O = self.regs[1][-1]
{l0+13}: for i in ast_tools.macros.unroll(range(y)):
{l0+14}: for j in ast_tools.macros.unroll(range(2)):
{l0+15}: self.regs[1 - i][2 - j] = self.regs[1 - i][1 - j]
{l0+16}: self.regs[1 - i][0] = self.regs[i][-1] if m.Bit(i == 0) else I
{l0+17}: return O
END SOURCE_LINES
"""
m.compile("build/TestSequential2NestedLoopUnroll", LoopUnroll)
assert check_files_equal(__file__, f"build/TestSequential2NestedLoopUnroll.v",
f"gold/TestSequential2NestedLoopUnroll.v")
def test_dual_clock_ram():
@m.sequential2()
class DefaultClock:
def __call__(self) -> m.Bits[8]:
rdata = DualClockRAM()(
RADDR=m.bits(0, 8),
WADDR=m.bits(0, 8),
WDATA=m.bits(0, 8),
WE=m.bit(0),
# Default CLK will be wired up implicitly
# RCLK=CLK
# WCLK=CLK
)
return rdata
m.compile("build/TestSequential2DefaultClock", DefaultClock)
assert check_files_equal(__file__,
f"build/TestSequential2DefaultClock.v",
f"gold/TestSequential2DefaultClock.v")
@m.sequential2()
class ExplicitClock:
def __call__(self, WCLK: m.Clock, RCLK: m.Clock) -> m.Bits[8]:
rdata = DualClockRAM()(
RADDR=m.bits(0, 8),
WADDR=m.bits(0, 8),
WDATA=m.bits(0, 8),
WE=m.bit(0),
# Wiring clocks explicitly
RCLK=RCLK,
WCLK=WCLK
)
return rdata
m.compile("build/TestSequential2ExplicitClock", ExplicitClock)
assert check_files_equal(__file__,
f"build/TestSequential2ExplicitClock.v",
f"gold/TestSequential2ExplicitClock.v")
def test_sequential2_return_tuple():
@m.sequential2()
class Basic:
def __init__(self):
self.x = Register(m.Bits[4])()
self.y = Register(m.Bits[4])()
def __call__(self, I: m.Bits[4], S: m.Bit) -> (m.Bits[4], m.Bits[4]):
self.y = self.x
self.x = I
if S:
return self.x, self.y
else:
return self.y, self.x
m.compile("build/TestSequential2ReturnTuple", Basic, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2ReturnTuple.v",
f"gold/TestSequential2ReturnTuple.v")
def test_sequential2_custom_annotations():
annotations = {"I": m.Bits[4], "S": m.Bit, "return": m.Bits[4]}
@m.sequential2(annotations=annotations)
class Basic:
def __init__(self):
self.x = Register(m.Bits[4])()
self.y = Register(m.Bits[4])()
# Bad annotations to make sure they're overridden
def __call__(self, I: int, S: str) -> tuple:
O = self.y
self.y = self.x
self.x = I
return O
m.compile("build/TestSequential2CustomAnnotations", Basic, inline=True)
assert check_files_equal(__file__,
f"build/TestSequential2CustomAnnotations.v",
f"gold/TestSequential2CustomAnnotations.v")
def test_sequential2_counter():
@m.sequential2()
class Test2:
def __init__(self):
self.count = m.Register(T=m.SInt[16], init=m.sint(0, 16))()
def __call__(self) -> m.SInt[16]:
self.count = self.count + 1
return self.count
m.compile("build/TestSequential2Counter", Test2, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2Counter.v",
f"gold/TestSequential2Counter.v")
def test_sequential2_counter_if():
@m.sequential2()
class Test2:
def __init__(self):
self.count = m.Register(T=m.SInt[16], init=m.sint(0, 16))()
def __call__(self, sel: m.Bit) -> m.SInt[16]:
if sel:
self.count = self.count + 1
return self.count
m.compile("build/TestSequential2CounterIf", Test2, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2CounterIf.v",
f"gold/TestSequential2CounterIf.v")
def test_sequential2_product():
@m.sequential2()
class Test:
def __call__(self, sel: m.Bit) -> m.AnonProduct[dict(a=m.Bit)]:
if sel:
return m.namedtuple(a=m.bit(0))
else:
return m.namedtuple(a=m.bit(1))
m.compile("build/TestSequential2Product", Test, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2Product.v",
f"gold/TestSequential2Product.v")
def test_sequential2_arr_of_bits():
T = m.Array[15, m.Bits[7]]
@m.sequential2()
class Test2:
def __init__(self):
self.reg_arr = m.Register(T=T)()
def __call__(self, I: T) -> T:
O = self.reg_arr
self.reg_arr = I
return O
m.compile("build/TestSequential2ArrOfBits", Test2, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2ArrOfBits.v",
f"gold/TestSequential2ArrOfBits.v")
def test_sequential2_getitem():
T = m.Array[8, m.Bits[7]]
@m.sequential2()
class Test2:
def __init__(self):
self.reg_arr = m.Register(T=T)()
self.index = m.Register(T=m.Bits[3])()
def __call__(self, I: T, index: m.Bits[3]) -> m.Array[2, m.Bits[7]]:
out = m.array([self.reg_arr[index], self.reg_arr[self.index]])
self.reg_arr = I
self.index = index
return out
m.compile("build/TestSequential2GetItem", Test2, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2GetItem.v",
f"gold/TestSequential2GetItem.v")
def test_sequential2_slice():
@m.sequential2()
class TestSequential2Slice:
def __init__(self):
self.mem = m.Register(T=m.Bits[8 * 8])()
def __call__(self, write_addr: m.UInt[3], write_data: m.Bits[8],
read_addr: m.UInt[3]) -> m.Bits[8]:
read_data = m.get_slice(self.mem, m.zext(read_addr, 3) * 8, 8)
self.mem = m.set_slice(self.mem, write_data,
m.zext(write_addr, 3) * 8, 8)
return read_data
m.compile("build/TestSequential2Slice", TestSequential2Slice, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2Slice.v",
f"gold/TestSequential2Slice.v")
tester = fault.SynchronousTester(TestSequential2Slice,
TestSequential2Slice.CLK)
tester.circuit.write_addr = 1
tester.circuit.write_data = 2
tester.circuit.read_addr = 1
tester.advance_cycle()
tester.circuit.O.expect(2)
tester.circuit.write_addr = 2
tester.circuit.write_data = 3
tester.circuit.read_addr = 2
tester.advance_cycle()
tester.circuit.O.expect(3)
    # Check addr 1 wasn't overwritten
tester.circuit.read_addr = 1
tester.advance_cycle()
tester.circuit.O.expect(2)
build_dir = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"build"
)
tester.compile_and_run("verilator", skip_compile=True, directory=build_dir,
flags=["-Wno-unused"])
def test_sequential2_prev():
@m.sequential2()
class Test2:
def __init__(self):
self.cnt = m.Register(T=m.UInt[3])()
def __call__(self) -> m.UInt[3]:
self.cnt = self.cnt + 1
return self.cnt.prev()
m.compile("build/TestSequential2Prev", Test2, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2Prev.v",
f"gold/TestSequential2Prev.v")
def test_sequential2_reset():
@m.sequential2(reset_type=m.AsyncReset, has_enable=True)
class Test2:
def __init__(self):
# reset_type and has_enable will be set implicitly
self.cnt = m.Register(T=m.UInt[3])()
def __call__(self) -> m.UInt[3]:
self.cnt = self.cnt + 1
return self.cnt
m.compile("build/TestSequential2Reset", Test2, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2Reset.v",
f"gold/TestSequential2Reset.v")
def test_sequential2_ite_array():
@m.sequential2()
class Test:
def __init__(self):
self.v = m.Register(T=m.Bit, init=m.bit(0))()
def __call__(self, sel: m.Bit) -> m.Array[1, m.Bit]:
self.v = sel
if sel:
return m.array([m.bit(0)])
else:
return m.array([self.v.prev()])
m.compile("build/TestSequential2IteArray", Test, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2IteArray.v",
f"gold/TestSequential2IteArray.v")
def test_sequential2_ite_array2():
@m.sequential2()
class Test:
def __init__(self):
self.v = m.Register(T=m.Bit, init=m.bit(0))()
def __call__(self, sel: m.Bit) -> m.Array[2, m.Bit]:
self.v = sel
if sel:
return m.array([m.bit(0), m.bit(0)])
else:
return m.array([self.v.prev(), m.bit(1)])
m.compile("build/TestSequential2IteArray2", Test, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2IteArray2.v",
f"gold/TestSequential2IteArray2.v")
def test_sequential2_ite_array_error():
with pytest.raises(TypeError,
match="Found incompatible types .* in mux inference"):
@m.sequential2()
class Test:
def __init__(self):
self.v = m.Register(T=m.Bit, init=m.bit(0))()
def __call__(self, sel: m.Bit) -> m.Array[1, m.Bit]:
self.v = sel
if sel:
return m.array([m.bit(0), m.bit(0)])
else:
return m.array([self.v.prev()])
def test_sequential2_ite_tuple():
@m.sequential2()
class Test:
def __init__(self):
self.v = m.Register(T=m.Bit, init=m.bit(0))()
def __call__(self, sel: m.Bit) -> m.Tuple[m.Bit]:
self.v = sel
if sel:
return m.tuple_(self.v.prev())
else:
return m.tuple_(self.v.prev())
m.compile("build/TestSequential2IteTuple", Test, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2IteTuple.v",
f"gold/TestSequential2IteTuple.v")
def test_sequential2_ite_tuple2():
@m.sequential2()
class Test:
def __init__(self):
self.v = m.Register(T=m.Bit, init=m.bit(0))()
def __call__(self, sel: m.Bit) -> m.Tuple[m.Bit]:
self.v = sel
if sel:
return m.tuple_(m.bit(0))
else:
return m.tuple_(self.v.prev())
m.compile("build/TestSequential2IteTuple2", Test, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2IteTuple2.v",
f"gold/TestSequential2IteTuple2.v")
def test_sequential2_ite_tuple_error_type():
with pytest.raises(TypeError,
match="Found incompatible types .* in mux inference"):
@m.sequential2()
class Test:
def __init__(self):
self.v = m.Register(T=m.Bit, init=m.bit(0))()
def __call__(self, sel: m.Bit) -> m.Tuple[m.Bit]:
self.v = sel
if sel:
return m.tuple_(m.bits(0, 2))
else:
return m.tuple_(self.v.prev())
def test_sequential2_ite_product():
@m.sequential2()
class Test:
def __init__(self):
self.v = m.Register(T=m.Bit, init=m.bit(0))()
def __call__(self, sel: m.Bit) -> m.AnonProduct[dict(a=m.Bit)]:
self.v = sel
if sel:
return m.namedtuple(a=self.v.prev())
else:
return m.namedtuple(a=self.v.prev())
m.compile("build/TestSequential2IteProduct", Test, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2IteProduct.v",
f"gold/TestSequential2IteProduct.v")
def test_sequential2_ite_product2():
@m.sequential2()
class Test:
def __init__(self):
self.v = m.Register(T=m.Bit, init=m.bit(0))()
def __call__(self, sel: m.Bit) -> m.AnonProduct[dict(a=m.Bit)]:
self.v = sel
if sel:
return m.namedtuple(a=m.bit(0))
else:
return m.namedtuple(a=self.v.prev())
m.compile("build/TestSequential2IteProduct2", Test, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2IteProduct2.v",
f"gold/TestSequential2IteProduct2.v")
def test_sequential2_ite_product_error_type():
with pytest.raises(TypeError,
match="Found incompatible types .* in mux inference"):
@m.sequential2()
class Test:
def __init__(self):
self.v = m.Register(T=m.Bit, init=m.bit(0))()
def __call__(self, sel: m.Bit) -> m.AnonProduct[dict(a=m.Bit)]:
self.v = sel
if sel:
return m.namedtuple(a=m.bits(0, 2))
else:
return m.namedtuple(a=self.v.prev())
def test_sequential2_ite_product_error_keys():
with pytest.raises(TypeError,
match="Found incompatible types .* in mux inference"):
@m.sequential2()
class Test:
def __init__(self):
self.v = m.Register(T=m.Bit, init=m.bit(0))()
def __call__(self, sel: m.Bit) -> m.AnonProduct[dict(a=m.Bit)]:
self.v = sel
if sel:
return m.namedtuple(a=m.bit(0))
else:
return m.namedtuple(b=self.v.prev())
def test_sequential2_ite_nested():
@m.sequential2()
class Test:
def __init__(self):
self.v = m.Register(T=m.Bit, init=m.bit(0))()
def __call__(self, sel: m.Bit) -> m.AnonProduct[dict(
a=m.AnonProduct[dict(b=m.Bit)],
c=m.Tuple[m.Bit])
]:
self.v = sel
if sel:
return m.namedtuple(a=m.namedtuple(b=m.bit(0)),
c=m.tuple_(m.bit(0)))
else:
return m.namedtuple(a=m.namedtuple(b=self.v.prev()),
c=m.tuple_(self.v.prev()))
m.compile("build/TestSequential2IteNested", Test, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2IteNested.v",
f"gold/TestSequential2IteNested.v")
def test_sequential2_ite_bits():
@m.sequential2()
class Test:
def __init__(self):
self.v = m.Register(T=m.Bits[8], init=m.bits(0, 8))()
def __call__(self, sel: m.Bit) -> m.AnonProduct[dict(a=m.Bits[8])]:
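            # Explicit self-assignment holds the register's current value;
            # state is unchanged and only the returned view differs per branch.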
self.v = self.v
if sel:
return m.namedtuple(a=self.v.prev())
else:
return m.namedtuple(a=self.v.prev())
m.compile("build/TestSequential2IteBits", Test, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2IteBits.v",
f"gold/TestSequential2IteBits.v")
def test_sequential2_ite_bits2():
@m.sequential2()
class Test:
def __init__(self):
self.v = m.Register(T=m.Bits[8], init=m.bits(0, 8))()
def __call__(self, sel: m.Bit) -> m.AnonProduct[dict(a=m.Bits[8])]:
self.v = self.v
if sel:
return m.namedtuple(a=m.bits(0, 8))
else:
return m.namedtuple(a=self.v.prev())
m.compile("build/TestSequential2IteBits2", Test, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2IteBits2.v",
f"gold/TestSequential2IteBits2.v")
def test_sequential2_ite_bits3():
@m.sequential2()
class Test:
def __init__(self):
self.v = m.Register(T=m.Bit, init=m.bit(0))()
def __call__(self, sel: m.Bit) -> m.AnonProduct[dict(a=m.Bits[8])]:
self.v = self.v
if sel:
return m.namedtuple(a=m.concat(m.bits(0, 4),
m.repeat(self.v.prev(), 3),
self.v.prev()))
else:
return m.namedtuple(a=m.bits(self.v.prev(), 8))
m.compile("build/TestSequential2IteBits3", Test, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2IteBits3.v",
f"gold/TestSequential2IteBits3.v")
def test_sequential2_ite_complex():
@m.sequential2()
class Test:
def __init__(self):
self.a = m.Register(T=m.Bit, init=m.bit(0))()
self.b = m.Register(T=m.Bits[2], init=m.bits(0, 2))()
def __call__(self, sel: m.Bit) -> m.AnonProduct[dict(
a=m.Tuple[m.Bits[2]],
b=m.Array[2, m.Bit],
)]:
self.a = self.a
self.b = self.b
if sel:
return m.namedtuple(
a=m.tuple_([self.b.prev()]),
b=m.array([self.a.prev(), self.a.prev()]),
)
else:
return m.namedtuple(
a=m.tuple_([m.array([self.a.prev(), self.a.prev()])]),
b=m.array(self.b.prev()),
)
m.compile("build/TestSequential2IteComplex", Test, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2IteComplex.v",
f"gold/TestSequential2IteComplex.v")
def test_sequential2_ite_complex_register():
class T(m.Product):
a = m.Array[1, m.Bits[2]]
b = m.Tuple[m.AnonProduct[dict(c=m.Array[2, m.Bit])]]
@m.sequential2()
class Test:
def __init__(self):
self.a = m.Register(
T=T,
init=m.namedtuple(
a=m.array([m.bits(0, 2)]),
b=m.tuple_([m.namedtuple(c=m.array([m.bit(0), m.bit(0)]))]),
),
)()
def __call__(self, sel: m.Bit) -> T:
self.a = self.a
if sel:
return self.a
else:
return self.a
m.compile("build/TestSequential2IteComplexRegister", Test, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2IteComplexRegister.v",
f"gold/TestSequential2IteComplexRegister.v")
def test_sequential2_ite_complex_register2():
class T(m.Product):
a = m.Array[1, m.Bits[2]]
b = m.Tuple[m.AnonProduct[dict(c=m.Array[2, m.Bit])]]
@m.sequential2()
class Test:
def __init__(self):
self.a = m.Register(
T=T,
init=m.namedtuple(
a=m.array([m.bits(0, 2)]),
b=m.tuple_([m.namedtuple(c=m.array([m.bit(0), m.bit(0)]))]),
),
)()
def __call__(self, sel: m.Bit) -> T:
self.a = self.a
if sel:
return m.namedtuple(
a=m.array([self.a.b[0].c]),
b=m.tuple_([m.namedtuple(
c=m.array([m.bit(0), m.bit(self.a.a[0][1:2])]))]
),
)
else:
return self.a
m.compile("build/TestSequential2IteComplexRegister2", Test, inline=True)
assert check_files_equal(__file__, f"build/TestSequential2IteComplexRegister2.v",
f"gold/TestSequential2IteComplexRegister2.v")
def test_gcd():
@m.sequential2()
class GCD:
def __init__(self):
self.x = m.Register(m.UInt[16])()
self.y = m.Register(m.UInt[16])()
def __call__(self, a: m.In(m.UInt[16]), b: m.In(m.UInt[16]),
load: m.In(m.Bit)) -> (m.Out(m.UInt[16]), m.Out(m.Bit)):
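            # Subtraction-based Euclidean algorithm: capture the operands on
            # load, then repeatedly subtract the smaller value from the larger
            # until y reaches 0, at which point x holds gcd(a, b).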
if load:
self.x = a
self.y = b
elif self.y != 0:
if self.x > self.y:
self.x = self.x - self.y
else:
self.y = self.y - self.x
return self.x.prev(), self.y.prev() == 0
m.compile("build/GCD", GCD, inline=True)
tester = fault.SynchronousTester(GCD, clock=GCD.CLK)
tester.circuit.a = 32
tester.circuit.b = 16
tester.circuit.load = 1
tester.advance_cycle()
tester.circuit.load = 0
tester.advance_cycle()
tester.wait_on(tester.circuit.O1 == 1)
tester.circuit.O0.expect(16)
dir_ = os.path.join(os.path.dirname(__file__), "build")
tester.compile_and_run("verilator", skip_compile=True, directory=dir_)
@pytest.mark.parametrize('op', [operator.add, operator.sub, operator.mul,
operator.floordiv, operator.truediv,
operator.mod, operator.lshift, operator.rshift,
operator.and_, operator.xor, operator.or_])
def test_r_ops(op):
@m.sequential2()
class Test:
def __init__(self):
self.x = m.Register(m.UInt[16])()
self.y = m.Register(m.UInt[16])()
def __call__(self, a: m.In(m.UInt[16]), b: m.In(m.UInt[16]),
load: m.In(m.Bit)) -> (m.Out(m.UInt[16]),
m.Out(m.UInt[16])):
if load:
self.x = a
self.y = b
else:
self.x = op(self.x, self.y)
self.y = op(self.y, self.x)
return self.x.prev(), self.y.prev()
type(Test).rename(Test, f"TestRop{op.__name__}")
m.compile(f"build/TestRop{op.__name__}", Test, inline=True)
if op in {operator.mod, operator.truediv}:
# coreir doesn't support urem primitive
# hwtypes BV doesn't support truediv
# but we still test these right hand op implementation for coverage and
# to make sure they compile without error
return
tester = fault.SynchronousTester(Test, clock=Test.CLK)
tester.circuit.a = a = 32
tester.circuit.b = b = 3
tester.circuit.load = 1
tester.advance_cycle()
tester.circuit.load = 0
tester.advance_cycle()
O0 = op(a, b)
tester.circuit.O0.expect(O0)
tester.circuit.O1.expect(op(b, O0))
dir_ = os.path.join(os.path.dirname(__file__), "build")
tester.compile_and_run("verilator", flags=['-Wno-unused'],
skip_compile=True, directory=dir_)
@pytest.mark.parametrize('op', [operator.invert, operator.neg])
def test_u_ops(op):
@m.sequential2()
class Test:
def __init__(self):
self.x = m.Register(m.SInt[16])()
def __call__(self, a: m.In(m.SInt[16]),
load: m.In(m.Bit)) -> m.Out(m.SInt[16]):
if load:
self.x = a
else:
self.x = op(self.x)
return self.x.prev()
type(Test).rename(Test, f"TestUop{op.__name__}")
m.compile(f"build/TestUop{op.__name__}", Test, inline=True)
tester = fault.SynchronousTester(Test, clock=Test.CLK)
tester.circuit.a = a = 32
tester.circuit.load = 1
tester.advance_cycle()
tester.circuit.load = 0
tester.advance_cycle()
O = op(a)
tester.circuit.O.expect(O)
dir_ = os.path.join(os.path.dirname(__file__), "build")
tester.compile_and_run("verilator", flags=['-Wno-unused'],
skip_compile=True, directory=dir_)
def test_reset_no_init():
Data = m.UInt[8]
@m.sequential2(reset_type=m.AsyncReset)
class Inc:
def __call__(self, i: Data) -> Data:
return i + 1
def test_magma_not_in_env():
Data = m.UInt[8]
env = SymbolTable({}, {'Data': Data})
@m.sequential2(env=env, reset_type=m.AsyncReset)
class Inc:
def __init__(self):
pass
def __call__(self, i: Data) -> Data:
return i + 1
def test_named_outputs():
Data = m.UInt[8]
    @m.sequential2(output_port_names=['s', 'c_out'])
class Adder:
def __call__(self, a: Data, b: Data, c_in: m.Bit) -> (Data, m.Bit):
return a.adc(b, c_in)
assert Adder.interface.ports.keys() == {'a', 'b', 'c_in', 's', 'c_out', 'CLK'}
|
11500565
|
import numpy as np
l_2d = [[0, 1, 2], [3, 4, 5]]
print(l_2d)
# [[0, 1, 2], [3, 4, 5]]
print(type(l_2d))
# <class 'list'>
arr = np.array([[0, 1, 2], [3, 4, 5]])
print(arr)
# [[0 1 2]
# [3 4 5]]
print(type(arr))
# <class 'numpy.ndarray'>
arr = np.arange(6)
print(arr)
# [0 1 2 3 4 5]
arr = np.arange(6).reshape((2, 3))
print(arr)
# [[0 1 2]
# [3 4 5]]
mat = np.matrix([[0, 1, 2], [3, 4, 5]])
print(mat)
# [[0 1 2]
# [3 4 5]]
print(type(mat))
# <class 'numpy.matrix'>
mat = np.matrix(arr)
print(mat)
# [[0 1 2]
# [3 4 5]]
print(type(mat))
# <class 'numpy.matrix'>
mat_1d = np.matrix([0, 1, 2])
print(mat_1d)
# [[0 1 2]]
print(type(mat_1d))
# <class 'numpy.matrix'>
print(mat_1d.shape)
# (1, 3)
# mat_3d = np.matrix([[[0, 1, 2]]])
# ValueError: matrix must be 2-dimensional
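# Unlike np.matrix, ndarray is not limited to two dimensions; higher-rank
# arrays work fine (a quick sketch):
arr_3d = np.arange(8).reshape((2, 2, 2))
print(arr_3d.shape)
# (2, 2, 2)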
print(l_2d)
# [[0, 1, 2], [3, 4, 5]]
print(l_2d[0][1])
# 1
l_2d[0][1] = 100
print(l_2d)
# [[0, 100, 2], [3, 4, 5]]
print(arr)
# [[0 1 2]
# [3 4 5]]
print(arr[0, 1])
# 1
arr[0, 1] = 100
print(arr)
# [[ 0 100 2]
# [ 3 4 5]]
l_2d = [[0, 1, 2], [3, 4, 5]]
print(l_2d)
# [[0, 1, 2], [3, 4, 5]]
print([list(x) for x in zip(*l_2d)])
# [[0, 3], [1, 4], [2, 5]]
arr = np.arange(6).reshape((2, 3))
print(arr)
# [[0 1 2]
# [3 4 5]]
print(arr.T)
# [[0 3]
# [1 4]
# [2 5]]
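# Note that .T only reorders existing axes, so it is a no-op on 1-D arrays:
v = np.arange(3)
print(v.T)
# [0 1 2]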
l_2d_1 = [[0, 1, 2], [3, 4, 5]]
l_2d_2 = [[0, 2, 4], [6, 8, 10]]
print(l_2d_1 + l_2d_2)
# [[0, 1, 2], [3, 4, 5], [0, 2, 4], [6, 8, 10]]
# print(l_2d_1 - l_2d_2)
# TypeError: unsupported operand type(s) for -: 'list' and 'list'
arr1 = np.arange(6).reshape((2, 3))
print(arr1)
# [[0 1 2]
# [3 4 5]]
arr2 = np.arange(0, 12, 2).reshape((2, 3))
print(arr2)
# [[ 0 2 4]
# [ 6 8 10]]
print(arr1 + arr2)
# [[ 0 3 6]
# [ 9 12 15]]
print(arr1 - arr2)
# [[ 0 -1 -2]
# [-3 -4 -5]]
mat1 = np.matrix(arr1)
mat2 = np.matrix(arr2)
print(mat1 + mat2)
# [[ 0 3 6]
# [ 9 12 15]]
print(mat1 - mat2)
# [[ 0 -1 -2]
# [-3 -4 -5]]
print(l_2d_1 * 2)
# [[0, 1, 2], [3, 4, 5], [0, 1, 2], [3, 4, 5]]
# print(l_2d_1 * l_2d_2)
# TypeError: can't multiply sequence by non-int of type 'list'
print(arr1 * 2)
# [[ 0 2 4]
# [ 6 8 10]]
print(mat1 * 2)
# [[ 0 2 4]
# [ 6 8 10]]
print(np.multiply(arr1, arr2))
# [[ 0 2 8]
# [18 32 50]]
print(np.multiply(mat1, mat2))
# [[ 0 2 8]
# [18 32 50]]
print(arr1 * arr2)
# [[ 0 2 8]
# [18 32 50]]
arr1 = np.arange(4).reshape((2, 2))
print(arr1)
# [[0 1]
# [2 3]]
arr2 = np.arange(6).reshape((2, 3))
print(arr2)
# [[0 1 2]
# [3 4 5]]
print(np.dot(arr1, arr2))
# [[ 3 4 5]
# [ 9 14 19]]
print(arr1.dot(arr2))
# [[ 3 4 5]
# [ 9 14 19]]
print(np.matmul(arr1, arr2))
# [[ 3 4 5]
# [ 9 14 19]]
print(arr1 @ arr2)
# [[ 3 4 5]
# [ 9 14 19]]
mat1 = np.matrix(arr1)
mat2 = np.matrix(arr2)
print(np.dot(mat1, mat2))
# [[ 3 4 5]
# [ 9 14 19]]
print(mat1.dot(mat2))
# [[ 3 4 5]
# [ 9 14 19]]
print(np.matmul(mat1, mat2))
# [[ 3 4 5]
# [ 9 14 19]]
print(mat1 @ mat2)
# [[ 3 4 5]
# [ 9 14 19]]
print(mat1 * mat2)
# [[ 3 4 5]
# [ 9 14 19]]
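# Since the @ operator covers matrix products for ndarray, NumPy's own
# documentation no longer recommends np.matrix for new code; plain ndarray
# with @ is the preferred spelling.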
arr = np.arange(1, 5).reshape(2, 2)
print(arr)
# [[1 2]
# [3 4]]
print(arr**2)
# [[ 1 4]
# [ 9 16]]
mat = np.matrix(arr)
print(mat)
# [[1 2]
# [3 4]]
print(mat**2)
# [[ 7 10]
# [15 22]]
print(mat**2 == mat * mat)
# [[ True True]
# [ True True]]
print(mat**3 == mat * mat * mat)
# [[ True True]
# [ True True]]
# print(arr**-1)
# ValueError: Integers to negative integer powers are not allowed.
arr_f = np.array(arr, dtype=float)
print(arr_f**-1)
# [[1. 0.5 ]
# [0.33333333 0.25 ]]
print(mat**-1)
# [[-2. 1. ]
# [ 1.5 -0.5]]
print(mat**-2)
# [[ 5.5 -2.5 ]
# [-3.75 1.75]]
print(mat**-2 == mat**-1 * mat**-1)
# [[ True True]
# [ True True]]
print(mat**-3 == mat**-1 * mat**-1 * mat**-1)
# [[ True True]
# [ True True]]
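# For ndarray, the true matrix inverse and matrix powers live in np.linalg
# (a minimal sketch using the standard API):
print(np.linalg.inv(arr_f))
# [[-2.   1. ]
#  [ 1.5 -0.5]]
print(np.linalg.matrix_power(arr, 2))
# [[ 7 10]
#  [15 22]]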
|
11500573
|
import serial
import time
import numpy as np
import cv2
import argparse
parser = argparse.ArgumentParser(prog='read_himax', description='Himax image sensor downloader')
parser.add_argument('--baud', type=int, default=460800)
parser.add_argument('-l', '--loop', action='store_true')
parser.add_argument('-o', '--out', default='img')
parser.add_argument('port', help='Windows: COMx, /dev/ttyUSBx on Linux')
args = parser.parse_args()
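# Example invocation (hypothetical port; script assumed saved as read_himax.py):
#   python read_himax.py /dev/ttyUSB0 --baud 460800 --loop -o img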
ser = serial.Serial(port=args.port, baudrate=args.baud, timeout=1.0)
print(ser.name)
ser.reset_input_buffer()   # flushInput()/flushOutput() were deprecated in pyserial 3.0
ser.reset_output_buffer()
# "Warm up" the board by acquiring a frame
ser.write(b'x')
time.sleep(1)
(cols, rows) = (162, 162)
# (cols, rows) = (324, 324)
frameno = 0
while True:
    print('Frame no: %d' % frameno)
    ser.reset_input_buffer()
ser.write(b'x')
    resp = ser.read(rows * cols)  # read exactly one frame's worth of bytes
    print(len(resp))
    if len(resp) == rows * cols:  # skip incomplete frames; reshape would fail on a partial read
        image = np.frombuffer(resp, dtype=np.uint8).reshape(rows, cols)
cv2.imshow('capture', image)
k = cv2.waitKey(0)
if args.loop and args.out:
if frameno < 9999:
fname = "%s-%04d.tiff" %(args.out, frameno)
cv2.imwrite(fname, image)
else:
print("Exceeded frame count")
        if k == 27:  # ESC exits the capture loop
cv2.destroyAllWindows()
break
    frameno += 1
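# Release the serial port after the capture loop exits.
ser.close()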
|