11533051
from .dataset import Dataset
from .large_img_dataset import LargeImgDataset
from .sequential_dataset import SequentialDataset
from .celeba_data import CelebAData
from .cifar10_data import CIFAR10Data
from .cifar100_data import CIFAR100Data
#from .cub_200_2011_data import CUB2002011
from .fashion_mnist import FashionMNISTData
#from .ilsvrc2012_data import ILSVRC2012Data
from .mnist_data import MNISTData
from .svhn_data import SVHNData
from .udacity_ch2 import UdacityCh2Data
11533063
from functools import reduce
import logging
from django.conf import settings as django_settings
from rest_framework import exceptions
from waldur_core.core.permissions import SAFE_METHODS, IsAdminOrReadOnly
from waldur_core.structure import models
logger = logging.getLogger(__name__)
# TODO: this is a temporary permission filter.
class IsAdminOrOwner(IsAdminOrReadOnly):
"""
Allows staff users or the account's owner to make modifications;
all other users get read-only access.
"""
def has_permission(self, request, view):
user = request.user
if user.is_staff or request.method in SAFE_METHODS:
return True
elif view.suffix == 'List' or request.method == 'DELETE':
return False
# Fix for schema generation
elif 'uuid' not in view.kwargs:
return False
return user == view.get_object()
def is_staff(request, view, obj=None):
if not request.user.is_staff:
raise exceptions.PermissionDenied()
def is_owner(request, view, obj=None):
if not obj:
return
customer = _get_customer(obj)
if not _has_owner_access(request.user, customer):
raise exceptions.PermissionDenied()
def is_manager(request, view, obj=None):
if not obj:
return
project = _get_project(obj)
if not _has_manager_access(request.user, project):
raise exceptions.PermissionDenied()
def is_administrator(request, view, obj=None):
if not obj:
return
project = _get_project(obj)
if not _has_admin_access(request.user, project):
raise exceptions.PermissionDenied()
def _has_owner_access(user, customer):
return user.is_staff or customer.has_user(user, models.CustomerRole.OWNER)
def _has_manager_access(user, project):
return _has_owner_access(user, project.customer) or project.has_user(user, models.ProjectRole.MANAGER)
def _has_admin_access(user, project):
return _has_manager_access(user, project) or project.has_user(user, models.ProjectRole.ADMINISTRATOR)
def _get_parent_by_permission_path(obj, permission_path):
path = getattr(obj.Permissions, permission_path, None)
if path is None:
return
if path == 'self':
return obj
return reduce(getattr, path.split('__'), obj)
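# Illustrative example (hypothetical model): a resource declaring
#     class Permissions:
#         customer_path = 'service_project_link__project__customer'
# lets _get_customer(obj) resolve obj.service_project_link.project.customer
# via the reduce(getattr, ...) walk above; a path of 'self' short-circuits
# to the object itself.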
def _get_project(obj):
return _get_parent_by_permission_path(obj, 'project_path')
def _get_customer(obj):
return _get_parent_by_permission_path(obj, 'customer_path')
def check_access_to_services_management(request, view, obj=None):
if django_settings.WALDUR_CORE['ONLY_STAFF_MANAGES_SERVICES'] and not request.user.is_staff:
raise exceptions.PermissionDenied()
11533078
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
from utils import segment_data, segment_length, map_activation_str_to_layer, batch_convert_len_to_mask
from basemodel import EdgeSeqModel
_INF = -1e30
class PositionalEmbedding(nn.Module):
def __init__(self, d_emb):
super(PositionalEmbedding, self).__init__()
self.d_emb = d_emb
inv_freq = 1 / (10000 ** (torch.arange(0.0, d_emb, 2.0) / d_emb))
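# inverse frequencies follow the sinusoidal schedule 1 / 10000^(2i/d_emb)
# from "Attention Is All You Need"; forward() concatenates sin and cos of pos * inv_freq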
self.register_buffer("inv_freq", inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
if bsz is not None:
return pos_emb.unsqueeze(0).expand(bsz, -1, -1)
else:
return pos_emb.unsqueeze(0)
class PositionwiseFF(nn.Module):
def __init__(self, d_model, d_inner, dropout, act_func="relu", pre_lnorm=False):
super(PositionwiseFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Linear(d_model, d_inner),
map_activation_str_to_layer(act_func),
nn.Dropout(dropout),
nn.Linear(d_inner, d_model),
nn.Dropout(dropout))
self.layer_norm = nn.LayerNorm(d_model)
self.pre_lnorm = pre_lnorm
# init
for m in self.CoreNet.modules():
if isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0.0, 1/(d_model**0.5))
nn.init.zeros_(m.bias)
def forward(self, inp):
if self.pre_lnorm:
##### layer normalization
inp = self.layer_norm(inp)
core_out = self.CoreNet(inp)
##### residual connection
output = core_out + inp
if not self.pre_lnorm:
##### layer normalization
output = self.layer_norm(output)
return output
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
pre_lnorm=False):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.k_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.v_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
# init
for m in [self.q_net, self.k_net, self.v_net, self.o_net]:
nn.init.normal_(m.weight, 0.0, self.scale)
def forward(self, h, attn_mask=None, mems=None):
##### multihead attention
# [bsz x hlen x n_head x d_head]
bsz, qlen = h.size(0), h.size(1)
if mems is not None:
c = torch.cat([mems, h], dim=1)
else:
c = h
klen = c.size(1)
if self.pre_lnorm:
##### layer normalization
h = self.layer_norm(h)
c = self.layer_norm(c)
head_q = self.q_net(h).view(h.size(0), h.size(1), self.n_head, self.d_head)
head_k = self.k_net(c).view(c.size(0), c.size(1), self.n_head, self.d_head)
head_v = self.v_net(c).view(c.size(0), c.size(1), self.n_head, self.d_head)
# [bsz x qlen x klen x n_head]
attn_score = torch.einsum("bind,bjnd->bijn", (head_q, head_k))
attn_score.mul_(self.scale)
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2: # [bsz x klen] -> [bsz x qlen x klen x n_head]
attn_score.masked_fill_((attn_mask == 0).unsqueeze(1).unsqueeze(-1), _INF)
elif attn_mask.dim() == 3: # [bsz x qlen x klen] -> [bsz x qlen x klen x n_head]
attn_score.masked_fill_((attn_mask == 0).unsqueeze(-1), _INF)
# [bsz x qlen x klen x n_head]
attn_prob = F.softmax(attn_score, dim=2)
attn_prob = self.dropatt(attn_prob)
# klen = vlen
# [bsz x qlen x klen x n_head] + [bsz x vlen x n_head x d_head] -> [bsz x qlen x n_head x d_head]
attn_vec = torch.einsum("bijn,bjnd->bind", (attn_prob, head_v))
attn_vec = attn_vec.contiguous().view(
bsz, qlen, self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
##### residual connection
output = h + attn_out
if not self.pre_lnorm:
##### layer normalization
output = self.layer_norm(output)
return output
class RelMultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False, **kw):
super(RelMultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.k_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.v_net = nn.Linear(d_model, n_head * d_head, bias=False)
# self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
# init
for m in [self.q_net, self.k_net, self.v_net, self.o_net]:
nn.init.normal_(m.weight, 0.0, self.scale)
def _rel_shift(self, x, zero_triu=False):
# x: bsz x qlen x klen x n_head
zero_pad = torch.zeros((x.size(0), x.size(1), 1, x.size(3)),
device=x.device, dtype=x.dtype, requires_grad=False) # bsz x qlen x 1 x n_head
x_padded = torch.cat([x, zero_pad], dim=2) # bsz x qlen x (klen+1) x n_head
x = x_padded[:,:,1:,:]
if zero_triu:
ones = torch.ones((x.size(1), x.size(2)), device=x.device, dtype=x.dtype, requires_grad=False)
x = x * torch.tril(ones, diagonal=x.size(2) - x.size(1)).unsqueeze(0).unsqueeze(-1)
return x
def forward(self, w, r, attn_mask=None, mems=None):
raise NotImplementedError
class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kw):
super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kw)
self.r_net = nn.Linear(self.d_model, self.n_head*self.d_head, bias=False)
# init
nn.init.normal_(self.r_net.weight, 0.0, 1/((self.n_head*self.d_head)**0.5))
def forward(self, w, r, r_w_bias, r_r_bias, attn_mask=None, mems=None):
# r: [1 x klen x d_model] positional embedding (shared across the batch), used for term B
# r_w_bias: [n_head x d_head], used for term C
# r_r_bias: [n_head x d_head], used for term D
bsz, qlen = w.size(0), w.size(1)
if mems is not None:
c = torch.cat([mems, w], dim=1)
else:
c = w
klen = c.size(1)
if self.pre_lnorm:
##### layer normalization
w = self.layer_norm(w)
c = self.layer_norm(c)
r_head_k = self.r_net(r)
w_head_q = self.q_net(w)
w_head_k = self.k_net(c)
w_head_v = self.v_net(c)
r_head_k = r_head_k.view(klen, self.n_head, self.d_head) # klen x n_head x d_head
w_head_q = w_head_q.view(bsz, qlen, self.n_head, self.d_head) # bsz x qlen x n_head x d_head
w_head_k = w_head_k.view(bsz, klen, self.n_head, self.d_head) # bsz x klen x n_head x d_head
w_head_v = w_head_v.view(bsz, klen, self.n_head, self.d_head) # bsz x klen x n_head x d_head
#### compute attention score
rw_head_q = w_head_q + r_w_bias # bsz x qlen x n_head x d_head
AC = torch.einsum("bind,bjnd->bijn", (rw_head_q, w_head_k)) # bsz x qlen x klen x n_head
rr_head_q = w_head_q + r_r_bias # bsz x qlen x n_head x d_head
BD = torch.einsum("bind,jnd->bijn", (rr_head_q, r_head_k)) # bsz x qlen x klen x n_head
BD = self._rel_shift(BD)
# [bsz x qlen x klen x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
#### compute attention probability
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
# bsz x klen -> bsz x qlen x klen x n_head
attn_score = attn_score.masked_fill_((attn_mask == 0).unsqueeze(1).unsqueeze(-1), _INF)
elif attn_mask.dim() == 3:
# bsz x qlen x klen -> bsz x qlen x klen x n_head
attn_score = attn_score.masked_fill_((attn_mask == 0).unsqueeze(-1), _INF)
# [bsz x qlen x klen x n_head]
attn_prob = F.softmax(attn_score, dim=2)
attn_prob = self.dropatt(attn_prob)
#### compute attention vector
attn_vec = torch.einsum("bijn,bjnd->bind", (attn_prob, w_head_v))
# [bsz x qlen x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
bsz, qlen, self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
##### residual connection
output = w + attn_out
if not self.pre_lnorm:
##### layer normalization
output = self.layer_norm(output)
return output
class RelLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kw):
super(RelLearnableMultiHeadAttn, self).__init__(*args, **kw)
def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
# r_emb: [klen, n_head, d_head], used for term B
# r_w_bias: [n_head, d_head], used for term C
# r_bias: [klen, n_head], used for term D
bsz, qlen = w.size(0), w.size(1)
if mems is not None:
c = torch.cat([mems, w], dim=1)
else:
c = w
klen = c.size(1)
if self.pre_lnorm:
##### layer normalization
w = self.layer_norm(w)
c = self.layer_norm(c)
w_head_q = self.q_net(w)
w_head_k = self.k_net(c)
w_head_v = self.v_net(c)
w_head_q = w_head_q.view(bsz, qlen, self.n_head, self.d_head) # bsz x qlen x n_head x d_head
w_head_k = w_head_k.view(bsz, klen, self.n_head, self.d_head) # bsz x klen x n_head x d_head
w_head_v = w_head_v.view(bsz, klen, self.n_head, self.d_head) # bsz x klen x n_head x d_head
if klen > r_emb.size(0):
r_emb_pad = r_emb[0:1].expand(klen-r_emb.size(0), -1, -1)
r_emb = torch.cat([r_emb_pad, r_emb], dim=0)
r_bias_pad = r_bias[0:1].expand(klen-r_bias.size(0), -1)
r_bias = torch.cat([r_bias_pad, r_bias], dim=0)
elif klen < r_emb.size(0):
r_emb = r_emb[-klen:]
r_bias = r_bias[-klen:]
#### compute attention score
rw_head_q = w_head_q + r_w_bias.unsqueeze(0).unsqueeze(0) # bsz x qlen x n_head x d_head
AC = torch.einsum("bind,bjnd->bijn", (rw_head_q, w_head_k)) # bsz x qlen x klen x n_head
B_ = torch.einsum("bind,jnd->bijn", (w_head_q, r_emb)) # bsz x qlen x klen x n_head
D_ = r_bias.unsqueeze(0).unsqueeze(0) # 1 x 1 x klen x n_head
BD = self._rel_shift(B_ + D_)
# [bsz x qlen x klen x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
#### compute attention probability
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
# bsz x klen -> bsz x qlen x klen x n_head
attn_score = attn_score.masked_fill(
(attn_mask == 0).unsqueeze(1).unsqueeze(-1), _INF)
elif attn_mask.dim() == 3:
# bsz x qlen x klen -> bsz x qlen x klen x n_head
attn_score = attn_score.masked_fill(
(attn_mask == 0).unsqueeze(-1), _INF)
# [bsz x qlen x klen x n_head]
attn_prob = F.softmax(attn_score, dim=2)
attn_prob = self.dropatt(attn_prob)
#### compute attention vector
attn_vec = torch.einsum("bijn,bjnd->bind", (attn_prob, w_head_v))
# [bsz x qlen x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
bsz, qlen, self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
##### residual connection
output = w + attn_out
if not self.pre_lnorm:
##### layer normalization
output = self.layer_norm(output)
return output
class TransformerLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kw):
super(TransformerLayer, self).__init__()
self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kw)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
act_func=kw.get("act_func", "relu"), pre_lnorm=kw.get("pre_lnorm"))
def forward(self, dec_inp, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class RelLearnableTransformerLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kw):
super(RelLearnableTransformerLayer, self).__init__()
self.dec_attn = RelLearnableMultiHeadAttn(n_head, d_model, d_head, dropout, **kw)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
act_func=kw.get("act_func", "relu"), pre_lnorm=kw.get("pre_lnorm"))
def forward(self, dec_inp, r_emb, r_w_bias, r_bias, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, r_emb, r_w_bias, r_bias,
attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class RelPartialLearnableTransformerLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kw):
super(RelPartialLearnableTransformerLayer, self).__init__()
self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model, d_head, dropout, **kw)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
act_func=kw.get("act_func", "relu"), pre_lnorm=kw.get("pre_lnorm"))
def forward(self, dec_inp, r, r_w_bias, r_r_bias, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, r, r_w_bias, r_r_bias,
attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class TXL(EdgeSeqModel):
def __init__(self, config):
super(TXL, self).__init__(config)
self.drop = nn.Dropout(self.dropout)
self.tgt_len = config["txl_tgt_len"]
self.mem_len = config["txl_mem_len"]
self.ext_len = config["txl_ext_len"]
self.max_tgt_len = self.tgt_len + self.ext_len + self.mem_len
self.clamp_len = config["txl_clamp_len"]
self.same_length = config["txl_same_len"]
self.attn_type = config["txl_attn_type"]
self.d_model = config["txl_d_model"]
# embedding layers
p_emb_dim, g_emb_dim = self.get_emb_dim()
self.emb_scale = 1 / (config["txl_d_head"]**0.5)
self.g_emb_proj = nn.Linear(g_emb_dim, self.d_model)
self.p_emb_proj = self.g_emb_proj if self.share_emb else nn.Linear(p_emb_dim, self.d_model)
self.pos_emb = PositionalEmbedding(self.d_model)
# transformer layers
self.g_net, g_dim = self.create_net(
name="graph", input_dim=self.d_model, num_layers=config["txl_graph_num_layers"],
d_model=self.d_model, d_inner=config["txl_d_inner"],
n_head=config["txl_n_head"], d_head=config["txl_d_head"],
tgt_len=self.tgt_len, ext_len=self.ext_len, mem_len=self.mem_len,
attn_type=self.attn_type, pre_lnorm=config["txl_pre_lnorm"],
act_func=self.act_func, dropout=self.dropout, dropatt=self.dropout)
self.p_net, p_dim = (self.g_net, g_dim) if self.share_arch else self.create_net(
name="pattern", input_dim=self.d_model, num_layers=config["txl_pattern_num_layers"],
d_model=self.d_model, d_inner=config["txl_d_inner"],
n_head=config["txl_n_head"], d_head=config["txl_d_head"],
tgt_len=self.tgt_len, ext_len=self.ext_len, mem_len=self.mem_len,
attn_type=self.attn_type, pre_lnorm=config["txl_pre_lnorm"],
act_func=self.act_func, dropout=self.dropout, dropatt=self.dropout)
self.g_params = self.create_params(
num_layers=config["txl_graph_num_layers"], attn_type=self.attn_type,
n_head=config["txl_n_head"], d_head=config["txl_d_head"], max_tgt_len=self.max_tgt_len)
self.p_params = self.g_params if self.share_arch else self.create_params(
num_layers=config["txl_pattern_num_layers"], attn_type=self.attn_type,
n_head=config["txl_n_head"], d_head=config["txl_d_head"], max_tgt_len=self.max_tgt_len)
# predict layers
if self.add_enc:
p_enc_dim, g_enc_dim = self.get_enc_dim()
p_dim += p_enc_dim
g_dim += g_enc_dim
self.predict_net = self.create_predict_net(config["predict_net"],
pattern_dim=p_dim, graph_dim=g_dim, hidden_dim=config["predict_net_hidden_dim"],
num_heads=config["predict_net_num_heads"], recurrent_steps=config["predict_net_recurrent_steps"],
mem_len=config["predict_net_mem_len"], mem_init=config["predict_net_mem_init"])
# init
nn.init.normal_(self.g_emb_proj.weight, 0.0, self.emb_scale)
nn.init.zeros_(self.g_emb_proj.bias)
nn.init.normal_(self.p_emb_proj.weight, 0.0, self.emb_scale)
nn.init.zeros_(self.p_emb_proj.bias)
def create_net(self, name, input_dim, **kw):
num_layers = kw.get("num_layers", 1)
d_model = kw.get("d_model", 64)
n_head = kw.get("n_head", 8)
d_head = kw.get("d_head", 8)
d_inner = kw.get("d_inner", 64)
tgt_len = kw.get("tgt_len", 64)
ext_len = kw.get("ext_len", 0)
mem_len = kw.get("mem_len", 64)
attn_type = kw.get("attn_type", 0)
pre_lnorm = kw.get("pre_lnorm", True)
act_func = kw.get("act_func", "relu")
dropatt = kw.get("dropatt", 0.0)
dropout = kw.get("dropout", 0.0)
txl = nn.ModuleList()
if attn_type == 0: # the default attention
for i in range(num_layers):
txl.add_module("%s_txl(%d)%d" % (name, attn_type, i), RelPartialLearnableTransformerLayer(
n_head, d_model, d_head, d_inner, dropout, act_func=act_func,
tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len,
dropatt=dropatt, pre_lnorm=pre_lnorm))
elif attn_type == 1: # learnable embeddings
for i in range(num_layers):
txl.add_module("%s_txl(%d)%d" % (name, attn_type, i), RelLearnableTransformerLayer(
n_head, d_model, d_head, d_inner, dropout, act_func=act_func,
tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len,
dropatt=dropatt, pre_lnorm=pre_lnorm))
elif attn_type in [2, 3]: # absolute embeddings
for i in range(num_layers):
txl.add_module("%s_txl(%d)%d" % (name, attn_type, i), TransformerLayer(
n_head, d_model, d_head, d_inner, dropout, act_func=act_func,
dropatt=dropatt, pre_lnorm=pre_lnorm))
num_features = d_model
return txl, num_features
def create_params(self, **kw):
num_layers = kw.get("num_layers", 6)
attn_type = kw.get("attn_type", 0)
n_head = kw.get("n_head", 8)
d_head = kw.get("d_head", 8)
max_tgt_len = kw.get("max_tgt_len", 128)
params = nn.ParameterDict()
if attn_type == 0: # default attention
params["r_w_bias"] = nn.Parameter(torch.Tensor(n_head, d_head))
params["r_r_bias"] = nn.Parameter(torch.Tensor(n_head, d_head))
elif attn_type == 1: # learnable
params["r_emb"] = nn.Parameter(torch.Tensor(
num_layers, max_tgt_len, n_head, d_head))
params["r_w_bias"] = nn.Parameter(torch.Tensor(
num_layers, n_head, d_head))
params["r_bias"] = nn.Parameter(torch.Tensor(
num_layers, max_tgt_len, n_head))
elif attn_type == 2: # absolute standard
pass
elif attn_type == 3: # absolute deeper SA
params["r_emb"] = nn.Parameter(torch.Tensor(
num_layers, max_tgt_len, n_head, d_head))
# init
if "r_emb" in params:
nn.init.normal_(params["r_emb"], 0.0, 1/(d_head**0.5))
if "r_w_bias" in params:
nn.init.normal_(params["r_w_bias"], 0.0, 1/(d_head**0.5))
if "r_r_bias" in params:
nn.init.normal_(params["r_r_bias"], 0.0, 1/(d_head**0.5))
if "r_bias" in params:
nn.init.zeros_(params["r_bias"])
return params
def reset_length(self, tgt_len, ext_len, mem_len):
# If the model does not use memory at all, make the ext_len longer.
# Otherwise, make the mem_len longer and keep the ext_len the same.
self.tgt_len = tgt_len
self.ext_len = ext_len
self.mem_len = mem_len
assert self.max_tgt_len == self.tgt_len + self.ext_len + self.mem_len
def init_mems(self, num_layers, x):
if self.mem_len > 0:
mems = []
for i in range(num_layers+1):
empty = torch.empty((x.size(0), 0, self.d_model), dtype=x.dtype, device=x.device)
mems.append(empty)
return mems
else:
return None
def update_mems(self, hids, mems, mlen, qlen):
# does not deal with None
if mems is None:
return None
# There are `mlen + qlen` steps that can be cached into mems
# For the next step, the last `ext_len` of the `qlen` tokens
# will be used as the extended context. Hence, we only cache
# the tokens from `mlen + qlen - self.ext_len - self.mem_len`
# to `mlen + qlen - self.ext_len`.
new_mems = []
end_idx = mlen + max(0, qlen - self.ext_len)
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
if mems is None or mlen == 0:
new_mems.append(hids[i][:,beg_idx:end_idx].detach())
else:
cat = torch.cat([mems[i], hids[i]], dim=1)
new_mems.append(cat[:,beg_idx:end_idx].detach())
return new_mems
def _forward(self, x, x_len, txl, params, attn_mask=None, mems=None):
bsz, qlen = x.size(0), x.size(1)
mlen = mems[0].size(1) if mems is not None else 0
klen = mlen + qlen
hids = []
if self.attn_type == 0: # default
pos_seq = torch.arange(klen-1, -1, -1.0, device=x.device, dtype=x.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(x)
pos_emb = self.drop(pos_emb)
hids.append(core_out)
for i, layer in enumerate(txl):
mems_i = None if mems is None else mems[i]
core_out = layer(core_out, pos_emb, params["r_w_bias"], params["r_r_bias"],
dec_attn_mask=attn_mask, mems=mems_i)
hids.append(core_out)
elif self.attn_type == 1: # learnable
core_out = self.drop(x)
hids.append(core_out)
for i, layer in enumerate(txl):
if self.clamp_len > 0:
r_emb = params["r_emb"][i][-self.clamp_len :]
r_bias = params["r_bias"][i][-self.clamp_len :]
else:
r_emb, r_bias = params["r_emb"][i], params["r_bias"][i]
r_w_bias = params["r_w_bias"][i]
mems_i = None if mems is None else mems[i]
core_out = layer(core_out, r_emb, r_w_bias, r_bias,
dec_attn_mask=attn_mask, mems=mems_i)
hids.append(core_out)
elif self.attn_type == 2: # absolute
pos_seq = torch.arange(klen - 1, -1, -1.0, device=x.device, dtype=x.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(x + pos_emb[:, -qlen:])
hids.append(core_out)
for i, layer in enumerate(txl):
mems_i = None if mems is None else mems[i]
if mems_i is not None and i == 0:
mems_i += pos_emb[:, :mlen]
core_out = layer(core_out, dec_attn_mask=attn_mask, mems=mems_i)
hids.append(core_out)
elif self.attn_type == 3:
core_out = self.drop(x)
hids.append(core_out)
for i, layer in enumerate(txl):
mems_i = None if mems is None else mems[i]
if mems_i is not None and mlen > 0:
cur_emb = params["r_emb"][i][:-qlen]
cur_size = cur_emb.size(0)
if cur_size < mlen:
cur_emb_pad = cur_emb[0:1].expand(mlen-cur_size, -1, -1)
cur_emb = torch.cat([cur_emb_pad, cur_emb], 0)
else:
cur_emb = cur_emb[-mlen:]
mems_i += cur_emb.view(1, mlen, -1)
core_out += params["r_emb"][i][-qlen:].view(1, qlen, -1)
core_out = layer(core_out, dec_attn_mask=attn_mask, mems=mems_i)
hids.append(core_out)
core_out = self.drop(core_out)
new_mems = self.update_mems(hids, mems, mlen, qlen)
return core_out, new_mems
def encoder_forward(self, enc_inp, enc_len, enc_txl, enc_params, mems=None):
qlen = enc_inp.size(1)
mlen = mems[0].size(1) if mems is not None else 0
enc_attn_mask = batch_convert_len_to_mask(enc_len + mlen, max_seq_len=qlen+mlen)
return self._forward(enc_inp, enc_len, enc_txl, enc_params, attn_mask=enc_attn_mask, mems=mems)
def decoder_forward(self, dec_inp, dec_len, dec_txl, dec_params, mems=None):
bsz, qlen = dec_inp.size(0), dec_inp.size(1)
mlen = mems[0].size(1) if mems is not None else 0
klen = mlen + qlen
ones = torch.ones((qlen, klen), dtype=torch.uint8, device=dec_inp.device, requires_grad=False)
if self.same_length:
mask_len = klen - self.mem_len
if mask_len > 0:
mask_shift_len = qlen - mask_len
else:
mask_shift_len = qlen
dec_attn_mask = (1 - (torch.triu(ones, diagonal=1+mlen) + torch.tril(ones, -mask_shift_len))).unsqueeze(0)
else:
dec_attn_mask = (1 - torch.triu(ones, diagonal=1+mlen)).unsqueeze(0)
return self._forward(dec_inp, dec_len, dec_txl, dec_params, attn_mask=dec_attn_mask, mems=mems)
def increase_input_size(self, config):
old_p_enc_dim, old_g_enc_dim = self.get_enc_dim()
super(TXL, self).increase_input_size(config)
new_p_enc_dim, new_g_enc_dim = self.get_enc_dim()
# increase predict network
if self.add_enc and (new_g_enc_dim != old_g_enc_dim or new_p_enc_dim != old_p_enc_dim):
self.predict_net.increase_input_size(
self.predict_net.pattern_dim+new_p_enc_dim-old_p_enc_dim,
self.predict_net.graph_dim+new_g_enc_dim-old_g_enc_dim)
def increase_net(self, config):
p_emb_dim, g_emb_dim = self.get_emb_dim()
g_net, g_dim = self.create_net(
name="graph", input_dim=self.d_model, num_layers=config["txl_graph_num_layers"],
d_model=self.d_model, d_inner=config["txl_d_inner"],
n_head=config["txl_n_head"], d_head=config["txl_d_head"],
tgt_len=self.tgt_len, ext_len=self.ext_len, mem_len=self.mem_len,
attn_type=self.attn_type, pre_lnorm=config["txl_pre_lnorm"],
act_func=self.act_func, dropout=self.dropout, dropatt=self.dropout)
assert len(g_net) >= len(self.g_net)
with torch.no_grad():
for old_g_rnn, new_g_rnn in zip(self.g_net, g_net):
new_g_rnn.load_state_dict(old_g_rnn.state_dict())
del self.g_net
self.g_net = g_net
if self.share_arch:
self.p_net = self.g_net
else:
p_net, p_dim = self.create_net(
name="pattern", input_dim=self.d_model, num_layers=config["txl_pattern_num_layers"],
d_model=self.d_model, d_inner=config["txl_d_inner"],
n_head=config["txl_n_head"], d_head=config["txl_d_head"],
tgt_len=self.tgt_len, ext_len=self.ext_len, mem_len=self.mem_len,
attn_type=self.attn_type, pre_lnorm=config["txl_pre_lnorm"],
act_func=self.act_func, dropout=self.dropout, dropatt=self.dropout)
assert len(p_net) >= len(self.p_net)
with torch.no_grad():
for old_p_rnn, new_p_rnn in zip(self.p_net, p_net):
new_p_rnn.load_state_dict(old_p_rnn.state_dict())
del self.p_net
self.p_net = p_net
g_params = self.create_params(
num_layers=config["txl_graph_num_layers"], attn_type=self.attn_type,
n_head=config["txl_n_head"], d_head=config["txl_d_head"], max_tgt_len=self.max_tgt_len)
with torch.no_grad():
for k in self.g_params:
g_params[k].data.copy_(self.g_params[k])
del self.g_params
self.g_params = g_params
if self.share_arch:
self.p_params = self.g_params
else:
p_params = self.g_params if self.share_arch else self.create_params(
num_layers=config["txl_pattern_num_layers"], attn_type=self.attn_type,
n_head=config["txl_n_head"], d_head=config["txl_d_head"], max_tgt_len=self.max_tgt_len)
with torch.no_grad():
for k in self.p_params:
p_params[k].data.copy_(self.p_params[k])
del self.p_params
self.p_params = p_params
def forward(self, pattern, pattern_len, graph, graph_len):
# data, target, *mems
# nn.DataParallel does not allow size(0) tensors to be broadcasted.
# So, have to initialize size(0) mems inside the model forward.
# Moreover, have to return new_mems to allow nn.DataParallel to piece
# them together.
bsz = pattern_len.size(0)
gate = self.get_filter_gate(pattern, pattern_len, graph, graph_len)
zero_mask = (gate == 0).unsqueeze(-1) if gate is not None else None
pattern_emb, graph_emb = self.get_emb(pattern, pattern_len, graph, graph_len)
if zero_mask is not None:
graph_emb.masked_fill_(zero_mask, 0.0)
pattern_emb = self.p_emb_proj(pattern_emb).mul_(self.emb_scale)
graph_emb = self.g_emb_proj(graph_emb).mul_(self.emb_scale)
pattern_segments = segment_data(pattern_emb, self.tgt_len)
pattern_seg_lens = segment_length(pattern_len, self.tgt_len)
graph_segments = segment_data(graph_emb, self.tgt_len)
graph_seg_lens = segment_length(graph_len, self.tgt_len)
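# Segment-level recurrence (Transformer-XL): inputs are split into tgt_len-sized
# chunks, and each chunk attends to the cached hidden states (mems) of earlier chunks.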
pattern_outputs = list()
for i, (pattern_seg, pattern_seg_len) in enumerate(zip(pattern_segments, pattern_seg_lens)):
if i == 0:
pattern_mems = self.init_mems(len(self.p_net), pattern_seg)
pattern_output, pattern_mems = self.encoder_forward(pattern_seg, pattern_seg_len, self.p_net, self.p_params, mems=pattern_mems)
pattern_outputs.append(pattern_output)
pattern_output = torch.cat(pattern_outputs, dim=1)[:,:pattern_emb.size(1)]
# some segments may only have padded elements, we need to set them as 0 manually
pattern_mask = (batch_convert_len_to_mask(pattern_len, max_seq_len=pattern_output.size(1))==0).unsqueeze(-1)
pattern_output.masked_fill_(pattern_mask, 0.0)
graph_outputs = list()
for i, (graph_seg, graph_seg_len) in enumerate(zip(graph_segments, graph_seg_lens)):
if i == 0:
graph_mems = self.init_mems(len(self.g_net), graph_seg)
graph_output, graph_mems = self.encoder_forward(graph_seg, graph_seg_len, self.g_net, self.g_params, mems=graph_mems)
graph_outputs.append(graph_output)
graph_output = torch.cat(graph_outputs, dim=1)[:,:graph_emb.size(1)]
# some segments may only have padded elements, we need to set them as 0 manually
graph_mask = (batch_convert_len_to_mask(graph_len, max_seq_len=graph_output.size(1))==0).unsqueeze(-1)
graph_output.masked_fill_(graph_mask, 0.0)
if self.add_enc:
pattern_enc, graph_enc = self.get_enc(pattern, pattern_len, graph, graph_len)
if zero_mask is not None:
graph_enc.masked_fill_(zero_mask, 0.0)
pattern_output = torch.cat([pattern_enc, pattern_output], dim=2)
graph_output = torch.cat([graph_enc, graph_output], dim=2)
pred = self.predict_net(pattern_output, pattern_len, graph_output, graph_len)
return pred
11533114
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
from ..dummymodel import DummyModel
class Command(BaseCommand):
"""Django command to create Initial Data"""
def handle(self, *args, **options):
# Can't generate a permission without a content type
content_type = ContentType.objects.get_for_model(DummyModel)
# Create permissions from list
PERMISSION_LIST = []
for permission in PERMISSION_LIST:
Permission.objects.get_or_create(codename=permission, name=permission.title(), content_type=content_type)
PERMISSION_GROUP_RELATION = [
]
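# Illustrative entries (hypothetical codenames) for the two lists above, e.g.:
# PERMISSION_LIST = ['view_dummymodel', 'change_dummymodel']
# PERMISSION_GROUP_RELATION = [
#     {'group': 'editors', 'permissions': ['change_dummymodel']},
# ]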
for relation in PERMISSION_GROUP_RELATION:
group, created = Group.objects.get_or_create(name=relation['group'])
for permission_codename in relation['permissions']:
permission = Permission.objects.get(codename=permission_codename)
group.permissions.add(permission)
11533127
import pytest
import datetime
import time
import base64
from cryptoauthlib import *
from cryptoauthlib.library import load_cryptoauthlib
from cryptoauthlib_mock import atcab_mock
__config = cfg_ateccx08a_kithid_default()
def pretty_print_hex(a, l=16, indent=''):
"""
Format a list/bytes/bytearray object into a formatted ascii hex string
"""
s = ''
a = bytearray(a)
for x in range(0, len(a), l):
s += indent + ''.join(['%02X ' % y for y in a[x:x+l]]) + '\n'
return s
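# Example: pretty_print_hex(bytes(range(4))) returns '00 01 02 03 \n'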
def pubnums_to_bytes(pub_nums):
return bytes(bytearray.fromhex('%064X%064X' % (pub_nums.x, pub_nums.y)))
@pytest.fixture
def test_jwt_init():
"""
Run tests against the library mock
"""
load_cryptoauthlib(atcab_mock())
@pytest.fixture
def test_jwt_init_live(test_init_with_device):
"""
Use real hardware for these tests - otherwise skip
"""
load_cryptoauthlib()
if Status.ATCA_SUCCESS != atcab_init(__config):
raise Exception('Unable to connect to a device')
# Check device type
info = bytearray(4)
assert Status.ATCA_SUCCESS == atcab_info(info)
dev_type = get_device_type_id(get_device_name(info))
if dev_type != __config.devtype:
__config.devtype = dev_type
assert Status.ATCA_SUCCESS == atcab_release()
time.sleep(1)
assert Status.ATCA_SUCCESS == atcab_init(__config)
@pytest.mark.parametrize("slot, config", [
pytest.param(0, None, id='Normal'),
pytest.param(0, __config, id='Init/Reinit'),
])
def test_jwt_round_trip_ec_qa(test_jwt_init_live, slot, config):
"""
Test JWT with an asymmetric key (Elliptic Curve: SECP256r1)
"""
# Load device public key
public_key = bytearray(64)
assert Status.ATCA_SUCCESS == atcab_get_pubkey(0, public_key)
# Convert the public key to PEM format
public_key_pem = bytearray.fromhex('3059301306072A8648CE3D020106082A8648CE3D03010703420004') + public_key
public_key_pem = '-----BEGIN PUBLIC KEY-----\n' + base64.b64encode(public_key_pem).decode('ascii') + '\n-----END PUBLIC KEY-----'
claims = {
# The time that the token was issued at
'iat': datetime.datetime.utcnow(),
# The time the token expires.
'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),
# A Dummy/Test Audience to verify against
'aud': 'test_audience'
}
token = PyJWT(slot, config)
encoded = token.encode(claims, public_key_pem, algorithm='ES256')
# If the audience does not match or the signature fails to verify, the following will raise an exception
decoded = token.decode(encoded, public_key_pem, audience=claims['aud'], algorithms=['ES256'])
assert claims == decoded
@pytest.mark.parametrize("slot, config", [
pytest.param(1, None, id='Normal'),
pytest.param(1, __config, id='Init/Reinit'),
])
def test_jwt_round_trip_hmac_qa(test_jwt_init_live, slot, config):
"""
Check JWT with a symmetric key (SHA256 based HMAC)
"""
# Set write key
write_key = bytearray([<KEY>
0xd8, 0x22, 0xc0, 0x13, 0xfc, 0xc3, 0x23, 0x84,
0x5d, 0x1b, 0x56, 0x9f, 0xe7, 0x05, 0xb6, 0x00,
0x06, 0xfe, 0xec, 0x14, 0x5a, 0x0d, 0xb1, 0xe3])
assert Status.ATCA_SUCCESS == atcab_write_zone(2, 4, 0, 0, write_key, 32)
# Write HMAC key
hmac_key = bytearray([0x73, 0x16, 0xe9, 0x64, 0x2b, 0x38, 0xfb, 0xad,
0x5d, 0xb7, 0x0a, 0x1b, 0x33, 0xf0, 0xdc, 0xb9,
0x4c, 0x35, 0x5e, 0x78, 0xd7, 0xf0, 0x00, 0xa9,
0xb3, 0x19, 0x41, 0xa0, 0x36, 0x0d, 0x09, 0x61])
assert Status.ATCA_SUCCESS == atcab_write_enc(slot, 0, hmac_key, write_key, 4)
claims = {
# The time that the token was issued at
'iat': datetime.datetime.utcnow(),
# The time the token expires.
'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),
# A Dummy/Test Audience to verify against
'aud': 'test_audience'
}
token = PyJWT(slot, config)
encoded = token.encode(claims, b'', algorithm='HS256')
# If the audience does not match or the signature fails to verify, the following will raise an exception
decoded = token.decode(encoded, bytes(hmac_key), audience=claims['aud'], algorithms=['HS256'])
assert claims == decoded
11533130
from django.contrib import admin
from .models import Document, Create_page
# Register your models here.
admin.site.register(Document)
admin.site.register(Create_page)
11533176
def up(config, database, semester, course):
if not database.table_has_column('electronic_gradeable', 'eg_limited_access_blind'):
database.execute('ALTER TABLE electronic_gradeable ADD COLUMN IF NOT EXISTS eg_limited_access_blind INTEGER DEFAULT 1')
database.execute('ALTER TABLE electronic_gradeable ADD COLUMN IF NOT EXISTS eg_peer_blind INTEGER DEFAULT 3')
def down(config, database, semester, course):
pass
11533179
from select2_foreign_key import test_functional
from .models import TModel
class AdminForeignKeyTestCase(test_functional.AdminForeignKeyTestCase):
model = TModel
11533214
from __future__ import absolute_import
import numpy as np
from pyti import catch_errors
from pyti.function_helper import fill_for_noncomputable_vals
from six.moves import range
def true_range(close_data, period):
"""
True Range.
Formula:
TR_t = MAX(abs(H_t - L_t), abs(H_t - C_{t-1}), abs(L_t - C_{t-1}))
"""
catch_errors.check_for_period_error(close_data, period)
tr = [np.max([np.max(close_data[idx+1-period:idx+1]) -
np.min(close_data[idx+1-period:idx+1]),
abs(np.max(close_data[idx+1-period:idx+1]) -
close_data[idx-1]),
abs(np.min(close_data[idx+1-period:idx+1]) -
close_data[idx-1])]) for idx in range(period-1, len(close_data))]
tr = fill_for_noncomputable_vals(close_data, tr)
return tr
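# Minimal usage sketch (synthetic prices, illustrative only):
# closes = np.array([10.0, 10.5, 10.2, 10.8, 11.0])
# true_range(closes, period=3)  # leading period-1 entries are filled as non-computable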
11533251
from loudml import errors
import requests
from urllib.parse import urlencode
DEFAULT_REQUEST_TIMEOUT = 5
def perform_request(
base_url,
method,
url,
session,
params=None,
body=None,
timeout=None,
ignore=(),
headers=None
):
url = base_url + url
if params:
url = '%s?%s' % (url, urlencode(params or {}))
request = requests.Request(
method=method, headers=headers, url=url, json=body)
prepared_request = session.prepare_request(request)
settings = session.merge_environment_settings(
prepared_request.url, {}, None, None, None)
send_kwargs = {'timeout': timeout}
send_kwargs.update(settings)
try:
response = session.send(prepared_request, **send_kwargs)
except Exception as e:
if isinstance(e, requests.exceptions.SSLError):
raise errors.SSLError('N/A', str(e), e)
if isinstance(e, requests.Timeout):
raise errors.ConnectionTimeout('TIMEOUT', str(e), e)
raise errors.ConnectionError('N/A', str(e), e)
return response
def perform_data_request(
base_url,
method,
url,
session,
params=None,
body=None,
timeout=None,
ignore=(),
headers=None
):
url = base_url + url
if params:
url = '%s?%s' % (url, urlencode(params or {}))
request = requests.Request(
method=method, headers=headers, url=url, data=body)
prepared_request = session.prepare_request(request)
settings = session.merge_environment_settings(
prepared_request.url, {}, None, None, None)
send_kwargs = {'timeout': timeout}
send_kwargs.update(settings)
try:
response = session.send(prepared_request, **send_kwargs)
except Exception as e:
if isinstance(e, requests.exceptions.SSLError):
raise errors.SSLError('N/A', str(e), e)
if isinstance(e, requests.Timeout):
raise errors.ConnectionTimeout('TIMEOUT', str(e), e)
raise errors.ConnectionError('N/A', str(e), e)
return response
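# Minimal usage sketch (base URL and endpoint are hypothetical):
# import requests
# session = requests.Session()
# response = perform_request('http://localhost:8077', 'GET', '/models',
#                            session, timeout=DEFAULT_REQUEST_TIMEOUT)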
11533256
from __future__ import absolute_import
"""
Collection of physical constants and conversion factors.
Most constants are in SI units, so you can do
print('10 mile per minute is', 10*mile/minute, 'm/s or',
10*mile/(minute*knot), 'knots')
The list is not meant to be comprehensive, but just a convenient list for
everyday use.
"""
"""
BasSw 2006
physical constants: imported from CODATA
unit conversion: see e.g. NIST special publication 811
Use at own risk: double-check values before calculating your Mars
orbit-insertion burn.
Some constants exist in a few variants, which are marked with suffixes.
The ones without any suffix should be the most common one.
"""
import math as _math
from .codata import value as _cd
# mathematical constants
pi = _math.pi
golden = golden_ratio = (1 + _math.sqrt(5)) / 2
# SI prefixes
yotta = 1e24
zetta = 1e21
exa = 1e18
peta = 1e15
tera = 1e12
giga = 1e9
mega = 1e6
kilo = 1e3
hecto = 1e2
deka = 1e1
deci = 1e-1
centi = 1e-2
milli = 1e-3
micro = 1e-6
nano = 1e-9
pico = 1e-12
femto = 1e-15
atto = 1e-18
zepto = 1e-21
# binary prefixes
kibi = 2 ** 10
mebi = 2 ** 20
gibi = 2 ** 30
tebi = 2 ** 40
pebi = 2 ** 50
exbi = 2 ** 60
zebi = 2 ** 70
yobi = 2 ** 80
# physical constants
c = speed_of_light = _cd('speed of light in vacuum')
mu_0 = 4e-7 * pi
epsilon_0 = 1 / (mu_0 * c * c)
h = Planck = _cd('Planck constant')
hbar = h / (2 * pi)
G = gravitational_constant = _cd('Newtonian constant of gravitation')
g = _cd('standard acceleration of gravity')
e = elementary_charge = _cd('elementary charge')
R = gas_constant = _cd('molar gas constant')
alpha = fine_structure = _cd('fine-structure constant')
N_A = Avogadro = _cd('Avogadro constant')
k = Boltzmann = _cd('Boltzmann constant')
sigma = Stefan_Boltzmann = _cd('Stefan-Boltzmann constant')
Wien = _cd('Wien displacement law constant')
Rydberg = _cd('Rydberg constant')
# weight in kg
gram = 1e-3
metric_ton = 1e3
grain = 64.79891e-6
lb = pound = 7000 * grain # avoirdupois
oz = ounce = pound / 16
stone = 14 * pound
long_ton = 2240 * pound
short_ton = 2000 * pound
troy_ounce = 480 * grain # only for metals / gems
troy_pound = 12 * troy_ounce
carat = 200e-6
m_e = electron_mass = _cd('electron mass')
m_p = proton_mass = _cd('proton mass')
m_n = neutron_mass = _cd('neutron mass')
m_u = u = atomic_mass = _cd('atomic mass constant')
# angle in rad
degree = pi / 180
arcmin = arcminute = degree / 60
arcsec = arcsecond = arcmin / 60
# time in second
minute = 60.0
hour = 60 * minute
day = 24 * hour
week = 7 * day
year = 365 * day
Julian_year = 365.25 * day
# length in meter
inch = 0.0254
foot = 12 * inch
yard = 3 * foot
mile = 1760 * yard
mil = inch / 1000
pt = point = inch / 72 # typography
survey_foot = 1200.0 / 3937
survey_mile = 5280 * survey_foot
nautical_mile = 1852.0
fermi = 1e-15
angstrom = 1e-10
micron = 1e-6
au = astronomical_unit = 149597870691.0
light_year = Julian_year * c
parsec = au / arcsec
# pressure in pascal
atm = atmosphere = _cd('standard atmosphere')
bar = 1e5
torr = mmHg = atm / 760
psi = pound * g / (inch * inch)
# area in meter**2
hectare = 1e4
acre = 43560 * foot ** 2
# volume in meter**3
litre = liter = 1e-3
gallon = gallon_US = 231 * inch ** 3 # US
# pint = gallon_US / 8
fluid_ounce = fluid_ounce_US = gallon_US / 128
bbl = barrel = 42 * gallon_US # for oil
gallon_imp = 4.54609e-3 # uk
fluid_ounce_imp = gallon_imp / 160
# speed in meter per second
kmh = 1e3 / hour
mph = mile / hour
# approximate speed of sound in air at 15 degrees Celsius and 1 atm
mach = speed_of_sound = 340.5
knot = nautical_mile / hour
# temperature in kelvin
zero_Celsius = 273.15
degree_Fahrenheit = 1 / 1.8 # only for differences
# energy in joule
eV = electron_volt = elementary_charge # * 1 Volt
calorie = calorie_th = 4.184
calorie_IT = 4.1868
erg = 1e-7
Btu_th = pound * degree_Fahrenheit * calorie_th / gram
Btu = Btu_IT = pound * degree_Fahrenheit * calorie_IT / gram
ton_TNT = 1e9 * calorie_th
# Wh = watt_hour
# power in watt
hp = horsepower = 550 * foot * pound * g
# force in newton
dyn = dyne = 1e-5
lbf = pound_force = pound * g
kgf = kilogram_force = g # * 1 kg
# functions for conversions that are not linear
def C2K(C):
"""Convert Celsius to Kelvin"""
return C + zero_Celsius
def K2C(K):
"""Convert Kelvin to Celsius"""
return K - zero_Celsius
def F2C(F):
"""Convert Fahrenheit to Celsius"""
return (F - 32) / 1.8
def C2F(C):
"""Convert Celsius to Fahrenheit"""
return 1.8 * C + 32
def F2K(F):
"""Convert Fahrenheit to Kelvin"""
return C2K(F2C(F))
def K2F(K):
"""Convert Kelvin to Fahrenheit"""
return C2F(K2C(K))
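# Quick sanity checks: C2F(100) == 212.0, F2C(32) == 0.0, F2K(32) == 273.15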
# optics
def lambda2nu(lambda_):
"""Convert wavelength to optical frequency"""
return c / lambda_
def nu2lambda(nu):
"""Convert optical frequency to wavelength"""
return c / nu
11533280
import glob
import os.path as osp
import numpy as np
from vedacore import fileio
from vedacore.image import imread
from vedacore.misc import registry
from .custom import CustomDataset
@registry.register_module('dataset')
class Thumos14Dataset(CustomDataset):
"""Thumos14 dataset for temporal action detection."""
CLASSES = ('BaseballPitch', 'BasketballDunk', 'Billiards', 'CleanAndJerk',
'CliffDiving', 'CricketBowling', 'CricketShot', 'Diving',
'FrisbeeCatch', 'GolfSwing', 'HammerThrow', 'HighJump',
'JavelinThrow', 'LongJump', 'PoleVault', 'Shotput',
'SoccerPenalty', 'TennisSwing', 'ThrowDiscus',
'VolleyballSpiking')
def __init__(self, **kwargs):
super(Thumos14Dataset, self).__init__(**kwargs)
def load_annotations(self, ann_file):
"""Load annotation from Thumos14 json ann_file.
Args:
ann_file (str): Path of JSON file.
Returns:
list[dict]: Annotation info from JSON file.
"""
data_infos = []
data = fileio.load(ann_file)
for video_name, video_info in data['database'].items():
data_info = dict()
data_info['video_name'] = video_name
data_info['duration'] = float(video_info['duration'])
imgfiles = glob.glob(osp.join(self.video_prefix, video_name, '*'))
num_imgs = len(imgfiles)
data_info['frames'] = num_imgs
data_info['fps'] = int(round(num_imgs / video_info['duration']))
img = imread(imgfiles[0])
data_info['height'], data_info['width'] = img.shape[:2]
segments = []
labels = []
segments_ignore = []
for ann in video_info['annotations']:
label = ann['label']
segment = ann['segment']
if not self.test_mode:
segment[0] = min(video_info['duration'],
max(0, segment[0]))
segment[1] = min(video_info['duration'],
max(0, segment[1]))
if segment[0] >= segment[1]:
continue
if label == 'Ambiguous':
segments_ignore.append(segment)
elif label in self.CLASSES:
segments.append(segment)
labels.append(self.CLASSES.index(label))
else:
continue
if not segments:
segments = np.zeros((0, 2))
labels = np.zeros((0, ))
else:
segments = np.array(segments)
labels = np.array(labels)
if not segments_ignore:
segments_ignore = np.zeros((0, 2))
else:
segments_ignore = np.array(segments_ignore)
data_info['ann'] = dict(
segments=segments.astype(np.float32),
labels=labels.astype(np.int64),
segments_ignore=segments_ignore.astype(np.float32))
data_infos.append(data_info)
return data_infos
11533431
from vulkan import vk, helpers as hvk
from enum import IntFlag
from functools import lru_cache
from ctypes import memmove, byref, c_void_p, POINTER
import weakref
class MemoryManager(object):
def __init__(self, engine):
self.engine = engine
self.memory_info = {}
self.allocations = []
self._setup_memory_info()
def free(self):
_, api, device = self.ctx
for alloc in self.allocations:
hvk.free_memory(api, device, alloc.device_memory)
del self.engine
@property
def ctx(self):
ctx = self.engine
api, device = ctx.api, ctx.device
return ctx, api, device
def alloc(self, resource, resource_type, types):
_, api, device = self.ctx
requirements = self.get_resource_requirements(resource, resource_type)
memory_type_index = self._get_memory_type_index(types)
device_memory = hvk.allocate_memory(api, device, hvk.memory_allocate_info(
allocation_size = requirements.size,
memory_type_index = memory_type_index
))
if resource_type == vk.STRUCTURE_TYPE_IMAGE_CREATE_INFO:
hvk.bind_image_memory(api, device, resource, device_memory)
elif resource_type == vk.STRUCTURE_TYPE_BUFFER_CREATE_INFO:
hvk.bind_buffer_memory(api, device, resource, device_memory, 0)
else:
raise ValueError("value of argument \"resource_type\" must be STRUCTURE_TYPE_IMAGE_CREATE_INFO or STRUCTURE_TYPE_BUFFER_CREATE_INFO")
alloc = Alloc(resource, device_memory, requirements.size)
self.allocations.append(alloc)
return weakref.proxy(alloc)
def shared_alloc(self, size, types):
_, api, device = self.ctx
memory_type_index = self._get_memory_type_index(types)
device_memory = hvk.allocate_memory(api, device, hvk.memory_allocate_info(
allocation_size = size,
memory_type_index = memory_type_index
))
alloc = SharedAlloc(device_memory, size)
self.allocations.append(alloc)
return weakref.proxy(alloc)
def free_alloc(self, alloc):
_, api, device = self.ctx
hvk.free_memory(api, device, alloc.device_memory)
self.allocations.remove(alloc)
def map_alloc(self, alloc, offset=None, size=None):
engine, api, device = self.ctx
offset = offset or 0
size = size or alloc.size
pointer = hvk.map_memory(api, device, alloc.device_memory, offset, size)
unmap = lambda: hvk.unmap_memory(api, device, alloc.device_memory)
return MappedDeviceMemory(alloc, pointer, unmap)
def get_resource_requirements(self, resource, resource_type):
_, api, device = self.ctx
requirements = None
if resource_type == vk.STRUCTURE_TYPE_IMAGE_CREATE_INFO:
requirements = hvk.image_memory_requirements(api, device, resource)
elif resource_type == vk.STRUCTURE_TYPE_BUFFER_CREATE_INFO:
requirements = hvk.buffer_memory_requirements(api, device, resource)
else:
raise ValueError("value of argument \"resource_type\" must be STRUCTURE_TYPE_IMAGE_CREATE_INFO or STRUCTURE_TYPE_BUFFER_CREATE_INFO")
return requirements
def _setup_memory_info(self):
ctx = self.engine
api, physical_device = ctx.api, ctx.physical_device
props = hvk.physical_device_memory_properties(api, physical_device)
types = props.memory_types[:props.memory_type_count]
heaps = props.memory_heaps[:props.memory_heap_count]
self.memory_info["memory_types"] = types
self.memory_info["memory_heaps"] = heaps
@lru_cache(maxsize=None, typed=False)
def _get_memory_type_index(self, memory_type_flags):
memory_types = self.memory_info["memory_types"]
for type_index, memory_type in enumerate(memory_types):
memory_type_properties = hvk.MemoryPropertyFlag(memory_type.property_flags)
for memory_type_flag in memory_type_flags:
if hvk.MemoryPropertyFlag(memory_type_flag) in memory_type_properties:
return type_index
raise ValueError(f"No memory type matches the requested flags: {memory_type_flags}")
class Alloc(object):
__slots__ = ("resource", "device_memory", "size", "__weakref__")
def __init__(self, resource, device_memory, size):
self.resource = resource
self.device_memory = device_memory
self.size = size
class SharedAlloc(object):
__slots__ = ("device_memory", "size", "__weakref__")
def __init__(self, device_memory, size):
self.device_memory = device_memory
self.size = size
class MappedDeviceMemory(object):
__slots__ = ("alloc", "pointer", "pointer2", "unmap")
def __init__(self, alloc, pointer, unmap):
self.alloc = alloc
self.pointer = pointer
self.pointer2 = pointer.value
self.unmap = unmap
def write_bytes(self, offset, data):
offset_pointer = self.pointer2 + offset
memmove(offset_pointer, byref(data), len(data))
def write_typed_data(self, src, offset):
dst = (type(src)*1).from_address(self.pointer2 + offset)
dst[0] = src
def __enter__(self):
return self
def __exit__(self, *args):
self.unmap()
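# Minimal usage sketch (engine, buffer, value, and the flag name are hypothetical;
# memory property flags are whatever hvk.MemoryPropertyFlag defines in this binding):
# manager = MemoryManager(engine)
# alloc = manager.alloc(buffer, vk.STRUCTURE_TYPE_BUFFER_CREATE_INFO,
#                       types=(vk.MEMORY_PROPERTY_HOST_VISIBLE_BIT,))
# with manager.map_alloc(alloc) as mapped:
#     mapped.write_typed_data(my_ctypes_value, 0)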
11533455
import django.db.models.deletion
from django.db import migrations, models
def build_spl_migrations(APP_NAME, SVC_NAME, SPL_NAME, AFFECTED_MODELS):
def fill_project_and_service(apps, schema_editor):
for model_name in AFFECTED_MODELS:
model = apps.get_model(APP_NAME, model_name)
for obj in model.objects.all():
obj.project = obj.service_project_link.project
obj.service_settings = obj.service_project_link.service.settings
obj.save(update_fields=['project', 'service_settings'])
ADD_OPERATIONS = []
ALTER_OPERATIONS = []
for model_name in AFFECTED_MODELS:
ADD_OPERATIONS += [
migrations.AddField(
model_name=model_name,
name='project',
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.Project',
),
),
migrations.AddField(
model_name=model_name,
name='service_settings',
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
),
]
ALTER_OPERATIONS += [
migrations.AlterField(
model_name=model_name,
name='project',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.Project',
),
),
migrations.AlterField(
model_name=model_name,
name='service_settings',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
),
migrations.RemoveField(model_name=model_name, name='service_project_link',),
]
return (
ADD_OPERATIONS
+ [migrations.RunPython(fill_project_and_service)]
+ ALTER_OPERATIONS
+ [
migrations.AlterUniqueTogether(name=SPL_NAME, unique_together=None,),
migrations.AlterUniqueTogether(name=SVC_NAME, unique_together=None,),
migrations.DeleteModel(name=SPL_NAME),
migrations.DeleteModel(name=SVC_NAME),
]
)
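# Illustrative use inside a concrete migration module (app/model names hypothetical):
# class Migration(migrations.Migration):
#     dependencies = [('myapp', '0042_previous_migration')]
#     operations = build_spl_migrations(
#         'myapp', 'MyService', 'MyServiceProjectLink', ['Instance', 'Volume'])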
11533464
from threading import Lock
from polog.handlers.file.locks.abstract_single_lock import AbstractSingleLock
class ThreadLock(AbstractSingleLock):
"""
A wrapper around an ordinary thread lock (see https://en.wikipedia.org/wiki/Lock_(computer_science)).
It is intended to make the lock possible to switch off.
"""
def __init__(self, on=True):
if not on:
self.off()
else:
self.lock = Lock()
def acquire(self):
"""
Acquire the lock.
"""
self.lock.acquire()
def release(self):
"""
Release the lock.
"""
self.lock.release()
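# Usage sketch (off() comes from AbstractSingleLock and is assumed to make
# acquire/release no-ops when constructed with on=False):
# lock = ThreadLock()
# lock.acquire()
# try:
#     ...  # critical section
# finally:
#     lock.release()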
11533469
from Spheral3d import *
from MedialGenerator import *
from CompositeNodeDistribution import *
from SpheralTestUtilities import *
from VoronoiDistributeNodes import distributeNodes3d as distributeNodes
from siloPointmeshDump import *
commandLine(hmin = 1e-5,
hmax = 1e6,
rhoscale = 0.5,
n1 = 1000,
n2 = 1000,
nPerh = 2.01,
maxIterations = 200,
fracTol = 1e-3)
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
gamma = 1.4
mu = 2.0
eos = GammaLawGasMKS(gamma, mu)
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
WT = TableKernel(BSplineKernel(), 1000)
output("WT")
#-------------------------------------------------------------------------------
# Make the NodeLists.
#-------------------------------------------------------------------------------
nodes1 = makeFluidNodeList("nodes1", eos,
hmin = hmin,
hmax = hmax,
hminratio = 1.0,
nPerh = nPerh,
topGridCellSize = 100,
xmin = Vector.one * -100.0,
xmax = Vector.one * 100.0)
nodes2 = makeFluidNodeList("nodes2", eos,
hmin = hmin,
hmax = hmax,
hminratio = 1.0,
nPerh = nPerh,
topGridCellSize = 100,
xmin = Vector.one * -100.0,
xmax = Vector.one * 100.0)
nodeSet = [nodes1, nodes2]
for nodes in nodeSet:
output("nodes.name")
output(" nodes.hmin")
output(" nodes.hmax")
output(" nodes.nodesPerSmoothingScale")
#-------------------------------------------------------------------------------
# Make some interesting boundaries for each of our NodeLists and generators.
#-------------------------------------------------------------------------------
# The inner cube.
bcpoints = vector_of_Vector()
for p in [(1,1,1), (1,2,1), (2,1,1), (2,2,1),
(1,1,2), (1,2,2), (2,1,2), (2,2,2)]:
bcpoints.append(Vector(*p))
innerBoundary = Polyhedron(bcpoints) # Builds the convex hull
# The outer cube.
bcpoints = vector_of_Vector()
for p in [(0,0,0), (0,3,0), (3,0,0), (3,3,0),
(0,0,3), (0,3,3), (3,0,3), (3,3,3)]:
bcpoints.append(Vector(*p))
outerBoundary = Polyhedron(bcpoints) # Builds the convex hull
#-------------------------------------------------------------------------------
# Generate them nodes.
#-------------------------------------------------------------------------------
def rhoprofile1(posi):
r = (posi - Vector(1.5, 1.5, 1.5)).magnitude()  # distance from the center of the inner cube
return exp(-r*r/(rhoscale*rhoscale))
print "Generator 1"
generator1 = MedialGenerator3d(n = n1,
rho = 1.0,
boundary = innerBoundary,
maxIterations = maxIterations,
fracTol = fracTol,
#tessellationFileName = "test_medial_nodes1_maxiter=%i_tol=%g" % (maxIterations, fracTol),
nNodePerh = nPerh)
print "Generator 2"
generator2 = MedialGenerator3d(n = n2,
rho = 1.0,
boundary = outerBoundary,
holes = [innerBoundary],
maxIterations = maxIterations,
fracTol = fracTol,
#tessellationFileName = "test_medial_nodes2_maxiter=%i_tol=%g" % (maxIterations, fracTol),
nNodePerh = nPerh)
distributeNodes((nodes1, generator1),
(nodes2, generator2))
#-------------------------------------------------------------------------------
# Drop a viz file for inspection.
#-------------------------------------------------------------------------------
Hfield = nodes.Hfield()
HfieldInv = SymTensorField("H inverse", nodes)
for i in xrange(nodes.numNodes):
HfieldInv[i] = Hfield[i].Inverse()
vizfile = siloPointmeshDump(baseName = "test_medial3d_maxiter=%i_tol=%g" % (maxIterations, fracTol),
baseDirectory = "test_medial3d",
fields = ([x.massDensity() for x in nodeSet] +
[x.mass() for x in nodeSet] +
[x.velocity() for x in nodeSet] +
[x.specificThermalEnergy() for x in nodeSet] +
[x.Hfield() for x in nodeSet])
)
11533527
import FWCore.ParameterSet.Config as cms
process = cms.Process('SIM')
# import of standard configurations
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load('Configuration.Geometry.GeometryExtended2015Reco_cff')
process.load('Configuration.Geometry.GeometryExtended2015_cff')
process.load('Configuration/StandardSequences/MagneticField_38T_cff')
process.load('Configuration/StandardSequences/Generator_cff')
process.load('Configuration/StandardSequences/VtxSmearedNoSmear_cff')
process.load('Configuration/StandardSequences/SimExtended_cff')
process.load('Configuration/StandardSequences/EndOfProcess_cff')
process.load('Configuration/EventContent/EventContent_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
# Input source
process.source = cms.Source("EmptySource")
process.generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
PartID = cms.vint32(2112),
MinEta = cms.double(5.5),
MaxEta = cms.double(10000),
MinPhi = cms.double(-3.14159265359), ## in radians
MaxPhi = cms.double(3.14159265359),
MinE = cms.double(2499.99),
MaxE = cms.double(2500.01)
),
Verbosity = cms.untracked.int32(0), ## set to 1 (or greater) for printouts
psethack = cms.string('single neutron E 2.5 TeV'),
AddAntiParticle = cms.bool(False),
firstRun = cms.untracked.uint32(1)
)
process.ProductionFilterSequence = cms.Sequence(process.generator)
# Output definition
process.output = cms.OutputModule("PoolOutputModule",
outputCommands = process.FEVTDEBUGEventContent.outputCommands,
fileName = cms.untracked.string('simevent.root'),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN-SIM'),
filterName = cms.untracked.string('')
),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
)
)
# Special settings
process.g4SimHits.UseMagneticField = cms.bool(False)
process.g4SimHits.Generator.MinEtaCut = cms.double(-9.0)
process.g4SimHits.Generator.MaxEtaCut = cms.double(9.0)
process.g4SimHits.Watchers = cms.VPSet(cms.PSet(
type = cms.string('ZdcTestAnalysis'),
ZdcTestAnalysis = cms.PSet(
Verbosity = cms.int32(0),
StepNtupleFlag = cms.int32(0),
EventNtupleFlag = cms.int32(1),
StepNtupleFileName = cms.string('stepNtuple.root'),
EventNtupleFileName = cms.string('eventNtuple.root')
)
))
process.g4SimHits.ZdcSD.UseShowerLibrary = cms.bool(True)
# Path and EndPath definitions
process.generation_step = cms.Path(process.ProductionFilterSequence+process.pgen)
process.smearing = cms.Path(process.VtxSmeared+process.generatorSmeared)
process.simulation_step = cms.Path(process.psim)
process.endjob_step = cms.Path(process.endOfProcess)
process.out_step = cms.EndPath(process.output)
# Schedule definition
process.schedule = cms.Schedule(process.generation_step,process.smearing,process.simulation_step,process.endjob_step,process.out_step)
def customise(process):
#Adding SimpleMemoryCheck service:
process.SimpleMemoryCheck=cms.Service("SimpleMemoryCheck",
ignoreTotal=cms.untracked.int32(1),
oncePerEventMode=cms.untracked.bool(True))
#Adding Timing service:
process.Timing=cms.Service("Timing")
#Tweak Message logger to dump G4cout and G4cerr messages in G4msg.log
#print process.MessageLogger.__dict__
process.MessageLogger.debugModules=cms.untracked.vstring('g4SimHits')
#Configuring the G4msg.log output
process.MessageLogger.files = dict(G4msg = cms.untracked.PSet(
noTimeStamps = cms.untracked.bool(True)
#First eliminate unneeded output
,threshold = cms.untracked.string('INFO')
,INFO = cms.untracked.PSet(limit = cms.untracked.int32(0))
,DEBUG = cms.untracked.PSet(limit = cms.untracked.int32(0))
,FwkReport = cms.untracked.PSet(limit = cms.untracked.int32(0))
,FwkSummary = cms.untracked.PSet(limit = cms.untracked.int32(0))
,Root_NoDictionary = cms.untracked.PSet(limit = cms.untracked.int32(0))
,FwkJob = cms.untracked.PSet(limit = cms.untracked.int32(0))
,TimeReport = cms.untracked.PSet(limit = cms.untracked.int32(0))
,TimeModule = cms.untracked.PSet(limit = cms.untracked.int32(0))
,TimeEvent = cms.untracked.PSet(limit = cms.untracked.int32(0))
,MemoryCheck = cms.untracked.PSet(limit = cms.untracked.int32(0))
        #TimeModule, TimeEvent, TimeReport are written to LogAbsolute instead of LogInfo with a category
#so they cannot be eliminated from any destination (!) unless one uses the summaryOnly option
#in the Timing Service... at the price of silencing the output needed for the TimingReport profiling
#
#Then add the wanted ones:
,PhysicsList = cms.untracked.PSet(limit = cms.untracked.int32(-1))
,G4cout = cms.untracked.PSet(limit = cms.untracked.int32(-1))
,G4cerr = cms.untracked.PSet(limit = cms.untracked.int32(-1))
,CaloSim = cms.untracked.PSet(limit = cms.untracked.int32(-1))
,ForwardSim = cms.untracked.PSet(limit = cms.untracked.int32(-1))
)
)
#Add these 3 lines to put back the summary for timing information at the end of the logfile
#(needed for TimeReport report)
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
# process.g4SimHits.G4Commands = cms.vstring('/tracking/verbose 1')
return(process)
# End of customisation function definition
process = customise(process)
|
11533567
|
from immudb.client import ImmudbClient
import string
import random
import itertools
import time
import multiprocessing
SIZE = 1000000
CHUNKSIZE = 1000
def chunked(it, size):
it = iter(it)
while True:
p = dict(itertools.islice(it, size))
if not p:
break
yield p
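# e.g. chunked({'a': 1, 'b': 2, 'c': 3}.items(), 2) yields
# {'a': 1, 'b': 2} and then {'c': 3}.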
def massive_test(taskid: int):
ic = ImmudbClient()
ic.login("immudb", "immudb")
# let's fill a big dictionary:
big_dict = {}
for i in range(0, SIZE):
big_dict["verymassif:{:08X}".format(i).encode(
'utf8')] = "value:{:08f}".format(random.random()).encode('utf8')
# now we put all the key/value pairs in immudb
written = 0
t0 = time.time()
for chunk in chunked(big_dict.items(), CHUNKSIZE):
response = ic.setAll(chunk)
        # the response holds the new index position of the merkle tree
assert type(response) != int
written += CHUNKSIZE
t1 = time.time()
print("TASK{}: {} keys written in {:3.2f} seconds".format(taskid, SIZE, t1-t0))
return t1-t0
plist = []
for i in range(0, 4):
p = multiprocessing.Process(target=massive_test, args=(i,))
p.start()
plist.append(p)
for p in plist:
p.join()
|
11533571
|
import os
import unittest
from rdflib import URIRef, Graph
from rdflib.namespace import OWL, RDFS, RDF
from linkml.generators.owlgen import OwlSchemaGenerator
from tests.utils.compare_rdf import compare_rdf
from tests.utils.test_environment import TestEnvironmentTestCase
from tests.test_issues.environment import env
# Tests: https://github.com/biolink/biolinkml/issues/163
class IssueOWLNamespaceTestCase(TestEnvironmentTestCase):
env = env
def _test_owl(self, name: str) -> Graph:
self.env.generate_single_file(f'{name}.owl',
lambda: OwlSchemaGenerator(env.input_path(f'{name}.yaml'),
importmap=env.import_map).serialize(),
value_is_returned=True, comparator=compare_rdf)
g = Graph()
g.parse(env.expected_path(f'{name}.owl'), format="turtle")
return g
def test_issue_owl_namespace(self):
""" Make sure that types are generated as part of the output """
g = self._test_owl('issue_163')
A = URIRef('http://example.org/A')
self.assertIn((A, RDF.type, OWL.Class), g)
NAME = URIRef('http://example.org/name')
self.assertIn((NAME, RDF.type, OWL.ObjectProperty), g)
def test_issue_no_default(self):
""" Make sure that types are generated as part of the output """
g = self._test_owl('issue_163b')
A = URIRef('http://example.org/sample/example1/A')
self.assertIn((A, RDF.type, OWL.Class), g)
NAME = URIRef('http://example.org/sample/example1/name')
self.assertIn((NAME, RDF.type, OWL.ObjectProperty), g)
def test_aliases(self):
""" Make sure aliases work """
g = self._test_owl('issue_163c')
if __name__ == '__main__':
unittest.main()
|
11533597
|
import pytest
from eth.constants import UINT_256_MAX
from trinity.exceptions import OversizeObject
from trinity.utils.headers import sequence_builder
@pytest.mark.parametrize(
'start_num, max_length, skip, reverse, expected',
(
(0, 0, 0, False, ()),
(0, 0, 0, True, ()),
(0, 0, 1, False, ()),
(0, 0, 1, True, ()),
(0, 1, 0, False, (0, )),
(0, 1, 0, True, (0, )),
(0, 1, 1, False, (0, )),
(0, 1, 1, True, (0, )),
(9, 1, 0, False, (9, )),
(9, 1, 0, True, (9, )),
(1, 3, 0, False, (1, 2, 3)),
(0, 5, 1, False, (0, 2, 4, 6, 8)),
(9, 5, 1, True, (9, 7, 5, 3, 1)),
(1, 9, 0, True, (1, 0)),
(UINT_256_MAX - 1, 4, 0, False, (UINT_256_MAX - 1, UINT_256_MAX, )),
# can handle mildly large numbers
(400000000, 1000000, 0, False, tuple(range(400000000, 401000000))),
),
)
def test_sequence(start_num, max_length, skip, reverse, expected):
assert sequence_builder(start_num, max_length, skip, reverse) == expected
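# From the table above: entries step by skip+1 from start_num, the count is
# capped at max_length, and reverse=True counts downward (clamping at zero,
# e.g. sequence_builder(1, 9, 0, True) == (1, 0)).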
TOO_LONG = 2000000
@pytest.mark.parametrize('reverse', (True, False))
@pytest.mark.parametrize('start_num', (0, 400000000))
@pytest.mark.parametrize('skip', (0, 10000))
def test_oversize_sequence(start_num, skip, reverse):
# Instead of using the specific constant, just use a rough TOO_LONG number
# We don't need to worry about edge cases for this gut check
with pytest.raises(OversizeObject):
sequence_builder(start_num, TOO_LONG, skip, reverse)
|
11533607
|
from django.conf import settings
from corehq.util.io import ClosingContextProxy
from kafka import KafkaConsumer
from kafka.client import KafkaClient, SimpleClient
GENERIC_KAFKA_CLIENT_ID = 'cchq-kafka-client'
def get_simple_kafka_client(client_id=GENERIC_KAFKA_CLIENT_ID):
# this uses the old SimpleClient because we are using the old SimpleProducer interface
return ClosingContextProxy(SimpleClient(
hosts=settings.KAFKA_BROKERS,
client_id=client_id,
timeout=30, # seconds
))
def get_kafka_client(client_id=GENERIC_KAFKA_CLIENT_ID):
return ClosingContextProxy(KafkaClient(
bootstrap_servers=settings.KAFKA_BROKERS,
client_id=client_id,
api_version=settings.KAFKA_API_VERSION
))
def get_kafka_consumer():
return ClosingContextProxy(KafkaConsumer(
client_id='pillowtop_utils',
bootstrap_servers=settings.KAFKA_BROKERS,
))
|
11533618
|
from ..commandparser import Member
from ..discordbot import unmute_user
import discord
name = 'unmute'
channels = None
roles = ('helper', 'trialhelper')
args = '<member>'
async def run(message, member: Member):
'Removes a mute from a member'
await unmute_user(
member.id,
reason=f'Unmuted by {str(message.author)}'
)
await message.send(embed=discord.Embed(
description=f'<@{member.id}> has been unmuted.'
))
|
11533631
|
from fluent.syntax import ast as FTL
from . import resolver
class Compiler:
def __call__(self, item):
if isinstance(item, FTL.BaseNode):
return self.compile(item)
if isinstance(item, (tuple, list)):
return [self(elem) for elem in item]
return item
def compile(self, node):
nodename = type(node).__name__
if not hasattr(resolver, nodename):
return node
kwargs = vars(node).copy()
for propname, propvalue in kwargs.items():
kwargs[propname] = self(propvalue)
handler = getattr(self, 'compile_' + nodename, self.compile_generic)
return handler(nodename, **kwargs)
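    # Dispatch is visitor-style: node attributes are compiled recursively,
    # then a compile_<NodeName> method is used if one is defined, otherwise
    # compile_generic instantiates the class of the same name from `resolver`.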
def compile_generic(self, nodename, **kwargs):
return getattr(resolver, nodename)(**kwargs)
def compile_Placeable(self, _, expression, **kwargs):
if isinstance(expression, resolver.Literal):
return expression
return resolver.Placeable(expression=expression, **kwargs)
def compile_Pattern(self, _, elements, **kwargs):
if (
len(elements) == 1 and
isinstance(elements[0], resolver.Placeable)
):
# Don't isolate isolated placeables
return resolver.NeverIsolatingPlaceable(elements[0].expression)
if any(
not isinstance(child, resolver.Literal)
for child in elements
):
return resolver.Pattern(elements=elements, **kwargs)
if len(elements) == 1:
return elements[0]
return resolver.TextElement(
''.join(child(None) for child in elements)
)
|
11533632
|
from unittest import TestCase
from unittest.mock import Mock, call, mock_open, patch
from ruamel.yaml import YAML
from conda_vendor.custom_manifest import CustomManifest, IBManifest
# "mymethod" in dir(dyn)
def test_custom_manifest():
assert "__init__" in dir(CustomManifest)
assert "read_meta_manifest" in dir(CustomManifest)
assert "write_custom_manifest" in dir(CustomManifest)
assert "format_custom_manifest" in dir(CustomManifest)
@patch("conda_vendor.custom_manifest.CustomManifest.read_meta_manifest")
def test_load_manifest(mock, tmp_path):
mock.return_value = "booboobeedoop"
test_manifest_path = tmp_path / "test_manifest.yml"
c = IBManifest(manifest_path=test_manifest_path)
    mock.assert_called_once()
assert "write_custom_manifest" in dir(IBManifest)
assert "format_custom_manifest" in dir(IBManifest)
@patch("conda_vendor.custom_manifest.CustomManifest.read_meta_manifest")
def test_write_custom_manifest(mock_read_meta_manifest, tmp_path):
mock_read_meta_manifest.return_value = None
custom_channel = IBManifest(manifest_path=tmp_path)
test_custom_manifest = {"foomanchu": True}
custom_channel.custom_manifest = test_custom_manifest
expected_custom_manifest = test_custom_manifest
test_output_path = tmp_path / "ironbank_manifest.yaml"
expected_custom_manifest_destination = test_output_path
custom_channel.write_custom_manifest(test_output_path)
with open(expected_custom_manifest_destination, "r") as f:
actual_custom_manifest = YAML(typ="safe").load(
f,
)
assert actual_custom_manifest == expected_custom_manifest
def test_IBManifest_strip_lead_underscore():
test_str = "_poobear"
expected_str = "poobear"
actual_result = IBManifest.strip_lead_underscore(test_str)
assert expected_str == actual_result
@patch("conda_vendor.custom_manifest.CustomManifest.read_meta_manifest")
def test_format_custom_manifest(mock):
test_meta_manifest = {
"main": {
"noarch": {"repodata_url": [], "entries": []},
"linux-64": {
"repodata_url": [],
"entries": [
{
"url": f"https://conda.anaconda.org/main/linux-64/brotlipy-0.7.0-py39h27cfd23_1003.tar.bz2",
"fn": "brotlipy",
"version": "0.7.0",
"channel": f"https://conda.anaconda.org/main/linux-64",
"sha256": "omega_yoyo",
}
],
},
},
"conda-forge": {
"noarch": {
"repodata_url": [],
"entries": [
{
"url": "https://conda.anaconda.org/conda-forge/noarch/ensureconda-1.4.1-pyhd8ed1ab_0.tar.bz2",
"fn": "ensureconda",
"version": "1.4.1",
"channel": "https://conda.anaconda.org/conda-forge/noarch",
"sha256": "yoyo",
}
],
},
"linux-64": {"repodata_url": [], "entries": []},
},
}
expected_iron_bank_manifest = {
"resources": [
{
"url": "https://conda.anaconda.org/main/linux-64/brotlipy-0.7.0-py39h27cfd23_1003.tar.bz2",
"filename": "brotlipy",
"validation": {"type": "sha256", "value": "omega_yoyo"},
},
{
"url": "https://conda.anaconda.org/conda-forge/noarch/ensureconda-1.4.1-pyhd8ed1ab_0.tar.bz2",
"filename": "ensureconda",
"validation": {"type": "sha256", "value": "yoyo"},
},
]
}
mock.return_value = test_meta_manifest
c = IBManifest()
actual_manifest = c.format_custom_manifest()
TestCase().assertDictEqual(actual_manifest, expected_iron_bank_manifest)
@patch("conda_vendor.custom_manifest.CustomManifest.read_meta_manifest")
def test_format_custom_manifest_exists(mock, tmp_path):
dummy_iron_bank_manifest = {"FOO": "ASWELL"}
expected_iron_bank_manifest = dummy_iron_bank_manifest
c = IBManifest(manifest_path=tmp_path)
c.custom_manifest = expected_iron_bank_manifest
actual_manifest = c.format_custom_manifest()
TestCase().assertDictEqual(actual_manifest, expected_iron_bank_manifest)
|
11533639
|
import numpy as np
import decimal
import random
from keras.models import Sequential
from keras.layers import Dense, Activation, LSTM, Dropout
from keras.utils import to_categorical
from keras import optimizers
from keras import metrics
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import TimeSeriesSplit
import pandas as pd
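# Note: QCAlgorithm (and names like Resolution, BrokerageName and AccountType
# used below) are provided by the QuantConnect LEAN runtime; this algorithm is
# meant to run on that platform rather than as a standalone script.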
class Crypto_Trade(QCAlgorithm):
def Initialize(self):
#self.Debug("START: Initialize")
self.SetStartDate(2017,9,1) #Set Start Date
self.SetEndDate(2017,10,14) #Set End Date
self.SetCash(100000) #Set Strategy Cash
self.SetBrokerageModel(BrokerageName.InteractiveBrokersBrokerage, AccountType.Cash)
        self.currency = "EURUSD"
self.AddForex(self.currency,Resolution.Daily)
self.long_list =[]
self.model =Sequential()
self.x=0
#self.Debug("End: Initialize")
def OnData(self, data): #This function runs on every resolution of data mentioned.
#(eg if resolution = daily, it will run daily, if resolution = hourly, it will run hourly.)
#self.Debug("START: Ondata")
currency_data = self.History([self.currency], 10, Resolution.Daily) # Asking for last 10 days of data
self.Debug("History is : " + str(currency_data))
L= len(currency_data)
self.Debug("The length is " + str (L))
if not currency_data.empty: # Making sure the data is not empty and then only proceed with the algo
data = np.array([currency_data.close]) #Get the close prices and make an array
self.Debug("Close prices after making an array" + str(data))
#Data Preparation for input to LSTM
X1 = data[:,0:L-5] #(0 to 5 data)
self.Debug("X1 is " + str(X1))
X2 = data[:,1:L-4] #(1 to 6 data)
self.Debug("X2 is " + str(X2))
            X3 = data[:,2:L-3] #(2 to 7 data)
            self.Debug("X3 is " + str(X3))
            X= np.concatenate([X1,X2,X3],axis=0) # concatenate to join X1 X2 X3
            self.Debug("X after concatenate: " + str(X))
            X_data= np.transpose(X) # transpose to get rows in the form [0,1,2],[1,2,3],[2,3,4],[3,4,5]...
            self.Debug("X after transpose: " + str(X_data))
            Y_data = np.transpose(data[:,3:L-2]) # to get rows in the form [[3],[4],[5]]...
self.Debug("Y : " + str(Y_data))
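            # Illustrative example of the windowing above: with L = 10 close
            # prices [p0..p9], the rows of X_data are [p0,p1,p2], [p1,p2,p3],
            # ..., [p4,p5,p6], and Y_data holds the next price for each row:
            # [p3], [p4], ..., [p7].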
#Normalize the data
scaler = MinMaxScaler()
scaler.fit(X_data)
X_data = scaler.transform(X_data)
self.Debug("X after transformation is " + str(X_data))
scaler1 = MinMaxScaler()
scaler1.fit(Y_data)
Y_data = scaler1.transform(Y_data)
self.Debug("Y after transformation is " + str(Y_data))
if self.x==0: #To make sure the model is build only once and avoid computation at every new data
#USE TimeSeriesSplit to split data into n sequential splits
tscv = TimeSeriesSplit(n_splits=2)
# Make cells and epochs to be used in grid search.
cells = [100,200]
epochs = [100,200]
                # creating a dataframe to store the final results of cross-validation for different combinations of cells and epochs
                df = pd.DataFrame(columns= ['cells','epoch','mse'])
                #Loop over every combination of cells and epochs. In this setup there are 4 combinations: [100,100], [100,200], [200,100], [200,200]
for i in cells:
for j in epochs:
cvscores = []
# to store CV results
                        #Run the LSTM in a loop for every combination of cells and epochs and every train/test split in order to get the average mse for each combination.
for train_index, test_index in tscv.split(X_data):
#self.Debug("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X_data[train_index], X_data[test_index]
Y_train, Y_test = Y_data[train_index], Y_data[test_index]
self.Debug("X_train input before reshaping : " + str(X_train))
#self.Debug("X_test is" + str(X_test))
self.Debug("Y input before reshaping: "+ str(Y_train))
#self.Debug("Y_test is" + str(Y_test))
#self.Debug ( " X train [0] is " + str (X_train[0]))
#self.Debug ( " X train [1] is " + str (X_train[1]))
X_train= np.reshape(X_train, (X_train.shape[0],1,X_train.shape[1]))
self.Debug("X input to LSTM : " + str(X_train))
X_test= np.reshape(X_test, (X_test.shape[0],1,X_test.shape[1]))
self.Debug("Y input to LSTM : "+ str(Y_train))
#self.Debug("START: LSTM Model")
#self.Debug(i)
#self.Debug(j)
model = Sequential()
model.add(LSTM(i, input_shape = (1,3), return_sequences = True))
model.add(Dropout(0.10))
model.add(LSTM(i,return_sequences = True))
model.add(LSTM(i))
model.add(Dropout(0.10))
model.add(Dense(1))
model.compile(loss= 'mean_squared_error',optimizer = 'rmsprop', metrics = ['mean_squared_error'])
model.fit(X_train,Y_train,epochs=j,verbose=0)
#self.Debug("END: LSTM Model")
scores = model.evaluate(X_test, Y_test, verbose=0)
#self.Debug("%s: %f " % (model.metrics_names[1], scores[1]))
cvscores.append(scores[1])
MSE= np.mean(cvscores)
#self.Debug("MSE" + str(MSE))
#Create a dataframe to store output from each combination and append to final results dataframe df.
df1 = pd.DataFrame({ 'cells': [i], 'epoch': [j], 'mse': [MSE]})
                        self.Debug("Individual run output DF1" + str(df1))
                        #Appending individual outputs to the final dataframe for comparison
df = df.append(df1)
self.Debug("Final table of DF"+ str(df))
#Check the optimised values obtained from cross validation
#This code gives the row which has minimum mse and store the values to O_values
O_values = df[df['mse']==df['mse'].min()]
# Extract the optimised values of cells and epochs from above row (having min mse )
O_cells = O_values.iloc[0][0]
O_epochs = O_values.iloc[0][1]
self.Debug( "O_cells" + str (O_cells))
self.Debug( "O_epochs" + str (O_epochs))
#Build model for whole data:
# Repeating the model but for optimised cells and epochs
X_data1= np.reshape(X_data, (X_data.shape[0],1,X_data.shape[1]))
#self.Debug("START: Final_LSTM Model")
self.model.add(LSTM(O_cells, input_shape = (1,3), return_sequences = True))
self.model.add(Dropout(0.10))
self.model.add(LSTM(O_cells,return_sequences = True))
self.model.add(LSTM(O_cells))
self.model.add(Dropout(0.10))
self.model.add(Dense(1))
self.model.compile(loss= 'mean_squared_error',optimizer = 'rmsprop', metrics = ['mean_squared_error'])
self.model.fit(X_data1,Y_data,epochs=O_epochs,verbose=0)
#self.Debug("END: Final_LSTM Model")
self.x=1
#Prepare new data for prediction based above model
# Similar to as we did initially ( data prep for input to LSTM)
X1_new = data[:,-3]
#self.Debug(X1_new)
X2_new = data[:,-2]
#self.Debug(X2_new)
X3_new = data[:,-1]
#self.Debug(X3_new)
X_new= np.concatenate([X1_new,X2_new,X3_new],axis=0)
X_new= np.transpose(X_new)
#self.Debug(X_new)
scaler = MinMaxScaler()
scaler.fit(X_data)
X_new = scaler.transform([X_new])
#self.Debug(X_new)
X_new= np.reshape(X_new,(X_new.shape[0],1,X_new.shape[1]))
#self.Debug(X_new)
# Predicting with the LSTM model
Predict = self.model.predict(X_new)
#Needs to inverse transform as we transformed the data for LSTM input
output = scaler1.inverse_transform(Predict)
self.Debug("Output from LSTM model is" + str(output))
#Checking the current price
price = currency_data.close[-1]
self.Debug("Current price is" + str(price))
            #Make the trading decision based on the LSTM output and the current price.
            #If the output (forecast) is greater than the current price, we buy the currency; else, do nothing.
            # Only one trade at a time, hence the list "self.long_list":
            #as long as the currency is in that list, no further buying is done.
            # Risk and reward are defined: exit the trade at 1% loss or 1% profit.
            # Generally the LSTM model predicts above/below the current price, so a scaling factor is used
            #to scale it down/up. Here the number is 1.1 but it can be backtested and optimised.
if 1.1*output > price and self.currency not in self.long_list:
self.Debug("output is greater")
# Buy the currency with X% of holding in this case 90%
self.SetHoldings(self.currency, 0.9)
self.long_list.append(self.currency)
self.Debug("long")
if self.currency in self.long_list:
cost_basis = self.Portfolio[self.currency].AveragePrice
#self.Debug("cost basis is " +str(cost_basis))
if ((price <= float(0.99) * float(cost_basis)) or (price >= float(1.01) * float(cost_basis))):
self.Debug("SL-TP reached")
#self.Debug("price is" + str(price))
#If true then sell
self.SetHoldings(self.currency, 0)
self.long_list.remove(self.currency)
self.Debug("squared")
#self.Debug("END: Ondata")
|
11533643
|
from django.core.management.base import BaseCommand
from migrate_dns.destructo import destroy
class Command(BaseCommand):
args = ''
def handle(self, *args, **options):
        destroy() # Wipe the db
|
11533646
|
import pytest
from unittestmock import UnitTestMock
import numpy as np
from cykhash import all_int64, all_int64_from_iter, Int64Set_from, Int64Set_from_buffer
from cykhash import all_int32, all_int32_from_iter, Int32Set_from, Int32Set_from_buffer
from cykhash import all_float64, all_float64_from_iter, Float64Set_from, Float64Set_from_buffer
from cykhash import all_float32, all_float32_from_iter, Float32Set_from, Float32Set_from_buffer
from cykhash import all_pyobject, all_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer
ALL={'int32': all_int32, 'int64': all_int64, 'float64' : all_float64, 'float32' : all_float32}
ALL_FROM_ITER={'int32': all_int32_from_iter, 'int64': all_int64_from_iter, 'float64' : all_float64_from_iter, 'float32' : all_float32_from_iter}
FROM_SET={'int32': Int32Set_from, 'int64': Int64Set_from, 'float64' : Float64Set_from, 'float32' : Float32Set_from, 'pyobject' : PyObjectSet_from}
BUFFER_SIZE = {'int32': 'i', 'int64': 'q', 'float64' : 'd', 'float32' : 'f'}
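# array.array typecodes used to build the test buffers:
# 'i' (C int), 'q' (long long), 'd' (double), 'f' (float)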
import array
@pytest.mark.parametrize(
"value_type",
['int64', 'int32', 'float64', 'float32']
)
class TestAll(UnitTestMock):
def test_all_yes(self, value_type):
s=FROM_SET[value_type]([2,4,6])
a=array.array(BUFFER_SIZE[value_type], [2,4,6]*6)
result=ALL[value_type](a,s)
self.assertEqual(result, True)
def test_all_yes_from_iter(self, value_type):
s=FROM_SET[value_type]([2,4,6])
a=[2,4,6]*6
result=ALL_FROM_ITER[value_type](a,s)
self.assertEqual(result, True)
def test_all_last_no(self, value_type):
s=FROM_SET[value_type]([2,4,6])
a=array.array(BUFFER_SIZE[value_type], [2]*6+[3])
result=ALL[value_type](a,s)
self.assertEqual(result, False)
def test_all_last_no_from_iter(self, value_type):
s=FROM_SET[value_type]([2,4,6])
a=[2]*6+[3]
result=ALL_FROM_ITER[value_type](a,s)
self.assertEqual(result, False)
def test_all_empty(self, value_type):
s=FROM_SET[value_type]([])
a=array.array(BUFFER_SIZE[value_type],[])
result=ALL[value_type](a,s)
self.assertEqual(result, True)
def test_all_empty_from_iter(self, value_type):
s=FROM_SET[value_type]([])
a=[]
result=ALL_FROM_ITER[value_type](a,s)
self.assertEqual(result, True)
def test_all_empty_set(self, value_type):
s=FROM_SET[value_type]([])
a=array.array(BUFFER_SIZE[value_type],[1])
result=ALL[value_type](a,s)
self.assertEqual(result, False)
def test_all_empty_set_from_iter(self, value_type):
s=FROM_SET[value_type]([])
a=[1]
result=ALL_FROM_ITER[value_type](a,s)
self.assertEqual(result, False)
def test_noniter_from_iter(self, value_type):
s=FROM_SET[value_type]([])
a=1
with pytest.raises(TypeError) as context:
ALL_FROM_ITER[value_type](a,s)
self.assertTrue("object is not iterable" in str(context.value))
def test_memview_none(self, value_type):
s=FROM_SET[value_type]([])
self.assertEqual(ALL[value_type](None,s), True)
def test_dbnone(self, value_type):
a=array.array(BUFFER_SIZE[value_type],[1])
self.assertEqual(ALL[value_type](a,None), False)
def test_dbnone_empty_query(self, value_type):
a=array.array(BUFFER_SIZE[value_type],[])
self.assertEqual(ALL[value_type](a,None), True)
def test_dbnone_from_iter(self, value_type):
a=[1]
self.assertEqual(ALL_FROM_ITER[value_type](a,None), False)
def test_dbnone_empty_query_from_iter(self, value_type):
self.assertEqual(ALL_FROM_ITER[value_type]([],None), True)
class TestAllPyObject(UnitTestMock):
def test_all_yes(self):
s=PyObjectSet_from([2,4,666])
a=np.array([2,4,666]*6, dtype=np.object)
result=all_pyobject(a,s)
self.assertEqual(result, True)
def test_all_from_iter(self):
s=PyObjectSet_from([2,4,666])
a=[2,4,666]*6
result=all_pyobject_from_iter(a,s)
self.assertEqual(result, True)
def test_all_last_no(self):
s=PyObjectSet_from([2,4,666])
a=np.array([2,4,666]*6+[3], dtype=np.object)
result=all_pyobject(a,s)
self.assertEqual(result, False)
def test_all_last_no_from_iter(self):
s=PyObjectSet_from([2,4,666])
a=[2,4,666]*6+[3]
result=all_pyobject_from_iter(a,s)
self.assertEqual(result, False)
def test_all_empty(self):
s=PyObjectSet_from([])
a=np.array([], dtype=np.object)
result=all_pyobject(a,s)
self.assertEqual(result, True)
def test_all_empty_from_iter(self):
s=PyObjectSet_from([])
a=[]
result=all_pyobject_from_iter(a,s)
self.assertEqual(result, True)
def test_all_empty_set(self):
s=PyObjectSet_from([])
a=np.array([1], dtype=np.object)
result=all_pyobject(a,s)
self.assertEqual(result, False)
def test_all_empty_set_from_iter(self):
s=PyObjectSet_from([])
a=[1]
result=all_pyobject_from_iter(a,s)
self.assertEqual(result, False)
def test_noniter_from_iter(self):
s=PyObjectSet_from([])
a=1
with pytest.raises(TypeError) as context:
all_pyobject_from_iter(a,s)
self.assertTrue("object is not iterable" in str(context.value))
def test_memview_none(self):
s=PyObjectSet_from([])
self.assertEqual(all_pyobject(None,s), True)
def test_dbnone(self):
a=np.array([1], dtype=np.object)
self.assertEqual(all_pyobject(a,None), False)
def test_dbnone_from_iter(self):
a=[1]
self.assertEqual(all_pyobject_from_iter(a,None), False)
|
11533647
|
import argparse, os, glob, cv2, torch, math, imageio, lpips
from tqdm import tqdm
import kornia as k, numpy as np, torchvision
from get_model import Model
from utils import auxiliaries as aux
from data.get_dataloder import get_eval_loader
# setup argparser
parser = argparse.ArgumentParser()
parser.add_argument('-gpu', type=str, required=True, help="Define GPU on which to run")
parser.add_argument('-dataset', type=str, required=True, help='Specify dataset')
parser.add_argument('-data_path', type=str, required=False, help="Path to dataset arranged as described in readme")
parser.add_argument('-ckpt_path', type=str, required=False, help='If ckpt outside of repo')
parser.add_argument('-seq_length', type=int, default=16)
parser.add_argument('-n_samples', type=int, default=15, help='How many test instances to generate samples for')
parser.add_argument('-n_realiz', type=int, default=8, help='How many realizations generated for each test instance')
parser.add_argument('-bs', type=int, default=6, help='Batchsize')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
assert args.dataset == 'bair'
path_ds = f'{args.dataset}/{args.texture}/' if args.dataset == 'DTDB' else f'{args.dataset}'
ckpt_path = f'./models/{path_ds}/stage2_control/' if not args.ckpt_path else args.ckpt_path
## Load model from config
model = Model(ckpt_path, args.seq_length)
# set up dataloader
dataset = get_eval_loader(args.dataset, args.seq_length + 1, args.data_path, model.config, control=True)
dataloader = torch.utils.data.DataLoader(dataset, num_workers=10, batch_size=args.bs, shuffle=False)
# Generate samples
seq_fake = []
with torch.no_grad():
for _ in range(args.n_realiz):
seq_fakes = []
num_samples = 0
for batch_idx, file_dict in enumerate(dataloader):
seq = file_dict["seq"].type(torch.FloatTensor).cuda()
seq_gen = model(seq[:, 0], cond=file_dict["cond"])
seq_fakes.append(seq_gen.detach().cpu())
num_samples += seq_gen.size(0)
if num_samples >= args.n_samples:
break
seq_fake.append(torch.cat(seq_fakes))
videos = torch.stack(seq_fake, 1)[:args.n_samples]
del model
torch.cuda.empty_cache()
## Save video as gif
save_path = f'./assets/results/bair_endpoint/'
os.makedirs(os.path.dirname(save_path), exist_ok=True)
for idx, vid in enumerate(videos):
gif = aux.convert_seq2gif(vid)
imageio.mimsave(save_path + f'endpoint_{idx}.gif', gif.astype(np.uint8), fps=3)
torchvision.utils.save_image(vid[:, -1], save_path + f'endpoint_{idx}.png', normalize=True)
print(f'Animations saved in {save_path}')
|
11533650
|
import argparse
import os, sys
import logging
import json
import csv
from tqdm.auto import tqdm
import torch
from torch.utils.data import DataLoader, SequentialSampler
import sentence_transformers as sent_trans
import transformers
from transformers import set_seed
import accelerate
from accelerate import Accelerator
from dataset import SimpleDataset, padding_util
from model import build_encoder, DualEncoderModel
from utils import perform_eval, eval_and_cluster
import pandas as pd
import warnings
from main import parse_args
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
logger = logging.getLogger(__name__)
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
args = parse_args()
distributed_args = accelerate.DistributedDataParallelKwargs(find_unused_parameters=True)
accelerator = Accelerator(kwargs_handlers=[distributed_args])
device = accelerator.device
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
filename=f'xmc_{args.dataset}_{args.log}_evaluate.log',
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
ch = logging.StreamHandler(sys.stdout)
logger.addHandler(ch)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
logger.info(sent_trans.__file__)
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Load pretrained model and tokenizer
if args.model_name_or_path == 'bert-base-uncased' or args.model_name_or_path == 'sentence-transformers/paraphrase-mpnet-base-v2':
label_encoder = build_encoder(
args.model_name_or_path,
args.max_label_length,
args.pooling_mode,
args.proj_emb_dim,
)
else:
label_encoder = sent_trans.SentenceTransformer(args.model_name_or_path)
tokenizer = label_encoder._first_module().tokenizer
instance_encoder = label_encoder
model = DualEncoderModel(
label_encoder,
instance_encoder,
)
model = model.to(device)
# the whole label set
data_path = os.path.join(os.path.abspath(os.getcwd()), 'dataset', args.dataset)
all_labels = pd.read_json(os.path.join(data_path, 'lbl.json'),lines=True)
label_list = list(all_labels.title)
label_ids = list(all_labels.uid)
label_data = SimpleDataset(label_list, transform=tokenizer.encode)
# label dataloader for searching
sampler = SequentialSampler(label_data)
label_padding_func = lambda x: padding_util(x, tokenizer.pad_token_id, 64)
label_dataloader = DataLoader(label_data, sampler=sampler, batch_size=16, collate_fn=label_padding_func)
# test data
data_path = os.path.join(os.path.abspath(os.getcwd()), 'dataset', args.dataset)
try:
accelerator.print("load cache")
all_instances = torch.load(os.path.join(data_path, 'all_passages_with_titles.json.cache.pt'))
test_data = SimpleDataset(all_instances.values())
except:
if args.mode == 'construct-pseudo':
test_path = os.path.join(data_path, 'trn.json')
else:
test_path = os.path.join(data_path, 'tst.json')
all_instances = {}
test_ids = []
with open(test_path) as fp:
for line in fp:
inst = json.loads(line.strip())
all_instances[inst['uid']] = inst['title'] + '\t' + inst['content']
test_ids.append(inst['uid'])
simple_transform = lambda x: tokenizer.encode(x, max_length=288, truncation=True)
test_data = SimpleDataset(list(all_instances.values()), transform=simple_transform)
inst_num = len(test_data)
sampler = SequentialSampler(test_data)
sent_padding_func = lambda x: padding_util(x, tokenizer.pad_token_id, 288)
instance_dataloader = DataLoader(test_data, sampler=sampler, batch_size=128, collate_fn=sent_padding_func)
# Prepare everything with our `accelerator`.
model, label_dataloader, instance_dataloader = accelerator.prepare(model, label_dataloader, instance_dataloader)
if args.mode == 'construct-pseudo':
D, I, _ = perform_eval(accelerator.unwrap_model(model), label_dataloader, label_ids, instance_dataloader, inst_num, test_ids, accelerator)
pseudo_pair_path = os.path.join(data_path, 'pseudo_pos.json')
if accelerator.is_local_main_process:
with open(pseudo_pair_path, 'w') as f:
for row_id in tqdm(range(inst_num)):
inst_id = test_ids[row_id]
item = {'uid': inst_id}
predict_target = []
predict_score = []
for col_id, score in zip(I[row_id][:5], D[row_id][:5]):
predict_target.append(int(col_id))
predict_score.append(float(score))
item['predict_ind'] = predict_target
item['score'] = predict_score
f.write(json.dumps(item) + '\n')
else:
# prepare pairs
reader = csv.reader(open(os.path.join(data_path, 'all_pairs.txt'), encoding="utf-8"), delimiter=" ")
qrels = {}
for id, row in enumerate(reader):
query_id, corpus_id, score = row[0], row[1], int(row[2])
if query_id not in qrels:
qrels[query_id] = {corpus_id: score}
else:
qrels[query_id][corpus_id] = score
eval_and_cluster(args, logger, 0, accelerator.unwrap_model(model), label_dataloader, label_ids,
instance_dataloader, inst_num, test_ids, qrels, accelerator)
|
11533657
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..classification import resnet
class DecoderBlock(nn.Module):
def __init__(self, in_channels, mid_channels, out_channels):
super(DecoderBlock, self).__init__()
self.conv1 = nn.Sequential(nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True))
self.conv2 = nn.Sequential(nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True))
# self.up = nn.ConvTranspose2d(out_channels, out_channels, kernel_size=3, stride=2, padding=1)
self.up = nn.Upsample(scale_factor=2, mode='bilinear')
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.up(x)
return x
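# A DecoderBlock maps (B, in_channels, H, W) -> (B, out_channels, 2H, 2W):
# two 3x3 Conv+BN+ReLU stages followed by 2x bilinear upsampling.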
class UNet50(nn.Module):
def __init__(self, num_classes=2, in_channels=3, pretrained=True):
super(UNet50, self).__init__()
from torchvision.models import resnet101
# self.encoder = resnet50(num_classes=num_classes, in_channels=in_channels, pretrained=pretrained)
self.encoder = resnet.resnet50(num_classes=num_classes, pretrained=pretrained)
self.encoder1 = nn.Sequential(self.encoder.conv1, self.encoder.bn1, self.encoder.relu)
self.encoder2 = nn.Sequential(self.encoder.maxpool, self.encoder.layer1)
self.encoder3 = self.encoder.layer2
self.encoder4 = self.encoder.layer3
self.encoder5 = self.encoder.layer4
self.center = nn.Sequential(nn.MaxPool2d(kernel_size=2),
DecoderBlock(2048, 2048, 2048))
self.decoder5 = DecoderBlock(4096, 1024+1024//2, 1024)
self.decoder4 = DecoderBlock(2048, 512+512//2, 512)
self.decoder3 = DecoderBlock(1024, 256+256//2, 256)
self.decoder2 = DecoderBlock(512, 128+128//2, 64)
self.decoder1 = DecoderBlock(128, 64+64//2, 32)
self.out = nn.Sequential(nn.Conv2d(32, 16, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(16, num_classes, kernel_size=3, padding=1))
# def forward(self, x_pre: torch.tensor, x_post: torch.tensor):
def forward(self, x):
# suppose x: (batch_size, 3, 512, 512)
encoder1 = self.encoder1(x) #batchsize, 64, 256, 256
encoder2 = self.encoder2(encoder1) #batchsize, 256, 128, 128
encoder3 = self.encoder3(encoder2) #batchsize, 512, 64, 64
encoder4 = self.encoder4(encoder3) #batchsize, 1024, 32, 32
encoder5 = self.encoder5(encoder4) #batchsize, 2048, 16, 16
center = self.center(encoder5)
decoder5 = self.decoder5(torch.cat([encoder5, center], dim=1))
decoder4 = self.decoder4(torch.cat([encoder4, decoder5], dim=1))
decoder3 = self.decoder3(torch.cat([encoder3, decoder4], dim=1))
decoder2 = self.decoder2(torch.cat([encoder2, decoder3], dim=1))
decoder1 = self.decoder1(torch.cat([encoder1, decoder2], dim=1))
out = self.out(decoder1)
return out
if __name__ == '__main__':
import torch
# x = torch.randn(size=(1, 1024, 16, 16))
# test decoder block
# decoder = DecoderBlock(1024, 600, 512)
# decoder(x)
x = torch.randn(size=(2, 3, 512, 512))
    model = UNet50()
out = model(x)
print(out.shape)
|
11533665
|
from .. utils import TranspileTestCase, BuiltinFunctionTestCase, SAMPLE_SUBSTITUTIONS
class TupleTests(TranspileTestCase):
pass
class BuiltinTupleFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["tuple"]
not_implemented = [
'test_tuple',
]
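    # Presumably ordering-insensitive: each expected output may come out in any
    # element order (e.g. when the tuple is built from a set), so every
    # permutation of it is listed below as an acceptable substitution.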
substitutions = {
"('one', 'two', 'six')": [
"('two', 'one', 'six')", "('six', 'one', 'two')", "('one', 'six', 'two')", "('two', 'six', 'one')", "('six', 'two', 'one')"
],
"('on', 'to', 'an')": [
"('to', 'on', 'an')", "('an', 'on', 'to')", "('on', 'an', 'to')", "('to', 'an', 'on')", "('an', 'to', 'on')"
],
"(1, 2.3456, 7)": [
"(2.3456, 1, 7)", "(7, 1, 2.3456)", "(1, 7, 2.3456)", "(2.3456, 7, 1)", "(7, 2.3456, 1)"
],
"('a', 'c', 'd')": [
"('c', 'a', 'd')", "('d', 'a', 'c')", "('a', 'd', 'c')", "('c', 'd', 'a')", "('d', 'c', 'a')"
]
}
substitutions.update(SAMPLE_SUBSTITUTIONS)
|
11533678
|
import pandas as pd
from plotly.graph_objects import Figure
from ml_matrics import ROOT, spacegroup_sunburst
phonons = pd.read_csv(f"{ROOT}/data/matbench-phonons.csv")
def test_spacegroup_sunburst():
fig = spacegroup_sunburst(phonons.sg_number)
assert isinstance(fig, Figure)
assert set(fig.data[0].parents) == {
"",
"cubic",
"trigonal",
"triclinic",
"orthorhombic",
"tetragonal",
"hexagonal",
"monoclinic",
}
assert fig.data[0].branchvalues == "total"
spacegroup_sunburst(phonons, sgp_col="sg_number")
spacegroup_sunburst(phonons.sg_number, show_values="percent")
|
11533693
|
from jumpscale.core.exceptions import Input
from jumpscale.clients.explorer.models import Volume, DiskType, ContainerMount, WorkloadType, Container
from typing import Union
class VolumesGenerator:
""" """
def create(self, node_id: str, pool_id: int, size: int = 5, type: Union[str, DiskType] = DiskType.HDD) -> Volume:
"""add a volume to the reservation
Args:
node_id(str): id of the node where to reserve the volume
            pool_id(int): the capacity pool ID
size(int, optional): size in GiB. Defaults to 5.
type(Union[str,DiskType], optional): type of disk to use. Can be SSD or HDD. Defaults to "HDD".
Returns:
Volume: the newly created volume object
"""
if isinstance(type, str):
type = getattr(DiskType, type)
volume = Volume()
volume.size = size
volume.type = type
volume.info.node_id = node_id
volume.info.pool_id = pool_id
volume.info.workload_type = WorkloadType.Volume
return volume
def attach_existing(self, container: Container, volume_id: Union[str, Volume], mount_point: str):
"""attach an existing volume to a container.
The volume must already exist on the node
Args:
            container(Container): container object returned from container.create_container function
volume_id(Union[str,volume]): the volume to attached to the container or its full ID
mount_point(str): path where to mount the volume in the container
"""
if isinstance(volume_id, Volume):
if not volume_id.id:
raise Input("volume needs to be deployed before it can be attached to a container")
volume_id = f"{volume_id.id}-1"
vol = ContainerMount()
vol.volume_id = volume_id
vol.mountpoint = mount_point
container.volumes.append(vol)
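# Minimal usage sketch (illustrative values; the volume must be deployed, i.e.
# have an id assigned, before it can be attached):
#
#   gen = VolumesGenerator()
#   vol = gen.create(node_id="some-node-id", pool_id=1, size=10, type="SSD")
#   # ... deploy the reservation so vol.id is set ...
#   gen.attach_existing(container, vol, "/data")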
|
11533715
|
from django.template.defaultfilters import urlizetrunc
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class UrlizetruncTests(SimpleTestCase):
@setup({
'urlizetrunc01': '{% autoescape off %}{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}{% endautoescape %}'
})
def test_urlizetrunc01(self):
output = self.engine.render_to_string(
'urlizetrunc01',
{
'a': '"Unsafe" http://example.com/x=&y=',
'b': mark_safe('"Safe" http://example.com?x=&y='),
},
)
self.assertEqual(
output,
'"Unsafe" <a href="http://example.com/x=&y=" rel="nofollow">http:...</a> '
'"Safe" <a href="http://example.com?x=&y=" rel="nofollow">http:...</a>'
)
@setup({'urlizetrunc02': '{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}'})
def test_urlizetrunc02(self):
output = self.engine.render_to_string(
'urlizetrunc02',
{
'a': '"Unsafe" http://example.com/x=&y=',
'b': mark_safe('"Safe" http://example.com?x=&y='),
},
)
self.assertEqual(
output,
'"Unsafe" <a href="http://example.com/x=&y=" rel="nofollow">http:...</a> '
'"Safe" <a href="http://example.com?x=&y=" rel="nofollow">http:...</a>'
)
class FunctionTests(SimpleTestCase):
def test_truncate(self):
uri = 'http://31characteruri.com/test/'
self.assertEqual(len(uri), 31)
self.assertEqual(
urlizetrunc(uri, 31),
'<a href="http://31characteruri.com/test/" rel="nofollow">'
'http://31characteruri.com/test/</a>',
)
self.assertEqual(
urlizetrunc(uri, 30),
'<a href="http://31characteruri.com/test/" rel="nofollow">'
'http://31characteruri.com/t...</a>',
)
self.assertEqual(
urlizetrunc(uri, 2),
'<a href="http://31characteruri.com/test/"'
' rel="nofollow">...</a>',
)
def test_overtruncate(self):
self.assertEqual(
urlizetrunc('http://short.com/', 20), '<a href='
'"http://short.com/" rel="nofollow">http://short.com/</a>',
)
def test_query_string(self):
self.assertEqual(
urlizetrunc('http://www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&meta=', 20),
'<a href="http://www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&'
'meta=" rel="nofollow">http://www.google...</a>',
)
def test_non_string_input(self):
self.assertEqual(urlizetrunc(123, 1), '123')
def test_autoescape(self):
self.assertEqual(
urlizetrunc('foo<a href=" google.com ">bar</a>buz', 10),
'foo<a href=" <a href="http://google.com" rel="nofollow">google.com</a> ">bar</a>buz'
)
def test_autoescape_off(self):
self.assertEqual(
urlizetrunc('foo<a href=" google.com ">bar</a>buz', 9, autoescape=False),
'foo<a href=" <a href="http://google.com" rel="nofollow">google...</a> ">bar</a>buz',
)
|
11533717
|
import doctest
import unittest
from .doctest_2to3 import doctest_suite
def broken_function():
raise Exception('This is broken')
class MyTestCase(unittest.TestCase):
def test(self):
"""
DocTests (pychemia.utils) [exceptions] :
"""
from pychemia.utils.periodic import atomic_number
with self.assertRaises(Exception) as context:
atomic_number(['H', u'A'])
# self.assertTrue(u'Atomic symbol not found' == context.exception)
from pychemia.utils.computing import read_file
with self.assertRaises(Exception) as context:
read_file('/dev/abc')
# self.assertTrue('Could not open file: /dev/abc' in context.exception)
from pychemia.utils.computing import get_float
with self.assertRaises(Exception) as context:
get_float('3i')
# self.assertTrue("Could not convert '3i' into a float number" in context.exception)
def test_periodic():
"""
DocTests (pychemia.utils.periodic) :
"""
import pychemia.utils.periodic
dt = doctest.testmod(pychemia.utils.periodic, verbose=True)
assert dt.failed == 0
def test_mathematics():
"""
DocTests (pychemia.utils.mathematics) :
"""
import pychemia.utils.mathematics
dt = doctest.testmod(pychemia.utils.mathematics, verbose=True)
assert dt.failed == 0
def test_computing():
"""
DocTests (pychemia.utils.computing) :
"""
import pychemia.utils.computing
suite = unittest.TestSuite()
suite.addTest(doctest_suite(pychemia.utils.computing))
runner = unittest.TextTestRunner(verbosity=1)
result = runner.run(suite)
assert result.wasSuccessful()
if __name__ == "__main__":
    unittest.main(defaultTest='test_computing')
|
11533723
|
import sys
import os
import subprocess
from pathlib import Path
import argparse
from tempfile import mkstemp
import re
def remove_inouts(jsonpath, replacewith='input'):
"""Replaces inouts with either input or output statements.
Netlistsvg does not parse inout ports as for now, so they need to be
replaced with either input or output to produce a diagram.
Parameters
----------
jsonpath : str
Path to JSON file to fix
replacewith : str
The string to replace 'inout', can be 'input' or 'output'
"""
assert replacewith in ['input', 'output']
with open(jsonpath, 'r') as withinouts:
lines = withinouts.readlines()
with open(jsonpath, 'w') as withoutinouts:
for line in lines:
withoutinouts.write(re.sub('inout', replacewith, line))
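# Example: remove_inouts('design.json', 'input') rewrites the file in place,
# turning every occurrence of the substring 'inout' into 'input'. Note the
# regex is unanchored, so 'inout' inside longer identifiers is replaced too.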
def main(argv):
parser = argparse.ArgumentParser(argv[0])
parser.add_argument(
'verilog_rtl_dir',
help="Path to the project's verilog/rtl directory",
type=Path)
parser.add_argument(
'output',
help="Path to the output SVG file",
type=Path)
parser.add_argument(
'--num-iopads',
help='Number of iopads to render',
type=int,
default=38)
parser.add_argument(
'--yosys-executable',
help='Path to yosys executable',
type=Path,
default='yosys')
parser.add_argument(
'--netlistsvg-executable',
help='Path to netlistsvg executable',
type=Path,
default='netlistsvg')
parser.add_argument(
'--inouts-as',
help='To what kind of IO should inout ports be replaced',
choices=['input', 'output'],
default='input'
)
args = parser.parse_args(argv[1:])
fd, jsonpath = mkstemp(suffix='-yosys.json')
os.close(fd)
yosyscommand = [
f'{str(args.yosys_executable)}',
'-p',
'read_verilog pads.v defines.v; ' +
'read_verilog -lib -overwrite *.v; ' +
f'verilog_defines -DMPRJ_IO_PADS={args.num_iopads}; ' +
'read_verilog -overwrite caravel.v; ' +
'hierarchy -top caravel; ' +
'proc; ' +
'opt; ' +
f'write_json {jsonpath}; '
]
result = subprocess.run(
yosyscommand,
cwd=args.verilog_rtl_dir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
exitcode = 0
if result.returncode != 0:
print(f'Failed to run: {" ".join(yosyscommand)}', file=sys.stderr)
print(result.stdout.decode())
exitcode = result.returncode
else:
# TODO once netlistsvg supports inout ports, this should be removed
remove_inouts(jsonpath, args.inouts_as)
command = f'{args.netlistsvg_executable} {jsonpath} -o {args.output}'
result = subprocess.run(
command.split(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
if result.returncode != 0:
print(f'Failed to run: {command}', file=sys.stderr)
print(result.stdout.decode())
exitcode = result.returncode
os.unlink(jsonpath)
sys.exit(exitcode)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
11533733
|
from zenpy import Zenpy
from zenpy.lib.api_objects import Ticket
from zenpy.lib.api_objects import Comment
from zenpy.lib.exception import RecordNotFoundException
from zenpy.lib.exception import APIException
from st2actions.runners.pythonrunner import Action
__all__ = [
'ZendeskAction'
]
class ZendeskAction(Action):
def __init__(self, config):
super(ZendeskAction, self).__init__(config=config)
self.email = self.config['email']
self.token = self.config['api_token']
self.subdomain = self.config['subdomain']
self.credentials = {
'email': self.email,
'token': self.token,
'subdomain': self.subdomain
}
self.api = Zenpy(**self.credentials)
def clean_response(self, text):
        return text.replace('\n', ' ').replace('  ', ' ').strip()
def url_for_ticket(self, ticket):
return 'https://{}.zendesk.com/agent/tickets/{}'.format(self.subdomain, ticket)
def api_search(self, query, search_type):
return self.api.search(query, type=search_type, sort_by='created_at', sort_order='desc')
def create_ticket(self, subject, description):
ticket = Ticket(subject=subject, description=description)
try:
created_ticket_audit = self.api.tickets.create(ticket)
return {
'ticket_id': created_ticket_audit.ticket.id,
'ticket_url': self.url_for_ticket(created_ticket_audit.ticket.id),
'subject': self.clean_response(subject),
'description': self.clean_response(description)
}
except APIException:
return {'error': 'Could not create ticket with provided parameters'}
except Exception as e:
self.logger.error(e)
return {'error': 'Could not make API request'}
def search_tickets(self, query, search_type='ticket', limit=10):
try:
query_results = self.api_search(query, search_type)
results_clean = map(lambda t: {
'ticket_id': t.id,
'ticket_url': self.url_for_ticket(t.id),
'ticket_status': t.status,
'subject': self.clean_response(t.subject),
'description': self.clean_response(t.description)},
list(query_results)[:limit]
)
return {'search_results': results_clean}
except APIException:
return {'error': 'Could not execute search for query: {}'.format(query)}
except Exception as e:
self.logger.error(e)
return {'error': 'There was an error executing your search'}
def update_ticket(self, ticket_id, comment_text, public):
try:
ticket = self.api.tickets(id=ticket_id)
ticket.comment = Comment(body=comment_text, public=public)
self.api.tickets.update(ticket)
return {
'ticket_id': ticket_id,
'ticket_url': self.url_for_ticket(ticket_id),
'body': self.clean_response(comment_text),
'public': public
}
except RecordNotFoundException:
return {'error': 'Could not find ticket #{}'.format(ticket_id)}
except Exception as e:
self.logger.error(e)
return {'error': 'Could not update ticket'}
def update_ticket_status(self, ticket_id, status):
valid_statuses = ['new', 'open', 'pending', 'solved', 'closed']
if status in valid_statuses:
try:
ticket = self.api.tickets(id=ticket_id)
ticket.status = status
self.api.tickets.update(ticket)
return {
'ticket_id': ticket_id,
'ticket_url': self.url_for_ticket(ticket_id),
'status': status
}
except RecordNotFoundException:
return {'error': 'Could not find ticket #{}'.format(ticket_id)}
except Exception as e:
self.logger.error(e)
return {'error': 'Could not update ticket status'}
else:
return {'error': 'Invalid status given for ticket'}
|
11533746
|
from tottle import BaseStateGroup
from tottle.bot import Bot, Message
# Create a simple bot
bot = Bot("paste-token-here")
# Let's make a group of states
# (BaseStateGroup is IntEnum)
class ProfileState(BaseStateGroup):
NAME = 1
AGE = 2
# <state = None> handles all events with no state;
# you can add StateRule to auto_rules in blueprint for example
@bot.on.private_message(state=None)
async def start_handler(message: Message):
await message.answer("Good to see you! What's your name?")
# If you handle chats you can set state dispenser key to from_id
# or you can implement custom state dispenser and base on other features
await bot.state_dispenser.set(message.chat.id, ProfileState.NAME)
@bot.on.message(state=ProfileState.NAME)
async def name_handler(message: Message):
await message.answer("Sounds beautiful! Please write your age.")
await bot.state_dispenser.set(message.chat.id, ProfileState.AGE)
@bot.on.message(state=ProfileState.AGE)
async def greeting_age(message: Message):
await message.answer(
"Ah, I see... Well, nice to meet ya! "
"Write to me whenever you want to talk again!"
)
await bot.state_dispenser.delete(message.chat.id)
bot.run_forever()
|
11533761
|
import copy
from funboost.utils import un_strict_json_dumps
class DataClassBase:
"""
使用类实现的 简单数据类。
也可以使用装饰器来实现数据类
"""
def __new__(cls, **kwargs):
self = super().__new__(cls)
self.__dict__ = copy.copy({k: v for k, v in cls.__dict__.items() if not k.startswith('__')})
return self
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def __call__(self, ) -> dict:
return self.get_dict()
def get_dict(self):
return {k: v.get_dict() if isinstance(v, DataClassBase) else v for k, v in self.__dict__.items()}
def __str__(self):
return f"{self.__class__} {self.get_dict()}"
def __getitem__(self, item):
return getattr(self, item)
    def get_json(self):
        return un_strict_json_dumps.dict2json(self.get_dict())
if __name__ == '__main__':
import datetime
class A(DataClassBase):
x = 1
y = 2
z = datetime.datetime.now()
print(A())
print(A(y=3))
print(A(y=5).get_dict())
print(A()['y'])
print(A().y)
|
11533764
|
from __future__ import absolute_import, unicode_literals
from setuptools import setup, find_packages
from codecs import open
import os
here = os.path.dirname(__file__)
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='vcrpy-unittest',
version='0.1.7',
description='Python unittest integration for vcr.py',
long_description=long_description,
url='https://github.com/agriffis/vcrpy-unittest',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='vcrpy vcr.py unittest testing mock http'.split(),
packages=find_packages(exclude=['tests']),
install_requires=['vcrpy'],
)
|
11533791
|
import os
import numpy as np
import torch
import pdb
from trajectory.utils import discretization
from trajectory.utils.arrays import to_torch
from .d4rl import load_environment, qlearning_dataset_with_timeouts
from .preprocessing import dataset_preprocess_functions
def segment(observations, terminals, max_path_length):
"""
segment `observations` into trajectories according to `terminals`
"""
assert len(observations) == len(terminals)
observation_dim = observations.shape[1]
trajectories = [[]]
for obs, term in zip(observations, terminals):
trajectories[-1].append(obs)
if term.squeeze():
trajectories.append([])
if len(trajectories[-1]) == 0:
trajectories = trajectories[:-1]
## list of arrays because trajectories lengths will be different
trajectories = [np.stack(traj, axis=0) for traj in trajectories]
n_trajectories = len(trajectories)
path_lengths = [len(traj) for traj in trajectories]
## pad trajectories to be of equal length
trajectories_pad = np.zeros((n_trajectories, max_path_length, observation_dim), dtype=trajectories[0].dtype)
early_termination = np.zeros((n_trajectories, max_path_length), dtype=np.bool)
for i, traj in enumerate(trajectories):
path_length = path_lengths[i]
trajectories_pad[i,:path_length] = traj
early_termination[i,path_length:] = 1
return trajectories_pad, early_termination, path_lengths
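# Illustrative: observations [o0..o4] with terminals [0, 0, 1, 0, 0] yield two
# trajectories, [o0, o1, o2] and [o3, o4], each zero-padded to max_path_length,
# plus per-step early-termination masks and path_lengths == [3, 2].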
class SequenceDataset(torch.utils.data.Dataset):
def __init__(self, env, sequence_length=250, step=10, discount=0.99, max_path_length=1000, penalty=None, device='cuda:0'):
print(f'[ datasets/sequence ] Sequence length: {sequence_length} | Step: {step} | Max path length: {max_path_length}')
self.env = env = load_environment(env) if type(env) is str else env
self.sequence_length = sequence_length
self.step = step
self.max_path_length = max_path_length
self.device = device
print(f'[ datasets/sequence ] Loading...', end=' ', flush=True)
dataset = qlearning_dataset_with_timeouts(env.unwrapped, terminate_on_end=True)
print('✓')
preprocess_fn = dataset_preprocess_functions.get(env.name)
if preprocess_fn:
print(f'[ datasets/sequence ] Modifying environment')
dataset = preprocess_fn(dataset)
##
observations = dataset['observations']
actions = dataset['actions']
next_observations = dataset['next_observations']
rewards = dataset['rewards']
terminals = dataset['terminals']
realterminals = dataset['realterminals']
self.observations_raw = observations
self.actions_raw = actions
self.next_observations_raw = next_observations
self.joined_raw = np.concatenate([observations, actions], axis=-1)
self.rewards_raw = rewards
self.terminals_raw = terminals
## terminal penalty
if penalty is not None:
terminal_mask = realterminals.squeeze()
self.rewards_raw[terminal_mask] = penalty
## segment
print(f'[ datasets/sequence ] Segmenting...', end=' ', flush=True)
self.joined_segmented, self.termination_flags, self.path_lengths = segment(self.joined_raw, terminals, max_path_length)
self.rewards_segmented, *_ = segment(self.rewards_raw, terminals, max_path_length)
print('✓')
self.discount = discount
self.discounts = (discount ** np.arange(self.max_path_length))[:,None]
## [ n_paths x max_path_length x 1 ]
self.values_segmented = np.zeros(self.rewards_segmented.shape)
for t in range(max_path_length):
## [ n_paths x 1 ]
V = (self.rewards_segmented[:,t+1:] * self.discounts[:-t-1]).sum(axis=1)
self.values_segmented[:,t] = V
## add (r, V) to `joined`
values_raw = self.values_segmented.squeeze(axis=-1).reshape(-1)
values_mask = ~self.termination_flags.reshape(-1)
self.values_raw = values_raw[values_mask, None]
self.joined_raw = np.concatenate([self.joined_raw, self.rewards_raw, self.values_raw], axis=-1)
self.joined_segmented = np.concatenate([self.joined_segmented, self.rewards_segmented, self.values_segmented], axis=-1)
## get valid indices
indices = []
for path_ind, length in enumerate(self.path_lengths):
end = length - 1
for i in range(end):
indices.append((path_ind, i, i+sequence_length))
self.indices = np.array(indices)
self.observation_dim = observations.shape[1]
self.action_dim = actions.shape[1]
self.joined_dim = self.joined_raw.shape[1]
## pad trajectories
n_trajectories, _, joined_dim = self.joined_segmented.shape
self.joined_segmented = np.concatenate([
self.joined_segmented,
np.zeros((n_trajectories, sequence_length-1, joined_dim)),
], axis=1)
self.termination_flags = np.concatenate([
self.termination_flags,
            np.ones((n_trajectories, sequence_length-1), dtype=bool),
], axis=1)
def __len__(self):
return len(self.indices)
class DiscretizedDataset(SequenceDataset):
def __init__(self, *args, N=50, discretizer='QuantileDiscretizer', **kwargs):
super().__init__(*args, **kwargs)
self.N = N
discretizer_class = getattr(discretization, discretizer)
self.discretizer = discretizer_class(self.joined_raw, N)
def __getitem__(self, idx):
path_ind, start_ind, end_ind = self.indices[idx]
path_length = self.path_lengths[path_ind]
joined = self.joined_segmented[path_ind, start_ind:end_ind:self.step]
terminations = self.termination_flags[path_ind, start_ind:end_ind:self.step]
joined_discrete = self.discretizer.discretize(joined)
## replace with termination token if the sequence has ended
assert (joined[terminations] == 0).all(), \
f'Everything after termination should be 0: {path_ind} | {start_ind} | {end_ind}'
joined_discrete[terminations] = self.N
## [ (sequence_length / skip) x observation_dim]
joined_discrete = to_torch(joined_discrete, device='cpu', dtype=torch.long).contiguous()
## don't compute loss for parts of the prediction that extend
## beyond the max path length
traj_inds = torch.arange(start_ind, end_ind, self.step)
mask = torch.ones(joined_discrete.shape, dtype=torch.bool)
mask[traj_inds > self.max_path_length - self.step] = 0
## flatten everything
joined_discrete = joined_discrete.view(-1)
mask = mask.view(-1)
X = joined_discrete[:-1]
Y = joined_discrete[1:]
mask = mask[:-1]
return X, Y, mask
class GoalDataset(DiscretizedDataset):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
pdb.set_trace()
def __getitem__(self, idx):
X, Y, mask = super().__getitem__(idx)
        ## get path length for looking up the last transition in the trajectory
path_ind, start_ind, end_ind = self.indices[idx]
path_length = self.path_lengths[path_ind]
## the goal is the first `observation_dim` dimensions of the last transition
goal = self.joined_segmented[path_ind, path_length-1, :self.observation_dim]
goal_discrete = self.discretizer.discretize(goal, subslice=(0, self.observation_dim))
goal_discrete = to_torch(goal_discrete, device='cpu', dtype=torch.long).contiguous().view(-1)
return X, goal_discrete, Y, mask
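## Usage sketch (hypothetical d4rl environment name; assumes d4rl is installed):
##   dataset = DiscretizedDataset('hopper-medium-v2', sequence_length=250, step=10, N=100)
##   X, Y, mask = dataset[0]  ## Y is X shifted by one token, plus a loss mask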
|
11533817
|
from __future__ import annotations
import pytest
from pipelayer import Action, Context, Filter, FilterEventArgs, Pipeline
from pipelayer.filter import _parse_filter_event_args, raise_events
class MyFilter(Filter):
@raise_events
def run(self, data, context) -> dict:
return {"something": "goes here"}
@pytest.mark.unit
class TestFilterEvents:
@pytest.mark.happy
def test_filter_on_start(self):
def myfilter_start(sender: object, args: FilterEventArgs) -> None:
args.action = Action.EXIT
f = MyFilter()
f.start += myfilter_start
p = Pipeline(steps=[f])
response = p.run(None)
assert response is None
@pytest.mark.happy
def test_filter_on_end(self):
def myfilter_end(sender: object, args: FilterEventArgs) -> None:
args.data = None
args.action = Action.EXIT
f = MyFilter()
f.end.append(myfilter_end)
p = Pipeline(steps=[f])
response = p.run(None)
assert response is None
@pytest.mark.happy
def test_event_handler_assignment(self):
def my_event_handler(sender: Filter, args: FilterEventArgs):
pass
my_filter = MyFilter()
my_filter.start += my_event_handler
my_filter.start.append(my_event_handler)
my_filter.start = my_filter.start + my_event_handler
my_filter.exit += my_event_handler
my_filter.exit.append(my_event_handler)
        my_filter.exit = my_filter.exit + my_event_handler
my_filter.end += my_event_handler
my_filter.end.append(my_event_handler)
        my_filter.end = my_filter.end + my_event_handler
assert True
@pytest.mark.sad
def test_assigning_wrong_type_to_handlers(self):
class MyFilter(Filter):
def run(self, data, context) -> dict:
pass
with pytest.raises(TypeError):
MyFilter().start = []
with pytest.raises(TypeError):
MyFilter().exit = []
with pytest.raises(TypeError):
MyFilter().end = []
@pytest.mark.happy
def test_parse_args_2_args(self):
class MyFilter(Filter):
def run(self, data) -> dict:
pass
my_filter = MyFilter()
a, b, c = _parse_filter_event_args(my_filter, "xyz")
assert a is my_filter
assert b == "xyz"
assert isinstance(c, Context)
@pytest.mark.happy
def test_parse_kwargs(self):
class MyFilter(Filter):
def run(self, data) -> dict:
pass
my_filter = MyFilter()
a, b, c = _parse_filter_event_args(my_filter, data="xyz")
assert a is my_filter
assert b == "xyz"
assert isinstance(c, Context)
|
11533825
|
import os
import gc
import ast
import pandas as pd
import numpy as np
from . import fractal
import matplotlib.pyplot as plt
from amlearn.utils.data import read_lammps_dump
from amlearn.utils.check import check_output_path
__author__ = "<NAME>"
__email__ = "<EMAIL>"
"""
This is an example script for fractal analysis, based on the Fortran source
code in ./fractal.f90. Please make sure to compile the Fortran code with f2py
before running this script.
Compiling command example:
f2py -c fractal.f90 -m fractal
"""
system = ["Cu65Zr35", "qr_5plus10^10"]
lammps_file = "xxx/dump.lmp"
structure, bds = read_lammps_dump(lammps_file)
print(structure)
n_atoms = len(structure)
atom_type = np.zeros(n_atoms, dtype=int)
atom_coords = structure[["x", "y", "z"]].iloc[0:n_atoms].values
pbc = np.array([0, 1, 0])
cutoff = 30.0
bin = 0.2
output_path = "xxx"
check_output_path(output_path)
prediction_file = "xx"
df = pd.read_csv(os.path.join(prediction_file), index_col="number")
qs_col = "QS_predict"
structure[qs_col] = df[qs_col]
qs_higher_thresholds = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85]
qs_lower_thresholds = [0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1,
0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5]
for lower_threshold in qs_lower_thresholds:
structure[qs_col + "_lower_than_{:.2f}".format(lower_threshold)] = \
df[qs_col].apply(lambda x: 1 if x < lower_threshold else 0)
for higher_threshold in qs_higher_thresholds:
structure[qs_col + "_higher_than_{:.2f}".format(higher_threshold)] = \
df[qs_col].apply(lambda x: 1 if x > higher_threshold else 0)
cols = [qs_col + "_lower_than_{:.2f}".format(lower_threshold)
for lower_threshold in qs_lower_thresholds] + \
[qs_col + "_higher_than_{:.2f}".format(higher_threshold)
for higher_threshold in qs_higher_thresholds]
# here 777 represents all atoms, which is for compatibility with Fortran.
center_types = np.array([777, 1, 2])
for prob_col in cols:
selected_atoms = structure[structure[prob_col] == 1]
# n_atoms = 20000
n_atoms = len(selected_atoms)
# check if the fractal file is already calculated
output_file = os.path.join(output_path, "fractal_{}_{}_bin_{}_all_{}_col_{}.csv".format(system[0], system[1], bin, n_atoms, prob_col))
if os.path.exists(output_file) and os.path.isfile(output_file):
print("skip", prob_col)
continue
print("Fractal calculation started, "
"the number of selected atoms:", prob_col, n_atoms)
atom_type = selected_atoms[["type"]].iloc[0:n_atoms].values
atom_coords = selected_atoms[["x", "y", "z"]].iloc[0:n_atoms].values
fractal_distwise = np.zeros((int(cutoff / bin), len(center_types)),
dtype=np.float64)
fractal_accumulative = np.zeros((int(cutoff / bin), len(center_types)),
dtype=np.float64)
fractal_distwise, fractal_accumulative = fractal.fractal_intense(
atom_type=atom_type, atom_coords=atom_coords, pbc=pbc, bds=bds,
cutoff=cutoff, bin=bin, bin_num=int(cutoff / bin),
center_types=center_types,
fractal_distwise=fractal_distwise,
fractal_accumulative=fractal_accumulative)
fractal_distwise_df = pd.DataFrame(index=np.arange(0, cutoff, bin))
fractal_accumulative_df = pd.DataFrame(index=np.arange(0, cutoff, bin))
for idx, col in enumerate(center_types):
fractal_distwise_df[prob_col + "_" + str(col) + "_distw"] = np.array(
fractal_distwise)[:, idx]
fractal_accumulative_df[prob_col + "_" + str(col) + "_acc"] = np.array(
fractal_accumulative)[:, idx]
plt.bar(np.arange(0, cutoff, bin), height=fractal_accumulative[:, 0])
plt.bar(np.arange(0, cutoff, bin), height=fractal_distwise[:, 0])
if n_atoms == len(selected_atoms):
pd.concat([fractal_distwise_df, fractal_accumulative_df],
axis=1).to_csv(os.path.join(output_path, "fractal_{}_{}_bin_{}_all_{}_col_{}.csv".format(
system[0],
system[1],
bin,
n_atoms, prob_col)))
else:
pd.concat([fractal_distwise_df, fractal_accumulative_df],
axis=1).to_csv(os.path.join(output_path, "fractal_{}_{}_bin_{}_test_{}_col_{}.csv".format(
system[0],
system[1],
bin,
n_atoms, prob_col)))
del [selected_atoms, fractal_distwise_df, fractal_accumulative_df]
gc.collect()
def distwise_stats_to_gr(fractal_df, atom_num, volume, distwise_col_end="_distw"):
# the default index of fractal_df is the center distance of each bin
sphere_shell_vol = 4 * np.pi * (np.array(fractal_df.index)) ** 2 * bin
    for col in fractal_df.columns:
if col.endswith(distwise_col_end):
            # not atom_num**2: one factor of atom_num was already divided out when computing _distw
fractal_df[col + "_vol_norm"] = \
fractal_df[col] / sphere_shell_vol * volume / atom_num
return fractal_df
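# Usage sketch (file path and box volume are hypothetical): normalize the
# distance-wise counts by spherical-shell volume to get g(r)-style curves.
#   df = pd.read_csv(output_file, index_col=0)
#   gr_df = distwise_stats_to_gr(df, atom_num=n_atoms, volume=box_volume)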
|
11533827
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dfirtrack_main', '0010_status_history_for_system'),
]
operations = [
migrations.AlterField(
model_name='domainuser',
name='domainuser_is_domainadmin',
field=models.BooleanField(blank=True, null=True),
),
migrations.AlterField(
model_name='system',
name='system_is_vm',
field=models.BooleanField(blank=True, null=True),
),
migrations.AlterField(
model_name='systemuser',
name='systemuser_is_systemadmin',
field=models.BooleanField(blank=True, null=True),
),
]
|
11533830
|
from app import db,ma
from datetime import datetime
class Todo(db.Model):
id=db.Column(db.Integer, primary_key=True)
    title=db.Column(db.String(128))  # assumed length; title is used by __init__, __repr__ and TodoSchema
    content=db.Column(db.String(512))
done=db.Column(db.Boolean)
user_id=db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
created_at = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
user=db.relationship('User', backref=db.backref('todos', lazy=True))
def __init__(self,title,content,done,user_id):
self.title=title
self.content=content
self.done=done
self.user_id=user_id
def __repr__(self):
return '<Todo> %r' % self.title
# Generate Tables in DB
db.create_all()
class TodoSchema(ma.Schema):
class Meta:
fields = ('id','title','content','done','user_id')
todo_schema = TodoSchema()
todos_schema = TodoSchema(many=True)
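# Usage sketch (assumes an active Flask app context and an existing user row):
#   todo = Todo(title='groceries', content='milk, eggs', done=False, user_id=1)
#   db.session.add(todo)
#   db.session.commit()
#   todo_schema.dump(todo)               # serialized todo
#   todos_schema.dump(Todo.query.all())  # list of serialized todos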
|
11533838
|
import torch
from torch import Tensor
from torch.nn import Linear
from torch.nn.utils import prune
from .concepts import XConceptizator
class XLogic(Linear):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
"""
def __init__(self, in_features: int, out_features: int, activation: str,
bias: bool = True, top: bool = False) -> None:
super(XLogic, self).__init__(in_features, out_features, bias)
self.in_features = in_features
self.out_features = out_features
self.top = top
self.conceptizator = XConceptizator(activation)
self.activation = activation
def forward(self, input: Tensor) -> Tensor:
x = self.conceptizator(input)
if not self.top:
x = torch.nn.functional.linear(x, self.weight, self.bias)
return x
def extra_repr(self) -> str:
return 'conceptizator={}, in_features={}, out_features={}, bias={}'.format(
self.conceptizator, self.in_features, self.out_features, self.bias is not None
)
class DisentangledConcepts(Linear):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
"""
def __init__(self, in_features_per_concept: int, n_concepts: int, out_features_per_concept: int, bias: bool = True) -> None:
super(DisentangledConcepts, self).__init__(n_concepts * in_features_per_concept,
n_concepts * out_features_per_concept,
bias)
self.in_features_per_concept = in_features_per_concept
self.n_concepts = n_concepts
self.out_features_per_concept = out_features_per_concept
self._prune()
def _prune(self):
blocks = []
block_size = (self.out_features_per_concept, self.in_features_per_concept)
for i in range(self.n_concepts):
blocks.append(torch.ones(block_size))
mask = torch.block_diag(*blocks)
prune.custom_from_mask(self, name="weight", mask=mask)
return
def extra_repr(self) -> str:
return 'in_features={}, n_concepts={}, out_features={}, bias={}'.format(
self.in_features, self.n_concepts, self.out_features, self.bias is not None
)
|
11533868
|
from wydget import anim
from wydget.widgets.frame import Frame
class Drawer(Frame):
'''A *transparent container* that may hide and expose its contents.
'''
name='drawer'
HIDDEN='hidden'
EXPOSED='exposed'
LEFT='left'
RIGHT='right'
TOP='top'
BOTTOM='bottom'
def __init__(self, parent, state=HIDDEN, side=LEFT,
is_transparent=True, **kw):
super(Drawer, self).__init__(parent, is_transparent=is_transparent,
**kw)
self.state = state
self.side = side
if state == self.HIDDEN:
self.setVisible(False)
def toggle_state(self):
if self.state == self.EXPOSED: self.hide()
else: self.expose()
_anim = None
def expose(self):
if self.state == self.EXPOSED: return
if self._anim is not None and self._anim.is_running:
self._anim.cancel()
self._anim = ExposeAnimation(self)
self.setVisible(True)
self.state = self.EXPOSED
def hide(self):
if self.state == self.HIDDEN: return
if self._anim is not None and self._anim.is_running:
self._anim.cancel()
self._anim = HideAnimation(self)
self.state = self.HIDDEN
class HideAnimation(anim.Animation):
def __init__(self, drawer, duration=.25, function=anim.cosine90):
self.drawer = drawer
self.duration = duration
self.function = function
if drawer.side == Drawer.LEFT:
self.sx = int(drawer.x)
self.ex = int(drawer.x - drawer.width)
self.sw = int(drawer.width)
self.ew = 0
elif drawer.side == Drawer.RIGHT:
self.sx = int(drawer.x)
self.ex = int(drawer.x + drawer.width)
self.sw = int(drawer.width)
self.ew = 0
elif drawer.side == Drawer.TOP:
self.sy = int(drawer.y)
self.ey = int(drawer.y - drawer.height)
self.sh = int(drawer.height)
self.eh = 0
elif drawer.side == Drawer.BOTTOM:
self.sy = int(drawer.y)
self.ey = int(drawer.y + drawer.height)
self.sh = int(drawer.height)
self.eh = 0
super(HideAnimation, self).__init__()
def cancel(self):
self.drawer.setVisible(False)
if self.drawer.side in (Drawer.LEFT, Drawer.RIGHT):
self.drawer.setViewClip((self.sx, 0, self.ew,
self.drawer.height))
self.drawer.x = self.ex
else:
self.drawer.setViewClip((0, self.sy, self.drawer.width,
self.eh))
self.drawer.y = self.ey
super(HideAnimation, self).cancel()
def animate(self, dt):
self.anim_time += dt
if self.anim_time >= self.duration:
self.cancel()
else:
t = self.anim_time / self.duration
if self.drawer.side in (Drawer.LEFT, Drawer.RIGHT):
x = anim.tween(self.sx, self.ex, t, self.function)
w = anim.tween(self.sw, self.ew, t, self.function)
if self.drawer.side == Drawer.LEFT:
vcx = self.sw - w
elif self.drawer.side == Drawer.RIGHT:
vcx = 0
self.drawer.setViewClip((vcx, 0, w, self.drawer.height))
self.drawer.x = x
else:
y = anim.tween(self.sy, self.ey, t, self.function)
h = anim.tween(self.sh, self.eh, t, self.function)
if self.drawer.side == Drawer.TOP:
vcy = self.sh - h
elif self.drawer.side == Drawer.BOTTOM:
vcy = 0
self.drawer.setViewClip((0, vcy, self.drawer.width, h))
self.drawer.y = y
class ExposeAnimation(anim.Animation):
def __init__(self, drawer, duration=.25, function=anim.cosine90):
self.drawer = drawer
self.duration = duration
self.function = function
if drawer.side == Drawer.LEFT:
self.sx = int(drawer.x)
self.ex = int(drawer.x + drawer.width)
self.sw = 0
self.ew = int(drawer.width)
elif drawer.side == Drawer.RIGHT:
self.sx = int(drawer.x)
self.ex = int(drawer.x - drawer.width)
self.sw = 0
self.ew = int(drawer.width)
elif drawer.side == Drawer.TOP:
self.sy = int(drawer.y)
self.ey = int(drawer.y + drawer.height)
self.sh = 0
self.eh = int(drawer.height)
elif drawer.side == Drawer.BOTTOM:
self.sy = int(drawer.y)
self.ey = int(drawer.y - drawer.height)
self.sh = 0
self.eh = int(drawer.height)
super(ExposeAnimation, self).__init__()
def cancel(self):
if self.drawer.side in (Drawer.LEFT, Drawer.RIGHT):
self.drawer.setViewClip((0, 0, self.ew, self.drawer.height))
self.drawer.x = self.ex
else:
self.drawer.setViewClip((0, 0, self.drawer.width, self.eh))
self.drawer.y = self.ey
super(ExposeAnimation, self).cancel()
def animate(self, dt):
self.anim_time += dt
if self.anim_time >= self.duration:
self.cancel()
else:
t = self.anim_time / self.duration
if self.drawer.side in (Drawer.LEFT, Drawer.RIGHT):
x = anim.tween(self.sx, self.ex, t, self.function)
w = anim.tween(self.sw, self.ew, t, self.function)
if self.drawer.side == Drawer.LEFT:
vcx = self.ew - w
elif self.drawer.side == Drawer.RIGHT:
vcx = 0
self.drawer.setViewClip((vcx, 0, w, self.drawer.height))
self.drawer.x = x
else:
y = anim.tween(self.sy, self.ey, t, self.function)
h = anim.tween(self.sh, self.eh, t, self.function)
if self.drawer.side == Drawer.TOP:
vcy = self.eh - h
elif self.drawer.side == Drawer.BOTTOM:
vcy = 0
self.drawer.setViewClip((0, vcy, self.drawer.width, h))
self.drawer.y = y
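# Usage sketch (hypothetical parent widget): a drawer that slides in from the
# left edge when toggled.
#   drawer = Drawer(parent, state=Drawer.HIDDEN, side=Drawer.LEFT)
#   drawer.toggle_state()  # runs ExposeAnimation; call again to hide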
|
11533883
|
import os
import pytest
from django.core.files.base import ContentFile
from .base import DataLayerFactory, MapFactory
pytestmark = pytest.mark.django_db
def test_datalayers_should_be_ordered_by_rank(map, datalayer):
datalayer.rank = 5
datalayer.save()
c4 = DataLayerFactory(map=map, rank=4)
c1 = DataLayerFactory(map=map, rank=1)
c3 = DataLayerFactory(map=map, rank=3)
c2 = DataLayerFactory(map=map, rank=2)
assert list(map.datalayer_set.all()) == [c1, c2, c3, c4, datalayer]
def test_upload_to(map, datalayer):
map.pk = 302
datalayer.pk = 17
assert datalayer.upload_to().startswith('datalayer/2/0/302/17_')
def test_save_should_use_pk_as_name(map, datalayer):
assert "/{}_".format(datalayer.pk) in datalayer.geojson.name
def test_same_geojson_file_name_will_be_suffixed(map, datalayer):
before = datalayer.geojson.name
datalayer.geojson.save(before, ContentFile("{}"))
assert datalayer.geojson.name != before
assert "/{}_".format(datalayer.pk) in datalayer.geojson.name
def test_clone_should_return_new_instance(map, datalayer):
clone = datalayer.clone()
assert datalayer.pk != clone.pk
assert datalayer.name == clone.name
assert datalayer.map == clone.map
def test_clone_should_update_map_if_passed(datalayer, user, licence):
map = MapFactory(owner=user, licence=licence)
clone = datalayer.clone(map_inst=map)
assert datalayer.pk != clone.pk
assert datalayer.name == clone.name
assert datalayer.map != clone.map
assert map == clone.map
def test_clone_should_clone_geojson_too(datalayer):
clone = datalayer.clone()
assert datalayer.pk != clone.pk
assert clone.geojson is not None
assert clone.geojson.path != datalayer.geojson.path
def test_should_remove_old_versions_on_save(datalayer, map, settings):
settings.UMAP_KEEP_VERSIONS = 3
root = datalayer.storage_root()
before = len(datalayer.geojson.storage.listdir(root)[1])
newer = '%s/%s_1440924889.geojson' % (root, datalayer.pk)
medium = '%s/%s_1440923687.geojson' % (root, datalayer.pk)
older = '%s/%s_1440918637.geojson' % (root, datalayer.pk)
for path in [medium, newer, older]:
datalayer.geojson.storage.save(path, ContentFile("{}"))
datalayer.geojson.storage.save(path + '.gz', ContentFile("{}"))
assert len(datalayer.geojson.storage.listdir(root)[1]) == 6 + before
datalayer.save()
files = datalayer.geojson.storage.listdir(root)[1]
assert len(files) == 5
assert os.path.basename(newer) in files
assert os.path.basename(newer + '.gz') in files
assert os.path.basename(medium) in files
assert os.path.basename(medium + '.gz') in files
assert os.path.basename(datalayer.geojson.path) in files
assert os.path.basename(older) not in files
assert os.path.basename(older + '.gz') not in files
|
11533888
|
from __future__ import absolute_import, print_function, unicode_literals
import sys
from metapub import MedGenFetcher
# example of a CUI: C0000039
try:
cui = sys.argv[1]
except IndexError:
print('Supply a ConceptID (CUI) to this script as its argument.')
sys.exit()
####
import logging
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("eutils").setLevel(logging.WARNING)
####
fetch = MedGenFetcher()
uid = fetch.uid_for_cui(cui)
print(uid)
|
11533928
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import copy
from hfta.ops import get_hfta_op_for
def str_to_class(classname):
return getattr(sys.modules[__name__], classname)
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1, B=1):
"""3x3 convolution with padding"""
return get_hfta_op_for(nn.Conv2d, B)(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes, out_planes, stride=1, B=1):
"""1x1 convolution"""
return get_hfta_op_for(nn.Conv2d, B)(
in_planes,
out_planes,
kernel_size=1,
stride=stride,
bias=False,
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
downsample=None,
norm_layer=None,
track_running_stats=True,
B=1):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = get_hfta_op_for(nn.BatchNorm2d, B)
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride, B=B)
self.bn1 = norm_layer(planes, track_running_stats=track_running_stats)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, B=B)
self.bn2 = norm_layer(planes, track_running_stats=track_running_stats)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
def snatch_parameters(self, other, b):
self.conv1.snatch_parameters(other.conv1, b)
self.bn1.snatch_parameters(other.bn1, b)
self.conv2.snatch_parameters(other.conv2, b)
self.bn2.snatch_parameters(other.bn2, b)
if self.downsample is not None:
sequence_snatch_parameters(self.downsample, other.downsample, b)
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
  # according to "Deep residual learning for image recognition" https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(self,
inplanes,
planes,
stride=1,
downsample=None,
norm_layer=None,
track_running_stats=True,
B=1):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = get_hfta_op_for(nn.BatchNorm2d, B)
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, planes, B=B)
self.bn1 = norm_layer(planes, track_running_stats=track_running_stats)
self.conv2 = conv3x3(planes, planes, stride, B=B)
self.bn2 = norm_layer(planes, track_running_stats=track_running_stats)
self.conv3 = conv1x1(planes, planes * self.expansion, B=B)
self.bn3 = norm_layer(planes * self.expansion,
track_running_stats=track_running_stats)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
def snatch_parameters(self, other, b):
self.conv1.snatch_parameters(other.conv1, b)
self.bn1.snatch_parameters(other.bn1, b)
self.conv2.snatch_parameters(other.conv2, b)
self.bn2.snatch_parameters(other.bn2, b)
    self.conv3.snatch_parameters(other.conv3, b)
    self.bn3.snatch_parameters(other.bn3, b)
if self.downsample is not None:
sequence_snatch_parameters(self.downsample, other.downsample, b)
class SerialBasicBlock(nn.Module):
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
downsample=None,
norm_layer=None,
track_running_stats=True,
B=1):
super(SerialBasicBlock, self).__init__()
self.hfta = (B > 0)
self.B = max(1, B)
self.downsample = None
self.unfused_parameters = []
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self.conv1 = [conv3x3(inplanes, planes, stride, B=0) for _ in range(B)]
self.bn1 = [
norm_layer(planes, track_running_stats=track_running_stats)
for _ in range(B)
]
self.relu = [nn.ReLU(inplace=True) for _ in range(B)]
self.conv2 = [conv3x3(planes, planes, B=0) for _ in range(B)]
    self.bn2 = [
        norm_layer(planes, track_running_stats=track_running_stats)
        for _ in range(B)
    ]
if downsample is not None:
self.downsample = [copy.copy(downsample) for _ in range(B)]
for i in range(self.B):
param = []
param.extend(list(self.conv1[i].parameters()))
param.extend(list(self.conv2[i].parameters()))
param.extend(list(self.bn1[i].parameters()))
param.extend(list(self.bn2[i].parameters()))
if self.downsample is not None:
param.extend(list(self.downsample[i].parameters()))
self.unfused_parameters.append(param)
self.stride = stride
def to(self, *args, **kwargs):
for i in range(self.B):
self.conv1[i].to(*args, **kwargs)
self.conv2[i].to(*args, **kwargs)
self.bn1[i].to(*args, **kwargs)
self.bn2[i].to(*args, **kwargs)
if self.downsample is not None:
self.downsample[i].to(*args, **kwargs)
def forward(self, x):
if self.hfta:
x = x.transpose(0, 1)
else:
x = [x]
identity = x
out = [self.conv1[i](x[i]) for i in range(self.B)]
out = [self.bn1[i](out[i]) for i in range(self.B)]
out = [self.relu[i](out[i]) for i in range(self.B)]
out = [self.conv2[i](out[i]) for i in range(self.B)]
out = [self.bn2[i](out[i]) for i in range(self.B)]
for i in range(self.B):
out[i] += identity[i] if self.downsample is None else self.downsample[i](
x[i])
out = [self.relu[i](out[i]) for i in range(self.B)]
if self.hfta:
out = [out[i].unsqueeze(1) for i in range(self.B)]
out = torch.cat(out, 1)
else:
out = out[0]
return out
class ResNet(nn.Module):
def __init__(self,
block,
layers,
num_classes=10,
zero_init_residual=False,
track_running_stats=True,
B=1):
super(ResNet, self).__init__()
self.B = B
self.track_running_stats = track_running_stats
norm_layer = get_hfta_op_for(nn.BatchNorm2d, B)
self._conv_layer = get_hfta_op_for(nn.Conv2d,
B).func if B > 0 else nn.Conv2d
self._norm_layer = get_hfta_op_for(nn.BatchNorm2d,
B).func if B > 0 else nn.BatchNorm2d
self._linear_layer = get_hfta_op_for(nn.Linear,
B).func if B > 0 else nn.Linear
self.inplanes = 64
self.conv1 = get_hfta_op_for(nn.Conv2d, B=B)(3,
self.inplanes,
kernel_size=7,
stride=2,
padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes,
track_running_stats=track_running_stats)
self.relu = nn.ReLU(inplace=True)
self.maxpool = get_hfta_op_for(nn.MaxPool2d, B=B)(kernel_size=3,
stride=2,
padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], B=B)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, B=B)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, B=B)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, B=B)
self.fc = get_hfta_op_for(nn.Linear, B)(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, self._conv_layer):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, self._norm_layer):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, B=1):
downsample = None
norm_layer = get_hfta_op_for(nn.BatchNorm2d, B)
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride, B=B),
norm_layer(planes * block.expansion,
track_running_stats=self.track_running_stats),
)
layers = []
layers.append(
block(self.inplanes,
planes,
stride,
downsample,
norm_layer,
track_running_stats=self.track_running_stats,
B=B))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(self.inplanes,
planes,
norm_layer=norm_layer,
track_running_stats=self.track_running_stats,
B=B))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.B > 0:
x = torch.flatten(x, 2)
x = x.transpose(0, 1)
else:
x = torch.flatten(x, 1)
x = self.fc(x)
if self.B > 0:
output = F.log_softmax(x, dim=2)
else:
output = F.log_softmax(x, dim=1)
return output
def snatch_parameters(self, others, b):
self.conv1.snatch_parameters(others.conv1, b)
self.bn1.snatch_parameters(others.bn1, b)
sequence_snatch_parameters(self.layer1, others.layer1, b)
sequence_snatch_parameters(self.layer2, others.layer2, b)
sequence_snatch_parameters(self.layer3, others.layer3, b)
sequence_snatch_parameters(self.layer4, others.layer4, b)
self.fc.snatch_parameters(others.fc, b)
def init_load(self, file_names):
if self.B == 0:
self.load_state_dict(torch.load(file_names[0]).state_dict())
else:
assert self.B == len(file_names)
for i, file_name in enumerate(file_names):
others = torch.load(file_name)
self.snatch_parameters(others, i)
class SerialLinear(nn.Module):
def __init__(self, C_in, C_out, B=1):
super(SerialLinear, self).__init__()
self.hfta = (B > 0)
self.B = max(1, B)
self.fc = [nn.Linear(C_in, C_out) for _ in range(B)]
self.unfused_parameters = [list(self.fc[i].parameters()) for i in range(B)]
def to(self, *args, **kwargs):
for i in range(self.B):
self.fc[i].to(*args, **kwargs)
def forward(self, x):
if not self.hfta:
x = [x]
out = [self.fc[i](x[i]) for i in range(self.B)]
if self.hfta:
out = [out[i].unsqueeze(0) for i in range(self.B)]
out = torch.cat(out, 0)
else:
out = out[0]
return out
class SerialConvBlock(nn.Module):
def __init__(self, B, in_C, out_C):
super(SerialConvBlock, self).__init__()
self.hfta = (B > 0)
self.B = max(1, B)
self.unfused_parameters = []
self.conv = [
nn.Conv2d(in_C, out_C, kernel_size=7, stride=2, padding=3, bias=False)
for _ in range(B)
]
self.bn1 = [nn.BatchNorm2d(out_C) for _ in range(B)]
self.relu = [nn.ReLU(inplace=True) for _ in range(B)]
self.maxpool = [
nn.MaxPool2d(kernel_size=3, stride=2, padding=1) for _ in range(B)
]
for i in range(B):
self.unfused_parameters.append(
list(self.conv[i].parameters()) + list(self.bn1[i].parameters()))
def to(self, *args, **kwargs):
for i in range(self.B):
self.conv[i].to(*args, **kwargs)
self.bn1[i].to(*args, **kwargs)
self.relu[i].to(*args, **kwargs)
self.maxpool[i].to(*args, **kwargs)
def forward(self, x):
if self.hfta:
x = x.transpose(0, 1)
else:
x = [x]
out = [self.conv[i](x[i]) for i in range(self.B)]
out = [self.bn1[i](out[i]) for i in range(self.B)]
out = [self.relu[i](out[i]) for i in range(self.B)]
out = [self.maxpool[i](out[i]) for i in range(self.B)]
if self.hfta:
out = [out[i].unsqueeze(1) for i in range(self.B)]
out = torch.cat(out, 1)
else:
out = out[0]
return out
class PartiallyFusedResNet(nn.Module):
def __init__(self,
config,
block,
serial_block,
num_classes=10,
zero_init_residual=False,
track_running_stats=True,
B=1):
    super(PartiallyFusedResNet, self).__init__()
    self.unfused_layers = []  # per-instance; a class-level list would be shared across models
layers = config["layers"]
run_in_serial = config["run_in_serial"]
self.B = B
self.track_running_stats = track_running_stats
norm_layer = get_hfta_op_for(nn.BatchNorm2d, B)
self._conv_layer = get_hfta_op_for(nn.Conv2d,
B).func if B > 0 else nn.Conv2d
self._norm_layer = get_hfta_op_for(nn.BatchNorm2d,
B).func if B > 0 else nn.BatchNorm2d
self._linear_layer = get_hfta_op_for(nn.Linear,
B).func if B > 0 else nn.Linear
self.inplanes = 64
if run_in_serial[4][1]:
self.convBlock = SerialConvBlock(B, 3, self.inplanes)
self.unfused_layers.append(self.convBlock)
else:
self.convBlock = nn.Sequential(
get_hfta_op_for(nn.Conv2d, B=B)(3,
self.inplanes,
kernel_size=7,
stride=2,
padding=3,
bias=False),
norm_layer(self.inplanes, track_running_stats=track_running_stats),
nn.ReLU(inplace=True),
get_hfta_op_for(nn.MaxPool2d, B=B)(kernel_size=3, stride=2,
padding=1))
self.layer1 = self._make_layer(block,
serial_block,
64,
layers[0],
run_in_serial[0],
B=B)
self.layer2 = self._make_layer(block,
serial_block,
128,
layers[1],
run_in_serial[1],
stride=2,
B=B)
self.layer3 = self._make_layer(block,
serial_block,
256,
layers[2],
run_in_serial[2],
stride=2,
B=B)
self.layer4 = self._make_layer(block,
serial_block,
512,
layers[3],
run_in_serial[3],
stride=2,
B=B)
if run_in_serial[4][0]:
self.fc = SerialLinear(512 * block.expansion, num_classes, B=B)
self.unfused_layers.append(self.fc)
else:
self.fc = get_hfta_op_for(nn.Linear, B)(512 * block.expansion,
num_classes)
for m in self.modules():
if isinstance(m, self._conv_layer):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, self._norm_layer):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self,
block,
serial_block,
planes,
blocks,
run_in_serial,
stride=1,
B=1):
downsample = None
norm_layer = get_hfta_op_for(nn.BatchNorm2d, B)
assert block.expansion == serial_block.expansion
if stride != 1 or self.inplanes != planes * block.expansion:
if self.B > 0 and run_in_serial[0]:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride, B=0),
nn.BatchNorm2d(planes * block.expansion),
)
else:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride, B=B),
norm_layer(planes * block.expansion,
track_running_stats=self.track_running_stats),
)
layers = []
if self.B > 0 and run_in_serial[0]:
current_block = serial_block(self.inplanes,
planes,
stride,
downsample,
nn.BatchNorm2d,
B=B,
track_running_stats=self.track_running_stats)
self.unfused_layers.append(current_block)
else:
current_block = block(self.inplanes,
planes,
stride,
downsample,
norm_layer,
B=B,
track_running_stats=self.track_running_stats)
layers.append(current_block)
self.inplanes = planes * block.expansion
for i in range(1, blocks):
if self.B > 0 and run_in_serial[i]:
current_block = serial_block(
self.inplanes,
planes,
norm_layer=nn.BatchNorm2d,
B=B,
track_running_stats=self.track_running_stats)
self.unfused_layers.append(current_block)
else:
current_block = block(self.inplanes,
planes,
norm_layer=norm_layer,
B=B,
track_running_stats=self.track_running_stats)
layers.append(current_block)
return nn.Sequential(*layers)
def forward(self, x):
x = self.convBlock(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.B > 0:
x = torch.flatten(x, 2)
x = x.transpose(0, 1)
else:
x = torch.flatten(x, 1)
x = self.fc(x)
if self.B > 0:
output = F.log_softmax(x, dim=2)
else:
output = F.log_softmax(x, dim=1)
return output
def get_unfused_parameters(self):
params = [[] for _ in range(self.B)]
for layer in self.unfused_layers:
params = [params[i] + layer.unfused_parameters[i] for i in range(self.B)]
return params
def unfused_to(self, *args, **kwargs):
for layer in self.unfused_layers:
layer.to(*args, **kwargs)
def snatch_parameters(self, others, b):
self.conv1.snatch_parameters(others.conv1, b)
self.bn1.snatch_parameters(others.bn1, b)
sequence_snatch_parameters(self.layer1, others.layer1, b)
sequence_snatch_parameters(self.layer2, others.layer2, b)
sequence_snatch_parameters(self.layer3, others.layer3, b)
sequence_snatch_parameters(self.layer4, others.layer4, b)
self.fc.snatch_parameters(others.fc, b)
def init_load(self, file_names):
if self.B == 0:
self.load_state_dict(torch.load(file_names[0]).state_dict())
else:
assert self.B == len(file_names)
for i, file_name in enumerate(file_names):
others = torch.load(file_name)
self.snatch_parameters(others, i)
def sequence_snatch_parameters(seq: nn.Sequential, others: nn.Sequential, b):
others_dict = {}
for name, layer in others.named_children():
others_dict[name] = layer
for name, layer in seq.named_children():
others_layer = others_dict[name]
if isinstance(layer, nn.Sequential):
sequence_snatch_parameters(layer, others_layer, b)
else:
layer.snatch_parameters(others_layer, b)
def Resnet18(**kwargs):
return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
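if __name__ == '__main__':
  # Smoke-test sketch, assuming HFTA's fused ops consume the [N, B, C, H, W]
  # layout used by the serial blocks above: fuse B=4 model instances and run
  # one forward pass on CIFAR-sized inputs.
  B, N = 4, 8
  model = Resnet18(num_classes=10, B=B)
  x = torch.randn(N, B, 3, 32, 32)
  out = model(x)  # [B, N, num_classes] log-probabilities
  print(out.shape)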
|
11533933
|
from __future__ import (absolute_import, division, print_function)
import logging
import neovim
from . import check_lldb
__metaclass__ = type # pylint: disable=invalid-name
if not check_lldb.probe():
logging.getLogger(__name__).critical('LLDB could not be imported!')
# ImportError will be raised in Controller import below.
# pylint: disable=wrong-import-position
from .controller import Controller, EventLoopError # NOQA
from .vim_x import VimX # NOQA
# pylint: enable=wrong-import-position
@neovim.plugin # pylint: disable=too-few-public-methods
class Middleman:
def __init__(self, vim):
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.INFO)
self.ctrl = Controller(VimX(vim))
self.ctrl.start()
if self.ctrl.vimx._vim_test: # pylint: disable=protected-access
print("Note: `:LL-` commands are not bound with this test instance")
else:
vim.command('call lldb#remote#init(%d)' % vim.channel_id)
# The only interface that is predefined in the remote plugin manifest file.
# The first execution of `:LLsession` initializes the remote part of the plugin.
@neovim.command('LLsession', nargs='+', complete='customlist,lldb#session#complete')
def _session(self, args):
self.ctrl.safe_call(self.ctrl.session.handle, args)
@neovim.rpc_export('mode')
def _mode(self, mode):
self.ctrl.safe_call(self.ctrl.session.mode_setup, [mode])
@neovim.rpc_export('exec')
def _exec(self, *args):
if args[0] in ['di', 'dis', 'disassemble']:
self.ctrl.safe_call(self.ctrl.change_buffer_cmd, ['disassembly', ' '.join(args)])
elif args[0] in ['bt', '_regexp-bt']:
self.ctrl.safe_call(self.ctrl.change_buffer_cmd, ['backtrace', ' '.join(args)])
else:
self.ctrl.safe_execute(args)
if args[0] == 'help':
self.ctrl.vimx.command('drop [lldb]logs')
@neovim.rpc_export('stdin')
def _stdin(self, strin):
self.ctrl.safe_call(self.ctrl.put_stdin, [strin])
@neovim.rpc_export('exit')
def _exit(self):
self.ctrl.safe_exit()
@neovim.rpc_export('complete', sync=True)
def _complete(self, arg, line, pos):
# FIXME user-customizable timeout?
try:
return self.ctrl.safe_call(self.ctrl.complete_command,
[arg, line, pos], True, timeout=3)
except EventLoopError as e:
            self.logger.warning("%s on %s | %s", str(e), repr(line[:pos]), repr(line[pos:]))
return []
@neovim.rpc_export('get_modes', sync=True)
def _get_modes(self):
try:
return self.ctrl.safe_call(self.ctrl.session.get_modes,
[], True, timeout=1)
except EventLoopError as e:
            self.logger.warning(str(e))
return []
@neovim.rpc_export('select_thread_and_frame')
def _select_thread_and_frame(self, thread_and_frame_idx):
if thread_and_frame_idx[0]:
self.ctrl.safe_execute(['thread', 'select', thread_and_frame_idx[0]])
if thread_and_frame_idx[1]:
self.ctrl.safe_execute(['frame', 'select', thread_and_frame_idx[1]])
@neovim.rpc_export('btswitch')
def _btswitch(self):
self.ctrl.safe_call(self.ctrl.do_btswitch)
@neovim.rpc_export('breakswitch')
def _breakswitch(self, bufnr, line):
self.ctrl.safe_call(self.ctrl.do_breakswitch, [bufnr, line])
@neovim.rpc_export('breakdelete')
def _breakdelete(self, bp_id):
self.ctrl.safe_call(self.ctrl.do_breakdelete, [bp_id])
@neovim.rpc_export('refresh')
def _refresh(self):
self.ctrl.safe_call(self.ctrl.update_buffers)
@neovim.rpc_export('watchswitch')
def _watchpoint(self, var_name):
pass # TODO create watchpoint from locals pane
|
11533979
|
import os
import re
import json
import tqdm
import utils
import torch
import random
import sqlite3
import converter
import argparse
import itertools
import embeddings as E
import preprocess_nl2sql_cosql as preprocess_nl2sql
from vocab import Vocab
from collections import defaultdict, Counter
from transformers import DistilBertTokenizer
from eval_scripts import evaluation
from preprocess_sql2nl import SQLDataset as Base, BERT_MODEL
# ValueAlignmentException and QueryBuildError are caught below; they are
# assumed to be defined alongside the nl2sql preprocessing code.
from preprocess_nl2sql_cosql import ValueAlignmentException, QueryBuildError
from nltk.stem.porter import PorterStemmer
import editsql_preprocess
import editsql_postprocess
class SQLDataset(Base):
@classmethod
def build_contexts(cls, query_norm_toks, prev_query_toks, g_values, db, bert, max_lim=512):
columns = []
for table_id, (to, t) in enumerate(zip(db['table_names_original'] + ['NULL'], db['table_names'] + ['NULL'])):
# insert a NULL table at the end
columns += [{'oname': '*', 'name': '*', 'type': 'all', 'key': '{}.*'.format(to).replace('NULL.', '').lower(), 'table_name': t.lower()}]
keys = set(db['primary_keys'])
for a, b in db['foreign_keys']:
keys.add(a)
keys.add(b)
for i, ((tid, co), (_, c), ct) in enumerate(zip(db['column_names_original'], db['column_names'], db['column_types'])):
ct = ct if i not in keys else 'key'
if tid == table_id:
columns.append({
'oname': co, 'name': c, 'type': ct,
'key': '{}.{}'.format(to, co).lower(),
'table_name': t.lower(),
})
key2col = {col['key']: col for col in columns}
question_context = [bert.cls_token]
for t in prev_query_toks:
if t in key2col:
col = key2col[t]
question_context.extend(bert.tokenize('[ {} {} : {} ]'.format(col['type'], col['table_name'], col['name'])))
else:
question_context.extend(bert.tokenize(t))
question_context.append(bert.sep_token)
for t in query_norm_toks:
if t in key2col:
col = key2col[t]
question_context.extend(bert.tokenize('[ {} {} : {} ]'.format(col['type'], col['table_name'], col['name'])))
else:
question_context.extend(bert.tokenize(t))
question_context.append(bert.sep_token)
for v in g_values:
question_context.extend(bert.tokenize(' '.join(v)))
question_context.append(';')
if question_context[-1] == ';':
question_context[-1] = bert.sep_token
if len(question_context) > max_lim:
raise Exception('question context of {} > {} is too long!'.format(len(question_context), max_lim))
return question_context, columns
@classmethod
def make_example(cls, ex, bert, utt_voc, conv, train=False):
db_id = ex['db_id']
ex['query_toks'], ex['query_toks_no_value'] = preprocess_nl2sql.SQLDataset.tokenize_query(ex['query'])
invalid = False
try:
# normalize query
query_norm = conv.convert_tokens(ex['query_toks'], ex['query_toks_no_value'], db_id)
except Exception as e:
print('preprocessing error')
print(ex['query'])
raise
if query_norm is None:
return None
query_norm_toks = query_norm.split()
        query_recov = g_sql = g_values = None
try:
query_recov = conv.recover(query_norm, db_id)
em, g_sql, r_sql = conv.match(ex['query'], query_recov, db_id)
if not em:
invalid = True
g_values = cls.align_values(ex['query_toks_no_value'], ex['query_toks'])
except ValueAlignmentException as e:
print(ex['query'])
print(repr(e))
invalid = True
except QueryBuildError as e:
print(ex['query'])
print(repr(e))
invalid = True
except Exception as e:
print(e)
invalid = True
raise
# make utterance
question_toks = cls.tokenize_question(ex['utterance'].split(), bert)
# print(bert.convert_tokens_to_string(question_toks))
if ex['prev'] is not None:
prev_query_toks, prev_query_toks_no_value = preprocess_nl2sql.SQLDataset.tokenize_query(ex['prev']['query'])
prev_query_norm = conv.convert_tokens(prev_query_toks, prev_query_toks_no_value, db_id)
if prev_query_norm is None:
prev_query_norm = 'none'
else:
prev_query_norm = 'none'
# encode tables
try:
question_context, columns = cls.build_contexts(query_norm_toks, prev_query_norm.split(), g_values, conv.database_schemas[db_id], bert)
except Exception as e:
print(e)
return None
# print(bert.convert_tokens_to_string(question_context))
new = dict(
id=ex['id'],
query_norm=query_norm,
prev_query_norm=prev_query_norm,
columns=columns,
db_id=db_id,
question=ex['utterance'],
g_question_toks=question_toks,
g_sql=g_sql,
query=ex['query'],
g_values=g_values,
question_context=question_context,
invalid=invalid,
cands_question=cls.make_column_cands(question_context),
)
if train and not invalid:
new['sup_question'] = cls.make_sup_question(question_toks, new['cands_question'], bert, utt_voc)
# print(new['sup_question']['column_toks'])
return new
@classmethod
def from_file(cls, root, dspider, dcache, debug=False):
train_database, dev_database = editsql_preprocess.read_db_split(dspider)
conv = converter.Converter(os.path.join(dspider, 'tables.json'))
splits = {}
for k in ['train', 'dev']:
with open(os.path.join(root, '{}.json'.format(k)), 'rb') as f:
splits[k] = []
for ex in json.load(f):
splits[k].append(ex)
if debug and len(splits[k]) > 100:
break
tokenizer = DistilBertTokenizer.from_pretrained(BERT_MODEL, cache_dir=dcache)
utt_voc = Vocab(['PAD', 'EOS', 'GO'])
# make contexts and populate vocab
for s, data in splits.items():
proc = []
for i, ex in enumerate(tqdm.tqdm(data, desc='preprocess {}'.format(s))):
for turn_i, turn in enumerate(ex['interaction']):
turn['id'] = '{}/{}:{}'.format(ex['database_id'], i, turn_i)
turn['db_id'] = ex['database_id']
turn['prev'] = ex['interaction'][turn_i-1] if turn_i > 0 else None
new = cls.make_example(turn, tokenizer, utt_voc, conv, train=s=='train')
if new is not None and (s != 'train' or not new['invalid']):
proc.append(new)
splits[s] = proc
# make candidate list using vocab
for s, data in splits.items():
for ex in data:
ex['cands_question'] = cls.make_cands(ex, utt_voc)
splits[s] = data
# make pointers for training data
for ex in splits['train']:
ex['pointer_question'] = cls.make_question_pointer(ex['sup_question'], ex['cands_question'], utt_voc)
# look up pretrained word embeddings
emb = E.ConcatEmbedding([E.GloveEmbedding(), E.KazumaCharEmbedding()], default='zero')
utt_emb = torch.tensor([emb.emb(w) for w in utt_voc._index2word])
ext = dict(utt_voc=utt_voc, utt_emb=utt_emb)
return splits, ext
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true')
parser.add_argument('--data', default='cosql')
args = parser.parse_args()
proc = SQLDataset.from_file(os.path.join('data', args.data), os.path.join('data', 'spider'), 'cache', debug=args.debug)
torch.save(proc, 'cache/data_sql2nl_sparc_cosql.debug.pt' if args.debug else 'cache/data_sql2nl_sparc_cosql.pt')
|
11533982
|
from collections import OrderedDict, defaultdict
from itertools import groupby
import xlsxwriter
import StringIO
from datetime import datetime
from dateutil.parser import parse
from sqlalchemy.sql import func
from sqlalchemy.types import Integer
from ..analysis import BiasCalculator
from ..models import Document, AnalysisNature, db
class XLSXExportBuilder:
def __init__(self, form):
self.form = form
self.formats = {}
# we use these to filter our queries, rather than trying to pull
# complex filter logic into our view queries
self.doc_ids = form.document_ids()
def build(self):
"""
Generate an Excel spreadsheet and return it as a string.
"""
output = StringIO.StringIO()
workbook = xlsxwriter.Workbook(output)
self.formats['date'] = workbook.add_format({'num_format': 'yyyy/mm/dd'})
self.formats['bold'] = workbook.add_format({'bold': True})
self.summary_worksheet(workbook)
self.origin_worksheet(workbook)
self.topic_worksheet(workbook)
if self.form.analysis_nature().nature == AnalysisNature.ELECTIONS:
self.bias_worksheet(workbook)
self.fairness_worksheet(workbook)
if self.form.analysis_nature().nature == AnalysisNature.CHILDREN:
self.child_focus_worksheet(workbook)
self.child_gender_worksheets(workbook)
self.child_race_worksheets(workbook)
self.child_context_worksheet(workbook)
self.child_victimisation_worksheet(workbook)
self.principles_worksheet(workbook)
self.children_worksheet(workbook)
self.documents_worksheet(workbook)
self.sources_worksheet(workbook)
self.utterances_worksheet(workbook)
self.places_worksheet(workbook)
self.keywords_worksheet(workbook)
self.issues_worksheet(workbook)
self.taxonomies_worksheet(workbook)
self.everything_worksheet(workbook)
workbook.close()
output.seek(0)
return output.read()
def summary_worksheet(self, wb):
ws = wb.add_worksheet('summary')
ws.write('D1', 'Generated')
ws.write_datetime('E1', datetime.now(), self.formats['date'])
ws.set_column('E:E', 10)
ws.write('A3', 'Filters', self.formats['bold'])
ws.write('B4', 'from')
ws.write('C4', 'to')
ws.set_column('B:C', 10)
ws.write('A5', 'added')
if self.form.created_from:
ws.write_datetime('B5', parse(self.form.created_from, yearfirst=True, dayfirst=True), self.formats['date'])
if self.form.created_to:
ws.write_datetime('C5', parse(self.form.created_to, yearfirst=True, dayfirst=True), self.formats['date'])
ws.write('A6', 'published')
if self.form.published_from:
ws.write_datetime('B6', parse(self.form.published_from, yearfirst=True, dayfirst=True), self.formats['date'])
if self.form.published_to:
ws.write_datetime('C6', parse(self.form.published_to, yearfirst=True, dayfirst=True), self.formats['date'])
ws.write('A7', 'analysis')
if self.form.analysis_nature():
ws.write('B7', self.form.analysis_nature().name)
ws.write('A8', 'countries')
if self.form.countries():
ws.write('B8', ', '.join(c.name for c in self.form.countries()))
ws.write('A9', 'medium')
media = self.form.media()
if media:
ws.write('B9', ', '.join(x.name for x in media))
ws.write('A10', 'user')
if self.form.user():
ws.write('B10', self.form.user().full_name())
ws.write('A11', 'problems')
if self.form.problems.data:
ws.write('B11', ', '.join(p.short_desc for p in self.form.get_problems()))
ws.write('A12', 'keyword search')
if self.form.q.data:
ws.write('B12', self.form.q.data)
ws.write('A13', 'tags')
if self.form.tags.data:
ws.write('B13', self.form.tags.data)
ws.write('A15', 'Summary', self.formats['bold'])
ws.write('A16', 'articles')
ws.write('B16', self.filter(Document.query).count())
def documents_worksheet(self, wb):
from dexter.models.views import DocumentsView
ws = wb.add_worksheet('raw_documents')
docs = self.filter(db.session.query(DocumentsView).join(Document)).all()
self.write_table(ws, 'Documents', docs)
def sources_worksheet(self, wb):
from dexter.models.views import DocumentsView, DocumentSourcesView
ws = wb.add_worksheet('raw_sources')
tables = OrderedDict()
tables['doc'] = DocumentsView
tables['source'] = DocumentSourcesView
rows = self.filter(db.session
.query(*self.merge_views(tables, ['document_id']))
.join(Document)
.join(DocumentSourcesView)).all()
self.write_table(ws, 'Sources', rows)
def utterances_worksheet(self, wb):
from dexter.models.views import PersonUtterancesView
ws = wb.add_worksheet('quotations')
rows = self.filter(db.session.query(PersonUtterancesView).join(Document)).all()
self.write_table(ws, 'Quotations', rows)
def issues_worksheet(self, wb):
from dexter.models.views import DocumentsView, DocumentIssuesView
ws = wb.add_worksheet('issues')
tables = OrderedDict()
tables['doc'] = DocumentsView
tables['issues'] = DocumentIssuesView
rows = self.filter(db.session
.query(*self.merge_views(tables, ['document_id']))
.join(Document)
.join(DocumentIssuesView))\
.filter(DocumentIssuesView.c.issue != None)\
.all() # noqa
self.write_table(ws, 'Issues', rows)
def keywords_worksheet(self, wb):
from dexter.models.views import DocumentKeywordsView
from dexter.models import DocumentKeyword
ws = wb.add_worksheet('raw_keywords')
# only get those that are better than the avg relevance
subq = db.session.query(
DocumentKeyword.doc_id,
func.avg(DocumentKeyword.relevance).label('avg'))\
.filter(DocumentKeyword.doc_id.in_(self.doc_ids))\
.group_by(DocumentKeyword.doc_id)\
.subquery()
rows = db.session.query(DocumentKeywordsView)\
.join(subq, DocumentKeywordsView.c.document_id == subq.columns.doc_id)\
.filter(DocumentKeywordsView.c.relevance >= subq.columns.avg)\
.all()
self.write_table(ws, 'Keywords', rows)
def taxonomies_worksheet(self, wb):
from dexter.models.views import DocumentTaxonomiesView, DocumentsView
ws = wb.add_worksheet('raw_taxonomies')
tables = OrderedDict()
tables['doc'] = DocumentsView
tables['taxonomies'] = DocumentTaxonomiesView
rows = self.filter(db.session
.query(*self.merge_views(tables, ['document_id']))
.join(Document)
.join(DocumentTaxonomiesView)
.filter(DocumentTaxonomiesView.c.label != None))\
.all() # noqa
self.write_table(ws, 'Taxonomies', rows)
def fairness_worksheet(self, wb):
from dexter.models.views import DocumentsView, DocumentFairnessView
ws = wb.add_worksheet('fairness')
tables = OrderedDict()
tables['doc'] = DocumentsView
tables['fairness'] = DocumentFairnessView
rows = self.filter(db.session
.query(*self.merge_views(tables, ['document_id']))
.join(Document)
.join(DocumentFairnessView)).all()
self.write_table(ws, 'Fairness', rows)
def principles_worksheet(self, wb):
from dexter.models.views import DocumentsView, DocumentPrinciplesView
ws = wb.add_worksheet('principles')
# supported
rows = self.filter(
db.session.query(
DocumentPrinciplesView.c.principle_supported,
func.count(1).label('count')
)
.join(Document)
.filter(DocumentPrinciplesView.c.principle_supported != None) # noqa
.group_by('principle_supported')
).all()
rownum = 3 + self.write_table(ws, 'PrincipleSupported', rows)
# violated
rows = self.filter(
db.session.query(
DocumentPrinciplesView.c.principle_violated,
func.count(1).label('count')
)
.join(Document)
.filter(DocumentPrinciplesView.c.principle_violated != None) # noqa
.group_by('principle_violated')
).all()
self.write_table(ws, 'PrincipleViolated', rows, rownum=rownum)
# raw data
ws = wb.add_worksheet('raw_principles')
tables = OrderedDict()
tables['doc'] = DocumentsView
tables['principles'] = DocumentPrinciplesView
rows = self.filter(
db.session
.query(*self.merge_views(tables, ['document_id']))
.join(Document)
.join(DocumentPrinciplesView)).all()
self.write_table(ws, 'Principles', rows)
def origin_worksheet(self, wb):
from dexter.models.views import DocumentsView
ws = wb.add_worksheet('origins')
query = db.session.query(
DocumentsView.c.origin,
func.count(1).label('count')
)\
.join(Document)\
.group_by('origin')
rows = self.filter(query).all()
rownum = 3 + self.write_table(ws, 'Origins', rows)
query = db.session.query(
DocumentsView.c.origin_group,
func.count(1).label('count')
)\
.join(Document)\
.group_by('origin_group')
rows = self.filter(query).all()
self.write_table(ws, 'OriginGroups', rows, rownum=rownum)
def topic_worksheet(self, wb):
from dexter.models.views import DocumentsView
ws = wb.add_worksheet('topics')
# topic groups
rows = self.filter(
db.session.query(
DocumentsView.c.topic_group,
func.count(1).label('count')
)
.join(Document)
.group_by('topic_group')).all()
rownum = 3 + self.write_table(ws, 'TopicGroups', rows)
# topics
rows = self.filter(
db.session.query(
DocumentsView.c.topic,
func.count(1).label('count')
)
.join(Document)
.group_by('topic')).all()
self.write_table(ws, 'Topics', rows, rownum=rownum)
def children_worksheet(self, wb):
from dexter.models.views import DocumentsView, DocumentChildrenView
ws = wb.add_worksheet('raw_children')
tables = OrderedDict()
tables['doc'] = DocumentsView
tables['children'] = DocumentChildrenView
rows = self.filter(db.session
.query(*self.merge_views(tables, ['document_id']))
.join(Document)
.join(DocumentChildrenView)).all()
self.write_table(ws, 'Children', rows)
def child_victimisation_worksheet(self, wb):
from dexter.models.views import DocumentChildrenView
ws = wb.add_worksheet('child_secondary_victimisation')
rows = self.filter(
db.session.query(
func.sum(DocumentChildrenView.c.secondary_victim_source == 'secondary-victim-source', type_=Integer).label('secondary_victim_source'),
func.sum(DocumentChildrenView.c.secondary_victim_identified == 'secondary-victim-identified', type_=Integer).label('secondary_victim_identified'),
func.sum(DocumentChildrenView.c.secondary_victim_victim_of_abuse == 'secondary-victim-abused', type_=Integer).label('secondary_victim_victim_of_abuse'),
func.sum(DocumentChildrenView.c.secondary_victim_source_identified_abused == 'secondary-victim-source-identified-abused', type_=Integer).label('secondary_victim_source_identified_abused'),
)
.join(Document)).all()
if not rows:
return
d = rows[0]._asdict()
data = [[k, d[k]] for k in sorted(d.keys(), key=len)]
ws.add_table(0, 0, len(data), 1, {
'name': 'ChildSecondaryVictimisation',
'data': data,
'columns': [
{'header': ''},
{'header': 'count'},
]
})
def child_focus_worksheet(self, wb):
from dexter.models.views import DocumentChildrenView
query = db.session.query(
DocumentChildrenView.c.child_focused,
func.count(1).label('count')
)\
.join(Document)\
.group_by('child_focused')
rows = self.filter(query).all()
ws = wb.add_worksheet('child_focused')
self.write_table(ws, 'ChildFocused', rows)
def child_gender_worksheets(self, wb):
"""
For documents with child sources, give various breakdowns by gender of
those children. All reports are source focused, providing counts
of *sources* in each category.
"""
from dexter.models.views import DocumentsView, DocumentSourcesView
# genders
query = db.session.query(
DocumentSourcesView.c.gender,
func.count(DocumentSourcesView.c.document_source_id).label('count')
)\
.join(Document)\
.filter(DocumentSourcesView.c.source_type == 'child')\
.group_by('gender')
rows = self.filter(query).all()
ws = wb.add_worksheet('child_genders')
self.write_table(ws, 'ChildGenders', rows)
rownum = len(rows) + 4
# topics by gender
query = self.filter(
db.session.query(
DocumentsView.c.topic_group,
DocumentSourcesView.c.gender,
func.count(DocumentSourcesView.c.document_source_id).label('count')
)
.join(Document)
.join(DocumentSourcesView, DocumentsView.c.document_id == DocumentSourcesView.c.document_id)
.filter(DocumentSourcesView.c.source_type == 'child')
.group_by('topic_group', 'gender')
.order_by('topic_group'))
rownum += 3 + self.write_summed_table(ws, 'ChildGenderTopics', query, rownum=rownum)
# origins by gender
query = self.filter(
db.session.query(
DocumentsView.c.origin,
DocumentSourcesView.c.gender,
func.count(DocumentSourcesView.c.document_source_id).label('count')
)
.join(Document)
.join(DocumentSourcesView, DocumentsView.c.document_id == DocumentSourcesView.c.document_id)
.filter(DocumentSourcesView.c.source_type == 'child')
.group_by('origin', 'gender')
.order_by('origin'))
rownum += 3 + self.write_summed_table(ws, 'ChildGenderOrigins', query, rownum=rownum)
# roles by gender
query = self.filter(
db.session.query(
DocumentSourcesView.c.role,
DocumentSourcesView.c.gender,
func.count(DocumentSourcesView.c.document_source_id).label('count')
)
.join(Document)
.filter(DocumentSourcesView.c.source_type == 'child')
.group_by('role', 'gender')
.order_by('role'))
rownum += 3 + self.write_summed_table(ws, 'ChildGenderRoles', query, rownum=rownum)
# ages by gender
query = self.filter(
db.session.query(
DocumentSourcesView.c.source_age,
DocumentSourcesView.c.gender,
func.count(DocumentSourcesView.c.document_source_id).label('count')
)
.join(Document)
.filter(DocumentSourcesView.c.source_type == 'child')
.group_by('source_age', 'gender')
.order_by('source_age'))
rownum += 3 + self.write_summed_table(ws, 'ChildGenderAges', query, rownum=rownum)
# quoted-vs-non by gender
query = self.filter(
db.session.query(
DocumentSourcesView.c.quoted,
DocumentSourcesView.c.gender,
func.count(DocumentSourcesView.c.document_source_id).label('count')
)
.join(Document)
.filter(DocumentSourcesView.c.source_type == 'child')
.group_by('quoted', 'gender')
.order_by('quoted'))
self.write_summed_table(ws, 'ChildGenderQuoted', query, rownum=rownum)
def child_race_worksheets(self, wb):
"""
For documents with child sources, give various breakdowns by race of
those children. All reports are source focused, providing counts
of *sources* in each category.
"""
from dexter.models.views import DocumentsView, DocumentSourcesView
# races
rows = self.filter(
db.session.query(
DocumentSourcesView.c.race,
func.count(DocumentSourcesView.c.document_source_id).label('count')
)
.join(Document)
.filter(DocumentSourcesView.c.source_type == 'child')
.group_by('race')).all()
ws = wb.add_worksheet('child_races')
rownum = 3 + self.write_table(ws, 'ChildRace', rows)
# topics by race
query = self.filter(
db.session.query(
DocumentsView.c.topic_group,
DocumentSourcesView.c.race,
func.count(DocumentSourcesView.c.document_source_id).label('count')
)
.join(Document)
.join(DocumentSourcesView, DocumentsView.c.document_id == DocumentSourcesView.c.document_id)
.filter(DocumentSourcesView.c.source_type == 'child')
.group_by('topic_group', 'race')
.order_by('topic_group'))
self.write_summed_table(ws, 'RaceTopics', query, rownum=rownum)
def child_context_worksheet(self, wb):
from dexter.models.views import DocumentChildrenView
rows = self.filter(
db.session.query(
func.sum(DocumentChildrenView.c.basic_context == 'basic-context', type_=Integer).label('basic_context'),
func.sum(DocumentChildrenView.c.causes_mentioned == 'causes-mentioned', type_=Integer).label('causes_mentioned'),
func.sum(DocumentChildrenView.c.consequences_mentioned == 'consequences-mentioned', type_=Integer).label('consequences_mentioned'),
func.sum(DocumentChildrenView.c.solutions_offered == 'solutions-offered', type_=Integer).label('solutions_offered'),
func.sum(DocumentChildrenView.c.relevant_policies == 'relevant-policies', type_=Integer).label('relevant_policies'),
func.sum(DocumentChildrenView.c.self_help_offered == 'self-help-offered', type_=Integer).label('self_help_offered'),
)
.join(Document)).all()
if not rows:
return
ws = wb.add_worksheet('child_context')
d = rows[0]._asdict()
data = [[k, d[k]] for k in d.keys()]
ws.add_table(0, 0, len(data), 1, {
'name': 'ChildContext',
'data': data,
'columns': [
{'header': ''},
{'header': 'count'},
]
})
def write_summed_table(self, ws, name, query, rownum=0):
"""
For a query which returns three columns, [A, B, C],
write a table that uses A as row labels, B values as column
labels, and C as counts for each.
The query must return rows ordered by the first column.
Returns number of rows written, including headers and footers.
"""
row_label = query.column_descriptions[0]['name']
# calculate col labels dynamically
col_labels = set()
data = OrderedDict()
for label, rows in groupby(query.all(), lambda r: r[0]):
data[label or '(none)'] = row = defaultdict(int)
for r in rows:
col_label = r[1] or '(none)'
col_labels.add(col_label)
row[col_label] = r[2]
row['total'] += r[2]
# final column labels
col_labels = sorted(list(col_labels)) + ['total']
keys = [row_label] + col_labels
# decompose rows into a list of values
data = [[label] + [r[col] for col in col_labels] for label, r in data.iteritems()]
ws.add_table(rownum, 0, rownum + len(data) + 1, len(keys) - 1, {
'name': name,
'total_row': True,
'columns': [{'header': k, 'total_function': 'sum' if i > 0 else None} for i, k in enumerate(keys)],
'data': data,
})
# number of rows plus header and footer
return len(data) + 2
def places_worksheet(self, wb):
from dexter.models.views import DocumentsView, DocumentPlacesView
ws = wb.add_worksheet('raw_places')
tables = OrderedDict()
tables['doc'] = DocumentsView
tables['places'] = DocumentPlacesView
rows = self.filter(
db.session
.query(*self.merge_views(tables, ['document_id']))
.join(Document)
.join(DocumentPlacesView)).all()
self.write_table(ws, 'Places', rows)
def everything_worksheet(self, wb):
from dexter.models.views import DocumentsView, DocumentSourcesView, DocumentFairnessView, DocumentPlacesView
ws = wb.add_worksheet('raw_everything')
tables = OrderedDict()
tables['doc'] = DocumentsView
tables['fairness'] = DocumentFairnessView
tables['sources'] = DocumentSourcesView
tables['places'] = DocumentPlacesView
rows = self.filter(
db.session
.query(*self.merge_views(tables, ['document_id']))
.join(Document)
.outerjoin(DocumentFairnessView)
.outerjoin(DocumentSourcesView)
.outerjoin(DocumentPlacesView)).all()
self.write_table(ws, 'Everything', rows)
def bias_worksheet(self, wb):
ws = wb.add_worksheet('bias')
calc = BiasCalculator()
docs = self.filter(calc.get_query()).all()
scores = calc.calculate_bias_scores(docs, key=lambda d: d.medium.group_name())
ws.write(1, 0, 'oppose')
ws.write(2, 0, 'favour')
ws.write(3, 0, 'discrepancy')
ws.write(4, 0, 'parties')
ws.write(5, 0, 'fair')
ws.write(6, 0, 'final score')
for i, score in enumerate(scores):
col = i + 1
ws.write(0, col, score.group)
ws.write(1, col, score.oppose)
ws.write(2, col, score.favour)
ws.write(3, col, score.discrepancy)
ws.write(4, col, score.parties)
ws.write(5, col, score.fair)
ws.write(6, col, score.score)
# key
ws.write(9, 0, 'KEY')
key = [
('Oppose', 'number of stories biased against an entity'),
('Favour', 'number of stories biased in favour of an entity'),
('Discrepancy', 'difference between the number of stories biased and/or favouring an entity (1 is the ideal score)'),
            ('Parties', 'spread of political party coverage (the better the spread of coverage, the better the score; 1 is the ideal score, although media are compared against each other)'),
('Fair', 'percentage of stories that are fair'),
('Final score', 'the total weighting of all the scores above. The closer the score is to 1, the better the performance in terms of fairness. This can be used as a percentage.'),
]
for i, item in enumerate(key):
ws.write(10 + i, 0, item[0])
ws.write(10 + i, 1, item[1])
    def write_table(self, ws, name, rows, keys=None, rownum=0, colnum=0):
        # guard against an empty result set: without it, keys could be None below
        if not rows:
            return 1
        if not keys:
            keys = rows[0].keys()
            data = [list(doc) for doc in rows]
        else:
            data = []
            for row in rows:
                info = row._asdict()
                data.append([info[k] for k in keys])
ws.add_table(rownum, colnum, rownum + len(rows), colnum + len(keys) - 1, {
'name': name,
'columns': [{'header': k} for k in keys],
'data': data,
})
return len(rows) + 1
def filter(self, query):
return query.filter(Document.id.in_(self.doc_ids))
def merge_views(self, tables, singletons=None):
"""
Merge a name-to-table map into an array of
aliased column objects that can be used in a query.
This ensures that if two tables have columns with the
same name, that they get renamed to be unique.
The +singletons+ array is a list of column names
which should only be included once (useful for common PK columns).
"""
singletons = set(singletons or [])
included = set()
# we need to alias columns so they don't clash
cols = []
for alias, table in tables.iteritems():
for col in table.c:
if col.name not in singletons or col.name not in included:
included.add(col.name)
cols.append(col.label('%s_%s' % (alias, col.name)))
return cols
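
# A standalone sketch (not part of the exporter; data is illustrative only) of
# the pivot logic that write_summed_table documents above: ordered
# (row_label, col_label, count) triples become one output row per label, one
# column per distinct col_label, plus a 'total' column.
def _summed_table_demo():
    from collections import OrderedDict, defaultdict
    from itertools import groupby
    triples = [('politics', 'male', 3), ('politics', 'female', 2), ('sport', 'male', 5)]
    col_labels, data = set(), OrderedDict()
    for label, group in groupby(triples, lambda r: r[0]):
        data[label or '(none)'] = row = defaultdict(int)
        for _, col, count in group:
            col = col or '(none)'
            col_labels.add(col)
            row[col] = count
            row['total'] += count
    col_labels = sorted(col_labels) + ['total']
    for label, row in data.items():
        print([label] + [row[c] for c in col_labels])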
|
11533987
|
from PIL import Image, ImageDraw, ImageFilter
im1 = Image.open('data/src/rocket.jpg')
im2 = Image.open('data/src/lena.jpg')
# Basic paste: im2 is pasted onto im1 at the top-left corner, modifying im1 in place.
im1.paste(im2)
im1.save('data/dst/rocket_pillow_paste.jpg', quality=95)
# Reload and paste onto a copy so the original image is left untouched; an optional tuple gives the top-left paste position.
im1 = Image.open('data/src/rocket.jpg')
im2 = Image.open('data/src/lena.jpg')
back_im = im1.copy()
back_im.paste(im2)
back_im.save('data/dst/rocket_pillow_paste.jpg', quality=95)
back_im = im1.copy()
back_im.paste(im2, (100, 50))
back_im.save('data/dst/rocket_pillow_paste_pos.jpg', quality=95)
# A paste position that runs past the canvas is simply clipped.
back_im = im1.copy()
back_im.paste(im2, (400, 100))
back_im.save('data/dst/rocket_pillow_paste_out.jpg', quality=95)
# Build a greyscale ('L') mask; only the white ellipse region of im2 is pasted.
mask_im = Image.new("L", im2.size, 0)
draw = ImageDraw.Draw(mask_im)
draw.ellipse((140, 50, 260, 170), fill=255)
mask_im.save('data/dst/mask_circle.jpg', quality=95)
back_im = im1.copy()
back_im.paste(im2, (0, 0), mask_im)
back_im.save('data/dst/rocket_pillow_paste_mask_circle.jpg', quality=95)
# Blurring the mask feathers the paste edge.
mask_im_blur = mask_im.filter(ImageFilter.GaussianBlur(10))
mask_im_blur.save('data/dst/mask_circle_blur.jpg', quality=95)
back_im = im1.copy()
back_im.paste(im2, (0, 0), mask_im_blur)
back_im.save('data/dst/rocket_pillow_paste_mask_circle_blur.jpg', quality=95)
# An existing image, resized and converted to greyscale, can also serve as the mask.
mask_im = Image.open('data/src/horse.png').resize(im2.size).convert('L')
back_im = im1.copy()
back_im.paste(im2, (100, 50), mask_im)
back_im.save('data/dst/rocket_pillow_paste_mask_horse.jpg', quality=95)
|
11533995
|
import pytest
from channels.testing import WebsocketCommunicator
from .consumers import MyConsumer, Demultiplexer
from .routing import application
def test_consumer_action():
assert hasattr(MyConsumer.incr_counter, 'action_type')
assert MyConsumer.incr_counter.action_type == 'INCREMENT_COUNTER'
@pytest.mark.asyncio
async def test_consumer():
communicator = WebsocketCommunicator(application, "/ws/")
await communicator.connect()
await communicator.send_json_to({
'type': 'INCREMENT_COUNTER',
'payload': 2,
})
received = await communicator.receive_json_from()
assert received == {
'type': 'INCREMENTED_COUNTER',
'payload': 2,
}
await communicator.disconnect()
@pytest.mark.asyncio
async def test_consumer_no_auth():
communicator = WebsocketCommunicator(MyConsumer, "/")
await communicator.connect()
await communicator.send_json_to({
'type': 'INCREMENT_COUNTER',
'payload': 2,
})
received = await communicator.receive_json_from()
assert received == {
'type': 'INCREMENTED_COUNTER',
'payload': 2,
}
await communicator.disconnect()
@pytest.mark.asyncio
async def __test_multiplexer():
communicator = WebsocketCommunicator(Demultiplexer, "/")
await communicator.connect()
await communicator.send_json_to({
'stream': 'redux',
'payload': {
'type': 'INCREMENT_COUNTER',
'payload': 2,
}
})
await communicator.disconnect()
|
11534036
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SignLoss(nn.Module):
def __init__(self, alpha, b=None):
super(SignLoss, self).__init__()
        self.alpha = alpha  # alpha: weight/flag controlling whether the sign loss is applied
        self.register_buffer('b', b)  # register the target sign vector b as a model buffer
self.loss = 0
self.acc = 0
        self.scale_cache = None  # initialized empty; filled by add()
def set_b(self, b):
self.b.copy_(b)
def get_acc(self):
if self.scale_cache is not None:
acc = (torch.sign(self.b.view(-1)) == torch.sign(self.scale_cache.view(-1))).float().mean()
return acc
else:
            raise Exception('scale_cache is None')  # normally unreachable: a scale is cached before the sign loss is read
def get_loss(self):
if self.scale_cache is not None:
            loss = (self.alpha * F.relu(-self.b.view(-1) * self.scale_cache.view(-1) + 0.1)).sum()  # view(-1) flattens both tensors to 1-D
return loss
else:
raise Exception('scale_cache is None')
def add(self, scale):
self.scale_cache = scale
self.loss += self.get_loss()
        self.loss += (0.00001 * scale.view(-1).pow(2).sum())  # L2 regularizer to keep the scale from growing too large
self.acc += self.get_acc()
def reset(self):
self.loss = 0
self.acc = 0
self.scale_cache = None
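
# A minimal usage sketch (illustrative values; uses only the torch imports
# above). `b` is the target sign vector to embed and `scale` stands in for a
# layer's scale parameter whose signs should match `b`.
if __name__ == '__main__':
    b = torch.sign(torch.randn(8))
    scale = nn.Parameter(torch.randn(8))
    sign_loss = SignLoss(alpha=0.1, b=b)
    sign_loss.add(scale)            # caches the scale, accumulates loss and sign accuracy
    sign_loss.loss.backward()       # gradients flow back into `scale`
    print(float(sign_loss.loss), float(sign_loss.acc))
    sign_loss.reset()               # clear accumulators before the next step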
|
11534037
|
import logging
import os
import random
# import sys
from flask import Flask, render_template, request, url_for
from fourlang.corenlp_wrapper import CoreNLPWrapper
from fourlang.utils import draw_dep_graph, draw_text_graph, ensure_dir, get_cfg
from fourlang.dependency_processor import Dependencies
from fourlang.dep_to_4lang import DepTo4lang
from fourlang.dict_to_4lang import DictTo4lang
from fourlang.text_to_4lang import TextTo4lang
from pymachine.utils import MachineTraverser
__LOGLEVEL__ = 'INFO'
class FourlangDemo():
def __init__(self, cfg):
self.cfg = cfg
tmp_root = cfg.get('demo', 'tmp_root')
self.tmp_dir = self.get_tmp_dir_name(tmp_root)
ensure_dir(self.tmp_dir)
self.parser_wrapper = CoreNLPWrapper(self.cfg)
self.dep_to_4lang = DepTo4lang(self.cfg)
self.dict_to_4lang = DictTo4lang(self.cfg)
self.dict_to_4lang.read_dict()
def get_tmp_dir_name(self, tmp_root):
return tmp_root # TODO
def get_dep_table(self, sen_deps):
t = '<table border="1">\n'
for dep in sen_deps:
t += "<tr>\n"
for e in (dep[0], dep[1][0], dep[2][0]):
t += "<td> {0} </td>".format(e)
t += "</tr>\n"
t += '</table>\n'
return t
def dict_to_4lang_demo(self, word, fn='pic', dep_fn='deps'):
if word in self.dep_to_4lang.lexicon.lexicon:
source = '4lang'
elif word in self.dep_to_4lang.lexicon.ext_lexicon:
source = 'ext'
else:
# OOV
return None, None, None, None
machine = self.dep_to_4lang.lexicon.get_machine(word)
pic_path = draw_text_graph({word: machine}, self.tmp_dir, fn=fn)
if source == '4lang':
return source, None, None, os.path.basename(pic_path)
else:
            entry = self.dict_to_4lang.dictionary[word]
definition = entry['senses'][0]['definition']
deps = map(Dependencies.parse_dependency, definition['deps'])
dep_path = draw_dep_graph(deps, self.tmp_dir, dep_fn)
return source, definition['sen'], os.path.basename(dep_path), os.path.basename(pic_path) # nopep8
def text_to_4lang_demo(self, text, expand, fn='pic', dep_fn='deps'):
preproc_sen = TextTo4lang.preprocess_text(text.strip().decode('utf-8'))
deps, corefs, parse_trees = self.parser_wrapper.parse_text(preproc_sen)
words2machines = self.dep_to_4lang.get_machines_from_deps_and_corefs(
deps, corefs)
# TODO
orig_machines = set()
for machine in words2machines.itervalues():
orig_machines |= set(MachineTraverser.get_nodes(
machine, names_only=False, keep_upper=True))
# orig_machines = set([m.printname() for m in words2machines.values()])
# logging.info(u'orig_machines: {0}'.format(
# [m.printname() for m in orig_machines]))
if expand:
self.dep_to_4lang.lexicon.expand(words2machines)
pic_path = draw_text_graph(
words2machines, self.tmp_dir, fn=fn,
orig_machines=orig_machines)
dep_path = draw_dep_graph(deps[0], self.tmp_dir, dep_fn)
# deps_table = self.get_dep_table(deps[0])
return os.path.basename(dep_path), os.path.basename(pic_path)
    def backend_test(self):
        _, u_pic_fn = self.text_to_4lang_demo(
            'A man stands in the door', False, 'test_unexpanded')
        logging.info('unexpanded pic drawn to {0}'.format(u_pic_fn))
        _, e_pic_fn = self.text_to_4lang_demo(
            'A man stands in the door', True, 'test_expanded')
        logging.info('expanded pic drawn to {0}'.format(e_pic_fn))
logging.basicConfig(
level=__LOGLEVEL__,
format="%(asctime)s : " +
"%(module)s (%(lineno)s) - %(levelname)s - %(message)s")
cfg_file = os.path.join(os.environ['FOURLANGPATH'], 'conf/demo.cfg')
cfg = get_cfg(cfg_file)
demo = FourlangDemo(cfg)
app = Flask(__name__, static_folder=demo.tmp_dir)
app.debug = True
@app.route('/', methods=['GET'])
def test():
return render_template('test.html')
@app.route('/dfl', methods=['POST'])
def dfl_demo():
word = request.form['word']
source, sen, dep_fn, pic_fn = demo.dict_to_4lang_demo(word)
if source is None:
return 'oov'
# return render_template('oov.html', word=word)
pic_url = url_for(
'static', filename=pic_fn, nocache=random.randint(0, 9999))
if source == '4lang':
return render_template('dfl_4lang.html', word=word, img_url=pic_url)
elif source == 'ext':
dep_url = url_for(
'static', filename=dep_fn, nocache=random.randint(0, 9999))
return render_template(
'dfl_ext.html', word=word, img_url=pic_url, sen=sen,
dep_url=dep_url)
else:
assert False
@app.route('/tfl', methods=['POST'])
def tfl_demo():
sen = request.form['text']
dep_fn, pic_fn = demo.text_to_4lang_demo(sen, True)
pic_url = url_for(
'static', filename=pic_fn, nocache=random.randint(0, 9999))
dep_url = url_for(
'static', filename=dep_fn, nocache=random.randint(0, 9999))
return render_template(
'tfl.html', img_url=pic_url, sen=sen, dep_url=dep_url)
if __name__ == "__main__":
app.debug = True
app.run(host='0.0.0.0')
|
11534071
|
import asyncio
import click
from lib.data_fetcher import DataFetcher
@click.command()
@click.argument(
'config',
type=click.Path(
exists=True,
readable=True,
)
)
def fetch_ohlcv_data(config):
data_fetcher = DataFetcher(config)
loop = asyncio.get_event_loop()
# wait for all tasks to be finished
loop.run_until_complete(data_fetcher.run())
if __name__ == '__main__':
fetch_ohlcv_data()
|
11534091
|
class PythonTest():
    def twice(self, array):
        result = [0 for i in range(len(array))]
        i = 0
        for x in array:
            result[i] = x * 2
            i += 1
        return result
|
11534117
|
import decimal
import re
from enum import Enum
from typing import Optional, Union
from boto3.dynamodb.conditions import Key
from boto3.dynamodb.types import TypeSerializer, TypeDeserializer
from botocore.exceptions import ClientError
from typhoon.aws.boto3_helper import boto3_session
from typhoon.aws.exceptions import TyphoonResourceNotFoundError
"""Module containing low-level functions to interact with DynamoDB
In general all functions take a dynamodb client or resource.
We do not worry about creating those resources/clients in this layer.
"""
class DynamoDBConnectionType(Enum):
RESOURCE = 'resource'
CLIENT = 'client'
def dynamodb_connection(
aws_profile: Optional[str] = None,
conn_type: Union[str, DynamoDBConnectionType] = 'resource',
aws_region: Optional[str] = None,
endpoint_url: Optional[str] = None,
):
session = boto3_session(aws_profile)
aws_region = aws_region or getattr(session, 'region_name', None)
extra_params = {'region_name': aws_region} if aws_region else {}
    endpoint_url = endpoint_url if endpoint_url and not re.match(r'dynamodb\.[\w-]+\.amazonaws\.com', endpoint_url) else None
if endpoint_url:
extra_params = {
'aws_access_key_id': 'dummy',
'aws_secret_access_key': 'dummy',
'endpoint_url': endpoint_url,
**extra_params,
}
if conn_type is DynamoDBConnectionType.CLIENT or conn_type == 'client':
ddb = session.client('dynamodb', **extra_params)
elif conn_type is DynamoDBConnectionType.RESOURCE or conn_type == 'resource':
ddb = session.resource('dynamodb', **extra_params)
else:
raise ValueError(f'Expected conn_type as client or resource, found: {conn_type}')
return ddb
def scan_dynamodb_table(ddb_resource, table_name: str):
table = ddb_resource.Table(table_name)
response = table.scan()
data = response['Items']
while 'LastEvaluatedKey' in response:
response = table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])
data.extend(response['Items'])
return data
def dynamodb_table_exists(ddb_client, table_name: str):
existing_tables = ddb_client.list_tables()['TableNames']
return table_name in existing_tables
def create_dynamodb_table(
ddb_client,
table_name: str,
primary_key: str,
range_key: Union[str, None] = None, # May have other types in the future
read_capacity_units: int = 1,
write_capacity_units: int = 1,
):
key_schema = [
{
'AttributeName': primary_key,
'KeyType': 'HASH'
},
]
attribute_definitions = [
{
'AttributeName': primary_key,
'AttributeType': 'S'
},
]
if range_key:
key_schema.append({
'AttributeName': range_key,
'KeyType': 'RANGE'
})
if isinstance(range_key, str):
attribute_type = 'S'
else:
raise ValueError(f'Expected range key to be in [str]. Found: {type(range_key)}')
attribute_definitions.append({
'AttributeName': range_key,
'AttributeType': attribute_type
})
table = ddb_client.create_table(
TableName=table_name,
KeySchema=key_schema,
AttributeDefinitions=attribute_definitions,
ProvisionedThroughput={
'ReadCapacityUnits': read_capacity_units,
'WriteCapacityUnits': write_capacity_units
}
)
return table
def dynamodb_put_item(ddb_client, table_name: str, item: dict):
serializer = TypeSerializer()
serialized_item = serializer.serialize(item)['M']
ddb_client.put_item(
TableName=table_name,
Item=serialized_item)
def dynamodb_get_item(ddb_client, table_name: str, key_name: str, key_value: str):
try:
response = ddb_client.get_item(
TableName=table_name,
Key={key_name: {'S': key_value}}
)
except ddb_client.exceptions.ResourceNotFoundException:
raise TyphoonResourceNotFoundError(f'Table "{table_name}" does not exist in DynamoDB')
if 'Item' not in response:
raise TyphoonResourceNotFoundError(
f'Item {key_name}="{key_value}" does not exist in DynamoDB table {table_name}')
deserializer = TypeDeserializer()
return {k: deserializer.deserialize(v) for k, v in response['Item'].items()}
def dynamodb_query_item(
ddb_resource,
table_name: str,
partition_key_name: str,
partition_key_value: str,
):
try:
table = ddb_resource.Table(table_name)
response = table.query(KeyConditionExpression=Key(partition_key_name).eq(partition_key_value))
except ClientError:
raise TyphoonResourceNotFoundError(f'Table "{table_name}" does not exist in DynamoDB')
if 'Items' not in response or not response['Items']:
raise TyphoonResourceNotFoundError(
f'Item {partition_key_name}="{partition_key_value}" does not exist in DynamoDB table {table_name}')
deserializer = TypeDeserializer()
return {k: deserializer.deserialize(v) for k, v in response['Items'][0].items()}
def dynamodb_delete_item(ddb_client, table_name, key_name: str, key_value: str):
ddb_client.delete_item(
TableName=table_name,
Key={key_name: {'S': key_value}}
)
def replace_decimals(obj):
if isinstance(obj, list):
for i in range(len(obj)):
obj[i] = replace_decimals(obj[i])
return obj
elif isinstance(obj, dict):
for k, v in obj.items():
obj[k] = replace_decimals(v)
return obj
elif isinstance(obj, set):
return set(replace_decimals(i) for i in obj)
elif isinstance(obj, decimal.Decimal):
if obj % 1 == 0:
return int(obj)
else:
return float(obj)
else:
return obj
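
# A minimal usage sketch of this module (assumes a local DynamoDB instance at
# the illustrative endpoint below; table and key names are made up).
if __name__ == '__main__':
    client = dynamodb_connection(conn_type='client', endpoint_url='http://localhost:8000')
    if not dynamodb_table_exists(client, 'example'):
        create_dynamodb_table(client, 'example', primary_key='id')
    dynamodb_put_item(client, 'example', {'id': 'k1', 'payload': 'hello'})
    print(dynamodb_get_item(client, 'example', key_name='id', key_value='k1'))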
|
11534125
|
import numpy as np
import matplotlib.pyplot as plt
# Make sure that caffe is on the python path:
caffe_root = '../' # this file is expected to be in {caffe_root}/examples
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
import os
if not os.path.isfile(caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'):
    print("Pre-trained CaffeNet model is missing; download it before running this script.")
caffe.set_mode_cpu()
net = caffe.Net(caffe_root + 'models/bvlc_reference_caffenet/deploy.prototxt',
caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',
caffe.TEST)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1))
transformer.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # mean pixel
transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB
# set net to batch size of 50
net.blobs['data'].reshape(50,3,227,227)
net.blobs['data'].data[...] = transformer.preprocess('data', caffe.io.load_image(caffe_root + 'examples/images/cat.jpg'))
out = net.forward()
print("Predicted class is #{}.".format(out['prob'][0].argmax()))
plt.imshow(transformer.deprocess('data', net.blobs['data'].data[0]))
# load labels
imagenet_labels_filename = caffe_root + 'data/ilsvrc12/synset_words.txt'
# assumes data/ilsvrc12/synset_words.txt has already been fetched
labels = np.loadtxt(imagenet_labels_filename, str, delimiter='\t')
# sort top k predictions from softmax output
top_k = net.blobs['prob'].data[0].flatten().argsort()[-1:-6:-1]
print(labels[top_k])
# CPU mode
net.forward() # call once for allocation
#caffe.set_device(0)
#caffe.set_mode_gpu()
#net.forward() # call once for allocation
[(k, v.data.shape) for k, v in net.blobs.items()]
[(k, v[0].data.shape) for k, v in net.params.items()]
# take an array of shape (n, height, width) or (n, height, width, channels)
# and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)
def vis_square(data, padsize=1, padval=0):
data -= data.min()
data /= data.max()
# force the number of filters to be square
n = int(np.ceil(np.sqrt(data.shape[0])))
padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
# tile the filters into an image
data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
plt.imshow(data)
plt.show()
# the parameters are a list of [weights, biases]
filters = net.params['conv1'][0].data
vis_square(filters.transpose(0, 2, 3, 1))
feat = net.blobs['conv1'].data[0, :36]
vis_square(feat, padval=1)
filters = net.params['conv2'][0].data
vis_square(filters[:48].reshape(48**2, 5, 5))
feat = net.blobs['conv2'].data[0, :36]
vis_square(feat, padval=1)
feat = net.blobs['conv3'].data[0]
vis_square(feat, padval=0.5)
feat = net.blobs['conv4'].data[0]
vis_square(feat, padval=0.5)
feat = net.blobs['conv5'].data[0]
vis_square(feat, padval=0.5)
feat = net.blobs['pool5'].data[0]
vis_square(feat, padval=1)
feat = net.blobs['fc6'].data[0]
plt.subplot(2, 1, 1)
plt.plot(feat.flat)
plt.subplot(2, 1, 2)
_ = plt.hist(feat.flat[feat.flat > 0], bins=100)
feat = net.blobs['fc7'].data[0]
plt.subplot(2, 1, 1)
plt.plot(feat.flat)
plt.subplot(2, 1, 2)
_ = plt.hist(feat.flat[feat.flat > 0], bins=100)
feat = net.blobs['prob'].data[0]
plt.plot(feat.flat)
# load labels
imagenet_labels_filename = caffe_root + 'data/ilsvrc12/synset_words.txt'
# assumes data/ilsvrc12/synset_words.txt has already been fetched
labels = np.loadtxt(imagenet_labels_filename, str, delimiter='\t')
# sort top k predictions from softmax output
top_k = net.blobs['prob'].data[0].flatten().argsort()[-1:-6:-1]
print(labels[top_k])
|
11534129
|
import praw
import time
import html.entities
import tkinter
import datetime
import string
import sqlite3
from tkinter import Tk, BOTH, Entry, PhotoImage, OptionMenu, Spinbox, Text, Scrollbar, Listbox
from tkinter.ttk import Frame, Button, Style, Label
from tkinter.tix import ScrolledWindow
class Program():
def __init__(self, name, path):
self.name = name
self.path = path
class Example(Frame):
def __init__(self, parent):
Frame.__init__(self, parent)
self.parent = parent
self.initUI()
def initUI(self):
self.parent.title("")
#self.style = Style()
#self.style.theme_use("clam")
#self.pack(fill=BOTH, expand = 1)
self.quitbutton = Button(self, text="Quit", command= lambda: self.quit())
self.quitbutton.grid(row=3, column=1, pady=4)
self.labelErrorPointer = Label(self, text="◀")
self.labellist = []
self.entrylist = []
self.verifylist = []
self.misclist = []
self.optionCreate = "Create"
self.optionUpcoming = "Upcoming"
self.optionPast = "Past"
self.prevmode = self.optionCreate
self.curmode = self.optionCreate
self.optionvar = tkinter.StringVar(self)
self.optionvar.trace("w",self.permaloop)
self.optionvar.set(self.optionCreate)
self.option = OptionMenu(self, self.optionvar, self.optionCreate, self.optionUpcoming, self.optionPast)
self.optionpostmodevar = tkinter.StringVar(self)
self.optionpostmodevar.trace("w",self.permaloop)
self.optionpostmodevar.set('url')
self.optionpostmode = OptionMenu(self, self.optionpostmodevar, 'url', 'text')
self.labelText = Label(self, text='Selftext:')
self.entryText = Text(self)
self.labelURL = Label(self, text='URL:')
self.entryURL = Entry(self)
self.entryURL.configure(width=60)
self.sql = sqlite3.connect('sql.db')
print('Loaded SQL Database')
self.cur = self.sql.cursor()
self.cur.execute('CREATE TABLE IF NOT EXISTS upcoming(ID TEXT, SUBREDDIT TEXT, TIME INT, TITLE TEXT, URL TEXT, BODY TEXT)')
self.cur.execute('CREATE TABLE IF NOT EXISTS past(ID TEXT, SUBREDDIT TEXT, TIME INT, TITLE TEXT, URL TEXT, BODY TEXT, POSTLINK TEXT)')
self.cur.execute('CREATE TABLE IF NOT EXISTS internal(NAME TEXT, ID INT)')
print('Loaded Completed table')
self.cur.execute('SELECT * FROM internal')
f = self.cur.fetchone()
if not f:
print('Database is new. Adding ID counter')
self.cur.execute('INSERT INTO internal VALUES(?, ?)', ['counter', 1])
self.idcounter = 1
else:
self.idcounter = f[1]
print('Current ID counter: ' + str(self.idcounter))
self.sql.commit()
sw = self.parent.winfo_screenwidth()
sh = self.parent.winfo_screenheight()
w=853
h=480
x = (sw - w) / 2
y = (sh - h) / 2
self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y-50))
self.login()
def login(self):
try:
self.quitbutton.grid_forget()
self.quitbutton.grid(row=9000, column=0, columnspan=20)
self.option.grid(row=1,column=0,columnspan=80,pady=8)
self.updategui(fullclean=True)
        except praw.errors.InvalidUserPass:
            print('Invalid username or password')
            self.entryPassword.delete(0, 200)
            self.labelErrorPointer.grid(row=1, column=2)
def permaloop(self, *args):
self.curmode = self.optionvar.get()
print('Was: ' + self.prevmode + ' | Now: ' + self.curmode)
if self.curmode != self.prevmode:
self.prevmode = self.curmode
self.updategui(fullclean=True)
else:
self.updategui(False)
    def getTime(self, as_unix):
        timeNow = datetime.datetime.now(datetime.timezone.utc)
        timeUnix = timeNow.timestamp()
        if as_unix is False:
            return timeNow
        else:
            return timeUnix
def addentrytobase(self, subreddit, title, url="", body="", mode="", ptime=""):
curtime = round(self.getTime(True))
try:
t = self.entryMo.get() + ' ' + self.entryDa.get() + ' ' + self.entryYr.get() + ' ' + self.entryHH.get() + ':' + self.entryMM.get()
plandate = datetime.datetime.strptime(t, "%B %d %Y %H:%M")
plandate = plandate.timestamp()
except ValueError:
print('Invalid Day')
return False
if mode == 'url':
url = self.entryURL.get()
body = ""
if 'http://' not in url and 'https://' not in url:
print('Please enter a proper URL')
return False
if mode == 'text':
body = self.entryText.get("1.0", "end")
url = ""
if plandate < curtime:
print('Please enter a time in the future')
return False
if not all(char in string.ascii_letters+string.digits+'_-' for char in subreddit):
print('Subreddit contains invalid characters')
return False
if len(subreddit) == 0:
print('You must enter a subreddit')
return False
if len(title) == 0:
print('You must enter a title')
return False
if len(title) > 300:
print('Title is too long. ' + str(len(title)) + '/300 char max')
return False
        if len(body) > 15000:
            print('Body is too long. ' + str(len(body)) + '/15,000 char max')
            return False
print('Timestamp:', plandate)
self.cur.execute('INSERT INTO upcoming VALUES(?, ?, ?, ?, ?, ?)', [self.idcounter, subreddit, int(plandate), title, url, body])
self.idcounter += 1
self.cur.execute('UPDATE internal SET ID=? WHERE NAME=?', [self.idcounter, 'counter'])
self.sql.commit()
print('\nPost Saved!')
print(self.idcounter, subreddit, self.timestamptoday(int(plandate)))
print(title)
print(url, body)
print()
self.entryText.delete("1.0", "end")
self.entryURL.delete(0, 'end')
self.entryTitle.delete(0, 'end')
#self.updategui(halfclean=True)
def timestamptoday(self, timestamp):
d = datetime.datetime.fromtimestamp(timestamp)
info = datetime.datetime.strftime(d, "%b %d %H:%M")
return info
def dropentryfrombase(self, ID):
if '-' not in ID:
try:
ID = int(ID)
l = [ID]
except ValueError:
print('You must enter a number')
return
        else:
            if ID.count('-') != 1:
                print('Enter a single ID or a range like 3-7')
                return
            try:
                ID = ID.replace(' ', '')
                ID = ID.split('-')
                ID[0] = int(ID[0])
                ID[1] = int(ID[1])
                if ID[1] > ID[0]:
                    l = list(range(ID[0], ID[1] + 1))
                else:
                    return
            except ValueError:
                return
for item in l:
item = str(item)
print('Dropping Item ' + item + ' from Upcoming')
self.cur.execute('DELETE FROM upcoming WHERE ID=?', [item])
self.sql.commit()
self.updategui(fullclean=True)
def printbasetofile(self, db):
filea = open(db + '.txt', 'w')
if db == 'past':
self.cur.execute('SELECT * FROM past')
if db == 'upcoming':
self.cur.execute('SELECT * FROM upcoming')
f = self.cur.fetchall()
print('Printed ' + db + ' unimpeded to file')
for item in f:
i = list(item)
i[2] = self.timestamptoday(i[2])
i.remove('')
print(str(i)[1:-1], file=filea)
filea.close()
def updategui(self, halfclean=False, fullclean=False):
if self.curmode == self.optionCreate:
try:
print(self.optionpostmodevar.get())
if self.optionpostmodevar.get() == 'url':
self.entryText.delete("1.0", 'end')
self.labelText.grid_forget()
self.entryText.grid_forget()
self.labelURL.grid(row=8, column=0, columnspan=30)
self.entryURL.grid(row=9, column=0, columnspan=12, pady=10)
if self.optionpostmodevar.get() == 'text':
self.entryURL.delete(0, 'end')
self.labelURL.grid_forget()
self.entryURL.grid_forget()
self.labelText.grid(row=8, column=0, columnspan=30)
self.entryText.configure(width=40, height=8)
self.entryText.grid(row=9, column=0, columnspan=12)
except AttributeError:
pass
if fullclean is True:
print('Cleaning GUI')
for item in self.labellist:
item.grid_forget()
for item in self.entrylist:
item.grid_forget()
for item in self.verifylist:
item.grid_forget()
for item in self.misclist:
item.grid_forget()
self.labellist = []
self.entrylist = []
self.verifylist = []
self.misclist = []
if self.curmode == self.optionCreate:
self.newrowindex = 6
self.labelSubreddit = Label(self, text="Subreddit: /r/")
self.labelTitle = Label(self, text="Post title: ")
self.entrySubreddit = Entry(self)
self.entryTitle = Entry(self)
self.labelHH = Label(self, text="Schedule time (Local timezone):")
nowlist = datetime.datetime.strftime(datetime.datetime.now(), "%B %d %Y %H %M").split()
self.entryMo = Spinbox(self, width=9, values=('January', 'February', 'March', 'April', 'May', 'June', 'July', \
'August', 'September', 'October', 'November', 'December'))
self.entryMo.delete(0,'end')
self.entryMo.insert(0, nowlist[0])
self.entryDa = Spinbox(self, width=2, from_=1, to=31)
self.entryDa.delete(0,'end')
self.entryDa.insert(0, nowlist[1])
self.entryYr = Spinbox(self, width=4, from_=2014, to=2500)
self.entryYr.delete(0,'end')
self.entryYr.insert(0, nowlist[2])
self.entryHH = Spinbox(self, from_=0, to=23, width=2)
self.entryHH.delete(0,'end')
self.entryHH.insert(0, nowlist[3])
self.entryMM = Spinbox(self, from_=0, to=59, width=2)
self.entryMM.delete(0,'end')
self.entryMM.insert(0, nowlist[4])
self.buttonAddentry = Button(self, text='Save', command=lambda: self.addentrytobase(self.entrySubreddit.get(), self.entryTitle.get(),\
mode=self.optionpostmodevar.get()))
self.misclist.append(self.labelSubreddit)
self.misclist.append(self.entrySubreddit)
self.misclist.append(self.labelHH)
self.misclist.append(self.entryHH)
self.misclist.append(self.entryMM)
self.misclist.append(self.entryMo)
self.misclist.append(self.entryDa)
self.misclist.append(self.entryYr)
self.misclist.append(self.labelTitle)
self.misclist.append(self.entryTitle)
self.misclist.append(self.buttonAddentry)
self.misclist.append(self.optionpostmode)
self.misclist.append(self.labelText)
self.misclist.append(self.entryText)
self.misclist.append(self.labelURL)
self.misclist.append(self.entryURL)
self.labelSubreddit.grid(row=2, column=0, sticky="e")
self.labelTitle.grid(row=3, column=0, sticky="e")
self.entrySubreddit.grid(row=2, column=1, columnspan=3, sticky="w")
self.entryTitle.grid(row=3, column=1, columnspan=3, sticky="w")
self.entryMo.grid(row=4, column=1,sticky="e")
self.entryDa.grid(row=4, column=2)
self.entryYr.grid(row=4, column=3)
self.labelHH.grid(row=4, column=0, sticky="se", pady=5)
self.entryHH.grid(row=5, column=1, sticky="e")
self.entryMM.grid(row=5, column=2, sticky="w")
self.optionpostmode.grid(row=6, column=0, columnspan=20, pady=10)
self.buttonAddentry.grid(row=200, column=0, columnspan=20)
if self.curmode == self.optionUpcoming:
self.cur.execute('SELECT * FROM upcoming')
dobutton = True
if self.curmode == self.optionPast:
self.cur.execute('SELECT * FROM past')
dobutton = False
if self.curmode == self.optionPast or self.curmode == self.optionUpcoming:
self.listboxId = Listbox(self)
self.listboxId.configure(width=118, height=20, font=("Courier 8"))
self.misclist.append(self.listboxId)
self.listboxScroller = Scrollbar(self, orient='horizontal', command=self.listboxId.xview)
self.listboxScroller.grid(row=4, column=0, columnspan=900)
self.listboxId.grid(row=3, column=0, columnspan=10)
self.listboxId.configure(xscrollcommand=self.listboxScroller.set)
self.misclist.append(self.listboxScroller)
self.buttonPrinter = Button(self, text="Print to .txt file")
if self.curmode == self.optionPast:
self.buttonPrinter.configure(command=lambda: self.printbasetofile('past'))
if self.curmode == self.optionUpcoming:
self.buttonPrinter.configure(command=lambda: self.printbasetofile('upcoming'))
self.buttonPrinter.grid(row = 6, column=0, columnspan=90)
self.misclist.append(self.buttonPrinter)
if dobutton is True:
self.entryDelete = Entry(self)
self.buttonDelete = Button(self, text="Delete Item: ", command=lambda: self.dropentryfrombase(self.entryDelete.get()))
self.buttonDelete.grid(row=5, column=0, sticky='e')
self.entryDelete.grid(row=5, column=1, sticky='w')
self.misclist.append(self.entryDelete)
self.misclist.append(self.buttonDelete)
fetched = self.cur.fetchall()
for item in fetched:
info = self.timestamptoday(item[2])
                infx = item[5] if item[4] == '' else item[4]
if self.curmode == self.optionPast:
infy = '.' + item[6]
else:
infy = ''
                self.listboxId.insert('end',
                    str(item[0]) + '.' * (6 - len(str(item[0])))
                    + item[1][:10] + '.' * (12 - len(item[1][:10]))
                    + info[:14] + '.' * (15 - len(info[:14]))
                    + item[3][:18] + '.' * (20 - len(item[3][:18]))
                    + infx[:45] + '.' * (47 - len(infx[:45]))
                    + infy)
def morerows(self, label, columnm, columnn, limit, *args):
self.redditlabel = Label(self,text=label)
self.redditlabel.grid(row=self.newrowindex,column=columnm, sticky="e")
self.labellist.append(self.redditlabel)
self.redditentry = Entry(self)
self.redditentry.grid(row=self.newrowindex,column=columnn, columnspan=9)
self.entrylist.append(self.redditentry)
self.newrowindex += 1
if self.newrowindex >= limit:
self.morerowbutton.grid_forget()
print(self.newrowindex)
def main():
root = Tk()
f1 = tkinter.Frame(width=200, height=200)
ex = Example(root)
f1.pack(fill="both", expand=True, padx=20, pady=20)
ex.place(in_=f1, anchor="c", relx=.5, rely=.5)
root.mainloop()
if __name__ == '__main__':
main()
|
11534171
|
import os
from toolchain import run_script
from commands import CMD_GET_SHOW_TAPS
from commands import CMD_SET_SHOW_TAPS
isOff = (os.getenv("function") == "debug_off")
try:
result = run_script(CMD_GET_SHOW_TAPS)
isOn = (result[-1:] == '1') or isOff
shell_cmd = CMD_SET_SHOW_TAPS.format(("1", "0")[isOn])
run_script(shell_cmd)
print("Show taps is " + ("ON", "OFF")[isOn])
except Exception:
    print("Failed to toggle show taps")
|
11534182
|
from __future__ import print_function
import baseline as bl
import argparse
import os
from baseline.utils import str2bool
def main():
parser = argparse.ArgumentParser(description='Encoder-Decoder execution')
parser.add_argument('--model', help='An encoder-decoder model', required=True, type=str)
parser.add_argument('--text', help='raw value or a file', type=str)
parser.add_argument('--backend', help='backend', default='tf')
parser.add_argument('--remote', help='(optional) remote endpoint', type=str) # localhost:8500
parser.add_argument('--name', help='(optional) signature name', type=str)
parser.add_argument('--target', help='A file to write decoded output (or print to screen)')
parser.add_argument('--tsv', help='print tab separated', type=bl.str2bool, default=False)
parser.add_argument('--batchsz', help='Size of a batch to pass at once', default=32, type=int)
parser.add_argument('--device', help='device')
parser.add_argument('--alpha', type=float, help='If set use in the gnmt length penalty.')
parser.add_argument('--beam', type=int, default=30, help='The size of beam to use.')
parser.add_argument('--prefer_eager', help="If running in TensorFlow, should we prefer eager model", type=str2bool)
args = parser.parse_known_args()[0]
if args.backend == 'tf':
from eight_mile.tf.layers import set_tf_eager_mode
set_tf_eager_mode(args.prefer_eager)
batches = []
if os.path.exists(args.text) and os.path.isfile(args.text):
with open(args.text, 'r') as f:
batch = []
for line in f:
text = line.strip().split()
if len(batch) == args.batchsz:
batches.append(batch)
batch = []
batch.append(text)
if len(batch) > 0:
batches.append(batch)
else:
batch = [args.text.split()]
batches.append(batch)
m = bl.EncoderDecoderService.load(args.model, backend=args.backend, beam=args.beam,
remote=args.remote, name=args.name, device=args.device)
f = open(args.target, 'w') if args.target is not None else None
for texts in batches:
decoded = m.predict(texts, alpha=args.alpha, beam=args.beam)
for src, dst in zip(texts, decoded):
src_str = ' '.join(src)
dst_str = ' '.join(dst)
if args.tsv:
line = src_str + '\t' + dst_str
else:
line = dst_str
print(line, file=f, flush=True)
if f is not None:
f.close()
if __name__ == '__main__':
main()
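
# Example invocation of this script (file and model names are illustrative):
#   python decode.py --model seq2seq-model.zip --text input.txt \
#       --target decoded.txt --beam 5 --backend tf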
|
11534229
|
import threading
import time
from functools import wraps
import statsd
import strgen
from flask import make_response
from flask import request
from flask_login import current_user
from flask_login import login_user
import config as base_config
import database.user
import util.cache
import util.response
from linkr import cache
COOKIE_SPA_TOKEN = 'linkr-spa-token'
def time_request(bucket):
"""
Time the request and send the duration to statsd as a timing event under the specified bucket.
This decorator sits at the highest level, and is used as follows:
@time_request('my.bucket.name')
@app.route('/')
def view_function():
pass
:param bucket: Name of the statsd bucket for this latency stat.
"""
def decorator(func):
@wraps(func)
def proxy_func_with_timing(*args, **kwargs):
start_time = time.time()
ret = func(*args, **kwargs)
duration = (time.time() - start_time) * 1000
statsd.timing(bucket, duration)
return ret
return proxy_func_with_timing
return decorator
def api_method(func):
"""
Designate this endpoint function as an API method. If secure frontend requests are enabled, this
decorator will invalidate any incoming SPA tokens and assign a new SPA token as a response
cookie. Incoming SPA tokens are invalidated in the cache with a short, asynchronous delay (to
alleviate race conditions from concurrent client requests), and new SPA tokens are synchronously
inserted into the cache before returning to the client. The logic of the underlying endpoint
function is otherwise passed through transparently.
This decorator should be used as a top-level wrapper of an endpoint function:
@app.route('/', methods=['POST'])
@require_form_args()
@api_method
def view_function():
pass
:param func: The wrapped API endpoint function.
"""
def async_delete_token(spa_token):
"""
Asynchronously delete the specified SPA token from the cache, after a small delay. This is
a noop if the token does not currently exist in the cache.
:param spa_token: The SPA token to invalidate.
"""
def task():
time.sleep(5)
cache.delete(util.cache.format_key(util.cache.TAG_SPA_TOKEN, spa_token))
thread = threading.Thread(target=task, args=())
thread.daemon = True
thread.start()
@wraps(func)
def decorator(*args, **kwargs):
if not base_config.options.server('secure_frontend_requests'):
return func(*args, **kwargs)
# Asynchronously delete the incoming SPA token (assigned from a prior request)
existing_spa_token = request.cookies.get(COOKIE_SPA_TOKEN)
async_delete_token(existing_spa_token)
# Generate a new, replacement SPA token
new_spa_token = strgen.StringGenerator("[\d\p\w]{50}").render()
# Transparently generate a response from the decorated API endpoint and attach the newly
# created SPA token as a cookie
resp = make_response(*func(*args, **kwargs))
resp.set_cookie(COOKIE_SPA_TOKEN, new_spa_token)
# To retain server-side state of this assigned token, synchronously add its value to the
# local cache
cache.set(
name=util.cache.format_key(util.cache.TAG_SPA_TOKEN, new_spa_token),
value=True,
ex=6 * 60 * 60, # Automated TTL of 6 hours
)
return resp
return decorator
def require_form_args(form_args=tuple([]), allow_blank_values=False, strict_params=False):
"""
Require this endpoint function to be requested with at least the specified parameters in its
JSON body.
Example usage for an endpoint that requires, at minimum, the params 'username' and 'password':
@app.route('/', methods=['POST'])
@require_form_args(['username', 'password'])
def view_function():
pass
On failure, returns HTTP status code 400 with a predefined failure_incomplete_params response.
:param form_args: Comma-separated strings representing required POST params.
:param allow_blank_values: True to explicitly consider an empty value as a valid param value.
:param strict_params: True to check if the POST request params are strictly equal to form_args.
False by default, thereby considering the request valid if there are extra
arguments.
"""
def decorator(func):
@wraps(func)
def abort_if_invalid_args(*args, **kwargs):
data = request.get_json(force=True, silent=True) or {}
if (len(form_args) > 0 and not data) or \
(not strict_params and not set(form_args).issubset(data.keys())) or \
(strict_params and set(form_args) != set(data.keys())) or \
(not allow_blank_values and not all([
data[arg] is not None and len(unicode(data[arg])) > 0 for arg in form_args
])):
return util.response.error(
status_code=400,
message='Required parameters are missing',
failure='failure_incomplete_params',
data={
'missing_params': list(set(form_args).difference(set(data.keys()))),
},
)
return func(data, *args, **kwargs)
return abort_if_invalid_args
return decorator
def require_login_api(admin_only=False, only_if=None):
"""
A custom implementation of Flask-login's built-in @login_required decorator.
This decorator will allow usage of the API endpoint if the user is either currently logged in
via the app or if the user authenticates with an API key in the POSTed JSON parameters.
This implementation overrides the behavior taken when the current user is not authenticated by
returning a predefined auth failure response with HTTP status code 401.
This decorator is intended for use with API endpoints, and REQUIRES use of require_form_args on
the same view function. Example usage for an authentication-required endpoint:
@app.route('/', methods=['POST'])
@require_form_args([])
@require_login_api()
def view_function():
pass
:param admin_only: True to only allow admin users to access this endpoint; False to allow any
authenticated user.
:param only_if: Optional boolean parameter to denote that login is only required if the
expression is true in value.
"""
def decorator(func):
@wraps(func)
def validate_auth(data, *args, **kwargs):
# Allow access if the user is authenticated (or in the case of admin_only, only if the
# user is also an admin).
if current_user.is_authenticated and (not admin_only or current_user.is_admin):
return func(data, *args, **kwargs)
# If a condition is set, allow access if the condition is false in value.
if only_if is not None and not only_if:
return func(data, *args, **kwargs)
api_key = request.headers.get('X-Linkr-Key') or data.get('api_key')
if not api_key:
return util.response.error(
status_code=403,
message='You must be authenticated to access this endpoint.',
failure='failure_unauth',
)
user = database.user.get_user_by_api_key(api_key)
if user and (not admin_only or user.is_admin):
# Log the user in before servicing the request, passing along the input data to
# the API endpoint, excluding sensitive information (API key).
login_user(user)
if data.get('api_key'):
del data['api_key']
return func(data, *args, **kwargs)
elif not user:
return util.response.error(
status_code=401,
message='The supplied API key is invalid.',
failure='failure_unauth',
)
else:
return util.response.error(
status_code=403,
message='Only admin users are allowed to access this endpoint.',
failure='failure_unauth',
)
return validate_auth
return decorator
def optional_login_api(func):
"""
This decorator is similar in behavior to require_login_api, but is intended for use with
endpoints that offer extended functionality with a login, but can still be used without any
authentication.
The decorator will set current_user if authentication via an API key is provided, and will
continue without error otherwise.
This decorator is intended for use with API endpoints, and REQUIRES use of require_form_args on
the same view function. Example usage for an authentication-required endpoint:
@app.route('/', methods=['POST'])
@require_form_args([])
@optional_login_api
def view_function():
pass
:param func: The wrapped API endpoint function.
"""
@wraps(func)
def decorator(data, *args, **kwargs):
if current_user.is_authenticated:
return func(data, *args, **kwargs)
api_key = request.headers.get('X-Linkr-Key') or data.get('api_key')
if api_key:
user = database.user.get_user_by_api_key(api_key)
if user:
login_user(user)
if data.get('api_key'):
del data['api_key']
return func(data, *args, **kwargs)
return decorator
def require_frontend_api(func):
"""
Require this API endpoint to be requested from a browser. The request should pass an SPA token
as a cookie assigned from a previous request. The request should also supply a User-Agent header
consistent with a browser. Refusing to supply an SPA token or supplying a stale token will cause
the request to be rejected.
This decorator should be used in conjunction with @api_method. This specifies an API endpoint
function that should both invalidate and assign SPA tokens, and reject requests if an incoming
token was not previously assigned by an @api_method function.
@app.route('/', methods=['POST'])
@require_form_args()
@require_frontend_api
@api_method
def view_function():
pass
:param func: The wrapped API endpoint function.
"""
@wraps(func)
def decorator(data, *args, **kwargs):
if not base_config.options.server('secure_frontend_requests'):
return func(data, *args, **kwargs)
spa_token = request.cookies.get(COOKIE_SPA_TOKEN)
if not cache.get(util.cache.format_key(util.cache.TAG_SPA_TOKEN, spa_token)):
return util.response.error(
status_code=403,
message='Client context requirements not fulfilled.',
failure='failure_bad_client',
)
return func(data, *args, **kwargs)
return decorator
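
# A minimal sketch (illustrative route and helper names) of how the SPA-token
# decorators compose: @api_method rotates the token cookie on every response,
# while @require_frontend_api rejects requests whose token is absent or stale.
def _register_example_endpoint(app):
    @app.route('/api/example', methods=['POST'])
    @require_form_args()
    @require_frontend_api
    @api_method
    def example_endpoint(data):
        return '', 200
    return example_endpoint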
|
11534233
|
def test_get_username_by_user_id(bot, config):
usernames_ids = config.get("usernames_ids")
for item in usernames_ids:
username = item[0]
userid = item[1]
assert username == bot.get_username_by_user_id(userid)
|
11534241
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from .HubSIRV import HubSIRV
class HubSIRSV(HubSIRV):
"""
SIRSV compartmental model with the Hub model assumption.
Parameters
----------
pss: float
probability someone is considered a super spreader.
rstart: float
the spreading radius of every normal spreader.
side: float
size of one side of the square plane.
S0: int
The initial amount of susceptibles at the start of the simulation.
I0: int
The initial amount of infectious individuals at the start of the simulation.
R0: int
The inital amount of removed individuals at the start of the simulation.
days: int
The number of days that are simulated.
gamma: float
        The probability of someone going from I to R.
    kappa: float
        The probability of someone going from the R compartment to S.
    eta: float
        The probability of someone going from S to V, given they don't go from S to I.
w0: float (optional)
The probability of infection if an infectious and susceptible individual are in the same location. Default is 1.0.
hubConstant: float (optional)
        The factor k multiplied by rstart if the person is a super spreader. Default is sqrt(6).
    alpha: float
        constant used in the P(infection) formula. Default is 2.0.
Attributes
----------
popsize: int
size of the population.
details: Simul_Details
        an object that can be returned using run(getDetails=True) that provides more insight into the
        simulation by showing transmission chains, personal history with states, and more.
    S : ndarray
        stores the number of people in the S compartment on each day.
    I : ndarray
        stores the number of people in the I compartment on each day.
    R : ndarray
        stores the number of people in the R compartment on each day.
V: ndarray
stores the number of people in the V compartment on each day.
Scollect: list
contains the Person objects of everyone in simulation. If an element in Scollect has isIncluded=True,
that means person is currently in susceptible compartment.
Icollect: list
contains the Person objects of everyone in simulation. If an element in Icollect has isIncluded=True,
that means person is currently in infected compartment.
Rcollect: list
contains the Person objects of everyone in simulation. If an element in Rcollect has isIncluded=True,
that means person is currently in removed compartment.
locx: ndarray
stores the x coordinate of each person in the simulation.
locy: ndarray
stores the y coordinate of each person in the simulation.
"""
def __init__(self, S0: int, I0: int, R0:int, V0: int, pss: float, gamma: float, kappa: float, eta:float, rstart: float, side: float, days:int, alpha=2, w0=1.0, hubConstant=6**0.5, timeDelay=-1):
# error checking
self.intCheck([S0, I0, R0,V0, days])
self.floatCheck([pss, gamma, kappa, eta, side, rstart, w0, alpha, hubConstant, timeDelay])
self.negValCheck([S0, I0, R0, V0, pss, gamma, kappa, eta, side, rstart, days, w0, hubConstant, alpha])
self.probValCheck([pss, gamma, kappa, eta, w0])
super().__init__(S0=S0, I0=I0, R0=R0, V0=V0, pss=pss, gamma=gamma, eta=eta, rstart=rstart, side=side, days=days, alpha=alpha, w0=w0, hubConstant=hubConstant, timeDelay=timeDelay)
self.kappa = kappa
def _RtoS(self):
return self._changeHelp(self.Rcollect, self.kappa)
def run(self, getDetails=True):
for i in range(1, self.days + 1):
#print("Day ",i)
# run the transfers from different compartments
transferSI = self._StoI(i)
transferIr = self._ItoR()
transferSV = set()
if i > self.timeDelay:
transferSV = self._StoV()
transferRS = self._RtoS()
# go after and change the indices in the collection data structure thing
self._stateChanger(transferSI, self.Icollect, "I", i)
self._stateChanger(transferIr, self.Rcollect, "R", i)
self._stateChanger(transferSV, self.Vcollect, "V", i)
self._stateChanger(transferRS, self.Scollect, "S", i)
# change the number of people in each state on the day i by adjusting the previous day's count
self.S[i] = self.S[i - 1] - len(transferSI) - len(transferSV) + len(transferRS)
self.I[i] = self.I[i - 1] + len(transferSI) - len(transferIr)
self.R[i] = self.R[i-1] + len(transferIr) - len(transferRS)
self.V[i] = self.V[i-1] + len(transferSV)
if getDetails:
return self.details
def plot(self):
"Plots the number of susceptible, exposed, infected, and recovered individuals on the y-axis and the number of days on the x-axis."
t = np.linspace(0, self.days, self.days + 1)
fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=4, sharex='all')
ax1.plot(t, self.S, label="Susceptible", color='r')
ax1.set_ylabel("# Susceptibles")
ax1.set_title("Hub Model SIRSV Simulation")
ax3.plot(t, self.V, label="Vaccinated", color='g')
ax3.set_ylabel("# Vaccinated")
ax2.plot(t, self.I, label="Active Cases", color='b')
ax2.set_ylabel("# Active Infections")
ax4.set_xlabel("Days")
ax4.set_ylabel("# Recovered")
ax4.plot(t, self.R, label="Removed")
ax1.legend()
ax2.legend()
ax3.legend()
ax4.legend()
plt.show()
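# A minimal usage sketch (parameter values are illustrative, not calibrated to
# any real outbreak); run() simulates the model and plot() draws the panels.
if __name__ == '__main__':
    model = HubSIRSV(S0=999, I0=1, R0=0, V0=0, pss=0.2,
                     gamma=0.3, kappa=0.05, eta=0.02,
                     rstart=3.0, side=50.0, days=60)
    details = model.run(getDetails=True)  # returns a Simul_Details object
    model.plot()  # four stacked panels: S, I, V, R over time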
|
11534252
|
from copy import copy
from typing import Optional, Union
from hwt.code import And, Or
from hwt.doc_markers import internal
from hwt.hdl.constants import DIRECTION
from hwt.hdl.types.array import HArray
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.enum import HEnum
from hwt.hdl.types.hdlType import HdlType
from hwt.hdl.types.struct import HStruct
from hwt.hdl.types.structCast import hstruct_reinterpret
from hwt.hdl.types.structValBase import StructValBase
from hwt.interfaces.agents.structIntf import StructIntfAgent
from hwt.interfaces.std import Signal
from hwt.synthesizer.hObjList import HObjList
from hwt.synthesizer.interface import Interface
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal
from hwt.synthesizer.typePath import TypePath
from hwtSimApi.hdlSimulator import HdlSimulator
class StructIntf(Interface):
"""
Create dynamic interface based on HStruct or HUnion description
:ivar ~._fieldsToInterfaces: dictionary {field_path: sub interface for it}
field path is a tuple of HStructFields which leads to this interface
:ivar ~._dtype: HStruct instance used as template for this interface
:param _instantiateFieldFn: function(FieldTemplateItem instance)
return interface instance
:attention: _instantiateFieldFn should also share _fieldsToInterfaces
with all other instances of StructIntf on this interface
"""
def __init__(self, structT: HStruct,
field_path: TypePath,
instantiateFieldFn,
masterDir=DIRECTION.OUT,
loadConfig=True):
Interface.__init__(self,
masterDir=masterDir,
loadConfig=loadConfig)
if not field_path:
field_path = TypePath()
else:
assert isinstance(field_path, TypePath), field_path
self._field_path = field_path
self._dtype = structT
        assert self._dtype.fields, "Needs to have at least some members (otherwise this interface is useless)"
self._instantiateFieldFn = instantiateFieldFn
self._fieldsToInterfaces = {}
def _declr(self):
_t = self._dtype
if isinstance(_t, HStruct):
fields = _t.fields
else:
fields = _t.fields.values()
self._fieldsToInterfaces[self._field_path] = self
for field in fields:
# skip padding
if field.name is not None:
# generate interface based on struct field
intf = self._instantiateFieldFn(self, field)
p = self._field_path / field.name
assert p not in self._fieldsToInterfaces, p
self._fieldsToInterfaces[p] = intf
setattr(self, field.name, intf)
def _initSimAgent(self, sim: HdlSimulator):
self._ag = StructIntfAgent(sim, self)
def _eq(self, other: Union["StructIntf", StructValBase]):
if isinstance(other, self.__class__):
assert self._dtype == other._dtype
return And(*(si._eq(oi) for si, oi in zip(self._interfaces, other._interfaces)))
else:
return And(*(si._eq(getattr(other, si._name)) for si in self._interfaces))
def __ne__(self, other: Union["StructIntf", StructValBase]):
if isinstance(other, self.__class__):
assert self._dtype == other._dtype
return Or(*(si != oi for si, oi in zip(self._interfaces, other._interfaces)))
else:
return Or(*(si != getattr(other, si._name) for si in self._interfaces))
def _reinterpret_cast(self, toT: HdlType):
return hstruct_reinterpret(self._dtype, self, toT)
class HdlType_to_Interface():
"""
Convert instance of HdlType to an interface shich represents same data.
:note: Interface is only instanciated, that means it does not have sub-interfaces
loaded yet, it can be done manually or by assigning to a property of parent Interface/Unit
instance.
"""
def apply(self, dtype: HdlType, field_path: Optional[TypePath]=None, masterDir=DIRECTION.OUT) -> Interface:
"""
        Run the conversion.
"""
if isinstance(dtype, HStruct):
return StructIntf(dtype, field_path,
instantiateFieldFn=self.instantiateFieldFn,
masterDir=masterDir)
elif isinstance(dtype, (Bits, HEnum)):
return Signal(dtype=dtype, masterDir=masterDir)
elif isinstance(dtype, HArray):
return HObjList(self.apply(dtype.element_t, masterDir=masterDir)
for _ in range(dtype.size))
else:
raise NotImplementedError(dtype)
@internal
def instantiateFieldFn(self, intf, fieldInfo) -> Interface:
if isinstance(intf, StructIntf):
c = self.apply(
fieldInfo.dtype,
field_path=intf._field_path / fieldInfo.name)
c._fieldsToInterfaces = intf._fieldsToInterfaces
return c
else:
raise NotImplementedError(intf)
class Interface_to_HdlType():
"""
Convert instance of HdlType to an interface shich represents same data.
:note: Interface instance has to have definitions loaded.
"""
def apply(self, intf: Union[Interface, RtlSignal], const=False):
"""
        Run the conversion.
"""
if isinstance(intf, Interface) and intf._interfaces:
return HStruct(
*((self.apply(i, const=const), i._name)
for i in intf._interfaces)
)
else:
t = intf._dtype
if t.const != const:
t = copy(t)
t.const = const
return t
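# A minimal sketch (field layout illustrative) of the converter above: build a
# nested interface from an HStruct. Bits and HStruct are already imported at the
# top of this module.
if __name__ == '__main__':
    t = HStruct(
        (Bits(32), "addr"),
        (Bits(8), "data"),
    )
    intf = HdlType_to_Interface().apply(t)  # StructIntf with Signal members
    # Interface_to_HdlType needs the interface's definitions loaded (e.g. after
    # it is registered on a Unit), so the reverse call is shown only as a note:
    # t2 = Interface_to_HdlType().apply(intf)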
|
11534255
|
import click
import responses
import pytest
from yogit.api.client import GraphQLClient, GITHUB_API_URL_V4
def _add_response(status, json):
responses.add(responses.POST, GITHUB_API_URL_V4, json=json, status=status)
@responses.activate
def test_ok_200():
_add_response(200, {"data": "result"})
client = GraphQLClient()
assert client.get({"query": "request"}) == {"data": "result"}
@responses.activate
def test_ko_400():
_add_response(400, {"error": "result"})
client = GraphQLClient()
with pytest.raises(click.ClickException) as e:
client.get({"query": "request"})
assert str(e.value) == "Bad request"
@responses.activate
def test_ko_401():
_add_response(401, {"error": "result"})
client = GraphQLClient()
with pytest.raises(click.ClickException) as e:
client.get({"query": "request"})
assert str(e.value) == "Unauthorized"
@responses.activate
def test_ko_500():
_add_response(500, {"error": "result"})
client = GraphQLClient()
with pytest.raises(click.ClickException) as e:
client.get({"query": "request"})
assert str(e.value) == "Internal server error"
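# A sketch of one more failure path, assuming (unverified) that the client maps
# any other unexpected status to a click.ClickException as well.
@responses.activate
def test_ko_404():
    _add_response(404, {"error": "result"})
    client = GraphQLClient()
    with pytest.raises(click.ClickException):
        client.get({"query": "request"})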
|
11534309
|
import numpy as np
import matplotlib as mpl
mpl.use("agg", warn=False) # noqa
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics.pairwise
import scipy.cluster.hierarchy as sch
import scipy.sparse as spsp
import scedar.eda as eda
import pytest
class TestSampleDistanceMatrix(object):
"""docstring for TestSampleDistanceMatrix"""
x_3x2 = [[0, 0], [1, 1], [2, 2]]
x_2x4_arr = np.array([[0, 1, 2, 3], [1, 2, 0, 6]])
def test_valid_init(self):
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric='euclidean')
dist_mat = np.array([[0, np.sqrt(2), np.sqrt(8)],
[np.sqrt(2), 0, np.sqrt(2)],
[np.sqrt(8), np.sqrt(2), 0]])
np.testing.assert_allclose(sdm.d, dist_mat)
sdm2 = eda.SampleDistanceMatrix(
self.x_2x4_arr, metric='euclidean', nprocs=5)
sdm2_d1 = np.sqrt(
np.power(self.x_2x4_arr[0] - self.x_2x4_arr[1], 2).sum())
np.testing.assert_allclose(sdm2.d,
np.array([[0, sdm2_d1], [sdm2_d1, 0]]))
sdm3 = eda.SampleDistanceMatrix(
self.x_2x4_arr, metric='correlation', nprocs=5)
sdm3_corr_d = (1 - np.dot(
self.x_2x4_arr[0] - self.x_2x4_arr[0].mean(),
self.x_2x4_arr[1] - self.x_2x4_arr[1].mean()) /
(np.linalg.norm(self.x_2x4_arr[0] - self.x_2x4_arr[0].mean(),
2) *
np.linalg.norm(self.x_2x4_arr[1] - self.x_2x4_arr[1].mean(),
2)))
np.testing.assert_allclose(sdm3.d,
np.array([[0, 0.3618551],
[0.3618551, 0]]))
np.testing.assert_allclose(sdm3.d,
np.array([[0, sdm3_corr_d],
[sdm3_corr_d, 0]]))
sdm4 = eda.SampleDistanceMatrix(self.x_3x2, dist_mat)
sdm5 = eda.SampleDistanceMatrix(
self.x_3x2, dist_mat, metric='euclidean')
sdm5 = eda.SampleDistanceMatrix([[1, 2]], metric='euclidean')
assert sdm5.tsne(n_iter=250).shape == (1, 2)
def test_empty_init(self):
with pytest.raises(ValueError) as excinfo:
eda.SampleDistanceMatrix(np.empty(0), metric='euclidean')
sdm = eda.SampleDistanceMatrix(np.empty((0, 0)), metric='euclidean')
assert len(sdm.sids) == 0
assert len(sdm.fids) == 0
assert sdm._x.shape == (0, 0)
assert sdm._d.shape == (0, 0)
assert sdm._col_sorted_d.shape == (0, 0)
assert sdm._col_argsorted_d.shape == (0, 0)
assert sdm.tsne(n_iter=250).shape == (0, 0)
def test_init_wrong_metric(self):
# when d is None, metric cannot be precomputed
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric='precomputed')
# lazy load d
eda.SampleDistanceMatrix(self.x_3x2, metric='unknown')
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric='unknown').d
eda.SampleDistanceMatrix(self.x_3x2, metric=1)
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric=1).d
eda.SampleDistanceMatrix(self.x_3x2, metric=1.)
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric=1.).d
eda.SampleDistanceMatrix(self.x_3x2, metric=('euclidean', ))
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric=('euclidean', )).d
eda.SampleDistanceMatrix(self.x_3x2, metric=['euclidean'])
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric=['euclidean']).d
def test_init_wrong_d_type(self):
d_3x3 = np.array([[0, np.sqrt(2), np.sqrt(8)],
['1a1', 0, np.sqrt(2)],
[np.sqrt(8), np.sqrt(2), 0]])
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, d_3x3)
def test_init_wrong_d_size(self):
d_2x2 = np.array([[0, np.sqrt(2)],
[np.sqrt(2), 0]])
d_1x6 = np.arange(6)
d_3x2 = np.array([[0, np.sqrt(2)],
[np.sqrt(2), 0],
[1, 2]])
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, d_2x2)
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, d_3x2)
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, d_1x6)
def test_to_classified(self):
sdm = eda.SampleDistanceMatrix(np.arange(100).reshape(50, -1),
metric='euclidean')
# initialize cached results
sdm.tsne_plot()
sdm.pca_plot()
sdm.s_knn_graph(2)
sdm.s_ith_nn_d(1)
sdm.s_ith_nn_ind(1)
labs = [0]*10 + [1]*20 + [0]*10 + [2]*10
slcs = sdm.to_classified(labs)
assert slcs.labs == labs
assert slcs._lazy_load_d is sdm._lazy_load_d
assert slcs._lazy_load_d is not None
assert slcs._metric == sdm._metric
assert slcs._nprocs == sdm._nprocs
assert slcs.sids == sdm.sids
assert slcs.fids == sdm.fids
# tsne
assert slcs._tsne_lut is not None
assert slcs._tsne_lut == sdm._tsne_lut
assert slcs._lazy_load_last_tsne is not None
assert slcs._lazy_load_last_tsne is sdm._lazy_load_last_tsne
# knn
assert slcs._lazy_load_col_sorted_d is not None
assert slcs._lazy_load_col_sorted_d is sdm._lazy_load_col_sorted_d
assert slcs._lazy_load_col_argsorted_d is not None
assert (slcs._lazy_load_col_argsorted_d is
sdm._lazy_load_col_argsorted_d)
assert slcs._knn_ng_lut is not None
assert slcs._knn_ng_lut == sdm._knn_ng_lut
# pca
assert slcs._pca_n_components is not None
assert slcs._lazy_load_skd_pca is not None
assert slcs._lazy_load_pca_x is not None
assert slcs._pca_n_components == sdm._pca_n_components
assert slcs._lazy_load_skd_pca is sdm._lazy_load_skd_pca
assert slcs._lazy_load_pca_x is sdm._lazy_load_pca_x
def test_sort_x_by_d(self):
x1 = np.array([[0, 5, 30, 10],
[1, 5, 30, 10],
[0, 5, 33, 10],
[2, 5, 30, 7],
[2, 5, 30, 9]])
x2 = x1.copy()
opt_inds = eda.HClustTree.sort_x_by_d(
x=x2.T, metric='euclidean', optimal_ordering=True)
assert opt_inds == [2, 3, 1, 0]
np.testing.assert_equal(x1, x2)
x3 = np.array([[0, 0, 30, 10],
[1, 2, 30, 10],
[0, 3, 33, 10],
[2, 4, 30, 7],
[2, 5, 30, 9]])
x4 = x3.copy()
opt_inds = eda.HClustTree.sort_x_by_d(
x=x4.T, metric='euclidean', optimal_ordering=True)
assert opt_inds == [2, 3, 1, 0]
np.testing.assert_equal(x3, x4)
def test_sort_features(self):
x = np.array([[0, 2, 30, 10],
[1, 2, 30, 10],
[0, 3, 33, 10],
[2, 5, 30, 7],
[2, 5, 30, 9]])
sdm = eda.SampleDistanceMatrix(
x, metric='euclidean')
sdm2 = eda.SampleDistanceMatrix(
x, metric='euclidean')
sdm2.sort_features(fdist_metric='euclidean', optimal_ordering=True)
assert sdm2.fids == [2, 3, 1, 0]
def test_get_tsne_kv(self):
tmet = 'euclidean'
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
assert sdm.get_tsne_kv(1) is None
assert sdm.get_tsne_kv(1) is None
assert sdm.get_tsne_kv(0) is None
assert sdm.get_tsne_kv(2) is None
def test_get_tsne_kv_wrong_args(self):
tmet = 'euclidean'
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
with pytest.raises(ValueError) as excinfo:
sdm.get_tsne_kv([1, 2, 3])
with pytest.raises(ValueError) as excinfo:
sdm.get_tsne_kv({1: 2})
def test_put_tsne_wrong_args(self):
tmet = 'euclidean'
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
with pytest.raises(ValueError) as excinfo:
sdm.put_tsne(1, [1, 2, 3])
with pytest.raises(ValueError) as excinfo:
sdm.put_tsne({1: 2}, [1, 2, 3])
def test_tsne(self):
tmet = 'euclidean'
tsne_kwargs = {'metric': tmet, 'n_iter': 250,
'random_state': 123}
ref_tsne = eda.tsne(self.x_3x2, **tsne_kwargs)
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
assert sdm.tsne_lut == {}
tsne1 = sdm.tsne(n_iter=250, random_state=123)
np.testing.assert_allclose(ref_tsne, tsne1)
np.testing.assert_allclose(ref_tsne, sdm._last_tsne)
assert tsne1.shape == (3, 2)
assert len(sdm.tsne_lut) == 1
tsne2 = sdm.tsne(store_res=False, **tsne_kwargs)
np.testing.assert_allclose(ref_tsne, tsne2)
assert len(sdm.tsne_lut) == 1
with pytest.raises(Exception) as excinfo:
wrong_metric_kwargs = tsne_kwargs.copy()
wrong_metric_kwargs['metric'] = 'correlation'
sdm.tsne(**wrong_metric_kwargs)
assert len(sdm.tsne_lut) == 1
tsne3 = sdm.tsne(store_res=True, **tsne_kwargs)
np.testing.assert_allclose(ref_tsne, tsne3)
# (param, ind) as key, so same params get an extra entry.
assert len(sdm.tsne_lut) == 2
np.testing.assert_allclose(tsne1, sdm.get_tsne_kv(1)[1])
np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(2)[1])
assert tsne1 is not sdm.get_tsne_kv(1)[1]
assert tsne3 is not sdm.get_tsne_kv(2)[1]
tsne4 = sdm.tsne(store_res=True, n_iter=250, random_state=123)
np.testing.assert_allclose(ref_tsne, tsne4)
np.testing.assert_allclose(sdm.get_tsne_kv(3)[1], tsne4)
assert len(sdm.tsne_lut) == 3
tsne5 = sdm.tsne(store_res=True, n_iter=251, random_state=123)
tsne6 = sdm.tsne(store_res=True, n_iter=251, random_state=123)
np.testing.assert_allclose(tsne6, tsne5)
np.testing.assert_allclose(tsne5, sdm.get_tsne_kv(4)[1])
np.testing.assert_allclose(tsne6, sdm.get_tsne_kv(5)[1])
assert len(sdm.tsne_lut) == 5
def test_par_tsne(self):
tmet = 'euclidean'
param_list = [{'metric': tmet, 'n_iter': 250, 'random_state': 123},
{'metric': tmet, 'n_iter': 250, 'random_state': 125},
{'metric': tmet, 'n_iter': 250, 'random_state': 123}]
ref_tsne = eda.tsne(self.x_3x2, **param_list[0])
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
# If not store, should not update lut
sdm.par_tsne(param_list, store_res=False)
assert sdm._lazy_load_last_tsne is None
assert sdm.tsne_lut == {}
# store results
tsne1, tsne2, tsne3 = sdm.par_tsne(param_list)
np.testing.assert_allclose(ref_tsne, tsne1)
np.testing.assert_allclose(ref_tsne, tsne3)
np.testing.assert_allclose(ref_tsne, sdm._last_tsne)
assert tsne1.shape == (3, 2)
assert len(sdm.tsne_lut) == 3
np.testing.assert_allclose(tsne1, sdm.get_tsne_kv(1)[1])
np.testing.assert_allclose(tsne2, sdm.get_tsne_kv(2)[1])
np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(3)[1])
np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(1)[1])
def test_par_tsne_mp(self):
tmet = 'euclidean'
param_list = [{'metric': tmet, 'n_iter': 250, 'random_state': 123},
{'metric': tmet, 'n_iter': 250, 'random_state': 125},
{'metric': tmet, 'n_iter': 250, 'random_state': 123}]
ref_tsne = eda.tsne(self.x_3x2, **param_list[0])
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
# If not store, should not update lut
sdm.par_tsne(param_list, store_res=False, nprocs=3)
assert sdm._lazy_load_last_tsne is None
assert sdm.tsne_lut == {}
# store results
tsne1, tsne2, tsne3 = sdm.par_tsne(param_list, nprocs=3)
np.testing.assert_allclose(ref_tsne, tsne1)
np.testing.assert_allclose(ref_tsne, tsne3)
np.testing.assert_allclose(ref_tsne, sdm._last_tsne)
assert tsne1.shape == (3, 2)
assert len(sdm.tsne_lut) == 3
np.testing.assert_allclose(tsne1, sdm.get_tsne_kv(1)[1])
np.testing.assert_allclose(tsne2, sdm.get_tsne_kv(2)[1])
np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(3)[1])
np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(1)[1])
def test_tsne_default_init(self):
tmet = 'euclidean'
tsne_kwargs = {'metric': tmet, 'n_iter': 250,
'random_state': 123}
ref_tsne = eda.tsne(self.x_3x2, **tsne_kwargs)
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
init_tsne = sdm._last_tsne
assert init_tsne.shape == (3, 2)
assert len(sdm.tsne_lut) == 1
tsne2 = sdm.tsne(store_res=True, **tsne_kwargs)
np.testing.assert_allclose(ref_tsne, tsne2)
assert len(sdm.tsne_lut) == 2
def test_ind_x(self):
sids = list("abcdef")
fids = list(range(10, 20))
sdm = eda.SampleDistanceMatrix(
np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
# select sf
ss_sdm = sdm.ind_x([0, 5], list(range(9)))
assert ss_sdm._x.shape == (2, 9)
assert ss_sdm.sids == ['a', 'f']
assert ss_sdm.fids == list(range(10, 19))
np.testing.assert_equal(
ss_sdm.d, sdm._d[np.ix_((0, 5), (0, 5))])
# select with Default
ss_sdm = sdm.ind_x()
assert ss_sdm._x.shape == (6, 10)
assert ss_sdm.sids == list("abcdef")
assert ss_sdm.fids == list(range(10, 20))
np.testing.assert_equal(ss_sdm.d, sdm._d)
# select with None
ss_sdm = sdm.ind_x(None, None)
assert ss_sdm._x.shape == (6, 10)
assert ss_sdm.sids == list("abcdef")
assert ss_sdm.fids == list(range(10, 20))
np.testing.assert_equal(ss_sdm.d, sdm._d)
# select non-existent inds
with pytest.raises(IndexError) as excinfo:
sdm.ind_x([6])
with pytest.raises(IndexError) as excinfo:
sdm.ind_x(None, ['a'])
def test_ind_x_empty(self):
sids = list("abcdef")
fids = list(range(10, 20))
sdm = eda.SampleDistanceMatrix(
np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
empty_s = sdm.ind_x([])
assert empty_s._x.shape == (0, 10)
assert empty_s._d.shape == (0, 0)
assert empty_s._sids.shape == (0,)
assert empty_s._fids.shape == (10,)
empty_f = sdm.ind_x(None, [])
assert empty_f._x.shape == (6, 0)
assert empty_f._d.shape == (6, 6)
assert empty_f._sids.shape == (6,)
assert empty_f._fids.shape == (0,)
empty_sf = sdm.ind_x([], [])
assert empty_sf._x.shape == (0, 0)
assert empty_sf._d.shape == (0, 0)
assert empty_sf._sids.shape == (0,)
assert empty_sf._fids.shape == (0,)
def test_id_x(self):
sids = list("abcdef")
fids = list(range(10, 20))
sdm = eda.SampleDistanceMatrix(
np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
# select sf
ss_sdm = sdm.id_x(['a', 'f'], list(range(10, 15)))
assert ss_sdm._x.shape == (2, 5)
assert ss_sdm.sids == ['a', 'f']
assert ss_sdm.fids == list(range(10, 15))
np.testing.assert_equal(
ss_sdm.d, sdm._d[np.ix_((0, 5), (0, 5))])
# select with Default
ss_sdm = sdm.id_x()
assert ss_sdm._x.shape == (6, 10)
assert ss_sdm.sids == list("abcdef")
assert ss_sdm.fids == list(range(10, 20))
np.testing.assert_equal(ss_sdm.d, sdm._d)
# select with None
ss_sdm = sdm.id_x(None, None)
assert ss_sdm._x.shape == (6, 10)
assert ss_sdm.sids == list("abcdef")
assert ss_sdm.fids == list(range(10, 20))
np.testing.assert_equal(ss_sdm.d, sdm._d)
# select non-existent inds
# id lookup raises ValueError
with pytest.raises(ValueError) as excinfo:
sdm.id_x([6])
with pytest.raises(ValueError) as excinfo:
sdm.id_x(None, ['a'])
def test_id_x_empty(self):
sids = list("abcdef")
fids = list(range(10, 20))
sdm = eda.SampleDistanceMatrix(
np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
empty_s = sdm.id_x([])
assert empty_s._x.shape == (0, 10)
assert empty_s._d.shape == (0, 0)
assert empty_s._sids.shape == (0,)
assert empty_s._fids.shape == (10,)
empty_f = sdm.id_x(None, [])
assert empty_f._x.shape == (6, 0)
assert empty_f._d.shape == (6, 6)
assert empty_f._sids.shape == (6,)
assert empty_f._fids.shape == (0,)
empty_sf = sdm.id_x([], [])
assert empty_sf._x.shape == (0, 0)
assert empty_sf._d.shape == (0, 0)
assert empty_sf._sids.shape == (0,)
assert empty_sf._fids.shape == (0,)
def test_getter(self):
tmet = 'euclidean'
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
dist_mat = np.array([[0, np.sqrt(2), np.sqrt(8)],
[np.sqrt(2), 0, np.sqrt(2)],
[np.sqrt(8), np.sqrt(2), 0]])
np.testing.assert_allclose(sdm.d, dist_mat)
assert sdm.d is not sdm._d
assert sdm.metric == tmet
assert sdm.tsne_lut == {}
assert sdm.tsne_lut is not sdm._tsne_lut
assert sdm.tsne_lut == sdm._tsne_lut
sdm.tsne(n_iter=250)
assert sdm.tsne_lut is not sdm._tsne_lut
for k in sdm.tsne_lut:
np.testing.assert_equal(sdm.tsne_lut[k], sdm._tsne_lut[k])
def test_num_correct_dist_mat(self):
tdmat = np.array([[0, 1, 2],
[0.5, 0, 1.5],
[1, 1.6, 0.5]])
        # upper triangle is assigned with lower triangle values
ref_cdmat = np.array([[0, 0.5, 1],
[0.5, 0, 1.6],
[1, 1.6, 0]])
with pytest.warns(UserWarning):
cdmat = eda.SampleDistanceMatrix.num_correct_dist_mat(tdmat)
np.testing.assert_equal(cdmat, ref_cdmat)
ref_cdmat2 = np.array([[0, 0.5, 1],
[0.5, 0, 1],
[1, 1, 0]])
# with upper bound
cdmat2 = eda.SampleDistanceMatrix.num_correct_dist_mat(tdmat, 1)
np.testing.assert_equal(cdmat2, ref_cdmat2)
# wrong shape
tdmat3 = np.array([[0, 0.5],
[0.5, 0],
[1, 1]])
# with upper bound
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix.num_correct_dist_mat(tdmat3, 1)
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix.num_correct_dist_mat(tdmat3)
def test_s_ith_nn_d(self):
nn_sdm = eda.SampleDistanceMatrix([[0], [1], [5], [6], [10], [20]],
metric='euclidean')
np.testing.assert_allclose([0, 0, 0, 0, 0, 0],
nn_sdm.s_ith_nn_d(0))
np.testing.assert_allclose([1, 1, 1, 1, 4, 10],
nn_sdm.s_ith_nn_d(1))
np.testing.assert_allclose([5, 4, 4, 4, 5, 14],
nn_sdm.s_ith_nn_d(2))
def test_s_ith_nn_ind(self):
nn_sdm = eda.SampleDistanceMatrix([[0, 0, 0], [1, 1, 1], [5, 5, 5],
[6, 6, 6], [10, 10, 10],
[20, 20, 20]],
metric='euclidean')
np.testing.assert_allclose([0, 1, 2, 3, 4, 5],
nn_sdm.s_ith_nn_ind(0))
np.testing.assert_allclose([1, 0, 3, 2, 3, 4],
nn_sdm.s_ith_nn_ind(1))
np.testing.assert_allclose([2, 2, 1, 4, 2, 3],
nn_sdm.s_ith_nn_ind(2))
# Because summary dist plot calls hist_dens_plot immediately after
# obtaining the summary statistics vector, the correctness of summary
# statistics vector and hist_dens_plot implies the correctness of the
# plots.
@pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
def test_s_ith_nn_d_dist(self):
nn_sdm = eda.SampleDistanceMatrix([[0, 0, 0], [1, 1, 1], [5, 5, 5],
[6, 6, 6], [10, 10, 10],
[20, 20, 20]],
metric='euclidean')
nn_sdm.s_ith_nn_d_dist(1)
def test_knn_ind_lut(self):
nn_sdm = eda.SampleDistanceMatrix([[0, 0, 0], [1, 1, 1], [5, 5, 5],
[6, 6, 6], [10, 10, 10],
[20, 20, 20]],
metric='euclidean')
assert nn_sdm.s_knn_ind_lut(0) == dict(zip(range(6), [[]]*6))
assert (nn_sdm.s_knn_ind_lut(1) ==
dict(zip(range(6), [[1], [0], [3], [2], [3], [4]])))
assert (nn_sdm.s_knn_ind_lut(2) ==
dict(zip(range(6), [[1, 2], [0, 2], [3, 1],
[2, 4], [3, 2], [4, 3]])))
assert (nn_sdm.s_knn_ind_lut(3) ==
dict(zip(range(6), [[1, 2, 3], [0, 2, 3], [3, 1, 0],
[2, 4, 1], [3, 2, 1], [4, 3, 2]])))
nn_sdm.s_knn_ind_lut(5)
def test_knn_ind_lut_wrong_args(self):
nn_sdm = eda.SampleDistanceMatrix([[0, 0, 0], [1, 1, 1], [5, 5, 5],
[6, 6, 6], [10, 10, 10],
[20, 20, 20]],
metric='euclidean')
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(-1)
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(-0.5)
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(6)
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(6.5)
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(7)
@pytest.mark.mpl_image_compare
def test_sdm_tsne_feature_gradient_plot(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
fig = sdm.tsne_feature_gradient_plot(
'5', figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
@pytest.mark.mpl_image_compare
def test_sdm_tsne_feature_gradient_plus10_plot(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
fig = sdm.tsne_feature_gradient_plot(
'5', transform=lambda x: x + 10, figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
@pytest.mark.mpl_image_compare
def test_sdm_tsne_feature_gradient_plot_sslabs(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
sdm.tsne_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels='a',
transform=lambda x: np.log(x+1),
figsize=(10, 10), s=50)
fig = sdm.tsne_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels='a',
figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
@pytest.mark.mpl_image_compare
def test_sdm_tsne_feature_gradient_plot_sslabs_empty(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
fig = sdm.tsne_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels=[],
figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
def test_sdm_tsne_feature_gradient_plot_sslabs_wrong_args(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
# Mismatch labels
with pytest.raises(ValueError) as excinfo:
sdm.tsne_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels=[11],
figsize=(10, 10), s=50)
with pytest.raises(ValueError) as excinfo:
sdm.tsne_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels=['i'],
figsize=(10, 10), s=50)
# labels not provided
with pytest.raises(ValueError) as excinfo:
sdm.tsne_feature_gradient_plot(
'5', selected_labels=[11], figsize=(10, 10), s=50)
def test_sdm_tsne_feature_gradient_plot_wrong_args(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(x, sids=sids, fids=fids)
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot('5', transform=2)
# wrong labels size
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[])
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[1])
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[2])
# wrong gradient length
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot([0, 1])
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot(11)
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot(-1)
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot(5)
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot('123')
@pytest.mark.mpl_image_compare
def test_sdm_tsne_plot(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
g = x_sorted[:, 5]
sdm = eda.SampleDistanceMatrix(x_sorted, sids=sids, fids=fids)
return sdm.tsne_plot(g, figsize=(10, 10), s=50)
@pytest.mark.mpl_image_compare
def test_sdm_pca_feature_gradient_plot(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
fig = sdm.pca_feature_gradient_plot(
'5', figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
@pytest.mark.mpl_image_compare
def test_sdm_pca_feature_gradient_plus10_plot(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
fig = sdm.pca_feature_gradient_plot(
'5', transform=lambda x: x + 10, figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
@pytest.mark.mpl_image_compare
def test_sdm_pca_feature_gradient_plot_sslabs(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
sdm.pca_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels='a',
transform=lambda x: np.log(x+1),
figsize=(10, 10), s=50)
fig = sdm.pca_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels='a',
figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
@pytest.mark.mpl_image_compare
def test_sdm_pca_feature_gradient_plot_sslabs_empty(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
fig = sdm.pca_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels=[],
figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
def test_sdm_pca_feature_gradient_plot_sslabs_wrong_args(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
# Mismatch labels
with pytest.raises(ValueError) as excinfo:
sdm.pca_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels=[11],
figsize=(10, 10), s=50)
with pytest.raises(ValueError) as excinfo:
sdm.pca_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels=['i'],
figsize=(10, 10), s=50)
# labels not provided
with pytest.raises(ValueError) as excinfo:
sdm.pca_feature_gradient_plot(
'5', selected_labels=[11], figsize=(10, 10), s=50)
def test_sdm_pca_feature_gradient_plot_wrong_args(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(x, sids=sids, fids=fids)
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot('5', transform=2)
# wrong labels size
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[])
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[1])
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[2])
# wrong gradient length
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot([0, 1])
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot(11)
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot(-1)
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot(5)
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot('123')
@pytest.mark.mpl_image_compare
def test_sdm_pca_plot(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
g = x_sorted[:, 5]
sdm = eda.SampleDistanceMatrix(x_sorted, sids=sids, fids=fids)
return sdm.pca_plot(gradient=g, figsize=(10, 10), s=50)
def test_pca_dim(self):
np.random.seed(123)
x5k = np.random.normal(size=5000)
sdm = eda.SampleDistanceMatrix(x5k.reshape(20, -1))
assert sdm._pca_x.shape == (20, 20)
def test_pca_var_explained(self):
np.random.seed(123)
x5k = np.random.normal(size=5000)
sdm = eda.SampleDistanceMatrix(x5k.reshape(20, -1))
assert sdm._skd_pca.explained_variance_.shape == (20,)
assert sdm._skd_pca.explained_variance_ratio_.shape == (20,)
@pytest.mark.mpl_image_compare
def test_sdm_umap_feature_gradient_plot_dense(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
fig = sdm.umap_feature_gradient_plot(
'5', figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
@pytest.mark.mpl_image_compare
def test_sdm_umap_feature_gradient_plus10_plot_dense(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
fig = sdm.umap_feature_gradient_plot(
'5', transform=lambda x: x + 10, figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
@pytest.mark.mpl_image_compare
def test_sdm_umap_feature_gradient_plot_dense_sslabs(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
sdm.umap_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels='a',
transform=lambda x: np.log(x+1),
figsize=(10, 10), s=50)
fig = sdm.umap_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels='a',
figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
@pytest.mark.mpl_image_compare
def test_sdm_umap_feature_gradient_plot_dense_sslabs_empty(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
fig = sdm.umap_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels=[],
figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
def test_sdm_umap_feature_gradient_plot_dense_sslabs_wrong_args(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
# Mismatch labels
with pytest.raises(ValueError) as excinfo:
sdm.umap_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels=[11],
figsize=(10, 10), s=50)
with pytest.raises(ValueError) as excinfo:
sdm.umap_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels=['i'],
figsize=(10, 10), s=50)
# labels not provided
with pytest.raises(ValueError) as excinfo:
sdm.umap_feature_gradient_plot(
'5', selected_labels=[11], figsize=(10, 10), s=50)
def test_sdm_umap_feature_gradient_plot_dense_wrong_args(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(x, sids=sids, fids=fids)
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot('5', transform=2)
# wrong labels size
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[])
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[1])
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[2])
# wrong gradient length
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot([0, 1])
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot(11)
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot(-1)
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot(5)
with pytest.raises(ValueError):
sdm.umap_feature_gradient_plot('123')
@pytest.mark.mpl_image_compare
def test_sdm_umap_plot_dense(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
g = x_sorted[:, 5]
sdm = eda.SampleDistanceMatrix(x_sorted, sids=sids, fids=fids)
return sdm.umap_plot(gradient=g, figsize=(10, 10), s=50)
def test_umap_dim(self):
np.random.seed(123)
x5k = np.random.normal(size=5000)
sdm = eda.SampleDistanceMatrix(x5k.reshape(20, -1))
assert sdm._umap_x.shape == (20, 2)
def test_umap_modes(self):
np.random.seed(123)
x5k = np.random.normal(size=5000)
sdm = eda.SampleDistanceMatrix(x5k.reshape(20, -1))
assert sdm.umap(use_pca=False).shape == (20, 2)
sdm = eda.SampleDistanceMatrix(x5k.reshape(20, -1), use_pdist=False)
assert sdm.umap(use_pca=False).shape == (20, 2)
def test_s_knn_connectivity_matrix(self):
nn_sdm = eda.SampleDistanceMatrix([[1], [2], [6]],
metric='euclidean')
np.testing.assert_allclose(
[[0, 1, 0], [1, 0, 0], [0, 4, 0]],
nn_sdm.s_knn_connectivity_matrix(1).toarray())
assert nn_sdm.s_knn_connectivity_matrix(
1, use_hnsw=False, use_pca=False).shape == (3, 3)
with pytest.raises(ValueError):
assert nn_sdm.s_knn_connectivity_matrix(0)
with pytest.raises(ValueError):
assert nn_sdm.s_knn_connectivity_matrix(
1, use_hnsw=False, use_pca=False,
index_params={}).shape == (3, 3)
with pytest.raises(ValueError):
assert nn_sdm.s_knn_connectivity_matrix(
1, use_hnsw=False, use_pca=False,
index_params=None, query_params={}).shape == (3, 3)
assert nn_sdm.s_knn_connectivity_matrix(
1, use_hnsw=True, use_pca=False).shape == (3, 3)
# hnsw can only handle vectors with more than one non-0 elements.
nn_sdm = eda.SampleDistanceMatrix(
[[1, 2, 3], [2, 0, 0], [6, 0, 0]],
metric='cosine')
assert nn_sdm.s_knn_connectivity_matrix(
1, use_hnsw=True, use_pca=True).shape == (3, 3)
assert nn_sdm.s_knn_connectivity_matrix(
1, use_hnsw=True, use_pca=False).shape == (3, 3)
assert nn_sdm.s_knn_connectivity_matrix(
1, use_hnsw=False, use_pca=True).shape == (3, 3)
assert nn_sdm.s_knn_connectivity_matrix(
1, use_hnsw=True, use_pca=True, index_params={},
query_params={}, verbose=True).shape == (3, 3)
nn_sdm = eda.SampleDistanceMatrix(
[[1, 2, 3], [2, 0, 0], [6, 0, 0]],
metric='euclidean')
assert nn_sdm.s_knn_connectivity_matrix(
1, use_hnsw=True, use_pca=True).shape == (3, 3)
assert nn_sdm.s_knn_connectivity_matrix(
1, use_hnsw=True, use_pca=False).shape == (3, 3)
assert nn_sdm.s_knn_connectivity_matrix(
1, use_hnsw=False, use_pca=True).shape == (3, 3)
assert nn_sdm.s_knn_connectivity_matrix(
1, use_hnsw=True, use_pca=True, index_params={},
query_params={}, verbose=True).shape == (3, 3)
nn_sdm = eda.SampleDistanceMatrix(
[[1, 2, 3], [2, 0, 0], [6, 0, 0]],
metric='euclidean')
assert nn_sdm.s_knn_connectivity_matrix(
1, metric='cosine', use_hnsw=True, use_pca=True).shape == (3, 3)
assert nn_sdm.s_knn_connectivity_matrix(
1, use_hnsw=True, use_pca=False).shape == (3, 3)
assert nn_sdm.s_knn_connectivity_matrix(
1, metric='cosine', use_hnsw=False, use_pca=True).shape == (3, 3)
assert nn_sdm.s_knn_connectivity_matrix(
1, metric='cosine', use_hnsw=True, use_pca=True, index_params={},
query_params={}, verbose=True).shape == (3, 3)
with pytest.raises(ValueError):
assert nn_sdm.s_knn_connectivity_matrix(
1, metric='correlation', use_hnsw=True, use_pca=False,
index_params={}, query_params={},
verbose=True).shape == (3, 3)
with pytest.raises(ValueError):
assert nn_sdm.s_knn_connectivity_matrix(
1, metric='correlation', use_hnsw=True, use_pca=True,
index_params={}, query_params={},
verbose=True).shape == (3, 3)
assert nn_sdm.s_knn_connectivity_matrix(
1, metric='cosine', use_hnsw=False, use_pca=True, verbose=True).shape == (3, 3)
assert nn_sdm.s_knn_connectivity_matrix(
1, metric='cosine', use_hnsw=False, use_pca=False, verbose=True).shape == (3, 3)
@pytest.mark.mpl_image_compare
def test_s_knn_graph_grad_lab(self):
np.random.seed(123)
x = np.concatenate((np.random.normal(0, 1, 10),
np.random.normal(20, 1, 20))).reshape(30, -1)
sdm = eda.SampleDistanceMatrix(x, metric='euclidean')
sdm.s_knn_graph(5, figsize=(5, 5))
assert (5, 1) in sdm._knn_ng_lut
assert len(sdm._knn_ng_lut) == 1
# use cache
sdm.s_knn_graph(5, figsize=(5, 5))
sdm.s_knn_graph(5, figsize=(5, 5), fa2_kwargs={})
sdm.s_knn_graph(5, figsize=(5, 5), nx_draw_kwargs={})
assert len(sdm._knn_ng_lut) == 1
gradient = np.array([1] * 10 + [10] * 20)
        labs = np.array([1] * 10 + [2] * 20)
return sdm.s_knn_graph(5, gradient=gradient, labels=labs,
figsize=(5, 5),
alpha=0.8, random_state=123)
@pytest.mark.mpl_image_compare
def test_s_knn_graph_grad_lab_same_marker(self):
np.random.seed(123)
x = np.concatenate((np.random.normal(0, 1, 10),
np.random.normal(20, 1, 20))).reshape(30, -1)
sdm = eda.SampleDistanceMatrix(x, metric='euclidean')
sdm.s_knn_graph(5, figsize=(5, 5))
assert (5, 1) in sdm._knn_ng_lut
assert len(sdm._knn_ng_lut) == 1
gradient = np.array([1] * 10 + [10] * 20)
        labs = np.array([1] * 10 + [2] * 20)
return sdm.s_knn_graph(5, gradient=gradient, labels=labs,
different_label_markers=False,
figsize=(5, 5),
alpha=0.8, random_state=123)
@pytest.mark.mpl_image_compare
def test_s_knn_graph_grad_nolab(self):
np.random.seed(123)
x = np.concatenate((np.random.normal(0, 1, 10),
np.random.normal(20, 1, 20))).reshape(30, -1)
sdm = eda.SampleDistanceMatrix(x, metric='euclidean')
sdm.s_knn_graph(5, figsize=(5, 5))
assert (5, 1) in sdm._knn_ng_lut
assert len(sdm._knn_ng_lut) == 1
# use cache
sdm.s_knn_graph(5, figsize=(5, 5))
sdm.s_knn_graph(5, figsize=(5, 5), fa2_kwargs={})
sdm.s_knn_graph(5, figsize=(5, 5), nx_draw_kwargs={})
assert len(sdm._knn_ng_lut) == 1
gradient = np.array([1] * 10 + [10] * 20)
return sdm.s_knn_graph(5, gradient=gradient, figsize=(5, 5),
alpha=0.8, random_state=123)
@pytest.mark.mpl_image_compare
def test_s_knn_graph_nograd_nolab(self):
np.random.seed(123)
x = np.concatenate((np.random.normal(0, 1, 10),
np.random.normal(20, 1, 20))).reshape(30, -1)
sdm = eda.SampleDistanceMatrix(x, metric='euclidean')
sdm.s_knn_graph(5, figsize=(5, 5))
assert (5, 1) in sdm._knn_ng_lut
assert len(sdm._knn_ng_lut) == 1
# use cache
sdm.s_knn_graph(5, figsize=(5, 5))
sdm.s_knn_graph(5, figsize=(5, 5), fa2_kwargs={})
sdm.s_knn_graph(5, figsize=(5, 5), nx_draw_kwargs={})
assert len(sdm._knn_ng_lut) == 1
return sdm.s_knn_graph(5, figsize=(5, 5),
alpha=0.8, random_state=123)
@pytest.mark.mpl_image_compare
def test_s_knn_graph_nograd_lab(self):
np.random.seed(123)
x = np.concatenate((np.random.normal(0, 1, 10),
np.random.normal(20, 1, 20))).reshape(30, -1)
sdm = eda.SampleDistanceMatrix(x, metric='euclidean')
sdm.s_knn_graph(5, figsize=(5, 5))
assert (5, 1) in sdm._knn_ng_lut
assert len(sdm._knn_ng_lut) == 1
# use cache
sdm.s_knn_graph(5, figsize=(5, 5))
sdm.s_knn_graph(5, figsize=(5, 5), fa2_kwargs={})
sdm.s_knn_graph(5, figsize=(5, 5), nx_draw_kwargs={})
assert len(sdm._knn_ng_lut) == 1
labs = np.array([1] * 10 + [2] * 20)
return sdm.s_knn_graph(5, labels=labs, figsize=(5, 5),
alpha=0.8, random_state=123)
@pytest.mark.mpl_image_compare
def test_s_knn_graph_nograd_lab_same_marker(self):
np.random.seed(123)
x = np.concatenate((np.random.normal(0, 1, 10),
np.random.normal(20, 1, 20))).reshape(30, -1)
sdm = eda.SampleDistanceMatrix(x, metric='euclidean')
sdm.s_knn_graph(5, figsize=(5, 5))
assert (5, 1) in sdm._knn_ng_lut
assert len(sdm._knn_ng_lut) == 1
# use cache
sdm.s_knn_graph(5, figsize=(5, 5))
sdm.s_knn_graph(5, figsize=(5, 5), fa2_kwargs={})
sdm.s_knn_graph(5, figsize=(5, 5), nx_draw_kwargs={})
assert len(sdm._knn_ng_lut) == 1
labs = np.array([1] * 10 + [2] * 20)
return sdm.s_knn_graph(5, labels=labs, figsize=(5, 5),
different_label_markers=False,
alpha=0.8, random_state=123)
def test_cosine_pdist(self):
np.random.seed(222)
x = np.random.ranf(10000).reshape(500, -1)
skd = sklearn.metrics.pairwise.pairwise_distances(x, metric='cosine')
np.testing.assert_allclose(
eda.SampleDistanceMatrix.cosine_pdist(x), skd)
np.testing.assert_allclose(
eda.SampleDistanceMatrix(x, metric='cosine')._d, skd)
def test_correlation_pdist(self):
np.random.seed(222)
x = np.random.ranf(10000).reshape(500, -1)
skd = sklearn.metrics.pairwise.pairwise_distances(
x, metric='correlation')
np.testing.assert_allclose(
eda.SampleDistanceMatrix.correlation_pdist(x), skd)
np.testing.assert_allclose(
eda.SampleDistanceMatrix(x, metric='correlation')._d, skd)
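# Worked sketch of the correlation distance that test_valid_init checks above:
# d_corr(u, v) = 1 - ((u - mean(u)) . (v - mean(v))) / (||u - mean(u)|| * ||v - mean(v)||)
def _corr_dist_example(u, v):
    uc = u - u.mean()
    vc = v - v.mean()
    return 1 - np.dot(uc, vc) / (np.linalg.norm(uc) * np.linalg.norm(vc))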
class TestHClustTree(object):
"""docstring for TestHClustTree"""
sdm_5x2 = eda.SampleDistanceMatrix([[0, 0],
[100, 100],
[1, 1],
[101, 101],
[80, 80]],
metric="euclidean")
# This tree should be
# _______|_____
# | ____|___
# __|___ | __|___
# | | | | |
# 0 2 4 1 3
# Leaves are in optimal order.
hct = eda.HClustTree.hclust_tree(sdm_5x2.d, linkage="auto")
def test_hclust_tree_args(self):
eda.HClustTree.hclust_tree(self.sdm_5x2.d, linkage="auto",
n_eval_rounds=-1, is_euc_dist=True,
verbose=True)
def test_hct_from_lkg(self):
lkg = eda.HClustTree.hclust_linkage(
self.sdm_5x2.d, linkage="auto",
n_eval_rounds=-1, is_euc_dist=True,
verbose=True)
tree1 = eda.HClustTree.hct_from_lkg(lkg)
tree2 = eda.HClustTree.hct_from_lkg(lkg)
assert tree1 is not tree2
assert tree1._left is not tree2._left
assert tree1._right is not tree2._right
def test_hclust_tree(self):
assert self.hct.prev is None
assert self.hct.left_count() == 2
assert self.hct.right_count() == 3
assert self.hct.count() == 5
assert len(self.hct.leaf_ids()) == 5
assert self.hct.leaf_ids() == [0, 2, 4, 1, 3]
assert len(self.hct.left_leaf_ids()) == 2
assert self.hct.left_leaf_ids() == [0, 2]
assert len(self.hct.right_leaf_ids()) == 3
assert self.hct.right_leaf_ids() == [4, 1, 3]
assert self.hct.left().left().left().count() == 0
assert self.hct.left().left().left().leaf_ids() == []
assert self.hct.left().left().left_leaf_ids() == []
assert self.hct.left().left().right().count() == 0
def test_hclust_tree_invalid_dmat(self):
with pytest.raises(ValueError) as excinfo:
eda.HClustTree.hclust_tree(np.arange(5))
with pytest.raises(ValueError) as excinfo:
eda.HClustTree.hclust_tree(np.arange(10).reshape(2, 5))
def test_bi_partition_no_min(self):
# return subtrees False
labs1, sids1 = self.hct.bi_partition()
# return subtrees True
labs2, sids2, lst, rst = self.hct.bi_partition(return_subtrees=True)
np.testing.assert_equal(labs1, [0, 0, 1, 1, 1])
np.testing.assert_equal(sids1, [0, 2, 4, 1, 3])
np.testing.assert_equal(sids1, self.hct.leaf_ids())
assert labs1 == labs2
assert sids1 == sids2
assert lst.count() == 2
assert lst.left_count() == 1
assert lst.left_leaf_ids() == [0]
assert lst.right_leaf_ids() == [2]
assert lst.leaf_ids() == [0, 2]
assert rst.leaf_ids() == [4, 1, 3]
assert rst.right_leaf_ids() == [1, 3]
assert rst.left_leaf_ids() == [4]
def test_bi_partition_2min_g_cnt(self):
# _______|_____
# | ____|___
# __|___ | __|___
# | | | | |
# 0 2 4 1 3
# Leaves are in optimal order.
labs1, sids1 = self.hct.bi_partition(soft_min_subtree_size=3)
# return subtrees True
labs2, sids2, lst, rst = self.hct.bi_partition(
soft_min_subtree_size=3, return_subtrees=True)
np.testing.assert_equal(labs1, [0, 0, 1, 1, 1])
np.testing.assert_equal(sids1, [0, 2, 4, 1, 3])
np.testing.assert_equal(sids1, self.hct.leaf_ids())
assert labs1 == labs2
assert sids1 == sids2
assert lst.count() == 2
assert lst.left_count() == 1
assert lst.left_leaf_ids() == [0]
assert lst.right_leaf_ids() == [2]
assert lst.leaf_ids() == [0, 2]
assert rst.leaf_ids() == [4, 1, 3]
assert rst.right_leaf_ids() == [1, 3]
assert rst.left_leaf_ids() == [4]
def test_bi_partition_min_no_spl(self):
# ____|____ 6
# | ___|____ 5
# | | __|___ 4
# | | | |
# 3 2 1 0
z = sch.linkage([[0, 0], [1, 1], [3, 3], [6, 6]],
metric='euclidean', method='complete',
optimal_ordering=True)
hct = eda.HClustTree(sch.to_tree(z))
assert hct.leaf_ids() == [3, 2, 1, 0]
labs, sids, lst, rst = hct.bi_partition(
soft_min_subtree_size=2, return_subtrees=True)
assert labs == [0, 0, 1, 1]
assert sids == [3, 2, 1, 0]
# hct should be changed accordingly
assert hct.leaf_ids() == [3, 2, 1, 0]
assert hct.left_leaf_ids() == [3, 2]
assert hct.right_leaf_ids() == [1, 0]
# subtrees
assert lst.leaf_ids() == [3, 2]
assert rst.leaf_ids() == [1, 0]
# prev
assert lst._prev is hct
assert rst._prev is hct
# ids
assert lst._node.id == 5
assert lst._node.left.id == 3
assert lst._node.right.id == 2
# ids
assert rst._node.id == 4
assert rst._node.left.id == 1
assert rst._node.right.id == 0
def test_bi_partition_min_no_spl_lr_rev(self):
# left right reversed
# ____|____ 6
# | ___|____ 5
# | | __|___ 4
# | | | |
# 3 2 1 0
z = sch.linkage([[0, 0], [1, 1], [3, 3], [6, 6]],
metric='euclidean', method='complete',
optimal_ordering=True)
root = sch.to_tree(z)
# reverse left right subtree
root_left = root.left
root.left = root.right
root.right = root_left
hct = eda.HClustTree(root)
assert hct.leaf_ids() == [2, 1, 0, 3]
labs, sids, lst, rst = hct.bi_partition(
soft_min_subtree_size=2, return_subtrees=True)
assert labs == [0, 0, 1, 1]
assert sids == [2, 1, 0, 3]
# hct should be changed accordingly
assert hct.leaf_ids() == [2, 1, 0, 3]
assert hct.left_leaf_ids() == [2, 1]
assert hct.right_leaf_ids() == [0, 3]
# subtrees
assert lst.leaf_ids() == [2, 1]
assert rst.leaf_ids() == [0, 3]
# prev
assert lst._prev is hct
assert rst._prev is hct
assert hct._left is lst._node
assert hct._right is rst._node
# ids
assert rst._node.id == 4
assert rst._node.left.id == 0
assert rst._node.right.id == 3
# ids
assert lst._node.id == 5
assert lst._node.left.id == 2
assert lst._node.right.id == 1
def test_bi_partition_min_spl(self):
# _____|_____
# | ____|____
# | __|__ __|__
# | | | | |
# 4 3 2 1 0
z = sch.linkage([[0, 0], [1, 1], [3, 3], [4, 4], [10, 10]],
metric='euclidean', method='complete',
optimal_ordering=True)
hct = eda.HClustTree(sch.to_tree(z))
assert hct.leaf_ids() == [4, 3, 2, 1, 0]
assert hct.left_leaf_ids() == [4]
assert hct.right().left().leaf_ids() == [3, 2]
assert hct.right().right().leaf_ids() == [1, 0]
labs, sids, lst, rst = hct.bi_partition(
soft_min_subtree_size=2, return_subtrees=True)
assert labs == [0, 0, 0, 1, 1]
assert sids == [4, 3, 2, 1, 0]
# hct should be changed accordingly
assert hct.leaf_ids() == [4, 3, 2, 1, 0]
assert hct.left_leaf_ids() == [4, 3, 2]
assert hct.right_leaf_ids() == [1, 0]
# left
assert lst._prev is hct
assert lst._node.left.left.id == 4
assert lst._node.left.right.id == 3
assert lst._node.right.id == 2
# right
assert rst._prev is hct
assert rst._node.left.id == 1
assert rst._node.right.id == 0
def test_bi_partition_min_multi_spl(self):
# ____|____
# | ____|___
# | | ___|____
# | | | ___|___
# | | | | __|__
# | | | | | |
# 5 4 3 2 1 0
z = sch.linkage([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]],
metric='euclidean', method='single',
optimal_ordering=True)
root = sch.to_tree(z)
assert root.left.id == 5
assert root.right.left.id == 4
assert root.right.right.left.id == 3
assert root.right.right.right.left.id == 2
assert root.right.right.right.right.left.id == 1
assert root.right.right.right.right.right.id == 0
hct = eda.HClustTree(root)
labs, sids, lst, rst = hct.bi_partition(
soft_min_subtree_size=3, return_subtrees=True)
assert labs == [0, 0, 0, 1, 1, 1]
assert sids == [5, 4, 3, 2, 1, 0]
# lst
assert hct._left is lst._node
assert lst._prev is hct
assert lst.left_leaf_ids() == [5, 4]
assert lst.right_leaf_ids() == [3]
# rst
assert hct._right is rst._node
assert rst._prev is hct
assert rst.left_leaf_ids() == [2]
assert rst.right_leaf_ids() == [1, 0]
def test_bi_partition_min_switch_spl(self):
# _______|________
# | _____|_____
# | ____|____ |
# | __|__ __|__ |
# | | | | | |
# 0 1 2 3 4 5
# round 1: ( ((0, (1, 2)), (3, 4)), (5) )
        # round 2: ( (0, (1, 2)), (3, (4, 5)) )
z = sch.linkage([[0], [5], [6], [8], [9], [12]],
method='single', optimal_ordering=True)
root = sch.to_tree(z)
assert root.left.id == 0
assert root.right.right.id == 5
assert root.right.left.left.left.id == 1
assert root.right.left.left.right.id == 2
assert root.right.left.right.left.id == 3
assert root.right.left.right.right.id == 4
hct = eda.HClustTree(root)
labs, sids, lst, rst = hct.bi_partition(
soft_min_subtree_size=3, return_subtrees=True)
assert labs == [0, 0, 0, 1, 1, 1]
assert sids == [0, 1, 2, 3, 4, 5]
# lst
assert hct._left is lst._node
assert lst._prev is hct
assert lst.left_leaf_ids() == [0]
assert lst.right_leaf_ids() == [1, 2]
# rst
assert hct._right is rst._node
assert rst._prev is hct
assert rst.left_leaf_ids() == [3]
assert rst.right_leaf_ids() == [4, 5]
def test_bi_partition_wrong_args(self):
with pytest.raises(ValueError) as excinfo:
self.hct.bi_partition(soft_min_subtree_size=0)
with pytest.raises(ValueError) as excinfo:
self.hct.bi_partition(soft_min_subtree_size=0.5)
with pytest.raises(ValueError) as excinfo:
self.hct.bi_partition(soft_min_subtree_size=-1)
def test_cluster_id_to_lab_list_wrong_id_list_type(self):
with pytest.raises(ValueError) as excinfo:
eda.HClustTree.cluster_id_to_lab_list(
np.array([[0, 1, 2], [3, 4]]), [0, 1, 2, 3, 4])
def test_cluster_id_to_lab_list_mismatched_ids_sids(self):
with pytest.raises(ValueError) as excinfo:
eda.HClustTree.cluster_id_to_lab_list(
[[0, 1, 2], [3, 4]], [0, 1, 2, 3, 5])
def test_cluster_id_to_lab_list_empty_cluster(self):
with pytest.raises(ValueError) as excinfo:
eda.HClustTree.cluster_id_to_lab_list(
[[], [0, 1, 2, 3, 4]], [0, 1, 2, 3, 4])
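# A minimal usage sketch outside pytest (data values are illustrative): build a
# distance matrix, grow a hierarchical clustering tree, and bipartition it.
if __name__ == '__main__':
    sdm = eda.SampleDistanceMatrix(
        [[0, 0], [1, 1], [10, 10], [11, 11]], metric='euclidean')
    hct = eda.HClustTree.hclust_tree(sdm.d, linkage='auto')
    labs, sids = hct.bi_partition()
    print(labs, sids)  # expected: two clusters of two samples each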
|
11534335
|
from django.conf.urls import patterns, url
from django.views.generic.base import RedirectView
from django.contrib.auth.decorators import login_required as lr
from forum.views import AdminCourseForumView
from course_material.views import CourseMaterialAdminView
from .views import (AdminView, CourseAdminView, CourseCreateView,
ExportCourseView, ImportCourseView, UserAdminView,)
urlpatterns = patterns(
'',
# home admin
url(r'^home/$', lr(AdminView.as_view(template_name="home.html")), name="administration.home"),
# list all courses
url(r'^$', lr(RedirectView.as_view(url="courses/", permanent=False))),
url(r'^courses/$', AdminView.as_view(template_name="courses.html"), name='administration.courses'),
# users
url(r'^users/$', UserAdminView.as_view(template_name="users.html"), name='administration.users'),
# url(r'^users/(?P<pk>[0-9]+)/$', UserUpdateView.as_view(), name='administration.user-update'),
# url(r'^users/(?P<pk>[0-9]+)/delete/$', UserDeleteView.as_view(), name='administration.user-delete'),
# create, edit and export courses
url(r'^courses/new/$', CourseCreateView.as_view(), name="administration.new_course"),
url(r'^courses/(?P<course_id>[1-9][0-9]*)/$', CourseAdminView.as_view(template_name="course.html"), name="administration.edit_course"),
url(r'^course/(?P<course_id>[1-9][0-9]*)/export/$', ExportCourseView.as_view(), name="administration.export_course"),
url(r'^course/import/$', ImportCourseView.as_view(), name="administration.import_course"),
# create and edit lesson
url(r'^courses/(?P<course_id>[1-9][0-9]*)/lessons/new/$', CourseAdminView.as_view(template_name="lesson.html")),
url(r'^courses/(?P<course_id>[1-9][0-9]*)/lessons/(?P<pk>[1-9][0-9]*)/$', CourseAdminView.as_view(template_name="lesson.html")),
# messages
url(r'^course/(?P<course_id>[1-9][0-9]*)/messages/$', CourseAdminView.as_view(template_name="messages.html"), name="administration.messages"),
url(r'^course/(?P<course_id>[1-9][0-9]*)/message/(?P<message_id>[1-9][0-9]*)$', CourseAdminView.as_view(template_name="message.html")),
url(r'^course/(?P<course_id>[1-9][0-9]*)/forum/', AdminCourseForumView.as_view(template_name="forum.html"), name="administration.forum"),
url(r'^course/(?P<pk>[1-9][0-9]*)/material/$',
CourseMaterialAdminView.as_view(template_name="course-material-admin.html"),
name="administration.course_material"
),
url(r'^course/(?P<course_id>[1-9][0-9]*)/permissions/$', CourseAdminView.as_view(template_name="permissions.html"), name="course.permissions"),
url(r'^course/(?P<course_id>[1-9][0-9]*)/certificatesettings/$', CourseAdminView.as_view(template_name="certificate-settings.html"), name="course.certificate-settings"),
url(r'^course/(?P<course_id>[1-9][0-9]*)/reports/$', CourseAdminView.as_view(template_name="stats.html"), name="administration.reports"),
)
|
11534338
|
from flask import jsonify, request, send_file, abort
from PIL import Image
from io import BytesIO
from app import app
from urllib.parse import urlparse
@app.route('/convertEMF', methods=['POST'])
def get_tasks():
    hostname = urlparse(request.referrer).hostname
    # guard against a missing referrer before checking the allowed domains
    if hostname is None or not (hostname.endswith(".draw.io") or hostname.endswith(".jgraph.com")):
        abort(403)
img = request.files['img']
pngImg = BytesIO()
Image.open(img).save(pngImg, "png")
pngImg.seek(0)
return send_file(pngImg,
attachment_filename=img.filename+'.png',
as_attachment=True,
mimetype='image/png')
if __name__ == '__main__':
app.run(debug=True)
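# Usage sketch (hypothetical host and file; the referrer check above requires a
# *.draw.io or *.jgraph.com referrer header to be sent):
# curl -e "https://app.draw.io/" -F img=@diagram.emf \
#      http://localhost:5000/convertEMF -o diagram.png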
|
11534363
|
import os
from UCTB.utils import multiple_process
def task_func(share_queue, locker, data, parameters):
print('Child process %s with pid %s' % (parameters[0], os.getpid()))
for task in data:
print('Child process', parameters[0], 'running', task)
exec_str = 'python HMM.py --Dataset %s --City %s ' % (task[0], task[1])
if task[2] != '':
exec_str += task[2]
os.system(exec_str)
locker.acquire()
share_queue.put(None)
locker.release()
if __name__ == '__main__':
task_list = [
['Bike', 'NYC', ''],
['Bike', 'Chicago', ''],
['Bike', 'DC', ''],
['Metro', 'Chongqing', ''],
['Metro', 'Shanghai', ''],
['DiDi', 'Chengdu', ''],
['DiDi', 'Xian', ''],
['ChargeStation', 'Beijing', '']
]
n_jobs = 2
multiple_process(distribute_list=task_list,
partition_func=lambda data, i, n_job: [data[e] for e in range(len(data)) if e % n_job == i],
task_func=task_func, n_jobs=n_jobs,
reduce_func=lambda x, y: None, parameters=[])
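# Note on the round-robin partition above: with n_jobs=2 the 8 tasks split by
# index parity (same lambda as passed to multiple_process):
# partition = lambda data, i, n_job: [data[e] for e in range(len(data)) if e % n_job == i]
# assert partition(list(range(8)), 0, 2) == [0, 2, 4, 6]
# assert partition(list(range(8)), 1, 2) == [1, 3, 5, 7]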
|
11534407
|
import numpy as np
import torch
from torch.autograd import Function
import holoviews as hv
hv.extension('bokeh')
"""
pytorch-grad-cam by JacobGil (https://github.com/jacobgil/pytorch-grad-cam)
"""
class FeatureExtractor(object):
""" Class for extracting activations and
registering gradients from targetted intermediate layers """
def __init__(self, model, target_layers):
self.model = model
self.target_layers = target_layers
self.gradients = []
def save_gradient(self, grad):
self.gradients.append(grad)
def __call__(self, x):
outputs = []
self.gradients = []
for name, module in self.model._modules.items():
x = module(x)
if name in self.target_layers:
x.register_hook(self.save_gradient)
outputs += [x]
return outputs, x
class ModelOutputs():
""" Class for making a forward pass, and getting:
1. The network output.
2. Activations from intermediate targeted layers.
3. Gradients from intermediate targeted layers. """
def __init__(self, model, feature_module, target_layers):
self.model = model
self.feature_module = feature_module
self.feature_extractor = FeatureExtractor(self.feature_module, target_layers)
def get_gradients(self):
return self.feature_extractor.gradients
def __call__(self, x):
target_activations = []
for name, module in self.model._modules.items():
if "f1" not in name.lower():
if module == self.feature_module:
target_activations, x = self.feature_extractor(x)
elif "avgpool" in name.lower():
x = module(x)
x = x.view(x.size(0), -1)
else:
x = module(x)
return target_activations, x
class GradCam:
def __init__(self, model, feature_module, target_layer_names, device='cpu'):
self.model = model
self.feature_module = feature_module
self.model.eval()
self.model = model.to(device)
self.device = device
self.target_layer_names = target_layer_names
self.extractor = ModelOutputs(self.model, self.feature_module, target_layer_names)
@property
def target_layer_names(self):
return self.__target_layer_names
@target_layer_names.setter
def target_layer_names(self, x):
self.__target_layer_names = x
self.extractor = ModelOutputs(self.model, self.feature_module, self.__target_layer_names)
def forward(self, input_img):
return self.model(input_img)
def __call__(self, input_img, target_category=None):
input_img = input_img.to(self.device)
features, output = self.extractor(input_img)
        if target_category is None:
target_category = np.argmax(output.cpu().data.numpy())
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][target_category] = 1
one_hot = torch.from_numpy(one_hot).requires_grad_(True).to(self.device)
one_hot = torch.sum(one_hot * output)
self.feature_module.zero_grad()
self.model.zero_grad()
one_hot.backward(retain_graph=True)
grads_val = self.extractor.get_gradients()[-1].cpu().data.numpy()
target = features[-1]
target = target.cpu().data.numpy()[0, :]
weights = np.mean(grads_val, axis=(2, 3))[0, :]
cam = np.zeros(target.shape[1:], dtype=np.float32)
for i, w in enumerate(weights):
cam += w * target[i, :, :]
cam = np.maximum(cam, 0)
# cam = cv2.resize(cam, input_img.shape[2:])
cam = cam - np.min(cam)
cam = cam / np.max(cam)
return cam
class GuidedBackpropReLU(Function):
@staticmethod
def forward(self, input_img):
positive_mask = (input_img > 0).type_as(input_img)
output = torch.addcmul(torch.zeros(input_img.size()).type_as(input_img), input_img, positive_mask)
self.save_for_backward(input_img, output)
return output
@staticmethod
def backward(self, grad_output):
input_img, output = self.saved_tensors
grad_input = None
positive_mask_1 = (input_img > 0).type_as(grad_output)
positive_mask_2 = (grad_output > 0).type_as(grad_output)
grad_input = torch.addcmul(torch.zeros(input_img.size()).type_as(input_img),
torch.addcmul(torch.zeros(input_img.size()).type_as(input_img), grad_output,
positive_mask_1), positive_mask_2)
return grad_input
class GuidedBackpropReLUModel:
def __init__(self, model, use_cuda):
self.model = model
self.model.eval()
self.cuda = use_cuda
if self.cuda:
self.model = model.cuda()
def recursive_relu_apply(module_top):
for idx, module in module_top._modules.items():
recursive_relu_apply(module)
if module.__class__.__name__ == 'ReLU':
module_top._modules[idx] = GuidedBackpropReLU.apply
# replace ReLU with GuidedBackpropReLU
recursive_relu_apply(self.model)
def forward(self, input_img):
return self.model(input_img)
def __call__(self, input_img, target_category=None):
if self.cuda:
input_img = input_img.cuda()
input_img = input_img.requires_grad_(True)
output = self.forward(input_img)
        if target_category is None:
target_category = np.argmax(output.cpu().data.numpy())
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][target_category] = 1
one_hot = torch.from_numpy(one_hot).requires_grad_(True)
if self.cuda:
one_hot = one_hot.cuda()
one_hot = torch.sum(one_hot * output)
one_hot.backward(retain_graph=True)
output = input_img.grad.cpu().data.numpy()
output = output[0, :, :, :]
return output
def deprocess_image(img):
""" see https://github.com/jacobgil/keras-grad-cam/blob/master/grad-cam.py#L65 """
img = img - np.mean(img)
img = img / (np.std(img) + 1e-5)
img = img * 0.1
img = img + 0.5
img = np.clip(img, 0, 1)
return np.uint8(img * 255)
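# Usage sketch (assumes a torchvision ResNet-50; layer/module names follow the
# jacobgil pytorch-grad-cam convention credited above; with untrained weights
# the resulting map is arbitrary):
if __name__ == '__main__':
    from torchvision import models
    net = models.resnet50()  # random weights, no download
    grad_cam = GradCam(net, feature_module=net.layer4,
                       target_layer_names=["2"], device='cpu')
    dummy = torch.randn(1, 3, 224, 224)  # stand-in for a preprocessed image batch
    cam = grad_cam(dummy)  # 2D saliency map over layer4's spatial grid, scaled to [0, 1]
    print(cam.shape)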
|
11534440
|
from decimal import Decimal as D
from django.test import TestCase
from oscar.apps.offer import models, utils
from oscar.apps.shipping.repository import Repository
from oscar.apps.shipping.methods import FixedPrice
from oscar.test.basket import add_product
from oscar.test import factories
def create_offer():
range = models.Range.objects.create(
name="All products", includes_all_products=True)
condition = models.CountCondition.objects.create(
range=range,
type=models.Condition.COUNT,
value=1)
benefit = models.ShippingFixedPriceBenefit.objects.create(
type=models.Benefit.SHIPPING_FIXED_PRICE,
value=D('1.00'))
return models.ConditionalOffer.objects.create(
condition=condition,
benefit=benefit,
offer_type=models.ConditionalOffer.SITE)
class StubRepository(Repository):
"""
Stubbed shipped repository which overrides the get_shipping_methods method
in order to use a non-free default shipping method. This allows the
shipping discounts to be tested.
"""
methods = [FixedPrice(D('10.00'), D('10.00'))]
class TestAnOfferWithAShippingBenefit(TestCase):
def setUp(self):
self.basket = factories.create_basket(empty=True)
create_offer()
def test_applies_correctly_to_basket_which_matches_condition(self):
add_product(self.basket, D('12.00'))
utils.Applicator().apply(self.basket)
self.assertEqual(1, len(self.basket.offer_applications))
def test_applies_correctly_to_basket_which_exceeds_condition(self):
add_product(self.basket, D('12.00'), 2)
utils.Applicator().apply(self.basket)
self.assertEqual(1, len(self.basket.offer_applications))
def test_wraps_shipping_method_from_repository(self):
add_product(self.basket, D('12.00'), 1)
utils.Applicator().apply(self.basket)
methods = StubRepository().get_shipping_methods(self.basket)
method = methods[0]
charge = method.calculate(self.basket)
self.assertEqual(D('1.00'), charge.incl_tax)
def test_has_discount_recorded_correctly_when_order_is_placed(self):
add_product(self.basket, D('12.00'), 1)
utils.Applicator().apply(self.basket)
methods = StubRepository().get_shipping_methods(self.basket)
method = methods[0]
order = factories.create_order(basket=self.basket,
shipping_method=method)
discounts = order.discounts.all()
self.assertEqual(1, len(discounts))
discount = discounts[0]
self.assertTrue(discount.is_shipping_discount)
self.assertEqual(D('9.00'), discount.amount)
|
11534457
|
import os
import sys
import h5py
import pickle
import argparse
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
# from pytorch_transformers.modeling_bert import BertForSequenceClassification, BertConfig, MultimodalBertForSequenceClassification
# from pytorch_transformers.amir_tokenization import BertTokenizer
# from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule
# from transformers.tokenization import BertTokenizer
from models.subNets.BertTextEncoder import BertTextEncoder
class TextPre(object):
"""A single set of features of data."""
def __init__(self, args):
self.device = torch.device('cuda:0')
self.args = args
self.loadTextMap = {
'mosi': self.__load_data_mosi,
'mosei': self.__load_data_mosei
}
self.bert = BertTextEncoder(language=args.language).to(self.device)
def textConvertID(self, data, tokenizer):
features = {}
Input_ids, Input_mask, Segment_ids = [], [], []
Raw_text, Visual, Audio = [], [], []
Label, ids = [], []
max_seq_length = self.args.max_seq_length
for i in tqdm(range(len(data['raw_text']))):
raw_text = data['raw_text'][i]
visual = data['vision'][i]
audio = data['audio'][i]
tokens_a, inversions_a = tokenizer.tokenize(raw_text,invertable=True)
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:max_seq_length - 2]
inversions_a = inversions_a[:max_seq_length - 2]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
padding = [0] * (max_seq_length - len(input_ids))
if self.args.aligned:
text_len = min(len(raw_text.split()), max_seq_length)
new_visual = [visual[len(visual) - text_len + inv_id] for inv_id in inversions_a]
new_audio = [audio[len(audio) - text_len + inv_id] for inv_id in inversions_a]
visual = np.array(new_visual)
audio = np.array(new_audio)
# add "start" and "end" for audio and vision
audio_zero = np.zeros((1,audio.shape[1]))
audio = np.concatenate((audio_zero,audio,audio_zero))
visual_zero = np.zeros((1,visual.shape[1]))
visual = np.concatenate((visual_zero,visual,visual_zero))
audio_padding = np.zeros((max_seq_length - len(input_ids),audio.shape[1]))
audio = np.concatenate((audio,audio_padding))
video_padding = np.zeros((max_seq_length - len(input_ids),visual.shape[1]))
visual = np.concatenate((visual,video_padding))
assert audio.shape[0] == max_seq_length
assert visual.shape[0] == max_seq_length
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label = float(data['labels'][i])
Input_ids.append(input_ids)
Visual.append(visual)
Audio.append(audio)
Input_mask.append(input_mask)
Segment_ids.append(segment_ids)
Label.append(label)
Raw_text.append(raw_text)
ids.append(data['id'][i])
features['raw_text'] = np.array(Raw_text)
features['audio'] = np.array(Audio)
features['vision'] = np.array(Visual)
features['labels'] = np.array(Label)
features['id'] = np.array(ids)
Input_ids = np.expand_dims(Input_ids, 1)
Input_mask = np.expand_dims(Input_mask, 1)
Segment_ids = np.expand_dims(Segment_ids, 1)
text_bert = np.concatenate((Input_ids, Input_mask, Segment_ids), axis=1)
features['text_bert'] = text_bert
features['text'] = self.__convertID2Vector(text_bert)
return features
def __convertID2Vector(self, ids, batch_size=64):
results = []
left = 0
ids = torch.Tensor(ids)
for left in tqdm(range(0, ids.size(0), batch_size)):
right = min(left + batch_size, ids.size(0))
c_ids = ids[left:right].to(self.device)
c_vector = self.bert(c_ids).detach().cpu().numpy()
results.append(c_vector)
results = np.concatenate(results, axis=0)
return results
def __load_data_mosi(self):
# get text data
link = os.path.join(self.args.data_dir, 'Raw/Transcript/Segmented')
text_data = {}
for file in os.listdir(link):
name = file.split('.')[0]
for line in open(os.path.join(link, file), "r"):
num_id, cur_t = line.split('_DELIM_')
name_id = name + '_' + num_id.strip()
text_data[name_id] = cur_t.strip()
# get data
def matchData(mode='train'):
r_text = []
for cur_id in data[mode]['id']:
r_text.append(text_data[cur_id[0]])
data[mode]['raw_text'] = r_text
with open(os.path.join(self.args.data_dir, 'Processed/mosei_senti_data_noalign.pkl'), 'rb') as lf:
data = pickle.load(lf)
matchData(mode='train')
matchData(mode='valid')
matchData(mode='test')
return data
def __load_data_mosei(self):
def convert0(s):
if s == '0':
return '0.0'
return s
# get text data
link = os.path.join(self.args.data_dir, 'Raw/Transcript/Segmented')
text_data = {}
for file in os.listdir(link):
name = file.split('.')[0]
for line in open(os.path.join(link, file), "r"):
items = line.split('___')
name_id = items[0] + '_' + convert0(items[2]) + '_' + convert0(items[3])
text_data[name_id.strip()] = items[-1].strip()
# get data
def matchData(mode='train'):
r_text = []
for cur_id in data[mode]['id']:
name = '_'.join(cur_id)
r_text.append(text_data[name])
data[mode]['raw_text'] = r_text
with open(os.path.join(self.args.data_dir, 'Processed/mosei_senti_data_noalign.pkl'), 'rb') as lf:
data = pickle.load(lf)
matchData(mode='train')
matchData(mode='valid')
matchData(mode='test')
return data
def run(self):
data = self.loadTextMap[self.args.datasetName]()
train_list = data['train']
valid_list = data['valid']
test_list = data['test']
tokenizer = self.bert.get_tokenizer()
save_data = {}
save_data['train'] = self.textConvertID(train_list, tokenizer)
save_data['valid'] = self.textConvertID(valid_list, tokenizer)
save_data['test'] = self.textConvertID(test_list, tokenizer)
if self.args.aligned:
saved_path = os.path.join(self.args.save_dir, 'aligned_' + str(self.args.max_seq_length) + '.pkl')
else:
saved_path = os.path.join(self.args.save_dir, 'unaligned_' + str(self.args.max_seq_length) + '.pkl')
if not os.path.exists(os.path.dirname(saved_path)):
os.makedirs(os.path.dirname(saved_path))
with open(saved_path, 'wb') as file:
pickle.dump(save_data, file, protocol=4)
print('Save Successful!')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--datasetName', type=str, default='mosei',
help='need aligned data (support mosi / mosei)')
parser.add_argument('--language', type=str, default='cn',
help='data language')
parser.add_argument('--aligned', type=bool, default=True,
help='need aligned data')
parser.add_argument('--data_dir', type=str, default = '/home/sharing/disk3/dataset/multimodal-sentiment-dataset/CMU-MOSEI',
help='path to MOSI / MOSEI')
parser.add_argument('--save_dir', type=str, default = '/home/sharing/disk3/dataset/multimodal-sentiment-dataset/ALL/mosei/raw',
help='path to saved directory')
parser.add_argument('--max_seq_length', type=int, default = 50,
help='length')
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
tp = TextPre(args)
tp.run()
# tp.convertID2Vector()
|
11534477
|
from .APIResponse import APIResponse
class LeagueSeason(APIResponse):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.leagueCompleted = kwargs.get("complete", False) or False
self.leagueId = kwargs.get("id", 0) or 0
self.leagueDescription = kwargs.get("league_description", '') or ''
self.leagueName = kwargs.get("name", '') or ''
self.leagueSplit = kwargs.get("round", 0) or 0
self.leagueSeason = kwargs.get("season", 0) or 0
|
11534506
|
import argparse
import os
import subprocess
import sys
import xml.etree.ElementTree as ET
INTELLIJ_VERSION_FLAG = "-intellij-version"
def is_environment_in_jdk_table(environment_name, table):
for elem in table:
for subelem in elem:
attribute = subelem.attrib
if attribute.get("value") == environment_name:
return True
return False
def add_venv_to_xml_root(module: str, module_full_path: str, xml_root):
"""
Add a new entry for the virtual environment to IntelliJ's list of known interpreters
"""
path_to_lib = f"{module_full_path}/.venv/lib/"
python_version = os.listdir(path_to_lib)[0]
environment_name = f"{python_version.capitalize()} ({module})"
table = xml_root.find("component")
if is_environment_in_jdk_table(environment_name, table):
print(f"{environment_name} already exists. Skipping...")
return
jdk_node = ET.SubElement(table, 'jdk', {"version": "2"})
ET.SubElement(jdk_node, "name", {"value": environment_name})
ET.SubElement(jdk_node, "type", {"value": "Python SDK"})
ET.SubElement(jdk_node, "version", {"value": f"{python_version}"})
ET.SubElement(jdk_node, "homePath",
{"value": f"{module_full_path}/.venv/bin/python"})
roots = ET.SubElement(jdk_node, "roots")
annotationsPath = ET.SubElement(roots, "annotationsPath")
ET.SubElement(annotationsPath, "root", {"type": "composite"})
classPath = ET.SubElement(roots, "classPath")
classPathRoot = ET.SubElement(classPath, "root", {"type": "composite"})
ET.SubElement(classPathRoot, "root", {"url":
f"file://{path_to_lib}{python_version}/site-packages",
"type": "simple"
})
def get_output_path(input_path, output_path):
if output_path is None:
return input_path
else:
return output_path
def get_input_path(input_from_args, version, home_directory):
if input_from_args is not None:
return input_from_args
else:
path_to_intellij_settings = f"{home_directory}/Library/Application Support/JetBrains/"
walk = os.walk(path_to_intellij_settings)
intellij_versions = [version for version in next(walk)[1] if version != "consentOptions"]
if version in intellij_versions:
intellij_version_to_update = version
elif len(intellij_versions) == 1:
intellij_version_to_update = intellij_versions[0]
else:
raise RuntimeError(
f"Please select which version of Intellij to update with the `{INTELLIJ_VERSION_FLAG}` flag. Options are: {intellij_versions}")
return f"{path_to_intellij_settings}{intellij_version_to_update}/options/jdk.table.xml"
def module_has_requirements_file(module):
path_to_module = f"{path_to_connectors}{module}"
path_to_requirements_file = f"{path_to_module}/requirements.txt"
return os.path.exists(path_to_requirements_file)
def get_default_airbyte_path():
path_to_script = os.path.dirname(__file__)
relative_path_to_airbyte_root = f"{path_to_script}/../.."
return os.path.realpath(relative_path_to_airbyte_root)
def create_parser():
parser = argparse.ArgumentParser(description="Prepare Python virtual environments for Python connectors")
actions_group = parser.add_argument_group("actions")
actions_group.add_argument("--install-venv", action="store_true",
help="Create virtual environment and install the module's dependencies")
actions_group.add_argument("--update-intellij", action="store_true", help="Add interpreter to IntelliJ's list of known interpreters")
parser.add_argument("-airbyte", default=get_default_airbyte_path(),
help="Path to Airbyte root directory")
modules_group = parser.add_mutually_exclusive_group(required=True)
modules_group.add_argument("-modules", nargs="?", help="Comma separated list of modules to add (eg source-strava,source-stripe)")
modules_group.add_argument("--all-modules", action="store_true", help="Select all Python connector modules")
group = parser.add_argument_group("Update intelliJ")
group.add_argument("-input", help="Path to input IntelliJ's jdk table")
group.add_argument("-output", help="Path to output jdk table")
group.add_argument(INTELLIJ_VERSION_FLAG, help="IntelliJ version to update (Only required if multiple versions are installed)")
return parser
def parse_args(args):
parser = create_parser()
return parser.parse_args(args)
if __name__ == "__main__":
args = parse_args(sys.argv[1:])
if not args.install_venv and not args.update_intellij:
print("No action requested. Add -h for help")
exit(-1)
path_to_connectors = f"{args.airbyte}/airbyte-integrations/connectors/"
if args.all_modules:
print(path_to_connectors)
modules = next(os.walk(path_to_connectors))[1]
else:
modules = args.modules.split(",")
modules = [m for m in modules if module_has_requirements_file(m)]
if args.install_venv:
errors = []
modules_installed = []
for module in modules:
result = subprocess.run(["tools/bin/setup_connector_venv.sh", module, sys.executable], check=False)
if result.returncode == 0:
modules_installed.append(module)
else:
errors.append(module)
if len(modules_installed) > 0:
print(f"Successfully installed virtual environment for {modules_installed}")
if len(errors) > 0:
print(f"Failed to install virtual environment for {errors}")
if args.update_intellij:
home_directory = os.getenv("HOME")
input_path = get_input_path(args.input, args.intellij_version, home_directory)
output_path = get_output_path(input_path, args.output)
with open(input_path, 'r') as f:
root = ET.fromstring(f.read())
for module in modules:
path_to_module = f"{path_to_connectors}{module}"
path_to_requirements_file = f"{path_to_module}/requirements.txt"
requirements_file_exists = os.path.exists(path_to_requirements_file)
print(f"Adding {module} to jdk table")
add_venv_to_xml_root(module, path_to_module, root)
with open(output_path, "w") as fout:
fout.write(ET.tostring(root, encoding="unicode"))
print("Done.")
# --- tests ---
import unittest
import unittest.mock
class TestNoneTypeError(unittest.TestCase):
def test_output_is_input_if_not_set(self):
input_path = "/input_path"
output_path = get_output_path(input_path, None)
assert input_path == output_path
def test_get_output_path(self):
input_path = "/input_path"
output_path = "/input_path"
assert output_path == get_output_path(input_path, output_path)
@unittest.mock.patch("os.walk")
def test_input_is_selected(self, mock_os):
os.walk.return_value = iter(
(("./test1", ["consentOptions", "IdeaIC2021.3", "PyCharmCE2021.3"], []),))
input_from_args = None
version = "IdeaIC2021.3"
input_path = get_input_path(input_from_args, version, "{HOME}")
assert "{HOME}/Library/Application Support/JetBrains/IdeaIC2021.3/options/jdk.table.xml" == input_path
@unittest.mock.patch("os.walk")
def test_input_single_intellij_version(self, mock_os):
os.walk.return_value = iter(
(("./test1", ["consentOptions", "IdeaIC2021.3"], []),))
input_from_args = None
version = None
input_path = get_input_path(input_from_args, version, "{HOME}")
assert "{HOME}/Library/Application Support/JetBrains/IdeaIC2021.3/options/jdk.table.xml" == input_path
@unittest.mock.patch("os.walk")
def test_input_multiple_intellij_versions(self, mock_os):
os.walk.return_value = iter(
(('./test1', ['consentOptions', 'IdeaIC2021.3', "PyCharmCE2021.3"], []),))
input_from_args = None
version = None
self.assertRaises(RuntimeError, get_input_path, input_from_args, version, "{HOME}")
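# CLI sketch (script path hypothetical; flags as defined in create_parser above):
# python tools/bin/update_intellij_venvs.py -modules source-stripe,source-strava \
#     --install-venv --update-intellij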
|
11534509
|
from base import *
import json
class SRLinux(Container):
CONTAINER_NAME = None
GUEST_DIR = '/etc/opt/srlinux'
def __init__(self, host_dir, conf, image='ghcr.io/nokia/srlinux'):
super(SRLinux, self).__init__(self.CONTAINER_NAME, image, host_dir, self.GUEST_DIR, conf)
    # don't build; just download it with `docker pull ghcr.io/nokia/srlinux`
    # (assume this has been done by hand)
@classmethod
def build_image(cls, force=False, tag='ghcr.io/nokia/srlinux', checkout='', nocache=False):
cls.dockerfile = ''
print("Can't build SRLinux, must download yourself")
print("docker pull ghcr.io/nokia/srlinux")
class SRLinuxTarget(SRLinux, Target):
CONTAINER_NAME = 'bgperf_SRLinux_target'
CONFIG_FILE_NAME = 'config.json'
def __init__(self, host_dir, conf, image='ghcr.io/nokia/srlinux'):
super(SRLinuxTarget, self).__init__(host_dir, conf, image=image)
def write_config(self):
config = {}
key = "network-instance"
bgp = 'srl_nokia-bgp:bgp'
config = '''
enter candidate
set / network-instance default
set / network-instance default protocols
set / network-instance default protocols bgp
set / network-instance default protocols bgp admin-state enable
set / network-instance default protocols bgp router-id {0}
set / network-instance default protocols bgp autonomous-system {1}
set / network-instance default protocols bgp group neighbors
set / network-instance default protocols bgp group neighbors ipv4-unicast
set / network-instance default protocols bgp group neighbors ipv4-unicast admin-state enable
'''.format(self.conf['router-id'], self.conf['as'])
config = {}
config[key] = {"default": {"protocols": {"bgp": {}}}}
config[key]["default"]["protocols"]["bgp"]["admin-state"] = 'enable'
config[key]["default"]["protocols"]["bgp"]["autonomous-system"] = self.conf['as']
config[key]["default"]["protocols"]["bgp"]["router-id"] = self.conf['router-id']
config[key]["default"]["protocols"]["bgp"]['group neighbors'] = {"ipv4-unicast": {"admin-state": "enable"}}
def gen_neighbor_config(n):
config = '''
set / network-instance default protocols bgp neighbor {0}
set / network-instance default protocols bgp neighbor {0} peer-as {1}
set / network-instance default protocols bgp neighbor {0} peer-group neighbors
'''.format(n['router-id'], n['as'])
config = {f"neighbor {n['router-id']}": {}}
config[f"neighbor {n['router-id']}"]["peer-as"] = n["as"]
config[f"neighbor {n['router-id']}"]["peer-group"] = "neighbors"
return config
def gen_prefix_configs(n):
pass
def gen_filter(name, match):
pass
def gen_prefix_filter(n, match):
pass
def gen_aspath_filter(n, match):
pass
def gen_community_filter(n, match):
pass
def gen_ext_community_filter(n, match):
pass
for n in sorted(list(flatten(list(t.get('neighbors', {}).values()) for t in self.scenario_global_conf['testers'])) +
[self.scenario_global_conf['monitor']], key=lambda n: n['as']):
config[key]["default"]["protocols"].update(gen_neighbor_config(n))
with open('{0}/{1}'.format(self.host_dir, self.CONFIG_FILE_NAME), 'w') as f:
f.write(json.dumps(config))
f.flush()
def exec_startup_cmd(self, stream=False, detach=False):
return self.local('sudo bash -c /opt/srlinux/bin/sr_linux',
detach=detach,
stream=stream)
def get_version_cmd(self):
return "/usr/bin/SRLinuxc -V"
def exec_version_cmd(self):
version = self.get_version_cmd()
        i = dckr.exec_create(container=self.name, cmd=version, stderr=True)
return dckr.exec_start(i['Id'], stream=False, detach=False).decode('utf-8').strip('\n')
def get_neighbors_state(self):
neighbors_accepted = {}
neighbor_received_output = json.loads(self.local("/usr/bin/SRLinuxc bgp --host 127.0.0.1 -J").decode('utf-8'))
return neighbor_received_output['neighbor_summary']['recv_converged']
def get_neighbor_received_routes(self):
## if we call this before the daemon starts we will not get output
tester_count, neighbors_checked = self.get_test_counts()
neighbors_accepted = self.get_neighbors_state() - 1 # have to discount the monitor
i = 0
for n in neighbors_checked.keys():
if i >= neighbors_accepted:
break
neighbors_checked[n] = True
i += 1
return neighbors_checked, neighbors_checked
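# Shape of the generated config.json (values hypothetical; structure as built in
# write_config above — note the neighbor entries land beside "bgp", not inside it):
# {"network-instance": {"default": {"protocols": {
#     "bgp": {"admin-state": "enable", "autonomous-system": 65001,
#             "router-id": "10.10.0.1",
#             "group neighbors": {"ipv4-unicast": {"admin-state": "enable"}}},
#     "neighbor 10.10.0.2": {"peer-as": 65002, "peer-group": "neighbors"}}}}}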
|
11534530
|
from typing import Any, Dict, List, Union
from returns.curry import partial
from returns.pipeline import flow, is_successful
from returns.pointfree import bind, fix, map_, rescue
from returns.result import ResultE
from piri.collection_handlers import fetch_data_by_keys
from piri.constants import (
CASTING,
DEFAULT,
IF_STATEMENTS,
MAPPINGS,
PATH,
REGEXP,
SEPARATOR,
SLICING,
)
from piri.functions import (
apply_casting,
apply_default,
apply_if_statements,
apply_regexp,
apply_separator,
apply_slicing,
)
from piri.valuetypes import MapValue
def handle_mapping(
collection: Union[Dict[str, Any], List[Any]],
cfg: Dict[str, Any],
) -> ResultE[MapValue]:
"""Find data in path and apply if statements or default value.
.. versionadded:: 0.0.1
:param configuration: :term:`configuration` data to use when mapping
:type configuration: Dict[str, Any]
:param collection: The collection of data to find data in
:type collection: Union[Dict[str, Any], List[Any]]
:return: Success/Failure containers
    :rtype: ResultE[MapValue]
configuration expected to look like this:
.. code-block:: json
{
"path": [],
"if_statementss": [{}, {}],
"default": 'val'
}
Flow description:
find data from path or None ->
apply if statements ->
return default value if Failure else mapped value
"""
return flow(
collection,
partial(fetch_data_by_keys, path=cfg.get(PATH, [])),
fix(lambda _: None), # type: ignore
bind(
partial(
apply_regexp, regexp=cfg.get(REGEXP, {}),
),
),
fix(lambda _: None), # type: ignore
map_(partial(
apply_slicing, slicing=cfg.get(SLICING, {}),
)),
bind(partial(
apply_if_statements, if_objects=cfg.get(IF_STATEMENTS, []),
)),
rescue( # type: ignore
lambda _: apply_default(cfg.get(DEFAULT)),
),
)
def handle_attribute(
collection: Union[Dict[str, Any], List[Any]],
cfg: dict,
) -> ResultE[MapValue]:
"""Handle one attribute with mappings, ifs, casting and default value.
:param collection: The collection of data to find data in
:type collection: Union[Dict[str, Any], List[Any]]
:param configuration: :term:`configuration` data to use when mapping
:type configuration: Dict[str, Any]
:return: Success/Failure containers
    :rtype: ResultE[MapValue]
configuration expected to look like this:
.. code-block:: json
{
"mappings": [], # array of mapping objects
"separator": None,
"if_statements": [], # array of if statement objects
"casting": {} # casting object, for casting types
"default": "default value"
}
flow description:
Map all objects in cfg[MAPPINGS] ->
Apply separator to values if there are more than 1
Failure -> fix to Success(None)
Apply if statements
Success -> Cast Value
Failure -> apply default value
Return Result
"""
mapped_values = [
mapped.unwrap()
for mapped in
[
handle_mapping(collection, mapping)
for mapping in cfg.get(MAPPINGS, [])
]
if is_successful(mapped)
]
# partially declare if statement and casting functions
ifs = partial(apply_if_statements, if_objects=cfg.get(IF_STATEMENTS, []))
cast = partial(apply_casting, casting=cfg.get(CASTING, {}))
return flow(
apply_separator(mapped_values, separator=cfg.get(SEPARATOR, '')),
fix(lambda _: None), # type: ignore
bind(ifs),
bind(cast),
rescue(
lambda _: apply_default(default=cfg.get(DEFAULT)),
),
)
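# Usage sketch (record and configuration are hypothetical; the keys follow the
# docstrings above):
# handle_attribute(
#     {'name': 'Thor'},
#     {'mappings': [{'path': ['name']}], 'default': 'unknown'},
# )  # Success('Thor') when the path resolves, otherwise Success('unknown')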
|
11534551
|
from datetime import datetime
from unittest import TestCase
from unittest.mock import MagicMock
from hummingbot.strategy.conditional_execution_state import RunAlwaysExecutionState, RunInTimeConditionalExecutionState
class RunAlwaysExecutionStateTests(TestCase):
def test_always_process_tick(self):
strategy = MagicMock()
state = RunAlwaysExecutionState()
        state.process_tick(datetime.now(), strategy)
strategy.process_tick.assert_called()
class RunInTimeSpanExecutionStateTests(TestCase):
def setUp(self) -> None:
super().setUp()
self.debug_logs = []
def debug(self, message: str):
self.debug_logs.append(message)
def test_process_tick_when_current_time_in_span(self):
start_timestamp = datetime.fromisoformat("2021-06-22 09:00:00")
end_timestamp = datetime.fromisoformat("2021-06-22 10:00:00")
state = RunInTimeConditionalExecutionState(start_timestamp=start_timestamp, end_timestamp=end_timestamp)
strategy = MagicMock()
strategy.logger().debug.side_effect = self.debug
state.process_tick(datetime.fromisoformat("2021-06-22 08:59:59").timestamp(), strategy)
strategy.process_tick.assert_not_called()
self.assertEqual(len(self.debug_logs), 1)
self.assertEqual(self.debug_logs[0], "Time span execution: tick will not be processed "
f"(executing between {start_timestamp} and {end_timestamp})")
state.process_tick(datetime.fromisoformat("2021-06-22 09:00:00").timestamp(), strategy)
strategy.process_tick.assert_called()
state.process_tick(datetime.fromisoformat("2021-06-22 09:00:00").timestamp(), strategy)
strategy.process_tick.assert_called()
strategy.process_tick.reset_mock()
state.process_tick(datetime.fromisoformat("2021-06-22 10:00:01").timestamp(), strategy)
strategy.process_tick.assert_not_called()
self.assertEqual(len(self.debug_logs), 2)
self.assertEqual(self.debug_logs[1], "Time span execution: tick will not be processed "
f"(executing between {start_timestamp} and {end_timestamp})")
state = RunInTimeConditionalExecutionState(start_timestamp=start_timestamp.time(), end_timestamp=end_timestamp.time())
state.process_tick(datetime.fromisoformat("2021-06-22 08:59:59").timestamp(), strategy)
strategy.process_tick.assert_not_called()
self.assertEqual(len(self.debug_logs), 3)
self.assertEqual(self.debug_logs[0], "Time span execution: tick will not be processed "
f"(executing between {start_timestamp} and {end_timestamp})")
state.process_tick(datetime.fromisoformat("2021-06-22 09:00:00").timestamp(), strategy)
strategy.process_tick.assert_called()
state.process_tick(datetime.fromisoformat("2021-06-22 09:00:00").timestamp(), strategy)
strategy.process_tick.assert_called()
strategy.process_tick.reset_mock()
state.process_tick(datetime.fromisoformat("2021-06-22 10:00:01").timestamp(), strategy)
strategy.process_tick.assert_not_called()
self.assertEqual(len(self.debug_logs), 4)
self.assertEqual(self.debug_logs[1], "Time span execution: tick will not be processed "
f"(executing between {start_timestamp} and {end_timestamp})")
state.process_tick(datetime.fromisoformat("2021-06-30 08:59:59").timestamp(), strategy)
strategy.process_tick.assert_not_called()
self.assertEqual(len(self.debug_logs), 5)
self.assertEqual(self.debug_logs[0], "Time span execution: tick will not be processed "
f"(executing between {start_timestamp} and {end_timestamp})")
state.process_tick(datetime.fromisoformat("2021-06-30 09:00:00").timestamp(), strategy)
strategy.process_tick.assert_called()
state.process_tick(datetime.fromisoformat("2021-06-30 09:00:00").timestamp(), strategy)
strategy.process_tick.assert_called()
strategy.process_tick.reset_mock()
state.process_tick(datetime.fromisoformat("2021-06-30 10:00:01").timestamp(), strategy)
strategy.process_tick.assert_not_called()
self.assertEqual(len(self.debug_logs), 6)
self.assertEqual(self.debug_logs[1], "Time span execution: tick will not be processed "
f"(executing between {start_timestamp} and {end_timestamp})")
|
11534565
|
from komand_elasticsearch.util.request_api import RequestAPI
from logging import Logger
class ElasticSearchAPI(RequestAPI):
def __init__(self, url: str, logger: Logger, ssl_verify: bool, username: str = None, password: str = None):
super(ElasticSearchAPI, self).__init__(
url=url, logger=logger, ssl_verify=ssl_verify, username=username, password=password
)
def index(self, index: str, _id: str = None, params: dict = None, document: dict = None, _type: str = None) -> dict:
return super()._index(index=index, _type="_doc", _id=_id, params=params, document=document)
def update(self, index: str, _type: str, _id: str, params: dict = None, script: dict = None) -> dict:
return self._call_api("POST", f"{index}/_update/{_id}", params, {"script": script})
def search_documents(self, index: str, json_data: dict = {}, routing: str = None, _type: str = None) -> dict:
return super()._search_documents(path=f"{index}/_search", routing=routing, json_data=json_data)
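# Usage sketch (hypothetical endpoint and credentials; RequestAPI performs the HTTP calls):
# import logging
# api = ElasticSearchAPI("https://localhost:9200", logging.getLogger(__name__),
#                        ssl_verify=False, username="elastic", password="changeme")
# api.update("my-index", "_doc", "1", script={"source": "ctx._source.count += 1"})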
|
11534578
|
low = 1
high = 1000
loop_num = 0  # track how many rounds the search has taken
while low <= high:
m = int((high - low) / 2) + low
print("My guess is", m)
    # user_input is tested in the loop condition, so it needs a value before the
    # loop starts; otherwise the condition check would fail
user_input = ""
input_num = 0
while user_input != '1' and user_input != '2' and user_input != '3':
if input_num == 3:
print("\nYou input too many invalid options. Game over!")
exit(0)
print("\t\t1) Bingo! %s is the secret number! \n\
2) %s < the secret number.\n\
3) %s > the secret number." % (m, m, m))
user_input = input("Your option:")
user_input = user_input.strip()
input_num += 1
input_num = 0
loop_num += 1
if user_input == '1':
print("Succeeded! The secret number is %s.\n\
It took %s round to locate the secret number. \n" % (m, loop_num))
break
else:
if user_input == '2':
low = m + 1
else:
high = m - 1
if low > high:
print("Failed!Cannot got your secret number. Make sure it in range of [1, 1000].")
|
11534604
|
import pytz
from datetime import datetime
from manabi.apps.flashcards.models import (
Card,
)
from manabi.apps.flashcards.models.new_cards_limit import (
NewCardsLimit,
)
class ReviewInterstitial:
def __init__(
self,
user,
deck=None,
new_cards_per_day_limit_override=None,
early_review_began_at=None,
excluded_card_ids=set(),
time_zone=None,
new_cards_limit=None,
buffered_cards_count=None,
buffered_new_cards_count=None,
is_for_manabi_reader=False,
jmdict_ids=None,
words_without_jmdict_ids=None,
):
'''
`new_cards_limit` is an instance of `NewCardsLimit.`
'''
from manabi.apps.flashcards.models.review_availabilities import (
ReviewAvailabilities,
)
self.review_availabilities = ReviewAvailabilities(
user,
deck=deck,
excluded_card_ids=excluded_card_ids,
new_cards_per_day_limit_override=new_cards_per_day_limit_override,
early_review_began_at=early_review_began_at,
time_zone=time_zone,
new_cards_limit=new_cards_limit,
buffered_cards_count=buffered_cards_count,
buffered_new_cards_count=buffered_new_cards_count,
is_for_manabi_reader=is_for_manabi_reader,
jmdict_ids=jmdict_ids,
words_without_jmdict_ids=words_without_jmdict_ids,
)
class NextCardsForReview:
def __init__(
self,
user,
count,
deck=None,
early_review=False,
early_review_began_at=None,
include_new_buried_siblings=False,
new_cards_per_day_limit_override=None,
excluded_card_ids=set(),
is_for_manabi_reader=False,
jmdict_ids=None,
words_without_jmdict_ids=None,
time_zone=None,
):
new_cards_limit = NewCardsLimit(
user,
new_cards_per_day_limit_override=new_cards_per_day_limit_override,
)
next_cards = Card.objects.next_cards(
user,
count,
excluded_ids=excluded_card_ids,
deck=deck,
early_review=early_review,
early_review_began_at=early_review_began_at,
include_new_buried_siblings=include_new_buried_siblings,
new_cards_limit=new_cards_limit.next_new_cards_limit,
is_for_manabi_reader=is_for_manabi_reader,
jmdict_ids=jmdict_ids,
words_without_jmdict_ids=words_without_jmdict_ids,
)
card_ids = [card.id for card in next_cards]
# FIXME don't need 2 queries here...
self.cards = (
Card.objects
.filter(pk__in=card_ids)
.select_related('fact')
)
excluded_card_ids.update(card_ids)
buffered_new_cards_count = len([
card for card in self.cards if card.is_new
])
self.interstitial = ReviewInterstitial(
user,
deck=deck,
time_zone=time_zone,
excluded_card_ids=excluded_card_ids,
buffered_cards_count=len(self.cards),
buffered_new_cards_count=buffered_new_cards_count,
new_cards_per_day_limit_override=new_cards_per_day_limit_override,
new_cards_limit=new_cards_limit,
early_review_began_at=early_review_began_at,
is_for_manabi_reader=is_for_manabi_reader,
jmdict_ids=jmdict_ids,
words_without_jmdict_ids=words_without_jmdict_ids,
)
self.server_datetime = datetime.now(pytz.utc)
|
11534607
|
class tkMath( object ):
PIXELS_PER_INCH = 0
PIXELS_PER_CM = 0
PIXELS_PER_MM = 0
PIXELS_PER_POINT = 0
@staticmethod
def setup( root ):
'''Must be called before any of the methods are used to initialize
the conversion constants.'''
tkMath.PIXELS_PER_INCH = root.winfo_fpixels( '1i' )
tkMath.PIXELS_PER_CM = root.winfo_fpixels( '1c' )
tkMath.PIXELS_PER_MM = root.winfo_fpixels( '1m' )
tkMath.PIXELS_PER_POINT = root.winfo_fpixels( '1p' )
@staticmethod
def pixelsToInches( pixels ):
'''Convert pixels (python float or int) to inches.'''
return pixels / tkMath.PIXELS_PER_INCH
@staticmethod
def pixelsToCM( pixels ):
'''Convert pixels (python float or int) to centimeters.'''
return pixels / tkMath.PIXELS_PER_CM
@staticmethod
def pixelsToMM( pixels ):
'''Convert pixels (python float or int) to millimeters.'''
return pixels / tkMath.PIXELS_PER_MM
@staticmethod
def pixelsToPoints( pixels ):
'''Convert pixels (python float or int) to points.'''
return pixels / tkMath.PIXELS_PER_POINT
@staticmethod
def inchesToPixels( inches ):
'''Convert inches (python float or int) to pixels.'''
return inches * tkMath.PIXELS_PER_INCH
@staticmethod
def cmToPixels( cm ):
'''Convert centimeters (python float or int) to pixels.'''
return cm * tkMath.PIXELS_PER_CM
    @staticmethod
    def mmToPixels( mm ):
        '''Convert millimeters (python float or int) to pixels.'''
        return mm * tkMath.PIXELS_PER_MM
    @staticmethod
    def pointsToPixels( points ):
        '''Convert points (python float or int) to pixels.'''
        return points * tkMath.PIXELS_PER_POINT
@staticmethod
def toPixels( tkCoord ):
'''Convenience function for inches, cm, mm and pointsToPixels().
Convert a tkCoord (string appended by 'i', 'c', 'm' or 'p') to pixels.'''
if isinstance( tkCoord, str ):
if tkCoord[-1] == 'i':
return tkMath.inchesToPixels( float(tkCoord[:-1]) )
elif tkCoord[-1] == 'c':
return tkMath.cmToPixels( float(tkCoord[:-1]) )
elif tkCoord[-1] == 'm':
return tkMath.mmToPixels( float(tkCoord[:-1]) )
elif tkCoord[-1] == 'p':
return tkMath.pointsToPixels( float(tkCoord[:-1]) )
else:
return float(tkCoord)
else:
return tkCoord
@staticmethod
def compare( coord1, coord2 ):
'''Compare two tk measures -- they need not be in the same units.'''
        return tkMath.toPixels(coord1) - tkMath.toPixels(coord2)
@staticmethod
def add( coord1, coord2 ):
'''Add two tk measures -- they need not be in the same units.'''
        return tkMath.toPixels(coord1) + tkMath.toPixels(coord2)
@staticmethod
def sub( coord1, coord2 ):
'''Subtract two tk measures -- they need not be in the same units.'''
        return tkMath.toPixels(coord1) - tkMath.toPixels(coord2)
@staticmethod
def tkPolar( x1, y1, x2, y2 ):
'''Calculate the direction (in radians, 3 o'clock is 0,
        down is 1/2 PI, etc.) and distance (in pixels) between two points.
All arguments should be in the same units (python float or int).
The result is in the same units as the arguments.'''
import math
deltaX = math.fabs( x1 - x2 )
deltaY = math.fabs( y1 - y2 )
direction = math.atan2( deltaY, deltaX )
distance = math.sqrt( math.pow(deltaX, 2) + math.pow(deltaY, 2) )
return direction, distance
@staticmethod
def tkCartesian( x, y, direction, distance ):
        '''Complementary to tkPolar(). Given an x,y point, direction in
radians (0 is at 3 o'clock, 1/2 PI is straight down, etc.) and a
distance (all as python float or int). This function returns
the x and y of the end-point.'''
import math
deltaX = distance * math.cos( direction )
deltaY = distance * math.sin( direction )
return x + deltaX, y + deltaY
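# Usage sketch (requires a Tk display; actual values depend on screen DPI):
if __name__ == '__main__':
    import tkinter
    root = tkinter.Tk()
    tkMath.setup(root)                    # must run before any conversion
    print(tkMath.inchesToPixels(1.0))     # e.g. 96.0 on a 96-dpi display
    print(tkMath.toPixels('2.5c'))        # 2.5 centimeters in pixels
    root.destroy()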
|
11534656
|
from onadata.apps.main.tests.test_base import TestBase
from django.test.client import RequestFactory
from onadata.apps.viewer.views import stats_tables
class TestStatsTableView(TestBase):
def setUp(self):
super(TestStatsTableView, self).setUp()
# Every test needs access to the request factory.
self.factory = RequestFactory()
self._publish_transportation_form_and_submit_instance()
def test_view_returns_200(self):
request = self.factory.get(
'/{}/forms/{}/tables'.format(
self.user.username, self.xform.id_string))
request.user = self.user
response = stats_tables(
request, self.user.username, self.xform.id_string)
self.assertEqual(response.status_code, 200)
|
11534659
|
import SimpleITK as sitk
import numpy as np
import random
from PIL import Image
import cv2,os
#input_path='/home/cwx/extra/CAP'
#input_mask='/mnt/data6/CAP/resampled_seg'
output_path_slices='/mnt/data9/covid_detector_jpgs/selected_train_pos/nor'
os.makedirs(output_path_slices,exist_ok=True)
cnt=0
train_list='trainlist_xct.list'
train_list2='trainlist_ct_only.list'
train_list3='train_ex.list'
train_list=open(train_list,'r').readlines()+open(train_list2,'r').readlines()+open(train_list3,'r').readlines()
for idx,name in enumerate(train_list):
set_name=name.split('/')[-2]
    if 'healthy' not in set_name:
continue
#if not 'cap'in set_name:
# continue#wait segmentation
#input_path = '/home/cwx/extra/covid_project_data/' + set_name
#input_mask = '/home/cwx/extra/covid_project_segs/lungs/' + set_name
#input_path = '/home/cwx/extra/dr_ct_data/CT/' + set_name
#input_mask = '/mnt/data11/seg_of_XCT/lung/' + set_name
#input_lesion_mask='/home/cwx/extra/covid_project_segs/lesion/' + set_name
volume = sitk.ReadImage(name.split(',')[0])
mask = sitk.ReadImage(name.split(',')[1][:-1])
M=sitk.GetArrayFromImage(mask)
M[M>0]=1
V = sitk.GetArrayFromImage(volume)
sums = M.sum(1).sum(1)
idd=np.where(sums>500)
iddx=np.where(M>0)
M = M[idd[0],iddx[1].min():iddx[1].max(),iddx[2].min():iddx[2].max()]
V = V[idd[0],iddx[1].min():iddx[1].max(),iddx[2].min():iddx[2].max()]
    if 'healthy' not in set_name and False:
L = L[idd[0], iddx[1].min():iddx[1].max(), iddx[2].min():iddx[2].max()]
sums2 = L.sum(1).sum(1)
sums2=np.where(sums2>50)[0]
for idx, i in enumerate(range(0,V.shape[0],10)):
        if 'healthy' not in set_name and False:
if not i in sums2:
continue
data=V[i,:,:]
data[data>500]=500
data[data<-1200]=-1200#-1200~500
data=data*255.0/1700
data=data-data.min()
data=np.stack([data,M[i,:,:]*data,M[i,:,:]*255],-1)#mask one channel
data = data.astype(np.uint8)
cv2.imwrite(os.path.join(output_path_slices,'nor_'+set_name+'_'
+name.split(',')[0].split('/')[-1].split('.nii')[0]
+'_'+str(int(i/(V.shape[0])*100))+'.jpg'),data)
|
11534679
|
from pandas import DataFrame
class JsonFormatter(object):
"""Class that receive pandas dataframe
and write it down in Json format
"""
key = 'json-array'
def __init__(self, specification={}):
self.default = {'orient': 'records',
'date_unit': 's'}
self.specification = specification
@staticmethod
def rules():
return {'required': {},
'optional': {
'options.orient': {'none': False, 'type': str},
'options.index': {'none': False, 'type': bool}
}}
def format(self, dataframe: DataFrame, path_or_buffer) -> None:
"""Format dataframe to json.
Keyword arguments:
- dataframe - pandas.Dataframe: dataframe containing the records
"""
parameters = self.default
options = self.specification.get('options', {})
options['indent'] = 2 if options.get("indent") == "pretty" else 0
parameters.update(options)
if dataframe.shape[0] > 0:
dataframe.to_json(
path_or_buf=path_or_buffer, force_ascii=False, **parameters)
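# Usage sketch (hypothetical dataframe; "pretty" switches on a 2-space indent):
if __name__ == '__main__':
    import io
    buffer = io.StringIO()
    JsonFormatter({'options': {'indent': 'pretty'}}).format(
        DataFrame([{'a': 1}, {'a': 2}]), buffer)
    print(buffer.getvalue())  # the two records, pretty-printed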
|
11534681
|
import distutils.ccompiler
import distutils.dist
import glob
import io
import os
import sys
import cffi
# Get the directory for the cmark source files. It's under the package root
# as /third_party/cmark/src
HERE = os.path.dirname(os.path.abspath(__file__))
PACKAGE_ROOT = os.path.abspath(os.path.join(HERE, '../../'))
SRC_DIR = os.path.join(PACKAGE_ROOT, 'third_party/cmark/src')
EXTENSIONS_SRC_DIR = os.path.join(PACKAGE_ROOT, 'third_party/cmark/extensions')
UNIX_GENERATED_SRC_DIR = os.path.join(PACKAGE_ROOT, 'generated', 'unix')
WIN_GENERATED_SRC_DIR = os.path.join(PACKAGE_ROOT, 'generated', 'windows')
CMARK_DEF_H_PATH = os.path.join(HERE, 'cmark.cffi.h')
CMARK_MODULE_H_PATH = os.path.join(HERE, 'cmark_module.h')
with io.open(CMARK_DEF_H_PATH, 'r', encoding='utf-8') as fh:
CMARK_DEF_H = fh.read()
with io.open(CMARK_MODULE_H_PATH, 'r', encoding='utf-8') as fh:
CMARK_MODULE_H = fh.read()
def _get_sources(dir, exclude=set()):
sources = glob.iglob(os.path.join(dir, '*.c'))
return sorted([
os.path.relpath(path, start=PACKAGE_ROOT)
for path in
sources
if os.path.basename(path) not in exclude
])
SOURCES = _get_sources(SRC_DIR, exclude=set(['main.c']))
SOURCES.extend(_get_sources(EXTENSIONS_SRC_DIR))
def _compiler_type():
"""
Gets the compiler type from distutils. On Windows with MSVC it will be
"msvc". On macOS and linux it is "unix".
Borrowed from https://github.com/pyca/cryptography/blob\
/05b34433fccdc2fec0bb014c3668068169d769fd/src/_cffi_src/utils.py#L78
"""
dist = distutils.dist.Distribution()
dist.parse_config_files()
cmd = dist.get_command_obj('build')
cmd.ensure_finalized()
compiler = distutils.ccompiler.new_compiler(compiler=cmd.compiler)
return compiler.compiler_type
COMPILER_TYPE = _compiler_type()
PY2 = sys.version_info[0] < 3
# Note: on Python 2.7 in Windows we're using mingw so we use the unix
# srcs for that as well.
if COMPILER_TYPE in {'unix', 'mingw32'} or PY2:
EXTRA_COMPILE_ARGS = ['-std=c99']
GENERATED_SRC_DIR = UNIX_GENERATED_SRC_DIR
elif COMPILER_TYPE == 'msvc':
EXTRA_COMPILE_ARGS = ['/TP']
GENERATED_SRC_DIR = WIN_GENERATED_SRC_DIR
else:
raise AssertionError("unsupported compiler: %s" % COMPILER_TYPE)
ffibuilder = cffi.FFI()
ffibuilder.cdef(CMARK_DEF_H)
ffibuilder.set_source(
'cmarkgfm._cmark',
CMARK_MODULE_H,
sources=SOURCES,
include_dirs=[SRC_DIR, EXTENSIONS_SRC_DIR, GENERATED_SRC_DIR],
extra_compile_args=EXTRA_COMPILE_ARGS
)
if __name__ == "__main__":
ffibuilder.compile(verbose=True)
|
11534682
|
import logging
import sys
import socket
sys.path.append('logmatic/')
import logmatic
logger = logging.getLogger()
handler = logging.StreamHandler()
handler.setFormatter(logmatic.JsonFormatter(extra={"hello": "world","hostname":socket.gethostname()}))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
test_logger = logging.getLogger("test")
test_logger.info({"special": "value", "run": 12})
test_logger.info("classic message", extra={"special": "value", "run": 12})
def exception_test():
try:
raise Exception('test')
except Exception:
test_logger.exception("This is a fake exception")
exception_test()
|
11534698
|
from tracardi.service.plugin.domain.register import Plugin, Spec, MetaData, Documentation, PortDoc, Form, FormGroup, \
FormField, FormComponent
from tracardi.service.plugin.runner import ActionRunner
from tracardi.service.plugin.domain.result import Result
from .model.config import Config, Token
from tracardi.process_engine.action.v1.connectors.mailchimp.service.mailchimp_audience_editor import MailChimpAudienceEditor
from tracardi.service.storage.driver import storage
from tracardi.domain.resource import ResourceCredentials
def validate(config: dict):
return Config(**config)
class MailChimpAudienceRemover(ActionRunner):
@staticmethod
async def build(**kwargs) -> 'MailChimpAudienceRemover':
config = validate(kwargs)
resource = await storage.driver.resource.load(config.source.id)
return MailChimpAudienceRemover(config, resource.credentials)
def __init__(self, config: Config, credentials: ResourceCredentials):
self.config = config
self._client = MailChimpAudienceEditor(credentials.get_credentials(self, Token).token)
async def run(self, payload):
dot = self._get_dot_accessor(payload)
emails = dot[self.config.email]
emails = emails if isinstance(emails, list) else [emails]
results = [
await self._delete_or_archive(list_id=self.config.list_id, email_address=email) for email in emails
]
for result in results:
if result is not None:
return Result(port="error", value={"result": results})
return Result(port="response", value={"result": results})
async def _delete_or_archive(self, **kwargs):
return await self._client.delete_contact(**kwargs) if self.config.delete else \
await self._client.archive_contact(**kwargs)
def register() -> Plugin:
return Plugin(
start=False,
spec=Spec(
module=__name__,
className='MailChimpAudienceRemover',
inputs=["payload"],
outputs=["response", "error"],
version='0.6.0.1',
license="MIT",
author="<NAME>",
init={
"source": {
"id": None,
"name": None
},
"list_id": None,
"email": None,
"delete": False
},
manual="remove_from_mailchimp_audience_action",
form=Form(
groups=[
FormGroup(
name="Plugin configuration",
fields=[
FormField(
id="source",
name="MailChimp resource",
description="Please select your MailChimp resource.",
component=FormComponent(type="resource", props={"label": "Resource", "tag": "token"})
),
FormField(
id="list_id",
name="ID of your e-mail list (audience)",
description="Please type in your MailChimp audience ID.",
component=FormComponent(type="text", props={"label": "Audience ID"})
),
FormField(
id="email",
name="Contact's e-mail address",
description="Please provide path to contact's e-mail address.",
component=FormComponent(type="dotPath", props={"label": "E-mail",
"defaultSourceValue": "profile",
"defaultPathValue": "pii.email"
})
),
]
),
FormGroup(
name="For Advanced Users Only",
fields=[
FormField(
id="delete",
name="Permanently delete contact",
description="Please determine if plugin should permanently delete contact, or archive "
"it. Please notice that if you permanently delete your contact, then you"
" cannot add it again. ON switch position indicates deleting mode.",
component=FormComponent(type="bool", props={"label": "Permanently delete contact"})
)
]
)
]
)
),
metadata=MetaData(
name='Remove from audience',
brand="MailChimp",
            desc='Removes a contact from a MailChimp audience or archives it.',
icon='mailchimp',
group=["Mailchimp"],
documentation=Documentation(
inputs={
"payload": PortDoc(desc="This port takes any JSON-like object.")
},
outputs={
"response": PortDoc(desc="This port returns response from MailChimp API."),
"error": PortDoc(desc="This port returns response from MailChimp API if an error occurs.")
}
)
)
)
|
11534700
|
import autosar.base
import autosar.component
import autosar.rte.base
from autosar.rte.base import (ReadPortFunction, WritePortFunction, SendPortFunction, ReceivePortFunction, CallPortFunction,
CalPrmPortFunction, DataElement, Operation, RequirePort, ProvidePort)
import cfile as C
import sys
import autosar.bsw.com
innerIndentDefault=3 #(number of spaces)
class ComponentAPI:
"""
defines the API both for clients (components) and server (RTE)
"""
def __init__(self):
self.read = {}
self.write = {}
self.send = {}
self.receive = {}
self.mode = {}
self.call = {}
self.calprm = {}
self.get = {}
self.setReadData = {}
self.setReadResult = {}
self.final = {
'read': [],
'write': [],
'receive': [],
'send': [],
'mode': [],
'call': [],
'calprm': [],
'modeswitch': [],
'get': [], #FOR UNIT TEST PURPOSES
'setReadData': [], #FOR UNIT TEST PURPOSES
'setReadResult': [], #FOR UNIT TEST PURPOSES
}
def finalize(self):
if len(self.read)>0:
self.final['read']=[self.read[k] for k in sorted(self.read.keys())]
if len(self.write)>0:
self.final['write']=[self.write[k] for k in sorted(self.write.keys())]
        if len(self.receive)>0:
            self.final['receive']=[self.receive[k] for k in sorted(self.receive.keys())]
        if len(self.send)>0:
            self.final['send']=[self.send[k] for k in sorted(self.send.keys())]
        if len(self.mode)>0:
            self.final['mode']=[self.mode[k] for k in sorted(self.mode.keys())]
if len(self.call)>0:
self.final['call']=[self.call[k] for k in sorted(self.call.keys())]
if len(self.calprm)>0:
self.final['calprm']=[self.calprm[k] for k in sorted(self.calprm.keys())]
if len(self.get)>0:
self.final['get']=[self.get[k] for k in sorted(self.get.keys())]
if len(self.setReadData)>0:
self.final['setReadData']=[self.setReadData[k] for k in sorted(self.setReadData.keys())]
if len(self.setReadResult)>0:
self.final['setReadResult']=[self.setReadResult[k] for k in sorted(self.setReadResult.keys())]
def get_all(self):
for func in self.final['read']:
yield func
for func in self.final['write']:
yield func
for func in self.final['receive']:
yield func
for func in self.final['send']:
yield func
for func in self.final['mode']:
yield func
for func in self.final['call']:
yield func
for func in self.final['calprm']:
yield func
for func in self.final['get']:
yield func
for func in self.final['setReadData']:
yield func
for func in self.final['setReadResult']:
yield func
def update(self, other):
self.read.update(other.read)
self.write.update(other.write)
self.send.update(other.send)
self.receive.update(other.receive)
self.mode.update(other.mode)
self.call.update(other.call)
self.calprm.update(other.calprm)
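# Illustrative sketch of the intended ComponentAPI flow (names below are made up;
# real entries are port function objects created by the port classes in this module):
#   api = ComponentAPI()
#   api.read['Rte_Read_Swc_Input'] = read_port_func
#   api.finalize()              # sorts entries into api.final['read'], etc.
#   for func in api.get_all():  # iterate every generated function in a stable order
#       print(func)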
class Component:
"""
RTE Container class for AUTOSAR SWC
"""
def __init__(self, swc, parent, rte_prefix='Rte'):
self.parent = parent
self.name = swc.name
self.inner = swc
self.clientAPI = ComponentAPI() #function calls towards the RTE (stuff that the RTE must provide)
self.events = []
self.runnables = []
self.data_vars = []
self.rte_prefix = rte_prefix
self.is_finalized = False
self.requirePorts = []
self.providePorts = []
self.data_element_port_access = {}
self.operation_port_access = {}
ws = swc.rootWS()
assert(ws is not None)
self._process_ports(ws)
self._process_runnables(ws)
self._process_events(ws)
def _process_ports(self, ws):
for ar_port in self.inner.providePorts:
self.providePorts.append(ProvidePort(ws, ar_port, self))
for ar_port in self.inner.requirePorts:
self.requirePorts.append(RequirePort(ws, ar_port, self))
# def pre_finalize(self, ws, type_manager):
# if not self.is_finalized:
# self._process_runnables(ws)
# self._process_events(ws)
def finalize(self,ws, type_manager):
if not self.is_finalized:
self._process_port_access()
for port in self.requirePorts+self.providePorts:
port.process_types(ws, type_manager)
port.update_client_api(self.clientAPI)
self.clientAPI.finalize()
self._runnables_finalize()
self.is_finalized=True
    def get_runnable(self, name):
        for runnable in self.runnables:
            if runnable.name == name:
                return runnable
        raise KeyError("No runnable found with name "+name)
def find_require_port(self, name):
for port in self.requirePorts:
if port.name == name: return port
raise KeyError("No port found with name "+name)
def find_provide_port(self, name):
for port in self.providePorts:
if port.name == name: return port
raise KeyError("No port found with name "+name)
    def add_event(self, rte_event):
        self.events.append(rte_event)
def _process_runnables(self, ws):
if self.inner.behavior is not None:
for ar_runnable in self.inner.behavior.runnables:
runnable = Runnable(self, ar_runnable)
self.runnables.append(runnable)
for dataPoint in ar_runnable.dataReceivePoints+ar_runnable.dataSendPoints:
ar_port=ws.find(dataPoint.portRef)
if ar_port is None:
                        raise ValueError('Error: Invalid port reference: '+dataPoint.portRef)
ar_data_element = ws.find(dataPoint.dataElemRef)
if ar_data_element is None:
raise ValueError('Error: Invalid data element reference: '+dataPoint.dataElemRef)
if isinstance(dataPoint, autosar.behavior.DataSendPoint):
port = self.find_provide_port(ar_port.name)
else:
port = self.find_require_port(ar_port.name)
data_element = port.find_data_element(ar_data_element.name)
runnable.data_element_access.append(data_element)
port.create_data_access_api(ws, data_element)
self.data_element_port_access['%s/%s'%(port.name, data_element.name)]=autosar.rte.base.DataElementPortAccess(port, data_element, runnable)
for callPoint in ar_runnable.serverCallPoints:
for instanceRef in callPoint.operationInstanceRefs:
ar_port = ws.find(instanceRef.portRef)
if ar_port is None:
raise ValueError('Error: Invalid port reference: '+instanceRef.portRef)
ar_operation = ws.find(instanceRef.operationRef)
if ar_operation is None:
raise ValueError('Error: Invalid operation reference: '+instanceRef.operationRef)
port = self.find_require_port(ar_port.name)
operation = port.find_operation(ar_operation.name)
runnable.operation_access.append(operation)
self.operation_port_access['%s/%s'%(port.name, operation.name)]=autosar.rte.base.OperationPortAccess(port, operation, runnable)
def _process_events(self, ws):
if self.inner.behavior is None:
return
for ar_event in self.inner.behavior.events:
ar_runnable = ws.find(ar_event.startOnEventRef)
if ar_runnable is None:
raise ValueError('Invalid StartOnEvent reference: '+ar_event.startOnEventRef)
for runnable in self.runnables:
if runnable.inner is ar_runnable:
break
else:
raise ValueError('Runnable not found')
if isinstance(ar_event, autosar.behavior.TimingEvent):
event = autosar.rte.base.TimerEvent(ar_event, runnable)
            elif isinstance(ar_event, autosar.behavior.ModeSwitchEvent):
                if ar_event.modeInstRef is None:
                    continue #skip events without a mode instance reference ('event' would otherwise be unbound below)
                event = autosar.rte.base.ModeSwitchEvent(ws, ar_event, runnable)
elif isinstance(ar_event, autosar.behavior.OperationInvokedEvent):
port_refs = autosar.base.splitRef(ar_event.operationInstanceRef.portRef)
operation_refs = autosar.base.splitRef(ar_event.operationInstanceRef.operationRef)
port = self.find_provide_port(port_refs[-1])
assert (port is not None) and (port.ar_port is ws.find(ar_event.operationInstanceRef.portRef))
operation = port.find_operation(operation_refs[-1])
assert (operation is not None)
event = autosar.rte.base.OperationInvokedEvent(ar_event, runnable, port, operation)
else:
                raise NotImplementedError(str(type(ar_event)))
self.events.append(event)
def _process_port_access(self):
for access in self.operation_port_access.values():
if isinstance(access, autosar.rte.base.OperationPortAccess):
proto = access.port.create_server_call_api(access.operation)
else:
raise NotImplementedError(str(type(access)))
def _runnables_finalize(self):
#operation_invoke_events = [event for event in self.events if isinstance(event, OperationInvokedEvent)]
for runnable in self.runnables:
if runnable.prototype is None:
runnable.prototype = (C.function(runnable.symbol, 'void'))
def create_data_elements(self, data_element_map):
for provide_port in self.providePorts:
for require_port in provide_port.connectors:
if len(require_port.data_elements)>0:
for port_func in require_port.portAPI.values():
if isinstance(port_func, (ReadPortFunction, ReceivePortFunction)) and port_func.data_element.parent is require_port:
data_element = provide_port.find_data_element(port_func.data_element.name)
assert(data_element is not None)
variable_name = '_'.join([self.name,provide_port.name,data_element.name])
if variable_name not in data_element_map:
data_element.symbol = variable_name
data_element_map[variable_name] = data_element
#reassign require_port data element to access the data element from the provide port
port_func.data_element = data_element
class Runnable:
"""RTE Runnable"""
def __init__(self, parent, ar_runnable):
self.parent = parent
self.inner = ar_runnable
self.name = ar_runnable.name
self.symbol = ar_runnable.symbol
self.data_element_access=[]
self.operation_access=[]
self.prototype = None
self.event_triggers=[]
self.processed=False
class Partition:
def __init__(self, mode='full', prefix='Rte'):
self.prefix=prefix
self.components = [] #clients (components)
self.upperLayerAPI = ComponentAPI() #functions that the RTE must support towards its clients
self.lowerLayerAPI = {}
self.types = autosar.rte.RteTypeManager() #centralized type manager
self.isFinalized = False
self.ws = None
self.assemblyConnectorMap = {}
self.data_element_map = {}
self.mode_switch_functions = {}
self.static_vars = {}
def addComponent(self, swc, runnables = None, name=None):
"""
adds software component to partition.
Optional parameters:
name: Can be used to override name of swc. Default is to use name from swc.
"""
swc_name = name if name is not None else swc.name
if isinstance(swc, (autosar.component.AtomicSoftwareComponent, autosar.bsw.com.ComComponent)):
ws = swc.rootWS()
assert(ws is not None)
if self.ws is None:
self.ws = ws
else:
if self.ws is not ws:
raise ValueError('Cannot add components from different workspaces!')
component = Component(swc, self)
self.components.append(component)
else:
print("Unsupported component type: "+str(type(swc)), file=sys.stderr)
def finalize(self):
if not self.isFinalized:
# for component in self.components:
# component.pre_finalize(self.ws, self.types)
for component in self.components:
component.finalize(self.ws, self.types)
self.upperLayerAPI.update(component.clientAPI)
for component in self.components:
component.create_data_elements(self.data_element_map)
self._generate_com_access()
self.upperLayerAPI.finalize()
for component in self.components:
self._process_mode_switch_events(component)
self.isFinalized=True
def createConnector(self, portRef1, portRef2):
"""
creates a connector between two ports
"""
assert (self.ws is not None)
port1 = self._analyzePortRef(portRef1)
port2 = self._analyzePortRef(portRef2)
providePort=None
requirePort=None
if isinstance(port1, RequirePort) and isinstance(port2, ProvidePort):
requirePort, providePort = port1, port2
elif isinstance(port1, ProvidePort) and isinstance(port2, RequirePort):
providePort, requirePort = port1, port2
elif isinstance(port1, RequirePort) and isinstance(port2, RequirePort):
raise ValueError('cannot create assembly connector between two require ports')
else:
raise ValueError('cannot create assembly connector between two provide ports')
self._createConnectorInternal(providePort, requirePort)
def autoConnect(self):
"""
        Attempts to create compatible connectors between components
"""
require_port_list = [] #list of RequirePort
provide_port_list = [] #list of ProvidePort
for rte_comp in self.components:
for rte_port in rte_comp.requirePorts:
require_port_list.append(rte_port)
for rte_port in rte_comp.providePorts:
provide_port_list.append(rte_port)
for require_port in require_port_list:
provide_port = self._findCompatibleProvidePort(require_port, provide_port_list)
if provide_port is not None:
self._createConnectorInternal(provide_port, require_port)
def unconnectedPorts(self):
"""
Returns a generator that yields all unconnected ports of this partition
"""
for component in self.components:
for port in component.requirePorts+component.providePorts:
if len(port.connectors)==0:
yield port
def _findCompatibleProvidePort(self, require_port, provide_port_list):
require_port_interface = self.ws.find(require_port.ar_port.portInterfaceRef)
if require_port_interface is None: raise ValueError("Invalid port interface ref: %s"%require_port.ar_port.portInterfaceRef)
for provide_port in provide_port_list:
provide_port_interface = self.ws.find(provide_port.ar_port.portInterfaceRef)
if provide_port_interface is None: raise ValueError("Invalid port interface ref: %s"%provide_port.ar_port.portInterfaceRef)
if require_port_interface==provide_port_interface and (require_port.ar_port.name == provide_port.ar_port.name):
return provide_port
return None
def _createConnectorInternal(self, provide_port, require_port):
connectorName='_'.join([provide_port.parent.name, provide_port.name, require_port.parent.name, require_port.name])
if connectorName in self.assemblyConnectorMap:
raise ValueError('connector "%s" already exists'%connectorName)
self.assemblyConnectorMap[connectorName]=(provide_port,require_port)
provide_port.connectors.append(require_port)
require_port.connectors.append(provide_port)
# def _process_parameter_ports(self, component, ws, swc):
# for port in swc.requirePorts:
# portInterface = ws.find(port.portInterfaceRef)
# if portInterface is not None:
# if isinstance(portInterface, autosar.portinterface.ParameterInterface):
# for data_element in portInterface.dataElements:
# data_type = ws.find(data_element.typeRef)
# if data_type is None:
# raise ValueError("Error: Invalid type reference: "+ data_element.typeRef)
# self.types.processType(ws, data_type)
# component.create_parameter(ws, port, data_element, data_type)
# def _process_events(self, component, ws, swc, runnables=None):
# for event in swc.behavior.events:
# ar_runnable = ws.find(event.startOnEventRef)
# rte_runnable = component.get_runnable(ar_runnable.name)
# if isinstance(event, autosar.behavior.TimingEvent):
# rte_event = TimerEvent(event.name, rte_runnable)
# component.add_event(rte_event)
# elif isinstance(event, autosar.behavior.ModeSwitchEvent):
# if ar_runnable is None:
# raise ValueError('invalid reference: '+event.startOnEventRef)
# if runnables is None or ar_runnable.name in runnables:
# rte_event = ModeSwitchEvent(ws, event, rte_runnable)
# rte_runnable.events.append(rte_event)
# component.add_event(rte_event)
# elif isinstance(event, autosar.behavior.OperationInvokedEvent):
# pass #already processed
# else:
# raise NotImplementedError(type(event))
def _analyzePortRef(self, portRef):
parts=autosar.base.splitRef(portRef)
if len(parts)==2:
#assume format 'componentName/portName' with ComponentType role set
port=None
for component in self.components:
if component.name == parts[0]:
for port in component.requirePorts + component.providePorts:
if parts[1] == port.name:
return port
return None
def _generate_com_access(self):
for component in self.components:
if isinstance(component.inner, autosar.bsw.com.ComComponent):
for port in component.requirePorts:
for remote_port in port.connectors:
for data_element in remote_port.data_elements:
                            isPointer = bool(data_element.dataType.isComplexType)
proto = C.function(
"%s_Send_%s_%s"%(component.inner.name, remote_port.name, data_element.name),
'Std_ReturnType',
args = [C.variable('value', data_element.dataType.name, pointer=isPointer)])
data_element.com_access['Send'] = proto
component.inner.addSendInterface(proto, port, data_element)
for port in component.providePorts:
for data_element in port.data_elements:
isPointer = True
proto = C.function(
"%s_Receive_%s_%s"%(component.inner.name, remote_port.name, data_element.name),
'Std_ReturnType',
args = [C.variable('value', data_element.dataType.name, pointer=isPointer)])
data_element.com_access['Receive'] = proto
component.inner.addReceiveInterface(proto, port, data_element)
#remove from internal RTE variables
symbol = data_element.symbol
data_element.symbol = None
if symbol in self.data_element_map:
del self.data_element_map[symbol]
def _process_mode_switch_events(self, component):
for event in component.events:
if isinstance(event, autosar.rte.base.ModeSwitchEvent):
if event.mode not in self.mode_switch_functions:
func = autosar.rte.base.ModeSwitchFunction(event)
self.mode_switch_functions[event.mode] = func
if func.static_var not in self.static_vars:
self.static_vars[func.static_var.name] = func.static_var
function_name = "_".join(['os', 'task', event.activationType, event.mode, event.modeDeclaration])
if function_name not in self.mode_switch_functions[event.mode].calls:
if (event.activationType == 'OnEntry'):
self.mode_switch_functions[event.mode].generate_on_entry_code(event, function_name)
else:
self.mode_switch_functions[event.mode].generate_on_exit_code(event, function_name)
else:
self.mode_switch_functions[event.mode].add_event_to_call(event, function_name)
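# Sketch of typical Partition usage (assumes swc1 and swc2 are
# AtomicSoftwareComponent objects from the same workspace):
#   partition = Partition()
#   partition.addComponent(swc1)
#   partition.addComponent(swc2)
#   partition.autoConnect()     # or: partition.createConnector('Swc1/TxPort', 'Swc2/RxPort')
#   partition.finalize()
#   unconnected = list(partition.unconnectedPorts())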
|
11534713
|
from genrss import GenRSS
def create_rss(**kwargs):
return GenRSS(title='SmartFridge', site_url='https://smartfridge.me/',
feed_url='https://smartfridge.me/rss.xml', **kwargs)
def create_item(feed, **kwargs):
feed.item(title='Recipe', **kwargs)
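# Minimal usage sketch (the xml() call reflects the genrss API as understood here
# and may differ between versions):
#   feed = create_rss(description='Recipes from SmartFridge')
#   create_item(feed, description='Tomato soup', url='https://smartfridge.me/recipes/1')
#   rss_xml = feed.xml(pretty=True)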
|
11534774
|
import matplotlib.pyplot as plt
def main():
    x = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000,
1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000, 7500,
10000, 15000, 20000, 25000, 30000]
y = [0.75330, 0.81608, 0.85652, 0.86402, 0.87372, 0.87764,
0.88382, 0.89334, 0.89644, 0.90168, 0.91856, 0.92490,
0.92966, 0.93690, 0.94190, 0.94424, 0.94964, 0.95032,
0.95862, 0.96526, 0.96664, 0.96944, 0.96980, 0.96926]
plt.plot(x,y, marker="o", color="b")
plt.xlabel("Number of training samples")
plt.ylabel("Mean test score")
plt.tight_layout()
plt.savefig("mnist_nn_experiments_samples_plot.pdf", type="pdf", dpi=600)
plt.show()
main()
|
11534808
|
from KratosMultiphysics.RomApplication.element_selection_strategy import ElementSelectionStrategy
from KratosMultiphysics.RomApplication.randomized_singular_value_decomposition import RandomizedSingularValueDecomposition
import KratosMultiphysics
import KratosMultiphysics.kratos_utilities
import numpy as np
import json
try:
from matplotlib import pyplot as plt
missing_matplotlib = False
except ImportError as e:
missing_matplotlib = True
class EmpiricalCubatureMethod(ElementSelectionStrategy):
"""
This class selects a subset of elements and corresponding positive weights necessary for the construction of a hyper-reduced order model
Reference: Hernandez 2020. "A multiscale method for periodic structures using domain decomposition and ECM-hyperreduction"
"""
"""
Constructor setting up the parameters for the Element Selection Strategy
ECM_tolerance: approximation tolerance for the element selection algorithm
SVD_tolerance: approximation tolerance for the singular value decomposition of the ResidualSnapshots matrix
Filter_tolerance: parameter limiting the number of candidate points (elements) to those above this tolerance
Take_into_account_singular_values: whether to multiply the matrix of singular values by the matrix of left singular vectors. If false, convergence is easier
Plotting: whether to plot the error evolution of the element selection algorithm
"""
def __init__(self, ECM_tolerance = 1e-6, SVD_tolerance = 1e-6, Filter_tolerance = 1e-16, Take_into_account_singular_values = False, Plotting = False):
super().__init__()
self.ECM_tolerance = ECM_tolerance
self.SVD_tolerance = SVD_tolerance
self.Filter_tolerance = Filter_tolerance
self.Name = "EmpiricalCubature"
self.Take_into_account_singular_values = Take_into_account_singular_values
self.Plotting = Plotting
"""
Method for setting up the element selection
input: ResidualSnapshots: numpy array containing the matrix of residuals projected onto a basis
OriginalNumberOfElements: number of elements in the original model part. Necessary for the construction of the hyperreduced mdpa
ModelPartName: name of the original model part. Necessary for the construction of the hyperreduced mdpa
"""
def SetUp(self, ResidualSnapshots, OriginalNumberOfElements, ModelPartName):
super().SetUp()
self.ModelPartName = ModelPartName
self.OriginalNumberOfElements = OriginalNumberOfElements
u , s = self._ObtainBasis(ResidualSnapshots)
self.W = np.ones(np.shape(u)[0])
if self.Take_into_account_singular_values == True:
G = u*s
G = G.T
G = np.vstack([ G , np.ones( np.shape(G)[1] )] )
b = G @ self.W
bEXACT = b
else:
G = u.T
b = G @ self.W
bEXACT = b * s
self.SingularValues = s
self.b = b
self.G = G
self.ExactNorm = np.linalg.norm(bEXACT)
"""
Method performing calculations required before launching the Calculate method
"""
def Initialize(self):
super().Initialize()
        self.Gnorm = np.sqrt(np.sum(np.multiply(self.G, self.G), axis=0))
M = np.shape(self.G)[1]
normB = np.linalg.norm(self.b)
self.y = np.arange(0,M,1) # Set of candidate points (those whose associated column has low norm are removed)
        GnormNOONE = np.sqrt(np.sum(np.multiply(self.G[:-1,:], self.G[:-1,:]), axis=0))
if self.Filter_tolerance > 0:
TOL_REMOVE = self.Filter_tolerance * normB
rmvpin = np.where(GnormNOONE[self.y] < TOL_REMOVE)
self.y = np.delete(self.y,rmvpin)
        self.z = {} # Set of integration points
self.mPOS = 0 # Number of nonzero weights
self.r = self.b # residual vector
self.m = len(self.b) # Default number of points
self.nerror = np.linalg.norm(self.r)/normB
self.nerrorACTUAL = self.nerror
"""
    Method launching the element selection algorithm to find a set of elements: self.z, and weights: self.w
"""
def Calculate(self):
super().Calculate()
k = 1 # number of iterations
while self.nerrorACTUAL > self.ECM_tolerance and self.mPOS < self.m and len(self.y) != 0:
#Step 1. Compute new point
ObjFun = self.G[:,self.y].T @ self.r.T
ObjFun = ObjFun.T / self.Gnorm[self.y]
indSORT = np.argmax(ObjFun)
i = self.y[indSORT]
if k==1:
                alpha = np.linalg.lstsq(self.G[:, [i]], self.b, rcond=None)[0]
H = 1/(self.G[:,i] @ self.G[:,i].T)
else:
H, alpha = self._UpdateWeightsInverse(self.G[:,self.z],H,self.G[:,i],alpha)
#Step 3. Move i from set y to set z
if k == 1:
self.z = i
else:
self.z = np.r_[self.z,i]
self.y = np.delete(self.y,indSORT)
# Step 4. Find possible negative weights
if any(alpha < 0):
print("WARNING: NEGATIVE weight found")
indexes_neg_weight = np.where(alpha <= 0.)[0]
self.y = np.append(self.y, (self.z[indexes_neg_weight]).T)
self.z = np.delete(self.z, indexes_neg_weight)
H = self._MultiUpdateInverseHermitian(H, indexes_neg_weight)
alpha = H @ (self.G[:, self.z].T @ self.b)
alpha = alpha.reshape(len(alpha),1)
#Step 6 Update the residual
if len(alpha)==1:
self.r = self.b - (self.G[:,self.z] * alpha)
else:
Aux = self.G[:,self.z] @ alpha
self.r = np.squeeze(self.b - Aux.T)
self.nerror = np.linalg.norm(self.r) / np.linalg.norm(self.b) # Relative error (using r and b)
            if self.Take_into_account_singular_values == False:
                self.nerrorACTUAL = self.SingularValues * self.r
                self.nerrorACTUAL = np.linalg.norm(self.nerrorACTUAL / self.ExactNorm)
            else:
                self.nerrorACTUAL = self.nerror
# STEP 7
self.mPOS = np.size(self.z)
print(f'k = {k}, m = {np.size(self.z)}, error n(res)/n(b) (%) = {self.nerror*100}, Actual error % = {self.nerrorACTUAL*100} ')
if k == 1:
ERROR_GLO = np.array([self.nerrorACTUAL])
NPOINTS = np.array([np.size(self.z)])
else:
ERROR_GLO = np.c_[ ERROR_GLO , self.nerrorACTUAL]
NPOINTS = np.c_[ NPOINTS , np.size(self.z)]
k = k+1
self.w = alpha.T * np.sqrt(self.W[self.z])
print(f'Total number of iterations = {k}')
if missing_matplotlib == False and self.Plotting == True:
plt.plot(NPOINTS[0], ERROR_GLO[0])
plt.title('Element Selection Error Evolution')
plt.xlabel('Number of elements')
plt.ylabel('Error %')
plt.show()
"""
Method for the quick update of weights (self.w), whenever a negative weight is found
"""
def _UpdateWeightsInverse(self, A,Aast,a,xold):
c = np.dot(A.T, a)
d = np.dot(Aast, c).reshape(-1, 1)
s = np.dot(a.T, a) - np.dot(c.T, d)
aux1 = np.hstack([Aast + np.outer(d, d) / s, -d / s])
if np.shape(-d.T / s)[1]==1:
aux2 = np.squeeze(np.hstack([-d.T / s, 1 / s]))
else:
aux2 = np.hstack([np.squeeze(-d.T / s), 1 / s])
Bast = np.vstack([aux1, aux2])
v = np.dot(a.T, self.r) / s
x = np.vstack([(xold - d * v), v])
return Bast, x
"""
Method for the quick update of weights (self.w), whenever a negative weight is found
"""
def _MultiUpdateInverseHermitian(self, invH, neg_indexes):
neg_indexes = np.sort(neg_indexes)
for i in range(np.size(neg_indexes)):
neg_index = neg_indexes[i] - i
invH = self._UpdateInverseHermitian(invH, neg_index)
return invH
"""
Method for the quick update of weights (self.w), whenever a negative weight is found
"""
def _UpdateInverseHermitian(self, invH, neg_index):
if neg_index == np.shape(invH)[1]:
            aux = (invH[0:-1, -1] * invH[-1, 0:-1]) / invH[-1, -1]
invH_new = invH[:-1, :-1] - aux
else:
aux1 = np.hstack([invH[:, 0:neg_index], invH[:, neg_index + 1:], invH[:, neg_index].reshape(-1, 1)])
aux2 = np.vstack([aux1[0:neg_index, :], aux1[neg_index + 1:, :], aux1[neg_index, :]])
invH_new = aux2[0:-1, 0:-1] - np.outer(aux2[0:-1, -1], aux2[-1, 0:-1]) / aux2[-1, -1]
return invH_new
"""
Method calculating the singular value decomposition of the ResidualSnapshots matrix
input: ResidualSnapshots: numpy array containing a matrix of residuals projected onto a basis
output: u: numpy array containing the matrix of left singular vectors
s: numpy array containing the matrix of singular values
"""
def _ObtainBasis(self,ResidualSnapshots):
### Building the Snapshot matrix ####
for i in range (len(ResidualSnapshots)):
if i == 0:
SnapshotMatrix = ResidualSnapshots[i]
else:
SnapshotMatrix = np.c_[SnapshotMatrix,ResidualSnapshots[i]]
### Taking the SVD ### (randomized and truncated here)
u,s,_,_ = RandomizedSingularValueDecomposition().Calculate(SnapshotMatrix, self.SVD_tolerance)
return u, s
"""
Method to write a json file containing the selected elements and corresponding weights
"""
def WriteSelectedElements(self):
w = np.squeeze(self.w)
### Saving Elements and conditions
ElementsAndWeights = {}
ElementsAndWeights["Elements"] = {}
ElementsAndWeights["Conditions"] = {}
#Only one element found !
        if isinstance(self.z, (np.int64, np.int32)):
if self.z <= self.OriginalNumberOfElements-1:
ElementsAndWeights["Elements"][int(self.z)] = (float(w))
else:
ElementsAndWeights["Conditions"][int(self.z)-self.OriginalNumberOfElements] = (float(w))
#Many elements found
else:
for j in range (0,len(self.z)):
if self.z[j] <= self.OriginalNumberOfElements-1:
ElementsAndWeights["Elements"][int(self.z[j])] = (float(w[j]))
else:
ElementsAndWeights["Conditions"][int(self.z[j])-self.OriginalNumberOfElements] = (float(w[j]))
with open('ElementsAndWeights.json', 'w') as f:
json.dump(ElementsAndWeights,f, indent=2)
print('\n\n Elements and conditions selected have been saved in a json file\n\n')
self._CreateHyperReducedModelPart()
"""
Method to create an mdpa file containing the selected elements and the skin
"""
def _CreateHyperReducedModelPart(self):
current_model = KratosMultiphysics.Model()
computing_model_part = current_model.CreateModelPart("main")
model_part_io = KratosMultiphysics.ModelPartIO(self.ModelPartName)
model_part_io.ReadModelPart(computing_model_part)
hyper_reduced_model_part_help = current_model.CreateModelPart("Helping")
with open('ElementsAndWeights.json') as f:
HR_data = json.load(f)
for key in HR_data["Elements"].keys():
for node in computing_model_part.GetElement(int(key)+1).GetNodes():
hyper_reduced_model_part_help.AddNode(node,0)
for key in HR_data["Conditions"].keys():
for node in computing_model_part.GetCondition(int(key)+1).GetNodes():
hyper_reduced_model_part_help.AddNode(node,0)
        # The HROM model part. It will include two sub-model parts: one for calculation, another for visualization
HROM_Model_Part = current_model.CreateModelPart("HROM_Model_Part")
# Building the COMPUTE_HROM submodel part
hyper_reduced_model_part = HROM_Model_Part.CreateSubModelPart("COMPUTE_HROM")
# TODO implement the hyper-reduced model part creation in C++
with open('ElementsAndWeights.json') as f:
HR_data = json.load(f)
for originalSubmodelpart in computing_model_part.SubModelParts:
hyperReducedSubmodelpart = hyper_reduced_model_part.CreateSubModelPart(originalSubmodelpart.Name)
print(f'originalSubmodelpart.Name {originalSubmodelpart.Name}')
print(f'originalSubmodelpart.Elements {len(originalSubmodelpart.Elements)}')
print(f'originalSubmodelpart.Conditions {len(originalSubmodelpart.Conditions)}')
for originalNode in originalSubmodelpart.Nodes:
if originalNode in hyper_reduced_model_part_help.Nodes:
hyperReducedSubmodelpart.AddNode(originalNode,0)
            ## A more efficient way to implement this is possible
for originalElement in originalSubmodelpart.Elements:
for key in HR_data["Elements"].keys():
if originalElement.Id == int(key)+1:
hyperReducedSubmodelpart.AddElement(originalElement,0)
print(f'For the submodelpart {hyperReducedSubmodelpart.Name}, the element with the Id {originalElement.Id} is assigned the key {key}')
for originalCondition in originalSubmodelpart.Conditions:
for key in HR_data["Conditions"].keys():
if originalCondition.Id == int(key)+1:
hyperReducedSubmodelpart.AddCondition(originalCondition,0)
print(f'For the submodelpart {hyperReducedSubmodelpart.Name}, the condition with the Id {originalCondition.Id} is assigned the key {key}')
# Building the VISUALIZE_HROM submodel part
print('Adding skin for visualization...')
hyper_reduced_model_part2 = HROM_Model_Part.CreateSubModelPart("VISUALIZE_HROM")
for condition in computing_model_part.Conditions:
for node in condition.GetNodes():
hyper_reduced_model_part2.AddNode(node, 0)
hyper_reduced_model_part2.AddCondition(condition, 0)
for node in computing_model_part.Nodes:
hyper_reduced_model_part2.AddNode(node, 0)
## Creating the mdpa file using ModelPartIO object
print('About to print ...')
KratosMultiphysics.ModelPartIO("Hyper_Reduced_Model_Part", KratosMultiphysics.IO.WRITE| KratosMultiphysics.IO.MESH_ONLY ).WriteModelPart(HROM_Model_Part)
print('\nHyper_Reduced_Model_Part.mdpa created!\n')
KratosMultiphysics.kratos_utilities.DeleteFileIfExisting("Hyper_Reduced_Model_Part.time")
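# Hypothetical end-to-end driver for the class above (residuals here stands for a
# list of projected residual matrices; num_elems and "MainModelPart" are placeholders):
#   ecm = EmpiricalCubatureMethod(ECM_tolerance=1e-6, Plotting=False)
#   ecm.SetUp(residuals, OriginalNumberOfElements=num_elems, ModelPartName="MainModelPart")
#   ecm.Initialize()
#   ecm.Calculate()               # selects element ids (ecm.z) and weights (ecm.w)
#   ecm.WriteSelectedElements()   # writes ElementsAndWeights.json + hyper-reduced mdpa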
|
11534876
|
import unittest
from galry import *
from test import GalryTest
class PM(PaintManager):
def initialize(self):
position = np.zeros((4, 2))
position[:,0] = [-.5, .5, .5, -.5]
position[:,1] = [-.5, -.5, .5, .5]
# update the index to a bigger array
index0 = [0, 2, 1]
index1 = [0, 1, 2, 3, 0]
self.add_visual(PlotVisual, position=position, color=(1., 1., 1., 1.),
primitive_type='LINE_STRIP', index=index0)
self.set_data(index=index1)
class PlotUpdateIndexedTest(GalryTest):
def test(self):
self.show(paint_manager=PM)
if __name__ == '__main__':
unittest.main()
# show_basic_window(paint_manager=PM)
|
11534920
|
import pandas as pd
from sklearn.base import BaseEstimator
from hcrystalball.exceptions import DuplicatedModelNameError
from hcrystalball.utils import check_fit_before_predict
from hcrystalball.utils import check_X_y
from hcrystalball.utils import enforce_y_type
from hcrystalball.utils import get_estimator_name
class SimpleEnsemble(BaseEstimator):
"""SimpleEnsemble model, which takes a list of any hcrystalball model
wrapper instance(s) as base learners and aggregates their prediction
using `ensemble_func`.
See motivation to average forecasts from different models
https://otexts.com/fpp2/combinations.html
Parameters
----------
name: str
Unique name / identifier of the model instance
base_learners: list
List of fully instantiated hcrystalball model wrappers
ensemble_func: {'mean', 'median', 'min', 'max'}
Function to aggregate `base_learners` predictions
"""
def __init__(
self,
base_learners,
ensemble_func="mean",
name="simple_ensemble",
clip_predictions_lower=None,
clip_predictions_upper=None,
):
self._check_base_learners_names(base_learners)
self.base_learners = base_learners
self.name = name
if ensemble_func not in ("mean", "median", "min", "max"):
raise ValueError(
"Invalid ensemble_func passed. Valid choices are: 'mean', 'median', 'min', 'max' "
)
self.ensemble_func = ensemble_func
self.fitted = False
self.clip_predictions_lower = clip_predictions_lower
self.clip_predictions_upper = clip_predictions_upper
@staticmethod
def _check_base_learners_names(models):
"""Check if the base learner models have all unique names
Parameters
----------
models: list
            List of instantiated hcrystalball model wrapper instances
Raises
------
DuplicatedModelNameError
If multiple models have the same `name` attribute.
"""
names = [get_estimator_name(model) for model in models]
if len(names) != len(set(names)):
raise DuplicatedModelNameError(
"There seems to be duplicates in model names among SimpleEnsemble base learners."
"Model names should be unique."
)
@enforce_y_type
@check_X_y
def fit(self, X, y=None):
"""Fit the stacking ensemble model
Parameters
----------
X: pandas.DataFrame
Input features.
y: numpy.ndarray
Target vector.
Returns
-------
SimpleEnsemble
A fitted SimpleEnsemble instance
"""
self._check_base_learners_names(self.base_learners)
for model in self.base_learners:
model.fit(X, y)
self.fitted = True
return self
@check_fit_before_predict
def predict(self, X):
"""Calculate the prediction of the ensemble for a given set of date / time
Parameters
----------
X: pandas.DataFrame
DataFrame container with a single column, named 'date',
containing the datetimes for which the predictions should be made.
Returns
-------
pandas.DataFrame
A DataFrame container with the index being the input (date)time vector.
The single column in the DataFrame contains the prediction and the column
name is the name of the model (i.e. the `name` parameter passed to the constructor)
"""
y_pred = pd.DataFrame(index=X.index, columns=[self.name])
for model in self.base_learners:
model_name = get_estimator_name(model)
y_pred[model_name] = model.predict(X)
y_pred[self.name] = y_pred.drop(columns=[self.name]).apply(self.ensemble_func, axis=1)
y_pred[self.name] = y_pred[self.name].clip(
lower=self.clip_predictions_lower, upper=self.clip_predictions_upper
)
return y_pred[[self.name]]
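# Usage sketch (assumes hcrystalball's ExponentialSmoothingWrapper is available;
# any other wrapper instances with unique names work the same way):
#   from hcrystalball.wrappers import ExponentialSmoothingWrapper
#   ensemble = SimpleEnsemble(
#       base_learners=[
#           ExponentialSmoothingWrapper(name="smoothing"),
#           ExponentialSmoothingWrapper(trend="add", name="smoothing_trend"),
#       ],
#       ensemble_func="median",
#   )
#   preds = ensemble.fit(X_train, y_train).predict(X_test)  # X carries the datetimes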
|
11534933
|
from __future__ import unicode_literals
from django.test import SimpleTestCase
from ...utils import setup
class GetAvailableLanguagesTagTests(SimpleTestCase):
libraries = {'i18n': 'django.templatetags.i18n'}
@setup({'i18n12': '{% load i18n %}'
'{% get_available_languages as langs %}{% for lang in langs %}'
'{% if lang.0 == "de" %}{{ lang.0 }}{% endif %}{% endfor %}'})
def test_i18n12(self):
output = self.engine.render_to_string('i18n12')
self.assertEqual(output, 'de')
|
11534938
|
import pytest
from hypothesis import given
import hypothesis.strategies as st
import helpers
@given(st.lists(st.integers(), min_size=1), st.integers(1))
def test_divide_into_chunks(l, n):
assert list(helpers.divide_into_chunks(l, n)) == list(
l[i : i + n] for i in range(0, len(l), max(1, n))
)
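# For reference, one implementation consistent with the property above (the real
# helpers.divide_into_chunks may differ):
#   def divide_into_chunks(lst, n):
#       for i in range(0, len(lst), max(1, n)):
#           yield lst[i : i + n]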
|
11534943
|
def helper(lst, s, e):
    # Recursively check whether lst[s..e] reads the same forwards and backwards
    if s >= e:
        return True
    if lst[s] == lst[e]:
        return helper(lst, s+1, e-1)
    else:
        return False

lst = list(map(int, input().split()))
ans = helper(lst, 0, len(lst)-1)
if ans:
    print("Palindrome")
else:
    print("Not Palindrome")
|