| id | content |
|---|---|
11553225
|
import math as _math
def error(msg, debug=True):
if debug:
print(f"[ERROR]! {msg}")
def warn(msg, debug=True):
if debug:
print(f"[WARNING]! {msg}")
def info(msg, debug=True):
    if debug:
        print(msg)
def success(msg, debug=True):
if debug:
print(f"[SUCCESS]! {msg}")
def lazy_debug(x, add=0):
    # Throttle helper: returns True once every (_scale + 1) values of x, with the
    # interval growing as log(x) * log(add); both arguments are clamped to >= 1
    # so the logs are defined.
    _scale = int(_math.log(max(x, 1)) * _math.log(max(add, 1)))
    return x % (_scale + 1) == 0
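# A minimal usage sketch of the helpers above (illustrative only):
if __name__ == '__main__':
    info('starting up')
    warn('low disk space')
    for step in range(1, 1001):
        # lazy_debug gates noisy output: it fires less often as step grows
        if lazy_debug(step, add=100):
            success(f'checkpoint at step {step}')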
|
11553243
|
import yaml
from twisted.trial import unittest
from ooni.reporter import OSafeDumper
from scapy.all import IP, UDP
class TestScapyRepresent(unittest.TestCase):
    def test_represent_scapy(self):
        """Dumping a scapy packet through OSafeDumper should not raise."""
        data = IP() / UDP()
        yaml.dump_all([data], Dumper=OSafeDumper)
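# The test passes as long as yaml.dump_all serializes the scapy packet without
# raising; run it with twisted's trial runner, e.g. `trial <path.to.this.module>`
# (the module path here is illustrative).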
|
11553282
|
import functools
import spconv.pytorch as spconv
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from ..ops import (ballquery_batch_p, bfs_cluster, get_mask_iou_on_cluster, get_mask_iou_on_pred,
get_mask_label, global_avg_pool, sec_max, sec_min, voxelization,
voxelization_idx)
from ..util import cuda_cast, force_fp32, rle_encode
from .blocks import MLP, ResidualBlock, UBlock
class SoftGroup(nn.Module):
def __init__(self,
channels=32,
num_blocks=7,
semantic_only=False,
semantic_classes=20,
instance_classes=18,
                 sem2ins_classes=(),
ignore_label=-100,
grouping_cfg=None,
instance_voxel_cfg=None,
train_cfg=None,
test_cfg=None,
                 fixed_modules=()):
super().__init__()
self.channels = channels
self.num_blocks = num_blocks
self.semantic_only = semantic_only
self.semantic_classes = semantic_classes
self.instance_classes = instance_classes
self.sem2ins_classes = sem2ins_classes
self.ignore_label = ignore_label
self.grouping_cfg = grouping_cfg
self.instance_voxel_cfg = instance_voxel_cfg
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.fixed_modules = fixed_modules
block = ResidualBlock
norm_fn = functools.partial(nn.BatchNorm1d, eps=1e-4, momentum=0.1)
# backbone
self.input_conv = spconv.SparseSequential(
spconv.SubMConv3d(
6, channels, kernel_size=3, padding=1, bias=False, indice_key='subm1'))
block_channels = [channels * (i + 1) for i in range(num_blocks)]
self.unet = UBlock(block_channels, norm_fn, 2, block, indice_key_id=1)
self.output_layer = spconv.SparseSequential(norm_fn(channels), nn.ReLU())
# point-wise prediction
self.semantic_linear = MLP(channels, semantic_classes, norm_fn=norm_fn, num_layers=2)
self.offset_linear = MLP(channels, 3, norm_fn=norm_fn, num_layers=2)
# topdown refinement path
if not semantic_only:
self.tiny_unet = UBlock([channels, 2 * channels], norm_fn, 2, block, indice_key_id=11)
self.tiny_unet_outputlayer = spconv.SparseSequential(norm_fn(channels), nn.ReLU())
self.cls_linear = nn.Linear(channels, instance_classes + 1)
self.mask_linear = MLP(channels, instance_classes + 1, norm_fn=None, num_layers=2)
self.iou_score_linear = nn.Linear(channels, instance_classes + 1)
self.init_weights()
for mod in fixed_modules:
mod = getattr(self, mod)
for param in mod.parameters():
param.requires_grad = False
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, MLP):
m.init_weights()
if not self.semantic_only:
for m in [self.cls_linear, self.iou_score_linear]:
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def train(self, mode=True):
super().train(mode)
for mod in self.fixed_modules:
mod = getattr(self, mod)
for m in mod.modules():
if isinstance(m, nn.BatchNorm1d):
m.eval()
def forward(self, batch, return_loss=False):
if return_loss:
return self.forward_train(**batch)
else:
return self.forward_test(**batch)
@cuda_cast
def forward_train(self, batch_idxs, voxel_coords, p2v_map, v2p_map, coords_float, feats,
semantic_labels, instance_labels, instance_pointnum, instance_cls,
pt_offset_labels, spatial_shape, batch_size, **kwargs):
losses = {}
feats = torch.cat((feats, coords_float), 1)
voxel_feats = voxelization(feats, p2v_map)
input = spconv.SparseConvTensor(voxel_feats, voxel_coords.int(), spatial_shape, batch_size)
semantic_scores, pt_offsets, output_feats = self.forward_backbone(input, v2p_map)
# point wise losses
point_wise_loss = self.point_wise_loss(semantic_scores, pt_offsets, semantic_labels,
instance_labels, pt_offset_labels)
losses.update(point_wise_loss)
# instance losses
if not self.semantic_only:
proposals_idx, proposals_offset = self.forward_grouping(semantic_scores, pt_offsets,
batch_idxs, coords_float,
self.grouping_cfg)
if proposals_offset.shape[0] > self.train_cfg.max_proposal_num:
proposals_offset = proposals_offset[:self.train_cfg.max_proposal_num + 1]
proposals_idx = proposals_idx[:proposals_offset[-1]]
assert proposals_idx.shape[0] == proposals_offset[-1]
inst_feats, inst_map = self.clusters_voxelization(
proposals_idx,
proposals_offset,
output_feats,
coords_float,
rand_quantize=True,
**self.instance_voxel_cfg)
instance_batch_idxs, cls_scores, iou_scores, mask_scores = self.forward_instance(
inst_feats, inst_map)
instance_loss = self.instance_loss(cls_scores, mask_scores, iou_scores, proposals_idx,
proposals_offset, instance_labels, instance_pointnum,
instance_cls, instance_batch_idxs)
losses.update(instance_loss)
return self.parse_losses(losses)
def point_wise_loss(self, semantic_scores, pt_offsets, semantic_labels, instance_labels,
pt_offset_labels):
losses = {}
semantic_loss = F.cross_entropy(
semantic_scores, semantic_labels, ignore_index=self.ignore_label)
losses['semantic_loss'] = semantic_loss
pos_inds = instance_labels != self.ignore_label
if pos_inds.sum() == 0:
offset_loss = 0 * pt_offsets.sum()
else:
offset_loss = F.l1_loss(
pt_offsets[pos_inds], pt_offset_labels[pos_inds], reduction='sum') / pos_inds.sum()
losses['offset_loss'] = offset_loss
return losses
@force_fp32(apply_to=('cls_scores', 'mask_scores', 'iou_scores'))
def instance_loss(self, cls_scores, mask_scores, iou_scores, proposals_idx, proposals_offset,
instance_labels, instance_pointnum, instance_cls, instance_batch_idxs):
losses = {}
proposals_idx = proposals_idx[:, 1].cuda()
proposals_offset = proposals_offset.cuda()
# cal iou of clustered instance
ious_on_cluster = get_mask_iou_on_cluster(proposals_idx, proposals_offset, instance_labels,
instance_pointnum)
# filter out background instances
fg_inds = (instance_cls != self.ignore_label)
fg_instance_cls = instance_cls[fg_inds]
fg_ious_on_cluster = ious_on_cluster[:, fg_inds]
# overlap > thr on fg instances are positive samples
max_iou, gt_inds = fg_ious_on_cluster.max(1)
pos_inds = max_iou >= self.train_cfg.pos_iou_thr
pos_gt_inds = gt_inds[pos_inds]
# compute cls loss. follow detection convention: 0 -> K - 1 are fg, K is bg
labels = fg_instance_cls.new_full((fg_ious_on_cluster.size(0), ), self.instance_classes)
labels[pos_inds] = fg_instance_cls[pos_gt_inds]
cls_loss = F.cross_entropy(cls_scores, labels)
losses['cls_loss'] = cls_loss
# compute mask loss
mask_cls_label = labels[instance_batch_idxs.long()]
slice_inds = torch.arange(
0, mask_cls_label.size(0), dtype=torch.long, device=mask_cls_label.device)
mask_scores_sigmoid_slice = mask_scores.sigmoid()[slice_inds, mask_cls_label]
mask_label = get_mask_label(proposals_idx, proposals_offset, instance_labels, instance_cls,
instance_pointnum, ious_on_cluster, self.train_cfg.pos_iou_thr)
mask_label_weight = (mask_label != -1).float()
mask_label[mask_label == -1.] = 0.5 # any value is ok
mask_loss = F.binary_cross_entropy(
mask_scores_sigmoid_slice, mask_label, weight=mask_label_weight, reduction='sum')
mask_loss /= (mask_label_weight.sum() + 1)
losses['mask_loss'] = mask_loss
# compute iou score loss
ious = get_mask_iou_on_pred(proposals_idx, proposals_offset, instance_labels,
instance_pointnum, mask_scores_sigmoid_slice.detach())
fg_ious = ious[:, fg_inds]
gt_ious, _ = fg_ious.max(1)
slice_inds = torch.arange(0, labels.size(0), dtype=torch.long, device=labels.device)
iou_score_weight = (labels < self.instance_classes).float()
iou_score_slice = iou_scores[slice_inds, labels]
iou_score_loss = F.mse_loss(iou_score_slice, gt_ious, reduction='none')
iou_score_loss = (iou_score_loss * iou_score_weight).sum() / (iou_score_weight.sum() + 1)
losses['iou_score_loss'] = iou_score_loss
return losses
def parse_losses(self, losses):
loss = sum(v for v in losses.values())
losses['loss'] = loss
for loss_name, loss_value in losses.items():
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
losses[loss_name] = loss_value.item()
return loss, losses
@cuda_cast
def forward_test(self, batch_idxs, voxel_coords, p2v_map, v2p_map, coords_float, feats,
semantic_labels, instance_labels, pt_offset_labels, spatial_shape, batch_size,
scan_ids, **kwargs):
feats = torch.cat((feats, coords_float), 1)
voxel_feats = voxelization(feats, p2v_map)
input = spconv.SparseConvTensor(voxel_feats, voxel_coords.int(), spatial_shape, batch_size)
semantic_scores, pt_offsets, output_feats = self.forward_backbone(
input, v2p_map, x4_split=self.test_cfg.x4_split)
if self.test_cfg.x4_split:
coords_float = self.merge_4_parts(coords_float)
semantic_labels = self.merge_4_parts(semantic_labels)
instance_labels = self.merge_4_parts(instance_labels)
pt_offset_labels = self.merge_4_parts(pt_offset_labels)
semantic_preds = semantic_scores.max(1)[1]
ret = dict(
scan_id=scan_ids[0],
coords_float=coords_float.cpu().numpy(),
semantic_preds=semantic_preds.cpu().numpy(),
semantic_labels=semantic_labels.cpu().numpy(),
offset_preds=pt_offsets.cpu().numpy(),
offset_labels=pt_offset_labels.cpu().numpy(),
instance_labels=instance_labels.cpu().numpy())
if not self.semantic_only:
proposals_idx, proposals_offset = self.forward_grouping(semantic_scores, pt_offsets,
batch_idxs, coords_float,
self.grouping_cfg)
inst_feats, inst_map = self.clusters_voxelization(proposals_idx, proposals_offset,
output_feats, coords_float,
**self.instance_voxel_cfg)
_, cls_scores, iou_scores, mask_scores = self.forward_instance(inst_feats, inst_map)
pred_instances = self.get_instances(scan_ids[0], proposals_idx, semantic_scores,
cls_scores, iou_scores, mask_scores)
gt_instances = self.get_gt_instances(semantic_labels, instance_labels)
ret.update(dict(pred_instances=pred_instances, gt_instances=gt_instances))
return ret
def forward_backbone(self, input, input_map, x4_split=False):
if x4_split:
output_feats = self.forward_4_parts(input, input_map)
output_feats = self.merge_4_parts(output_feats)
else:
output = self.input_conv(input)
output = self.unet(output)
output = self.output_layer(output)
output_feats = output.features[input_map.long()]
semantic_scores = self.semantic_linear(output_feats)
pt_offsets = self.offset_linear(output_feats)
return semantic_scores, pt_offsets, output_feats
def forward_4_parts(self, x, input_map):
"""Helper function for s3dis: devide and forward 4 parts of a scene."""
outs = []
for i in range(4):
inds = x.indices[:, 0] == i
feats = x.features[inds]
coords = x.indices[inds]
coords[:, 0] = 0
x_new = spconv.SparseConvTensor(
indices=coords, features=feats, spatial_shape=x.spatial_shape, batch_size=1)
out = self.input_conv(x_new)
out = self.unet(out)
out = self.output_layer(out)
outs.append(out.features)
outs = torch.cat(outs, dim=0)
return outs[input_map.long()]
def merge_4_parts(self, x):
"""Helper function for s3dis: take output of 4 parts and merge them."""
inds = torch.arange(x.size(0), device=x.device)
p1 = inds[::4]
p2 = inds[1::4]
p3 = inds[2::4]
p4 = inds[3::4]
ps = [p1, p2, p3, p4]
x_split = torch.split(x, [p.size(0) for p in ps])
x_new = torch.zeros_like(x)
for i, p in enumerate(ps):
x_new[p] = x_split[i]
return x_new
    @force_fp32(apply_to=('semantic_scores', 'pt_offsets'))
def forward_grouping(self,
semantic_scores,
pt_offsets,
batch_idxs,
coords_float,
grouping_cfg=None):
proposals_idx_list = []
proposals_offset_list = []
batch_size = batch_idxs.max() + 1
semantic_scores = semantic_scores.softmax(dim=-1)
radius = self.grouping_cfg.radius
mean_active = self.grouping_cfg.mean_active
npoint_thr = self.grouping_cfg.npoint_thr
class_numpoint_mean = torch.tensor(
self.grouping_cfg.class_numpoint_mean, dtype=torch.float32)
for class_id in range(self.semantic_classes):
if class_id in self.grouping_cfg.ignore_classes:
continue
scores = semantic_scores[:, class_id].contiguous()
object_idxs = (scores > self.grouping_cfg.score_thr).nonzero().view(-1)
if object_idxs.size(0) < self.test_cfg.min_npoint:
continue
batch_idxs_ = batch_idxs[object_idxs]
batch_offsets_ = self.get_batch_offsets(batch_idxs_, batch_size)
coords_ = coords_float[object_idxs]
pt_offsets_ = pt_offsets[object_idxs]
idx, start_len = ballquery_batch_p(coords_ + pt_offsets_, batch_idxs_, batch_offsets_,
radius, mean_active)
proposals_idx, proposals_offset = bfs_cluster(class_numpoint_mean, idx.cpu(),
start_len.cpu(), npoint_thr, class_id)
proposals_idx[:, 1] = object_idxs[proposals_idx[:, 1].long()].int()
# merge proposals
if len(proposals_offset_list) > 0:
proposals_idx[:, 0] += sum([x.size(0) for x in proposals_offset_list]) - 1
proposals_offset += proposals_offset_list[-1][-1]
proposals_offset = proposals_offset[1:]
if proposals_idx.size(0) > 0:
proposals_idx_list.append(proposals_idx)
proposals_offset_list.append(proposals_offset)
proposals_idx = torch.cat(proposals_idx_list, dim=0)
proposals_offset = torch.cat(proposals_offset_list)
return proposals_idx, proposals_offset
def forward_instance(self, inst_feats, inst_map):
feats = self.tiny_unet(inst_feats)
feats = self.tiny_unet_outputlayer(feats)
# predict mask scores
mask_scores = self.mask_linear(feats.features)
mask_scores = mask_scores[inst_map.long()]
instance_batch_idxs = feats.indices[:, 0][inst_map.long()]
# predict instance cls and iou scores
feats = self.global_pool(feats)
cls_scores = self.cls_linear(feats)
iou_scores = self.iou_score_linear(feats)
return instance_batch_idxs, cls_scores, iou_scores, mask_scores
@force_fp32(apply_to=('semantic_scores', 'cls_scores', 'iou_scores', 'mask_scores'))
def get_instances(self, scan_id, proposals_idx, semantic_scores, cls_scores, iou_scores,
mask_scores):
num_instances = cls_scores.size(0)
num_points = semantic_scores.size(0)
cls_scores = cls_scores.softmax(1)
semantic_pred = semantic_scores.max(1)[1]
cls_pred_list, score_pred_list, mask_pred_list = [], [], []
for i in range(self.instance_classes):
if i in self.sem2ins_classes:
cls_pred = cls_scores.new_tensor([i + 1], dtype=torch.long)
score_pred = cls_scores.new_tensor([1.], dtype=torch.float32)
mask_pred = (semantic_pred == i)[None, :].int()
else:
cls_pred = cls_scores.new_full((num_instances, ), i + 1, dtype=torch.long)
cur_cls_scores = cls_scores[:, i]
cur_iou_scores = iou_scores[:, i]
cur_mask_scores = mask_scores[:, i]
score_pred = cur_cls_scores * cur_iou_scores.clamp(0, 1)
mask_pred = torch.zeros((num_instances, num_points), dtype=torch.int, device='cuda')
mask_inds = cur_mask_scores > self.test_cfg.mask_score_thr
cur_proposals_idx = proposals_idx[mask_inds].long()
mask_pred[cur_proposals_idx[:, 0], cur_proposals_idx[:, 1]] = 1
# filter low score instance
inds = cur_cls_scores > self.test_cfg.cls_score_thr
cls_pred = cls_pred[inds]
score_pred = score_pred[inds]
mask_pred = mask_pred[inds]
# filter too small instances
npoint = mask_pred.sum(1)
inds = npoint >= self.test_cfg.min_npoint
cls_pred = cls_pred[inds]
score_pred = score_pred[inds]
mask_pred = mask_pred[inds]
cls_pred_list.append(cls_pred)
score_pred_list.append(score_pred)
mask_pred_list.append(mask_pred)
cls_pred = torch.cat(cls_pred_list).cpu().numpy()
score_pred = torch.cat(score_pred_list).cpu().numpy()
mask_pred = torch.cat(mask_pred_list).cpu().numpy()
instances = []
for i in range(cls_pred.shape[0]):
pred = {}
pred['scan_id'] = scan_id
pred['label_id'] = cls_pred[i]
pred['conf'] = score_pred[i]
# rle encode mask to save memory
pred['pred_mask'] = rle_encode(mask_pred[i])
instances.append(pred)
return instances
def get_gt_instances(self, semantic_labels, instance_labels):
"""Get gt instances for evaluation."""
# convert to evaluation format 0: ignore, 1->N: valid
label_shift = self.semantic_classes - self.instance_classes
semantic_labels = semantic_labels - label_shift + 1
semantic_labels[semantic_labels < 0] = 0
instance_labels += 1
ignore_inds = instance_labels < 0
# scannet encoding rule
gt_ins = semantic_labels * 1000 + instance_labels
gt_ins[ignore_inds] = 0
gt_ins = gt_ins.cpu().numpy()
return gt_ins
@force_fp32(apply_to='feats')
def clusters_voxelization(self,
clusters_idx,
clusters_offset,
feats,
coords,
scale,
spatial_shape,
rand_quantize=False):
batch_idx = clusters_idx[:, 0].cuda().long()
c_idxs = clusters_idx[:, 1].cuda()
feats = feats[c_idxs.long()]
coords = coords[c_idxs.long()]
coords_min = sec_min(coords, clusters_offset.cuda())
coords_max = sec_max(coords, clusters_offset.cuda())
# 0.01 to ensure voxel_coords < spatial_shape
clusters_scale = 1 / ((coords_max - coords_min) / spatial_shape).max(1)[0] - 0.01
clusters_scale = torch.clamp(clusters_scale, min=None, max=scale)
coords_min = coords_min * clusters_scale[:, None]
coords_max = coords_max * clusters_scale[:, None]
clusters_scale = clusters_scale[batch_idx]
coords = coords * clusters_scale[:, None]
if rand_quantize:
# after this, coords.long() will have some randomness
            coords_range = coords_max - coords_min
            coords_min -= torch.clamp(spatial_shape - coords_range - 0.001, min=0) * torch.rand(3).cuda()
            coords_min -= torch.clamp(spatial_shape - coords_range + 0.001, max=0) * torch.rand(3).cuda()
coords_min = coords_min[batch_idx]
coords -= coords_min
assert coords.shape.numel() == ((coords >= 0) * (coords < spatial_shape)).sum()
coords = coords.long()
coords = torch.cat([clusters_idx[:, 0].view(-1, 1).long(), coords.cpu()], 1)
out_coords, inp_map, out_map = voxelization_idx(coords, int(clusters_idx[-1, 0]) + 1)
out_feats = voxelization(feats, out_map.cuda())
spatial_shape = [spatial_shape] * 3
voxelization_feats = spconv.SparseConvTensor(out_feats,
out_coords.int().cuda(), spatial_shape,
int(clusters_idx[-1, 0]) + 1)
return voxelization_feats, inp_map
def get_batch_offsets(self, batch_idxs, bs):
batch_offsets = torch.zeros(bs + 1).int().cuda()
for i in range(bs):
batch_offsets[i + 1] = batch_offsets[i] + (batch_idxs == i).sum()
assert batch_offsets[-1] == batch_idxs.shape[0]
return batch_offsets
    @force_fp32(apply_to=('x',))
def global_pool(self, x, expand=False):
indices = x.indices[:, 0]
batch_counts = torch.bincount(indices)
batch_offset = torch.cumsum(batch_counts, dim=0)
pad = batch_offset.new_full((1, ), 0)
batch_offset = torch.cat([pad, batch_offset]).int()
x_pool = global_avg_pool(x.features, batch_offset)
if not expand:
return x_pool
x_pool_expand = x_pool[indices.long()]
x.features = torch.cat((x.features, x_pool_expand), dim=1)
return x
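# A standalone sketch (illustrative; the demo itself only needs torch, though
# importing this module also requires spconv and the custom ops) of the indexing
# that merge_4_parts undoes: it assumes point j of the full scene was routed to
# part j % 4, so the concatenated part outputs are scattered back with strided
# indexing.
if __name__ == '__main__':
    orig = torch.arange(8, dtype=torch.float32)
    parts = [orig[i::4] for i in range(4)]             # the 4-way split
    stacked = torch.cat(parts)                         # what forward_4_parts returns
    merged = torch.zeros_like(orig)
    for i, part in enumerate(torch.split(stacked, [p.numel() for p in parts])):
        merged[i::4] = part                            # the scatter in merge_4_parts
    assert torch.equal(merged, orig)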
|
11553296
|
import torch
import numpy as np
from mmdet.core import bbox2result
from mmdet.models.builder import DETECTORS
from ...core.utils import flip_tensor
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class CenterNet(SingleStageDetector):
"""Implementation of CenterNet(Objects as Points)
<https://arxiv.org/abs/1904.07850>.
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(CenterNet, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
    def imshow_gpu_tensor(self, tensor):  # debugging helper: visualize a de-normalized image tensor
        from PIL import Image
        from torchvision import transforms
        device = tensor[0].device
        mean = torch.tensor([123.675, 116.28, 103.53]).to(device)
        std = torch.tensor([58.395, 57.12, 57.375]).to(device)
        tensor = (tensor[0].squeeze() * std[:, None, None]) + mean[:, None, None]
        tensor = tensor[0:1]
        if len(tensor.shape) == 4:
            image = tensor.permute(0, 2, 3, 1).cpu().clone().numpy()
        else:
            image = tensor.permute(1, 2, 0).cpu().clone().numpy()
        image = image.astype(np.uint8).squeeze()
        image = transforms.ToPILImage()(image)
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
        image = image.resize((256, 256), Image.LANCZOS)
        image.show()
# image.save('./img.jpg')
def extract_feat(self, img):
"""Directly extract features from the backbone+neck."""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
# self.imshow_gpu_tensor(img)
return x
def merge_aug_results(self, aug_results, with_nms):
"""Merge augmented detection bboxes and score.
Args:
aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each
image.
with_nms (bool): If True, do nms before return boxes.
Returns:
tuple: (out_bboxes, out_labels)
"""
recovered_bboxes, aug_labels = [], []
for single_result in aug_results:
recovered_bboxes.append(single_result[0][0])
aug_labels.append(single_result[0][1])
bboxes = torch.cat(recovered_bboxes, dim=0).contiguous()
labels = torch.cat(aug_labels).contiguous()
if with_nms:
out_bboxes, out_labels = self.bbox_head._bboxes_nms(
bboxes, labels, self.bbox_head.test_cfg)
else:
out_bboxes, out_labels = bboxes, labels
return out_bboxes, out_labels
def aug_test(self, imgs, img_metas, rescale=True):
"""Augment testing of CenterNet. Aug test must have flipped image pair,
and unlike CornerNet, it will perform an averaging operation on the
feature map instead of detecting bbox.
Args:
imgs (list[Tensor]): Augmented images.
img_metas (list[list[dict]]): Meta information of each image, e.g.,
image size, scaling factor, etc.
rescale (bool): If True, return boxes in original image space.
Default: True.
Note:
``imgs`` must including flipped image pairs.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
img_inds = list(range(len(imgs)))
assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (
'aug test must have flipped image pair')
aug_results = []
for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):
flip_direction = img_metas[flip_ind][0]['flip_direction']
img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
x = self.extract_feat(img_pair)
center_heatmap_preds, wh_preds, offset_preds = self.bbox_head(x)
assert len(center_heatmap_preds) == len(wh_preds) == len(
offset_preds) == 1
# Feature map averaging
center_heatmap_preds[0] = (
center_heatmap_preds[0][0:1] +
flip_tensor(center_heatmap_preds[0][1:2], flip_direction)) / 2
wh_preds[0] = (wh_preds[0][0:1] +
flip_tensor(wh_preds[0][1:2], flip_direction)) / 2
bbox_list = self.bbox_head.get_bboxes(
center_heatmap_preds,
wh_preds, [offset_preds[0][0:1]],
img_metas[ind],
rescale=rescale,
with_nms=False)
aug_results.append(bbox_list)
        nms_cfg = self.bbox_head.test_cfg.get('nms_cfg', None)
        with_nms = nms_cfg is not None
bbox_list = [self.merge_aug_results(aug_results, with_nms)]
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
return bbox_results
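# A minimal sketch of the flip-averaging in aug_test above (illustrative; the
# demo itself only needs torch, and torch.flip stands in for mmdet's
# flip_tensor): the prediction on the flipped image is flipped back along the
# width axis before averaging with the prediction on the original image.
if __name__ == '__main__':
    preds = torch.rand(2, 3, 4, 4)                    # [original, flipped] pair
    flipped_back = torch.flip(preds[1:2], dims=[3])   # undo a horizontal flip
    averaged = (preds[0:1] + flipped_back) / 2
    print(averaged.shape)                             # torch.Size([1, 3, 4, 4])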
|
11553302
|
import hashlib
import logging
import subprocess
import tarfile # For sending through a channel.
from typing import List
import gevent
import pytest
import os
from dateutil.parser import parse as dateparse
from volttron.platform.messaging.health import STATUS_GOOD, STATUS_BAD, \
STATUS_UNKNOWN
from volttron.platform.vip.agent import Agent, RPC
from volttron.platform.vip.agent.subsystems.query import Query
from volttron.platform import jsonapi
from volttrontesting.utils.platformwrapper import PlatformWrapper
logging.basicConfig(level=logging.DEBUG)
_log = logging.getLogger(__name__)
class ChannelSender(Agent):
def __init__(self, **kwargs):
super(ChannelSender, self).__init__(**kwargs)
self.sent_data = None
self.responses = []
self.reciever_file_path = "/tmp/sentfile.tar"
def send_file(self, to_peer, file_to_send):
_log.debug(f"Sending file to peer {to_peer}")
channel_name = "sending_file_channel"
channel = self.vip.channel(to_peer, channel_name)
_log.debug("Calling setup_send_file on receiver.")
self.vip.rpc.call(to_peer, "setup_send_file", channel_name)
gevent.sleep(0.5)
_log.debug("After calling rpc method!")
sha512 = hashlib.sha512()
with open(file_to_send, "rb") as infile:
first = True
while True:
with gevent.Timeout(120):
_log.debug("Attempting to read from channel")
# Protocol should be either a fetch or checksum
my_data = channel.recv()
op, size = jsonapi.loadb(my_data)
# op, size = channel.recv_multipart()
#_log.debug(f"Op size is {op} {size}")
if first:
first = False
if op != 'fetch':
channel.close(linger=0)
del channel
raise ValueError("Invalid protocol detected should be [b'fetch', size] where size is the amount of data to retrieve.")
if op == 'fetch':
chunk = infile.read(size)
if chunk:
sha512.update(chunk)
# _log.debug(f"Sending chunk: {chunk}")
channel.send(chunk)
else:
channel.send(b'complete')
break
elif op == 'checksum':
_log.debug(f"Sending checksum: {sha512.hexdigest()}")
channel.send(sha512.hexdigest().encode('utf-8'))
_log.debug("Complete sending of file. Closing channel.")
gevent.sleep(0.5)
channel.close(linger=0)
del channel
def do_send(self, peer, channel_name, data):
_log.debug(f"Creating connection to {peer} using channel {channel_name}")
# First create a channel and verify the connection
channel = self.vip.channel.create(peer, channel_name)
# Note no get at the end, because the greenlet should continue for
# a while.
_log.debug(f"Calling setup_channel on peer: {peer}")
self.vip.rpc.call(peer, 'setup_channel', channel_name)
with gevent.Timeout(10):
resp = channel.recv()
_log.debug(f"Sender got initial {resp}")
self.responses.append(resp)
# bytes are required to send across the zmq message bus.
channel.send(data)
with gevent.Timeout(10):
resp = channel.recv()
self.responses.append(resp)
channel.close(linger=0)
del channel
class ChannelReceiver(Agent):
def __init__(self, **kwargs):
super(ChannelReceiver, self).__init__(**kwargs)
self.the_channel = None
self.the_data = None
self.receiver_file_path = "/tmp/myreceived.tar"
@RPC.export
def setup_send_file(self, channel_name):
_log.debug("Setup send file executed!")
BYTES_REQUESTED = 1024
peer = self.vip.rpc.context.vip_message.peer
_log.debug(f"Creating channel to peer {peer} named: {channel_name}")
channel = self.vip.channel(peer, channel_name)
_log.debug("Sending data back to peer contact.")
make_fetch_request = jsonapi.dumpb(['fetch', BYTES_REQUESTED])
make_checksum_request = jsonapi.dumpb(['checksum', ''])
# channel.send(data_str) # .send_multipart(serialize_frames(['fetch', BYTES_REQUESTED]))
# # channel.send(BYTES_REQUESTED)
# data = channel.recv()
# _log.debug(f"data received {len(data)}")
with open(self.receiver_file_path, "wb") as fout:
sha512 = hashlib.sha512()
while True:
_log.debug("Receiver sending fetch")
channel.send(make_fetch_request)
                # chunk is the binary representation of the bytes read from
                # the other side of the connection
chunk = channel.recv()
if chunk == b'complete':
_log.debug("Completed file")
break
_log.debug("Receiver sending checksum")
channel.send(make_checksum_request)
checksum = channel.recv()
_log.debug(f"The checksum returned was: {checksum}")
sha512.update(chunk)
_log.debug(f"Received checksum: {checksum}")
_log.debug(f"Expected checksum: {sha512.hexdigest()}")
assert checksum.decode('utf-8') == sha512.hexdigest(), "Invalid checksum detected."
fout.write(chunk)
_log.debug("File completed!")
channel.close(linger=0)
del channel
@RPC.export
def setup_channel(self, channel_name):
"""
Start the processing of data coming through the channel. For this
test the sender will send data when we write to them the word
send_it
"""
peer = self.vip.rpc.context.vip_message.peer
_log.debug(f"Creating channel to peer {peer} named: {channel_name}")
channel = self.vip.channel(peer, channel_name)
gevent.sleep(0.1)
_log.debug("Sending data back to peer contact.")
channel.send(b'send_it')
# channel.send('send_it')
while True:
with gevent.Timeout(10):
self.the_data = channel.recv()
if self.the_data:
_log.debug(f"Receiver got the data {self.the_data}".encode('utf-8'))
channel.send(f"got {self.the_data}".encode('utf-8'))
gevent.sleep(0.1)
break
channel.close(linger=0)
del channel
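# Illustrative standalone helper (not used by the tests below): the sender and
# receiver above stream a file in fixed-size chunks while folding each chunk
# into a rolling sha512 digest. The same pattern, self-contained:
def _chunked_sha512(path, chunk_size=1024):
    digest = hashlib.sha512()
    with open(path, 'rb') as infile:
        while True:
            chunk = infile.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()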
@pytest.mark.agent
def test_channel_send_data(volttron_instance: PlatformWrapper):
if not volttron_instance.messagebus == "zmq":
pytest.skip("Channel only available for zmq message bus")
return
data = "x" * 50
sender = volttron_instance.build_agent(agent_class=ChannelSender,
identity="sender_agent", enable_channel=True)
receiver = volttron_instance.build_agent(agent_class=ChannelReceiver,
identity="receiver_agent", enable_channel=True)
sender.do_send(peer=receiver.core.identity, data=data.encode('utf-8'), channel_name="foo_data")
assert sender.responses
assert receiver.the_data
assert receiver.the_data == data.encode('utf-8')
sender.core.stop()
receiver.core.stop()
@pytest.mark.agent
def test_channel_send_file(volttron_instance: PlatformWrapper):
if not volttron_instance.messagebus == "zmq":
pytest.skip("Channel only available for zmq message bus")
return
# Create
with tarfile.open("/tmp/tmptar.tar", mode="w") as tf:
for x in range(1, 50):
with open(f"/tmp/data{x}", "w") as fin:
fin.write("x" * 50)
tf.add(f"/tmp/data{x}")
os.remove(f"/tmp/data{x}")
sender = volttron_instance.build_agent(agent_class=ChannelSender,
identity="sender_agent", enable_channel=True)
receiver = volttron_instance.build_agent(agent_class=ChannelReceiver,
identity="receiver_agent", enable_channel=True)
if os.path.exists(receiver.receiver_file_path):
os.remove(receiver.receiver_file_path)
sender.send_file(receiver.core.identity, "/tmp/tmptar.tar")
assert os.path.isfile(receiver.receiver_file_path), f"Couldn't find file {receiver.receiver_file_path}"
    with open("/tmp/tmptar.tar", 'rb') as sent, open(receiver.receiver_file_path, 'rb') as received:
        assert hashlib.sha256(sent.read()).hexdigest() == hashlib.sha256(received.read()).hexdigest()
sender.core.stop()
receiver.core.stop()
@pytest.mark.agent
def test_agent_can_get_platform_version(volttron_instance):
agent = volttron_instance.build_agent()
query = Query(agent.core)
response = subprocess.check_output(['volttron', "--version"],
stderr=subprocess.STDOUT, universal_newlines=True)
assert response.strip()
_, version = response.strip().split(" ")
platform_version = query.query("platform-version").get(timeout=2)
assert str(version) == str(platform_version)
@pytest.mark.agent
def test_agent_status_set_when_created(volttron_instance):
agent = volttron_instance.build_agent()
assert agent.vip.health.get_status() is not None
assert isinstance(agent.vip.health.get_status(), dict)
    status = agent.vip.health.get_status()
    assert status['status'] == STATUS_GOOD
    assert status['context'] is None
assert isinstance(agent.vip.health.get_status_json(), str)
    status = jsonapi.loads(agent.vip.health.get_status_json())
    assert status['status'] == STATUS_GOOD
    assert status['context'] is None
assert agent.vip.health.get_status_value() == STATUS_GOOD
@pytest.mark.agent
def test_agent_status_changes(volttron_instance):
unknown_message = "This is unknown"
bad_message = "Bad kitty"
agent = volttron_instance.build_agent()
agent.vip.health.set_status(STATUS_UNKNOWN, unknown_message)
r = agent.vip.health.get_status()
assert unknown_message == r['context']
assert STATUS_UNKNOWN == r['status']
agent.vip.health.set_status(STATUS_BAD, bad_message)
r = agent.vip.health.get_status()
assert bad_message == r['context']
assert STATUS_BAD == r['status']
@pytest.mark.agent
def test_agent_health_last_update_increases(volttron_instance):
agent = volttron_instance.build_agent()
s = agent.vip.health.get_status()
dt = dateparse(s['last_updated'], fuzzy=True)
agent.vip.health.set_status(STATUS_UNKNOWN, 'Unknown now!')
gevent.sleep(1)
s = agent.vip.health.get_status()
dt2 = dateparse(s['last_updated'], fuzzy=True)
assert dt < dt2
|
11553333
|
import numpy as np
from SALib.sample import sobol_sequence
from ..src.doe import DOE
from ..src.data import *
import pandas as pd
class doe_SALib(DOE):
"""
DOE-Module wrap for SALib
"""
    def __init__(self, num, variable, method='sobol'):
        """ Initialize """
        self.__name__ = "SALib"          # name of the module
        super().__init__(num, variable)  # initialize the base class
        self.method = method             # method of sampling
        func = getattr(self, method)     # select your method of sampling
        self.DATA(func(), self.keys)     # run your method of sampling
    def sobol(self):
        """ Method: Sobol sequence generator """
        points = sobol_sequence.sample(self.num, self.dim)  # sample the [0, 1] self.dim-dimensional hypercube
        for i, bound in enumerate(self.variable.values()):  # stretch the hypercube to your bounds
            points[:, i] = points[:, i] * (bound[1] - bound[0]) + bound[0]
        return points
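# A standalone illustration of the bound-stretching above, assuming SALib is
# installed (run it where this module's relative imports resolve); the bounds
# here are hypothetical:
if __name__ == '__main__':
    pts = sobol_sequence.sample(4, 2)                   # 4 Sobol points in [0, 1]^2
    bounds = {'x1': (0.0, 10.0), 'x2': (-1.0, 1.0)}
    for i, (lo, hi) in enumerate(bounds.values()):
        pts[:, i] = pts[:, i] * (hi - lo) + lo          # same stretch as sobol() above
    print(np.round(pts, 3))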
|
11553342
|
from __future__ import annotations
from jsonclasses import jsonclass, types
@jsonclass
class LinkedOwner:
id: str = types.str.primary.required
permissions: list[LinkedPermission] = types.listof('LinkedPermission').linkedby('owner')
@jsonclass(
can_read=types.getop.isobj(types.this.fval('owner'))
)
class LinkedPermission:
id: str = types.str.primary.required
owner: LinkedOwner = types.objof('LinkedOwner').linkto.required
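# Reading of the declaration above: every LinkedPermission links to exactly one
# LinkedOwner through its `owner` field, LinkedOwner.permissions is the reverse
# side of that link, and `can_read` only permits the operator who is the
# permission's owner to read it.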
|
11553375
|
from PikaObj import *
class List(TinyObj):
def __init__():
pass
    # append an arg to the end of the list
def append(arg: any):
pass
# get an arg by the index
def get(i: int) -> any:
pass
# set an arg by the index
def set(i: int, arg: any):
pass
    # get the length of the list
def len() -> int:
pass
# support for loop
def __iter__() -> any:
pass
# support for loop
def __next__() -> any:
pass
# support list[] = val
def __set__():
pass
# support val = list[]
def __get__() -> any:
pass
class Dict(TinyObj):
def __init__():
pass
# get an arg by the key
def get(key: str) -> any:
pass
# set an arg by the key
def set(key: str, arg: any):
pass
# remove an arg by the key
def remove(key: str):
pass
def __iter__() -> any:
pass
def __next__() -> any:
pass
# support dict[] = val
def __set__():
pass
# support val = dict[]
def __get__() -> any:
pass
class String(TinyObj):
    def set(s: str):
        pass
    def get() -> str:
        pass
def __iter__() -> any:
pass
def __next__() -> any:
pass
# support string[] = val
def __set__():
pass
# support val = string[]
def __get__() -> any:
pass
|
11553379
|
from bitmovin.resources.models import H264CodecConfiguration
from ..rest_service import RestService
class H264(RestService):
BASE_ENDPOINT_URL = 'encoding/configurations/video/h264'
def __init__(self, http_client):
super().__init__(http_client=http_client, relative_url=self.BASE_ENDPOINT_URL, class_=H264CodecConfiguration)
|
11553393
|
import h5py
import json
import os.path
import numpy as np
import torch
import torch.nn.init
from torch.utils.data import Dataset
from PIL import Image
from torchvision import transforms
class sclevrDataset(Dataset):
"""clevr dataset."""
    def __init__(self, opt, mode='train'):
        """
        Args:
            opt: options object; ``opt.qa_dir`` is the path to the folder holding
                the annotation h5 files, vocabulary jsons and images
            mode (string): 'train' or 'test'
        """
self.qa_dir = opt.qa_dir
self.imgFolder = self.qa_dir + '/sclevr/images/'
# qa h5
if mode == 'train':
file = h5py.File(os.path.join(self.qa_dir, 'annotation_sclevr_train.h5'), 'r')
self.qas = {}
self.qas['question'] = torch.from_numpy(np.int64(file['/ques_train'][:]))
self.qas['question_id'] = torch.from_numpy(np.int64(file['/question_id_train'][:]))
self.qas['img_id'] = torch.from_numpy(np.int32(file['/img_id_train'][:]))
self.qas['answers'] = file['/answers'][:]
file.close()
self.types = json.load(open(os.path.join(self.qa_dir, 'sclevr_train_type.json'), 'r'))
self.img_info = json.load(open(os.path.join(self.qa_dir, 'sclevr_train_info.json'), 'r'))
else:
file = h5py.File(os.path.join(self.qa_dir, 'annotation_sclevr_test.h5'), 'r')
self.qas = {}
self.qas['question'] = torch.from_numpy(np.int64(file['/ques_test'][:]))
self.qas['question_id'] = torch.from_numpy(np.int64(file['/question_id_test'][:]))
self.qas['img_id'] = torch.from_numpy(np.int32(file['/img_id_test'][:]))
self.qas['answers'] = file['/answers'][:]
file.close()
self.types = json.load(open(os.path.join(self.qa_dir, 'sclevr_test_type.json'), 'r'))
self.img_info = json.load(open(os.path.join(self.qa_dir, 'sclevr_test_info.json'), 'r'))
# train_test json
vocab = json.load(open(os.path.join(self.qa_dir, 'Vocab.json'), 'r'))
ansVocab = json.load(open(os.path.join(self.qa_dir, 'AnsVocab.json'), 'r'))
self.opt = {
'vocab_size' : len(vocab) , \
'out_vocab_size' : len(ansVocab), \
'sent_len' : self.qas['question'].size(1)
}
self.mode = mode
self.preprocess = transforms.Compose([
transforms.Resize((128,128)),
transforms.ToTensor()
])
print(' * sclevr-%s loaded' % mode)
def __len__(self):
return self.qas['question'].size(0)
def __getitem__(self, idx):
img_id = self.qas['img_id'][idx]
answer = self.qas['answers'][idx][0] - 1
answer = answer.item()
qid = self.qas['question_id'][idx]
def id2imgName(img_id, qid):
if self.mode == 'train': return self.imgFolder+'/train/%d.png' % img_id
else: return self.imgFolder + '/test/%d.png' % img_id
def load_image(img_name):
img_tensor = self.preprocess(Image.open(img_name).convert('RGB'))
return img_tensor
        img_name = id2imgName(img_id, qid)
        img = load_image(img_name)  # load once and reuse below
        img_info = self.img_info[img_id]
        return {
            'question': self.qas['question'][idx],
            'qid': qid,
            'answer': answer,
            'image': img,
            'img_name': img_name
        }
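# Expected layout under opt.qa_dir, as read above: annotation_sclevr_{train,test}.h5,
# sclevr_{train,test}_type.json, sclevr_{train,test}_info.json, Vocab.json,
# AnsVocab.json, and images in sclevr/images/{train,test}/<img_id>.png.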
|
11553399
|
from .random_erasing import RandomErasing
from .pad_crop import padcrop
from .autoaug import ImageNetPolicy
from .augmix import AugMix
from .half_crop import HalfCrop
import torchvision.transforms as transforms
__transforms_factory_before = {
'autoaug': ImageNetPolicy(prob=0.5),
'randomflip': transforms.RandomHorizontalFlip(p=0.5),
'padcrop': padcrop,
'colorjitor': transforms.ColorJitter(brightness=0.25, contrast=0.15, saturation=0.25, hue=0),
'augmix': AugMix(prob=0.5),
'halfcrop': HalfCrop(prob=0.5, keep_range=[0.5, 1.5])
}
__transforms_factory_after = {
'rea': RandomErasing(probability=0.5)
}
__KWARGS = ['total_epochs', 'mean', 'std']
def build_transforms(img_size, transforms_list, **kwargs):
for transform in transforms_list:
assert transform in __transforms_factory_before.keys() or transform in __transforms_factory_after.keys(), \
'Expect transforms in {} and {}, got {}'.format(__transforms_factory_before.keys(), __transforms_factory_after.keys(), transform)
# for key in kwargs.keys():
# assert key in __KWARGS, 'expect parameter in {} but got {}'.format(__KWARGS, key)
    results = [transforms.Resize(img_size, interpolation=3)]  # interpolation=3 is PIL bicubic
for transform in transforms_list:
if transform in __transforms_factory_before.keys():
if transform == 'padcrop':
results.append(__transforms_factory_before[transform](img_size))
else:
results.append(__transforms_factory_before[transform])
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
if 'mean' in kwargs.keys():
mean = kwargs['mean']
print('set mean {}'.format(mean))
if 'std' in kwargs.keys():
std = kwargs['std']
print('set std {}'.format(std))
results.extend(# totensor --> normalize
[transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)])
for transform in transforms_list:
if transform in __transforms_factory_after.keys():
results.append(__transforms_factory_after[transform])
return transforms.Compose(results)
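# Example usage (illustrative; assumes this package's transform modules are
# importable):
#   pipeline = build_transforms((256, 128), ['randomflip', 'padcrop', 'rea'],
#                               mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
#   out = pipeline(pil_image)  # Resize -> flip/pad-crop -> ToTensor ->
#                              # Normalize -> RandomErasing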
|
11553436
|
import pandas as pd
from textblob import TextBlob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
def sentiment_scores(X):
"""
Calculate sentiment indicators from `TextBlob <https://textblob.readthedocs.io/en/dev/>`_ (polarity and
subjectivity) and `vaderSentiment <https://pypi.org/project/vaderSentiment/>`_ (positive, negative and neutral
sentiments and compound score).
:param X: A dictionary, ``pandas.DataFrame``, tuple or list with the text strings. If it is a dictionary
(``pandas.DataFrame``), it must have a single key (column).
:return: A ``pandas.DataFrame`` with the sentiment scores for each text record. Shape [n_samples, 6].
"""
vader_analyser = SentimentIntensityAnalyzer()
X = pd.DataFrame(X).copy().rename(lambda x: 'predictor', axis='columns')
text_blob_scores = []
vader_scores = []
for i in X.index:
text = X.loc[i, 'predictor']
if text is None or str(text) == 'nan':
text = ''
text_blob_scores.append(TextBlob(text).sentiment)
vader_scores.append(vader_analyser.polarity_scores(text))
text_blob_scores_df = pd.DataFrame(text_blob_scores)
text_blob_scores_df.columns = 'text_blob_' + text_blob_scores_df.columns
text_blob_scores_df.index = X.index
vader_scores_df = pd.DataFrame.from_dict(vader_scores)
vader_scores_df.columns = 'vader_' + vader_scores_df.columns
vader_scores_df.index = X.index
all_scores = pd.concat([text_blob_scores_df, vader_scores_df], axis=1, ignore_index=False)
return all_scores
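# A quick usage sketch, assuming textblob and vaderSentiment are installed; the
# input may be a DataFrame, dict, list or tuple of strings, and None/NaN records
# are treated as empty text:
if __name__ == '__main__':
    sample = ['Great product, works well!', 'Terrible experience.', None]
    print(sentiment_scores(sample))  # 6 columns: 2 from TextBlob, 4 from VADER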
|
11553447
|
from datasets import fashion200k
from datasets import fashion_iq
from datasets import shoes
from datasets import vocabulary
import numpy as np
import tensorflow as tf
# python read_glove.py --dataset='fashion_iq' --data_path='datasets/fashion_iq/image_data'
# python read_glove.py --dataset='shoes' --data_path=''
# python read_glove.py --dataset='fashion200k' --data_path='datasets/fashion200k'
tf.app.flags.DEFINE_string(
    'glove_size', "42B", 'size of the GloVe corpus to load.')
tf.app.flags.DEFINE_string(
    'dataset', "fashion200k", 'name of the dataset.')
tf.app.flags.DEFINE_string(
'data_path', None, 'path of dataset.')
tf.app.flags.DEFINE_string(
'data_split', "train", 'either "train" or "test".')
tf.app.flags.DEFINE_string(
'subset', None, 'can be "dress" or "shirt" or "toptee".')
tf.app.flags.DEFINE_boolean(
'remove_rare_words', False, 'whether to remove the rare words.')
FLAGS = tf.app.flags.FLAGS
########### read dataset
print("Construct dataset")
if FLAGS.dataset == "fashion200k":
trainset = fashion200k.fashion200k(path=FLAGS.data_path, split=FLAGS.data_split)
elif FLAGS.dataset == "fashion_iq":
trainset = fashion_iq.fashion_iq(path=FLAGS.data_path, split=FLAGS.data_split, subset=FLAGS.subset)
elif FLAGS.dataset == "shoes":
trainset = shoes.shoes(path=FLAGS.data_path, split=FLAGS.data_split)
else:
raise ValueError("dataset is unknown.")
num_images = len(trainset.filenames)
### initialize the relations between source and target
if FLAGS.dataset == "fashion_iq":
trainset.generate_queries_(subset=FLAGS.subset)
all_texts = trainset.get_all_texts(subset=FLAGS.subset)
elif FLAGS.dataset == "shoes":
trainset.generate_queries_()
all_texts = trainset.get_all_texts()
elif FLAGS.dataset == "fashion200k":
### initialize the relations between source and target
trainset.caption_index_init_()
all_texts = trainset.get_all_texts()
else:
raise ValueError("dataset is unknown.")
num_modif = trainset.num_modifiable_imgs
vocab = vocabulary.SimpleVocab()
for text in all_texts:
vocab.add_text_to_vocab(text)
if FLAGS.remove_rare_words:
print('Remove rare words')
vocab.threshold_rare_words()
vocab_size = vocab.get_size()
print("Number of samples = {}. Number of words = {}.".format(num_modif, vocab_size))
########### read glove
filename = "glove/glove." + FLAGS.glove_size + ".300d.txt"
glove_vocab = []
glove_embed = []
embedding_dict = {}
with open(filename, 'r', encoding='UTF-8') as file:
    for line in file.readlines():
        row = line.strip().split(' ')
        vocab_word = row[0]
        glove_vocab.append(vocab_word)
        embed_vector = [float(i) for i in row[1:]]  # convert to list of float
        embedding_dict[vocab_word] = embed_vector
        glove_embed.append(embed_vector)
glove_vectors = np.zeros((len(vocab.word2id), len(embedding_dict['the'])))
print(glove_vectors.shape)
all_words = list(vocab.word2id.keys())
glove_all_words = list(embedding_dict.keys())
dim = len(embedding_dict['the'])
mu, sigma = 0, 0.09 # mean and standard deviation
count = 0
for i in range(len(vocab.word2id)):
word = all_words[i]
idx = vocab.word2id[word]
if word not in glove_all_words:
count = count + 1
vec = np.random.normal(mu, sigma, dim)
else:
vec = np.asarray(embedding_dict[word])
glove_vectors[idx,:] = vec
filename = 'glove/' + FLAGS.dataset + '.' + FLAGS.glove_size + '.300d.npy'
print('{} vocabulary words were missing from GloVe and initialized randomly'.format(count))
print('Saving embedding matrix to {}'.format(filename))
np.save(filename, glove_vectors)
print('Loaded GloVe')
|
11553450
|
from django.contrib.syndication.views import Feed
from django.utils import timezone
class LatestEntriesFeed(Feed):
def items(self):
now = timezone.now()
NewsItem = self.news_index.get_newsitem_model()
newsitem_list = NewsItem.objects.live().order_by('-date').filter(
newsindex=self.news_index, date__lte=now)[:20]
return newsitem_list
def item_link(self, item):
return item.full_url
def item_guid(self, item):
return item.full_url
item_guid_is_permalink = True
def item_pubdate(self, item):
return item.date
def __init__(self, news_index):
super(LatestEntriesFeed, self).__init__()
self.news_index = news_index
self.title = news_index.title
self.description = news_index.title
self.link = news_index.full_url
self.feed_url = self.link + news_index.reverse_subpage('feed')
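# Usage sketch (assumption: a wagtail-style news index page exposing full_url
# and a 'feed' subpage route): Django Feed subclasses are callable views, so
# LatestEntriesFeed(news_index)(request) renders the syndication response.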
|
11553451
|
import unittest
import os
import fudge
from fudge.inspector import arg
from fabric.contrib import project
class UploadProjectTestCase(unittest.TestCase):
"""Test case for :func: `fabric.contrib.project.upload_project`."""
fake_tmp = "testtempfolder"
def setUp(self):
fudge.clear_expectations()
# We need to mock out run, local, and put
self.fake_run = fudge.Fake('project.run', callable=True)
self.patched_run = fudge.patch_object(
project,
'run',
self.fake_run
)
self.fake_local = fudge.Fake('local', callable=True)
self.patched_local = fudge.patch_object(
project,
'local',
self.fake_local
)
self.fake_put = fudge.Fake('put', callable=True)
self.patched_put = fudge.patch_object(
project,
'put',
self.fake_put
)
# We don't want to create temp folders
self.fake_mkdtemp = fudge.Fake(
'mkdtemp',
expect_call=True
).returns(self.fake_tmp)
self.patched_mkdtemp = fudge.patch_object(
project,
'mkdtemp',
self.fake_mkdtemp
)
def tearDown(self):
self.patched_run.restore()
self.patched_local.restore()
self.patched_put.restore()
fudge.clear_expectations()
@fudge.with_fakes
def test_temp_folder_is_used(self):
"""A unique temp folder is used for creating the archive to upload."""
# Exercise
project.upload_project()
@fudge.with_fakes
def test_project_is_archived_locally(self):
"""The project should be archived locally before being uploaded."""
# local() is called more than once so we need an extra next_call()
# otherwise fudge compares the args to the last call to local()
self.fake_local.with_args(arg.startswith("tar -czf")).next_call()
# Exercise
project.upload_project()
@fudge.with_fakes
def test_current_directory_is_uploaded_by_default(self):
"""By default the project uploaded is the current working directory."""
cwd_path, cwd_name = os.path.split(os.getcwd())
# local() is called more than once so we need an extra next_call()
# otherwise fudge compares the args to the last call to local()
self.fake_local.with_args(
arg.endswith("-C %s %s" % (cwd_path, cwd_name))
).next_call()
# Exercise
project.upload_project()
@fudge.with_fakes
def test_path_to_local_project_can_be_specified(self):
"""It should be possible to specify which local folder to upload."""
project_path = "path/to/my/project"
# local() is called more than once so we need an extra next_call()
# otherwise fudge compares the args to the last call to local()
self.fake_local.with_args(
arg.endswith("-C path/to/my project")
).next_call()
# Exercise
project.upload_project(local_dir=project_path)
@fudge.with_fakes
def test_path_to_local_project_no_separator(self):
"""Local folder can have no path separator (in current directory)."""
project_path = "testpath"
# local() is called more than once so we need an extra next_call()
# otherwise fudge compares the args to the last call to local()
self.fake_local.with_args(
arg.endswith("-C . testpath")
).next_call()
# Exercise
project.upload_project(local_dir=project_path)
@fudge.with_fakes
def test_path_to_local_project_can_end_in_separator(self):
"""A local path ending in a separator should be handled correctly."""
project_path = "path/to/my"
base = "project"
# local() is called more than once so we need an extra next_call()
# otherwise fudge compares the args to the last call to local()
self.fake_local.with_args(
arg.endswith("-C %s %s" % (project_path, base))
).next_call()
# Exercise
project.upload_project(local_dir="%s/%s/" % (project_path, base))
@fudge.with_fakes
def test_default_remote_folder_is_home(self):
"""Project is uploaded to remote home by default."""
local_dir = "folder"
# local() is called more than once so we need an extra next_call()
# otherwise fudge compares the args to the last call to local()
self.fake_put.with_args(
"%s/folder.tar.gz" % self.fake_tmp, "folder.tar.gz", use_sudo=False
).next_call()
# Exercise
project.upload_project(local_dir=local_dir)
@fudge.with_fakes
def test_path_to_remote_folder_can_be_specified(self):
"""It should be possible to specify which local folder to upload to."""
local_dir = "folder"
remote_path = "path/to/remote/folder"
# local() is called more than once so we need an extra next_call()
# otherwise fudge compares the args to the last call to local()
self.fake_put.with_args(
"%s/folder.tar.gz" % self.fake_tmp, "%s/folder.tar.gz" % remote_path, use_sudo=False
).next_call()
# Exercise
project.upload_project(local_dir=local_dir, remote_dir=remote_path)
|
11553466
|
from typing import List, Optional
import torch
import torch.nn as nn
from torecsys.layers import FMLayer, DNNLayer
from torecsys.models.ctr import CtrBaseModel
class FactorizationMachineSupportedNeuralNetworkModel(CtrBaseModel):
r"""
Model class of Factorization Machine supported Neural Network (FMNN).
Factorization Machine supported Neural Network is a stack of Factorization Machine and Dense Network, with the
following calculation:
#. Calculate features interactions by factorization machine:
        :math:`y_{FM} = \text{Sigmoid} ( w_{0} + \sum_{i=1}^{N} w_{i} x_{i} + \sum_{i=1}^{N} \sum_{j=i+1}^{N}
        \langle v_{i}, v_{j} \rangle x_{i} x_{j} )`
#. Feed interactions' representation to dense network:
:math:`y_{i} = \text{Activation} ( w_{i} y_{i - 1} + b_{i} )`, where
:math:`y_{0} = y_{FM}` for the inputs of the first layer in dense network.
:Reference:
#. `<NAME> et al, 2016. Deep Learning over Multi-field Categorical Data: A Case Study on User Response
Prediction <https://arxiv.org/abs/1601.02376>`_.
"""
def __init__(self,
embed_size: int,
num_fields: int,
deep_output_size: int,
deep_layer_sizes: List[int],
fm_dropout_p: Optional[float] = 0.0,
deep_dropout_p: Optional[List[float]] = None,
deep_activation: Optional[nn.Module] = nn.ReLU()):
"""
Initialize FactorizationMachineSupportedNeuralNetworkModel
Args:
embed_size (int): size of embedding tensor
num_fields (int): number of inputs' fields
deep_output_size (int): output size of dense network
deep_layer_sizes (List[int]): layer sizes of dense network
fm_dropout_p (float, optional): probability of Dropout in FM. Defaults to 0.0
deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None
deep_activation (torch.nn.Module, optional): activation function of dense network. Defaults to nn.ReLU()
"""
super().__init__()
self.fm = FMLayer(fm_dropout_p)
cat_size = num_fields + embed_size
self.deep = DNNLayer(
inputs_size=cat_size,
output_size=deep_output_size,
layer_sizes=deep_layer_sizes,
dropout_p=deep_dropout_p,
activation=deep_activation
)
def forward(self, feat_inputs: torch.Tensor, emb_inputs: torch.Tensor) -> torch.Tensor:
"""
Forward calculation of FactorizationMachineSupportedNeuralNetworkModel
Args:
feat_inputs (T), shape = (B, N, 1), data_type = torch.float: linear Features tensors
emb_inputs (T), shape = (B, N, E), data_type = torch.float: embedded features tensors
Returns:
T, shape = (B, O), data_type = torch.float: output of FactorizationMachineSupportedNeuralNetworkModel
"""
        # squeeze feat_inputs to shape = (B, N)
        if feat_inputs.dim() == 2:
            fm_first = feat_inputs
            fm_first.names = ('B', 'O',)
        elif feat_inputs.dim() == 3:
            # name the axes so that flatten can merge ('N', 'E') into 'O';
            # naming must happen here, since a 2-d tensor cannot take 3 names
            feat_inputs.names = ('B', 'N', 'E',)
            # reshape feat_inputs from (B, N, 1) to (B, O = N)
            fm_first = feat_inputs.flatten(('N', 'E',), 'O')
        else:
            raise ValueError('Dimension of feat_inputs can only be 2 or 3')
# pass to fm layer where its returns' shape = (B, O = E)
fm_second = self.fm(emb_inputs)
# concat into a tensor with shape = (B, O = N + E)
fm_out = torch.cat([fm_first, fm_second], dim='O')
# feed-forward to deep neural network, return shape = (B, O)
outputs = self.deep(fm_out)
# since autograd does not support Named Tensor at this stage, drop the name of output tensor.
outputs = outputs.rename(None)
return outputs
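# A minimal smoke test, assuming torecsys is installed; the shapes follow the
# forward() docstring above: feat_inputs (B, N, 1) and emb_inputs (B, N, E).
if __name__ == '__main__':
    model = FactorizationMachineSupportedNeuralNetworkModel(
        embed_size=8, num_fields=4, deep_output_size=1, deep_layer_sizes=[16, 8])
    feat = torch.rand(2, 4, 1)
    emb = torch.rand(2, 4, 8)
    print(model(feat, emb).shape)  # expected: (B, O) = torch.Size([2, 1])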
|
11553484
|
import time
import json
from uuid import uuid4
from enum import Enum
from threading import Thread
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from django.core.validators import MinValueValidator
from clouds.models import StaticModel, Image, Instance, Mount, INSTANCE_STATUS, INSTANCE_OPERATION, OPERATION_STATUS, InstanceBlueprint, InstanceOperation, OperatableMixin, OperationModel, Group, GroupOperation
from django.utils.functional import cached_property
from django.db import transaction
from clouds.base.models import M2MOperatableMixin, M2MOperationModel
from engines.drivers.ambari import Driver
import pkgutil
from django.conf import settings
class COMPONENT_STATUS(Enum):
null=0 #unknown
active=1
block=2
suspend=3
stop=4
breakdown=5
pause=6
instance_lost=7
class COMPONENT_TYPE(Enum):
master="master"
slave="slave"
client="client"
class COMPONENT_OPERATION(Enum):
start="start"
stop="stop"
# restart="restart"
class Engine(StaticModel):#TODO make Engine customizable in the ui
uuid=models.UUIDField(auto_created=True, default=uuid4, editable=False)
name=models.CharField(max_length=100,unique=True)
description=models.TextField(max_length=5120,blank=True,default='')
public=models.BooleanField(default=True, editable=False)
def get_host(self, cluster):
return cluster.get_instances().first()
# hostname=self.stack_set.first().driver.get_engine_host(cluster.portal, self.name)
# return cluster.find_instance(hostname)
import importlib
class Stack(StaticModel):
name=models.CharField(max_length=100,unique=True,verbose_name='version')
engines=models.ManyToManyField(Engine,blank=True)
meta=models.TextField(max_length=5120,default="",blank=True,verbose_name='meta data in json format')
public=models.BooleanField(default=True, editable=False)
@cached_property
def meta_data(self):
return json.loads(self.meta)
class Scale(StaticModel):
init_blueprints=models.ManyToManyField(InstanceBlueprint,related_name="initialized_scales",verbose_name='initial blueprints')
step_blueprints=models.ManyToManyField(InstanceBlueprint,related_name="stepped_scales",blank=True,verbose_name='scale-out blueprints')
stack=models.ForeignKey(Stack,on_delete=models.PROTECT)
_remedy_script=models.TextField(max_length=5120,default="",blank=True,verbose_name='initial remedy script')
_remedy_script_scale_out=models.TextField(max_length=5120,default="",blank=True,verbose_name='scale-out remedy script')
_remedy_script_scale_in=models.TextField(max_length=5120,default="",blank=True,verbose_name='scale-in remedy script')
auto=models.BooleanField(default=False)
def __str__(self):
return "{}/{}".format(
self.name,
'auto' if self.auto else 'manual',
)
@cached_property
def remedy_script_scale_out(self):
return "###scale-out remedy {}: {}###\n{}\n".format(
self._meta.verbose_name,
self.name,
self._remedy_script_scale_out
) if self._remedy_script_scale_out else ""
@cached_property
def remedy_script_scale_in(self):
return "###scale-in remedy {}: {}###\n{}\n".format(
self._meta.verbose_name,
self.name,
self._remedy_script_scale_in
) if self._remedy_script_scale_in else ""
@cached_property
def init_size(self):
q=0
for ib in self.init_blueprints.all():
q+=ib.quantity
return q
@cached_property
def step_size(self):
q=0
for ib in self.step_blueprints.all():
q+=ib.quantity
return q
@cached_property
def available_engines(self):
return self.stack.engines.all()
def scale(self, owner, current_step=0, remark=None):
step=Group(owner=owner)
step.save()
if not current_step:#init
if not remark: remark='initialized from scale: {}'.format(self.name)
step.remark=remark
for ib in self.init_blueprints.all():
inss=ib.launch(owner=owner, remark=remark)
step.instances.add(*inss)
else:
if not remark: remark='scaled from scale: {}'.format(self.name)
step.remark=remark
for ib in self.step_blueprints.all():
next_number=ib.quantity*(current_step-1)+1
if self.init_blueprints.filter(pk=ib.pk).exists():
next_number+=ib.quantity
inss=ib.launch(owner=owner, next_number=next_number, remark=remark)
step.instances.add(*inss)
step.remark=remark
step.save()
return step
def scales_of_user(self):
excluded_blueprints=InstanceBlueprint.objects.exclude(pk__in=self.blueprints())
return Scale.objects.exclude(
Q(init_blueprints__in=excluded_blueprints) | Q(step_blueprints__in=excluded_blueprints)
).filter(enabled=True).filter(Q(public=True) | Q(owner=self)).distinct()
User.scales=scales_of_user
class Cluster(models.Model,M2MOperatableMixin):
uuid=models.UUIDField(auto_created=True, default=uuid4, editable=False)
name=models.CharField(max_length=50)
scale=models.ForeignKey(Scale,on_delete=models.PROTECT)
engines=models.ManyToManyField(Engine,blank=True)
steps=models.ManyToManyField(Group,blank=True,editable=False)
remedy_script_todo=models.TextField(max_length=51200,default="",blank=True)
remark = models.CharField(blank=True,null=True,max_length=100)
public=models.BooleanField(default=False)
owner=models.ForeignKey(User,on_delete=models.PROTECT,editable=False)
created_time=models.DateTimeField(auto_now_add=True)
built_time=models.DateTimeField(blank=True, null=True, editable=False)
status= models.PositiveIntegerField(choices=[(status.value,status.name) for status in INSTANCE_STATUS],default=INSTANCE_STATUS.building.value,editable=False)
deleting = models.BooleanField(default=False,editable=False)
class Meta:
unique_together = ('name', 'owner')
def __str__(self):
return "{}".format(self.name)
@staticmethod
def get_operation_model():
return ClusterOperation
@property
def operatables(self):
return self.steps.all()
@cached_property
def portal(self):#TODO formalize, opt perf.
if not self.ready: raise Exception('cluster not ready')
mi=self.get_ready_instances().filter(image__name__contains='master1')
if mi.exists(): return "http://"+str(mi[0].ipv4)+':8080'
return None
@cached_property
def driver(self):
return Driver(self.portal)
def import_engine(self):
stack=self.scale.stack
for e in self.driver.stack_engines:
eg, created=Engine.objects.get_or_create(
name=e.name,
defaults={
'owner': self.owner,
'description': e.description,
'remark': 'auto imported',
'enabled': stack.enabled
}
)
stack.engines.add(eg)
@property
def metrics(self):
return self.driver.metrics
@property
def ready(self):
return self.built_time
@property
def building(self):
return self.built_time and not self.ready
def get_ready_steps(self):
return self.steps.filter(built_time__isnull=False)
def get_instances(self):
return Instance.objects.filter(group__in=self.steps.filter()).distinct()
def get_ready_instances(self):
return Instance.objects.filter(group__in=self.get_ready_steps()).distinct()
@cached_property
def engines_unselected(self):
return Engine.objects.all().difference(self.engines.all())
@transaction.atomic
def scale_one_step(self):# the only way to scale cluster
cluster=self.__class__.objects.select_for_update().get(pk=self.pk)
step=cluster.scale.scale(
owner=cluster.owner,
current_step=cluster.steps.count(),
remark='cluster: '+cluster.name
)
cluster.steps.add(step)
def delete(self, *args, **kwargs):
if not self.ready:
            print('WARNING: deleting {} while it is still building'.format(self._meta.verbose_name))
operatables=self.operatables
if operatables.exists():
self.deleting=True
self.save()
for operatable in operatables:
operatable.destroy_script_todo=self.scale.remedy_script_scale_in
operatable.save()
operatable.delete()
else:
super().delete(*args, **kwargs)
# def start(self):
# return utils.ambari_service_start_all('admin','admin',self.portal)#TODO use credential args
# def stop(self):
# return utils.ambari_service_stop_all('admin','admin',self.portal)
# def add_selected_engines(self):
# for e in self.engines.all():
            #set maintenance mode instead of removing
# utils.ambari_service_maintenance_off('admin','admin',self.portal,e.name.upper())
# def remove_unselected_engines(self):
# for e in self.engines_unselected:
            #set maintenance mode instead of removing
# utils.ambari_service_maintenance_on('admin','admin',self.portal,e.name.upper())
# @cached_property
# def init_size(self):
# size=0
# for ib in self.blueprint.instance_blueprints.all():
# size+=ib.quantity
# return size
def find_instance(self,hostname):
return self.get_instances().filter(hostname__contains=hostname).first()
def clusters_of_user(self):
return Cluster.objects.filter(Q(public=True) | Q(owner=self))
User.clusters=clusters_of_user
def steps_of_user(self):
return Group.objects.filter(cluster__in=self.clusters()).distinct()
User.steps=steps_of_user
class ClusterOperation(M2MOperationModel):
target=models.ForeignKey(Cluster,on_delete=models.CASCADE)
class Meta:
verbose_name='cluster operation'
@staticmethod
def get_sub_operation_model():
return GroupOperation
class StepOperation(GroupOperation):
class Meta(GroupOperation.Meta):
proxy = True
verbose_name = 'operation'
def __str__(self):
return "Cluster {}/Step {}/{}/{}".format(self.cluster,self.number,self.operation,self.status)
@cached_property
def cluster(self):
return self.target.cluster_set.first()
@cached_property
def number(self):
steps=self.cluster.steps.all()
l=steps.count()
target_pk=self.target.pk
for i in range(l):
if steps[i].pk==target_pk:
return i+1
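# A worked example (illustrative, independent of the ORM) of the numbering
# used in Scale.scale above: a blueprint with quantity q starts step k
# (k >= 1) at q*(k-1)+1, shifted by another q when the same blueprint also
# ran in the initial step.
def _example_next_number(quantity, current_step, also_initial):
    next_number = quantity * (current_step - 1) + 1
    if also_initial:
        next_number += quantity
    return next_number  # e.g. quantity=3, current_step=1, also_initial=True -> 4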
|
11553506
|
import numpy
import chainer
from chainer import configuration
from chainer import cuda
from chainer.functions.normalization import batch_normalization
from chainer import initializers
from chainer import link
from chainer.utils import argument
from chainer import variable
from chainer.links import EmbedID
import chainer.functions as F
class ConditionalBatchNormalization(chainer.Chain):
"""
Conditional Batch Normalization
Args:
size (int or tuple of ints): Size (or shape) of channel
dimensions.
n_cat (int): the number of categories of categorical variable.
decay (float): Decay rate of moving average. It is used on training.
eps (float): Epsilon value for numerical stability.
dtype (numpy.dtype): Type to use in computing.
        (``gamma`` and ``beta`` are not constructor arguments here; the
        conditional scaling and shifting parameters are supplied per call.
        See ``__call__``.)
See: `Batch Normalization: Accelerating Deep Network Training by Reducing\
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_
.. seealso::
:func:`~chainer.functions.batch_normalization`,
:func:`~chainer.functions.fixed_batch_normalization`
Attributes:
gamma (~chainer.Variable): Scaling parameter.
beta (~chainer.Variable): Shifting parameter.
avg_mean (numpy.ndarray or cupy.ndarray): Population mean.
avg_var (numpy.ndarray or cupy.ndarray): Population variance.
N (int): Count of batches given for fine-tuning.
decay (float): Decay rate of moving average. It is used on training.
eps (float): Epsilon value for numerical stability. This value is added
to the batch variances.
"""
def __init__(self, size, n_cat, decay=0.9, eps=2e-5, dtype=numpy.float32):
super(ConditionalBatchNormalization, self).__init__()
self.avg_mean = numpy.zeros(size, dtype=dtype)
self.register_persistent('avg_mean')
self.avg_var = numpy.zeros(size, dtype=dtype)
self.register_persistent('avg_var')
self.N = 0
self.register_persistent('N')
self.decay = decay
self.eps = eps
self.n_cat = n_cat
def __call__(self, x, gamma, beta, **kwargs):
"""__call__(self, x, c, finetune=False)
Invokes the forward propagation of BatchNormalization.
        In training mode, BatchNormalization computes the moving averages of
        mean and variance for later evaluation, and normalizes the input
        using batch statistics.
.. warning::
``test`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('train', train)``.
See :func:`chainer.using_config`.
Args:
x (Variable): Input variable.
            gamma (Variable): Per-category scaling parameter, broadcast to
                the shape of the normalized output.
            beta (Variable): Per-category shifting parameter, broadcast in
                the same way.
finetune (bool): If it is in the training mode and ``finetune`` is
``True``, BatchNormalization runs in fine-tuning mode; it
accumulates the input array to compute population statistics
for normalization, and normalizes the input using batch
statistics.
"""
argument.check_unexpected_kwargs(
kwargs, test='test argument is not supported anymore. '
'Use chainer.using_config')
finetune, = argument.parse_kwargs(kwargs, ('finetune', False))
        with cuda.get_device_from_id(self._device_id):
            _gamma = variable.Variable(self.xp.ones(
                self.avg_mean.shape, dtype=x.dtype))
            _beta = variable.Variable(self.xp.zeros(
                self.avg_mean.shape, dtype=x.dtype))
if configuration.config.train:
if finetune:
self.N += 1
decay = 1. - 1. / self.N
else:
decay = self.decay
ret = chainer.functions.batch_normalization(x, _gamma, _beta, eps=self.eps, running_mean=self.avg_mean,
running_var=self.avg_var, decay=decay)
else:
# Use running average statistics or fine-tuned statistics.
mean = variable.Variable(self.avg_mean)
var = variable.Variable(self.avg_var)
ret = batch_normalization.fixed_batch_normalization(
x, _gamma, _beta, mean, var, self.eps)
shape = ret.shape
ndim = len(shape)
gamma = F.broadcast_to(F.reshape(gamma, list(gamma.shape) + [1] * (ndim - len(gamma.shape))), shape)
beta = F.broadcast_to(F.reshape(beta, list(beta.shape) + [1] * (ndim - len(beta.shape))), shape)
return gamma * ret + beta
def start_finetuning(self):
"""Resets the population count for collecting population statistics.
This method can be skipped if it is the first time to use the
fine-tuning mode. Otherwise, this method should be called before
starting the fine-tuning mode again.
"""
self.N = 0
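# Minimal usage sketch (illustrative; assumes a working Chainer install).
# The EmbedID tables are hypothetical stand-ins for the per-category
# parameters that would normally live in an enclosing chainer.Chain.
if __name__ == '__main__':
    n_ch, n_cat = 16, 10
    cbn = ConditionalBatchNormalization(n_ch, n_cat)
    gamma_embed = EmbedID(n_cat, n_ch,
                          initialW=numpy.ones((n_cat, n_ch), dtype=numpy.float32))
    beta_embed = EmbedID(n_cat, n_ch,
                         initialW=numpy.zeros((n_cat, n_ch), dtype=numpy.float32))
    x = numpy.random.randn(4, n_ch, 8, 8).astype(numpy.float32)
    c = numpy.random.randint(0, n_cat, size=4).astype(numpy.int32)
    y = cbn(x, gamma_embed(c), beta_embed(c))
    print(y.shape)  # (4, 16, 8, 8)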
|
11553509
|
import simplejson as json
from django import template
from django.core.serializers import serialize
from django.db.models import QuerySet
from django.utils.encoding import force_str
from django.utils.functional import Promise
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag
def get_data(row, column):
return row[column['name']]
def jsonify(value):
    def date_handler(obj):
        if hasattr(obj, 'isoformat'):
            return obj.isoformat()
        elif isinstance(obj, Promise):
            return force_str(obj)
    if isinstance(value, QuerySet):
        return serialize('json', value)
    return mark_safe(json.dumps(value, use_decimal=True, default=date_handler))
register.filter('jsonify', jsonify)
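# A quick sanity check (illustrative; assumes Django is importable) showing
# that date_handler serializes dates and lazy strings:
if __name__ == '__main__':
    import datetime
    print(jsonify({'when': datetime.date(2020, 1, 1), 'n': 3}))
    # expected: {"when": "2020-01-01", "n": 3}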
|
11553534
|
from time import sleep
from abc import abstractmethod
from asciimatics.widgets import (
Frame, ListBox, Layout, Divider, Text, Button, Label, FileBrowser, RadioButtons, CheckBox, QRCode
)
from asciimatics.exceptions import NextScene
from asciimatics.event import KeyboardEvent, MouseEvent
from asciimatics.scene import Scene
from asciimatics.effects import Effect
from shadowlands.credstick import SignTxError
from shadowlands.tui.effects.message_dialog import MessageDialog
from decimal import Decimal
import pyperclip
from shadowlands.tui.debug import debug, end_debug
import pdb
from shadowlands.sl_dapp.sl_frame import SLFrame, SLWaitFrame, AskClipboardFrame
from shadowlands.sl_dapp.uniswap_frame import UniswapFrame
from shadowlands.sl_transaction_frame import SLTransactionFrame
from shadowlands.block_callback_mixin import BlockCallbackMixin
from cached_property import cached_property
import logging
class SLDapp(BlockCallbackMixin):
def __init__(self, screen, scene, eth_node, config, block_callback_watcher, destroy_window=None):
self._config_key = self.__module__
self._screen = screen
self._scene = scene
self._node = eth_node
self._config = config
self._block_listeners = []
self._block_callback_watcher = block_callback_watcher
block_callback_watcher.register_dapp(self)
self.initialize()
if destroy_window is not None:
destroy_window.close()
@property
def node(self):
return self._node
@property
def w3(self):
return self._node.w3
@property
def config(self):
return self._config
@property
def config_key(self):
return self._config_key
@config_key.setter
def config_key(self, key):
self._config_key = key
@property
def config_properties(self):
return self.config.dapp_config(self.config_key)
def save_config_property(self, property_key, value):
properties = dict(self.config_properties)
properties[property_key] = value
self.config.set_dapp_config(self.config_key, properties)
def load_config_property(self, property_key):
return self.config_properties[property_key]
@abstractmethod
def initialize(self):
pass
def _new_block_callback(self):
'''
This is the private version of new_block_callback.
It calls the dapp callback and all the slframe callbacks.
'''
super(SLDapp, self)._new_block_callback()
for slframe in self._block_listeners:
slframe._new_block_callback()
# Used in SLFrame close().
def remove_block_listener(self, frame):
self._block_listeners.remove(frame)
if len(self._block_listeners) == 0:
self.quit()
# call before starting your thread
def show_wait_frame(self, message="Please wait a moment..."):
preferred_width= len(message) + 6
self.waitframe = SLWaitFrame(self, message, 3, preferred_width)
self._scene.add_effect( self.waitframe )
def hide_wait_frame(self):
try:
self._scene.remove_effect( self.waitframe )
        except Exception:
# We need to be able to call this method without consequence
pass
# This should be called in a new thread, so sleeping is an OK thing to do.
# Should give the time for the UI to finish its current pass and remove the
# wait frame.
sleep(1)
def add_sl_frame(self, frame):
self._scene.add_effect(frame)
return frame
def add_transaction_dialog(self, tx_fn, title="Sign & Send Transaction", tx_value=0, destroy_window=None, gas_limit=300000, **kwargs):
tx_dialog = SLTransactionFrame(self, 20, 59, tx_fn, destroy_window=destroy_window, title=title, gas_limit=gas_limit, tx_value=tx_value, **kwargs)
self.add_sl_frame(tx_dialog)
def add_uniswap_frame(self, token_address, action='buy', buy_amount='', sell_amount=''):
u_frame = UniswapFrame(self, 17, 46, token_address, action=action, buy_amount=buy_amount, sell_amount=sell_amount)
self.add_sl_frame(u_frame)
# for internal use.
def add_resend_dialog(self, tx_dict, title="Sign & Send"):
# This class is duck typed to web3.py contract functions.
class TransactionFunction():
            def __init__(self, initial_dict):
                self._dict = initial_dict
def buildTransaction(self, tx_dict):
self._dict['gasPrice'] = tx_dict['gasPrice']
self._dict['gas'] = tx_dict['gas']
self._dict['value'] = tx_dict['value']
self._dict['chainId'] = tx_dict['chainId']
return self._dict
tx_fn = TransactionFunction(tx_dict)
self._scene.add_effect(
SLTransactionFrame(
self, 20, 59, tx_fn, title=title,
gas_limit=tx_dict['gas'],
tx_value=self.node.w3.fromWei(tx_dict['value'], 'ether')
)
)
def add_message_dialog(self, message, **kwargs):
preferred_width= len(message) + 6
self._scene.add_effect( MessageDialog(self._screen, message, width=preferred_width, **kwargs))
def quit(self):
# remove self as new_block_listener
self._block_callback_watcher.unregister_dapp(self)
    def _update(self):
        pass
    def reset(self):
        pass
    def stop_frame(self):
        pass
class ExitDapp(Exception):
pass
class RunDapp(Exception):
pass
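# A minimal dapp sketch (illustrative; HelloDapp is hypothetical and would
# be constructed by the shadowlands runtime with the arguments SLDapp
# expects): subclasses implement initialize() and push frames or dialogs.
class HelloDapp(SLDapp):
    def initialize(self):
        # Called once from SLDapp.__init__.
        self.add_message_dialog("Hello from a minimal SLDapp subclass")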
|
11553555
|
import torch
def append_bias_ones(tensor):
"""Appends vector of ones to last dimension of tensor.
    For example, if the input is of shape [4, 6], then the output has shape
[4, 7] where the slice [:, -1] is a tensor of all ones.
"""
shape = list(tensor.shape[:-1]) + [1]
return torch.cat([tensor, tensor.new_ones(shape)], dim=-1)
def get_cov(a, b=None, scale=None):
"""Computes the empirical second moment of a 2D tensor
Reference:
- https://github.com/tensorflow/kfac/blob/master/kfac/python/ops/fisher_factors.py#L220
- https://arxiv.org/pdf/1602.01407.pdf#subsection.2.2
Args:
a (tensor): 2D tensor to compute second moment of using cov_a = a^T @ a.
b (tensor, optional): optional tensor of equal shape to a such that
cov_a = a^T @ b.
scale (float, optional): optional tensor to divide cov_a by. Default is
a.size(0).
Returns:
A square tensor representing the second moment of a.
"""
if len(a.shape) != 2:
raise ValueError('Input tensor must have 2 dimensions.')
if b is not None and a.shape != b.shape:
raise ValueError('Input tensors must have same shape. Got tensors of '
'shape {} and {}.'.format(a.shape, b.shape))
if scale is None:
scale = a.size(0)
if b is None:
cov_a = a.t() @ (a / scale)
return (cov_a + cov_a.t()) / 2.0
else:
return a.t() @ (b / scale)
def get_eigendecomp(tensor, clip=0.0, concat=True, symmetric=True):
"""Compute eigendecomposition of a block.
Args:
tensor: tensor of shape (x, x) to eigendecompose.
clip (float, optional): value to clip eigenvalues using
`torch.max(eigenvalues, clip)` such that the minimum eigenvalue is
`clip`. If `None`, no clipping is applied. (default: 0.0)
        concat (bool, optional): concatenate eigenvalues to the last dim
            of the eigenvectors. (default: True)
        symmetric (bool, optional): is `tensor` symmetric. (default: True)
Returns:
Tensor of shape (x, x+1) where (0:x, 0:x) are the eigenvectors and
(:, -1) is the eigenvalues if `concat=True` else `tuple(eigenvectors,
eigenvalues)`.
"""
if symmetric:
d, Q = torch.symeig(tensor, eigenvectors=True)
else:
d, Q = torch.eig(tensor, eigenvectors=True)
d = d[:, 0]
if clip is not None:
d = torch.max(d, d.new_tensor([clip]))
if concat:
return torch.cat([Q, d.unsqueeze(-1)], -1)
else:
return Q, d
def get_inverse(tensor, damping=None, symmetric=True):
"""Compute inverse of tensor.
Args:
tensor: block of shape (x, x) to invert
damping (float, optional): optionally add `identity * damping` to
`tensor` before inverting.
symmetric (bool, optional): if True, `tensor` is symmetric and Cholesky
decomposition will be used for computing the inverse. (default: True)
Returns:
The inverse of tensor
"""
if damping is not None:
d = tensor.new(tensor.shape[0]).fill_(damping)
tensor = tensor + torch.diag(d)
if symmetric:
return torch.cholesky_inverse(torch.cholesky(tensor))
else:
return torch.inverse(tensor)
def get_elementwise_inverse(vector, damping=None):
"""Computes the reciprocal of each non-zero element of v"""
if damping is not None:
vector = vector + damping
mask = vector != 0.0
reciprocal = vector.clone()
reciprocal[mask] = torch.reciprocal(reciprocal[mask])
return reciprocal
def reshape_data(data_list, batch_first=True, collapse_dims=False):
"""Concat input/output data and clear buffers
Args:
data_list (list): list of tensors of equal, arbitrary shape where the
batch_dim is either 0 or 1 depending on self.batch_first.
batch_first (bool, optional): is batch dim first. (default: True)
        collapse_dims (bool, optional): if True, collapse all but the last dim
together forming a 2D output tensor.
Returns:
Single tensor with all tensors from data_list concatenated across
        batch_dim. Guaranteed to be 2D if collapse_dims=True.
"""
d = torch.cat(data_list, dim=int(not batch_first))
if collapse_dims and len(d.shape) > 2:
d = d.view(-1, d.shape[-1])
return d
def get_triu(tensor):
"""Returns flattened upper triangle of 2D tensor"""
if len(tensor.shape) != 2:
raise ValueError('triu(tensor) requires tensor to be 2 dimensional')
if tensor.shape[0] > tensor.shape[1]:
raise ValueError('tensor cannot have more rows than columns')
idxs = torch.triu_indices(tensor.shape[0], tensor.shape[1],
device=tensor.device)
return tensor[idxs[0], idxs[1]]
def fill_triu(shape, triu_tensor):
"""Reconstruct symmetric 2D tensor from flattened upper triangle
Usage:
>>> x = tensor.new_empty([10, 10])
>>> triu_x = get_triu(x)
        >>> x_new = fill_triu([10, 10], triu_x)
>>> assert torch.equal(x, x_new) # true
Args:
shape (tuple): tuple(rows, cols) of size of output tensor
triu_tensor (tensor): flattened upper triangle of the tensor returned by
get_triu()
Returns:
Symmetric tensor with `shape` where the upper/lower triangles are filled
with the data in `triu_tensor`
"""
if len(shape) != 2:
raise ValueError('shape must be 2 dimensional')
rows, cols = shape
dst_tensor = triu_tensor.new_empty(shape)
idxs = torch.triu_indices(rows, cols, device=triu_tensor.device)
dst_tensor[idxs[0], idxs[1]] = triu_tensor
idxs = torch.triu_indices(rows, rows, 1, device=dst_tensor.device)
dst_tensor.transpose(0, 1)[idxs[0], idxs[1]] = dst_tensor[idxs[0], idxs[1]]
return dst_tensor
def update_running_avg(new, current, alpha=1.0):
"""Computes in-place running average
current = alpha*current + (1-alpha)*new
Args:
new (tensor): tensor to add to current average
current (tensor): tensor containing current average. Result will be
saved in place to this tensor.
alpha (float, optional): (default: 1.0)
"""
if alpha != 1:
current *= alpha / (1 - alpha)
current += new
current *= (1 - alpha)
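# A small end-to-end check (illustrative; targets the same torch version as
# the helpers above) tying several utilities together: estimate a damped
# inverse covariance and round-trip its upper triangle.
if __name__ == '__main__':
    a = torch.randn(32, 8)
    cov = get_cov(append_bias_ones(a))    # (9, 9) symmetric second moment
    inv = get_inverse(cov, damping=1e-3)  # Cholesky-based inverse
    tri = get_triu(cov)                   # flattened upper triangle
    assert torch.allclose(fill_triu(cov.shape, tri), cov)
    print(inv.shape)  # torch.Size([9, 9])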
|
11553622
|
from __future__ import division
from noise import snoise2
# TODO: add generator with Perlin noise, noise.pnoise2
LAKE_THRESHOLD = 0.35 # 0 to 1, fraction of water corners for water polygon
class SimplexIsland:
"""
Generate lands with Simplex noise.
"""
def __init__(self, islands_level=1.5, octaves=8, land_threshold=0):
self.octaves = octaves
self.land_threshold = land_threshold
self.islands_level = islands_level
def generate(self, map_obj):
        # assign water to corners according to Simplex noise
for corner in map_obj.corners:
if corner.border:
corner.water = True
corner.ocean = True
else:
p = corner.point
val = snoise2(p[0] * self.islands_level, p[1] * self.islands_level,
self.octaves, base=map_obj.seed)
corner.water = val < self.land_threshold
ocean_polys = []
for center in map_obj.centers:
if center.border:
center.water = True
center.ocean = True
ocean_polys.append(center)
else:
water_count = 0
for corner in center.corners:
if corner.water:
water_count += 1
center.water = water_count >= LAKE_THRESHOLD * len(center.corners)
# fill center.ocean
while ocean_polys:
center = ocean_polys.pop()
for neighbor in center.neighbors:
if neighbor.water and not neighbor.ocean:
neighbor.ocean = True
ocean_polys.append(neighbor)
# fill center.coast
for center in map_obj.centers:
if not center.water:
center.coast = any(neigh.ocean for neigh in center.neighbors)
else:
# fix corner.water
for corner in center.corners:
corner.water = True
# fill corner.coast and corner.ocean
for corner in map_obj.corners:
if corner.water:
corner.ocean = any(neigh.ocean for neigh in corner.touches)
if corner.ocean:
corner.coast = any(not neigh.water for neigh in corner.touches)
# fix noise "artifacts"
if all(not neigh.water for neigh in corner.touches):
corner.water = False
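# A standalone sketch (no map object needed) of the corner rule used above:
# sample snoise2 on a small grid and count water corners for the default
# threshold of 0.
if __name__ == '__main__':
    gen = SimplexIsland(islands_level=1.5, octaves=8, land_threshold=0)
    samples = [snoise2(x / 10 * gen.islands_level, y / 10 * gen.islands_level,
                       gen.octaves, base=42)
               for x in range(10) for y in range(10)]
    water = sum(val < gen.land_threshold for val in samples)
    print("water corners: {}/{}".format(water, len(samples)))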
|
11553624
|
from .data_dicts import LANGUAGE_DISTANCES
from typing import Dict, Tuple
TagTriple = Tuple[str, str, str]
_DISTANCE_CACHE: Dict[Tuple[TagTriple, TagTriple], int] = {}
DEFAULT_LANGUAGE_DISTANCE = LANGUAGE_DISTANCES["*"]["*"]
DEFAULT_SCRIPT_DISTANCE = LANGUAGE_DISTANCES["*_*"]["*_*"]
DEFAULT_TERRITORY_DISTANCE = 4
# Territory clusters used in territory matching:
# Maghreb (the western Arab world)
MAGHREB = {"MA", "DZ", "TN", "LY", "MR", "EH"}
# United States and its territories
US = {"AS", "GU", "MH", "MP", "PR", "UM", "US", "VI"}
# Special Autonomous Regions of China
CNSAR = {"HK", "MO"}
LATIN_AMERICA = {
"419",
# Central America
"013",
"BZ",
"CR",
"SV",
"GT",
"HN",
"MX",
"NI",
"PA",
# South America
"005",
"AR",
"BO",
"BR",
"CL",
"CO",
"EC",
"FK",
"GF",
"GY",
"PY",
"PE",
"SR",
"UY",
"VE",
}
# North and South America
AMERICAS = {
"019",
# Caribbean
"029",
"AI",
"AG",
"AW",
"BS",
"BB",
"VG",
"BQ",
"KY",
"CU",
"CW",
"DM",
"DO",
"GD",
"GP",
"HT",
"JM",
"MQ",
"MS",
"PR",
"SX",
"BL",
"KN",
"LC",
"MF",
"VC",
"TT",
"TC",
"VI",
# Northern America
"021",
"BM",
"CA",
"GL",
"PM",
"US",
# North America as a whole
"003",
} | LATIN_AMERICA
def tuple_distance_cached(desired: TagTriple, supported: TagTriple) -> int:
"""
Takes in triples of (language, script, territory), which can be derived by
'maximizing' a language tag. Returns a number from 0 to 135 indicating the
'distance' between these for the purposes of language matching.
"""
# First of all, if these are identical, return quickly:
if supported == desired:
return 0
# If we've already figured it out, return the cached distance.
if (desired, supported) in _DISTANCE_CACHE:
return _DISTANCE_CACHE[desired, supported]
else:
result = _tuple_distance(desired, supported)
_DISTANCE_CACHE[desired, supported] = result
return result
def _get2(dictionary: dict, key1: str, key2: str, default):
return dictionary.get(key1, {}).get(key2, default)
def _tuple_distance(desired: TagTriple, supported: TagTriple) -> int:
desired_language, desired_script, desired_territory = desired
supported_language, supported_script, supported_territory = supported
distance = 0
if desired_language != supported_language:
distance += _get2(
LANGUAGE_DISTANCES,
desired_language,
supported_language,
DEFAULT_LANGUAGE_DISTANCE,
)
desired_script_pair = f"{desired_language}_{desired_script}"
supported_script_pair = f"{supported_language}_{supported_script}"
if desired_script != supported_script:
# Scripts can match other scripts, but only when paired with a
# language. For example, there is no reason to assume someone who can
# read 'Latn' can read 'Cyrl', but there is plenty of reason to believe
# someone who can read 'sr-Latn' can read 'sr-Cyrl' because Serbian is
# a language written in two scripts.
distance += _get2(
LANGUAGE_DISTANCES,
desired_script_pair,
supported_script_pair,
DEFAULT_SCRIPT_DISTANCE,
)
if desired_territory != supported_territory:
# The rules for matching territories are too weird to implement the
# general case efficiently. Instead of implementing all the possible
# match rules the XML could define, instead we just reimplement the
# rules of CLDR 36.1 here in code.
tdist = DEFAULT_TERRITORY_DISTANCE
if desired_script_pair == supported_script_pair:
if desired_language == "ar":
if (desired_territory in MAGHREB) != (supported_territory in MAGHREB):
tdist = 5
elif desired_language == "en":
if (desired_territory == "GB") and (supported_territory not in US):
tdist = 3
elif (desired_territory not in US) and (supported_territory == "GB"):
tdist = 3
elif (desired_territory in US) != (supported_territory in US):
tdist = 5
# This is not a rule that's spelled out in CLDR, but is implied by things
# about territory containment mentioned in other standards. Numeric values
# for territories, like '003', represent broad regions that contain more
# specific territories.
#
# 419 is the numeric value most often seen in language codes, particularly
# 'es-419' for Latin American Spanish. If you have a language code that
# differs only in that its territory is more specific, like 'es-PY', it should
# be closer to a supported 'es-419' than anything with a territory difference.
#
# We can implement this for 419 without becoming responsible for keeping up
# with which countries/territories/regions contain others in the general case.
elif desired_territory in LATIN_AMERICA and supported_territory == "419":
tdist = 1
elif desired_language == "es" or desired_language == "pt":
if (desired_territory in AMERICAS) != (supported_territory in AMERICAS):
tdist = 5
elif desired_script_pair == "zh_Hant":
if (desired_territory in CNSAR) != (supported_territory in CNSAR):
tdist = 5
distance += tdist
return distance
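# Illustrative calls (defined but not executed on import; exact distances
# depend on the CLDR-derived tables in data_dicts, so none are asserted):
def _example_distances():
    desired = ("es", "Latn", "PY")
    for supported in (("es", "Latn", "419"), ("es", "Latn", "ES"), ("en", "Latn", "US")):
        print(supported, tuple_distance_cached(desired, supported))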
|
11553625
|
from twisted.names import client, server, dns
from oonib.config import config
class DNSTestHelper(server.DNSServerFactory):
def __init__(self, authorities=None,
caches=None, clients=None,
verbose=0):
try:
host, port = config.helpers.dns.split(':')
port = int(port)
# XXX remove this when we have configuration file versioning.
# https://github.com/TheTorProject/ooni-probe/issues/190
        except Exception:
host, port = '8.8.8.8', 53
resolver = client.Resolver(servers=[(host, port)])
server.DNSServerFactory.__init__(self, authorities=authorities,
caches=caches, clients=[resolver],
verbose=verbose)
def handleQuery(self, message, protocol, address):
server.DNSServerFactory.handleQuery(self, message, protocol, address)
class DNSResolverDiscovery(server.DNSServerFactory):
"""
This test helper is used to discover the IP address of the resolver being
used by a ooniprobe client.
To use it you should set it up on a machine that has been delegated as the
authoritative name server for a specific subdomain.
You can do so by adding the following to your zone file:
mysubdomain IN NS ns.mysubdomain.example.org.
ns.mysubdomain.example.org IN A 10.42.42.42
Replace 10.42.42.42 with the IP address of the machine running oonib.
You will then be able to perform A lookups on subdomains of
mysubdomain.example.org and retrieve in the query answer section the IP
address of the resolver that was used for performing the request.
"""
def handleQuery(self, message, protocol, address):
query = message.queries[0]
if query.type == dns.A:
ans = dns.RRHeader(bytes(query.name),
payload=dns.Record_A(bytes(address[0]), 0))
message.answers = [ans]
message.answer = 1
self.sendReply(protocol, message, address)
|
11553642
|
class Juicer(object):
"""
    Base class for all juicers.
"""
def __init__(self, name):
self.name = name
def juice(self, target_juice):
"""
This is the main function that processes the provided
target object.
:param target_juice: The target object which needs to be processed by the juicer.
        :return: Depends on the specific juicer.
"""
raise NotImplementedError("Function not implemented.")
def getName(self):
"""
This function returns the name of this juicer.
:return: String representing name of this juicer.
"""
raise NotImplementedError("Function not implemented.")
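# A minimal concrete juicer (illustrative) showing the intended subclassing
# pattern, e.g. UpperCaseJuicer("upper").juice("pulp") -> "PULP":
class UpperCaseJuicer(Juicer):
    """Toy juicer that upper-cases its target."""
    def juice(self, target_juice):
        return str(target_juice).upper()
    def getName(self):
        return self.name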
|
11553654
|
import uuid
import json
import os
import pytest
import postgraas_server.backends.docker.postgres_instance_driver as pid
import postgraas_server.backends.postgres_cluster.postgres_cluster_driver as pgcd
import postgraas_server.configuration as configuration
from postgraas_server.backends.exceptions import PostgraasApiException
from postgraas_server.create_app import create_app
from postgraas_server.management_resources import DBInstance
DOCKER_CONFIG = {
"metadb":
{
"db_name": "postgraas",
"db_username": "postgraas",
"db_pwd": "<PASSWORD>",
"host": "localhost",
"port": "54321"
},
"backend":
{
"type": "docker"
}
}
CLUSTER_CONFIG = {
"metadb":
{
"db_name": "postgraas",
"db_username": "postgraas",
"db_pwd": "<PASSWORD>",
"host": "localhost",
"port": "54321"
},
"backend":
{
"type": "pg_cluster",
"host": os.environ.get('PGHOST', 'localhost'),
"port": os.environ.get('PGPORT', '5432'),
"database": os.environ.get('PGDATABASE', 'postgres'),
"username": os.environ.get('PGUSER', 'postgres'),
"password": os.environ.get('PGPASSWORD', '<PASSWORD>'),
}
}
CONFIGS = {
'docker': DOCKER_CONFIG,
'pg_cluster': CLUSTER_CONFIG,
}
def remove_digits(s):
return ''.join(c for c in s if not c.isdigit())
def delete_all_test_postgraas_container():
c = pid._docker_client()
for container in c.containers.list():
if container.name.startswith("tests_postgraas_"):
container.remove(force=True)
def delete_all_test_database_and_user(config):
con = pgcd._create_pg_connection(config)
cur = con.cursor()
cur.execute(
'''SELECT d.datname, u.usename
FROM pg_database d
JOIN pg_user u ON (d.datdba = u.usesysid);''')
for db in cur:
if db[0].startswith("tests_postgraas_"):
delete_test_database_and_user(db[0], db[1], config)
cur.execute(
'''SELECT u.usename
FROM pg_user u;''')
for db in cur:
if db[0].startswith("tests_postgraas_"):
pgcd.delete_user(db[0], config)
def delete_test_database_and_user(db_name, username, config):
pgcd.delete_database(db_name, config)
pgcd.delete_user(username, config)
@pytest.fixture(params=['docker', 'pg_cluster'])
def parametrized_setup(request, tmpdir):
from postgraas_server.management_resources import db
cfg = tmpdir.join('config')
with open(cfg.strpath, "w") as fp:
json.dump(CONFIGS[request.param], fp)
config = configuration.get_config(cfg.strpath)
this_app = create_app(config)
this_app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite://"
this_app.use_reloader = False
this_app.config['TESTING'] = True
ctx = this_app.app_context()
ctx.push()
db.create_all()
username, db_name = str(uuid.uuid4()).replace('-', '_'), str(uuid.uuid4()).replace('-', '_')
request.cls.this_app = this_app
request.cls.app_client = this_app.test_client()
request.cls.db_name = remove_digits(db_name)
request.cls.username = remove_digits(username)
request.cls.backend = request.param
try:
yield
except Exception:
pass
if request.param == 'docker':
delete_all_test_postgraas_container()
elif request.param == 'pg_cluster':
delete_all_test_database_and_user(config['backend'])
db.drop_all()
ctx.pop()
@pytest.mark.usefixtures('parametrized_setup')
class TestPostgraasApi():
def test_create_and_delete_postgres_instance(self):
db_credentials = {
"db_name": 'tests_postgraas_instance_name',
"db_username": 'tests_postgraas_db_username',
"db_pwd": '<PASSWORD>',
"host": pid.get_hostname(),
"port": pid.get_open_port()
}
db_entry = DBInstance(
postgraas_instance_name=db_credentials['db_name'],
db_name=db_credentials['db_name'],
username=db_credentials['db_username'],
password="",
hostname=db_credentials['host'],
port=db_credentials['port']
)
db_entry.container_id = self.this_app.postgraas_backend.create(db_entry, db_credentials)
self.this_app.postgraas_backend.delete(db_entry)
assert True
def test_create_postgraas_twice(self):
db_credentials = {
"db_name": 'tests_postgraas_instance_name',
"db_username": 'tests_postgraas_db_username',
"db_pwd": '<PASSWORD>',
"host": pid.get_hostname(),
"port": pid.get_open_port()
}
db_entry = DBInstance(
postgraas_instance_name=db_credentials['db_name'],
db_name=db_credentials['db_name'],
username=db_credentials['db_username'],
password="",
hostname=db_credentials['host'],
port=db_credentials['port']
)
db_entry.container_id = self.this_app.postgraas_backend.create(db_entry, db_credentials)
with pytest.raises(PostgraasApiException) as excinfo:
db_entry.container_id = self.this_app.postgraas_backend.create(db_entry, db_credentials)
if self.backend == "pg_cluster":
assert excinfo.value.message == 'db or user already exists'
elif self.backend == "docker":
assert excinfo.value.message == 'Container exists already'
self.this_app.postgraas_backend.delete(db_entry)
assert True
@pytest.mark.xfail(reason='Username now valid due to hardening against SQL injections.')
def test_create_postgraas_bad_username(self):
db_credentials = {
"db_name": 'tests_postgraas_instance_name',
"db_username": 'tests_postgraas_db-bad username',
"db_pwd": '<PASSWORD>',
"host": pid.get_hostname(),
"port": pid.get_open_port()
}
db_entry = DBInstance(
postgraas_instance_name=db_credentials['db_name'],
db_name=db_credentials['db_name'],
username=db_credentials['db_username'],
password="",
hostname=db_credentials['host'],
port=db_credentials['port']
)
if self.backend == "pg_cluster":
with pytest.raises(PostgraasApiException) as excinfo:
db_entry.container_id = self.this_app.postgraas_backend.create(db_entry, db_credentials)
self.this_app.postgraas_backend.delete(db_entry)
assert 'syntax error at or near "-"' in excinfo.value.message
def test_delete_nonexisting_db(self):
db_credentials = {
"db_name": 'tests_postgraas_instance_name',
"db_username": 'tests_postgraas_db-bad username',
"db_pwd": '<PASSWORD>',
"host": pid.get_hostname(),
"port": pid.get_open_port()
}
db_entry = DBInstance(
postgraas_instance_name=db_credentials['db_name'],
db_name=db_credentials['db_name'],
username=db_credentials['db_username'],
password="",
hostname=db_credentials['host'],
port=db_credentials['port'],
container_id="4n8nz48az49prdmdmprmr4doesnotexit"
)
with pytest.raises(PostgraasApiException) as excinfo:
db_entry.container_id = self.this_app.postgraas_backend.delete(db_entry)
assert 'does not exist' in excinfo.value.message
|
11553683
|
import os
import os.path
import re
import shutil
import logging
import yaml
from piecrust.importing.base import FileWalkingImporter
logger = logging.getLogger(__name__)
class PieCrust1Importer(FileWalkingImporter):
name = 'piecrust1'
description = "Imports content from a PieCrust 1 website."
requires_website = False
def setupParser(self, parser, app):
super(PieCrust1Importer, self).setupParser(parser, app)
parser.add_argument('root_dir', nargs='?',
help="The root directory of the PieCrust 1 website.")
parser.add_argument('--upgrade', action='store_true',
help="Upgrade the current website in place.")
def importWebsite(self, app, args):
if args.root_dir and args.upgrade:
raise Exception("Can't specifiy both a root directory and `--upgrade`.")
if args.root_dir is None and not args.upgrade:
raise Exception("Need to specify either a root directory or `--upgrade`.")
if app.root_dir is None and not args.upgrade:
raise Exception("Need to run the import from inside a PieCrust 2 "
"website. Use `--upgrade` to upgrade from inside "
"a PieCrust 1 website.")
if app.root_dir is not None and args.upgrade:
raise Exception("Already in a PieCrust 2 website. Specify the "
"PieCrust 1 website to import from.")
src_root_dir = os.getcwd() if args.upgrade else args.root_dir
out_root_dir = src_root_dir if args.upgrade else app.root_dir
logger.debug("Importing PieCrust 1 site from: %s" % src_root_dir)
exclude = args.exclude or []
exclude += ['_cache', '_counter']
self._startWalk(src_root_dir, exclude, out_root_dir, args.upgrade)
if args.upgrade:
self._cleanEmptyDirectories(src_root_dir)
logger.info("The PieCrust website was successfully imported.")
def _importFile(self, full_fn, rel_fn, out_root_dir, is_move):
logger.debug("- %s" % rel_fn)
dest_path = rel_fn
convert_func = None
if rel_fn.replace('\\', '/') == '_content/config.yml':
dest_path = 'config.yml'
convert_func = self.convertConfig
elif rel_fn.startswith('_content'):
dest_path = rel_fn[len('_content/'):]
fn_dirname = os.path.dirname(rel_fn)
if not fn_dirname.endswith('-assets'):
convert_func = self.convertPage
else:
dest_path = 'assets/' + rel_fn
logger.debug(" %s -> %s" % (rel_fn, dest_path))
full_dest_path = os.path.join(out_root_dir, dest_path)
os.makedirs(os.path.dirname(full_dest_path), 0o755, True)
if convert_func is None:
if is_move:
shutil.move(full_fn, full_dest_path)
else:
shutil.copy2(full_fn, full_dest_path)
else:
with open(full_fn, 'r', encoding='utf8') as fp:
content = fp.read()
converted_content = convert_func(content)
with open(full_dest_path, 'w', encoding='utf8') as fp:
fp.write(converted_content)
if converted_content != content:
logger.warning("'%s' has been modified. The original version "
"has been kept for reference." % rel_fn)
shutil.copy2(full_fn, full_dest_path + '.orig')
if is_move:
os.remove(full_fn)
def _cleanEmptyDirectories(self, root_dir):
for item in os.listdir(root_dir):
if not os.path.isdir(item):
continue
file_count = 0
item_path = os.path.join(root_dir, item)
for _, __, filenames in os.walk(item_path):
file_count += len(filenames)
if file_count == 0:
logger.debug("Deleting empty directory: %s" % item)
shutil.rmtree(item_path)
def convertConfig(self, content):
config = yaml.safe_load(content)
sitec = config.setdefault('site', {})
if 'templates_dirs' in sitec:
tdc = sitec['templates_dirs']
cl = len('_content/')
if isinstance(tdc, str) and re.match(r'^_content[/\\]', tdc):
sitec['templates_dirs'] = tdc[cl:]
elif isinstance(tdc, list):
sitec['templates_dirs'] = list(map(
lambda d: d[cl:] if re.match(r'^_content[/\\]', d) else d,
tdc))
jinjac = config.setdefault('jinja', {})
jinjac['twig_compatibility'] = True
if 'baker' in config:
if 'skip_patterns' in config['baker']:
config['baker']['ignore'] = config['baker']['skip_patterns']
del config['baker']['skip_patterns']
if 'force_patterns' in config['baker']:
config['baker']['force'] = config['baker']['force_patterns']
del config['baker']['force_patterns']
content = yaml.dump(config, default_flow_style=False,
allow_unicode=True)
return content
def convertPage(self, content):
return content
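# A quick look (illustrative) at what convertConfig does to a PieCrust 1
# config; convertConfig doesn't touch instance state, so it is called
# unbound here for a self-contained demo:
if __name__ == '__main__':
    old = ("site:\n"
           "  templates_dirs: _content/templates\n"
           "baker:\n"
           "  skip_patterns: ['*.tmp']\n")
    print(PieCrust1Importer.convertConfig(None, old))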
|
11553686
|
from contextlib import contextmanager
from ctypes import cast, c_void_p, POINTER, create_string_buffer
from os import fstat, stat
from . import ffi
from .ffi import (
ARCHIVE_EOF, OPEN_CALLBACK, READ_CALLBACK, CLOSE_CALLBACK, SEEK_CALLBACK,
NO_OPEN_CB, NO_CLOSE_CB, page_size,
)
from .entry import ArchiveEntry, new_archive_entry
class ArchiveRead:
def __init__(self, archive_p):
self._pointer = archive_p
def __iter__(self):
"""Iterates through an archive's entries.
"""
archive_p = self._pointer
read_next_header2 = ffi.read_next_header2
with new_archive_entry() as entry_p:
entry = ArchiveEntry(archive_p, entry_p)
            while True:
r = read_next_header2(archive_p, entry_p)
if r == ARCHIVE_EOF:
return
yield entry
@contextmanager
def new_archive_read(format_name='all', filter_name='all', passphrase=None):
"""Creates an archive struct suitable for reading from an archive.
Returns a pointer if successful. Raises ArchiveError on error.
"""
archive_p = ffi.read_new()
try:
if passphrase:
if not isinstance(passphrase, bytes):
passphrase = passphrase.encode('utf-8')
try:
ffi.read_add_passphrase(archive_p, passphrase)
except AttributeError:
raise NotImplementedError(
f"the libarchive being used (version {ffi.version_number()}, "
f"path {ffi.libarchive_path}) doesn't support encryption"
)
ffi.get_read_filter_function(filter_name)(archive_p)
ffi.get_read_format_function(format_name)(archive_p)
yield archive_p
finally:
ffi.read_free(archive_p)
@contextmanager
def custom_reader(
read_func, format_name='all', filter_name='all',
open_func=None, seek_func=None, close_func=None,
block_size=page_size, archive_read_class=ArchiveRead, passphrase=None,
):
"""Read an archive using a custom function.
"""
open_cb = OPEN_CALLBACK(open_func) if open_func else NO_OPEN_CB
read_cb = READ_CALLBACK(read_func)
close_cb = CLOSE_CALLBACK(close_func) if close_func else NO_CLOSE_CB
seek_cb = SEEK_CALLBACK(seek_func)
with new_archive_read(format_name, filter_name, passphrase) as archive_p:
if seek_func:
ffi.read_set_seek_callback(archive_p, seek_cb)
ffi.read_open(archive_p, None, open_cb, read_cb, close_cb)
yield archive_read_class(archive_p)
@contextmanager
def fd_reader(
fd, format_name='all', filter_name='all', block_size=4096, passphrase=None,
):
"""Read an archive from a file descriptor.
"""
with new_archive_read(format_name, filter_name, passphrase) as archive_p:
try:
block_size = fstat(fd).st_blksize
except (OSError, AttributeError): # pragma: no cover
pass
ffi.read_open_fd(archive_p, fd, block_size)
yield ArchiveRead(archive_p)
@contextmanager
def file_reader(
path, format_name='all', filter_name='all', block_size=4096, passphrase=None,
):
"""Read an archive from a file.
"""
with new_archive_read(format_name, filter_name, passphrase) as archive_p:
try:
block_size = stat(path).st_blksize
except (OSError, AttributeError): # pragma: no cover
pass
ffi.read_open_filename_w(archive_p, path, block_size)
yield ArchiveRead(archive_p)
@contextmanager
def memory_reader(buf, format_name='all', filter_name='all', passphrase=None):
"""Read an archive from memory.
"""
with new_archive_read(format_name, filter_name, passphrase) as archive_p:
ffi.read_open_memory(archive_p, cast(buf, c_void_p), len(buf))
yield ArchiveRead(archive_p)
@contextmanager
def stream_reader(
stream, format_name='all', filter_name='all', block_size=page_size,
passphrase=None,
):
"""Read an archive from a stream.
The `stream` object must support the standard `readinto` method.
If `stream.seekable()` returns `True`, then an appropriate seek callback is
passed to libarchive.
"""
buf = create_string_buffer(block_size)
buf_p = cast(buf, c_void_p)
def read_func(archive_p, context, ptrptr):
# readinto the buffer, returns number of bytes read
length = stream.readinto(buf)
# write the address of the buffer into the pointer
ptrptr = cast(ptrptr, POINTER(c_void_p))
ptrptr[0] = buf_p
# tell libarchive how much data was written into the buffer
return length
def seek_func(archive_p, context, offset, whence):
stream.seek(offset, whence)
# tell libarchive the current position
return stream.tell()
open_cb = NO_OPEN_CB
read_cb = READ_CALLBACK(read_func)
close_cb = NO_CLOSE_CB
seek_cb = SEEK_CALLBACK(seek_func)
with new_archive_read(format_name, filter_name, passphrase) as archive_p:
if stream.seekable():
ffi.read_set_seek_callback(archive_p, seek_cb)
ffi.read_open(archive_p, None, open_cb, read_cb, close_cb)
yield ArchiveRead(archive_p)
seekable_stream_reader = stream_reader
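# Typical usage sketch (illustrative; `path` would point at a real archive
# on disk):
def _example_list_entries(path):
    with file_reader(path) as archive:
        for entry in archive:
            print(entry.pathname)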
|
11553712
|
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_text as text
config = tfds.translate.wmt.WmtConfig(
description="WMT 2019 translation task dataset.",
version="0.0.3",
language_pair=("zh", "en"),
subsets={
tfds.Split.TRAIN: ["newscommentary_v13"],
tfds.Split.VALIDATION: ["newsdev2017"],
}
)
builder = tfds.builder("wmt_translate", config=config)
print(builder.info)
builder.download_and_prepare()
datasets = builder.as_dataset(as_supervised=True)
train_dataset = datasets['train']
val_dataset = datasets['validation']
for zh, en in train_dataset.take(5):
print('zh: {}'.format(zh.numpy()))
print('en: {}'.format(en.numpy()))
# If you need NumPy arrays
# np_datasets = tfds.as_numpy(datasets)
|
11553748
|
import asyncio
import websockets
from .majsoul_pb2 import Wrapper
class MSJRpcChannel(object):
def __init__(self, endpoint):
self._endpoint = endpoint
self._req_events = {}
        self._new_req_idx = 1
self._res = {}
self._hooks = {}
def add_hook(self, msg_type, hook):
        if msg_type not in self._hooks:
self._hooks[msg_type] = []
self._hooks[msg_type].append(hook)
def unwrap(self, wrapped):
wrapper = Wrapper()
wrapper.ParseFromString(wrapped)
return wrapper
def wrap(self, name, data):
wrapper = Wrapper()
wrapper.name = name
wrapper.data = data
return wrapper.SerializeToString()
async def connect(self):
self._ws = await websockets.connect(self._endpoint,
origin = "https://majsoul.union-game.com")
self._msg_dispatcher = asyncio.create_task(self.dispatch_msg())
async def close(self):
self._msg_dispatcher.cancel()
try:
await self._msg_dispatcher
except asyncio.CancelledError:
pass
finally:
await self._ws.close()
async def dispatch_msg(self):
while True:
msg = await self._ws.recv()
type_byte = msg[0]
if type_byte == 1: # NOTIFY
wrapper = self.unwrap(msg[1:])
for hook in self._hooks.get(wrapper.name, []):
asyncio.create_task(hook(wrapper.data))
elif type_byte == 2: # REQUEST
wrapper = self.unwrap(msg[3:])
for hook in self._hooks.get(wrapper.name, []):
asyncio.create_task(hook(wrapper.data))
elif type_byte == 3: #RESPONSE
idx = int.from_bytes(msg[1:3], 'little')
                if idx not in self._req_events:
continue
self._res[idx] = msg
self._req_events[idx].set()
async def send_request(self, name, msg):
idx = self._new_req_idx
self._new_req_idx = (self._new_req_idx + 1) % 60007
wrapped = self.wrap(name, msg)
pkt = b'\x02' + idx.to_bytes(2, 'little') + wrapped
evt = asyncio.Event()
self._req_events[idx] = evt
await self._ws.send(pkt)
await evt.wait()
        if idx not in self._res:
return None
res = self._res[idx]
del self._res[idx]
if idx in self._req_events:
del self._req_events[idx]
body = self.unwrap(res[3:])
return body.data
class MSJRpcService(object): # Interface class for all MSJRpc services
def __init__(self, channel):
self._channel = channel
def get_package_name(self):
raise NotImplementedError
def get_service_name(self):
raise NotImplementedError
def get_req_class(self, method):
raise NotImplementedError
def get_res_class(self, method):
raise NotImplementedError
async def call_method(self, method, req):
msg = req.SerializeToString()
name = '.{}.{}.{}'.format(self.get_package_name(), self.get_service_name(), method)
res_msg = await self._channel.send_request(name, msg)
res_class = self.get_res_class(method)
res = res_class()
res.ParseFromString(res_msg)
return res
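# Connection sketch (illustrative; the endpoint and message name below are
# placeholders, and a reachable server is required). A caller would drive
# it with asyncio.run(_example_channel('wss://...')).
async def _example_channel(endpoint):
    channel = MSJRpcChannel(endpoint)
    async def on_notify(data):
        print('notification payload bytes:', len(data))
    channel.add_hook('.lq.SomeNotify', on_notify)  # placeholder message name
    await channel.connect()
    await channel.close()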
|
11553769
|
from datadog import initialize, api
options = {
'api_key': '<DATADOG_API_KEY>',
'app_key': '<DATADOG_APPLICATION_KEY>'
}
initialize(**options)
# Cancel all downtimes with scope
api.Downtime.cancel_downtime_by_scope('env:testing')
|
11553784
|
import os
from abc import ABC, abstractmethod
from collections import OrderedDict, namedtuple
from typing import List
from spotty.config.container_config import ContainerConfig
from spotty.config.project_config import ProjectConfig
from spotty.config.tmp_dir_volume import TmpDirVolume
from spotty.config.validation import DEFAULT_CONTAINER_NAME, is_subdir
from spotty.config.abstract_instance_volume import AbstractInstanceVolume
from spotty.deployment.abstract_cloud_instance.file_structure import INSTANCE_SPOTTY_TMP_DIR, CONTAINERS_TMP_DIR
from spotty.utils import filter_list
VolumeMount = namedtuple('VolumeMount', ['name', 'host_path', 'mount_path', 'mode', 'hidden'])
class AbstractInstanceConfig(ABC):
def __init__(self, instance_config: dict, project_config: ProjectConfig):
self._project_config = project_config
# set instance parameters
self._name = instance_config['name']
self._provider_name = instance_config['provider']
self._params = self._validate_instance_params(instance_config['parameters'])
# get container config
container_configs = filter_list(project_config.containers, 'name', self.container_name)
if not container_configs:
raise ValueError('Container configuration with the name "%s" not found.' % self.container_name)
self._container_config = ContainerConfig(container_configs[0])
# get volumes
self._volumes = self._get_volumes()
# get container volume mounts
self._volume_mounts = self._get_volume_mounts(self._volumes)
# get the host project directory
self._host_project_dir = self._get_host_project_dir(self._volume_mounts)
@abstractmethod
def _validate_instance_params(self, params: dict) -> dict:
"""Validates instance parameters and fill missing ones with the default values."""
raise NotImplementedError
@abstractmethod
def _get_instance_volumes(self) -> List[AbstractInstanceVolume]:
"""Returns specific to the provider volumes that should be mounted on the host OS."""
raise NotImplementedError
@property
def project_config(self) -> ProjectConfig:
return self._project_config
@property
def container_config(self) -> ContainerConfig:
return self._container_config
@property
@abstractmethod
def user(self) -> str:
raise NotImplementedError
@property
def name(self) -> str:
"""Name of the instance."""
return self._name
@property
def provider_name(self):
"""Provider name."""
return self._provider_name
@property
def container_name(self) -> str:
return self._params['containerName'] if self._params['containerName'] else DEFAULT_CONTAINER_NAME
@property
def full_container_name(self) -> str:
"""A container name that is used in the "docker run" command."""
return ('spotty-%s-%s-%s' % (self.project_config.project_name, self.name, self.container_name)).lower()
@property
def docker_data_root(self) -> str:
"""Data root directory for Docker daemon."""
return self._params['dockerDataRoot']
@property
def local_ssh_port(self) -> int:
"""Local SSH port to connect to the instance (in case of a tunnel)."""
return self._params['localSshPort']
@property
def commands(self) -> str:
"""Commands that should be run once an instance is started."""
return self._params['commands']
@property
def host_project_dir(self):
"""Project directory on the host OS."""
return self._host_project_dir
@property
def volumes(self) -> List[AbstractInstanceVolume]:
return self._volumes
@property
def volume_mounts(self) -> List[VolumeMount]:
return self._volume_mounts
@property
def dockerfile_path(self):
"""Dockerfile path on the host OS."""
dockerfile_path = self.container_config.file
if dockerfile_path:
dockerfile_path = self.host_project_dir + '/' + dockerfile_path
return dockerfile_path
@property
def docker_context_path(self):
"""Docker build's context path on the host OS."""
dockerfile_path = self.dockerfile_path
if not dockerfile_path:
return ''
return os.path.dirname(dockerfile_path)
@property
def host_container_dir(self):
"""A temporary directory on the host OS that contains container-related files and directories."""
return '%s/%s' % (CONTAINERS_TMP_DIR, self.full_container_name)
@property
def host_logs_dir(self):
"""A directory mainly for the "spotty run" command logs."""
return self.host_container_dir + '/logs'
@property
def host_volumes_dir(self):
"""A directory with temporary volumes. If there is a Volume Mount in the configuration file
that doesn't have a corresponding instance volume, a temporary directory will be created
and attached to the container.
"""
return self.host_container_dir + '/volumes'
def _get_volumes(self) -> List[AbstractInstanceVolume]:
"""Returns volumes that should be mounted on the host OS."""
volumes = self._get_instance_volumes()
# create temporary volumes for the volume mounts that don't have corresponding
# volumes in the instance configuration
instance_volume_names = set(volume.name for volume in volumes)
for container_volume in self.container_config.volume_mounts:
if container_volume['name'] not in instance_volume_names:
volumes.append(TmpDirVolume(volume_config={
'name': container_volume['name'],
'parameters': {'path': '%s/%s' % (self.host_volumes_dir, container_volume['name'])}
}))
return volumes
def _get_volume_mounts(self, volumes: List[AbstractInstanceVolume]) \
-> List[VolumeMount]:
"""Returns container volume mounts and a path to the project directory on the host OS."""
# get mount directories for the volumes
host_paths = OrderedDict([(volume.name, volume.host_path) for volume in volumes])
# get container volumes mapping
volume_mounts = []
for container_volume in self.container_config.volume_mounts:
volume_mounts.append(VolumeMount(
name=container_volume['name'],
host_path=host_paths[container_volume['name']],
mount_path=container_volume['mountPath'],
mode='rw',
hidden=False,
))
return volume_mounts
def _get_host_project_dir(self, volume_mounts: List[VolumeMount]) -> str:
"""Returns the host project directory."""
host_project_dir = None
for volume_mount in sorted(volume_mounts, key=lambda x: len(x.mount_path), reverse=True):
if is_subdir(self.container_config.project_dir, volume_mount.mount_path):
# the project directory is a subdirectory of a Volume Mount directory
project_subdir = os.path.relpath(self.container_config.project_dir, volume_mount.mount_path)
host_project_dir = os.path.normpath(volume_mount.host_path + '/' + project_subdir)
break
# this should not be the case as the volume mount for the project directory should be added automatically
# if it doesn't exist in the configuration
        assert host_project_dir is not None, 'A volume mount that contains the project directory was not found.'
return host_project_dir
|
11553832
|
import os
import sys
import time
import pdb
import gc
import numpy as np
import faiss
import argparse
import resource
import benchmark.datasets
from benchmark.datasets import DATASETS
from benchmark.plotting import eval_range_search
# `index.add` below calls `sanitize`; the helper is reproduced here from the
# faiss benchmark utilities (assumed definition) so the script is
# self-contained: faiss expects contiguous float32 input.
def sanitize(x):
    return np.ascontiguousarray(x, dtype='float32')
####################################################################
# Index building functions
####################################################################
def two_level_clustering(xt, nc1, nc2, clustering_niter=25, spherical=False):
d = xt.shape[1]
print(f"2-level clustering of {xt.shape} nb clusters = {nc1}*{nc2} = {nc1*nc2}")
print("perform coarse training")
km = faiss.Kmeans(
d, nc1, verbose=True, niter=clustering_niter,
max_points_per_centroid=2000,
spherical=spherical
)
km.train(xt)
print()
# coarse centroids
centroids1 = km.centroids
print("assigning the training set")
t0 = time.time()
_, assign1 = km.assign(xt)
bc = np.bincount(assign1, minlength=nc1)
print(f"done in {time.time() - t0:.2f} s. Sizes of clusters {min(bc)}-{max(bc)}")
o = assign1.argsort()
del km
# train sub-clusters
i0 = 0
c2 = []
t0 = time.time()
for c1 in range(nc1):
print(f"[{time.time() - t0:.2f} s] training sub-cluster {c1}/{nc1}\r", end="", flush=True)
i1 = i0 + bc[c1]
subset = o[i0:i1]
assert np.all(assign1[subset] == c1)
km = faiss.Kmeans(d, nc2, spherical=spherical)
xtsub = xt[subset]
km.train(xtsub)
c2.append(km.centroids)
i0 = i1
print(f"done in {time.time() - t0:.2f} s")
return np.vstack(c2)
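# Illustrative sanity check for the helper above (needs faiss; defined but
# not called by default): 4*4 = 16 centroids from random training data.
def _example_two_level_clustering():
    xt = np.random.rand(10000, 16).astype('float32')
    centroids = two_level_clustering(xt, 4, 4, clustering_niter=5)
    assert centroids.shape == (16, 16)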
def unwind_index_ivf(index):
if isinstance(index, faiss.IndexPreTransform):
assert index.chain.size() == 1
vt = faiss.downcast_VectorTransform(index.chain.at(0))
index_ivf, vt2 = unwind_index_ivf(faiss.downcast_index(index.index))
assert vt2 is None
return index_ivf, vt
if hasattr(faiss, "IndexRefine") and isinstance(index, faiss.IndexRefine):
return unwind_index_ivf(faiss.downcast_index(index.base_index))
if isinstance(index, faiss.IndexIVF):
return index, None
else:
return None, None
def build_index(args, ds):
nq, d = ds.nq, ds.d
    nb, d = ds.nb, ds.d
if args.buildthreads == -1:
print("Build-time number of threads:", faiss.omp_get_max_threads())
else:
print("Set build-time number of threads:", args.buildthreads)
faiss.omp_set_num_threads(args.buildthreads)
metric_type = (
faiss.METRIC_L2 if ds.distance() == "euclidean" else
faiss.METRIC_INNER_PRODUCT if ds.distance() in ("ip", "angular") else
        1/0  # intentionally fail fast on unsupported distance metrics
)
print("metric type", metric_type)
index = faiss.index_factory(d, args.indexkey, metric_type)
index_ivf, vec_transform = unwind_index_ivf(index)
if vec_transform is None:
vec_transform = lambda x: x
else:
vec_transform = faiss.downcast_VectorTransform(vec_transform)
if args.by_residual != -1:
by_residual = args.by_residual == 1
print("setting by_residual = ", by_residual)
index_ivf.by_residual # check if field exists
index_ivf.by_residual = by_residual
if index_ivf:
print("Update add-time parameters")
# adjust default parameters used at add time for quantizers
# because otherwise the assignment is inaccurate
quantizer = faiss.downcast_index(index_ivf.quantizer)
if isinstance(quantizer, faiss.IndexRefine):
print(" update quantizer k_factor=", quantizer.k_factor, end=" -> ")
quantizer.k_factor = 32 if index_ivf.nlist < 1e6 else 64
print(quantizer.k_factor)
base_index = faiss.downcast_index(quantizer.base_index)
if isinstance(base_index, faiss.IndexIVF):
print(" update quantizer nprobe=", base_index.nprobe, end=" -> ")
base_index.nprobe = (
16 if base_index.nlist < 1e5 else
32 if base_index.nlist < 4e6 else
64)
print(base_index.nprobe)
elif isinstance(quantizer, faiss.IndexHNSW):
print(" update quantizer efSearch=", quantizer.hnsw.efSearch, end=" -> ")
if args.quantizer_add_efSearch > 0:
quantizer.hnsw.efSearch = args.quantizer_add_efSearch
else:
quantizer.hnsw.efSearch = 40 if index_ivf.nlist < 4e6 else 64
print(quantizer.hnsw.efSearch)
if args.quantizer_efConstruction != -1:
print(" update quantizer efConstruction=", quantizer.hnsw.efConstruction, end=" -> ")
quantizer.hnsw.efConstruction = args.quantizer_efConstruction
print(quantizer.hnsw.efConstruction)
index.verbose = True
if index_ivf:
index_ivf.verbose = True
index_ivf.quantizer.verbose = True
index_ivf.cp.verbose = True
maxtrain = args.maxtrain
if maxtrain == 0:
if 'IMI' in args.indexkey:
maxtrain = int(256 * 2 ** (np.log2(index_ivf.nlist) / 2))
elif index_ivf:
maxtrain = 50 * index_ivf.nlist
else:
# just guess...
maxtrain = 256 * 100
maxtrain = max(maxtrain, 256 * 100)
print("setting maxtrain to %d" % maxtrain)
# train on dataset
print(f"getting first {maxtrain} dataset vectors for training")
xt2 = next(ds.get_dataset_iterator(bs=maxtrain))
print("train, size", xt2.shape)
assert np.all(np.isfinite(xt2))
t0 = time.time()
if (isinstance(vec_transform, faiss.OPQMatrix) and
isinstance(index_ivf, faiss.IndexIVFPQFastScan)):
print(" Forcing OPQ training PQ to PQ4")
ref_pq = index_ivf.pq
training_pq = faiss.ProductQuantizer(
ref_pq.d, ref_pq.M, ref_pq.nbits
)
        vec_transform.pq  # access first: make sure the field exists before replacing it
vec_transform.pq = training_pq
if args.clustering_niter >= 0:
print(("setting nb of clustering iterations to %d" %
args.clustering_niter))
index_ivf.cp.niter = args.clustering_niter
train_index = None
if args.train_on_gpu:
print("add a training index on GPU")
train_index = faiss.index_cpu_to_all_gpus(
faiss.IndexFlatL2(index_ivf.d))
index_ivf.clustering_index = train_index
if args.two_level_clustering:
sqrt_nlist = int(np.sqrt(index_ivf.nlist))
assert sqrt_nlist ** 2 == index_ivf.nlist
centroids_trainset = xt2
if isinstance(vec_transform, faiss.VectorTransform):
print(" training vector transform")
vec_transform.train(xt2)
print(" transform trainset")
centroids_trainset = vec_transform.apply_py(centroids_trainset)
centroids = two_level_clustering(
centroids_trainset, sqrt_nlist, sqrt_nlist,
spherical=(metric_type == faiss.METRIC_INNER_PRODUCT)
)
if not index_ivf.quantizer.is_trained:
print(" training quantizer")
index_ivf.quantizer.train(centroids)
print(" add centroids to quantizer")
index_ivf.quantizer.add(centroids)
index.train(xt2)
print(" Total train time %.3f s" % (time.time() - t0))
if train_index is not None:
del train_index
index_ivf.clustering_index = None
gc.collect()
print("adding")
t0 = time.time()
if args.add_bs == -1:
index.add(sanitize(ds.get_database()))
else:
i0 = 0
nsplit = args.add_splits
for sno in range(nsplit):
print(f"============== SPLIT {sno}/{nsplit}")
for xblock in ds.get_dataset_iterator(bs=args.add_bs, split=(nsplit, sno)):
i1 = i0 + len(xblock)
print(" adding %d:%d / %d [%.3f s, RSS %d kiB] " % (
i0, i1, ds.nb, time.time() - t0,
faiss.get_mem_usage_kb()))
index.add(xblock)
i0 = i1
gc.collect()
if sno == args.stop_at_split:
print("stopping at split", sno)
break
print(" add in %.3f s" % (time.time() - t0))
if args.indexfile:
print("storing", args.indexfile)
faiss.write_index(index, args.indexfile)
return index
####################################################################
# Evaluation functions
####################################################################
def compute_inter(a, b):
nq, rank = a.shape
ninter = sum(
np.intersect1d(a[i, :rank], b[i, :rank]).size
for i in range(nq)
)
return ninter / a.size
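# Hedged example (not part of the original script): compute_inter is the
# fraction of ids shared per row between two top-k result matrices.
def _demo_compute_inter():
    a = np.array([[1, 2, 3], [4, 5, 6]])
    b = np.array([[3, 2, 9], [4, 7, 8]])
    # row 0 shares {2, 3}, row 1 shares {4} -> 3 / 6 = 0.5
    return compute_inter(a, b)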
def knn_search_batched(index, xq, k, bs):
D, I = [], []
for i0 in range(0, len(xq), bs):
Di, Ii = index.search(xq[i0:i0 + bs], k)
D.append(Di)
I.append(Ii)
return np.vstack(D), np.vstack(I)
def eval_setting_knn(index, xq, gt, k=0, inter=False, min_time=3.0, query_bs=-1):
nq = xq.shape[0]
gt_I, gt_D = gt
ivf_stats = faiss.cvar.indexIVF_stats
ivf_stats.reset()
nrun = 0
t0 = time.time()
while True:
if query_bs == -1:
D, I = index.search(xq, k)
else:
D, I = knn_search_batched(index, xq, k, query_bs)
nrun += 1
t1 = time.time()
if t1 - t0 > min_time:
break
ms_per_query = ((t1 - t0) * 1000.0 / nq / nrun)
if inter:
rank = k
inter_measure = compute_inter(gt_I[:, :rank], I[:, :rank])
print("%.4f" % inter_measure, end=' ')
else:
for rank in 1, 10, 100:
n_ok = (I[:, :rank] == gt_I[:, :1]).sum()
print("%.4f" % (n_ok / float(nq)), end=' ')
print(" %9.5f " % ms_per_query, end=' ')
if ivf_stats.search_time == 0:
# happens for IVFPQFastScan where the stats are not logged by default
print("%12d %5.2f " % (ivf_stats.ndis / nrun, 0.0), end=' ')
else:
pc_quantizer = ivf_stats.quantization_time / ivf_stats.search_time * 100
print("%12d %5.2f " % (ivf_stats.ndis / nrun, pc_quantizer), end=' ')
print(nrun)
def eval_setting_range(index, xq, gt, radius=0, inter=False, min_time=3.0, query_bs=-1):
nq = xq.shape[0]
gt_nres, gt_I, gt_D = gt
gt_lims = np.zeros(nq + 1, dtype=int)
gt_lims[1:] = np.cumsum(gt_nres)
ivf_stats = faiss.cvar.indexIVF_stats
ivf_stats.reset()
nrun = 0
t0 = time.time()
while True:
if query_bs == -1:
lims, D, I = index.range_search(xq, radius)
else:
            raise NotImplementedError("batched range search is not implemented")
nrun += 1
t1 = time.time()
if t1 - t0 > min_time:
break
ms_per_query = ((t1 - t0) * 1000.0 / nq / nrun)
ap = eval_range_search.compute_AP((gt_lims, gt_I, gt_D), (lims, I, D))
print("%.4f" % ap, end=' ')
print(" %9.5f " % ms_per_query, end=' ')
print("%12d %5d " % (ivf_stats.ndis / nrun, D.size), end=' ')
print(nrun)
def result_header(ds, args):
# setup the Criterion object
if ds.search_type() == "range":
header = (
'%-40s AP time(ms/q) nb distances nb_res #runs' %
"parameters"
)
crit = None
elif args.inter:
print("Optimize for intersection @ ", args.k)
crit = faiss.IntersectionCriterion(ds.nq, args.k)
header = (
'%-40s inter@%3d time(ms/q) nb distances %%quantization #runs' %
("parameters", args.k)
)
else:
print("Optimize for 1-recall @ 1")
crit = faiss.OneRecallAtRCriterion(ds.nq, 1)
header = (
'%-40s R@1 R@10 R@100 time(ms/q) nb distances %%quantization #runs' %
"parameters"
)
return header, crit
def op_compute_bounds(ps, ops, cno):
# lower_bound_t = 0.0
# upper_bound_perf = 1.0
bounds = np.array([0, 1], dtype="float64")
sp = faiss.swig_ptr
for i in range(ops.all_pts.size()):
ps.update_bounds(cno, ops.all_pts.at(i), sp(bounds[1:2]), sp(bounds[0:1]))
# lower_bound_t, upper_bound_perf
return bounds[0], bounds[1]
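# (comment added for clarity) given the operating points gathered so far,
# op_compute_bounds returns a lower bound on the search time and an upper
# bound on the achievable performance of combination `cno`; the explorer below
# uses them to skip combinations that cannot improve on a measured point.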
def explore_parameter_space_range(index, xq, gt, ps, radius):
""" exploration of the parameter space for range search, using the
Average Precision as criterion
"""
n_experiments = ps.n_experiments
n_comb = ps.n_combinations()
min_time = ps.min_test_duration
verbose = ps.verbose
gt_nres, gt_I, gt_D = gt
gt_lims = np.zeros(len(gt_nres) + 1, dtype=int)
gt_lims[1:] = np.cumsum(gt_nres)
gt = (gt_lims, gt_I, gt_D)
ops = faiss.OperatingPoints()
def run_1_experiment(cno):
ps.set_index_parameters(index, cno)
nrun = 0
t0 = time.time()
while True:
lims, D, I = index.range_search(xq, radius)
nrun += 1
t1 = time.time()
if t1 - t0 > min_time:
break
t_search = (t1 - t0) / nrun
perf = eval_range_search.compute_AP(gt, (lims, I, D))
keep = ops.add(perf, t_search, ps.combination_name(cno), cno)
return len(D), perf, t_search, nrun, keep
if n_experiments == 0:
# means exhaustive run
for cno in range(n_comb):
nres, perf, t_search, nrun, keep = run_1_experiment(cno)
if verbose:
print(" %d/%d: %s nres=%d perf=%.3f t=%.3f s %s" % (
cno, n_comb,
ps.combination_name(cno),
nres, perf, t_search, "*" if keep else ""))
return ops
n_experiments = min(n_experiments, n_comb)
perm = np.zeros(n_experiments, int)
# make sure the slowest and fastest experiment are run
perm[0] = 0
perm[1] = n_comb - 1
rs = np.random.RandomState(1234)
perm[2:] = 1 + rs.choice(n_comb - 2, n_experiments - 2, replace=False)
for xp, cno in enumerate(perm):
cno = int(cno)
if verbose:
print(" %d/%d: cno=%d %s " % (
xp, n_experiments, cno, ps.combination_name(cno)),
end="", flush=True)
# check if we can skip this experiment
lower_bound_t, upper_bound_perf = op_compute_bounds(ps, ops, cno)
best_t = ops.t_for_perf(upper_bound_perf)
if verbose:
print("bounds [perf<=%.3f t>=%.3f] " % (
upper_bound_perf, lower_bound_t),
end="skip\n" if best_t <= lower_bound_t else " "
)
if best_t <= lower_bound_t:
continue
nres, perf, t_search, nrun, keep = run_1_experiment(cno)
if verbose:
print(" nres %d perf %.3f t %.3f (%d %s) %s" % (
nres, perf, t_search, nrun,
"runs" if nrun >= 2 else "run",
"*" if keep else ""))
return ops
####################################################################
# Driver functions
####################################################################
def run_experiments_searchparams(ds, index, args):
"""
Evaluate a predefined set of runtime parameters
"""
k = args.k
xq = ds.get_queries()
nq = len(xq)
ps = faiss.ParameterSpace()
ps.initialize(index)
header, _ = result_header(ds, args)
searchparams = args.searchparams
print(f"Running evaluation on {len(searchparams)} searchparams")
print(header)
maxw = max(max(len(p) for p in searchparams), 40)
for params in searchparams:
ps.set_index_parameters(index, params)
print(params.ljust(maxw), end=' ')
sys.stdout.flush()
if ds.search_type() == "knn":
eval_setting_knn(
index, xq, ds.get_groundtruth(k=args.k),
k=args.k,
inter=args.inter, min_time=args.min_test_duration,
query_bs=args.query_bs
)
else:
eval_setting_range(
index, xq, ds.get_groundtruth(k=args.k),
radius=args.radius,
inter=args.inter, min_time=args.min_test_duration,
query_bs=args.query_bs
)
def run_experiments_autotune(ds, index, args):
""" Explore the space of parameters and keep Pareto-optimal ones. """
k = args.k
xq = ds.get_queries()
nq = len(xq)
ps = faiss.ParameterSpace()
ps.initialize(index)
ps.n_experiments = args.n_autotune
ps.min_test_duration = args.min_test_duration
for kv in args.autotune_max:
k, vmax = kv.split(':')
vmax = float(vmax)
print("limiting %s to %g" % (k, vmax))
pr = ps.add_range(k)
values = faiss.vector_to_array(pr.values)
values = np.array([v for v in values if v < vmax])
faiss.copy_array_to_vector(values, pr.values)
for kv in args.autotune_range:
k, vals = kv.split(':')
vals = np.fromstring(vals, sep=',')
print("setting %s to %s" % (k, vals))
pr = ps.add_range(k)
faiss.copy_array_to_vector(vals, pr.values)
header, crit = result_header(ds, args)
# then we let Faiss find the optimal parameters by itself
print("exploring operating points, %d threads" % faiss.omp_get_max_threads());
ps.display()
t0 = time.time()
if ds.search_type() == "knn":
# by default, the criterion will request only 1 NN
crit.nnn = args.k
gt_I, gt_D = ds.get_groundtruth(k=args.k)
crit.set_groundtruth(None, gt_I.astype('int64'))
op = ps.explore(index, xq, crit)
elif ds.search_type() == "range":
op = explore_parameter_space_range(
index, xq, ds.get_groundtruth(), ps, args.radius
)
else:
assert False
print("Done in %.3f s, available OPs:" % (time.time() - t0))
op.display()
print("Re-running evaluation on selected OPs")
print(header)
opv = op.optimal_pts
maxw = max(max(len(opv.at(i).key) for i in range(opv.size())), 40)
for i in range(opv.size()):
opt = opv.at(i)
ps.set_index_parameters(index, opt.key)
print(opt.key.ljust(maxw), end=' ')
sys.stdout.flush()
if ds.search_type() == "knn":
eval_setting_knn(
index, xq, ds.get_groundtruth(k=args.k),
k=args.k,
inter=args.inter, min_time=args.min_test_duration
)
else:
eval_setting_range(
index, xq, ds.get_groundtruth(k=args.k),
radius=args.radius,
inter=args.inter, min_time=args.min_test_duration
)
class DatasetWrapInPairwiseQuantization:
def __init__(self, ds, C):
self.ds = ds
self.C = C
self.Cq = np.linalg.inv(C.T)
# xb_pw = np.ascontiguousarray((C @ xb.T).T)
# xq_pw = np.ascontiguousarray((Cq @ xq.T).T)
# copy fields
for name in "nb d nq dtype distance search_type get_groundtruth".split():
setattr(self, name, getattr(ds, name))
def get_dataset(self):
return self.ds.get_dataset() @ self.C.T
def get_queries(self):
return self.ds.get_queries() @ self.Cq.T
def get_dataset_iterator(self, bs=512, split=(1,0)):
for xb in self.ds.get_dataset_iterator(bs=bs, split=split):
yield xb @ self.C.T
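# Rationale (comment added for clarity): C is the transposed Cholesky factor
# of the query Gram matrix, so C.T @ C == G, and Cq = inv(C.T). Transformed
# inner products are preserved: (xq @ Cq.T) @ (xb @ C.T).T == xq @ xb.T since
# Cq.T @ C == inv(C) @ C == I; the change of basis only adapts the geometry to
# the query distribution before quantization.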
####################################################################
# Main
####################################################################
def main():
parser = argparse.ArgumentParser()
def aa(*args, **kwargs):
group.add_argument(*args, **kwargs)
group = parser.add_argument_group('What to do')
aa('--build', default=False, action="store_true")
aa('--search', default=False, action="store_true")
aa('--prepare', default=False, action="store_true",
help="call prepare() to download the dataset before computing")
group = parser.add_argument_group('dataset options')
aa('--dataset', choices=DATASETS.keys(), required=True)
aa('--basedir', help="override basedir for dataset")
aa('--pairwise_quantization', default="",
help="load/store pairwise quantization matrix")
aa('--query_bs', default=-1, type=int,
help='perform queries in batches of this size')
group = parser.add_argument_group('index construction')
aa('--indexkey', default='HNSW32', help='index_factory type')
aa('--by_residual', default=-1, type=int,
help="set if index should use residuals (default=unchanged)")
aa('--M0', default=-1, type=int, help='size of base level')
aa('--maxtrain', default=0, type=int,
help='maximum number of training points (0 to set automatically)')
aa('--indexfile', default='', help='file to read or write index from')
aa('--add_bs', default=100000, type=int,
help='add elements index by batches of this size')
aa('--add_splits', default=1, type=int,
help="Do adds in this many splits (otherwise risk of OOM for large datasets)")
aa('--stop_at_split', default=-1, type=int,
help="stop at this split (for debugging)")
aa('--no_precomputed_tables', action='store_true', default=False,
help='disable precomputed tables (uses less memory)')
aa('--clustering_niter', default=-1, type=int,
help='number of clustering iterations (-1 = leave default)')
aa('--two_level_clustering', action="store_true", default=False,
help='perform a 2-level tree clustering')
aa('--train_on_gpu', default=False, action='store_true',
help='do training on GPU')
aa('--quantizer_efConstruction', default=-1, type=int,
help="override the efClustering of the quantizer")
aa('--quantizer_add_efSearch', default=-1, type=int,
help="override the efSearch of the quantizer at add time")
aa('--buildthreads', default=-1, type=int,
help='nb of threads to use at build time')
group = parser.add_argument_group('searching')
aa('--k', default=10, type=int, help='nb of nearest neighbors')
aa('--radius', default=96237, type=float, help='radius for range search')
aa('--inter', default=True, action='store_true',
help='use intersection measure instead of 1-recall as metric')
aa('--searchthreads', default=-1, type=int,
help='nb of threads to use at search time')
aa('--searchparams', nargs='+', default=['autotune'],
help="search parameters to use (can be autotune or a list of params)")
aa('--n_autotune', default=500, type=int,
help="max nb of autotune experiments")
aa('--autotune_max', default=[], nargs='*',
help='set max value for autotune variables format "var:val" (exclusive)')
aa('--autotune_range', default=[], nargs='*',
help='set complete autotune range, format "var:val1,val2,..."')
aa('--min_test_duration', default=3.0, type=float,
help='run test at least for so long to avoid jitter')
aa('--parallel_mode', default=-1, type=int,
help="set search-time parallel mode for IVF indexes")
group = parser.add_argument_group('computation options')
aa("--maxRAM", default=-1, type=int, help="set max RSS in GB (avoid OOM crash)")
args = parser.parse_args()
print("args=", args)
if args.basedir:
print("setting datasets basedir to", args.basedir)
benchmark.datasets.BASEDIR
benchmark.datasets.BASEDIR = args.basedir
if args.maxRAM > 0:
print("setting max RSS to", args.maxRAM, "GiB")
resource.setrlimit(
resource.RLIMIT_DATA, (args.maxRAM * 1024 ** 3, resource.RLIM_INFINITY)
)
os.system('echo -n "nb processors "; '
'cat /proc/cpuinfo | grep ^processor | wc -l; '
'cat /proc/cpuinfo | grep ^"model name" | tail -1')
ds = DATASETS[args.dataset]()
print(ds)
    nq, d = ds.nq, ds.d
    nb, d = ds.nb, ds.d
if args.prepare:
print("downloading dataset...")
ds.prepare()
print("dataset ready")
if not (args.build or args.search):
return
if args.pairwise_quantization:
if os.path.exists(args.pairwise_quantization):
print("loading pairwise quantization matrix", args.pairwise_quantization)
C = np.load(args.pairwise_quantization)
else:
print("training pairwise quantization")
xq_train = ds.get_query_train()
G = xq_train.T @ xq_train
C = np.linalg.cholesky(G).T
print("store matrix in", args.pairwise_quantization)
np.save(args.pairwise_quantization, C)
# Cq = np.linalg.inv(C.T)
# xb_pw = np.ascontiguousarray((C @ xb.T).T)
# xq_pw = np.ascontiguousarray((Cq @ xq.T).T)
ds = DatasetWrapInPairwiseQuantization(ds, C)
if args.build:
print("build index, key=", args.indexkey)
index = build_index(args, ds)
else:
print("reading", args.indexfile)
index = faiss.read_index(args.indexfile)
index_ivf, vec_transform = unwind_index_ivf(index)
if vec_transform is None:
vec_transform = lambda x: x
if index_ivf is not None:
print("imbalance_factor=", index_ivf.invlists.imbalance_factor())
if args.no_precomputed_tables:
if isinstance(index_ivf, faiss.IndexIVFPQ):
print("disabling precomputed table")
index_ivf.use_precomputed_table = -1
index_ivf.precomputed_table.clear()
if args.indexfile:
print("index size on disk: ", os.stat(args.indexfile).st_size)
print("current RSS:", faiss.get_mem_usage_kb() * 1024)
precomputed_table_size = 0
if hasattr(index_ivf, 'precomputed_table'):
precomputed_table_size = index_ivf.precomputed_table.size() * 4
print("precomputed tables size:", precomputed_table_size)
if args.search:
if args.searchthreads == -1:
print("Search threads:", faiss.omp_get_max_threads())
else:
print("Setting nb of threads to", args.searchthreads)
faiss.omp_set_num_threads(args.searchthreads)
if args.parallel_mode != -1:
print("setting IVF parallel mode to", args.parallel_mode)
            index_ivf.parallel_mode  # access first: make sure the field exists before setting it
index_ivf.parallel_mode = args.parallel_mode
if args.searchparams == ["autotune"]:
run_experiments_autotune(ds, index, args)
else:
run_experiments_searchparams(ds, index, args)
if __name__ == "__main__":
main()
|
11553848
|
from ...utilities import db, moocdb_utils
from common import *
from datetime import datetime
def GetForumPosts(vars):
output_items = []
post_ctid = moocdb_utils.GetCollaborationTypeMap(vars)['forum_post']
comment_ctid = moocdb_utils.GetCollaborationTypeMap(vars)['forum_comment']
forum_num_posts = 0
posts_num_children = {}
fc_coll = vars['cons']['forum_contents']['forum_contents']
fu_coll = vars['cons']['forum_users']['forum_users']
posts = fc_coll.find({"course_id": vars['source']['course_id']})
for p in posts:
ctid = post_ctid if p["_type"] == "CommentThread" else comment_ctid
parent_id = None
        if 'parent_ids' in p and len(p['parent_ids']) > 0: parent_id = str(p['parent_ids'][0])
        elif p['_type'] == 'Comment': parent_id = str(p['comment_thread_id'])
        if parent_id not in posts_num_children: posts_num_children[parent_id] = 0
        posts_num_children[parent_id] += 1
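        # posts_num_children[parent_id] is now this post's 1-based position among
        # its siblings, recorded below as collaboration_child_number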
r = fu_coll.find({'_id': p['author_id']}).limit(1)
if r.count() == 0: continue # Ex: user was somehow deleted from the forum_users database
email = r[0]['email'].lower()
if email not in vars['id_maps']['users'].keys(): continue # No user with an openedx email matching the forum_users email was found
output_items.append({
'original_id': str(p['_id']),
'resource_original_id': None,
'collaboration_type_id': ctid,
'collaboration_parent_original_id': parent_id,
'collaboration_child_number': posts_num_children[parent_id],
'user_original_id': email,
'collaboration_content': p['body'].encode('utf-8'),
'collaboration_timestamp': p['created_at'],
})
return output_items
|
11553856
|
import os
import sys
import pickle
import argparse
import numpy as np
import time
from sklearn.metrics import balanced_accuracy_score, mean_squared_error
sys.path.append(os.getcwd())
from tpot import TPOTClassifier, TPOTRegressor
from mindware.datasets.utils import load_data, load_train_test_data
from mindware.components.utils.constants import MULTICLASS_CLS, REGRESSION
parser = argparse.ArgumentParser()
dataset_set = 'diabetes,spectf,credit,ionosphere,lymphography,pc4,' \
'messidor_features,winequality_red,winequality_white,splice,spambase,amazon_employee'
parser.add_argument('--datasets', type=str, default='diabetes')
parser.add_argument('--rep_num', type=int, default=5)
parser.add_argument('--start_id', type=int, default=0)
parser.add_argument('--time_cost', type=int, default=600)
parser.add_argument('--n_job', type=int, default=1)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--task_type', type=str, default='cls', choices=['cls', 'rgs'])
parser.add_argument('--space_type', type=str, default='large', choices=['large', 'small', 'extremely_small'])
max_eval_time = 5
save_dir = './data/exp_sys/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
def evaluate_tpot(dataset, task_type, run_id, time_limit, seed=1, use_fe=True):
n_job = args.n_job
# Construct the ML model.
if not use_fe:
from mindware.utils.tpot_config import classifier_config_dict
config = classifier_config_dict
_task_type = MULTICLASS_CLS if task_type == 'cls' else REGRESSION
if task_type == 'cls':
if space_type == 'large':
from tpot.config.classifier import classifier_config_dict
elif space_type == 'small':
from tpot.config.classifier_small import classifier_config_dict
else:
from tpot.config.classifier_extremely_small import classifier_config_dict
config_dict = classifier_config_dict
else:
if space_type == 'large':
from tpot.config.regressor import regressor_config_dict
elif space_type == 'small':
from tpot.config.regressor_small import regressor_config_dict
else:
from tpot.config.regressor_extremely_small import regressor_config_dict
config_dict = regressor_config_dict
if task_type == 'cls':
automl = TPOTClassifier(config_dict=config_dict, generations=10000, population_size=20,
verbosity=2, n_jobs=n_job, cv=0.2,
scoring='balanced_accuracy',
max_eval_time_mins=max_eval_time,
max_time_mins=int(time_limit / 60),
random_state=seed)
raw_data, test_raw_data = load_train_test_data(dataset, task_type=_task_type)
X_train, y_train = raw_data.data
X_test, y_test = test_raw_data.data
X_train, y_train = X_train.astype('float64'), y_train.astype('int')
X_test, y_test = X_test.astype('float64'), y_test.astype('int')
else:
automl = TPOTRegressor(config_dict=config_dict, generations=10000, population_size=20,
verbosity=2, n_jobs=n_job, cv=0.2,
scoring='neg_mean_squared_error',
max_eval_time_mins=max_eval_time,
max_time_mins=int(time_limit / 60),
random_state=seed)
raw_data, test_raw_data = load_train_test_data(dataset, task_type=_task_type)
X_train, y_train = raw_data.data
X_test, y_test = test_raw_data.data
X_train, y_train = X_train.astype('float64'), y_train.astype('float64')
X_test, y_test = X_test.astype('float64'), y_test.astype('float64')
start_time = time.time()
automl.fit(X_train, y_train)
y_hat = automl.predict(X_test)
pareto_front = automl._pareto_front
if task_type == 'cls':
score_func = balanced_accuracy_score
else:
score_func = mean_squared_error
valid_score = max([pareto_front.keys[x].wvalues[1] for x in range(len(pareto_front.keys))])
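    # wvalues[1] presumably holds each Pareto-front pipeline's internal
    # validation score (DEAP weighted fitness); the best one is reported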
test_score = score_func(y_test, y_hat)
print('Run ID : %d' % run_id)
print('Dataset : %s' % dataset)
print('Val/Test score : %f - %f' % (valid_score, test_score))
scores = automl.scores
times = automl.times
_space_type = '%s_' % space_type if space_type != 'large' else ''
save_path = save_dir + '%s%s_tpot_%s_false_%d_1_%d.pkl' % (_space_type, task_type, dataset, time_limit, run_id)
with open(save_path, 'wb') as f:
pickle.dump([dataset, valid_score, test_score, times, scores, start_time], f)
if __name__ == "__main__":
args = parser.parse_args()
dataset_str = args.datasets
time_limit = args.time_cost
start_id = args.start_id
rep = args.rep_num
task_type = args.task_type
space_type = args.space_type
np.random.seed(args.seed)
seeds = np.random.randint(low=1, high=10000, size=start_id + args.rep_num)
dataset_list = list()
if dataset_str == 'all':
        dataset_list = dataset_set.split(',')
else:
dataset_list = dataset_str.split(',')
for dataset in dataset_list:
for run_id in range(start_id, start_id + rep):
seed = int(seeds[run_id])
evaluate_tpot(dataset, task_type, run_id, time_limit, seed)
|
11553875
|
from os.path import abspath
from os.path import dirname
import numpy as np
def generate_train_targets():
"""Used to generate the targets for each training set.
Note: The target datasets are already pre-generated in each folder as train_target.csv
"""
folders = ["phm", "cmapss_1", "cmapss_2", "cmapss_3", "cmapss_4"]
module_path = dirname(abspath(__file__))
for folder in folders:
dataset = np.loadtxt(module_path + "/../" + folder + "/train.csv")
target = []
        for unit in range(1, int(dataset[dataset.shape[0] - 1, 0]) + 1):
target += (dataset[np.where(dataset[:, 0] == unit)[0]][::-1, 1] - 1).tolist()
np.savetxt(module_path + "/../" + folder + "/train_target.csv", target)
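# The generated target is the remaining-useful-life countdown per unit: within
# each unit the cycle column is reversed and shifted by one, so the first cycle
# of a unit gets target N-1 and the last cycle gets target 0.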
|
11553877
|
import oss2
from aliyunsdkcore.client import AcsClient
import ci.util
def _credentials(alicloud_cfg: str):
if isinstance(alicloud_cfg, str):
cfg_factory = ci.util.ctx().cfg_factory()
alicloud_cfg = cfg_factory.alicloud(alicloud_cfg)
return alicloud_cfg
def oss_auth(alicloud_cfg: str):
cred = _credentials(alicloud_cfg)
return oss2.Auth(cred.access_key_id(), cred.access_key_secret())
def acs_client(alicloud_cfg: str):
cred = _credentials(alicloud_cfg)
return AcsClient(
ak=cred.access_key_id(),
secret=cred.access_key_secret(),
region_id=cred.region()
)
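# Hedged usage sketch (config name, endpoint and bucket are illustrative):
#   auth = oss_auth("my-alicloud-cfg")
#   bucket = oss2.Bucket(auth, "https://oss-eu-central-1.aliyuncs.com", "my-bucket")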
|
11553948
|
import unittest
import const
import json
import jwsmodify_mitmproxy_addon as jma
class TestJWSModifyMethods(unittest.TestCase):
def test_can_find_raw_jws(self):
self.assertTrue(jma.extract_jws_payload(const.RAW_JWS) is not None)
def test_can_find_jws_in_json(self):
my_obj = {
'jws': str(const.RAW_JWS, 'utf-8'),
'foo': 'bar',
'baz': 2,
}
my_obj_str = json.dumps(my_obj)
print(my_obj_str)
self.assertTrue(jma.extract_jws_payload(my_obj_str) is not None)
if __name__ == '__main__':
unittest.main()
|
11553958
|
import tvm
from tvm import te, arith
from tensorizer.intrinsics import INTRINSICS
import numpy as np
n, c, h, w = 1, 192, 18, 18
kh, kw, ic, ko = 3, 3, c, 192
a = te.placeholder((n, c // 16, h, w, 16), 'float16')
b = te.placeholder((ko // 16, ic // 16, kh, kw, 16, 16), 'float16')
rc = te.reduce_axis((0, c), 'rc')
rh = te.reduce_axis((0, kh), 'rh')
rw = te.reduce_axis((0, kw), 'rw')
conv = te.compute((n, ko // 16, h - kh + 1, w - kw + 1, 16),
lambda batch, o_chunk, x, y, ob:
te.sum(a[batch, rc // 16, x + rh, y + rw, rc % 16].astype('float32') *
b[o_chunk, rc // 16, rh, rw, rc % 16, ob].astype('float32'), axis=[rc, rh, rw]))
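# (comment added for clarity) a is packed NCHW16c and b is packed OIHW16i16o:
# the innermost 16-element lanes match the 16x16x16 wmma tile shape used by
# the tensor-core intrinsics scheduled below.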
from tensorizer.intrinsics.pattern import mm_tensorcore
sch = tvm.te.create_schedule(conv.op)
info = list(arith._ffi_api.MatchTensorizer(conv.op, mm_tensorcore()))
print(info)
#assert info
#print(info)
def schedule_fetcher(sch, buffer, y, x):
axes = list(sch[buffer].op.axis)
fused = sch[buffer].fuse(*axes[:-1])
yo, yi = sch[buffer].split(fused, nparts=y)
yio, yii = sch[buffer].split(yi, nparts=x)
sch[buffer].bind(yo, te.thread_axis('threadIdx.y'))
sch[buffer].bind(yio, te.thread_axis('threadIdx.x'))
xo, xi = sch[buffer].split(axes[-1], 8)
sch[buffer].vectorize(xi)
rc = sch[conv].op.reduce_axis[0]
rco, rci = sch[conv].split(rc, 64)
rcio, rcii = sch[conv].split(rci, 16)
rf = sch.rfactor(conv, rcio)
cc = sch.cache_write(rf, 'wmma.accumulator')
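# (comment added for clarity) rfactor turns the rcio slice of the reduction
# into a parallel axis (bound to threadIdx.y below); partial sums accumulate
# in the wmma.accumulator cache `cc` and are combined in the final conv stage.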
batch, oc, x, y, ob = list(sch[conv].op.axis)
xy = sch[conv].fuse(x, y)
oco, oci = sch[conv].split(oc, 2)
xyo, xyi = sch[conv].split(xy, 32)
obo, obi = sch[conv].split(ob, 4)
sch[conv].bind(obo, te.thread_axis('threadIdx.y'))
sch[conv].bind(xyi, te.thread_axis('threadIdx.x'))
sch[conv].vectorize(obi)
sch[conv].reorder(batch, oco, xyo, oci, xyi)
sch[conv].bind(oco, te.thread_axis('blockIdx.y'))
sch[conv].bind(xyo, te.thread_axis('blockIdx.x'))
sch[rf].compute_at(sch[conv], xyo)
rco, batch, oc, x, y, ob = list(sch[rf].op.axis)
xy = sch[rf].fuse(x, y)
xyo, xyi = sch[rf].split(xy, 32)
oo, oi = sch[rf].split(ob, 16)
xyio, xyii = sch[rf].split(xyi, 16)
oio, oii = sch[rf].split(oi, 16)
oco, oci = sch[rf].split(oc, 2)
sch[rf].reorder(batch, xyo, oco, rco, oo, xyio, oci, oio, xyii, oii)
sch[rf].pragma(xyio, 'tensorize', 'tensorcore.store_c')
sch[rf].bind(rco, te.thread_axis('threadIdx.y'))
sch[cc].compute_at(sch[rf], rco)
cri, cb, coc, cx, cy, cob = sch[cc].op.axis
cxy = sch[cc].fuse(cx, cy)
crh, crw, crco, crci = sch[cc].op.reduce_axis
cxyo, cxyi = sch[cc].split(cxy, 16)
crcio, crcii = sch[cc].split(crci, 16)
#print(cb, crh, crw, crco, coc, cx, cyo, cyi, cob, crci, sep='\n')
sch[cc].reorder(cb, crco, crcio, crh, crw, cxyo, coc, cxyi, cob, crcii)
sch[cc].pragma(cxyo, 'tensorize', 'tensorcore')
print(tvm.lower(sch, [a, b, conv], simple_mode=True))
a_reuse = sch.cache_read(a, 'shared', [cc])
sch[a_reuse].compute_at(sch[cc], crcio)
schedule_fetcher(sch, a_reuse, 4, 32)
a_shared = sch.cache_read(a_reuse, 'shared', [cc])
sch[a_shared].compute_at(sch[cc], crw)
schedule_fetcher(sch, a_shared, 4, 32)
aa = sch.cache_read(a_shared, 'wmma.matrix_a', [cc])
#aa = sch.cache_read(a, 'wmma.matrix_a', [cc])
sch[aa].compute_at(sch[cc], crw)
a23 = sch[aa].fuse(sch[aa].op.axis[2], sch[aa].op.axis[3])
a23o, a23i = sch[aa].split(a23, 16)
sch[aa].pragma(a23o, 'tensorize', 'tensorcore.load_a')
bb = sch.cache_read(b, 'wmma.matrix_b', [cc])
sch[bb].compute_at(sch[cc], crw)
sch[bb].pragma(sch[bb].op.axis[0], 'tensorize', 'tensorcore.load_b')
def tracer(module, info, is_before):
import time
global timing
if bool(is_before):
timing = time.time()
else:
print('Executes: ', info.name, (time.time() - timing) * 1000)
np_a = np.random.randn(n, c // 16, h, w, 16).astype('float16')
np_b = np.random.randn(ko // 16, ic // 16, kh, kw, 16, 16).astype('float16')
#np_a = (np.arange(n * (c // 16) * h * w * 16) % 7).astype('float16')
#np_b = (np.arange((ko // 16) * kh * kw * ic * 16) % 7).astype('float16')
#np_a.shape = (n, c // 16, h, w, 16)
#np_b.shape = (ko // 16, kh, kw, ic, 16)
np_c = np.random.randn(n, ko // 16, h - kh + 1, w - kw + 1, 16).astype('float32')
nd_a = tvm.nd.array(np_a, tvm.gpu())
nd_b = tvm.nd.array(np_b, tvm.gpu())
nd_c = tvm.nd.array(np_c, tvm.gpu())
import tensorizer
with tvm.transform.PassContext(opt_level=4, config={'tir.add_lower_pass': [(1, tensorizer.rewrite)]}):
#with tvm.transform.PassContext(opt_level=4):
ir = tvm.lower(sch, [a, b, conv])
print(ir)
module = tvm.build(sch, [a, b, conv], 'nvptx')
fte = module.time_evaluator(module.entry_name, ctx=tvm.gpu(), number=1, repeat=10)
res = fte(nd_a, nd_b, nd_c).results
print('exec: ', np.mean(res) * 1e6)
import functools, operator
elem_c = functools.reduce(operator.mul, np_c.shape, 1)
coef_b = functools.reduce(operator.mul, [ic, kh, kw], 1)
print(elem_c * coef_b / np.mean(res) / 1e9)
vanilla = tvm.te.create_schedule(conv.op)
print(*vanilla[conv].op.reduce_axis, sep='\n')
vb, vc, vx, vy, vob = vanilla[conv].op.axis
vrc, vrh, vrw = vanilla[conv].op.reduce_axis
vxo, vxi = vanilla[conv].split(vx, 32)
vyo, vyi = vanilla[conv].split(vy, 4)
fusion = vanilla[conv].fuse(vb, vc, vxo)
vanilla[conv].reorder(fusion, vxi, vyo, vrc, vrh, vrw, vyi, vob)
vanilla[conv].unroll(vyi)
vanilla[conv].vectorize(vob)
vanilla[conv].parallel(fusion)
#print(tvm.lower(vanilla, [a, b, conv], simple_mode=True))
vanilla = tvm.build(vanilla, [a, b, conv])
cpu_a = tvm.nd.array(np_a, tvm.cpu())
cpu_b = tvm.nd.array(np_b, tvm.cpu())
cpu_c = tvm.nd.array(np_c, tvm.cpu())
vanilla(cpu_a, cpu_b, cpu_c)
#res = cpu_c.asnumpy()
#ref = nd_c.asnumpy()
#for ax0 in range(n):
# for ax1 in range(ko // 16):
# for ax2 in range(h - kh + 1):
# for ax3 in range(w - kw + 1):
# for ax4 in range(16):
# assert abs(res[ax0, ax1, ax2, ax3, ax4] - ref[ax0, ax1, ax2, ax3, ax4]) < 1e-3, \
# (ax0, ax1, ax2, ax3, ax4, res[ax0, ax1, ax2, ax3, ax4], ref[ax0, ax1, ax2, ax3, ax4])
np.testing.assert_allclose(cpu_c.asnumpy(), nd_c.asnumpy(), atol=1e-3, rtol=1e-3)
print('correctness yes!')
|
11553998
|
from pybuilder.core import init, use_plugin
@init
def initialize(project):
project.set_property("run_unit_tests_propagate_stdout", True)
project.set_property("run_unit_tests_propagate_stderr", True)
project.set_property("verbose", True)
use_plugin("exec")
use_plugin("python.core")
use_plugin("python.unittest")
use_plugin("python.install_dependencies")
use_plugin("python.flake8")
use_plugin("python.coverage")
default_task = ["clean"]
|
11554008
|
from rest_framework.settings import api_settings
from rest_framework.throttling import (
UserRateThrottle as DefaultUserRateThrottle)
class UserRateThrottle(DefaultUserRateThrottle):
"""
Subclass of the original UserRateThrottle which applies settings
at init time, rather than class definition time. This allows settings
overrides to work during tests.
"""
def __init__(self):
self.THROTTLE_RATES = api_settings.DEFAULT_THROTTLE_RATES
super().__init__()
# The following classes allow us to define multiple throttle rates.
class BurstRateThrottle(UserRateThrottle):
scope = 'burst'
class SustainedRateThrottle(UserRateThrottle):
scope = 'sustained'
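# Hedged usage sketch (the rates below are illustrative, not from this project):
# because the classes above read their rates at __init__ time, a test override
# of the settings takes effect, e.g.:
#   REST_FRAMEWORK = {
#       "DEFAULT_THROTTLE_RATES": {
#           "user": "1000/day",
#           "burst": "60/min",
#           "sustained": "1000/day",
#       },
#   }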
|
11554046
|
import socket
import inspect
import contextlib
from unittest.mock import patch
from dffml.util.testing.source import SourceTest
from dffml.util.asynctestcase import AsyncTestCase
from dffml_source_mysql.source import MySQLSourceConfig, MySQLSource
from dffml_source_mysql.util.mysql_docker import mysql, DOCKER_ENV
class TestMySQLSource(AsyncTestCase, SourceTest):
SQL_SETUP = """
CREATE TABLE IF NOT EXISTS `record_data` (
`key` varchar(100) NOT NULL,
`PetalLength` float DEFAULT NULL,
`PetalWidth` float DEFAULT NULL,
`SepalLength` float DEFAULT NULL,
`SepalWidth` float DEFAULT NULL,
`flower_type` varchar(100) DEFAULT NULL,
`flower_confidence` float DEFAULT NULL,
PRIMARY KEY (`key`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._exit_stack = contextlib.ExitStack()
cls.exit_stack = cls._exit_stack.__enter__()
cls.container_ip, cls.ca = cls.exit_stack.enter_context(mysql())
cls.source_config = MySQLSourceConfig(
host="mysql.unittest",
port=3306,
user=DOCKER_ENV["MYSQL_USER"],
password=DOCKER_ENV["MYSQL_PASSWORD"],
db=DOCKER_ENV["MYSQL_DATABASE"],
key="key",
features={
k: k
for k in [
"PetalLength",
"PetalWidth",
"SepalLength",
"SepalWidth",
]
},
predictions={"target_name": ("flower_type", "flower_confidence")},
ca=cls.ca,
init=cls.SQL_SETUP,
record="SELECT * FROM record_data WHERE `key`=%s",
records="SELECT * FROM record_data",
update=inspect.cleandoc(
"""
INSERT INTO record_data
(
`key`,
`PetalLength`,
`PetalWidth`,
`SepalLength`,
`SepalWidth`,
`flower_type`,
`flower_confidence`
)
VALUES (%s,%s,%s,%s,%s,%s,%s) ON DUPLICATE KEY UPDATE
`PetalLength`=%s,
`PetalWidth`=%s,
`SepalLength`=%s,
`SepalWidth`=%s,
`flower_type`=%s,
`flower_confidence`=%s
"""
),
)
# Make it so that when the client tries to connect to mysql.unittest the
# address it gets back is the one for the container
cls.exit_stack.enter_context(
patch(
"socket.getaddrinfo",
return_value=[
(
socket.AF_INET,
socket.SOCK_STREAM,
6,
"",
(cls.container_ip, 3306),
)
],
)
)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._exit_stack.__exit__(None, None, None)
async def setUpSource(self):
return MySQLSource(self.source_config)
|
11554069
|
from os import environ
INTEGRATION = 'INTEGRATION'
UNIT = 'UNIT'
MODE = environ.get('TEST_MODE', UNIT)
|
11554101
|
import matplotlib.pyplot as plt
from brancher.variables import ProbabilisticModel
from brancher.standard_variables import NormalVariable, LogNormalVariable
from brancher.transformations import truncate_model
from brancher.visualizations import plot_density
# Normal model
mu = NormalVariable(0., 1., "mu")
x = NormalVariable(mu, 0.1, "x")
model = ProbabilisticModel([x])
# decision rule
model_statistics = lambda dic: dic[x].data
truncation_rule = lambda a: ((a > 0.5) & (a < 0.6)) | ((a > -0.6) & (a < -0.5))
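# the rule keeps only samples whose x statistic falls in one of the two
# symmetric bands (0.5, 0.6) or (-0.6, -0.5)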
# Truncated model
truncated_model = truncate_model(model, truncation_rule, model_statistics)
plot_density(truncated_model, variables=["mu", "x"], number_samples=10000)
plt.show()
|
11554130
|
import argparse
import fasttext
import json
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument(
"-d",
"--documents",
type=str,
default="../training_data/training-data.json",
help="path to documents",
)
parser.add_argument(
"-train", "--train_document", type=str, default="./train.txt", help="path to train document"
)
parser.add_argument(
"-test", "--test_document", type=str, default="./test.txt", help="path to test document"
)
args = parser.parse_args()
def cos_sim(v1, v2):
return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
# load train data
with open(args.documents, "r") as f:
    issues_load = json.load(f)
# load test data
with open(args.test_document) as f:
test = f.readlines()
# add label
terms = ""
label_num = 0
for issue in issues_load:
if issue["html_url"].split("/")[-2] == "issues":
terms += f"__label__{issue['number']} {issue['body']}\n"
label_num += 1
with open(args.train_document, "w") as f:
f.write(terms)
# train model
with open(args.train_document) as f:
trains = f.readlines()
model = fasttext.train_supervised(input=args.train_document)
# test model
test_word_vector = np.mean([model[x] for word in test for x in word.split()], axis=0)
results = []
for train in trains:
result = {}
train_word_vector = np.mean(
[model[x] for word in train.split()[1:] for x in word.split()], axis=0
)
prob = cos_sim(train_word_vector, test_word_vector)
result["probability"] = prob
result["label"] = train.split()[0]
results.append(result)
results = sorted(results, key=lambda x: x["probability"], reverse=True)
suggestions = []
for result in results:
for issue in issues_load:
if issue["number"] == int(result["label"].split("__")[-1]):
suggestion = {}
suggestion["html_url"] = issue["html_url"]
suggestion["title"] = issue["title"]
suggestion["number"] = int(issue["html_url"].split("/")[-1])
suggestion["probability"] = float(result["probability"])
suggestions.append(suggestion)
suggestions = json.dumps(suggestions, indent=4)
with open("suggestions.json", "w") as f:
f.write(suggestions)
print(suggestions)
|
11554132
|
from .assertion import Assertion
from .model import Model
from .policy import Policy
from .function import FunctionMap
|
11554150
|
from .annotation import Annotation
from .shortcut_helper.shortcut_helper import Shortcut, ShortcutHelper
from .utils import AdditionalOutputElement
__all__ = ["Annotation", "AdditionalOutputElement", "Shortcut", "ShortcutHelper"]
|
11554173
|
from drf_triad_permissions.permissions import get_triad_permission
class Policy:
allow = None
deny = None
@classmethod
def expand(cls, default_resource=None):
return [get_triad_permission(allow=cls.allow, deny=cls.deny, default_resource=default_resource)]
class BasicPolicy(Policy):
allow = [
"{resource}::all::{action}",
"{resource}::new::create",
"{resource}::id:{obj.id}::{action}",
]
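# Reading of BasicPolicy (comment added for clarity): it grants the generic
# "{resource}::all::{action}" permission, creation via the "new" pseudo-object,
# and per-object actions keyed by "{obj.id}".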
|
11554178
|
from seamless.core import context, cell, StructuredCell
ctx = context(toplevel=True)
ctx.data = cell("mixed")
ctx.sc = StructuredCell(
data=ctx.data
)
data = ctx.sc.handle
data.set(20)
ctx.compute()
print(data.data, ctx.data.value)
data.set(data + 1)
ctx.compute()
print(data.data, ctx.data.value)
print(type(data))
data += 1
print(type(data))
ctx.compute()
print(data.data, ctx.data.value)
|
11554238
|
from django.conf import settings
from django.utils.http import urlencode
def keycloak_logout(request):
logout_url = settings.OIDC_OP_LOGOUT_ENDPOINT
return_to_url = request.build_absolute_uri(settings.LOGOUT_REDIRECT_URL)
return logout_url + '?' + urlencode({'redirect_uri': return_to_url, 'client_id': settings.OIDC_RP_CLIENT_ID})
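# Hedged example of the resulting URL (host and client id are illustrative):
# https://idp.example.com/auth/realms/x/protocol/openid-connect/logout?redirect_uri=https%3A%2F%2Fapp.example.com%2F&client_id=my-client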
|
11554243
|
import torch
import torch.optim as O
import torch.nn as nn
from tasks.snli.third_party import datasets
from tasks.snli.third_party import models
import datetime
from prettytable import PrettyTable
from tasks.snli.third_party.utils import *
from pdb import set_trace
class Evaluate():
def __init__(self, args=None, with_predefined=True):
print("program execution start: {}".format(datetime.datetime.now()))
if args is not None:
self.args = args
else:
self.args = parse_args()
self.device = get_device(self.args.gpu)
self.logger = get_logger(self.args, "evaluate")
self.dataset_options = {
'batch_size': self.args.batch_size,
'device': self.device
}
self.dataset = datasets.__dict__[self.args.dataset](self.dataset_options)
self.with_predefined = with_predefined
if self.with_predefined:
self.validation_accuracy, self.model_options, model_dict = self.load_model()
self.model = models.__dict__[self.args.model](self.model_options)
self.model.to(self.device)
self.model.load_state_dict(model_dict)
else:
self.model = None
self.model_options = None
self.validation_accuracy = None
        self.criterion = nn.CrossEntropyLoss(reduction='sum')
self.test_accuracy = -1
print("resource preparation done: {}".format(datetime.datetime.now()))
    def load_model(self):
        model = torch.load('{}/{}/{}/best-{}-{}-params.pt'.format(self.args.results_dir, self.args.model, self.args.dataset, self.args.model, self.args.dataset))
        return model['accuracy'], model['options'], model['model_dict']
def print_confusion_matrix(self, labels, confusion_matrix):
table = PrettyTable()
table.field_names = ["confusion-matrix"] + ["{}-pred".format(label) for label in labels] + ["total"]
row_label = ["{}-actual".format(label) for label in labels] + ["total"]
row_sum = confusion_matrix.sum(dim=1).view(len(labels), 1)
confusion_matrix = torch.cat((confusion_matrix, row_sum), dim=1)
col_sum = confusion_matrix.sum(dim=0).view(1, len(labels)+1)
confusion_matrix = torch.cat((confusion_matrix, col_sum), dim=0)
confusion_matrix = confusion_matrix.tolist()
for i, row in enumerate(confusion_matrix):
table.add_row([row_label[i]] + [int(count) for count in row] )
print(table)
self.logger.info(table)
    def label_wise_accuracy(self, label_map, confusion_matrix):
        table = PrettyTable()
        table.field_names = ["label", "accuracy"]
        for label, value in label_map.items():
            acc = round((100. * confusion_matrix[value][value] / confusion_matrix[value].sum()).item(), 3)
            table.add_row([label, acc])
        print(table)
        self.logger.info(table)
def evaluate(self):
assert self.model is not None
        self.model.eval()
        self.dataset.test_iter.init_epoch()
n_correct, n_total, n_loss = 0, 0, 0
labels = self.dataset.labels().copy()
confusion_matrix = torch.zeros(len(labels), len(labels))
with torch.no_grad():
for batch_idx, batch in enumerate(self.dataset.test_iter):
answer = self.model(batch)
loss = self.criterion(answer, batch.label)
pred = torch.max(answer, 1)[1].view(batch.label.size())
for t, p in zip(batch.label, pred):
confusion_matrix[t.long()][p.long()] += 1
n_correct += (pred == batch.label).sum().item()
n_total += batch.batch_size
n_loss += loss.item()
test_loss = n_loss/n_total
test_acc = 100. * n_correct/n_total
return test_loss, test_acc, confusion_matrix
def execute(self):
_, test_acc, confusion_matrix = self.evaluate()
table = PrettyTable()
table.field_names = ["data", "accuracy"]
if self.validation_accuracy is not None:
table.add_row(["validation", round(self.validation_accuracy, 3)])
table.add_row(["test", round(test_acc, 3)])
print(table)
self.logger.info(table)
        label_map = self.dataset.labels()
        self.label_wise_accuracy(label_map, confusion_matrix)
        self.print_confusion_matrix(label_map.keys(), confusion_matrix)
return test_acc
#task = Evaluate()
#task.execute()
|
11554370
|
import gspread
import json
import yaml
from pprint import pprint
from oauth2client.service_account import ServiceAccountCredentials
class GSheets:
    # note: these methods take no `self`; in Python 3 they can be called as
    # GSheets.PULL_FROM_GSHEETS(...) like plain functions
    def PULL_FROM_GSHEETS(count):
with open('Config/Cloud_Config.yaml', 'r') as f:
doc = yaml.load(f, Loader=yaml.FullLoader)
Sheet = doc["GCloud"]["Sheet"]
scope = ["https://spreadsheets.google.com/feeds",'https://www.googleapis.com/auth/spreadsheets',"https://www.googleapis.com/auth/drive.file","https://www.googleapis.com/auth/drive"]
secrets ='Keys/GSheet_Keys/client_secret.json'
creds = ServiceAccountCredentials.from_json_keyfile_name(secrets, scope)
client = gspread.authorize(creds)
sheet = client.open(str(Sheet)).sheet1
# Extract and print all of the values
list_of_hashes = sheet.get_all_records()
go = json.dumps(list_of_hashes, sort_keys=True, indent=1)
pprint(go)
return go
def PUSH_TOO_GSHEETS():
print("test")
|
11554396
|
import contextlib
import logging
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (FairseqMultiModel, register_model,
register_model_architecture)
from fairseq.models.nat.nonautoregressive_transformer import NATransformerModel
from fairseq.models.transformer import (Embedding, Linear, TransformerDecoder,
TransformerEncoder)
from fairseq.modules import FairseqDropout, LayerNorm
from nonauto.criterions.nat_loss import sequence_ctc_loss_with_logits
from nonauto.modules.gradrev import GradientReversal
from nonauto.modules.position_embedding import RelativePositionEmbedding
from nonauto.modules.reversible_utils import *
from .layers import *
from .model_utils import *
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
logger = logging.getLogger(__name__)
@register_model("mREDER")
class MREDER(NATransformerModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for encoder')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--layernorm-embedding', action='store_true',
help='add layernorm to embedding')
parser.add_argument('--no-scale-embedding', action='store_true',
help='if True, dont scale embeddings')
# args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,
help='iterative PQ quantization noise at training time')
parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,
help='block size of quantization noise at training time')
parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,
help='scalar quantization noise and scalar quantization at training time')
        # args for Universal Transformer
parser.add_argument('--encoder-cross-layer-sharing', action='store_true',
help='sharing parameters for encoder layers')
# REDER specific
parser.add_argument(
"--apply-bert-init", action="store_true",
help="use custom param initialization for BERT")
parser.add_argument(
"--self-attention-type", default="normal", choices=["normal", "relative"],
help="self attention type")
parser.add_argument(
"--out-norm-type", type=str, default="actnorm", choices=["layernorm", "actnorm"],
help="how to perform feature normalization for decoder output")
parser.add_argument(
"--upsampling", type=int, metavar='N', default=1,
help="upsampling ratio")
        parser.add_argument(
            "--ctc-loss", action="store_true", default=False,
            help="enable ctc loss")
parser.add_argument(
"--fba-loss", action="store_true", default=False,
help="enable fba loss")
parser.add_argument(
"--enable-fba-loss-after-update", type=int, metavar='N', default=100_000,
help="enable fba loss after some predefined updates")
parser.add_argument(
"--cycle-loss", action="store_true", default=False,
help="enable cycle loss")
parser.add_argument(
"--enable-cycle-loss-after-update", type=int, metavar='N', default=100_000,
help="enable cycle loss after some predefined updates")
parser.add_argument(
"--lang-adv-loss", action="store_true", default=False,
help="enable language adversarial loss")
parser.add_argument(
"--layer-norm-type", type=str, default="prenorm", choices=["prenorm", "sandwich"],
help="layer norm type")
parser.add_argument(
"--pretrained-checkpoint", type=str,
help="pretrained model checkpoint path.")
parser.add_argument(
"--share-all", action="store_true", default=False,
help="share model parameters for all languages")
parser.add_argument(
"--lang-end-inner-order", type=str, choices=['fs', 'sf'], default='sf',
help="order of operations of each layer, attention->FFN or FFN->attention ")
parser.add_argument(
"--split-embedding", action="store_true", default=False,
help="double the dimension of embedding so as not to copy embeddings before feeding to RevNet")
parser.add_argument(
"--feature-shuffle", action="store_true", default=False,
help="shuffle feature dimension-wise between each RevNet layer.")
def __init__(self, args, encoder, decoder, langs):
super().__init__(args, encoder, decoder)
delattr(self, 'decoder')
self.args = args
self.langs = langs
self._eval_lang_pair = None
self._reversed = False
self.enable_fba_loss = False
self.enable_cycle_loss = False
self._build_dictionaries_and_embeds()
self._build_output_projections()
self._build_fba_predictors()
self._build_lang_discriminator()
if hasattr(args, "pretrained_checkpoint"):
pretrained_loaded_state_dict = upgrade_state_dict_with_pretrained_weights(
state_dict=self.state_dict(),
pretrained_checkpoint=args.pretrained_checkpoint,
)
self.load_state_dict(pretrained_loaded_state_dict, strict=False)
def _build_dictionaries_and_embeds(self):
self.dicts = {
lang: self.encoder.dictionary for lang in self.langs
}
self.embed_tokens = nn.ModuleDict({
lang: self.encoder.embed_tokens for lang in self.langs
})
self.embed_positions = nn.ModuleDict({
lang: self.encoder.embed_positions for lang in self.langs
})
self.embed_scale = 1.0 if self.args.no_scale_embedding else math.sqrt(
self.args.encoder_embed_dim)
self.dropout_module = FairseqDropout(
self.args.dropout, module_name=self.__class__.__name__)
self.blank_index = getattr(
self.dicts[self.langs[0]], "blank_index", None)
def _build_output_projections(self):
def _build(weight):
output_projection = nn.Linear(
weight.shape[1],
weight.shape[0],
bias=False,
)
output_projection.weight = weight
return output_projection
self.output_projections = nn.ModuleDict({
lang: _build(self.embed_tokens[lang].weight) for lang in self.langs
})
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.split_embedding:
args.embed_dim = args.encoder_embed_dim * 2
shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None
if args.share_all_embeddings:
shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(
dicts=task.dicts,
langs=task.langs,
embed_dim=getattr(args, "embed_dim", args.encoder_embed_dim),
build_embedding=build_embedding,
pretrained_embed_path=args.encoder_embed_path,
)
shared_decoder_embed_tokens = shared_encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
raise ValueError(f"{cls.__name__} requires --share-all-embeddings")
shared_dict = task.dicts[task.langs[0]]
encoder = cls._build_encoder(
args, task, shared_dict, shared_encoder_embed_tokens)
decoder = SimpleDecoder(
args, shared_dict, shared_decoder_embed_tokens
)
return cls(args, encoder, decoder, task.langs)
@classmethod
def _build_encoder(cls, args, task, dicts, embed_tokens):
encoder = RevTransformerEncoder(
args, dicts, embed_tokens, task.langs)
return encoder
def _make_padding_mask(self, tokens, pad_mask_token=True):
padding_mask = tokens.eq(self.pad)
return padding_mask
def _convert_mask_to_lengths(self, mask):
return mask.long().sum(-1)
def _convert_lengths_to_mask(self, lens, maxlen=None):
# lens: (bsz)
maxlen = maxlen or lens.max()
lens = lens.view(-1)
mask = torch.arange(maxlen, device=lens.device)[
None, :] < lens[:, None]
return mask
@contextlib.contextmanager
def reverse(self):
old_value = self._reversed
self._reversed = not old_value
yield
self._reversed = old_value
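    # `with self.reverse():` flips lang_pair to (tgt, src), so the same
    # reversible encoder is run in the opposite translation direction.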
@contextlib.contextmanager
def set_lang_pair(self, lang_pair):
old_value = self._eval_lang_pair
self._eval_lang_pair = lang_pair
yield
self._eval_lang_pair = old_value
@property
def lang_pair(self):
if not self._eval_lang_pair:
raise ValueError(
f"{self.__class__.__name__}._eval_lang_pair should be assigned a value.")
src, tgt = self._eval_lang_pair.split('-')
if self._reversed:
src, tgt = tgt, src
return src, tgt
# return (src, tgt) if not self._reverse else (tgt, src)
def _forward(self, src_tokens, tgt_tokens, feature_only=False, no_upsample=False, reduction='mean', **kwargs):
# maybe upsample for ctc
if not no_upsample:
src_tokens = self._maybe_upsample(src_tokens)
inp, emb, padding_mask = self._embed(src_tokens)
encoder_out = self._encode(inp, emb, padding_mask)
logits = self._decode(encoder_out, normalize=False)
if feature_only:
return None, {"encoder_out": encoder_out, "logits": logits}
# masks
logit_mask = ~self._make_padding_mask(src_tokens)
word_pred_mask = ~self._make_padding_mask(tgt_tokens)
# ctc loss
ctc_loss = sequence_ctc_loss_with_logits(
logits=logits,
logit_mask=logit_mask,
targets=tgt_tokens,
target_mask=word_pred_mask,
blank_index=self.blank_index,
label_smoothing=self.args.label_smoothing,
reduction=reduction
)
net_output = {
"word_pred_ctc": {
"loss": ctc_loss,
"nll_loss": True,
},
}
return net_output, {"encoder_out": encoder_out, "logits": logits}
def _embed(self, tokens, lang=None):
if not lang:
sl, tl = self.lang_pair
lang = sl
x = embed = self.embed_scale * self.embed_tokens[lang](tokens)
if self.embed_positions[lang] is not None:
x = embed + self.embed_positions[lang](tokens)
x = self.dropout_module(x)
padding_mask = self._make_padding_mask(tokens)
return x, embed, padding_mask
def _encode(self, inp, emb, padding_mask):
sl, tl = self.lang_pair
encoder_out = self.encoder.forward(
inp, emb, padding_mask,
src_lang=sl, tgt_lang=tl,
return_all_hiddens=self.args.fba_loss
)
return encoder_out
def _decode(self, encoder_out, normalize=False, reverse=False, prev_output_tokens=None, step=0):
sl, tl = self.lang_pair
feature = encoder_out.encoder_out
logits = self.output_projections[tl](feature)
return F.log_softmax(logits, -1) if normalize else logits
def _maybe_upsample(self, tokens):
if self.args.upsampling <= 1:
return tokens
def _us(x, s):
B = x.size(0)
_x = x.unsqueeze(-1).expand(B, -1, s).reshape(B, -1)
return _x
return _us(tokens, self.args.upsampling)
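    # Example: upsampling=2 maps token ids [t1, t2, t3] to [t1, t1, t2, t2, t3, t3],
    # giving CTC enough output slots (output length must be >= target length).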
def forward(self, src_tokens, src_lengths, prev_output_tokens=None, tgt_tokens=None, **kwargs):
return self.forward_unidir(src_tokens, tgt_tokens, **kwargs)
def forward_unidir(self, src_tokens, tgt_tokens, **kwargs):
net_output, extras = self._forward(
src_tokens=src_tokens, tgt_tokens=tgt_tokens)
if self.args.fba_loss and self.enable_fba_loss:
fba_loss = self._compute_fba_loss(
src_tokens, tgt_tokens, extras["logits"], extras["encoder_out"])
net_output["fba_cos"] = {"loss": fba_loss, "factor": 0.5}
if self.args.cycle_loss and self.enable_cycle_loss:
cycle_loss_net_output = self._compute_cycle_loss(
src_tokens, extras["logits"], extras["encoder_out"].encoder_padding_mask)
net_output["cycle_ctc"] = {"factor": 0.5, **cycle_loss_net_output}
if self.args.lang_adv_loss:
lang_adv_out = self._compute_lang_adv_loss(extras["encoder_out"])
net_output["lang_adv"] = {"factor": 0.1, **lang_adv_out}
return net_output
# forward-backward agreement loss #
def _build_fba_predictors(self):
if self.args.fba_loss:
self.fba_loss_fn = cos_distance_loss
embed_dim = self.args.encoder_embed_dim
# self.predictors = nn.ModuleList([
# BottleneckFFN(embed_dim * 2, embed_dim // 2, dropout=0.0)
# for _ in range(self.args.encoder_layers*2+1)
# ])
self.predictors = nn.ModuleList([
nn.Identity()
for _ in range(self.args.encoder_layers*2+1)
])
def _representation_predict(self, hiddens, predict=False):
if not predict or not hasattr(self, "predictors"):
return hiddens
preds = []
for pp, hh in zip(self.predictors, hiddens):
preds.append(pp(hh))
return preds
def _compute_fba_loss(self, src_tokens, tgt_tokens, logits, forward_encoder_out):
fwd_states, bwd_states, mask = self._prepare_fba_loss(
src_tokens, tgt_tokens, logits, forward_encoder_out)
fwd_states_pred = self._representation_predict(fwd_states, predict=True)
bwd_states = self._representation_predict(bwd_states, predict=False)
fwd_states_pred = torch.stack(fwd_states_pred, 2)
bwd_states = torch.stack(bwd_states, 2)
fba_loss = self.fba_loss_fn(
output=fwd_states_pred,
target=bwd_states.detach(),
# target=bwd_states,
mask=mask,
)
return fba_loss
def _prepare_fba_loss(self, src_tokens, tgt_tokens, logits, forward_encoder_out):
src_padding_mask = forward_encoder_out.encoder_padding_mask
tgt_padding_mask = self._make_padding_mask(tgt_tokens)
src_lengths = self._convert_mask_to_lengths(~src_padding_mask)
tgt_lengths = self._convert_mask_to_lengths(~tgt_padding_mask)
fwd_states = forward_encoder_out.encoder_states
from nonauto.modules.ctc_utils import convert_alignment_to_symbol
from torch_imputer import best_alignment
log_prob = F.log_softmax(logits, dim=-1)
best_aligns = best_alignment(
log_prob.transpose(0, 1).float(),
tgt_tokens, src_lengths, tgt_lengths, self.blank_index, True)
aligned_tgt_tokens = convert_alignment_to_symbol(
best_aligns, tgt_tokens, self.blank_index, src_padding_mask, self.pad)
# aligned_tgt_tokens = self._maybe_upsample(src_tokens)
self.encoder.reuse_seed(True)
with self.reverse():
_, bwd_extra = self._forward(
src_tokens=aligned_tgt_tokens,
tgt_tokens=None,
feature_only=True,
no_upsample=True
)
bwd_states = bwd_extra["encoder_out"].encoder_states[::-1]
self.encoder.reuse_seed(False)
mask = ~src_padding_mask
return fwd_states, bwd_states, mask
# cycle consistency loss #
def _compute_cycle_loss(self, src_tokens, logits, src_padding_mask):
decoded_tokens = self._prepare_cycle_loss(logits, src_padding_mask)
with self.reverse():
rev_net_output, _ = self._forward(
src_tokens=decoded_tokens,
tgt_tokens=src_tokens,
)
return rev_net_output["word_pred_ctc"]
def _prepare_cycle_loss(self, logits, src_padding_mask):
_, batch_ctc_decoded_tokens = F.log_softmax(logits, -1).max(-1)
from nonauto.modules.ctc_utils import \
post_process_ctc_decoded_tokens_with_pad
decoded_tokens = post_process_ctc_decoded_tokens_with_pad(
batch_ctc_decoded_tokens,
self.pad, left_pad=False
)
return decoded_tokens
def _build_lang_discriminator(self):
if self.args.lang_adv_loss:
self.lang_dict = {self.langs[i]: i for i in range(len(self.langs))}
embed_dim = self.args.encoder_embed_dim
layers = [GradientReversal(),
nn.Dropout(0.2),
nn.Linear(embed_dim*2, embed_dim)]
for i in range(getattr(self.args, 'lang_dis_layers', 2)):
layers.append(nn.Linear(embed_dim, embed_dim))
layers.append(nn.LeakyReLU(0.2))
layers.append(nn.Dropout(0.2))
layers.append(nn.Linear(embed_dim, len(self.langs)))
self.lang_dis = nn.Sequential(*layers)
def _compute_lang_adv_loss(self, encoder_out):
sl, tl = self.lang_pair
# [bsz, L, 2d]
mid = encoder_out.encoder_mid
mask = ~encoder_out.encoder_padding_mask
mid_avg = tensor_mean_by_mask(mid, mask)
# [bsz, #lang]
logits = self.lang_dis(mid_avg)
bsz = logits.size(0)
lang_labels = logits.new_full(
(bsz, ), self.lang_dict[sl], dtype=torch.int64)
return {
"out": logits, "tgt": lang_labels
}
##### methods for decoding ######
def initialize_output_tokens(self, encoder_out, src_tokens, **kwargs):
if hasattr(self.args, "ctc_loss"):
initial_output_tokens = src_tokens.clone() # [B, T]
else:
initial_output_tokens = kwargs["sample"]["target"].clone()
initial_output_tokens = self._maybe_upsample(initial_output_tokens)
initial_output_scores = initial_output_tokens.new_zeros(
*initial_output_tokens.size()
).type_as(encoder_out.encoder_out)
return DecoderOut(
src_tokens=src_tokens.clone(),
output_tokens=initial_output_tokens,
output_scores=initial_output_scores,
attn=None,
step=0,
max_step=0,
history=None
)
def forward_encoder(self, encoder_inputs):
tokens, lengths = encoder_inputs
tokens = self._maybe_upsample(tokens)
inp, emb, padding_mask = self._embed(tokens)
encoder_out = self._encode(inp, emb, padding_mask)
return encoder_out
def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
step = decoder_out.step
src_tokens = decoder_out.src_tokens
# execute the decoder
output_masks = ~encoder_out.encoder_padding_mask
log_probs = self._decode(
encoder_out=encoder_out,
normalize=True,
step=step,
)
if getattr(self.args, 'ctc_decode_with_beam', 1) == 1:
output_scores, output_tokens = log_probs.max(-1)
output_tokens[~output_masks] = self.pad
output_scores[~output_masks] = 0.
else:
output_tokens, output_scores = self.ctc_beamsearch(
log_probs, output_masks,
src_tokens=src_tokens,
score_reconstruction_weight=getattr(
self.args, 'ctc_decode_score_reconstruction_weight', 0)
)
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=None,
history=None
)
def ctc_beamsearch(
self, log_probs, probs_mask, src_tokens=None, score_reconstruction_weight=0
):
def _get_ctcdecoder():
if not hasattr(self, 'ctcdecoder'):
sl, tl = self.lang_pair
labels = self.dicts[tl].symbols
import multiprocessing
nproc = multiprocessing.cpu_count()
from ctcdecode import CTCBeamDecoder
ctcdecoder = CTCBeamDecoder(
labels,
model_path=None,
alpha=0,
beta=0,
cutoff_top_n=max(40, self.args.ctc_decode_with_beam),
cutoff_prob=1.0,
beam_width=self.args.ctc_decode_with_beam,
num_processes=nproc,
blank_id=self.blank_index,
log_probs_input=True
)
self.ctcdecoder = ctcdecoder
return self.ctcdecoder
decoder = _get_ctcdecoder()
probs_lens = probs_mask.int().sum(-1)
device = probs_lens.device
# BATCHSIZE x N_BEAMS X N_TIMESTEPS
beam_results, beam_scores, timesteps, out_lens = decoder.decode(
log_probs, probs_lens)
bbsz = beam_results.size(0) * beam_results.size(1)
beam_results = beam_results.to(device).long().view(bbsz, -1)
beam_scores = beam_scores.to(device).view(bbsz)
out_lens = out_lens.to(device).view(bbsz)
beam_mask = self._convert_lengths_to_mask(
out_lens, maxlen=beam_results.size(-1))
beam_results = beam_results.masked_fill_(~beam_mask, self.pad)
if score_reconstruction_weight > 0:
rec_scores = self.score_beam_reconstruction(
src_tokens, beam_results)
beam_scores += score_reconstruction_weight * rec_scores
        beam_scores = -(beam_scores / out_lens).unsqueeze(-1).expand_as(beam_results)
return beam_results, beam_scores
def score_beam_reconstruction(self, src_tokens, beam_results):
old_ls, self.args.label_smoothing = self.args.label_smoothing, 0
beam_size = self.args.ctc_decode_with_beam
src_tokens_beam = src_tokens[:, None, :].repeat(
1, beam_size, 1).view(-1, src_tokens.size(-1))
with self.reverse():
rev_net_output, _ = self._forward(
src_tokens=beam_results,
tgt_tokens=src_tokens_beam,
reduction='batch_sum'
)
# bsz*beam
rec_scores = rev_net_output['word_pred_ctc']['loss'].clone()
del rev_net_output
del _
self.args.label_smoothing = old_ls
return rec_scores
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
def max_positions(self):
return (self.args.max_source_positions, self.args.max_target_positions)
class RevTransformerEncoder(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens, langs):
super().__init__(args, dictionary, embed_tokens)
self.args = args
self.embed_dim = embed_tokens.embedding_dim
self.langs = langs
self.build_ends()
self.build_out_norm()
def reuse_seed(self, value):
for lang, end in self.ends.items():
for layer in end.layers:
layer.reuse_seed = value
def build_ends(self):
delattr(self, 'layers')
self.ends = nn.ModuleDict({
lang: RevTransformerLangEnd(self.args) for lang in self.langs
})
if self.args.share_all:
for key in self.ends:
self.ends[key] = self.ends[self.langs[0]]
if self.args.lang_end_inner_order == 'fs':
for key in self.ends:
self.ends[key].forward, self.ends[key].reverse = self.ends[key].reverse, self.ends[key].forward
def build_out_norm(self):
feature_dim = self.args.encoder_embed_dim * 2
if self.args.out_norm_type == 'actnorm':
_actnorm = ActNorm(feature_dim)
self.actnorms = nn.ModuleDict({
lang: _actnorm for lang in self.langs
})
def in_norm(self, x, mask, lang):
if not self.args.split_embedding:
# concat same embedding to match feature dimension of revnet.
x = torch.cat([x, x], dim=-1)
if self.args.out_norm_type == 'actnorm':
o = self.actnorms[lang](x, mask)
return o
return x
def out_norm(self, x, mask, lang):
o = x
if self.args.out_norm_type == 'actnorm':
o = self.actnorms[lang].reverse(o, mask)
if not self.args.split_embedding:
# split feature to match embedding dimension
o_1, o_2 = torch.chunk(o, 2, -1)
o = (o_1 + o_2) / 2
return o
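    # Note (assuming split_embedding is off and actnorm is disabled):
    # in_norm maps x -> cat([x, x]) and out_norm maps o -> (o_1 + o_2) / 2,
    # so out_norm(in_norm(x)) == x. The concat/split pair only adapts the
    # embedding width to the doubled revnet feature dimension; when actnorm
    # is enabled it is applied forward on the way in and reversed on the
    # way out, preserving the same invertibility.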
def forward(self, x, src_emb, src_padding_mask, src_lang='de', tgt_lang='en', return_all_hiddens=False):
src_end = self.ends[src_lang]
tgt_end = self.ends[tgt_lang]
# 1) x_emb -> source end
x = self.in_norm(x, ~src_padding_mask, lang=src_lang)
        # 2) encoding: run source end to get intermediate representation
mid, src_end_states = src_end.forward(x, src_padding_mask)
# 3) decoding: run target end to get output feature
out, tgt_end_states = tgt_end.reverse(mid, src_padding_mask)
# 4) target end -> y_emb
y = self.out_norm(out, ~src_padding_mask, lang=tgt_lang)
encoder_states = []
if return_all_hiddens:
encoder_states.append(x)
encoder_states.extend(src_end_states)
encoder_states.extend(tgt_end_states)
return EncoderOut(
encoder_out=y, # B x T x C
encoder_mid=mid, # B x T x C
encoder_padding_mask=src_padding_mask, # B x T
encoder_embedding=src_emb, # B x T x C
encoder_states=encoder_states, # List[B x T x C]
src_tokens=None,
src_lengths=None,
)
def reverse(self, **kwargs):
src_lang, tgt_lang = kwargs.pop('src_lang'), kwargs.pop('tgt_lang')
return self.forward(src_lang=tgt_lang, tgt_lang=src_lang, **kwargs)
class SimpleDecoder(TransformerDecoder):
def __init__(self, args, dictionary, embed_tokens):
super(TransformerDecoder, self).__init__(dictionary)
self.share_input_output_embed = args.share_decoder_input_output_embed
embed_dim = args.encoder_embed_dim
self.embed_dim = embed_dim
self.output_embed_dim = self.embed_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
def forward(self, normalize, encoder_out, prev_output_tokens=None, step=0, **unused):
features = encoder_out.encoder_out
decoder_out = self.output_layer(features)
return F.log_softmax(decoder_out, -1) if normalize else decoder_out
def output_layer(self, features):
"""Project features to the vocabulary size."""
return self.output_projection(features)
def max_positions(self):
"""Maximum output length supported by the decoder."""
return self.max_target_positions
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
# args.encoder_normalize_before = getattr(
# args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(
args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(
args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
# REDER specific
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.self_attention_type = getattr(args, "self_attention_type", "relative")
args.upsampling = getattr(args, "upsampling", 2)
args.ctc_loss = getattr(args, 'ctc_loss', True)
@register_model_architecture("mREDER", "mREDER_wmt_en_de")
def REDER_wmt_en_de(args):
base_architecture(args)
@register_model_architecture("mREDER", "mREDER_wmt_en_de_big")
def REDER_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
base_architecture(args)
@register_model_architecture("mREDER", "mREDER_iwslt_de_en")
def rev_nat_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.3)
base_architecture(args)
|
11554455
|
from ..tweet_sentiment_classifier import Classifier
import numpy as np
import pickle as pkl
import json
import os
import tensorflow_hub as hub
import tensorflow as tf
try:
import bert
FullTokenizer = bert.bert_tokenization.FullTokenizer
except ImportError:
    raise ImportError('Issue loading bert-for-tf2, please ensure it is installed')
try:
import tensorflow_hub
except ImportError:
    raise ImportError('Issue loading tensorflow_hub, please ensure it is installed')
from tensorflow.keras.models import Model
import math
from random import choice
import pandas as pd
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
def get_masks(tokens, max_seq_len):
"""Mask for padding"""
if len(tokens) > max_seq_len:
raise IndexError("Token length more than max seq length!")
return [1] * len(tokens) + [0] * (max_seq_len - len(tokens))
def get_segments(tokens, max_seq_len):
"""Segments: 0 for the first sequence, 1 for the second"""
if len(tokens) > max_seq_len:
raise IndexError("Token length more than max seq length!")
segments = []
current_segment_id = 0
for token in tokens:
segments.append(current_segment_id)
if token == "[SEP]":
current_segment_id = 1
return segments + [0] * (max_seq_len - len(tokens))
def get_ids(tokens, tokenizer, max_seq_len):
"""Token ids from Tokenizer vocab"""
token_ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = token_ids + [0] * (max_seq_len - len(token_ids))
return input_ids
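# Illustrative sketch of the three helpers above (token values assumed):
#   tokens = ["[CLS]", "hello", "world", "[SEP]"]
#   get_masks(tokens, 6)          -> [1, 1, 1, 1, 0, 0]
#   get_segments(tokens, 6)       -> [0, 0, 0, 0, 0, 0]
#   get_ids(tokens, tokenizer, 6) -> the token ids, zero-padded to length 6
# get_segments only switches to segment 1 for tokens *after* a "[SEP]", so a
# single-sentence input stays all zeros.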
class BERT_Model(Classifier):
def __init__(self, model="bert_en_uncased_L-12_H-768_A-12/1", max_length=48, patience=10, early_stopping=True,
validation_split=0.2, max_iter=500, bootstrap=1,
batch_size=32, accuracy=0, activ='sigmoid', optimizer=tf.keras.optimizers.Adam,
learning_rate=1E-4, finetune_embeddings=True, hidden_neurons=0, **kwargs):
self.type = 'BERT_Model'
self.package = 'twitter_nlp_toolkit.tweet_sentiment_classifier.models.bert_models'
self.model = model
self.max_length = max_length
self.bootstrap = bootstrap
self.early_stopping = early_stopping
self.patience = patience
self.validation_split = validation_split
self.max_iter = max_iter
self.batch_size = batch_size
self.accuracy = accuracy
self.activ = activ
        self.optimizer = optimizer
self.learning_rate = learning_rate
self.finetune_embeddings = finetune_embeddings
self.hidden_neurons = hidden_neurons
self.loss = 'binary_crossentropy'
self.tokenizer, self.vocab_file, self.classifier = None, None, None
input_word_ids = tf.keras.layers.Input(shape=(self.max_length,), dtype=tf.int32,
name="input_word_ids")
input_mask = tf.keras.layers.Input(shape=(self.max_length,), dtype=tf.int32,
name="input_mask")
segment_ids = tf.keras.layers.Input(shape=(self.max_length,), dtype=tf.int32,
name="segment_ids")
bert_layer = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1",
trainable=self.finetune_embeddings)
pooled_output, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
bert_model = tf.keras.Model(inputs=[input_word_ids, input_mask, segment_ids],
outputs=[pooled_output, sequence_output])
        if self.hidden_neurons != 0:
hidden_layer = tf.keras.layers.Dense(units=self.hidden_neurons, kernel_initializer=tf.keras.initializers.glorot_uniform(seed=1),
activation='relu')(bert_model.outputs[0])
dropout_layer = tf.keras.layers.Dropout(0.25)(hidden_layer)
output_layer = tf.keras.layers.Dense(units=1,
kernel_initializer=tf.keras.initializers.glorot_uniform(seed=1),
activation=self.activ)(dropout_layer)
else:
output_layer = tf.keras.layers.Dense(units=1,
kernel_initializer=tf.keras.initializers.glorot_uniform(seed=1),
activation=self.activ)(bert_model.outputs[0])
self.classifier = tf.keras.Model(inputs=bert_model.inputs, outputs=output_layer)
        self.classifier.compile(loss=self.loss, optimizer=self.optimizer(learning_rate=self.learning_rate, clipnorm=1), metrics=['acc'])
self.vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
self.tokenizer = FullTokenizer(self.vocab_file, do_lower_case)
def preprocess(self, tweets, verbose=True):
sequences = ([["[CLS]"] + self.tokenizer.tokenize(tweet) + ["[SEP]"] for tweet in tweets])
input_ids = []
input_masks = []
input_segments = []
i = 0
"""
Tokenize data
"""
for sequence in sequences:
# TODO this is very slow, see if it can be sped up
if verbose and i % 100 == 0:
print('\r Processing tweet {}'.format(i), end=' ')
if len(sequence) > self.max_length:
                # keep the tail of the sequence; note this yields
                # max_length - 1 tokens and drops the trailing "[SEP]"
                sequence = sequence[-self.max_length:-1]
input_ids.append(get_ids(sequence, self.tokenizer, self.max_length))
input_masks.append(get_masks(sequence, self.max_length))
input_segments.append(get_segments(sequence, self.max_length))
i = i + 1
print('Preprocessed {} tweets'.format(i))
return [np.array(input_ids), np.array(input_masks), np.array(input_segments)]
def fit(self, train_data, y, weights=None, **kwargs):
if 1 < self.bootstrap < len(y):
train_data, y, weights = resample(train_data, y, weights, n_samples=self.bootstrap, stratify=y,
replace=False)
elif self.bootstrap < 1:
n_samples = int(self.bootstrap * len(y))
train_data, y, weights = resample(train_data, y, weights, n_samples=n_samples, stratify=y,
replace=False)
trainX = self.preprocess(train_data)
trainy = np.array(y)
if weights is None:
weights = np.ones(len(y))
es = []
if self.early_stopping:
es.append(
tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=self.patience))
print('Fitting BERT classifier')
history = self.classifier.fit(trainX, trainy, sample_weight=weights, epochs=self.max_iter, batch_size=self.batch_size,
verbose=1, validation_split=self.validation_split, callbacks=es, steps_per_epoch=100000, validation_steps=10000)
self.accuracy = np.max(history.history['val_acc'])
return history
def predict_proba(self, data, **kwargs):
X = self.preprocess(data, verbose=False)
predictions = self.classifier.predict(X, **kwargs)
return predictions
def refine(self, train_data, y, weights=None, bootstrap=True, **kwargs):
if (bootstrap and 1 < self.bootstrap < len(y)):
train_data, y, weights = resample(train_data, y, weights, n_samples=self.bootstrap, stratify=y,
replace=False)
elif (bootstrap and self.bootstrap < 1):
n_samples = int(self.bootstrap * len(y))
train_data, y, weights = resample(train_data, y, weights, n_samples=n_samples, stratify=y,
replace=False)
trainX = self.preprocess(train_data)
trainy = np.array(y)
if weights is None:
weights = np.ones(len(y))
es = []
if self.early_stopping:
es.append(
tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=self.patience))
print('Fitting BERT classifier')
history = self.classifier.fit(trainX, trainy, sample_weight=weights, epochs=self.max_iter,
batch_size=self.batch_size,
verbose=1, validation_split=self.validation_split, callbacks=es)
self.accuracy = np.max(history.history['val_acc'])
return history
def predict(self, data, **kwargs):
predictions = self.predict_proba(data)
return np.round(predictions, **kwargs)
def export(self, filename):
"""
Saves the classifier to disk
:param filename: (String) Path to file
"""
parameters = {'Classifier': self.type,
'package': self.package,
'bootstrap': self.bootstrap,
'early_stopping': self.early_stopping,
'validation_split': float(self.validation_split),
'patience': int(self.patience),
'max_iter': int(self.max_iter),
'max_length': int(self.max_length),
'activ': self.activ,
'batch_size': self.batch_size,
'accuracy': float(self.accuracy),
}
if parameters['bootstrap'] < 1:
parameters['bootstrap'] = float(parameters['bootstrap'])
else:
parameters['bootstrap'] = int(parameters['bootstrap'])
os.makedirs(filename, exist_ok=True)
with open(filename + '/param.json', 'w+') as outfile:
json.dump(parameters, outfile)
        # only the weights need saving: load_model() rebuilds the
        # architecture in __init__ before reloading them
        self.classifier.save_weights(filename + "/bert_model.h5")
def load_model(self, filename):
"""
Load a model from the disc
:param filename: (String) Path to file
"""
self.classifier.load_weights(filename + '/bert_model.h5')
self.classifier.compile(loss='binary_crossentropy',
                                optimizer=self.optimizer(learning_rate=self.learning_rate),
metrics=['acc'])
|
11554515
|
import os,rootpath
rootpath.append(pattern='main.py')  # add the directory containing main.py to sys.path
import pprint
from kivy.app import App
from kivy.lang import Builder
from kivy.properties import ObjectProperty,DictProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.textinput import TextInput
from kivy.logger import Logger
class TextViewer(BoxLayout):
"""docstring for TextViewer."""
data=DictProperty()
bundle_dir = rootpath.detect(pattern='main.py') # Obtain the dir of main.py
# Builder.load_file(bundle_dir +os.sep+'ui'+os.sep+'demo.kv')
def __init__(self):
super(TextViewer, self).__init__()
self.text_input=TextInput(id='text_area',readonly=True)
self.add_widget(self.text_input)
self.bind(data=self.refresh)
def refresh(self,*args):
self.text_input.text = pprint.pformat(self.data)
class Test(App):
"""docstring for Test."""
data=ObjectProperty()
plugins=DictProperty()
def __init__(self):
super(Test, self).__init__()
def build(self):
demo=TextViewer()
tup = ('spam', ('eggs', ('lumberjack', ('knights', ('ni', ('dead',('parrot', ('fresh fruit',))))))))
demo.data={'type':'img','content':'abcdefg','children': ['a' * 10, tup, ['a' * 30, 'b' * 30], ['c' * 20, 'd' * 20]]}
return demo
if __name__ == '__main__':
Test().run()
|
11554602
|
people = ['Nick', 'Rick', 'Roger', 'Syd']
ages = [23, 24, 23, 21]
for person, age in zip(people, ages):
print(person, age)
|
11554611
|
import glob
import json
import os
from notebook.utils import url_path_join as ujoin
from tornado.web import StaticFileHandler, RequestHandler
from . import zotero_oauth
def find_zotero_styles_dir():
pattern = os.path.expanduser('~/.zotero/zotero/*/zotero/styles/')
candidates = glob.glob(pattern)
if not candidates:
return None
for c in candidates:
if '.default' in c:
return c
return candidates[0]
class ListStylesHandler(RequestHandler):
def initialize(self, path):
self.path = path
def get(self):
files = [f for f in os.listdir(self.path) if f.endswith('.csl')]
self.finish(json.dumps(files))
def load_jupyter_server_extension(nbapp):
webapp = nbapp.web_app
base_url = webapp.settings['base_url']
zsd = find_zotero_styles_dir()
if zsd:
webapp.add_handlers(".*$", [
(ujoin(base_url, r"/cite2c/styles/?"), ListStylesHandler,
{'path': zsd}),
(ujoin(base_url, r"/cite2c/styles/(.+)"), StaticFileHandler,
{'path': zsd}),
])
else:
nbapp.log.warning('Could not find Zotero citation styles directory.')
webapp.add_handlers(".*$", zotero_oauth.handlers(base_url))
|
11554643
|
import torch
import torch.nn as nn
import numpy as np
from models.language import RNNEncoder, ModuleInputAttention
from models.modules import AttendRelationModule, AttendLocationModule, AttendNodeModule
from models.modules import MergeModule, TransferModule, NormAttnMap
class SGReason(nn.Module):
def __init__(self, opt):
super(SGReason, self).__init__()
# language
self.seq_encoder = RNNEncoder(vocab_size=opt['vocab_size'],
word_embedding_size=opt['word_embedding_size'],
hidden_size=opt['rnn_hidden_size'],
bidirectional=opt['bidirectional'] > 0,
input_dropout_p=opt['word_drop_out'],
dropout_p=opt['rnn_drop_out'],
n_layers=opt['rnn_num_layers'],
rnn_type=opt['rnn_type'],
variable_lengths=opt['variable_lengths'] > 0,
pretrain=True)
dim_word_emb = opt['word_embedding_size']
dim_word_cxt = opt['rnn_hidden_size'] * (2 if opt['bidirectional'] else 1)
# judge module weight for seq (node, relation, location)
self.weight_module_spo = nn.Sequential(nn.Linear(dim_word_cxt, 3),
nn.Sigmoid())
# module input attention
self.node_input_encoder = ModuleInputAttention(dim_word_cxt)
self.relation_input_encoder = ModuleInputAttention(dim_word_cxt)
self.location_input_encoder = ModuleInputAttention(dim_word_cxt)
self.obj_input_encoder = ModuleInputAttention(dim_word_cxt)
dim_vis_feat = opt['dim_input_vis_feat']
# module
self.node_module = AttendNodeModule(dim_vis_feat, opt['vis_init_norm'], opt['jemb_dim'],
dim_word_emb, opt['jemb_drop_out'])
self.relation_module = AttendRelationModule(dim_vis_feat, opt['vis_init_norm'], opt['jemb_dim'],
dim_word_emb, opt['jemb_drop_out'])
self.location_module = AttendLocationModule(opt['vis_init_norm'], opt['jemb_dim'],
dim_word_emb, opt['jemb_drop_out'])
self.min_value, self.max_value = -1, 1
self.sum_module = MergeModule()
self.sum_relation_module = TransferModule()
self.elimination = opt['elimination']
self.norm_fun = NormAttnMap()
self.need_location = False # expressions in Ref-Reasoning do not describe the absolute location
def forward(self, feature, cls, lfeat,
seq, seq_weight, seq_type, seq_rel, com_mask,
cxt_idx, cxt_idx_mask, cxt_lfeats):
''' language seq: seq(bs, num_seq, len_sent); seq_type(bs, num_seq){-1: None, 0: SPO, 1: S, 2:ALL};
seq_rel(bs, num_seq, num_seq){-1:None, 0:SS, 1:SO, 2:OS, 3:OO}
'''
bs, num_seq = seq.size(0), seq.size(1)
n = feature.size(1)
# cxt_feats (bs, n, 5, dim_feat)
num_cxt = cxt_idx.size(2)
offset_idx = torch.tensor(np.array(range(bs)) * n, requires_grad=False).cuda().long()
offset_idx = offset_idx.unsqueeze(1).unsqueeze(2).expand(bs, n, num_cxt)
cxt_feats = torch.index_select(feature.view(bs*n, -1), 0, (offset_idx+cxt_idx).view(-1))
cxt_feats = cxt_feats.view(bs, n, num_cxt, -1) * cxt_idx_mask.unsqueeze(3).float()
context, hidden, embeded, max_length = self.seq_encoder(seq.view(bs*num_seq, -1))
seq = seq[:, :, 0:max_length]
seq_weight = seq_weight[:, :, 0:max_length]
context = context.view(bs, num_seq, max_length, -1)
hidden = hidden.view(bs, num_seq, -1)
embeded = embeded.view(bs, num_seq, max_length, -1)
real_num_seq = torch.sum((seq_type != -1).float(), dim=1).long()
max_num_seq = torch.max(real_num_seq)
# module weights of each seq
weights_spo = self.weight_module_spo(hidden) # bs, num_seq, 3
weights_spo_expand = weights_spo.unsqueeze(2).expand(bs, num_seq, n, 3)
# attn each part
if self.elimination:
input_labels = (seq != 0).float() * (seq_weight == 1).float()
else:
input_labels = (seq != 0).float()
node_input_emb, node_input_attn = self.node_input_encoder(context, embeded, input_labels) # bs, num_seq, dim_word_embed
attn_node = self.node_module(feature, node_input_emb, cls) # bs, num_seq, n
relation_input_emb, relation_input_attn = self.relation_input_encoder(context, embeded, input_labels) # bs, num_seq, dim_word_embed
attn_relation = self.relation_module(cxt_feats, cxt_lfeats, relation_input_emb) # bs, num_seq, n, num_cxt
location_input_emb, location_input_attn = self.location_input_encoder(context, embeded, input_labels)
attn_location = self.location_module(lfeat, location_input_emb, cls) # bs, num_seq, n
obj_input_emb, obj_input_attn = self.obj_input_encoder(context, embeded, input_labels)
attn_obj = self.node_module(feature, obj_input_emb, cls)
global_sub_attn_map = torch.zeros((bs, num_seq, n), requires_grad=False).float().cuda()
global_obj_attn_map = torch.zeros((bs, num_seq, n), requires_grad=False).float().cuda()
for i in range(max_num_seq):
clone_global_sub_attn_map = global_sub_attn_map.clone()
clone_global_obj_attn_map = global_obj_attn_map.clone()
# seq type: S
s_attn_node_iter = weights_spo_expand[:, i, :, 0] * attn_node[:, i, :]
if self.need_location:
s_attn_location_iter = weights_spo_expand[:, i, :, 1] * attn_location[:, i, :]
s_attn_iter_s = s_attn_node_iter + s_attn_location_iter
s_attn_iter_o = s_attn_node_iter + s_attn_location_iter
s_attn_iter_s, s_attn_iter_s_norm = self.norm_fun(s_attn_iter_s)
s_attn_iter_o, s_attn_iter_o_norm = self.norm_fun(s_attn_iter_o)
else:
s_attn_iter_s = s_attn_node_iter
s_attn_iter_o = s_attn_iter_s
s_attn_iter_s, s_attn_iter_s_norm = self.norm_fun(s_attn_iter_s)
s_attn_iter_o, s_attn_iter_o_norm = self.norm_fun(s_attn_iter_o)
# seq type: SPO
spo_attn_node_iter = s_attn_node_iter
if self.need_location:
spo_attn_location_iter = s_attn_location_iter
spo_attn_relation, spo_attn_obj = self.sum_relation_module(attn_relation[:, i, :, :], cxt_idx,
clone_global_sub_attn_map,
(seq_rel[:, i, :] == 2).float(),
clone_global_obj_attn_map,
(seq_rel[:, i, :] == 3).float(),
attn_obj = attn_obj[:, i, :])
spo_attn_relation_iter = weights_spo_expand[:, i, :, 2] * spo_attn_relation
if self.need_location:
spo_attn_iter_s = spo_attn_node_iter + spo_attn_location_iter + spo_attn_relation_iter
spo_attn_iter_s, spo_attn_iter_s_norm = self.norm_fun(spo_attn_iter_s)
spo_attn_iter_o = spo_attn_obj * (seq_type[:, i] == 0).float().unsqueeze(1).expand(bs, n)
else:
spo_attn_iter_s = spo_attn_node_iter + spo_attn_relation_iter
spo_attn_iter_s, spo_attn_iter_s_norm = self.norm_fun(spo_attn_iter_s)
spo_attn_iter_o = spo_attn_obj * (seq_type[:, i] == 0).float().unsqueeze(1).expand(bs, n)
# combine
seq_type_s_expand = (seq_type[:, i] == 1).float().unsqueeze(1).expand(bs, n)
seq_type_spo_expand = (seq_type[:, i] == 0).float().unsqueeze(1).expand(bs, n)
attn_iter_s = s_attn_iter_s * seq_type_s_expand + spo_attn_iter_s * seq_type_spo_expand
attn_iter_o = s_attn_iter_o * seq_type_s_expand + spo_attn_iter_o * seq_type_spo_expand
# after rel with sub
attn_iter_s = self.sum_module(attn_iter_s, clone_global_sub_attn_map, clone_global_obj_attn_map,
(seq_rel[:, i, :] == 0).float(), (seq_rel[:, i, :] == 1).float())
attn_iter_s[(seq_type[:, i] == -1).unsqueeze(1).expand(bs, n)] = self.min_value
attn_iter_o[(seq_type[:, i] == -1).unsqueeze(1).expand(bs, n)] = self.min_value
attn_iter_s[(seq_type[:, i] == 2).unsqueeze(1).expand(bs, n)] = 0
attn_iter_o[(seq_type[:, i] == 2).unsqueeze(1).expand(bs, n)] = 0
global_sub_attn_map[:, i, :] = attn_iter_s
global_obj_attn_map[:, i, :] = attn_iter_o
com_mask_expand = (com_mask == 1).unsqueeze(2).expand(bs, num_seq, n).float()
score = torch.sum(com_mask_expand * global_sub_attn_map, dim=1)
return score
|
11554653
|
from my_lib import Object
import os
from my_lib import Object3
from my_lib import Object2
import sys
from third_party import lib15, lib1, lib2, lib3, lib4, lib5, lib6, lib7, lib8, lib9, lib10, lib11, lib12, lib13, lib14
import sys
from __future__ import absolute_import
from third_party import lib3
print("Hey")
print("yo")
|
11554686
|
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow
from tensorflow.python.framework import tensor_util
from tensorflow.core.framework import attr_value_pb2
import sys
from ox.tensorflow.tensorflow_graph import *
import ox.common.IR.graph_pb2 as graph_pb2
from ox.rewriter.graph_matcher import *
from ox.common.DataStructure import *
from tensorflow.core.framework.node_def_pb2 import NodeDef
from ox.rewriter.rnn_utils import *
class UnitRewriterBase(object):
def __init__(self, graph, weights_dict):
self._graph = graph
self._weights_dict = weights_dict
def _rewrite_graph_by_pattern(self, pattern_name, graph_type):
pattern = rnn_patterns[graph_type][pattern_name]
matcher = GraphMatcher(pattern)
match_results = list(matcher.match_ops(self._graph.get_nodes()))
scope_names_dict = dict() # name: No.
for i in range(len(match_results)):
result = match_results[i]
top_pattern_name = pattern_name + '_' + str(i)
top_pattern = result._name_to_pattern[pattern_name]
self.create_scope(result, top_pattern, scope_names_dict)
top_op = result._pattern_to_op[top_pattern]
top_op.scope = top_op.scope + '/top'
# self.store_const_to_top(result)
# self.set_top_node_prop(result, pattern_name)
self.process_match_result(result, pattern_name)
def rewrite_graph(self, pattern_names, graph_type):
from six import string_types as _string_types
if isinstance(pattern_names, _string_types):
pattern_names = [pattern_names]
elif not isinstance(pattern_names, list):
raise ValueError
for pattern_name in pattern_names:
self._rewrite_graph_by_pattern(pattern_name, graph_type)
def run(self, pattern_names, graph_type):
self.rewrite_graph(pattern_names, graph_type)
def store_const_to_top(self, match_result):
top_node = list(match_result._pattern_to_op.values())[0]
kwargs = dict()
for pattern, op in match_result._pattern_to_op.items():
if pattern.name and pattern.type == 'Const':
if tensor_util.MakeNdarray(op.get_attr('value')).shape == (1, ):
kwargs[pattern.name] = np.asscalar(tensor_util.MakeNdarray(op.get_attr('value')))
else:
kwargs[pattern.name] = np.squeeze(tensor_util.MakeNdarray(op.get_attr('value')))
if hasattr(top_node, 'kwargs'):
top_node.kwargs.update(kwargs)
else:
top_node.kwargs = kwargs
def create_scope(self, result, pattern, scope_names_dict, parent_scope_name=''):
op = result._pattern_to_op[pattern]
if pattern.name:
# Do not include input op.
if 'input' in pattern.name.split('/')[-1]:
return
else:
no = scope_names_dict.get(pattern.name, 0)
scope_names_dict[pattern.name] = no + 1
if parent_scope_name:
current_scope_name = '/'.join([parent_scope_name, pattern.name]) + '_' + str(no)
else:
current_scope_name = pattern.name + '_' + str(no)
else:
current_scope_name = parent_scope_name
op.scope = current_scope_name
for sub_pattern in pattern.inputs:
self.create_scope(result, sub_pattern, scope_names_dict, current_scope_name)
def set_top_node_prop(self, match_result):
raise NotImplementedError
def process_match_result(self, match_result, pattern_name):
raise NotImplementedError
|
11554689
|
from time import strptime, mktime, gmtime
# both helpers return whole minutes since the epoch; note that now() feeds
# the UTC struct from gmtime() into mktime(), which expects local time, so
# the difference is only exact when the local timezone is UTC
now = lambda: mktime(gmtime()) // 60
stamp = lambda x: mktime(strptime(x, '%d.%m.%Y %H:%M:%S')) // 60
print(int(now() - stamp('30.11.2017 22:00:58')))
|
11554705
|
from __future__ import print_function
# Object Registration
testutil.enable_extensible()
#@ Registration from C++, the parent class
\? testutil
#@ Registration from C++, a test module
\? sample_module_p_y
#@ Registration from C++, register_module function
\? register_module
#@ Registration from C++, register_function function
\? register_function
#@ Register, function(string)
def f1(data):
print("Python Function Definition: ", data)
shell.add_extension_object_member(testutil.sample_module_p_y, "stringFunction", f1,
{
"brief":"Brief description for stringFunction.",
"details": ["Detailed description for stringFunction"],
"parameters":
[
{
"name": "data",
"type": "string",
"brief": "Brief description for string parameter.",
"details": ["Detailed description for string parameter."],
"values": ["one", "two", "three"]
}
]
});
#@ Module help, function(string)
\? sample_module_p_y
#@ Help, function(string)
\? sample_module_p_y.string_function
#@ Usage, function(string)
testutil.sample_module_p_y.string_function(5)
testutil.sample_module_p_y.string_function("whatever")
testutil.sample_module_p_y.string_function("one")
#@ Register, function(dictionary)
def f2(data=None):
if data:
try:
print("Function data: ", data.myOption)
except IndexError:
print("Full data:", data)
else:
print("No function data available")
shell.add_extension_object_member(testutil.sample_module_p_y, "dictFunction", f2,
{
"brief":"Brief definition for dictFunction.",
"details": ["Detailed description for dictFunction"],
"parameters":
[
{
"name": "data",
"type": "dictionary",
"brief": "Short description for dictionary parameter.",
"required": False,
"details": ["Detailed description for dictionary parameter."],
"options": [
{
"name": "myOption",
"brief": "A sample option",
"type": "string",
"details": ["Details for the sample option"],
"values": ["test", "value"],
"required": True
}
]
}
]
});
#@ Module help, function(dictionary)
\? sample_module_p_y
#@ Help, function(dictionary)
\? sample_module_p_y.dict_function
#@ Usage, function(dictionary)
testutil.sample_module_p_y.dict_function({})
testutil.sample_module_p_y.dict_function({"someOption": 5})
testutil.sample_module_p_y.dict_function({"myOption": 5})
testutil.sample_module_p_y.dict_function({"myOption": "whatever"})
testutil.sample_module_p_y.dict_function()
testutil.sample_module_p_y.dict_function({"myOption": "test"})
#@ Register, function(dictionary), no option validation
shell.add_extension_object_member(testutil.sample_module_p_y, "freeDictFunction", f2,
{
"brief":"Brief definition for dictFunction.",
"details": ["Detailed description for dictFunction"],
"parameters":
[
{
"name": "data",
"type": "dictionary",
"brief": "Dictinary containing anything.",
"required": False,
"details": ["Detailed description for dictionary parameter."],
}
]
});
#@ Usage, function(dictionary), no option validation
testutil.sample_module_p_y.free_dict_function({})
testutil.sample_module_p_y.free_dict_function({"someOption": 5})
testutil.sample_module_p_y.free_dict_function({"myOption": 5})
testutil.sample_module_p_y.free_dict_function({"myOption": "whatever"})
testutil.sample_module_p_y.free_dict_function()
testutil.sample_module_p_y.free_dict_function({"myOption": "test"})
#@ Register, function(Session)
def f3(data):
print("Active Session:", data)
shell.add_extension_object_member(testutil.sample_module_p_y, "objectFunction1", f3,
{
"brief":"Brief definition for objectFunction.",
"details": ["Detailed description for objectFunction"],
"parameters":
[
{
"name": "session",
"type": "object",
"brief": "Short description for object parameter.",
"details": ["Detailed description for object parameter."],
"class": "Session"
}
]
});
#@ Module help, function(Session)
\? sample_module_p_y
#@ Help, function(Session)
\? sample_module_p_y.object_function1
#@ Usage, function(Session)
shell.connect(__mysqluripwd)
testutil.sample_module_p_y.object_function1(session)
session.close()
shell.connect(__uripwd)
testutil.sample_module_p_y.object_function1(session)
session.close()
#@ Register, function(Session and ClassicSession)
def f4(data):
print("Active Session:", data)
shell.add_extension_object_member(testutil.sample_module_p_y, "objectFunction2", f4,
{
"brief":"Brief definition for objectFunction.",
"details": ["Detailed description for objectFunction"],
"parameters":
[
{
"name": "session",
"type": "object",
"brief": "Short description for object parameter.",
"details": ["Detailed description for object parameter."],
"classes": ["Session", "ClassicSession"]
}
]
});
#@ Module help, function(Session and ClassicSession)
\? sample_module_p_y
#@ Help, function(Session and ClassicSession)
\? sample_module_p_y.object_function2
#@ Usage, function(Session and ClassicSession)
shell.connect(__mysqluripwd)
testutil.sample_module_p_y.object_function2(session)
session.close()
shell.connect(__uripwd)
testutil.sample_module_p_y.object_function2(session)
session.close()
#@ Registration errors, function definition
def f5(whatever):
pass
shell.add_extension_object_member("object", "function", f5);
shell.add_extension_object_member(shell, "function", f5);
shell.add_extension_object_member(testutil.sample_module_p_y, 25, f5);
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"brief":"Brief definition for function.",
"details": ["Detailed description for function"],
"extra": "This will cause a failure"
});
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"brief":5,
"details": ["Detailed description for function"]
});
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"brief":"Brief definition for function.",
"details": 45,
});
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"brief":"Brief definition for function.",
"details": ["Detailed description for function", 34],
});
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"brief":"Brief definition for function.",
"details": ["Detailed description for function"],
"parameters":34
});
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"brief":"Brief definition for function.",
"details": ["Detailed description for function"],
"parameters": [23]
});
#@ Registration errors, parameters
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"parameters":[{}]
});
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"parameters":[
{
"name": "sample",
"type": 5,
}]
});
#@ Registration errors, integer parameters
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"parameters":[
{
"name": "sample",
"type": "integer",
"class": "unexisting",
"classes": [],
"options": [],
"values": [1,2,3]
}]
});
#@ Registration errors, float parameters
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"parameters":[
{
"name": "sample",
"type": "float",
"class": "unexisting",
"classes": [],
"options": [],
"values": [1,2,3]
}]
});
#@ Registration errors, bool parameters
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"parameters":[
{
"name": "sample",
"type": "bool",
"class": "unexisting",
"classes": [],
"options": [],
"values": [1,2,3]
}]
});
#@ Registration errors, string parameters
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"parameters":[
{
"name": "sample",
"type": "string",
"class": "unexisting",
"classes": [],
"options":[]
}]
});
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"parameters":[
{
"name": "sample",
"type": "string",
"values": 5
}]
});
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"parameters":[
{
"name": "sample",
"type": "string",
"values": [5]
}]
});
#@ Registration errors, dictionary parameters
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"parameters":[
{
"name": "sample",
"type": "dictionary",
"class": "unexisting",
"classes": [],
"values": []
}]
});
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"parameters":[
{
"name": "sample",
"type": "dictionary",
"options": [45]
}]
});
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"parameters":[
{
"name": "sample",
"type": "dictionary",
"options": [{}]
}]
});
#@ Registration errors, invalid identifiers
testutil.register_module("testutil", "my module")
shell.add_extension_object_member(testutil.sample_module_p_y, "my function", f5);
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"parameters":[
{
"name": "a sample",
"type": "string",
}]
});
shell.add_extension_object_member(testutil.sample_module_p_y, "function", f5,
{
"parameters":[
{
"name": "sample",
"type": "dictionary",
"options": [{
"name":'an invalid name',
"type":'string'
}]
}]
});
def f6(data):
print("Some random data:", data)
|
11554784
|
import torch
from mmdet.models import DETECTORS
from .two_stage import TwoStage3DDetector
@DETECTORS.register_module()
class BRNet(TwoStage3DDetector):
def __init__(self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None
):
super(BRNet, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained
)
def forward_train(self,
points,
img_metas,
gt_bboxes_3d,
gt_labels_3d,
pts_semantic_mask=None,
pts_instance_mask=None,
gt_bboxes_ignore=None):
points_cat = torch.stack(points)
feats_dict = self.extract_feat(points_cat)
losses = dict()
if self.with_rpn:
rpn_outs = self.rpn_head(feats_dict, self.train_cfg.rpn.sample_mod)
feats_dict.update(rpn_outs)
rpn_loss_inputs = (points, gt_bboxes_3d, gt_labels_3d,
pts_semantic_mask, pts_instance_mask, img_metas)
rpn_losses = self.rpn_head.loss(
rpn_outs,
*rpn_loss_inputs,
gt_bboxes_ignore=gt_bboxes_ignore,
ret_target=True
)
feats_dict['targets'] = rpn_losses.pop('targets')
losses.update(rpn_losses)
# Generate rpn proposals
proposal_cfg = self.train_cfg.get('rpn_proposal', self.test_cfg.rpn)
proposal_inputs = (points, rpn_outs, img_metas)
proposal_list = self.rpn_head.get_bboxes(
*proposal_inputs, use_nms=proposal_cfg.use_nms
)
feats_dict['proposal_list'] = proposal_list
else:
raise NotImplementedError
roi_losses = self.roi_head.forward_train(
feats_dict, img_metas, points,
gt_bboxes_3d, gt_labels_3d,
pts_semantic_mask,
pts_instance_mask,
gt_bboxes_ignore
)
losses.update(roi_losses)
return losses
def simple_test(self, points, img_metas, imgs=None, rescale=None):
points_cat = torch.stack(points)
feats_dict = self.extract_feat(points_cat)
if self.with_rpn:
proposal_cfg = self.test_cfg.rpn
rpn_outs = self.rpn_head(feats_dict, proposal_cfg.sample_mod)
feats_dict.update(rpn_outs)
# Generate rpn proposals
proposal_list = self.rpn_head.get_bboxes(
points, rpn_outs, img_metas, use_nms=proposal_cfg.use_nms)
feats_dict['proposal_list'] = proposal_list
else:
raise NotImplementedError
return self.roi_head.simple_test(
feats_dict, img_metas, points_cat)
|
11554815
|
import csv
import random
from staticmap import StaticMap, CircleMarker
m = StaticMap(1000, 900, url_template='http://a.tile.stamen.com/toner/{z}/{x}/{y}.png')
def label_to_color(label):
alpha = 180
return {
'good': (0,153,102,alpha),
'moderate': (255,222,51,alpha),
'unhealthy for sensitive': (255,153,51,alpha),
'unhealthy': (204,0,51,alpha),
'very unhealthy': (102,0,153,alpha),
'hazardous': (126,0,35,alpha)
}[label]
with open('data/airVis.csv') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
for row in reader:
marker = CircleMarker((float(row[1]), float(row[0])), label_to_color(row[2]), 6)
m.add_marker(marker)
image = m.render(zoom=6, center=[15.2793976568, 50.5197351804])
image.save('marker-k3-minkowski.png')
|
11554838
|
import boshdata
import struct
import io
import sys
BFCAP = 12
BFMASK = (2 ** BFCAP) - 1
def byte2_unpack(numtuple):
"little-endian! receives a tuple like (11, 4095) and return corrosponding bytes"
num = numtuple[0] | (numtuple[1] << 4)
# Packing unsigned short int, should be 2-byte
return struct.pack(b"<H", num)
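# Worked example: byte2_unpack((11, 4095)) computes 11 | (4095 << 4) = 0xFFFB
# and packs it little-endian, giving b'\xfb\xff'. The first index occupies the
# low 4 bits and the second the next BFCAP bits of the unsigned short.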
def byte2_decode(utf8string):
ret1, ret2, truncated = 0, 0, 0
# Bosh comes first
for i in range(0, len(boshdata.bosh)):
pattern = boshdata.bosh[i]
if utf8string.startswith(pattern):
ret1 = i
truncated += len(pattern)
utf8string = utf8string[len(pattern):]
break
# Fortune
for j in range(0, len(boshdata.fortunes)):
pattern = boshdata.fortunes[j]
if utf8string.startswith(pattern):
ret2 = j
truncated += len(pattern)
break
return byte2_unpack((ret1, ret2)), truncated
def decode_length_indicator(utf8seq):
"The length indicator is in little-endian."
rawbytes, truncated = byte2_decode(utf8seq)
datalen = struct.unpack("<H", rawbytes)[0]
return datalen, truncated
def utf8_decode(utf8data):
iobuffer = io.BytesIO(b"")
    # the second replace presumably targeted the full-width space (U+3000);
    # the two arguments were visually identical in the extracted source
    utf8data = utf8data.replace(" ", "").replace("\u3000", "")
utf8data = utf8data.replace("\r\n", "").replace("\n", "")
datalen, dl_trunc = decode_length_indicator(utf8data)
utf8data = utf8data[dl_trunc:]
while utf8data and iobuffer.tell() < datalen:
obytes, truncated = byte2_decode(utf8data)
        if truncated == 0:
            warnmsg = "WARNING: bad text since byte " + str(iobuffer.tell())
            sys.stderr.write(warnmsg + '\n')
            break  # nothing matched: stop instead of looping forever on the same input
        utf8data = utf8data[truncated:]
iobuffer.write(obytes)
iobuffer.seek(0)
return iobuffer.read(datalen)
if __name__ == "__main__":
import sys
#print(byte2_unpack((0x01, 0x626)))
#print(byte2_decode("带着这些问题, 我们来审视一下x。爱迪生曾经说过,天才是百分之一的勤奋加百分之九十九的汗水。这不禁令我深思。"))
desc_stdin = sys.stdin.fileno()
desc_stdout = sys.stdout.fileno()
new_in = open(desc_stdin, "rb", closefd=False)
st_in = b""
addn = new_in.read(1024)
while addn:
st_in += addn
addn = new_in.read(1024)
open(desc_stdout, "wb", closefd=False).write(utf8_decode(st_in.decode()))
#sys.stdout.write(bytes_encode(st_in))
|
11554893
|
import torch
from .base import CplxToCplx
from ... import cplx
class CplxDropout(torch.nn.Dropout2d, CplxToCplx):
r"""Complex 1d dropout layer: simultaneous dropout on both real and
imaginary parts.
See torch.nn.Dropout1d for reference on the input dimensions and arguments.
"""
def forward(self, input):
*head, n_last = input.shape
# shape -> [*shape, 2] : re-im are feature maps!
tensor = torch.stack([input.real, input.imag], dim=-1)
output = super().forward(tensor.reshape(-1, 1, 2))
# [-1, 1, 2] -> [*head, n_last * 2]
output = output.reshape(*head, -1)
# [*head, n_last * 2] -> [*head, n_last]
return cplx.from_interleaved_real(output, False, -1)
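# Usage sketch (hedged): reshaping the stacked real/imag pair to [-1, 1, 2]
# makes each complex element a single one-channel "image" for Dropout2d, so
# its real and imaginary parts are always zeroed together:
#   drop = CplxDropout(p=0.5)
#   out = drop(cplx_input)  # cplx_input: a cplx.Cplx tensor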
|
11554901
|
import json
from typing import Any, Mapping, Optional
import pulumi_consul as consul
import pulumi
class ConsulServiceDefault(pulumi.ComponentResource):
"""
Create a Service Default type of Consul Config Entry.
This is primarily for setting a non-default protocol
"""
def __init__(
self,
name: str,
service_name: str,
protocol: str,
additional_config_options: Mapping[Any, Any] = {},
opts: Optional[pulumi.ResourceOptions] = None,
) -> None:
super().__init__("grapl:ConsulServiceDefault", name, None, opts)
consul.ConfigEntry(
resource_name=f"{name}-{service_name}-service-defaults",
kind="service-defaults",
name=service_name,
config_json=json.dumps({"protocol": protocol, **additional_config_options}),
opts=pulumi.ResourceOptions.merge(
opts, pulumi.ResourceOptions(parent=self)
),
)
|
11554930
|
import torch
import torch.nn as nn
import math
from nets import base
class Net(nn.Module):
def __init__(self, in_channel, out_channel, n_colors=3, n_feats=64, n_resblocks=16, res_scale=0.1, scale=2):
super(Net, self).__init__()
self.conv_input = nn.Conv2d(in_channel, n_feats, kernel_size=3, stride=1, padding=1, bias=False)
self.downscale = nn.Sequential(
nn.Conv2d(n_feats, n_feats, kernel_size=4, stride=2, padding=1, bias=False),
nn.ReLU(True)
)
residual = [
base.Residual_Block(n_feats=n_feats, res_scale=res_scale) for _ in range(n_resblocks)
]
self.residual = nn.Sequential(*residual)
self.conv_mid = nn.Conv2d(n_feats, n_feats, kernel_size=3, stride=1, padding=1, bias=False)
self.upscale = nn.Sequential(
nn.Conv2d(n_feats, (scale**2)*n_feats, kernel_size=3, stride=1, padding=1, bias=True),
nn.PixelShuffle(scale),
)
self.conv_output = nn.Conv2d(n_feats, out_channel, kernel_size=3, stride=1, padding=1, bias=False)
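        # Kaiming/He initialization (assumed intent): std = sqrt(2 / n) with
        # n the fan-out of each conv keeps activation variance stable
        # through the ReLU layers.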
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
out = self.conv_input(x)
skip = out
out = self.downscale(out)
out = self.residual(out)
out = self.conv_mid(out)
out = self.upscale(out)
out += skip
out = self.conv_output(out)
return out
|
11554938
|
fh = open('mbox-short.txt')
#The 'mbox-short.txt' file can be downloaded from the link: https://www.py4e.com/code3/mbox-short.txt
total = 0
count = 0
for fx in fh:
    fx = fx.rstrip()
    if not fx.startswith("X-DSPAM-Confidence:"):
        continue
    fy = fx[19:]  # len("X-DSPAM-Confidence:") == 19, so this slices out the value
    count = count + 1
    total = total + float(fy)
print('Average spam confidence: ', total / count)
|
11554939
|
import json
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, Optional, Type, TypeVar
from rotkehlchen.accounting.cost_basis import CostBasisInfo
from rotkehlchen.accounting.mixins.event import AccountingEventType
from rotkehlchen.accounting.pnl import PNL
from rotkehlchen.assets.asset import Asset
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.fval import FVal
from rotkehlchen.history.deserialization import deserialize_price
from rotkehlchen.serialization.deserialize import deserialize_fval
from rotkehlchen.types import Location, Price, Timestamp
from rotkehlchen.utils.serialization import rlk_jsondumps
T = TypeVar('T', bound='ProcessedAccountingEvent')
@dataclass(init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False)
class ProcessedAccountingEvent:
"""An event after having been processed by accounting. This is what:
- Gets returned via the API
- Gets saved in the DB for saved reports
- Exported via CSV
"""
type: AccountingEventType
notes: str
location: Location
timestamp: Timestamp
asset: Asset
free_amount: FVal
taxable_amount: FVal
price: Price
pnl: PNL
cost_basis: Optional['CostBasisInfo']
index: int
# This is set only for some events to remember extra data that can be used later
# such as the transaction hash of an event
extra_data: Dict[str, Any] = field(default_factory=dict)
# These are set by calculate pnl and are only here to be remembered by the
# processed accounting event so that the CSV export formulas can be correctly made
count_entire_amount_spend: bool = field(init=False, default=False)
count_cost_basis_pnl: bool = field(init=False, default=False)
def to_string(self, ts_converter: Callable[[Timestamp], str]) -> str:
desc = f'{self.type.name} for {self.free_amount}/{self.taxable_amount} {self.asset.symbol} with price: {self.price} and PNL: {self.pnl}.' # noqa: E501
if self.cost_basis:
taxable, free = self.cost_basis.to_string(ts_converter)
desc += f'Cost basis. Taxable {taxable}. Free: {free}'
return desc
def to_exported_dict(
self,
ts_converter: Callable[[Timestamp], str],
eth_explorer: Optional[str],
for_api: bool,
) -> Dict[str, Any]:
"""These are the fields that will appear in CSV and report API
If `eth_explorer` is given then this is for exporting to CSV
If `for_api` is True then this is for exporting to the rest API
"""
exported_dict = {
'type': self.type.serialize(),
'notes': self.notes,
'location': str(self.location),
'timestamp': self.timestamp,
'asset': self.asset.identifier,
'free_amount': str(self.free_amount),
'taxable_amount': str(self.taxable_amount),
'price': str(self.price),
'pnl_taxable': str(self.pnl.taxable),
'pnl_free': str(self.pnl.free),
}
tx_hash = self.extra_data.get('tx_hash', None)
if eth_explorer:
taxable_basis = free_basis = ''
if self.cost_basis is not None:
taxable_basis, free_basis = self.cost_basis.to_string(ts_converter)
exported_dict['cost_basis_taxable'] = taxable_basis
exported_dict['cost_basis_free'] = free_basis
exported_dict['asset'] = str(self.asset)
if tx_hash:
exported_dict['notes'] = f'{eth_explorer}{tx_hash} -> {self.notes}'
else:
cost_basis = None
if self.cost_basis is not None:
cost_basis = self.cost_basis.serialize()
exported_dict['cost_basis'] = cost_basis
if for_api is True:
if tx_hash is not None:
exported_dict['notes'] = f'transaction {tx_hash} {self.notes}'
group_id = self.extra_data.get('group_id', None)
if group_id is not None:
exported_dict['group_id'] = group_id
return exported_dict
def serialize_to_dict(self, ts_converter: Callable[[Timestamp], str]) -> Dict[str, Any]:
"""This is used to serialize to dict for saving to the DB"""
data = self.to_exported_dict(
ts_converter=ts_converter,
eth_explorer=None,
for_api=False,
)
data['extra_data'] = self.extra_data
data['notes'] = self.notes # undo the tx_hash addition to notes before going to the DB
data['index'] = self.index
data['count_entire_amount_spend'] = self.count_entire_amount_spend
data['count_cost_basis_pnl'] = self.count_cost_basis_pnl
return data
def calculate_pnl(
self,
count_entire_amount_spend: bool,
count_cost_basis_pnl: bool,
) -> PNL:
"""Calculate PnL for this event and return it.
Only called for events that should have PnL counted
If count_entire_amount_spend is True then the entire amount is counted as a spend.
Which means an expense (negative pnl).
If count_cost_basis_pnl is True then the PnL between buying the asset amount
and spending it is calculated and added to PnL.
"""
self.count_entire_amount_spend = count_entire_amount_spend
self.count_cost_basis_pnl = count_cost_basis_pnl
taxable_bought_cost = taxfree_bought_cost = ZERO
taxable_value = self.taxable_amount * self.price
free_value = self.free_amount * self.price
self.pnl = PNL()
if count_entire_amount_spend:
# for fees and other types we also need to consider the entire amount as spent
self.pnl -= PNL(taxable=taxable_value + free_value, free=ZERO)
        if self.asset.is_fiat() or count_cost_basis_pnl is False:
            return self.pnl  # no cost basis pnl for fiat assets or when cost basis counting is disabled
if self.cost_basis is not None:
taxable_bought_cost = self.cost_basis.taxable_bought_cost
taxfree_bought_cost = self.cost_basis.taxfree_bought_cost
self.pnl += PNL(
taxable=taxable_value - taxable_bought_cost,
free=free_value - taxfree_bought_cost,
)
return self.pnl
def serialize_for_db(self, ts_converter: Callable[[Timestamp], str]) -> str:
"""May raise:
- DeserializationError if something fails during conversion to the DB tuple
"""
json_data = self.serialize_to_dict(ts_converter)
try:
string_data = rlk_jsondumps(json_data)
except (OverflowError, ValueError, TypeError) as e:
raise DeserializationError(
f'Could not dump json to string for NamedJson. Error was {str(e)}',
) from e
return string_data
@classmethod
def deserialize_from_db(cls: Type[T], timestamp: Timestamp, stringified_json: str) -> T:
"""May raise:
- DeserializationError if something is wrong with reading this from the DB
"""
try:
data = json.loads(stringified_json)
except json.decoder.JSONDecodeError as e:
raise DeserializationError(
f'Could not decode processed accounting event json from the DB due to {str(e)}',
) from e
try:
pnl_taxable = deserialize_fval(data['pnl_taxable'], name='pnl_taxable', location='processed event decoding') # noqa: E501
pnl_free = deserialize_fval(data['pnl_free'], name='pnl_free', location='processed event decoding') # noqa: E501
if data['cost_basis'] is None:
cost_basis = None
else:
cost_basis = CostBasisInfo.deserialize(data['cost_basis'])
event = cls(
type=AccountingEventType.deserialize(data['type']),
notes=data['notes'],
location=Location.deserialize(data['location']),
timestamp=timestamp,
asset=Asset(data['asset']),
free_amount=deserialize_fval(data['free_amount'], name='free_amount', location='processed event decoding'), # noqa: E501
taxable_amount=deserialize_fval(data['taxable_amount'], name='taxable_amount', location='processed event decoding'), # noqa: E501
price=deserialize_price(data['price']),
pnl=PNL(free=pnl_free, taxable=pnl_taxable),
cost_basis=cost_basis,
index=data['index'],
extra_data=data['extra_data'],
)
event.count_cost_basis_pnl = data['count_cost_basis_pnl']
event.count_entire_amount_spend = data['count_entire_amount_spend']
return event
except KeyError as e:
raise DeserializationError(f'Could not decode processed accounting event json from the DB due to missing key {str(e)}') from e # noqa: E501
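# Worked example (illustrative, not from the original source): for a sale where
# taxable_amount=1, free_amount=1 and price=100, with a cost basis of 60 for the
# taxable part and 50 for the free part, calculate_pnl(False, True) yields
#   taxable pnl = 1 * 100 - 60 = 40
#   free pnl    = 1 * 100 - 50 = 50
# With count_entire_amount_spend=True (as for fee events), PNL(taxable=200, free=0)
# would additionally be subtracted before the cost basis step.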
|
11555001
|
import pytest
import numpy as np
import pymaster as nmt
from .utils import normdiff
class BinTester(object):
def __init__(self):
self.nside = 1024
self.lmax = 2000
self.nlb = 4
self.bc = nmt.NmtBin(self.nside, nlb=4, lmax=self.lmax)
ells = np.arange(self.lmax - 4, dtype=int)+2
bpws = (ells - 2)//4
weights = 0.25*np.ones(self.lmax - 4)
fell = ells*(ells+1.)/(2*np.pi)
self.bv = nmt.NmtBin(nside=self.nside,
bpws=bpws, ells=ells,
weights=weights,
lmax=self.lmax)
self.bcf = nmt.NmtBin(nside=self.nside,
nlb=4, lmax=self.lmax,
is_Dell=True)
self.bvf1 = nmt.NmtBin(nside=self.nside,
bpws=bpws, ells=ells,
weights=weights,
lmax=self.lmax,
is_Dell=True)
self.bvf2 = nmt.NmtBin(nside=self.nside,
bpws=bpws, ells=ells,
weights=weights,
lmax=self.lmax,
f_ell=fell)
self.l_edges = np.arange(2, self.lmax+2, 4, dtype=int)
self.be = nmt.NmtBin.from_edges(self.l_edges[:-1], self.l_edges[1:])
BT = BinTester()
def test_bins_errors():
# Tests raised exceptions
ells = np.arange(BT.lmax - 4, dtype=int)+2
bpws = (ells - 2)//4
weights = 0.25*np.ones(BT.lmax - 4)
weights[16:20] = 0
with pytest.raises(RuntimeError):
nmt.NmtBin(nside=BT.nside,
bpws=bpws,
ells=ells,
weights=weights,
lmax=BT.lmax)
with pytest.raises(ValueError):
BT.bv.bin_cell(np.random.randn(3, 3, 3))
with pytest.raises(ValueError):
BT.bv.unbin_cell(np.random.randn(3, 3, 3))
with pytest.raises(KeyError):
nmt.NmtBin()
with pytest.raises(ValueError):
nmt.NmtBin(nlb=10)
with pytest.raises(KeyError):
nmt.NmtBin(nside=16, weights=1)
def test_bins_nell_list():
nlst = BT.be.get_nell_list()
assert len(nlst) == BT.be.get_n_bands()
assert (nlst == 4).all()
def test_bins_edges():
# Tests bandpowers generated from edges
assert BT.bc.get_n_bands() == BT.be.get_n_bands()
assert np.sum(np.fabs(BT.bc.get_effective_ells() -
BT.be.get_effective_ells())) < 1E-10
def test_min_max():
n = BT.be.get_n_bands()
assert (BT.be.get_ell_min(0) == BT.l_edges[0])
assert (BT.be.get_ell_max(0) == BT.l_edges[1] - 1)
assert (BT.be.get_ell_min(1) == BT.l_edges[1])
assert (BT.be.get_ell_max(1) == BT.l_edges[2] - 1)
assert (BT.be.get_ell_min(n - 1) == BT.l_edges[-2])
assert (BT.be.get_ell_max(n - 1) == BT.l_edges[-1] - 1)
def test_bins_constant():
# Tests constant bandpower initialization
assert (BT.bc.get_n_bands() == (BT.lmax - 2)//BT.nlb)
assert (BT.bc.get_ell_list(5)[2] == 2+BT.nlb*5+2)
b = nmt.NmtBin(nside=1024, nlb=4, lmax=2000)
assert (b.bin.ell_max == 2000)
def test_bins_variable():
# Tests variable bandpower initialization
assert (BT.bv.get_n_bands() == (BT.lmax - 2)//BT.nlb)
assert (BT.bv.get_n_bands() == BT.bc.get_n_bands())
for i in range(BT.bv.get_n_bands()):
ll1 = BT.bv.get_ell_list(i)
ll2 = BT.bc.get_ell_list(i)
wl1 = BT.bv.get_weight_list(i)
assert (ll1 == ll2).all()
assert np.fabs(np.sum(wl1) - 1.) < 1E-5
nbarr = np.arange(BT.bv.get_n_bands())
assert (normdiff(BT.bv.get_effective_ells(),
(2 + BT.nlb * nbarr +
0.5 * (BT.nlb - 1))) < 1E-5)
def test_unbin_from_edges():
bpw_edges = [0, 6, 12, 18, 24, 30, 36, 42, 48,
54, 60, 66, 72, 78, 84, 90, 96]
b = nmt.NmtBin.from_edges(bpw_edges[:-1], bpw_edges[1:])
cl_b = np.arange(len(bpw_edges)-1)
cl_u = b.unbin_cell(cl_b)
for i, (b1, b2) in enumerate(zip(bpw_edges[:-1],
bpw_edges[1:])):
assert np.all(cl_u[b1:b2] == i)
def test_bins_binning():
# Tests C_l binning and unbinning
cls = np.arange(BT.lmax+1, dtype=float)
cl_b = BT.bv.bin_cell(cls)
cl_u = BT.bv.unbin_cell(cl_b)
iend = 2+BT.nlb*((BT.lmax - 2)//BT.nlb)
cl_b_p = np.mean(cls[2:iend].reshape([-1, BT.nlb]), axis=1)
assert normdiff(cl_b_p, cl_b) < 1E-5
cl_u_p = (cl_b[:, None] * np.ones([len(cl_b), BT.nlb])).flatten()
assert normdiff(cl_u_p, cl_u[2:iend]) < 1E-5
def test_bins_binning_f_ell():
# Tests C_l binning and unbinning with ell-dependent prefactors
cls = np.arange(BT.lmax+1, dtype=float)
fell = cls * (cls + 1.) / 2 / np.pi
cl_b = BT.bcf.bin_cell(cls)
assert normdiff(cl_b, BT.bvf1.bin_cell(cls)) < 1E-5
assert normdiff(cl_b, BT.bvf2.bin_cell(cls)) < 1E-5
cl_u = BT.bcf.unbin_cell(cl_b)
assert normdiff(cl_u, BT.bvf1.unbin_cell(cl_b)) < 1E-5
assert normdiff(cl_u, BT.bvf2.unbin_cell(cl_b)) < 1E-5
iend = 2+BT.nlb*((BT.lmax - 2)//BT.nlb)
cl_b_p = np.mean((fell*cls)[2:iend].reshape([-1, BT.nlb]), axis=1)
assert normdiff(cl_b_p, cl_b) < 1E-5
cl_u_p = (cl_b[:, None] * np.ones([len(cl_b), BT.nlb])).flatten()
cl_u_p /= fell[2:2+BT.nlb*((BT.lmax - 2)//BT.nlb)]
assert normdiff(cl_u_p, cl_u[2:iend]) < 1E-5
|
11555060
|
import sys
import os
import lmdb # install lmdb by "pip install lmdb"
import cv2
import numpy as np
def checkImageIsValid(imageBin):
    if imageBin is None:
        return False
    imageBuf = np.frombuffer(imageBin, dtype=np.uint8)
    img = cv2.imdecode(imageBuf, cv2.IMREAD_COLOR)
    if img is None:
        return False
    imgH, imgW = img.shape[0], img.shape[1]
    if imgH * imgW == 0:
        return False
    return True
def writeCache(env, cache):
    with env.begin(write=True) as txn:
        for k, v in cache.items():
            # lmdb expects bytes keys and values under Python 3
            if isinstance(k, str):
                k = k.encode()
            if isinstance(v, str):
                v = v.encode()
            txn.put(k, v)
def createDataset(outputPath, imageListFile, checkValid=True):
"""
Create LMDB dataset for CRNN training.
ARGS:
outputPath : LMDB output path
        imageListFile : path to a text file listing image paths (one per line)
checkValid : if true, check the validity of every image
"""
with open(imageListFile) as fp:
imagePathList = fp.readlines()
nSamples = len(imagePathList)
env = lmdb.open(outputPath, map_size=1099511627776)
cache = {}
cnt = 1
    for i in range(nSamples):
imagePath = imagePathList[i].rstrip()
labelPath = imagePath.replace('images', 'labels').replace('JPEGImages', 'labels').replace('.jpg', '.txt').replace('.png','.txt')
with open(labelPath) as f:
label = f.readlines()
label = ''.join(label)
if not os.path.exists(imagePath):
print('%s does not exist' % imagePath)
continue
        with open(imagePath, 'rb') as f:
imageBin = f.read()
if checkValid:
if not checkImageIsValid(imageBin):
print('%s is not a valid image' % imagePath)
continue
imageKey = 'image-%09d' % cnt
labelKey = 'label-%09d' % cnt
cache[imageKey] = imageBin
cache[labelKey] = label
if cnt % 1000 == 0:
writeCache(env, cache)
cache = {}
print('Written %d / %d' % (cnt, nSamples))
cnt += 1
nSamples = cnt-1
cache['num-samples'] = str(nSamples)
writeCache(env, cache)
print('Created dataset with %d samples' % nSamples)
if __name__ == '__main__':
outputPath = sys.argv[1]
imageListFile = sys.argv[2]
createDataset(outputPath, imageListFile, checkValid=True)
|
11555155
|
from enum import Enum
from typing import List
class LINK(Enum):
CONTAINER = '<http://www.w3.org/ns/ldp#BasicContainer>; rel="type"'
RESOURCE = '<http://www.w3.org/ns/ldp#Resource>; rel="type"'
def append_slashes_at_end(url) -> str:
    if not url or url[-1] != '/':
        url += '/'
    return url
def remove_slashes_at_end(url) -> str:
    if url and url[-1] == '/':
        url = url[:-1]
    return url
def get_root_url(url: str) -> str:
slash_count = 0
for i in range(len(url)):
if url[i] == '/':
slash_count += 1
if slash_count == 3:
break
if slash_count == 3:
return url[:i + 1]
else:
return append_slashes_at_end(url)
def get_parent_url(url) -> str:
url = remove_slashes_at_end(url)
if url.count('/') == 2: # is base url, no parent url, return it self
return append_slashes_at_end(url)
i = url.rindex('/')
return url[:i + 1]
def get_item_name(url) -> str:
url = remove_slashes_at_end(url)
if url.count('/') == 2: # is base url, no item name
return ''
i = url.rindex('/')
return url[i + 1:]
def are_folders(urls: List) -> bool:
pass
def are_files(urls: List) -> bool:
pass
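# Usage sketch (illustrative; example.org is a hypothetical LDP server):
if __name__ == '__main__':
    url = 'https://example.org/container/item/'
    assert get_root_url(url) == 'https://example.org/'
    assert get_parent_url(url) == 'https://example.org/container/'
    assert get_item_name(url) == 'item'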
|
11555166
|
import tkinter
class EraserWaiterView:
__DESCRIPTIONBORDER = 10
__DESCRIPTIONROW = 0
__BUTTONPADDING = 5
__NAMECOLUMN = 1
def __center(self):
self.__root.withdraw()
self.__root.update_idletasks()
x = (self.__root.winfo_screenwidth() - self.__root.winfo_reqwidth()) / 2
y = (self.__root.winfo_screenheight() - self.__root.winfo_reqheight()) / 2
self.__root.geometry("+%d+%d" % (x, y))
self.__root.deiconify()
def __init__(self, thingtopoll):
        self.__agree = False
self.__root = tkinter.Tk()
self.__root.wm_title('Naraeon SSD Tools - Secure Erase')
self.__mainloop = self.__root.mainloop
self.__thingtopoll = thingtopoll
frame = tkinter.Frame(self.__root)
frame.pack(fill=tkinter.BOTH)
self.__initMessageFrame(frame)
self.__poll()
        self.__root.protocol("WM_DELETE_WINDOW", lambda: None)  # NOP: disable closing via the window manager
self.__center()
self.__mainloop()
def __initButtonFrame(self, frame):
self.__buttonFrame = tkinter.Frame(frame)
self.__buttonFrame.pack(anchor='center', pady=self.__BUTTONPADDING)
def __initMessageFrame(self, frame):
self.__messageFrame = tkinter.Frame(frame)
self.__messageFrame.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=True)
tkinter.Label(self.__messageFrame, text='Wait for the completion.\n'
'This action cannot be undone, even if you close this window or restart the machine.\n'
'Force restarting would cause serious hardware failure.',
borderwidth=self.__DESCRIPTIONBORDER)\
.grid(row=self.__DESCRIPTIONROW, column=self.__NAMECOLUMN)
def __poll(self):
if not self.__thingtopoll():
self.__root.destroy()
else:
self.__root.after(500, self.__poll)
|
11555248
|
from numpy import zeros, float64, int64, array, random
import ctypes
import _ctypes
from math import ceil, floor
import sys
if sys.version_info[0] < 3:
raise Exception("Must be using Python 3")
def c_char_ptr(x):
return x.ctypes.data_as(ctypes.POINTER(ctypes.c_char))
def c_long_ptr(x):
return x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
def ham(v):
return sum(v)
# CodeRedLib: a python wrapper for the c++ coderedlib.cpp
# coderedlib is compiled many times with various values of maxn;
# make sure the values you want to use are listed in compile_cpp_core.sh
# Functions names match with the paper. They all act on the internal state:
# self.B : the basis
# self.E : The epipodal matrix
# self.P : The cumulative projector matrix P[i] = &_{j<i} ~ B[j] (has length k+1)
# (P[i] is the boolean complement of s_i from the paper)
# self.l : Epipodal length
class CodeRedLib(object):
def __init__(self, B, seed=None):
k, n = B.shape
self.k, self.n = k, n
if seed is None:
seed = random.randint(0,2**63)
nmax = 256 * int(ceil(n/256.))
self.lib = ctypes.cdll.LoadLibrary("./bin/coderedlib-%d.so"%nmax)
self.lib._setup(k, n, c_char_ptr(B), ctypes.c_long(seed))
self.B = zeros((k , n), dtype='bool')
self.E = zeros((k , n), dtype='bool')
self.P = zeros((k+1, n), dtype='bool')
self.l = zeros( k, dtype='int64')
self.update()
def update(self):
self.lib._export_all(c_char_ptr(self.B),
c_char_ptr(self.E),
c_char_ptr(self.P),
c_long_ptr(self.l))
# Check that the code is of full length
assert(sum(self.l)==self.n)
def LLL(self):
self.lib._LLL()
self.update()
def Randomize(self, light=True):
self.lib._Randomize(light)
self.update()
def Systematize(self):
self.lib._Systematize()
self.update()
def EpiSort(self):
self.lib._EpiSort()
self.update()
def SizeRedBasis(self):
self.lib._SizeRedBasis()
self.update()
def SemiSystematize(self):
self.lib._SemiSystematize()
self.update()
for k1 in range(self.k)[::-1]:
if self.l[k1] > 1:
return k1+1
return 0
def KillTwos(self):
self.lib._KillTwos()
self.update()
    # Used to speed up LB/LBB experiments in large dimension by only
    # visiting a (1+skip)^{1-w2} fraction of the enumerated space.
def set_skip(self, skip):
return self.lib._set_skip(int(floor(skip)))
def SizeRed(self, t):
return self.lib._SizeRed(c_char_ptr(t))
def LB(self, w2, goal_w=None, t=None, stats=False):
tt = zeros(self.n, dtype='bool') if t is None else 1 * t
_stats = zeros(self.n+1, dtype='int64') if stats else None
success = self.lib._LB(c_char_ptr(tt), w2,
0 if goal_w is None else goal_w,
c_long_ptr(_stats) if stats else None)
if stats:
return _stats
if success or goal_w is None:
return tt
def LBB(self, k1, w2, goal_w=None, t=None, stats=False):
tt = zeros(self.n, dtype='bool') if t is None else 1 * t
_stats = zeros(self.n+1, dtype='int64') if stats else None
success = self.lib._LBB(c_char_ptr(tt), k1, w2,
0 if goal_w is None else goal_w,
c_long_ptr(_stats) if stats else None)
if stats:
return _stats
if success or goal_w is None:
return tt
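# Minimal usage sketch (illustrative): assumes the shared library for n=256 was
# built via compile_cpp_core.sh and is available under ./bin/.
if __name__ == '__main__':
    k, n = 20, 256
    B = random.randint(0, 2, size=(k, n)).astype('bool')
    red = CodeRedLib(B, seed=42)
    red.LLL()
    print("epipodal profile after LLL:", red.l)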
|
11555252
|
from ...suggestion.algorithm.base_hyperopt_algorithm import BaseHyperoptAlgorithm
class SimulateAnnealAlgorithm(BaseHyperoptAlgorithm):
"""
The implementation is based on https://github.com/tobegit3hub/advisor
    Get the new suggested trials with the simulated annealing algorithm.
"""
def __init__(self):
super(SimulateAnnealAlgorithm, self).__init__("anneal")
|
11555292
|
expected_output = {
"ospf3-neighbor-information": {
"ospf3-neighbor": [
{
"activity-timer": "34",
"bdr-id": "0.0.0.0",
"dr-id": "0.0.0.0",
"interface-name": "ge-0/0/0.0",
"neighbor-address": "fe80::250:56ff:fe8d:53c0",
"neighbor-adjacency-time": {"#text": "3w0d 17:06:45"},
"neighbor-id": "10.189.5.253",
"neighbor-priority": "128",
"neighbor-up-time": {"#text": "3w0d 17:06:45"},
"options": "0x13",
"ospf-area": "0.0.0.8",
"ospf-neighbor-state": "Full",
"ospf3-interface-index": "2",
},
{
"activity-timer": "31",
"bdr-id": "0.0.0.0",
"dr-id": "0.0.0.0",
"interface-name": "ge-0/0/1.0",
"neighbor-address": "fe80::250:56ff:fe8d:72bd",
"neighbor-adjacency-time": {"#text": "3w0d 17:06:40"},
"neighbor-id": "10.169.14.240",
"neighbor-priority": "128",
"neighbor-up-time": {"#text": "3w0d 17:06:44"},
"options": "0x13",
"ospf-area": "0.0.0.8",
"ospf-neighbor-state": "Full",
"ospf3-interface-index": "3",
},
]
}
}
|
11555294
|
import numpy as np
import scipy.optimize as sciopt
def gaussian(x, *p):
A, mu, sigma = p
return A*np.exp(-(x-mu)**2/(2.*sigma**2))
def fit_gaussian(x, y, z_2d, save_fits=False):
z = z_2d
max_idx = np.unravel_index(z.argmax(), z.shape)
max_row = max_idx[0] - 1
max_col = max_idx[1] - 1
z_max_row = z[max_row, :]
z_max_col = z[:, max_col]
A = z[max_row, max_col]
p_guess_x = (A, x[max_col], 0.1*(x[-1] - x[0]))
p_guess_y = (A, y[max_row], 0.1*(y[-1] - y[0]))
coeffs_x, var_matrix_x = sciopt.curve_fit(gaussian, x, z_max_row, p_guess_x)
coeffs_y, var_matrix_y = sciopt.curve_fit(gaussian, y, z_max_col, p_guess_y)
c_x = (x[-1]-x[0])*(max_col+1)/x.size + x[0]
c_y = (y[-1]-y[0])*(y.size-(max_row+1))/y.size + y[0]
centre = (c_x, c_y)
sigma = np.array([coeffs_x[2], coeffs_y[2]])
    fwhm = 2.355 * sigma  # FWHM = 2*sqrt(2*ln2)*sigma ~= 2.355*sigma
    sigma_2 = 1.699 * fwhm  # 1/e^2 full width = 4*sigma ~= 1.699*FWHM
if save_fits:
with open('x_fit.dat', 'w') as fs:
for c in np.c_[x, z_max_row, gaussian(x, *coeffs_x)]:
s = ','.join([str(v) for v in c])
fs.write(s+'\n')
with open('y_fit.dat', 'w') as fs:
for c in np.c_[y, z_max_col, gaussian(y, *coeffs_y)]:
s = ','.join([str(v) for v in c])
fs.write(s+'\n')
return A, centre, sigma_2
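# Quick self-check on synthetic data (illustrative addition, not in the original):
if __name__ == '__main__':
    x = np.linspace(-5, 5, 101)
    y = np.linspace(-5, 5, 101)
    xx, yy = np.meshgrid(x, y)
    z = 2.0 * np.exp(-((xx - 1.0)**2 + (yy + 0.5)**2) / (2 * 0.8**2))
    A, centre, width = fit_gaussian(x, y, z)
    print('amplitude ~', A, 'centre ~', centre, '1/e^2 width ~', width)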
|
11555319
|
import asyncio
import logging
import signal
import typing as t
import arrow
import discord
from aiohttp import ClientSession
from discord import Activity, AllowedMentions, Intents
from discord.client import _cleanup_loop
from discord.ext import commands
from modmail.config import CONFIG
from modmail.log import ModmailLogger
from modmail.utils.extensions import EXTENSIONS, NO_UNLOAD, walk_extensions
from modmail.utils.plugins import PLUGINS, walk_plugins
REQUIRED_INTENTS = Intents(
guilds=True,
messages=True,
reactions=True,
typing=True,
members=True,
emojis_and_stickers=True,
)
class ModmailBot(commands.Bot):
"""
Base bot instance.
Has an aiohttp.ClientSession and a ModmailConfig instance.
"""
logger: ModmailLogger = logging.getLogger(__name__)
def __init__(self, **kwargs):
self.config = CONFIG
self.start_time: t.Optional[arrow.Arrow] = None # arrow.utcnow()
self.http_session: t.Optional[ClientSession] = None
status = discord.Status.online
activity = Activity(type=discord.ActivityType.listening, name="users dming me!")
# listen to messages mentioning the bot or matching the prefix
# ! NOTE: This needs to use the configuration system to get the prefix from the db once it exists.
prefix = commands.when_mentioned_or(CONFIG.bot.prefix)
# allow only user mentions by default.
# ! NOTE: This may change in the future to allow roles as well
allowed_mentions = AllowedMentions(everyone=False, users=True, roles=False, replied_user=True)
super().__init__(
case_insensitive=True,
description="Modmail bot by discord-modmail.",
status=status,
activity=activity,
allowed_mentions=allowed_mentions,
command_prefix=prefix,
intents=REQUIRED_INTENTS,
**kwargs,
)
async def start(self, token: str, reconnect: bool = True) -> None:
"""
Start the bot.
This function is called by the run method, and finishes the set up of the bot that needs an
        asynchronous event loop running, before connecting the bot to discord.
"""
try:
# create the aiohttp session
self.http_session = ClientSession(loop=self.loop)
self.logger.trace("Created ClientSession.")
# set start time to when we started the bot.
# This is now, since we're about to connect to the gateway.
# This should also be before we load any extensions, since if they have a load time, it should
# be after the bot start time.
self.start_time = arrow.utcnow()
# we want to load extensions before we log in, so that any issues in them are discovered
# before we connect to discord. This keeps us from connecting to the gateway a lot if we have a
# problem with an extension.
self.load_extensions()
# next, we log in to discord, to ensure that we are able to connect to discord
# This only logs in to discord and gets a gateway, it does not connect to the websocket
await self.login(token)
# now that we're logged in and ensured we can have connection, we load all of the plugins
            # The reason to wait until we know we have a gateway we can connect to, even though we have not
            # signed in yet, is that a poorly made plugin may misbehave if it is loaded but
# the bot never connects to discord. Putting this below the login ensures that we don't load if
# we don't have a gateway.
self.load_plugins()
# alert the user that we're done loading everything
self.logger.notice("Loaded all extensions, and plugins. Starting bot.")
# finally, we enter the main loop
await self.connect(reconnect=reconnect)
finally:
if not self.is_closed():
await self.close()
def run(self, *args, **kwargs) -> None:
"""
Start up our instance of the bot. Since this method is blocking, it must be called last.
This method does several things, it loads extensions and plugins,
and then executes the main task.
This method was copied from discord.py and modified to suit our needs.
"""
loop = self.loop
try:
# adds signal handlers so the loop is safely stopped
loop.add_signal_handler(signal.SIGINT, lambda: loop.stop())
# this one we may want to get rid of, depending on certain things, and just hard stop instead.
loop.add_signal_handler(signal.SIGTERM, lambda: loop.stop())
except NotImplementedError:
pass
def stop_loop_on_completion(f) -> None: # noqa: ANN001
loop.stop()
future = asyncio.ensure_future(self.start(*args, **kwargs), loop=loop)
future.add_done_callback(stop_loop_on_completion)
try:
loop.run_forever()
except KeyboardInterrupt:
self.logger.info("Received signal to terminate bot and event loop.")
finally:
future.remove_done_callback(stop_loop_on_completion)
self.logger.info("Cleaning up tasks.")
_cleanup_loop(loop)
if not future.cancelled():
try:
return future.result()
except KeyboardInterrupt:
# I am unsure why this gets raised here but suppress it anyway
return None
async def close(self) -> None:
"""Safely close HTTP session, unload plugins and extensions when the bot is shutting down."""
        plugins = self.extensions.keys() & PLUGINS.keys()
for plug in list(plugins):
try:
self.unload_extension(plug)
except Exception:
self.logger.error(f"Exception occured while unloading plugin {plug.name}", exc_info=True)
for ext in list(self.extensions):
try:
self.unload_extension(ext)
except Exception:
self.logger.error(f"Exception occured while unloading {ext.name}", exc_info=True)
for cog in list(self.cogs):
try:
self.remove_cog(cog)
except Exception:
self.logger.error(f"Exception occured while removing cog {cog.name}", exc_info=True)
if self.http_session:
await self.http_session.close()
await super().close()
def load_extensions(self) -> None:
"""Load all enabled extensions."""
EXTENSIONS.update(walk_extensions())
# set up no_unload global too
for ext, value in EXTENSIONS.items():
if value[1]:
NO_UNLOAD.append(ext)
for extension, value in EXTENSIONS.items():
if value[0]:
self.logger.debug(f"Loading extension {extension}")
self.load_extension(extension)
def load_plugins(self) -> None:
"""Load all enabled plugins."""
PLUGINS.update(walk_plugins())
for plugin, should_load in PLUGINS.items():
if should_load:
self.logger.debug(f"Loading plugin {plugin}")
try:
# since we're loading user generated content,
# any errors here will take down the entire bot
self.load_extension(plugin)
except Exception:
self.logger.error("Failed to load plugin {0}".format(plugin), exc_info=True)
def add_cog(self, cog: commands.Cog, *, override: bool = False) -> None:
"""
Load a given cog.
Utilizes the default discord.py loader beneath, but also checks so we can warn when we're
loading a non-ModmailCog cog.
"""
from modmail.utils.cogs import ModmailCog
if not isinstance(cog, ModmailCog):
self.logger.warning(
f"Cog {cog.name} is not a ModmailCog. All loaded cogs should always be"
f" instances of ModmailCog."
)
super().add_cog(cog, override=override)
self.logger.info(f"Cog loaded: {cog.qualified_name}")
def remove_cog(self, cog: str) -> None:
"""
Delegate to super to unregister `cog`.
This only serves to make the info log, so that extensions don't have to.
"""
super().remove_cog(cog)
self.logger.info(f"Cog unloaded: {cog}")
async def on_ready(self) -> None:
"""Send basic login success message."""
self.logger.info("Logged in as %s", self.user)
|
11555363
|
import discord
import logging
from discord.ext import commands
log = logging.getLogger('daijobuudes.moderation')
class Moderation(commands.Cog):
def __init__(self, client):
self.client = client
# Purge messages
@commands.command()
@commands.has_permissions(manage_channels=True)
async def purge(self, ctx, amount=5):
await ctx.channel.purge(limit=amount+1)
log.info(f'Purged {amount} messages')
# Kick a member
@commands.command()
@commands.has_permissions(manage_roles=True)
async def kick(self, ctx, member: discord.Member, *, reason=None):
await member.kick(reason=reason)
        await ctx.send(f'Successfully kicked `{member}` for `{reason}`')
log.info(f'Kicked member {member} for {reason}.')
# Ban a member
@commands.command()
@commands.has_permissions(manage_roles=True)
async def ban(self, ctx, member: discord.Member, *, reason=None):
        await member.ban(reason=reason)
        await ctx.send(f'Successfully banned `{member}` for `{reason}`')
log.info(f'Banned member {member} for {reason}.')
# Unban a member
@commands.command()
@commands.has_permissions(manage_roles=True)
async def unban(self, ctx, *, member):
self.banned_users = await ctx.guild.bans()
self.member_name, self.member_discriminator = member.split('#')
        for ban_entry in self.banned_users:
            user = ban_entry.user
            if (user.name, user.discriminator) == (self.member_name, self.member_discriminator):
                await ctx.guild.unban(user)
                await ctx.send(f'Unbanned user {user.name}#{user.discriminator}')
                log.info(f'Unbanned user {user.name}#{user.discriminator}')
                return
        # only report failure after every ban entry has been checked
        await ctx.send('User not found.')
        log.info('User not found to be unbanned.')
def setup(client):
client.add_cog(Moderation(client))
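# Usage sketch (illustrative; assumes a discord.py 1.x commands.Bot called `bot`):
#     bot.load_extension('cogs.moderation')  # module path depends on project layout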
|
11555372
|
import unittest
from soap.datatype import int_type, IntegerArrayType
from soap.expression import (
Variable, BinaryArithExpr, BinaryBoolExpr, operators, UnaryArithExpr,
SelectExpr, FixExpr, AccessExpr, UpdateExpr, Subscript,
)
from soap.semantics.error import IntegerInterval
from soap.semantics.functions.label import LabelGenerator
from soap.semantics.label import LabelContext, LabelSemantics
from soap.semantics.state.box import BoxState
from soap.semantics.state.meta import MetaState
from soap.semantics.linalg import IntegerIntervalArray
class TestLabel(unittest.TestCase):
def setUp(self):
self.context = LabelContext('test_context')
mat = IntegerIntervalArray([1, 2, 3, 4])
self.x = Variable('x', int_type)
self.y = Variable('y', int_type)
self.z = Variable('z', IntegerArrayType([4]))
self.state = BoxState({
self.x: [1, 2],
self.y: 3,
self.z: mat,
})
self.x_label = self.context.Label(
self.x, IntegerInterval([1, 2]), None)
self.y_label = self.context.Label(self.y, IntegerInterval(3), None)
self.z_label = self.context.Label(self.z, mat, None)
def label(self, expr, state=None):
state = state or self.state
return LabelGenerator(self.context).execute(expr, state)
def compare(self, expr, test_labsem):
labsem = self.label(expr)
self.assertEqual(test_labsem, labsem)
self.assertEqual(test_labsem.expr(), expr)
self.assertEqual(labsem.expr(), expr)
def test_numeral(self):
expr = IntegerInterval(1)
label = self.context.Label(expr, expr, None)
test_value = LabelSemantics(label, {label: expr})
self.compare(expr, test_value)
def test_variable(self):
expr = self.x
test_value = LabelSemantics(self.x_label, {self.x_label: expr})
self.compare(expr, test_value)
def test_UnaryArithExpr(self):
expr = UnaryArithExpr(operators.UNARY_SUBTRACT_OP, self.x)
label_expr = UnaryArithExpr(
operators.UNARY_SUBTRACT_OP, self.x_label)
label = self.context.Label(expr, IntegerInterval([-2, -1]), None)
env = {label: label_expr, self.x_label: self.x}
test_value = LabelSemantics(label, env)
self.compare(expr, test_value)
def test_BinaryArithExpr(self):
expr = BinaryArithExpr(operators.ADD_OP, self.x, self.y)
label_expr = BinaryArithExpr(
operators.ADD_OP, self.x_label, self.y_label)
label = self.context.Label(expr, IntegerInterval([4, 5]), None)
env = {
label: label_expr,
self.x_label: self.x,
self.y_label: self.y,
}
test_value = LabelSemantics(label, env)
self.compare(expr, test_value)
def bool_expr(self):
expr = BinaryBoolExpr(operators.LESS_OP, self.x, self.y)
label_expr = BinaryBoolExpr(
operators.LESS_OP, self.x_label, self.y_label)
# FIXME bound for bool_expr does not make sense
label = self.context.Label(expr, IntegerInterval([-2, -1]), None)
return label, expr, label_expr
def test_BinaryBoolExpr(self):
label, expr, label_expr = self.bool_expr()
env = {
label: label_expr,
self.x_label: self.x,
self.y_label: self.y,
}
test_value = LabelSemantics(label, env)
self.compare(expr, test_value)
def test_AccessExpr(self):
subscript = Subscript(self.y)
expr = AccessExpr(self.z, subscript)
label_subscript_expr = Subscript(self.y_label)
subscript_label = self.context.Label(
subscript, IntegerIntervalArray([self.state[self.y]]), None)
label_expr = AccessExpr(self.z_label, subscript_label)
label = self.context.Label(expr, IntegerInterval(4), None)
env = {
label: label_expr,
subscript_label: label_subscript_expr,
self.y_label: self.y,
self.z_label: self.z,
}
test_value = LabelSemantics(label, env)
self.compare(expr, test_value)
def test_UpdateExpr(self):
subscript = Subscript(self.y)
expr = UpdateExpr(self.z, subscript, self.x)
label_subscript_expr = Subscript(self.y_label)
subscript_label = self.context.Label(
subscript, IntegerIntervalArray([self.state[self.y]]), None)
label_expr = UpdateExpr(self.z_label, subscript_label, self.x_label)
new_bound = IntegerIntervalArray([1, 2, 3, IntegerInterval([1, 2])])
label = self.context.Label(expr, new_bound, None)
env = {
label: label_expr,
subscript_label: label_subscript_expr,
self.x_label: self.x,
self.y_label: self.y,
self.z_label: self.z,
}
test_value = LabelSemantics(label, env)
self.compare(expr, test_value)
def test_SelectExpr(self):
bool_label, bool_expr, bool_label_expr = self.bool_expr()
expr = SelectExpr(bool_expr, self.x, self.y)
label_expr = SelectExpr(bool_label, self.x_label, self.y_label)
label = self.context.Label(expr, IntegerInterval([1, 2]), None)
env = {
label: label_expr,
bool_label: bool_label_expr,
self.x_label: self.x,
self.y_label: self.y,
}
test_value = LabelSemantics(label, env)
self.compare(expr, test_value)
def test_FixExpr(self):
init_state = MetaState({self.x: IntegerInterval(0)})
init_label, init_env = self.label(init_state)
invar = BoxState({self.x: IntegerInterval([0, 4])})
end_invar = BoxState({self.x: IntegerInterval([1, 5])})
loop_state = MetaState({
self.x: BinaryArithExpr(
operators.ADD_OP, self.x, IntegerInterval(1)),
})
loop_label, loop_env = self.label(loop_state, invar)
bool_expr = BinaryBoolExpr(
operators.LESS_OP, self.x, IntegerInterval(5))
bool_labsem = self.label(bool_expr, end_invar)
bool_label, _ = bool_labsem
expr = FixExpr(bool_expr, loop_state, self.x, init_state)
bound = IntegerInterval(5)
label = self.context.Label(expr, bound, invar)
label_expr = FixExpr(bool_labsem, loop_env, self.x, init_env)
env = {label: label_expr}
test_value = LabelSemantics(label, env)
self.compare(expr, test_value)
def test_MetaState(self):
meta_state = MetaState({self.x: self.x, self.y: self.y})
env = {
self.x: self.x_label,
self.y: self.y_label,
self.x_label: self.x,
self.y_label: self.y,
}
bound = BoxState(x=self.state[self.x], y=self.state[self.y])
label = self.context.Label(meta_state, bound, None)
test_value = LabelSemantics(label, env)
self.compare(meta_state, test_value)
|
11555381
|
from django import template
from bs4 import BeautifulSoup
register = template.Library()
@register.tag(name='activehref')
def do_active_href(parser, token):
nodelist = parser.parse(('endactivehref',))
parser.delete_first_token()
return ActiveHref(nodelist)
class ActiveHref(template.Node):
"""
This template tag will set an 'active' class attribute
on any anchor with an href value that matches part of the
current url path.
Sample template usage:
{% activehref %}
<li><a href="{% url products %}">Products</a></li>
<li><a href="{% url stores %}">Stores</a></li>
<li><a href="{% url about %}">About</a></li>
{% endactivehref %}
"""
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
        soup = BeautifulSoup(self.nodelist.render(context), 'html.parser')
        if 'request' in context:
path = context.get('request').path
for a in soup.find_all('a'):
href = a['href']
if href == '/':
if path == href:
a['class'] = 'active'
break
else:
if href in path:
a['class'] = 'active'
break
        return str(soup)
|
11555385
|
import socket
import os
from stanfordnlp.server import CoreNLPClient
def is_port_occupied(ip='127.0.0.1', port=80):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ip, int(port)))
s.shutdown(2)
return True
    except OSError:
        return False
    finally:
        s.close()
def get_corenlp_client(corenlp_path, corenlp_port):
os.environ["CORENLP_HOME"] = corenlp_path
assert not is_port_occupied(corenlp_port), "Port {} is occupied by other process".format(corenlp_port)
corenlp_client = CoreNLPClient(
annotators=['tokenize', 'ssplit', 'pos', 'lemma', 'depparse'], timeout=60000,
memory='5G', endpoint="http://localhost:%d" % corenlp_port,
start_server=True, be_quiet=False)
corenlp_client.annotate("hello world",
annotators=['tokenize', 'ssplit', 'pos', 'lemma', 'depparse'],
output_format="json")
return corenlp_client
if __name__ == "__main__":
client = get_corenlp_client(
corenlp_path="/home/software/stanford-corenlp/stanford-corenlp-full-2018-02-27/",
corenlp_port=11001)
client.annotate("hello world")
client.stop()
|
11555437
|
import numpy as np
import pandas as pd
import pytest
from xgboost import XGBClassifier
from trelawney.lime_explainer import LimeExplainer
def _do_explainer_test(explainer, data_to_test=None, col_real='real', col_fake='fake'):
data_to_test = data_to_test if data_to_test is not None else pd.DataFrame([[5, 0.1], [95, -0.5]], columns=[col_real, col_fake])
explanation = explainer.explain_local(data_to_test)
    assert len(explanation) == (data_to_test.shape[0] if len(data_to_test.shape) == 2 else 1)
for single_explanation in explanation:
assert abs(single_explanation[col_real]) > abs(single_explanation[col_fake])
def test_lime_explainer_single(fake_dataset, fitted_logistic_regression):
explainer = LimeExplainer(class_names=['false', 'true'])
explainer.fit(fitted_logistic_regression, *fake_dataset)
explanation = explainer.explain_local(pd.DataFrame([[5, 0.1]]))
assert len(explanation) == 1
single_explanation = explanation[0]
assert abs(single_explanation['real']) > abs(single_explanation['fake'])
def test_lime_explainer_multiple(fake_dataset, fitted_logistic_regression):
explainer = LimeExplainer(class_names=['false', 'true'])
explainer.fit(fitted_logistic_regression, *fake_dataset)
_do_explainer_test(explainer)
def test_lime_explainer_array(fake_dataset, fitted_logistic_regression):
explainer = LimeExplainer(class_names=['false', 'true'])
explainer.fit(fitted_logistic_regression, fake_dataset[0].values, fake_dataset[1])
_do_explainer_test(explainer, np.array([[5, 0.1], [95, -0.5]]), col_real='feature_0', col_fake='feature_1')
def test_lime_explainer_series(fake_dataset, fitted_logistic_regression):
explainer = LimeExplainer(class_names=['false', 'true'])
explainer.fit(fitted_logistic_regression, *fake_dataset)
_do_explainer_test(explainer, pd.Series([5, 0.1], index=['real', 'fake']))
def test_lime_xgb(fake_dataset):
model = XGBClassifier()
x, y = fake_dataset
model.fit(x.values, y)
explainer = LimeExplainer()
explainer.fit(model, *fake_dataset)
_do_explainer_test(explainer)
def test_lime_nn(fake_dataset, fitted_neural_network):
explainer = LimeExplainer(class_names=['false', 'true'])
explainer.fit(fitted_neural_network, *fake_dataset)
explanation = explainer.explain_local(pd.DataFrame([[5, 0.1], [95, -0.5]]))
assert len(explanation) == 2
|
11555461
|
import re
import operator
import string
import numpy as np
import spacy
from nltk.tokenize import word_tokenize as nltk_word_tokenize
from nltk.tokenize import sent_tokenize as nltk_sent_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
stop_words = set(stopwords.words('english'))
SENTENCE_SPLIT_REGEX = re.compile(r'\s+')
DOC_SPLIT_REGEX = re.compile(r'[.!?]')
PAD_TOKEN = "<pad>"
UNK_TOKEN = "<unk>"
EOS_TOKEN = "<eos>"
def clean_text(sentence, lower=True, removeSpaces=True, removePunct=False, lowerWithTitles=False):
"""
Cleans up the input text.
Args:
sentence (string): The input string to clean up
removePunct (bool): Whether to remove punctuation
Returns:
string: The cleaned string
"""
if isinstance(sentence, bytes):
        sentence = sentence.decode()
# Remove punctuation
if removePunct:
# translator = str.maketrans('', '', string.punctuation)
translator = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
sentence = sentence.translate(translator)
# Remove multiple spaces
if removeSpaces:
        sentence = re.sub(r"\s+", " ", sentence).strip()
if lower:
sentence = sentence.lower()
elif lowerWithTitles and len(sentence) > 1:
sentence = " ".join([s[0] + s[1:].lower() for s in re.split(SENTENCE_SPLIT_REGEX, sentence)])
# Strip and return
return sentence.strip()
def stem_and_lemmatize(tokens, stem=True, lemmatize=True):
"""
Perform stemming and lemmatization
Args:
tokens (list[string]): List of tokens to stem and lemmatize
stem (bool): If true, perform stemming
lemmatize (bool): If True, lemmatize
Returns:
The list of tokens stemmed and lemmatized
"""
if stem:
stemmer = PorterStemmer()
tokens = [stemmer.stem(t) for t in tokens]
if lemmatize:
lemmatizer = WordNetLemmatizer()
tokens = [lemmatizer.lemmatize(t) for t in tokens]
return tokens
def word_tokenize(sentence, tokenizer="nltk"):
"""
Tokenize the input string.
Args:
sentence (string): The input string
tokenizer (string): The tokenizer to use. Default is nltk word tokenizer
Returns:
List[string]: The tokens from the input string
"""
    if tokenizer == "nltk":
return nltk_word_tokenize(sentence)
else:
return re.split(SENTENCE_SPLIT_REGEX, sentence)
def sent_tokenize(document, tokenizer='nltk'):
"""
Tokenize the document into a list of sentences.
Args:
document (string): The input string
tokenizer (string): The tokenizer to use. Default is nltk sentence tokenizer
Returns:
List[string]: The sentences from the input string
"""
    if tokenizer == "nltk":
return nltk_sent_tokenize(document)
else:
        return re.split(DOC_SPLIT_REGEX, document)
def conceptTokenize(sentence, tokenizer='nltk'):
nostops = " ".join([t for t in word_tokenize(sentence) if t not in stop_words])
    if tokenizer == 'nltk':
return word_tokenize(clean_text(nostops, removePunct=True))
else:
return word_tokenize(clean_text(nostops, removePunct=False), tokenizer='spaces')
def getFeatureMap(toks, syns):
"""
Gets text features for a given piece of text and a list of synonyms
Args:
        toks (List[string]): A list of tokens
        syns (List[List[string]]): A list of tokenized synonyms
    Returns:
        List[float]: The features present between the given text and synonyms
"""
word_overlap = 0.0
acronym = 0.0
stem_overlap = 0.0
stemmer = PorterStemmer()
#tokenize the text
#nostops = " ".join([t for t in word_tokenize(text) if t not in stop_words])
#toks = word_tokenize(clean_text(nostops, removePunct=True))
for syn_toks in syns:
acro = ''.join([t[0] for t in syn_toks])
for t in toks:
if acro == t:
acronym = 1.0
break
for t in toks:
for s in syn_toks:
if s == t:
word_overlap = 1.0
elif stemmer.stem(s) == stemmer.stem(t):
stem_overlap = 1.0
return [float(word_overlap), float(stem_overlap), float(acronym)]
def extend_overlapping_spans(spans):
"""
Method to take a list of spans and extend overlapping spans to the longest span
Args:
spans (List[tuple]): A list of span tuples (start, end)
Returns:
List[tuple]: A list of span tuples (start, end) with overlapping spans extended to the longest span
"""
    spans = [list(s) for s in spans]  # work on mutable copies; the input may be tuples
    spans = sorted(spans, key=operator.itemgetter(1))
    i = len(spans) - 1
    while i >= 0:
        start, end = spans[i]
        delete = False
        for j in range(i + 1, len(spans)):
            rstart, rend = spans[j]
            if start < rstart < end:
                spans[j][0] = start
                del spans[i]
                delete = True
                break
            elif start >= rstart:
                del spans[i]
                delete = True
                break
        if not delete:
            i -= 1
    return [tuple(s) for s in spans]
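# Example (illustrative): overlapping spans are extended to the longest span, e.g.
#     extend_overlapping_spans([(0, 5), (3, 9)]) -> [(0, 9)]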
def label_tokens_in_spans(text, spans, begin_label="B", in_label='I', out_label="O", label_scheme="BI"):
"""
Method to label tokens within a span
Args:
text (str): The text to label
spans (List[tuple]): A list of spans within which to label tokens
begin_label (str): The label to use for the first token of a span
in_label (str): The label for all tokens inside a span (Ignored if label_scheme="Basic")
out_label (str): The label to use for tokens outside of the spans
label_scheme (str): The labeling scheme to use options are:
- Basic: just use the given label
- BI
Returns:
List[tuple]: Labeled tokens
"""
nlp = spacy.blank('en')
doc = nlp(text)
labeled = []
textptr = 0
for span in spans:
if span[0] - textptr > 0:
endI = span[0]
if text[span[0] - 1] in ' ':
endI = span[0] - 1
I = doc.char_span(textptr, endI)
labeled.extend([(tok.text, out_label) for tok in I])
# Make sure aligns with word boundaries
startB = span[0]
while text[startB] in ' ':
startB += 1
# Tokens inside the span
B = doc.char_span(startB, span[1])
if label_scheme == 'Basic':
labeled.extend([(tok.text, begin_label) for tok in B])
else:
labeled.append((B[0].text, begin_label))
labeled.extend([(tok.text, in_label) for tok in B[1:]])
textptr = span[1]
while text[textptr] in ' ':
textptr += 1
# labeled.extend([(tok, out_label) for tok in word_tokenize(text[textptr:])])
labeled.extend([(tok.text, out_label) for tok in doc.char_span(textptr, len(text))])
return labeled
def collect_abstracts(filename):
with open(filename) as f:
abstracts = [" ".join(l.split('\t')[1:]).strip() for l in f]
return abstracts
def load_dict_from_vocab_file(filename):
"""
Creates a dictionary from a vocabulary file
Args:
filename (string): File path to the vocabulary file. Each word should occupy a single line.
Returns:
dict{string: int}: The dictionary keyed off of the word with the value being the index
"""
with open(filename, encoding="utf-8") as f:
words = [w.strip() for w in f.readlines()]
return {words[n]: n for n in range(len(words))}
# Code to turn sentences into indices with normalized length
def preprocess_sentence(sentence, vocab, length=10):
"""
Cleans, tokenizes, and transforms string into an array of indices with normalized length
Args:
sentence (string): The input sentence
vocab (dict): The target vocabulary (string, int)
length (int): The normalized sentence length or None to maintain original lengths
Returns:
List[int]: List of word indices
"""
sentence = clean_text(sentence)
tokens = word_tokenize(sentence)
ids = tokens_to_ids(tokens, vocab)
if length is not None:
        ids = normalize_sentence_length(ids, vocab, length=length)
return np.asarray(ids)
def normalize_sentence_length(ids, vocab, front_padding=True, length=10, padId=None):
"""
Normalize sentences to a given length, filling with a special PAD token
Args:
ids (List[int]): List of token indices
vocab (dict): THe target vocabulary (string, int)
front_padding (bool): Flag indicating if padding should be applied
to the front or back of the string
length (int): The target length
padId (int): ID to use for padding or None to use vocabulary's pad token
Returns:
List[int]: List of word indices with normalized length
"""
if padId is None:
padId = vocab[PAD_TOKEN]
if len(ids) > length:
ids = ids[:length]
if len(ids) < length:
if front_padding:
ids = [padId] * (length - len(ids)) + ids
else:
ids = ids + [padId] * (length - len(ids))
return ids
def tokens_to_ids(tokens, vocab):
"""
Transform a list of tokens into a list of indices. Out-of-vocab words are mapped
to a special UNK token
Args:
tokens (List[string]): The list of tokens
vocab (dict): The target vocabulary (string, int)
Returns:
List[int]: List of word indices in the vocab
"""
ids = [vocab[t] if t in vocab else vocab[UNK_TOKEN] for t in tokens]
return ids
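# Round-trip sketch (illustrative; assumes NLTK's punkt tokenizer data is installed):
if __name__ == '__main__':
    vocab = {PAD_TOKEN: 0, UNK_TOKEN: 1, 'the': 2, 'cat': 3, 'sat': 4}
    ids = preprocess_sentence("The cat sat.", vocab, length=6)
    # '.' is out-of-vocab -> UNK; the front is padded up to length 6
    print(ids)  # [0 0 2 3 4 1]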
|
11555504
|
from tests.base_unittest import BaseUnitTest
from pypokerengine.engine.player import Player
from pypokerengine.engine.seats import Seats
class SeatsTest(BaseUnitTest):
def setUp(self):
self.seats = Seats()
self.p1 = Player("uuid1", 100)
self.p2 = Player("uuid2", 100)
self.p3 = Player("uuid3", 100)
def test_sitdown(self):
self.seats.sitdown(self.p1)
self.true(self.p1 in self.seats.players)
def test_size(self):
self.__sitdown_players()
self.eq(3, len(self.seats.players))
def test_count_active_players(self):
self.__setup_pay_status()
self.__sitdown_players()
self.eq(2, self.seats.count_active_players())
    def test_count_ask_wait_players(self):
self.__setup_pay_status()
self.__sitdown_players()
self.eq(1, self.seats.count_ask_wait_players())
def test_serialization(self):
self.__sitdown_players()
serial = self.seats.serialize()
restored = Seats.deserialize(serial)
for i in range(len(self.seats.players)):
self.eq(Player.serialize(self.seats.players[i]), Player.serialize(restored.players[i]))
def __setup_pay_status(self):
self.p1.pay_info.update_by_pay(10)
self.p2.pay_info.update_to_fold()
self.p3.pay_info.update_to_allin()
def __sitdown_players(self):
for player in [self.p1, self.p2, self.p3]:
self.seats.sitdown(player)
|
11555510
|
import argparse
parser = argparse.ArgumentParser(description='Disfluency Detection')
parser.add_argument('-file', type=str, default='swbdIO/test.txt')
parser.add_argument('-pred_file', type=str, default='swbdIO/test.txt')
parser.add_argument('-out_file', type=str, default='swbdIO/test.txt')
args = parser.parse_args()
def read_anno(anno_path):
js_list=[]
with open(anno_path, "r", encoding='utf-8') as reader:
i = -1
for line in reader:
i += 1
            if i % 4 == 0:
                js_list.append({'sent': [token for token in line.strip().split()]})
            elif i % 4 == 2:
                l = line.strip().split()
                assert (len(l) == len(js_list[-1]['sent']))
                assert (len(l) > 0)
                js_list[-1]['sent_tag'] = l
            # lines with i % 4 in (1, 3) hold text we do not need; skip them
return js_list
def writeSig(js_list,path,out_path):
tags=[]
with open(path, "r", encoding='utf-8') as reader:
for line in reader:
tags.append(line.strip().split())
assert(len(tags)==len(js_list))
with open(out_path, "w", encoding='utf-8') as writer:
for tag,js in zip(tags,js_list):
assert len(tag)==len(js['sent_tag'])
pc1, pc2, rc1, rc2 = 0,0,0,0
for p,g in zip(tag,js['sent_tag']):
if p=='I' and g=='I':
pc1+=1
pc2+=1
rc1+=1
rc2+=1
else:
if p=='I':
pc2+=1
if g=='I':
rc2+=1
writer.write('{} {} {} {}'.format(pc1, pc2, rc1, rc2)+'\n')
js_list=read_anno(args.file)
writeSig(js_list,args.pred_file,args.out_file)
|
11555523
|
import tensorflow as tf
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from tefla.core import metrics
@pytest.fixture(autouse=True)
def _reset_graph():
tf.reset_default_graph()
def test_kappav2_op():
kappav2 = metrics.KappaV2()
labels = tf.placeholder(shape=(32, ), name='labels', dtype=tf.int32)
predictions = tf.placeholder(shape=(32, ), name='predictions', dtype=tf.int32)
kappa_metric = kappav2.metric(predictions, labels, num_classes=10, batch_size=32)
label_v = np.random.randint(low=0, high=9, size=(32, ))
pred_v = np.random.randint(low=0, high=9, size=(32, ))
kappa = metrics.Kappa()
kappa_metric_ = kappa.metric(pred_v, label_v, 10)
with tf.Session() as sess:
_kappa_metric = sess.run(kappa_metric, feed_dict={labels: label_v, predictions: pred_v})
assert_array_almost_equal(_kappa_metric, kappa_metric_)
if __name__ == '__main__':
pytest.main([__file__])
|
11555538
|
import glob
import os
import pandas as pd
class GSTools(object):
@staticmethod
def load_csv_files(dir_str):
'''
This function reads all csv from the given directory, stores them in a dictionary and returns it.
- dir_str should be of the form "../ib-data/nyse-daily-tech/"
- expected format: the csv files should have a 'date' column
'''
# read all paths
csv_paths = sorted(glob.glob(dir_str + "*.csv"))
# create python dictionary
data = {}
for path in csv_paths:
# get the file names
filename = os.path.basename(path)
filename_without_ext = os.path.splitext(filename)[0]
# read the csv file as dataframe
df = pd.read_csv(path)
df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d %H:%M:%S')
data[filename_without_ext] = df
return data
@staticmethod
def get_trading_dates(data):
'''
This function returns all trading days available in the dataset.
Return type is pandas.Index
'''
dates = pd.Index([])
for key in data.keys():
# reset index
data[key] = data[key].reset_index(drop=True)
dates = dates.union(pd.Index(data[key]['date']))
return dates
@staticmethod
def sync_start_end(data):
'''
This function synchronizes the start and end date of all dataframes in a given dictionary.
Rows with dates not between MAX_START_DATE and MIN_END_DATE will be dropped.
'''
# get max starting date
MAX_START_DATE = pd.Timestamp.min
MIN_END_DATE = pd.Timestamp.max
for key in data.keys():
# reset index
data[key] = data[key].reset_index(drop=True)
# max
MAX_START_DATE = max(MAX_START_DATE, data[key]['date'].iloc[0])
MIN_END_DATE = min(MIN_END_DATE, data[key]['date'].iloc[-1])
# take subset of all dataframes
for key in data.keys():
mask = (data[key]['date'] >= MAX_START_DATE) & (data[key]['date'] <= MIN_END_DATE)
data[key] = data[key].loc[mask]
# reset index
data[key] = data[key].reset_index(drop=True)
return (data, MAX_START_DATE, MIN_END_DATE)
@staticmethod
def cut_datafeeds(data, size):
'''
This function cuts all dataframes to the intended size,
drops all dataframes whose length is < size from the dictionary
'''
del_ls = []
for key in data.keys():
# reset index, just in case
data[key] = data[key].reset_index(drop=True)
N = len(data[key])
if N < size:
del_ls.append(key)
else:
data[key] = data[key][N - size:]
# reset index again
data[key] = data[key].reset_index(drop=True)
for key in del_ls:
data.pop(key, None)
return data
@staticmethod
def get_aggregated(data, col='close'):
'''
Returns a dataframe with all close prices aggregated together.
'''
agg_df = pd.DataFrame()
for key in data.keys():
agg_df[key] = data[key][col]
return agg_df
@staticmethod
def get_aggregated_with_dates(data, col='close'):
'''
        Returns a dataframe with the chosen column of all symbols aggregated together, plus a 'date' column.
'''
agg_df = pd.DataFrame()
for key in data.keys():
agg_df[key] = data[key][col]
agg_df["date"] = data[key]["date"]
return agg_df
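# Usage sketch (illustrative; the directory is hypothetical and each CSV must
# contain at least 'date' and 'close' columns):
if __name__ == '__main__':
    data = GSTools.load_csv_files("../ib-data/nyse-daily-tech/")
    data, start, end = GSTools.sync_start_end(data)
    data = GSTools.cut_datafeeds(data, size=252)  # keep roughly one trading year
    closes = GSTools.get_aggregated(data, col='close')
    print(closes.tail())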
|
11555545
|
import skimage.io as sio
import numpy as np
import torch
from torchvision import transforms as trn
from torch.autograd import Variable
from skimage.transform import resize
import json
import os
import os.path as osp
import pickle as pkl
import pandas as pd
def set_gpu_devices(gpu_id):
gpu = ''
if gpu_id != -1:
gpu = str(gpu_id)
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu
preprocess = trn.Compose([
#trn.ToTensor(),
trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def load_file(filename):
"""
load obj from filename
:param filename:
:return:
"""
cont = None
if not osp.exists(filename):
print('{} not exist'.format(filename))
return cont
if osp.splitext(filename)[-1] == '.csv':
# return pd.read_csv(filename, delimiter= '\t', index_col=0)
return pd.read_csv(filename, delimiter=',')
with open(filename, 'r') as fp:
if osp.splitext(filename)[1] == '.txt':
cont = fp.readlines()
cont = [c.rstrip('\n') for c in cont]
elif osp.splitext(filename)[1] == '.json':
cont = json.load(fp)
return cont
def save_file(obj, filename):
"""
save obj to filename
:param obj:
:param filename:
:return:
"""
filepath = osp.dirname(filename)
    if filepath != '' and not osp.exists(filepath):
        os.makedirs(filepath)
    with open(filename, 'w') as fp:
        json.dump(obj, fp, indent=4)
def pkload(file):
data = None
if osp.exists(file) and osp.getsize(file) > 0:
with open(file, 'rb') as fp:
data = pkl.load(fp)
# print('{} does not exist'.format(file))
return data
def pkdump(data, file):
dirname = osp.dirname(file)
if not osp.exists(dirname):
os.makedirs(dirname)
with open(file, 'wb') as fp:
pkl.dump(data, fp)
def to_device(videos, device):
if isinstance(videos, list):
return [v.to(device) for v in videos]
else:
return videos.to(device)
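# Round-trip sanity check (illustrative; writes a small JSON file under ./tmp/):
if __name__ == '__main__':
    save_file({'hello': 'world'}, 'tmp/example.json')
    print(load_file('tmp/example.json'))  # -> {'hello': 'world'}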
|
11555575
|
import re
from rx import Observable
from rx.subjects import Subject
class TransactionValidator:
def __init__(self):
self._error_stream = Subject()
self._price_stream = Subject()
self._symbol_stream = Subject()
self.latest_valid_order = None
def next_error(self, field, error_text):
self._error_stream.on_next([field, error_text])
# We're going to check if the price text has changed to a valid
# value. If it is an empty string or not a number that's greater
# than 0.0 we're going to emit an error in the error stream. If
# it's valid, we will emit the value to the price stream.
def next_price(self, value):
if len(value) == 0:
self.next_error('price', 'cannot be blank')
else:
try:
price = float(value)
if price < 0.01:
self.next_error('price', 'must be greater than 0.00')
else:
self._price_stream.on_next(price)
except ValueError:
if len(re.sub('[a-zA-Z ]', '', value)) == 0:
self.next_error('price', 'must be a number')
# We're going to check if the symbol text has changed. It's a
# simple check, we want to make sure it isn't an empty string. If
# it's valid, we will emit the value on the symbol stream. If it's
# invalid, we'll emit the error on the error stream.
def next_symbol(self, value):
if len(value) == 0 or len(re.sub('[0-9 ]', '', value)) == 0:
self.next_error('symbol', 'cannot be blank')
else:
self._symbol_stream.on_next(value)
def on_error(self, func):
self._error_stream.subscribe(func)
def on_valid_order(self, func):
# Combine latest will emit items when any of the observables
# have an item to emit. We want the latest valid values that
# the user has entered in the symbol and price input
# boxes. When we have that, we can enable the "submit order"
# button. The form inputs and buttons are manipulated through
# the subscription function that was passed in.
def store_order_and_send_to_subscriber(order):
self.latest_valid_order = order
func(order)
Observable.combine_latest(
self._symbol_stream,
self._price_stream,
lambda symbol, price: { 'symbol': symbol, 'price': price }
).subscribe(store_order_and_send_to_subscriber)
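# Wiring sketch (illustrative; assumes the RxPY 1.x API that the imports above use):
if __name__ == '__main__':
    v = TransactionValidator()
    v.on_error(lambda e: print('error:', e))
    v.on_valid_order(lambda order: print('valid order:', order))
    v.next_symbol('AAPL')
    v.next_price('12.50')  # -> valid order: {'symbol': 'AAPL', 'price': 12.5}
    v.next_price('abc')    # -> error: ['price', 'must be a number']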
|
11555577
|
import os
import time
import copy
import torch
import numpy as np
from common import flatten_lists
from common.utils import dotdict
from common.sc2_utils import _update_window, warfare, save_results
from agents.grprop import GRProp
class Runner:
def __init__(self, envs, agent, ilp, sc2_filewriter, config, args, device, dirname=None):
self.args = args
self.envs, self.agent, self.ilp = envs, agent, ilp
self.gamma, self.n_steps, self.device = args.discount, args.steps, device
self.infer, self.train = args.infer, args.train
self.is_warfare = args.warfare
self.prep_time = 2 * 60 * args.prep_time # 2 env steps == 1 game sec
self.save_ilp = args.save_ilp
# file writer
self.sc2_filewriter = sc2_filewriter
self.dirname = dirname
# restore
self.init_update = 0
if args.restore != -1:
filepath = 'weights/%s/hrl-%s'%(config.full_id(), args.restore)
print("Loading the network @ ", filepath)
ckpt = torch.load(filepath, map_location=device)
self.agent.actor_critic.load_state_dict(ckpt['state_dict'])
self.agent.optimizer.load_state_dict(ckpt['optimizer'])
self.init_update = ckpt['epoch']
# init
self.num_envs = self.envs.num_envs
self.state = self.ep_rews = None
self.logs = {'updates': self.init_update}
self.step, self.ep_len, self.score_record, self.return_record = np.zeros((4,self.envs.num_envs))
self.scores, self.ep_returns, self.init_values = np.zeros((3, self.envs.num_envs))
def meta_eval_load(self, dirname, init_ep=0, eval_eps=20, test_eps=4): # HRL & MSGI
'''Load ILP models saved for each episode of meta evaluation
(i.e. ILP_ep-*.pt) and calcuate their winning rate.
'''
for ep in range(init_ep, eval_eps):
# load the saved ILP model
filename = os.path.join(dirname, 'ILP_ep-%d.pt'%ep)
self.ilp.load(filename)
# precision & recall
graphs, prec, rec = self.ilp.infer_graph(ep=ep, PR=True, eval_mode=True)
print('prec, rec=', prec.mean(), rec.mean())
# temperature annealing
params = dotdict()
params.temp = 2 + 38.0 * ( ep ) / ( eval_eps )
test_agent = GRProp(graphs, self.args, self.device, params)
self.rollout_trial(nb_epi=test_eps, eval_flag=True, agent=test_agent)
# store the eval results
mean_frame = self.cum_frames / self.nb_eval_epi
mean_score = self.test_score / self.nb_eval_epi
print('[ Summary: Ep= {} | Mean score= {} ]'.format(ep, mean_score))
self.sc2_filewriter.store(ep=ep, mean=mean_score, data=self.ep_scores, ep_len=mean_frame)
self.sc2_filewriter.save()
def meta_eval_save(self, num_iter=10, tr_epi=20, test_epi=4):
'''
- msgi-meta: train - x, infer - o
- hrl-baseline: train - o, infer - x
'''
logs = dotdict()
logs.str, logs.stst = np.zeros((2, tr_epi))
for i in range(num_iter):
# reset
self.envs.reset_task()
if self.infer:
self.ilp.reset(self.envs)
graphs, prec, rec = self.ilp.infer_graph(ep=None, PR=False)
self.agent = GRProp(graphs, self.args, self.device)
for epi_ind in range(tr_epi):
self.cur_ep = epi_ind
# rollout training trial (MSGI & HRL)
self.rollout_trial(nb_epi=1)
logs.str[epi_ind] += self.score_record
# MSGI - infer graph
if self.infer:
if self.save_ilp:
filename = self.dirname + '/ILP_ep-{}.pt'.format(epi_ind)
self.ilp.save(filename)
graphs, prec, rec = self.ilp.infer_graph(ep=tr_epi, PR=False)
self.agent = GRProp(graphs, self.args, self.device)
test_agent = self.agent
else:
test_agent = None
# save episode results
save_results(ep=epi_ind, dirname=self.dirname, score=self.score_record,
total_counts=self.total_counts)
# eval HRL
if self.args.meta == 'hrl': # HRL
                self.rollout_trial(nb_epi=4, eval_flag=True, agent=test_agent)
                # record eval results
mean_frame = self.cum_frames / self.nb_eval_epi
mean_score = self.test_score / self.nb_eval_epi
logs.stst[epi_ind] += mean_score
print('[ Summary: Ep= {} | Mean score= {} ]'.format(epi_ind, mean_score))
self.sc2_filewriter.store(ep=epi_ind, mean=mean_score, data=self.ep_scores, ep_len=mean_frame)
self.sc2_filewriter.save()
def rollout_trial(self, nb_epi, eval_flag=False, agent=None):
'''
1. HRL - train trial
- N-step AC. However, stop after 'nb_epi' episodes.
- result: trained policy network
2. MSGI - train trial
- Just run 'nb_epi' episodes, and collect data into ilp.
- result: ilp
3. HRL & MSGI - eval trial
- Just run 'nb_epi' episodes, measure performance.
'''
        assert not self.infer or self.ilp is not None
if agent is None:
agent = self.agent
self.eval_flag = eval_flag
if eval_flag:
self.test_score, self.cum_frames, self.nb_eval_epi = 0., 0, 0
self.ep_scores = np.zeros(nb_epi)
# reset
self.reset_trial()
while True:
rollout = self.collect_rollout(nb_epi, eval_flag, agent)
            if not self.active.any():
break
if not eval_flag:
if self.train: # HRL
ploss, vloss, eloss = agent.train(self.logs['updates'], *rollout)
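                    # checkpoint near the end of the training trial: with
                    # the default tr_epi=20, episode index 19 is the last one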
if self.cur_ep == 19 and self.frames > 2000:
self.agent.save(step=self.frames)
self.logs['updates'] += 1
if eval_flag:
            assert self.nb_eval_epi == nb_epi
def collect_rollout(self, nb_epi, eval_flag, agent):
states, options, option_masks = [None]*self.n_steps, [None]*self.n_steps, [None]*self.n_steps
rewards, values = torch.zeros((2, self.n_steps, self.envs.num_envs)).to(self.device)
dones, prev_dones = np.zeros((2, self.n_steps, self.envs.num_envs))
for step in range(self.n_steps):
with torch.no_grad():
if eval_flag:
if isinstance(agent, GRProp):
option, value, option_mask = agent.get_option(self.obs, self.last_dones, eval_flag)
else:
option, value, option_mask = agent.get_option(self.obs, self.last_dones)
else:
option, value, option_mask = agent.get_option(self.obs, self.last_dones)
options[step] = copy.deepcopy(option)
option_masks[step] = copy.deepcopy(option_mask)
if self.is_warfare and self.frames >= self.prep_time:
# time is up and prepare for the battle
self.obs, reward, done, frames, self.total_counts = warfare(self.envs, self.obs)
else:
self.obs, reward, done, frames = agent.execute(self.obs, option, self.envs)
if not done: # for non-warfare maps
self.total_counts = copy.deepcopy(self.envs.total_counts[0])
# MSGI
if self.infer and not eval_flag:
self.ilp.insert(self.obs, option, reward, done)
# HRL
if self.train and not eval_flag:
spatials = self.obs['spatials']
comps, eligs, masks = self.obs['meta_states']
steps = self.obs['steps']
states[step] = [spatials, comps, eligs, masks, steps]
rewards[step], dones[step], values[step] = reward, done, value
# compute records
self._compute_records(reward, done, value, frames)
# updates & log
self.epi_count += done
self.active = (self.epi_count < nb_epi)
self.last_dones = done
self.frames += frames
if self.active.sum() == 0:
break
# terminate when all episodes are finished
if self.active.sum() == 0: # ignore current samples
return None
if self.train and not eval_flag:
with torch.no_grad():
last_value = agent.get_value(self.obs).detach()
# convert to torch tensor
prev_dones = torch.from_numpy(prev_dones).float().to(self.device)
dones = torch.from_numpy(dones).float().to(self.device)
return (flatten_lists(states), torch.cat(options), rewards, dones, self.init_values, last_value)
else:
return None
def eval(self, num_iter, step=None):
ep_len_sum, score_sum, return_sum, epi_count, cur_score, cur_return = np.zeros((6, self.envs.num_envs))
gammas = np.ones((self.envs.num_envs))
steps = np.zeros((self.envs.num_envs))
meta_steps = np.zeros((self.envs.num_envs))
print('Eval for {} iterations!'.format(num_iter))
ep_lens = []
score_buffer = []
observations, _, _ = self.envs.reset()
dones = np.zeros((self.envs.num_envs, 1))
while True:
with torch.no_grad():
options, *_ = self.agent.get_option(observations, dones)
if self.is_warfare and steps >= self.prep_time:
# time is up and prepare for the battle
observations, rewards, dones, frames, _ = warfare(self.envs, observations)
else:
observations, rewards, dones, frames = self.agent.execute(observations, options, self.envs)
dones = np.asarray(dones, dtype=np.float32)
rewards = np.asarray(rewards, dtype=np.float32)
# computes cur values
steps += frames
cur_score += rewards
cur_return += rewards if self.is_warfare else gammas * rewards
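            # carry the per-env discount across steps: multiply by
            # gamma^frames while an episode runs, reset to 1 on done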
gammas = (1 - dones) * np.power(self.gamma, frames) * gammas + dones
# break if episode is done
mask = (epi_count < num_iter)
if mask.sum() == 0:
break
if (dones * mask).sum() > 0:
# save the episode score (episode / win_or_lose / 0 / 0)
ep = copy.deepcopy(epi_count[0])
score = float(copy.deepcopy(cur_score) > 0)
score_buffer.append([ep, score, 0., 0.])
# store episode length
ep_lens.append([ep, mask*dones*steps, 0., 0.])
# save the stats for the episode
if not self.args.eval:
save_results(ep=int(epi_count[0]), dirname=self.dirname, score=cur_score,
total_counts=self.total_counts)
print('[ Ep: {} | Score: {} ]'.format(ep, score))
else:
self.total_counts = copy.deepcopy(self.envs.total_counts[0])
ep_len_sum += mask*dones*steps
score_sum += mask*dones*cur_score
return_sum += mask*dones*cur_return
# reset if episode is done
epi_count += dones*mask
steps *= (1 - dones)
cur_score *= (1 - dones)
cur_return *= (1 - dones)
div = num_iter * self.envs.num_envs
print('\n========= Final Result =========')
print('Avg Length =', ep_len_sum.sum()/div)
print('Avg Score =', score_sum.sum()/div)
print('Avg Return =', return_sum.sum()/div)
print('================================')
# save the mean scores and episode lengths
ep_lens = np.asarray(ep_lens)
scores = np.asarray(score_buffer)
mean_ep_lens = ep_lens[:, 1].mean()
mean_score = scores[:, 1].mean()
ep_lens[:, 1].fill(mean_ep_lens)
scores[:, 1].fill(mean_score)
if not self.args.eval:
self.sc2_filewriter.save(mean=scores, ep_len=ep_lens)
def _compute_records(self, reward, done, value, frames):
if value is not None:
value = value.cpu().numpy()
reward = reward.cpu().numpy()
# episode length taken
self.step += 1
for i in range(done.shape[0]):
if done[i] > 0.5:
self.ep_len_window = _update_window(self.ep_len_window, self.step[i])
self.ep_len = (1 - done)*self.ep_len + done*self.step
self.step = 0*done + (1 - done)*self.step
# init value est
if value is not None:
self.init_values = (1 - self.last_dones)*self.init_values + self.last_dones*value
self.scores = (1 - self.last_dones)*(reward + self.scores) + self.last_dones*reward
self.ep_returns = (1 - self.last_dones)*(self.gammas*reward + self.ep_returns) + self.last_dones*self.gammas*reward
self.gammas = (1 - done)*self.gamma*self.gammas + done
self.score_record = done * self.scores + (1 - done) * self.score_record
self.return_record = done * self.ep_returns + (1 - done) * self.return_record
        self.dones_window = _update_window(self.dones_window, self.last_dones)
self.frames = (1 - done)*self.frames
done = done > 0.5
num_dones = done.sum()
num_succ = (done * (reward > 0)).sum()
for i in range(num_dones):
if i < num_succ:
self.success_window = _update_window(self.success_window, 1) # success
else:
self.success_window = _update_window(self.success_window, 0) # fail
if self.eval_flag:
self.cum_frames += frames
if self.eval_flag and done:
self.ep_scores[self.nb_eval_epi] = float(self.scores > 0)
self.test_score += float(self.scores > 0)
self.nb_eval_epi += 1
def reset_trial(self):
self.obs, _, _ = self.envs.reset()
if self.infer:
            self.ilp.insert(self.obs)
self.epi_count = np.zeros(self.envs.num_envs)
self.frames = np.zeros(self.envs.num_envs)
self.last_dones = np.ones(self.envs.num_envs)
self.scores.fill(0)
        self.dones_window = np.zeros((self.envs.num_envs, 100))
        self.success_window, self.ep_len_window = np.zeros((2, self.envs.num_envs * 10))
self.gammas = self.gamma*np.ones(self.envs.num_envs)
self.logs.update({'eps': 0, 'rew_best': 0, 'start_time': time.time(),
'ep_rew': np.zeros(self.envs.num_envs),
'dones': np.zeros(self.envs.num_envs)})
|
11555610
|
import sys
from ansible.errors import AnsibleError
from ansible.parsing.vault import VaultLib
def load_vault_key(key_path):
    with open(key_path, 'r') as fp:
        key = fp.read().strip()
    if not key:
        # covers both a missing key and a file that strips to an empty string
        raise Exception('Could not load key or it had zero content')
    return VaultLib(key)
def decrypt_file(file_path, vault):
plaintext = None
with open(file_path, 'r') as fp:
ciphertext = fp.read()
try:
plaintext = vault.decrypt(ciphertext)
except AnsibleError as e:
print('Error: {0}'.format(e))
print('Please verify that you are using the correct key.')
sys.exit(1)
return plaintext
def encrypt_file(plaintext, file_path, vault):
with open(file_path, 'w') as fp:
ciphertext = vault.encrypt(plaintext)
fp.write(ciphertext)
def rekey_vault_file(original_file, new_file, original_vault, new_vault):
plaintext = decrypt_file(original_file, original_vault)
encrypt_file(plaintext, new_file, new_vault)
print('Re-encrypted {}'.format(original_file))
def argument_handler(value, all_args):
original_vault = load_vault_key(all_args.orig_key)
new_vault = load_vault_key(all_args.new_key)
rekey_vault_file(
all_args.filename,
all_args.filename,
original_vault,
new_vault
)
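# Hypothetical usage of the helpers above (argument parsing lives elsewhere
# in this project; the file paths are illustrative only):
#
#   from argparse import Namespace
#   args = Namespace(orig_key='old_vault_pass.txt',
#                    new_key='new_vault_pass.txt',
#                    filename='group_vars/all/vault.yml')
#   argument_handler(None, args)  # decrypt with the old key, re-encrypt in place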
|
11555613
|
import pytest
from unittest.mock import patch
from flask_mailman import EmailMessage
from resources.email import Email
@pytest.mark.usefixtures("empty_test_db")
class TestEmail:
@patch.object(EmailMessage, "send")
def test_reset_password_msg(self, send_mail_msg, create_user):
with patch.object(EmailMessage, "__init__", return_value=None):
Email.send_reset_password_msg(create_user())
send_mail_msg.assert_called()
@patch.object(EmailMessage, "send")
def test_send_user_invite_msg(self, send_mail_msg, create_user):
with patch.object(EmailMessage, "__init__", return_value=None):
Email.send_user_invite_msg(create_user())
send_mail_msg.assert_called()
|
11555614
|
import pytest
from remote.configuration.classic import CONFIG_FILE_NAME, ClassicConfigurationMedium
from remote.configuration.discovery import get_configuration_medium, resolve_workspace_root
def test_resolve_workspace_root(tmp_path):
work_dir = tmp_path / "foo" / "bar"
work_dir.mkdir(parents=True)
(tmp_path / CONFIG_FILE_NAME).write_text("my-host:some/dir")
(medium, root) = resolve_workspace_root(work_dir)
assert root == tmp_path
assert isinstance(medium, ClassicConfigurationMedium)
@pytest.mark.parametrize("medium_class", [ClassicConfigurationMedium])
def test_get_configuration_medium(medium_class, workspace_config):
medium_class().save_config(workspace_config)
medium = get_configuration_medium(workspace_config)
assert isinstance(medium, medium_class)
|
11555650
|
import FWCore.ParameterSet.Config as cms
#
# produce stGenEvent with all necessary ingredients
#
from TopQuarkAnalysis.TopEventProducers.producers.TopInitSubset_cfi import *
from TopQuarkAnalysis.TopEventProducers.producers.TopDecaySubset_cfi import *
from TopQuarkAnalysis.TopEventProducers.producers.StGenEvtProducer_cfi import *
makeGenEvtTask = cms.Task(
initSubset,
decaySubset,
genEvtSingleTop
)
makeGenEvt = cms.Sequence(makeGenEvtTask)
|
11555651
|
from flask import render_template, request, flash
from nanobrok.models import DeviceInfo, User
import uuid, jwt
from dynaconf import settings
from nanobrok.ext.database import db
from flask_login import login_required
# This file is part of the Nanobrok Open Source Project.
# nanobrok is licensed under the Apache 2.0.
# Copyright 2021 p0cL4bs Team - <NAME> (mh4x0f)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@login_required
def sync_index():
user_mobile = User.query.one_or_none()
    # TODO: implement an option in the user model to control initial access
if request.method == "POST":
if user_mobile:
if user_mobile.is_connected:
user_mobile.is_authenticated = True
db.session.add(user_mobile)
db.session.commit()
                flash(
                    f"The device {user_mobile.deviceInfo.manufacturer} "
                    f"{user_mobile.deviceInfo.model} has connected successfully."
                )
if not user_mobile:
user_mobile = User(public_id=str(uuid.uuid4()))
user_mobile.deviceInfo = DeviceInfo()
db.session.add(user_mobile)
db.session.commit()
token_jwt = jwt.encode(
{"public_id": user_mobile.public_id},
settings.SECRET_KEY,
)
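    # NOTE: jwt.encode() returned bytes under PyJWT < 2.0 (hence the
    # .decode("UTF-8") below); PyJWT >= 2.0 returns str directly.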
return render_template(
"pages/sync/index.html",
token_jwt=token_jwt.decode("UTF-8"),
user=user_mobile,
)
|
11555680
|
from __future__ import unicode_literals
import re
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from djblets.db.fields import JSONField
from reviewboard.attachments.models import FileAttachmentHistory
from reviewboard.reviews.models.default_reviewer import DefaultReviewer
@python_2_unicode_compatible
class BaseReviewRequestDetails(models.Model):
"""Base information for a review request and draft.
ReviewRequest and ReviewRequestDraft share a lot of fields and
methods. This class provides those fields and methods for those
classes.
"""
MAX_SUMMARY_LENGTH = 300
description = models.TextField(_("description"), blank=True)
description_rich_text = models.BooleanField(
_('description in rich text'),
default=False)
testing_done = models.TextField(_("testing done"), blank=True)
testing_done_rich_text = models.BooleanField(
_('testing done in rich text'),
default=False)
bugs_closed = models.CharField(_("bugs"), max_length=300, blank=True)
branch = models.CharField(_("branch"), max_length=300, blank=True)
commit_id = models.CharField(_('commit ID'), max_length=64, blank=True,
null=True, db_index=True)
extra_data = JSONField(null=True)
# Deprecated and no longer used for new review requests as of 2.0.9.
rich_text = models.BooleanField(_("rich text"), default=False)
def get_review_request(self):
raise NotImplementedError
def get_bug_list(self):
"""Returns a list of bugs associated with this review request."""
if self.bugs_closed == "":
return []
bugs = list(set(re.split(r"[, ]+", self.bugs_closed)))
# First try a numeric sort, to show the best results for the majority
# case of bug trackers with numeric IDs. If that fails, sort
# alphabetically.
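        # Illustrative: ['3', '12'] sorts numerically to ['3', '12'], while
        # ['BUG-7', '12'] raises ValueError in int() and falls back to the
        # lexicographic order ['12', 'BUG-7'].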
try:
bugs.sort(key=int)
except ValueError:
bugs.sort()
return bugs
def get_screenshots(self):
"""Return a generator for all active screenshots.
This includes all current screenshots, but not previous inactive ones.
By accessing screenshots through this method, future review request
lookups from the screenshots will be avoided.
Yields:
reviewboard.reviews.models.screenshot.Screenshot:
A screenshot on the review request or draft.
"""
if self.screenshots_count > 0:
review_request = self.get_review_request()
for screenshot in self.screenshots.all():
screenshot._review_request = review_request
yield screenshot
def get_inactive_screenshots(self):
"""Return a generator for all inactive screenshots.
This only includes screenshots that were previously visible but
have since been removed.
By accessing screenshots through this method, future review request
lookups from the screenshots will be avoided.
Yields:
reviewboard.reviews.models.screenshot.Screenshot:
An inactive screenshot on the review request or draft.
"""
if self.inactive_screenshots_count > 0:
review_request = self.get_review_request()
for screenshot in self.inactive_screenshots.all():
screenshot._review_request = review_request
yield screenshot
def get_file_attachments(self):
"""Return a list for all active file attachments.
This includes all current file attachments, but not previous inactive
ones.
By accessing file attachments through this method, future review
request lookups from the file attachments will be avoided.
Returns:
list of reviewboard.attachments.models.FileAttachment:
The active file attachments on the review request or draft.
"""
def get_attachments(review_request):
for file_attachment in self.file_attachments.all():
file_attachment._review_request = review_request
# Handle legacy entries which don't have an associated
# FileAttachmentHistory entry.
if (not file_attachment.is_from_diff and
file_attachment.attachment_history is None):
history = FileAttachmentHistory.objects.create(
display_position=FileAttachmentHistory
.compute_next_display_position(
review_request))
review_request.file_attachment_histories.add(history)
file_attachment.attachment_history = history
file_attachment.save(update_fields=['attachment_history'])
yield file_attachment
def get_display_position(attachment):
if attachment.attachment_history_id is not None:
return attachment.attachment_history.display_position
else:
return 0
if self.file_attachments_count > 0:
review_request = self.get_review_request()
return sorted(get_attachments(review_request),
key=get_display_position)
else:
return []
def get_inactive_file_attachments(self):
"""Return a generator for all inactive file attachments.
This only includes file attachments that were previously visible
but have since been removed.
By accessing file attachments through this method, future review
request lookups from the file attachments will be avoided.
Yields:
reviewboard.attachments.models.FileAttachment:
An inactive file attachment on the review request or draft.
"""
if self.inactive_file_attachments_count > 0:
review_request = self.get_review_request()
for file_attachment in self.inactive_file_attachments.all():
file_attachment._review_request = review_request
yield file_attachment
def add_default_reviewers(self):
"""Add default reviewers based on the diffset.
This method goes through the DefaultReviewer objects in the database
and adds any missing reviewers based on regular expression comparisons
with the set of files in the diff.
"""
if not self.repository:
return
diffset = self.get_latest_diffset()
if not diffset:
return
match_default_reviewer_ids = []
# This won't actually be queried until needed, since we're not
# evaluating the queryset at this stage. That means we save a lookup
# if the list of default reviewers is empty below.
files = diffset.files.values_list('source_file', 'dest_file')
default_reviewers = (
DefaultReviewer.objects.for_repository(self.repository,
self.local_site)
.only('pk', 'file_regex')
)
for default_reviewer in default_reviewers:
            try:
                regex = re.compile(default_reviewer.file_regex)
            except re.error:
                # skip default reviewers whose stored file_regex is invalid
                continue
for source_file, dest_file in files:
if regex.match(source_file or dest_file):
match_default_reviewer_ids.append(default_reviewer.pk)
break
if not match_default_reviewer_ids:
return
# Get the list of users and groups across all matched default
# reviewers. We'll fetch them directly from the ManyToMany tables,
# to avoid extra queries. Django's m2m.add() methods will ensure no
# duplicates are added, and that insertions aren't performed if not
# needed.
self.target_people.add(*(
entry.user
for entry in (
DefaultReviewer.people.through.objects
.filter(defaultreviewer_id__in=match_default_reviewer_ids,
user__is_active=True)
.select_related('user')
)
))
self.target_groups.add(*(
entry.group
for entry in (
DefaultReviewer.groups.through.objects
.filter(defaultreviewer_id__in=match_default_reviewer_ids)
.select_related('group')
)
))
def save(self, **kwargs):
self.bugs_closed = self.bugs_closed.strip()
self.summary = self._truncate(self.summary, self.MAX_SUMMARY_LENGTH)
super(BaseReviewRequestDetails, self).save(**kwargs)
def _truncate(self, string, num):
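        # Cut to at most `num` characters, then back to the last '.' so the
        # summary ends on a sentence boundary when possible, e.g.
        # _truncate('First. Second sentence', 12) -> 'First.'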
if len(string) > num:
string = string[0:num]
i = string.rfind('.')
if i != -1:
string = string[0:i + 1]
return string
def __str__(self):
if self.summary:
return six.text_type(self.summary)
else:
return six.text_type(_('(no summary)'))
class Meta:
abstract = True
app_label = 'reviews'
|
11555694
|
from inspr import *
import sys
PING_INPUT_CHANNEL = "pinginput"
PING_OUTPUT_CHANNEL = "pingoutput"
def main():
client = Client()
msg = "Ping!"
@client.handle_channel(PING_INPUT_CHANNEL)
def read_pong_and_send_ping(data):
if data == 'Pong!':
print(data, file=sys.stderr)
else:
            print('Pong not received', file=sys.stderr)
try:
client.write_message(PING_OUTPUT_CHANNEL, msg)
return Response(status=200)
        except Exception as err:
            # propagate the real failure instead of a bare, message-less Exception
            raise RuntimeError('Failed to write Ping message') from err
try:
client.write_message(PING_OUTPUT_CHANNEL, msg)
    except Exception:
        print("An error has occurred", file=sys.stderr)
return
client.run()
if __name__ == "__main__":
main()
|
11555720
|
from skyfield.data import hipparcos
from skyfield.api import Star, load
import numpy as np
import erfa
HEADER = '''############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
#
# Licence APL2.0
#
###########################################################
#
# this file is auto generated for the purpose of getting data prepared
# to show the alignment stars in mountwizzard
#
# standard libraries
# external packages
# local import
def generateAlignStars():
"""
generateAlignStars is the function where the alignment stars which were
present in the mount computer from hipparcos catalogue are stored. for a
correct calculation we need beside the J2000 coordinated the proper motion in
ra and dec, the parallax and the radial velocity as the stars move over time.
the data is calculated from the hipparcos catalogue using skyfield library
the data is written in
[name, hip no, ra, dec, ra proper motion, dec proper motion, parallax,
radial velocity] based on J2000 epoch. the units are fitting erfa needs:
[str, int, radians, radians, radians / year, radians/year, arc sec, km /s]
"""
star = dict()
'''
FOOTER = '''
return star
'''
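# The generated alignstars.py file is HEADER, one "star['<name>'] = [...]"
# line per catalogue entry, and FOOTER, together forming a module that
# exposes generateAlignStars().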
named_star_dict = {
'Achernar': 7588,
'Acrux': 60718,
'Adhara': 33579,
# 'Agena': 68702,
'Albireo': 95947,
'Alcor': 65477,
'Aldebaran': 21421,
'Alderamin': 105199,
'Algenib': 15863,
'Algieba': 50583,
'Algol': 14576,
'Alhena': 31681,
'Alioth': 62956,
'Alkaid': 67301,
'Almach': 9640,
'Alnair': 109268,
'Alnilam': 26311,
'Alnitak': 26727,
'Alphard': 46390,
# 'Alphecca': 76267,
'Alpheratz': 677,
'Altair': 97649,
'Aludra': 35904,
'Ankaa': 2081,
'Antares': 80763,
'Arcturus': 69673,
# 'Arided': 102098,
# 'Aridif': 102098,
# 'Aspidiske': 45556,
'Atria': 82273,
'Avior': 41037,
'Becrux': 62434,
'Bellatrix': 25336,
# 'Benetnash': 67301,
'Betelgeuse': 27989,
'Birdun': 66657,
'Canopus': 30438,
'Capella': 24608,
'Caph': 746,
'Castor': 36850,
'Deneb': 102098,
# '<NAME>': 3419,
'Denebola': 57632,
'Diphda': 3419,
'Dschubba': 78401,
'Dubhe': 54061,
'<NAME>': 8102,
'Elnath': 25428,
'Enif': 107315,
'Etamin': 87833,
'Fomalhaut': 113368,
'Foramen': 93308,
'Gacrux': 61084,
'Gemma': 76267,
'Gienah': 102488,
'Girtab': 86228,
'Gruid': 112122,
'Hadar': 68702,
'Hamal': 9884,
"Herschel's Garnet Star": 107259,
'Izar': 72105,
'<NAME>': 90185,
'Kochab': 72607,
'<NAME>': 42913,
'Marchab': 113963,
'Marfikent': 71352,
'Markab': 45941,
'Megrez': 59774,
'Men': 71860,
'Menkalinan': 28360,
'Menkent': 68933,
'Merak': 53910,
'Miaplacidus': 45238,
# 'Mimosa': 62434,
'Mintaka': 25930,
'Mira': 10826,
'Mirach': 5447,
# 'Mirfak': 15863,
'Mirzam': 30324,
'Mizar': 65378,
'Muhlifein': 61932,
# 'Murzim': 30324,
'Naos': 39429,
'Nunki': 92855,
'Peacock': 100751,
'Phad': 58001,
# 'Phecda': 58001,
'Polaris': 11767,
'Pollux': 37826,
'Procyon': 37279,
# '<NAME>': 86032,
'Rasalhague': 86032,
'Regor': 39953,
'Regulus': 49669,
'Rigel': 24436,
# '<NAME>': 71683,
# '<NAME>': 71683,
'Sabik': 84012,
'Sadira': 16537,
'Sadr': 100453,
'Saiph': 27366,
# 'Sargas': 86228,
'Scheat': 113881,
'Schedar': 3179,
# 'Scutulum': 45556,
'Shaula': 85927,
'Sirius': 32349,
# 'Sirrah': 677,
'South Star': 104382,
'Spica': 65474,
'Suhail': 44816,
'Thuban': 68756,
'Toliman': 71683,
# '<NAME>': 93308,
'Tsih': 4427,
'Turais': 45556,
'Vega': 91262,
'Wei': 82396,
'Wezen': 34444,
}
def make_file():
with load.open(hipparcos.URL) as f:
df = hipparcos.load_dataframe(f)
with open('alignstars.py', 'w') as f:
f.write(HEADER)
for name in named_star_dict:
starH = Star.from_dataframe(df.loc[named_star_dict[name]])
ra = starH.ra.radians
dec = starH.dec.radians
ra_mas_per_year = starH.ra_mas_per_year
dec_mas_per_year = starH.dec_mas_per_year
parallax_mas = starH.parallax_mas
radial_km_per_s = starH.radial_km_per_s
            # convert the values for the erfa routine, since the skyfield
            # calculation is not performant enough
# convert mas / year to radians / year
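            # 1 mas = 1e-3 arcsec and 3 600 000 mas = 1 degree, so divide
            # by 3 600 000 for degrees, then scale by 2*pi/360 for radians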
PR = ra_mas_per_year / 3600000 * 2 * np.pi / 360
PD = dec_mas_per_year / 3600000 * 2 * np.pi / 360
PX = parallax_mas / 1000
RV = radial_km_per_s
            # and convert the Hipparcos epoch (J1991.25) to the epoch
            # erfa needs (J2000)
            # J2000 = JD 2451545.0
            # HIP   = J1991.25 = JD 2448347.5
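            # erfa.pmsafe propagates the position and proper motion from the
            # Hipparcos epoch to J2000.0, returning the updated coordinates,
            # proper motions, parallax and radial velocity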
ra2, dec2, pr2, pd2, px2, rv2 = erfa.pmsafe(ra,
dec,
PR,
PD,
PX,
RV,
2448347.5,
0.0,
2451545,
0.0,
)
if name.startswith('Hersch'):
name = 'Herschel Star'
lineA = f"star['{name}'] = [{ra2}, {dec2},\n"
lineB = f" {pr2}, {pd2},\n"
lineC = f" {px2}, {rv2}]\n"
print(name)
f.write(' ' + lineA)
spacer = ' ' * (len(name) - 3)
f.write(' ' + spacer + lineB)
f.write(' ' + spacer + lineC)
f.write(FOOTER)
if __name__ == '__main__':
make_file()
|