id | content
|---|---|
1693608
|
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
sys.path.append("../audio/")
import hparams as hp
import audio_utils as audio
import librosa
import librosa.display
def plot(file, fname):
wav = librosa.load(file, sr=16000)[0]
stft = librosa.stft(y=wav, n_fft=hp.hparams.n_fft_den, hop_length=hp.hparams.hop_size_den, win_length=hp.hparams.win_size_den)
print("STFT: ", stft.shape)
# Display magnitude spectrogram
D = np.abs(stft)
librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),y_axis='log', x_axis='time')
plt.title('Power spectrogram')
plt.colorbar(format='%+2.0f dB')
plt.tight_layout()
plt.savefig(fname + ".jpg")  # save before show(), otherwise the figure may already be cleared
plt.show()
plt.clf()
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--gt_file', type=str, required=True, help='GT wav file')
parser.add_argument('--noisy_file', type=str, required=True, help='Noisy wav file')
parser.add_argument('--pred_file', type=str, required=True, help='Predicted wav file')
args = parser.parse_args()
plot(args.gt_file, 'gt')
plot(args.noisy_file, 'noisy')
plot(args.pred_file, 'pred')
|
1693611
|
import re
class Templite(object):
delimiter = re.compile(r"\$\{(.*?)\}\$", re.DOTALL)
def __init__(self, template):
self.tokens = self.compile(template)
@classmethod
def from_file(cls, file):
"""
loads a template from a file. `file` can be either a string, specifying
a filename, or a file-like object, supporting read() directly
"""
if isinstance(file, basestring):
file = open(file)
return cls(file.read())
@classmethod
def compile(cls, template):
tokens = []
for i, part in enumerate(cls.delimiter.split(template)):
if i % 2 == 0:
if part:
tokens.append((False, part.replace("$\\{", "${")))
else:
if not part.strip():
continue
lines = part.replace("}\\$", "}$").splitlines()
margin = min(len(l) - len(l.lstrip()) for l in lines if l.strip())
realigned = "\n".join(l[margin:] for l in lines)
code = compile(realigned, "<templite %r>" % (realigned[:20],), "exec")
tokens.append((True, code))
return tokens
def render(__self, __namespace = None, **kw):
"""
renders the template according to the given namespace.
__namespace - a dictionary serving as a namespace for evaluation
**kw - keyword arguments which are added to the namespace
"""
namespace = {}
if __namespace: namespace.update(__namespace)
if kw: namespace.update(kw)
def emitter(*args):
for a in args: output.append(str(a))
def fmt_emitter(fmt, *args):
output.append(fmt % args)
namespace["emit"] = emitter
namespace["emitf"] = fmt_emitter
output = []
for is_code, value in __self.tokens:
if is_code:
eval(value, namespace)
else:
output.append(value)
return "".join(output)
# shorthand
__call__ = render
---------
example:
---------
>>> from templite import Templite
>>>
>>> demo = r"""
... <html>
... <body>
... ${
... def say_hello(arg):
... emit("hello ", arg, "<br>")
... }$
...
... <table>
... ${
... for i in range(10):
... emit("<tr><td> ")
... say_hello(i)
... emit(" </tr></td>\n")
... }$
... </table>
...
... ${emit("hi")}$
...
... tralala ${if x > 7:
... say_hello("big x")}$ lala
...
... $\{this is escaped starting delimiter
...
... ${emit("this }\$ is an escaped ending delimiter")}$
...
... ${# this is a python comment }$
...
... </body>
... </html>
... """
>>>
>>> t = Templite(demo)
>>> print t(x = 8)
<html>
<body>
<table>
<tr><td> hello 0<br> </tr></td>
<tr><td> hello 1<br> </tr></td>
<tr><td> hello 2<br> </tr></td>
<tr><td> hello 3<br> </tr></td>
<tr><td> hello 4<br> </tr></td>
<tr><td> hello 5<br> </tr></td>
<tr><td> hello 6<br> </tr></td>
<tr><td> hello 7<br> </tr></td>
<tr><td> hello 8<br> </tr></td>
<tr><td> hello 9<br> </tr></td>
</table>
hi
tralala hello big x<br> lala
${this is escaped starting delimiter
this }$ is an escaped ending delimiter
</body>
</html>
>>>
|
1693624
|
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
from lib.normalize import Normalize
import torch
from models.cbam import CBAM
import torch.nn.functional as F
from lib.utils import showfeature, showimage
import numpy as np
import random
import torch.backends.cudnn as cudnn
import os
my_whole_seed = 111
random.seed(my_whole_seed)
np.random.seed(my_whole_seed)
torch.manual_seed(my_whole_seed)
torch.cuda.manual_seed_all(my_whole_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
os.environ['PYTHONHASHSEED'] = str(my_whole_seed)
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, feat):
return feat.view(feat.size(0), -1)
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, low_dim=128, multitask=False, showfeature=False, finetune=False, domain=False,args=None):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
# self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
# bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1) # 7 if input is 224 !!!!!!!!!!
self.fc = nn.Linear(512 * block.expansion, low_dim)
self.l2norm = Normalize(2)
self.saveembed = args.saveembed
self.showfeature = showfeature
self.multitask = multitask
self.finetune = finetune
self.domain = domain
if self.finetune:
self.finetune_layer = nn.Sequential(
Flatten(),
nn.Linear(128, 128, bias=False),
nn.BatchNorm1d(128),
nn.ReLU(inplace=True),
nn.Linear(128, 2, bias=False),
)
if self.multitask and self.domain:
self.domain_classifier = nn.Linear(128, 2)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2)
self.fc_block = nn.Sequential(
Flatten(),
# 3*3 if input is 224
nn.Linear(512 * 3 * 3, 256, bias=False),
nn.BatchNorm1d(256),
nn.ReLU(inplace=True),
nn.Linear(256, 256, bias=False),
nn.BatchNorm1d(256),
nn.ReLU(inplace=True),
)
if self.multitask:
self.rotation_classifier = nn.Linear(128, 4)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2)
self.fc_block = nn.Sequential(
Flatten(),
# 3*3 if input is 224
nn.Linear(512 * 3 * 3, 256, bias=False),
nn.BatchNorm1d(256),
nn.ReLU(inplace=True),
nn.Linear(256, 256, bias=False),
nn.BatchNorm1d(256),
nn.ReLU(inplace=True),
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
# dataX_90 = torch.flip(torch.transpose(x, 2, 3), [2])
# dataX_180 = torch.flip(torch.flip(x, [2]), [3])
# dataX_270 = torch.transpose(torch.flip(x, [2]), 2, 3)
# x = torch.stack([x, dataX_90, dataX_180, dataX_270], dim=1)
# x = x.view([3 * 4, 3,224,224])
#
# print (x.shape)
# exit(0)
# for b in range(0, 8):
# showimage(x[0], "batch-0-image.png")
# showimage(x[75], "batch-75-image.png")
# showimage(x[150], "batch-150-image.png")
# showimage(x[225], "batch-225-image.png")
# exit(0)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# if self.showfeature:
# showfeature(x[4,:,:,:], "feature_1rot.png")
# if self.showfeature:
# showfeature(x[5,:,:,:], "feature_101rot.png")
# if self.showfeature:
# showfeature(x[6,:,:,:], "feature_201rot.png")
# if self.showfeature:
# showfeature(x[7,:,:,:], "feature_301rot.png")
# print (x.shape)
# exit(0)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
x = self.l2norm(x)
# if self.saveembed != "":
# print (x.shape)
# print ("save to ", self.saveembed)
# np.savetxt("embed/"+self.saveembed, x.cpu().data.numpy())
# exit(0)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
|
1693631
|
import sys
sys.path.append("environments")
from environment_roundtrip_c8y import Environment_roundtrip_c8y
"""
Roundtrip test C8y
Given a configured system with configured certificate
When we derive from EnvironmentC8y
When we run the smoketest for JSON publishing with defaults a size of 20, 100ms delay
Then we validate the data from C8y
"""
class SmokeTestJson(Environment_roundtrip_c8y):
def setup(self):
super().setup()
self.samples = "20"
self.delay = "100"
self.timeslot = "10"
self.style = "JSON"
|
1693679
|
def encrypt(value_, params):
lo = value_.find("@")
if lo > 0:
return "#####" + value_[lo - 1:]
else:
raise Exception("invalid email {}".format(value_))
|
1693687
|
from setuptools import setup
setup(
name='autodsp',
version='0.0.1',
description='Code to reproduce the 2021 WASPAA paper titled AUTO-DSP: LEARNING TO OPTIMIZE ACOUSTIC ECHO CANCELLERS.',
author='<NAME>, <NAME>, <NAME>',
author_email='<EMAIL>',
url='https://github.com/jmcasebeer/autodsp',
packages=['autodsp'],
license='University of Illinois Open Source License',
install_requires=[
'matplotlib==3.4.3',
'numpy==1.21.2',
'pandas==1.3.3',
'scipy==1.7.1',
'tqdm==4.62.3',
'wandb==0.12.4',
]
)
|
1693689
|
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from scipy import ndimage
from .base import SaliencyMap
class GuidedBackprop(SaliencyMap):
def get_mask(self, image, preprocess=True):
"""Computes Integrated Gradients for a predicted label.
Args:
image (ndarray): Original image
top_pred_idx: Predicted label for the input image
baseline (ndarray): The baseline image to start with for interpolation
num_steps: Number of interpolation steps between the baseline
and the input used in the computation of integrated gradients. These
steps along determine the integral approximation error. By default,
num_steps is set to 50.
Returns:
Integrated gradients w.r.t input image
"""
@tf.custom_gradient
def guidedRelu(x):
def grad(dy):
return tf.cast(dy>0,"float32") * tf.cast(x>0, "float32") * dy
return tf.nn.relu(x), grad
guided_relu_model = Model(
inputs = [self.model.inputs],
outputs = [self.model.outputs]
)
layer_dict = [layer for layer in guided_relu_model.layers[1:] if hasattr(layer, 'activation')]
for layer in layer_dict:
if layer.activation == tf.keras.activations.relu:
layer.activation = guidedRelu
with tf.GradientTape() as tape:
inputs = tf.cast(image, tf.float32)
tape.watch(inputs)
outputs = guided_relu_model(inputs)
grads = tape.gradient(outputs, inputs)[0]
return grads
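# Illustrative usage sketch (assumptions: SaliencyMap's constructor takes the Keras model
# and stores it as self.model; neither is shown in this file):
# explainer = GuidedBackprop(keras_model)
# saliency = explainer.get_mask(np.expand_dims(image, axis=0))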
|
1693691
|
import os
import torch
import numpy as np
import torch.nn as nn
from tensorboardX import SummaryWriter
from torch.utils.data.dataloader import DataLoader
from graph_ter_seg.models.backbone import Backbone
from graph_ter_seg.runner.runner import Runner
from graph_ter_seg.tools.utils import import_class
class BackboneRunner(Runner):
def __init__(self, args):
super(BackboneRunner, self).__init__(args)
# loss
self.loss = nn.MSELoss().to(self.output_dev)
def load_dataset(self):
feeder_class = import_class(self.args.dataset)
feeder = feeder_class(
self.args.data_path, num_points=self.args.num_points,
transform=self.transform, phase='train'
)
self.dataset['train'] = DataLoader(
dataset=feeder,
batch_size=self.args.train_batch_size,
shuffle=True,
num_workers=8
)
self.print_log(f'Train data loaded: {len(feeder)} samples.')
def load_model(self):
model = Backbone(
k=self.args.knn, out_features=self.transform.out_features
)
model = model.to(self.output_dev)
self.model['train'] = model
def initialize_model(self):
if self.args.backbone is not None:
self.load_model_weights(
self.model['train'],
self.args.backbone,
self.args.ignore_backbone
)
self.load_optimizer_weights(self.optimizer, self.args.backbone)
self.load_scheduler_weights(self.scheduler, self.args.backbone)
def run(self):
best_epoch = -1
best_loss = np.Inf
for epoch in range(self.epoch, self.args.num_epochs):
loss = self._train_backbone(epoch)
if loss < best_loss:
best_loss = loss
best_epoch = epoch
self.print_log(
'Min loss: {:.5f}, best model: model{}.pt'.format(
best_loss, best_epoch + 1
))
def _train_backbone(self, epoch):
self.print_log(f'Train Backbone Epoch: {epoch + 1}')
self.model['train'].train()
loss_values = []
self.record_time()
timer = dict(data=0.0, model=0.0, statistic=0.0)
for batch_id, (x, y, t, m, _, _) in enumerate(self.dataset['train']):
# get data
x = x.float().to(self.output_dev)
y = y.float().to(self.output_dev)
t = t.float().to(self.output_dev)
m = m.long().to(self.output_dev)
timer['data'] += self.tick()
# forward
t_hat = self.model['train'](x, y)
t_hat = torch.gather(t_hat, dim=-1, index=m)
loss = self.loss(t, t_hat) * self.args.lambda_mse
# backward
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
timer['model'] += self.tick()
loss_values.append(loss.item())
if (batch_id + 1) % self.args.log_interval == 0:
self.print_log(
'Batch({}/{}) done. Loss: {:.4f}, lr: {:.5f}'.format(
batch_id + 1, len(self.dataset['train']), loss.item(),
self.optimizer.param_groups[0]['lr']
))
timer['statistic'] += self.tick()
self.scheduler.step()
mean_loss = np.mean(loss_values)
self.print_log('Mean training loss: {:.4f}.'.format(mean_loss))
self.print_log(
'Time consumption: [Data] {:.1f} min, [Model] {:.1f} min'.format(
timer['data'] / 60.0, timer['model'] / 60.0
))
if self.args.save_model and (epoch + 1) % self.args.save_interval == 0:
model_path = os.path.join(
self.backbone_path, f'model{epoch + 1}.pt'
)
self.save_weights(
epoch, self.model['train'], self.optimizer, self.scheduler,
model_path
)
if self.args.use_tensorboard:
with SummaryWriter(log_dir=self.tensorboard_path) as writer:
writer.add_scalar('train/backbone_loss', mean_loss, epoch)
return mean_loss
|
1693703
|
import numpy as np
from sklearn import metrics
from garrus.core import BaseMetric
class AUPRE(BaseMetric):
"""Area under the precision-recall curve using errors as the positive class."""
def _compute(self, confidences: np.ndarray, accuracies: np.ndarray, **kwargs) -> float:
aupr_err = metrics.average_precision_score(-1 * accuracies + 1, -1 * confidences)
return float(aupr_err)
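# Illustrative sketch: the public call interface comes from garrus.core.BaseMetric (not shown),
# so _compute is invoked directly here and a no-argument constructor is assumed.
# confidences = np.array([0.9, 0.8, 0.4, 0.6])
# accuracies = np.array([1, 1, 0, 1])  # 1 = correct prediction, 0 = error
# aupr_err = AUPRE()._compute(confidences, accuracies)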
|
1693822
|
import importlib
import logging
import numpy as np
import os
import os.path as osp
import time
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data import ConcatDataset
from bisect import bisect_right
from functools import partial
from six.moves import map, zip
from libs.datasets.transform import TrainTransform
from libs.datasets.transform import EvalTransform
class AverageMeter(object):
"""Computes and stores the average and current value
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
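# Illustrative usage: track a running mean over two updates.
# meter = AverageMeter()
# meter.update(0.5, n=4)  # a batch of 4 samples with value 0.5
# meter.update(0.7)       # a single sample
# meter.avg               # -> (0.5 * 4 + 0.7) / 5 = 0.54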
def resource_path(relative_path):
"""To get the absolute path"""
base_path = osp.abspath(".")
return osp.join(base_path, relative_path)
def ensure_dir(root_dir, rank=0):
if not osp.exists(root_dir) and rank == 0:
print(f'=> creating {root_dir}')
os.mkdir(root_dir)
else:
while not osp.exists(root_dir):
print(f'=> wait for {root_dir} created')
time.sleep(10)
return root_dir
def create_logger(cfg, rank=0):
# working_dir root
abs_working_dir = resource_path('work_dirs')
working_dir = ensure_dir(abs_working_dir, rank)
# output_dir root
output_root_dir = ensure_dir(os.path.join(working_dir, cfg.OUTPUT_ROOT), rank)
time_str = time.strftime('%Y-%m-%d-%H-%M')
final_output_dir = ensure_dir(os.path.join(output_root_dir, time_str), rank)
# set up logger
logger = setup_logger(final_output_dir, time_str, rank)
return logger, final_output_dir
def setup_logger(final_output_dir, time_str, rank, phase='train'):
log_file = f'{phase}_{time_str}_rank{rank}.log'
final_log_file = os.path.join(final_output_dir, log_file)
head = '%(asctime)-15s %(message)s'
logging.basicConfig(filename=str(final_log_file), format=head)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
logging.getLogger('').addHandler(console)
return logger
def get_model(cfg, device):
module = importlib.import_module(cfg.MODEL.FILE)
model, criterion, postprocessors = getattr(module, 'build_model')(cfg, device)
return model, criterion, postprocessors
def get_optimizer(cfg, model):
"""Support two types of optimizers: SGD, Adam.
"""
assert (cfg.TRAIN.OPTIMIZER in [
'sgd',
'adam',
])
if cfg.TRAIN.OPTIMIZER == 'sgd':
optimizer = optim.SGD(
filter(lambda p: p.requires_grad, model.parameters()),
lr=cfg.TRAIN.LR,
momentum=cfg.TRAIN.MOMENTUM,
weight_decay=cfg.TRAIN.WEIGHT_DECAY,
nesterov=cfg.TRAIN.NESTEROV)
elif cfg.TRAIN.OPTIMIZER == 'adam':
optimizer = optim.Adam(
filter(lambda p: p.requires_grad, model.parameters()),
lr=cfg.TRAIN.LR,
weight_decay=cfg.TRAIN.WEIGHT_DECAY)
return optimizer
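# Illustrative call (assumes a yacs-style cfg; only the fields read above need to be set):
# cfg.TRAIN.OPTIMIZER = 'adam'
# cfg.TRAIN.LR = 1e-4
# cfg.TRAIN.WEIGHT_DECAY = 1e-4
# optimizer = get_optimizer(cfg, model)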
def load_checkpoint(cfg, model, optimizer, lr_scheduler, device, module_name='model'):
last_iter = -1
resume_path = cfg.MODEL.RESUME_PATH
resume = cfg.TRAIN.RESUME
if resume_path and resume:
if osp.exists(resume_path):
checkpoint = torch.load(resume_path, map_location='cpu')
# resume
if 'state_dict' in checkpoint:
model.module.load_state_dict(checkpoint['state_dict'], strict=False)
logging.info(f'==> model pretrained from {resume_path} \n')
elif 'model' in checkpoint:
if module_name == 'detr':
model.module.detr_head.load_state_dict(checkpoint['model'], strict=False)
logging.info(f'==> detr pretrained from {resume_path} \n')
else:
model.module.load_state_dict(checkpoint['model'], strict=False)
logging.info(f'==> model pretrained from {resume_path} \n')
if 'optimizer' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
logging.info(f'==> optimizer resumed, continue training')
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.to(device)
if 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
last_iter = checkpoint['epoch']
logging.info(f'==> last_epoch = {last_iter}')
if 'epoch' in checkpoint:
last_iter = checkpoint['epoch']
logging.info(f'==> last_epoch = {last_iter}')
# pre-train
else:
logging.error(f"==> checkpoint do not exists: \"{resume_path}\"")
raise FileNotFoundError
else:
logging.info("==> train model without resume")
return model, optimizer, lr_scheduler, last_iter
class WarmupMultiStepLR(_LRScheduler):
def __init__(self, optimizer, milestones, gamma=0.1, warmup_factor=1.0 / 3,
warmup_iters=500, last_epoch=-1):
if not list(milestones) == sorted(milestones):
raise ValueError(
"Milestones should be a list of increasing integers. Got {}".format(milestones)
)
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
super().__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = 1
if self.last_epoch < self.warmup_iters:
alpha = float(self.last_epoch) / self.warmup_iters
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
return [
base_lr
* warmup_factor
* self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
def get_lr_scheduler(cfg, optimizer, last_epoch=-1):
"""Support three types of optimizers: StepLR, MultiStepLR, MultiStepWithWarmup.
"""
assert (cfg.TRAIN.LR_SCHEDULER in [
'StepLR',
'MultiStepLR',
'MultiStepWithWarmup',
])
if cfg.TRAIN.LR_SCHEDULER == 'StepLR':
lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer,
cfg.TRAIN.LR_STEPS[0],
cfg.TRAIN.LR_FACTOR,
last_epoch=last_epoch)
elif cfg.TRAIN.LR_SCHEDULER == 'MultiStepLR':
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer,
cfg.TRAIN.LR_STEPS,
cfg.TRAIN.LR_FACTOR,
last_epoch=last_epoch)
elif cfg.TRAIN.LR_SCHEDULER == 'MultiStepWithWarmup':
lr_scheduler = WarmupMultiStepLR(
optimizer,
cfg.TRAIN.LR_STEPS,
cfg.TRAIN.LR_FACTOR,
cfg.TRAIN.WARMUP_INIT_FACTOR,
cfg.TRAIN.WARMUP_STEP,
last_epoch)
else:
raise AttributeError(f'{cfg.TRAIN.LR_SCHEDULER} is not implemented')
return lr_scheduler
def get_det_criterion(cfg):
return critertion
def get_trainer(cfg, model, criterion, optimizer, lr_scheduler, postprocessors,
log_dir, performance_indicator, last_iter, rank, device, max_norm):
module = importlib.import_module(cfg.TRAINER.FILE)
Trainer = getattr(module, cfg.TRAINER.NAME)(
cfg,
model=model,
criterion=criterion,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
postprocessors=postprocessors,
log_dir=log_dir,
performance_indicator=performance_indicator,
last_iter=last_iter,
rank=rank,
device=device,
max_norm = max_norm
)
return Trainer
def list_to_set(data_list, name='train'):
if len(data_list) == 0:
dataset = None
logging.warning(f"{name} dataset is None")
elif len(data_list) == 1:
dataset = data_list[0]
else:
dataset = ConcatDataset(data_list)
if dataset is not None:
logging.info(f'==> the size of {name} dataset is {len(dataset)}')
return dataset
def get_dataset(cfg):
train_transform = TrainTransform(
mean=cfg.DATASET.MEAN,
std=cfg.DATASET.STD,
scales=cfg.DATASET.SCALES,
max_size=cfg.DATASET.MAX_SIZE
)
eval_transform = EvalTransform(
mean=cfg.DATASET.MEAN,
std=cfg.DATASET.STD,
max_size=cfg.DATASET.MAX_SIZE
)
module = importlib.import_module(cfg.DATASET.FILE)
Dataset = getattr(module, cfg.DATASET.NAME)
data_root = cfg.DATASET.ROOT # abs path in yaml
# get train data list
train_root = osp.join(data_root, 'train')
train_set = [d for d in os.listdir(train_root) if osp.isdir(osp.join(train_root, d))]
if len(train_set) == 0:
train_set = ['.']
train_list = []
for sub_set in train_set:
train_sub_root = osp.join(train_root, sub_set)
logging.info(f'==> load train sub set: {train_sub_root}')
train_sub_set = Dataset(cfg, train_sub_root, train_transform)
train_list.append(train_sub_set)
# get eval data list
eval_root = osp.join(data_root, 'test')
eval_set = [d for d in os.listdir(eval_root) if osp.isdir(osp.join(eval_root, d))]
if len(eval_set) == 0:
eval_set = ['.']
eval_list = []
for sub_set in eval_set:
eval_sub_root = osp.join(eval_root, sub_set)
logging.info(f'==> load val sub set: {eval_sub_root}')
eval_sub_set = Dataset(cfg, eval_sub_root, eval_transform)
eval_list.append(eval_sub_set)
# concat dataset list
train_dataset = list_to_set(train_list, 'train')
eval_dataset = list_to_set(eval_list, 'eval')
return train_dataset, eval_dataset
def save_checkpoint(states, is_best, output_dir, filename='checkpoint.pth'):
torch.save(states, os.path.join(output_dir, filename))
logging.info(f'save model to {output_dir}')
if is_best:
torch.save(states['state_dict'], os.path.join(output_dir, 'model_best.pth'))
def load_eval_model(resume_path, model):
if resume_path != '':
if osp.exists(resume_path):
print(f'==> model load from {resume_path}')
checkpoint = torch.load(resume_path)
if 'state_dict' in checkpoint:
model.load_state_dict(checkpoint['state_dict'])
else:
model.load_state_dict(checkpoint)
else:
print(f"==> checkpoint do not exists: \"{resume_path}\"")
raise FileNotFoundError
return model
def multi_apply(func, *args, **kwargs):
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
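# Illustrative usage: map a function over parallel arguments and unzip the per-call results.
# def square_and_cube(x):
#     return x * x, x * x * x
# squares, cubes = multi_apply(square_and_cube, [1, 2, 3])
# squares -> [1, 4, 9], cubes -> [1, 8, 27]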
def naive_np_nms(dets, thresh):
"""Pure Python NMS baseline."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = x1.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return dets[keep]
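# Illustrative call (assumes each row of dets is [x1, y1, x2, y2]; note that this variant
# orders boxes by x1 rather than by a score column):
# dets = np.array([[10., 10., 50., 50.],
#                  [12., 12., 52., 52.],
#                  [100., 100., 150., 150.]])
# kept = naive_np_nms(dets, thresh=0.5)  # the two heavily overlapping boxes collapse to one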
def write_dict_to_json(mydict, f_path):
import json
import numpy
class DateEncoding(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (numpy.int_, numpy.intc, numpy.intp, numpy.int8,
numpy.int16, numpy.int32, numpy.int64, numpy.uint8,
numpy.uint16,numpy.uint32, numpy.uint64)):
return int(obj)
elif isinstance(obj, (numpy.float_, numpy.float16, numpy.float32,
numpy.float64)):
return float(obj)
elif isinstance(obj, (numpy.ndarray,)): # add this line
return obj.tolist() # add this line
return json.JSONEncoder.default(self, obj)
with open(f_path, 'w') as f:
json.dump(mydict, f, cls=DateEncoding)
print("wrote detection dict to %s" % f_path)
|
1693894
|
from snowddl.blueprint import ProcedureBlueprint, Ident, SchemaObjectIdentWithArgs, NameWithType, DataType
from snowddl.parser.abc_parser import AbstractParser, ParsedFile
procedure_json_schema = {
"type": "object",
"properties": {
"language": {
"type": "string"
},
"body": {
"type": "string"
},
"arguments": {
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"returns": {
"type": "string"
},
"is_strict": {
"type": "boolean"
},
"is_execute_as_caller": {
"type": "boolean"
},
"comment": {
"type": "string"
}
},
"required": ["body", "returns"],
"additionalProperties": False
}
class ProcedureParser(AbstractParser):
def load_blueprints(self):
self.parse_schema_object_files("procedure", procedure_json_schema, self.process_procedure)
def process_procedure(self, f: ParsedFile):
arguments = [NameWithType(name=Ident(k), type=DataType(t)) for k, t in f.params.get('arguments', {}).items()]
base_name = self.validate_name_with_args(f.path, arguments)
bp = ProcedureBlueprint(
full_name=SchemaObjectIdentWithArgs(self.env_prefix, f.database, f.schema, base_name, [a.type.base_type for a in arguments]),
language=f.params.get('language', 'SQL'),
body=f.params['body'],
arguments=arguments,
returns=DataType(f.params['returns']),
is_strict=f.params.get('is_strict', False),
is_immutable=f.params.get('is_immutable', False),
is_execute_as_caller=f.params.get('is_execute_as_caller', False),
comment=f.params.get('comment'),
)
self.config.add_blueprint(bp)
|
1693935
|
from .casing import camel_to_snake, snake_to_camel
from .pluralize import to_plural
__all__ = ["to_plural", "snake_to_camel", "camel_to_snake"]
|
1693957
|
import pytest
from dash.testing.browser import Browser
from dash.testing.consts import SELENIUM_GRID_DEFAULT
@pytest.mark.parametrize("browser_type", ("Chrome", "Firefox"))
def test_browser_smoke(browser_type, tmpdir):
browser = Browser(
browser=browser_type,
remote=False,
remote_url=SELENIUM_GRID_DEFAULT,
headless=True,
options=None,
download_path=tmpdir.mkdir("download").strpath,
percy_finalize=True,
)
assert browser.driver.name == browser_type.lower()
def test_browser_use_remote_webdriver(tmpdir):
# test creation with remote=True
with pytest.raises(Exception):
Browser(
browser="Chrome",
remote=True,
remote_url=SELENIUM_GRID_DEFAULT,
headless=True,
options=None,
download_path=tmpdir.mkdir("download").strpath,
percy_finalize=True,
)
# test creation with remote_url other than default
with pytest.raises(Exception):
Browser(
browser="Chrome",
remote=False,
remote_url="http://token@any.selenium.grid:3333",
headless=True,
options=None,
download_path=tmpdir.mkdir("download").strpath,
percy_finalize=True,
)
|
1693991
|
from __future__ import division
import sys
import os
import argparse
import numpy as np
import cv2
import torch
from torch.utils import data
sys.path.insert(0, './pnpransac')
from pnpransac import pnpransac
from models import get_model
from datasets import get_dataset
def get_pose_err(pose_gt, pose_est):
transl_err = np.linalg.norm(pose_gt[0:3,3]-pose_est[0:3,3])
rot_err = pose_est[0:3,0:3].T.dot(pose_gt[0:3,0:3])
rot_err = cv2.Rodrigues(rot_err)[0]
rot_err = np.reshape(rot_err, (1,3))
rot_err = np.reshape(np.linalg.norm(rot_err, axis = 1), -1) / np.pi * 180.
return transl_err, rot_err[0]
def eval(args):
scenes_7S = ['chess', 'fire', 'heads', 'office', 'pumpkin',
'redkitchen','stairs']
scenes_12S = ['apt1/kitchen', 'apt1/living', 'apt2/bed',
'apt2/kitchen', 'apt2/living', 'apt2/luke',
'office1/gates362', 'office1/gates381',
'office1/lounge', 'office1/manolis',
'office2/5a', 'office2/5b']
scenes_Cambridge = ['GreatCourt', 'KingsCollege', 'OldHospital',
'ShopFacade', 'StMarysChurch']
if args.dataset in ['7S', 'i7S']:
if args.scene not in scenes_7S:
print('Selected scene is not valid.')
sys.exit()
if args.dataset in ['12S', 'i12S']:
if args.scene not in scenes_12S:
print('Selected scene is not valid.')
sys.exit()
if args.dataset == 'Cambridge':
if args.scene not in scenes_Cambridge:
print('Selected scene is not valid.')
sys.exit()
if args.dataset == 'i19S':
if args.scene not in scenes_7S + scenes_12S:
print('Selected scene is not valid.')
sys.exit()
# prepare datasets
if args.dataset == 'i19S':
datasetSs = get_dataset('7S')
datasetTs = get_dataset('12S')
if args.scene in scenes_7S:
datasetSs = datasetSs(args.data_path, args.dataset, args.scene,
split='test')
datasetTs = datasetTs(args.data_path, args.dataset)
dataset = datasetSs
if args.scene in scenes_12S:
datasetSs = datasetSs(args.data_path, args.dataset)
datasetTs = datasetTs(args.data_path, args.dataset, args.scene,
split='test')
dataset = datasetTs
centers = np.reshape(np.array([[]]),(-1,3))
for scene in scenes_7S:
centers = np.concatenate([centers, datasetSs.scene_data[scene][2]
+ datasetSs.scene_data[scene][0]])
for scene in scenes_12S:
centers = np.concatenate([centers, datasetTs.scene_data[scene][2]
+ datasetTs.scene_data[scene][0]])
elif args.dataset == 'i7S':
dataset = get_dataset('7S')
dataset = dataset(args.data_path, args.dataset, args.scene,
split='test')
centers = np.reshape(np.array([[]]),(-1,3))
for scene in scenes_7S:
centers = np.concatenate([centers, dataset.scene_data[scene][2]
+ dataset.scene_data[scene][0]])
elif args.dataset == 'i12S':
dataset = get_dataset('12S')
dataset = dataset(args.data_path, args.dataset, args.scene,
split='test')
centers = np.reshape(np.array([[]]),(-1,3))
for scene in scenes_12S:
centers = np.concatenate([centers, dataset.scene_data[scene][2]
+ dataset.scene_data[scene][0]])
else:
dataset = get_dataset(args.dataset)
dataset = dataset(args.data_path, args.dataset, args.scene,
split='test')
centers = dataset.centers
intrinsics_color = dataset.intrinsics_color
dataloader = data.DataLoader(dataset, batch_size=1,
num_workers=4, shuffle=False)
pose_solver = pnpransac(intrinsics_color[0,0], intrinsics_color[1,1],
intrinsics_color[0,2], intrinsics_color[1,2])
# prepare model
torch.set_grad_enabled(False)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = get_model(args.model, args.dataset)
model_state = torch.load(args.checkpoint,
map_location=device)['model_state']
model.load_state_dict(model_state)
model.to(device)
model.eval()
# start evaluation
rot_err_list = []
transl_err_list = []
x = np.linspace(4, 640-4, 80) + 106 * (args.dataset == 'Cambridge')
y = np.linspace(4, 480-4, 60)
xx, yy = np.meshgrid(x, y)
pcoord = np.concatenate((np.expand_dims(xx,axis=2),
np.expand_dims(yy,axis=2)), axis=2)
for _, (img, pose) in enumerate(dataloader):
if args.dataset == 'Cambridge':
img = img[:,:,:,106:106+640].to(device)
else:
img = img.to(device)
if args.model == 'hscnet':
coord, lbl_2, lbl_1 = model(img)
#print(lbl_2.shape)
#print(lbl_2)
lbl_1 = torch.argmax(lbl_1, dim=1)
lbl_2 = torch.argmax(lbl_2, dim=1)
lbl = (lbl_1 * 25 + lbl_2).cpu().data.numpy()[0,:,:]
ctr_coord = centers[np.reshape(lbl,(-1)),:]
ctr_coord = np.reshape(ctr_coord, (60,80,3))
coord = np.transpose(coord.cpu().data.numpy()[0,:,:,:], (1,2,0))
coord = coord + ctr_coord
else:
coord = np.transpose(model(img).cpu().data.numpy()[0,:,:,:],
(1,2,0))
coord = np.ascontiguousarray(coord)
pcoord = np.ascontiguousarray(pcoord)
rot, transl = pose_solver.RANSAC_loop(np.reshape(pcoord,
(-1,2)).astype(np.float64), np.reshape(coord,
(-1,3)).astype(np.float64), 256)
pose_gt = pose.data.numpy()[0,:,:]
pose_est = np.eye(4)
pose_est[0:3,0:3] = cv2.Rodrigues(rot)[0].T
pose_est[0:3,3] = -np.dot(pose_est[0:3,0:3], transl)
transl_err, rot_err = get_pose_err(pose_gt, pose_est)
rot_err_list.append(rot_err)
transl_err_list.append(transl_err)
print('Pose error: {}m, {}\u00b0'.format(transl_err, rot_err))
results = np.array([transl_err_list, rot_err_list]).T
np.savetxt(os.path.join(args.output,
'pose_err_{}_{}_{}.txt'.format(args.dataset,
args.scene.replace('/','.'), args.model)), results)
if args.dataset != 'Cambridge':
print('Accuracy: {}%'.format(np.sum((results[:,0] <= 0.05)
* (results[:,1] <= 5)) * 1. / len(results) * 100))
print('Median pose error: {}m, {}\u00b0'.format(np.median(results[:,0]),
np.median(results[:,1])))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Hscnet")
parser.add_argument('--model', nargs='?', type=str, default='hscnet',
choices=('hscnet', 'scrnet'),
help="Model to use ['hscnet', 'scrnet']")
parser.add_argument('--dataset', nargs='?', type=str, default='7S',
choices=('7S', '12S', 'i7S', 'i12S', 'i19S',
'Cambridge'), help='Dataset to use')
parser.add_argument('--scene', nargs='?', type=str, default='heads',
help='Scene')
parser.add_argument('--checkpoint', required=True, type=str,
help='Path to saved model')
parser.add_argument('--data_path', required=True, type=str,
help='Path to dataset')
parser.add_argument('--output', nargs='?', type=str, default='./',
help='Output directory')
args = parser.parse_args()
eval(args)
|
1694039
|
from waflib import Utils, Task, Options, Logs, Errors
from waflib.TaskGen import before_method, after_method, feature
from waflib.Tools import ccroot
import os.path
ccroot.USELIB_VARS['msbuild'] = set(['CSFLAGS'])
def configure(conf):
conf.find_program('msbuild')
msbuild_fmt = '''<?xml version="1.0" encoding="utf-8"?>
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003" DefaultTargets="Build" ToolsVersion="4.0">
<PropertyGroup>
{PROPERTIES}
</PropertyGroup>
{SOURCES}
<ItemGroup>
{REFERENCES}
{REF_HINTS}
</ItemGroup>
<Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
</Project>
'''
# Compile, EmbeddedResource, Page, Resource
src_fmt = ''' <ItemGroup>
<{1} Include="{0}">
<Link>{2}</Link>
</{1}>
</ItemGroup>'''
ref_fmt = ''' <Reference Include="{0}">
<HintPath>{1}</HintPath>
</Reference>'''
use_fmt = ''' <Reference Include="{0}"/>'''
cfg_fmt = ''' <{0}>{1}</{0}>'''
def get_source_type(name):
if name.endswith('.cs'):
return 'Compile'
if name.endswith('.xaml'):
return 'Page'
return 'EmbeddedResource'
def get_link_path(self, node):
if node.is_src():
return str(node.path_from(self.path))
else:
return str(node.path_from(self.path.get_bld()))
@feature('msbuild')
@before_method('process_source')
def apply_msbuild(self):
bintype = getattr(self, 'bintype', self.gen.endswith('.dll') and 'library' or 'exe')
asm = self.path.find_or_declare(self.gen)
cfg = {}
cfg['OutputType'] = bintype
cfg['AssemblyName'] = os.path.splitext(self.gen)[0]
cfg['RootNamespace'] = getattr(self, 'namespace', cfg['AssemblyName'])
cfg['TargetFrameworkVersion'] = 'v4.0'
cfg['PlatformTarget'] = getattr(self, 'platform', 'anycpu')
cfg['IntermediateOutputPath'] = 'obj'
cfg['OutputPath'] = self.path.get_bld().abspath()
cfg['UseCommonOutputDirectory'] = 'true'
cfg['WarningLevel'] = '4'
self.gen_task = self.create_task('genproj', [], asm.change_ext('.proj'))
self.cs_task = self.create_task('msbuild', self.gen_task.outputs, asm)
main = self.to_nodes(getattr(self, 'main', []))
source = self.to_nodes(getattr(self, 'source', []))
resource = self.to_nodes(getattr(self, 'resource', []))
icon = self.to_nodes(getattr(self, 'icon', []))
srcs = []
for x in main:
srcs.append( (x.abspath(), 'ApplicationDefinition', get_link_path(self, x)) )
if x in source:
source.remove(x)
for x in source:
srcs.append( (x.abspath(), get_source_type(x.name), get_link_path(self, x)) )
for x in resource:
srcs.append( (x.abspath(), 'Resource', get_link_path(self, x)) )
if icon:
cfg['ApplicationIcon'] = icon[0].abspath()
self.gen_task.env.MSBUILD_FMT = msbuild_fmt
self.gen_task.env.MSBUILD_CFG = cfg
self.gen_task.env.MSBUILD_SRC = srcs
self.gen_task.env.MSBUILD_REF = []
self.gen_task.env.MSBUILD_USE = []
self.cs_task.dep_nodes.extend(main + source + resource + icon)
self.source = []
inst_to = getattr(self, 'install_path', bintype=='exe' and '${BINDIR}' or '${LIBDIR}')
if inst_to:
# note: we are making a copy, so the files added to cs_task.outputs won't be installed automatically
mod = getattr(self, 'chmod', bintype=='exe' and Utils.O755 or Utils.O644)
self.install_task = self.bld.install_files(inst_to, self.cs_task.outputs[:], env=self.env, chmod=mod)
# if this is an exe, look for app.config and install to ${BINDIR}
if 'exe' in bintype:
cfg = self.path.find_or_declare('app.config')
self.bld.install_as('%s/%s.config' % (inst_to, self.gen), cfg, env=self.env, chmod=Utils.O755)
@feature('msbuild')
@after_method('propagate_uselib_vars')
def uselib_msbuild(self):
ccroot.propagate_uselib_vars(self)
flags = self.env.CSFLAGS
defs = ','.join( f[8:] for f in flags if '/define:' in f)
self.gen_task.env.MSBUILD_CFG['Optimize'] = '/optimize+' in flags and 'true' or 'false'
self.gen_task.env.MSBUILD_CFG['DefineConstants'] = defs
@feature('msbuild')
@after_method('apply_msbuild')
def use_msbuild(self):
names = self.to_list(getattr(self, 'use', []))
get = self.bld.get_tgen_by_name
for x in names:
try:
y = get(x)
except Errors.WafError:
self.gen_task.env.append_value('MSBUILD_USE', os.path.splitext(x)[0])
continue
y.post()
tsk = getattr(y, 'cs_task', None) or getattr(y, 'link_task', None)
if not tsk:
self.bld.fatal('cs task has no link task for use %r' % self)
self.cs_task.dep_nodes.extend(tsk.outputs) # dependency
self.cs_task.set_run_after(tsk) # order (redundant, the order is inferred from the nodes inputs/outputs)
f = tsk.outputs[0]
self.gen_task.env.MSBUILD_REF.append( (f.abspath(), os.path.splitext(f.name)[0]) )
@feature('msbuild')
@after_method('apply_msbuild', 'use_msbuild')
def debug_msbuild(self):
csdebug = getattr(self, 'csdebug', self.env.CSDEBUG)
if not csdebug:
return
node = self.cs_task.outputs[0]
if self.env.CS_NAME == 'mono':
out = node.parent.find_or_declare(node.name + '.mdb')
else:
out = node.change_ext('.pdb')
self.cs_task.outputs.append(out)
try:
self.install_task.source.append(out)
except AttributeError:
pass
if csdebug == 'pdbonly':
self.gen_task.env.MSBUILD_CFG['DebugSymbols'] = 'true'
self.gen_task.env.MSBUILD_CFG['DebugType'] = 'pdbonly'
elif csdebug == 'full':
self.gen_task.env.MSBUILD_CFG['DebugSymbols'] = 'true'
self.gen_task.env.MSBUILD_CFG['DebugType'] = 'full'
else:
self.gen_task.env.MSBUILD_CFG['DebugSymbols'] = 'false'
self.gen_task.env.MSBUILD_CFG['DebugType'] = 'none'
@feature('msbuild')
@after_method('apply_msbuild', 'use_msbuild')
def doc_msbuild(self):
csdoc = getattr(self, 'csdoc', self.env.CSDOC)
if not csdoc:
return
bintype = getattr(self, 'bintype', self.gen.endswith('.dll') and 'library' or 'exe')
if bintype != 'library':
return
node = self.cs_task.outputs[0]
out = node.change_ext('.xml')
self.cs_task.outputs.append(out)
try:
self.install_task.source.append(out)
except AttributeError:
pass
self.gen_task.env.MSBUILD_CFG['DocumentationFile'] = out.name
class msbuild(Task.Task):
"""
Run msbuild
"""
color = 'YELLOW'
run_str = '${MSBUILD} ${SRC}'
class genproj(Task.Task):
color = 'PINK'
vars = [ 'MSBUILD_FMT', 'MSBUILD_CFG', 'MSBUILD_SRC', 'MSBUILD_REF', 'MSBUILD_USE' ]
def run(self):
cfg = '\n'.join([ cfg_fmt.format(k, v) for k,v in self.env.MSBUILD_CFG.items()])
src = '\n'.join([ src_fmt.format(n, t, l) for n,t,l in self.env.MSBUILD_SRC])
ref = '\n'.join([ ref_fmt.format(n, p) for p,n in self.env.MSBUILD_REF])
use = '\n'.join([ use_fmt.format(i) for i in self.env.MSBUILD_USE])
fmt = {
'PROPERTIES' : cfg,
'SOURCES' : src,
'REF_HINTS' : ref,
'REFERENCES' : use,
}
txt = self.env.MSBUILD_FMT.format(**fmt)
#print txt
self.outputs[0].write(txt)
|
1694046
|
import logging
import unittest
from panda3d.core import ConfigVariableBool
ConfigVariableBool('editor_mode', False).set_value(True)
ConfigVariableBool('no_ui', False).set_value(True)
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s [%(levelname)s] %(message)s',
)
if __name__ == '__main__':
suite = unittest.TestLoader().discover('.', pattern = 'test_*.py')
unittest.TextTestRunner(verbosity=2).run(suite)
|
1694099
|
from .constants import BASE_URL
from .api.stores import BestBuyStoresAPI
from .api.bulk import BestBuyBulkAPI
from .api.products import BestBuyProductsAPI
from .api.categories import BestBuyCategoryAPI
__version__ = "2.0.0"
class BestBuyAPI:
def __init__(self, api_key):
"""API's base class
:params:
:api_key (str): best buy developer API key.
"""
self.api_key = api_key.strip()
self.bulk = BestBuyBulkAPI(self.api_key)
self.products = BestBuyProductsAPI(self.api_key)
self.category = BestBuyCategoryAPI(self.api_key)
self.stores = BestBuyStoresAPI(self.api_key)
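# Illustrative usage (placeholder key, not from the original source):
# api = BestBuyAPI("YOUR_API_KEY")
# api.products, api.stores, api.category and api.bulk expose the endpoint wrappers.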
|
1694111
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import output
from esphome.const import CONF_CHANNEL, CONF_ID
from . import TLC59208FOutput, tlc59208f_ns
DEPENDENCIES = ["tlc59208f"]
TLC59208FChannel = tlc59208f_ns.class_("TLC59208FChannel", output.FloatOutput)
CONF_TLC59208F_ID = "tlc59208f_id"
CONFIG_SCHEMA = output.FLOAT_OUTPUT_SCHEMA.extend(
{
cv.Required(CONF_ID): cv.declare_id(TLC59208FChannel),
cv.GenerateID(CONF_TLC59208F_ID): cv.use_id(TLC59208FOutput),
cv.Required(CONF_CHANNEL): cv.int_range(min=0, max=7),
}
)
async def to_code(config):
paren = await cg.get_variable(config[CONF_TLC59208F_ID])
var = cg.new_Pvariable(config[CONF_ID])
cg.add(var.set_channel(config[CONF_CHANNEL]))
cg.add(paren.register_channel(var))
await output.register_output(var, config)
|
1694113
|
import os
import sys
import re
if __name__ == "__main__":
import docassemble.base.config
docassemble.base.config.load(arguments=sys.argv)
from docassemble.webapp.app_object import app
from docassemble.webapp.db_object import db
from docassemble.webapp.core.models import Supervisors
from sqlalchemy import delete
def main():
from docassemble.base.config import hostname
supervisor_url = os.environ.get('SUPERVISOR_SERVER_URL', None)
if supervisor_url:
db.session.execute(delete(Supervisors).filter_by(hostname=hostname))
db.session.commit()
new_entry = Supervisors(hostname=hostname, url="http://" + hostname + ":9001", role=os.environ.get('CONTAINERROLE', None))
db.session.add(new_entry)
db.session.commit()
if __name__ == "__main__":
#import docassemble.webapp.database
with app.app_context():
#app.config['SQLALCHEMY_DATABASE_URI'] = docassemble.webapp.database.alchemy_connection_string()
main()
db.engine.dispose()
|
1694124
|
from django.db.models.signals import post_save
from django.conf import settings
from factory.django import mute_signals
from mock import patch
from nose.tools import eq_, ok_
from remo.base.tests import RemoTestCase
from remo.profiles.tests import UserFactory
from remo.remozilla.tests import BugFactory
from remo.remozilla.utils import get_bugzilla_url
from remo.voting.models import Poll, automated_poll_discussion_email
from remo.voting.tests import PollCommentFactory, PollFactory
class AutomatedRadioPollTest(RemoTestCase):
"""Tests the automatic creation of new Radio polls."""
fixtures = ['demo_users.json']
def test_automated_radio_poll_valid_bug(self):
"""Test the creation of an automated radio poll."""
UserFactory.create(username='remobot')
bug = BugFactory.create(council_vote_requested=True, component='Budget Requests')
poll = Poll.objects.get(bug=bug)
eq_(poll.bug.bug_id, bug.bug_id)
eq_(poll.description, bug.first_comment)
eq_(poll.name, bug.summary)
def test_automated_radio_poll_no_auto_bug(self):
"""Test the creation of an automated radio
poll with a non budget/swag bug.
"""
BugFactory.create()
eq_(Poll.objects.filter(automated_poll=True).count(), 0)
def test_automated_radio_poll_already_exists(self):
"""Test that a radio poll is not created
if the bug already exists.
"""
UserFactory.create(username='remobot')
bug = BugFactory.create(council_vote_requested=True,
component='Budget Requests')
bug.first_comment = 'My first comment.'
bug.save()
eq_(Poll.objects.filter(automated_poll=True).count(), 1)
def test_send_discussion_email_to_council(self):
bug = BugFactory.create(bug_id=989812)
automated_poll = PollFactory.build(
name='automated_poll', automated_poll=True, bug=bug)
with patch('remo.voting.models.send_remo_mail') as mocked_send_mail:
automated_poll_discussion_email(None, automated_poll, True, {})
subject = 'Discuss [Bug 989812] - Bug summary'
data = {'bug': bug, 'BUGZILLA_URL': get_bugzilla_url(bug),
'poll': automated_poll}
mocked_send_mail.delay.assert_called_once_with(
subject=subject,
email_template='emails/review_budget_notify_review_team.jinja',
recipients_list=[settings.REPS_REVIEW_ALIAS],
data=data)
def test_send_discussion_email_to_council_edit(self):
bug = BugFactory.create(bug_id=989812)
automated_poll = PollFactory.build(
name='automated_poll', automated_poll=True, bug=bug)
with patch('remo.voting.models.send_remo_mail') as mocked_send_mail:
automated_poll_discussion_email(None, automated_poll, False, {})
ok_(not mocked_send_mail.called)
class VotingCommentSignalTests(RemoTestCase):
def test_comment_one_user(self):
"""Test sending email when a new comment is added on a Poll
and the user has the option enabled in his/her settings.
"""
commenter = UserFactory.create()
creator = UserFactory.create(
userprofile__receive_email_on_add_voting_comment=True)
# Disable notifications related to the creation of a poll
with mute_signals(post_save):
poll = PollFactory.create(created_by=creator)
with patch('remo.voting.models.send_remo_mail.delay') as mail_mock:
PollCommentFactory.create(user=commenter, poll=poll, comment='This is a comment')
ok_(mail_mock.called)
eq_(mail_mock.call_count, 1)
def test_one_user_settings_False(self):
"""Test sending email when a new comment is added on a Poll
and the user has the option disabled in his/her settings.
"""
commenter = UserFactory.create()
user = UserFactory.create(userprofile__receive_email_on_add_voting_comment=False)
with mute_signals(post_save):
poll = PollFactory.create(created_by=user)
with patch('remo.voting.models.send_remo_mail.delay') as mail_mock:
PollCommentFactory.create(user=commenter, poll=poll, comment='This is a comment')
ok_(not mail_mock.called)
|
1694140
|
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Implementation of Twin Delayed Deep Deterministic Policy Gradients (TD3)
# Paper: https://arxiv.org/abs/1802.09477
class D2RLActor(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(D2RLActor, self).__init__()
self.l1 = nn.Linear(state_dim, 256)
self.l2 = nn.Linear(256+state_dim, 256)
self.l3 = nn.Linear(256+state_dim, 256)
self.l4 = nn.Linear(256+state_dim, 256)
self.l5 = nn.Linear(256, action_dim)
self.max_action = max_action
def forward(self, state):
a = F.relu(self.l1(state))
a = torch.cat([a, state], 1)
a = F.relu(self.l2(a))
a = torch.cat([a, state], 1)
a = F.relu(self.l3(a))
a = torch.cat([a, state], 1)
a = F.relu(self.l4(a))
return self.max_action * torch.tanh(self.l5(a))
class D2RLCritic(nn.Module):
def __init__(self, state_dim, action_dim):
super(D2RLCritic, self).__init__()
in_dim = state_dim + action_dim
# Q1 architecture
self.l1 = nn.Linear(in_dim, 256)
self.l2 = nn.Linear(256+in_dim, 256)
self.l3 = nn.Linear(256+in_dim, 256)
self.l4 = nn.Linear(256+in_dim, 256)
self.l5 = nn.Linear(256, 1)
# Q2 architecture
self.l6 = nn.Linear(in_dim, 256)
self.l7 = nn.Linear(256+in_dim, 256)
self.l8 = nn.Linear(256+in_dim, 256)
self.l9 = nn.Linear(256+in_dim, 256)
self.l0 = nn.Linear(256, 1)
def forward(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = torch.cat([q1, sa], 1)
q1 = F.relu(self.l2(q1))
q1 = torch.cat([q1, sa], 1)
q1 = F.relu(self.l3(q1))
q1 = torch.cat([q1, sa], 1)
q1 = F.relu(self.l4(q1))
q1 = self.l5(q1)
q2 = F.relu(self.l6(sa))
q2 = torch.cat([q2, sa], 1)
q2 = F.relu(self.l7(q2))
q2 = torch.cat([q2, sa], 1)
q2 = F.relu(self.l8(q2))
q2 = torch.cat([q2, sa], 1)
q2 = F.relu(self.l9(q2))
q2 = self.l0(q2)
return q1, q2
def Q1(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = torch.cat([q1, sa], 1)
q1 = F.relu(self.l2(q1))
q1 = torch.cat([q1, sa], 1)
q1 = F.relu(self.l3(q1))
q1 = torch.cat([q1, sa], 1)
q1 = F.relu(self.l4(q1))
q1 = self.l5(q1)
return q1
class TD3(object):
def __init__(
self,
state_dim,
action_dim,
max_action,
discount=0.99,
tau=0.005,
policy_noise=0.2,
noise_clip=0.5,
policy_freq=2,
args=None,
):
self.args = args
actor_prototype = D2RLActor
self.actor = actor_prototype(state_dim, action_dim, max_action).to(device)
self.actor_target = copy.deepcopy(self.actor)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
critic_prototype = D2RLCritic
self.critic = critic_prototype(state_dim, action_dim).to(device)
self.critic_target = copy.deepcopy(self.critic)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
self.max_action = max_action
self.discount = discount
self.tau = tau
self.policy_noise = policy_noise
self.noise_clip = noise_clip
self.policy_freq = policy_freq
self.total_it = 0
def select_action(self, state):
state = torch.FloatTensor(state.reshape(1, -1)).to(device)
return self.actor(state).cpu().data.numpy().flatten()
def train(self, replay_buffer, batch_size=100):
self.total_it += 1
# Sample replay buffer
state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)
with torch.no_grad():
# Select action according to policy and add clipped noise
noise = (
torch.randn_like(action) * self.policy_noise
).clamp(-self.noise_clip, self.noise_clip)
next_action = (
self.actor_target(next_state) + noise
).clamp(-self.max_action, self.max_action)
# Compute the target Q value
target_Q1, target_Q2 = self.critic_target(next_state, next_action)
target_Q = torch.min(target_Q1, target_Q2)
target_Q = reward + not_done * self.discount * target_Q
# Get current Q estimates
current_Q1, current_Q2 = self.critic(state, action)
# Compute critic loss
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# Delayed policy updates
if self.total_it % self.policy_freq == 0:
# Compute actor loss
actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
# Optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Update the frozen target models
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
def save(self, filename):
torch.save(self.critic.state_dict(), filename + "_critic")
torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
torch.save(self.actor.state_dict(), filename + "_actor")
torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")
def load(self, filename):
self.critic.load_state_dict(torch.load(filename + "_critic"))
self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
self.critic_target = copy.deepcopy(self.critic)
self.actor.load_state_dict(torch.load(filename + "_actor"))
self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
self.actor_target = copy.deepcopy(self.actor)
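# Illustrative construction (dimensions are placeholders, not taken from the original source):
# agent = TD3(state_dim=17, action_dim=6, max_action=1.0)
# action = agent.select_action(np.zeros(17, dtype=np.float32))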
|
1694167
|
from .discrete_tuning import GradientTriggerSearch
from .continuous_tuning import ContinuousTriggerEmbedding
|
1694184
|
class IntegerRange(object,IDisposable):
""" A class to define a range of a sequence of consecutive integer numbers """
def Dispose(self):
""" Dispose(self: IntegerRange) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: IntegerRange,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
High=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The upper limit of the range
Get: High(self: IntegerRange) -> int
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: IntegerRange) -> bool
"""
Low=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The lower limit of the range
Get: Low(self: IntegerRange) -> int
"""
|
1694192
|
import config
from google.appengine.ext import db
# +++
class PlayerModel(db.Model):
name = db.StringProperty()
password = db.StringProperty()
rating = db.IntegerProperty(default = 0)
game_count = db.IntegerProperty(default = 0)
games_won = db.IntegerProperty(default = 0)
avatar_hash = db.StringProperty()
last_game_key_name = db.StringProperty()
credits_acquired_total = db.IntegerProperty(default = 0)
credits_consumed_total = db.IntegerProperty(default = 0)
munts_killed_total = db.IntegerProperty(default = 0)
structs_killed_total = db.IntegerProperty(default = 0)
munts_lost_total = db.IntegerProperty(default = 0)
structs_lost_total = db.IntegerProperty(default = 0)
built_counts_total = db.ListProperty(int, default = [0] * config.BUILT_COUNTS_MAX)
elapsed_seconds_total = db.IntegerProperty(default = 0)
rating_check_utc = db.IntegerProperty(default = 0)
last_game_utc = db.IntegerProperty(default = 0)
blocked = db.BooleanProperty(default = False)
hidden = db.BooleanProperty(default = False)
history = db.TextProperty(default = '[]')
sequence_number = db.IntegerProperty(default = 0)
def playermodel_key_from_key_name(key_name):
return db.Key.from_path("PlayerModel", key_name)
def playermodel_key(name):
return db.Key.from_path("PlayerModel", 'k' + name.lower())
# +++
class PlayerUpdateFailed(db.Model):
name = db.StringProperty()
password = db.StringProperty()
avatar_hash = db.StringProperty()
date = db.DateTimeProperty(auto_now_add=True)
backtrace = db.TextProperty()
# +++
class AvatarModel(db.Model):
hash = db.StringProperty()
content = db.BlobProperty()
def avatarmodel_key(hash):
return db.Key.from_path("AvatarModel", 'k' + hash.lower())
# +++
class GameStatsModel(db.Model):
date = db.DateTimeProperty(auto_now_add=True)
player_key_names = db.StringListProperty()
mission_title = db.StringProperty()
start_utc = db.IntegerProperty()
end_utc = db.IntegerProperty()
old_ratings = db.TextProperty()
dids = db.TextProperty()
json = db.TextProperty()
def gamestatsmodel_key_from_name(key_name):
return db.Key.from_path("GameStatsModel", key_name)
def gamestatsmodel_key(server_id, server_start, gameid):
key_name = '<KEY>' % (server_id, server_start, gameid)
return gamestatsmodel_key_from_name(key_name)
# +++
class GameStatsFailed(db.Model):
json = db.TextProperty()
date = db.DateTimeProperty(auto_now_add=True)
backtrace = db.TextProperty()
# +++
class ServerInfoModel(db.Model):
json = db.TextProperty()
command = db.TextProperty()
expires_utc = db.IntegerProperty()
def serverinfomodel_key(name, start_utc):
key_name = 'k%s-%u' % (name.lower(), start_utc)
return db.Key.from_path('ServerInfoModel', key_name)
# +++
class AccountModel(db.Model):
nickname = db.StringProperty()
access_rights = db.IntegerProperty()
def accountmodel_key(user_id):
return db.Key.from_path("AccountModel", 'k' + user_id.lower())
# +++
class PlayerActionModel(db.Model):
player_name = db.StringProperty()
anonymous = db.BooleanProperty()
did = db.StringProperty()
ip_address = db.StringProperty()
action = db.StringProperty()
time_utc = db.IntegerProperty()
platform = db.StringProperty()
# +++
class AdminActionModel(db.Model):
admin_user = db.StringProperty()
ip_address = db.StringProperty()
action = db.TextProperty()
time_utc = db.IntegerProperty()
|
1694228
|
import pytest
from HABApp.util.multimode import BaseMode, ValueMode, MultiModeItem
from ..test_core import ItemTests
from tests.helpers.parent_rule import DummyRule
class TestMultiModeItem(ItemTests):
CLS = MultiModeItem
TEST_VALUES = [0, 'str', (1, 2, 3)]
def test_diff_prio(parent_rule: DummyRule):
p = MultiModeItem('TestItem')
p1 = ValueMode('modea', '1234')
p2 = ValueMode('modeb', '4567')
p.add_mode(1, p1).add_mode(2, p2)
p1.set_value(5)
assert p.value == '4567'
p2.set_enabled(False)
assert p.value == 5
p2.set_enabled(True)
assert p.value == '4567'
p2.set_enabled(False)
p2.set_value(8888)
assert p.value == 8888
def test_calculate_lower_priority_value(parent_rule: DummyRule):
p = MultiModeItem('TestItem')
m1 = ValueMode('modea', '1234')
m2 = ValueMode('modeb', '4567')
p.add_mode(1, m1).add_mode(2, m2)
assert m1.calculate_lower_priority_value() is None
assert m2.calculate_lower_priority_value() == '1234'
m1.set_value('asdf')
assert m2.calculate_lower_priority_value() == 'asdf'
def test_auto_disable_1(parent_rule: DummyRule):
p = MultiModeItem('TestItem')
m1 = ValueMode('modea', 50)
m2 = ValueMode('modeb', 60, auto_disable_func= lambda l, o: l > o)
p.add_mode(1, m1).add_mode(2, m2)
m1.set_value(50)
assert p.value == 60
m1.set_value(61)
assert not m2.enabled
assert p.value == 61
m1.set_value(59)
assert p.value == 59
def test_auto_disable_func(parent_rule: DummyRule):
p = MultiModeItem('TestItem')
m1 = ValueMode('modea', 50)
m2 = ValueMode('modeb', 60, auto_disable_func=lambda low, s: low == 40)
p.add_mode(1, m1).add_mode(2, m2)
m2.set_value(60)
assert p.value == 60
assert m2.enabled is True
m1.set_value(40)
assert p.value == 40
assert m2.enabled is False
m1.set_value(50)
assert p.value == 50
assert m2.enabled is False
def test_unknown(parent_rule: DummyRule):
p = MultiModeItem('asdf')
with pytest.raises(KeyError):
p.get_mode('asdf')
p.add_mode(1, BaseMode('mode'))
with pytest.raises(KeyError):
p.get_mode('asdf')
def test_remove(parent_rule: DummyRule):
p = MultiModeItem('asdf')
m1 = BaseMode('m1')
m2 = BaseMode('m2')
p.add_mode(0, m1)
p.add_mode(1, m2)
p.remove_mode('m1')
assert p.all_modes() == [(1, m2)]
|
1694257
|
import FWCore.ParameterSet.Config as cms
from math import pi
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
B2GDQM = DQMEDAnalyzer(
"B2GDQM",
#Trigger Results
triggerResultsCollection = cms.InputTag("TriggerResults", "", "HLT"),
PFJetCorService = cms.string("ak4PFL1FastL2L3"),
jetLabels = cms.VInputTag(
'ak4PFJets',
'ak4PFJetsPuppi',
'ak8PFJetsPuppi',
'ak8PFJetsPuppiSoftDrop'
),
jetPtMins = cms.vdouble(
50.,
50.,
50.,
50.,
100.
),
pfMETCollection = cms.InputTag("pfMet"),
sdjetLabel = cms.InputTag("ak8PFJetsPuppiSoftDrop"),
muonSrc = cms.InputTag("muons"),
electronSrc = cms.InputTag("gedGsfElectrons"),
allHadPtCut = cms.double(380.0), # Edit in 2019: Lower pt cut slightly because this now selects groomed jet pt
allHadRapidityCut = cms.double(1.0),
allHadDeltaPhiCut = cms.double( pi / 2.0),
muonSelect = cms.string("pt > 45.0 & abs(eta)<2.1 & isGlobalMuon & abs(globalTrack.d0)<1 & abs(globalTrack.dz)<20"),
semiMu_HadJetPtCut = cms.double(400.0),
semiMu_LepJetPtCut = cms.double(30.0),
semiMu_dphiHadCut = cms.double( pi / 2.0),
semiMu_dRMin = cms.double( 0.5 ),
semiMu_ptRel = cms.double( 25.0 ),
elecSelect = cms.string("pt > 45.0 & abs(eta)<2.5 & abs(gsfTrack.d0)<1 & abs(gsfTrack.dz)<20"),
semiE_HadJetPtCut = cms.double(400.0),
semiE_LepJetPtCut = cms.double(30.0),
semiE_dphiHadCut = cms.double( pi / 2.0),
semiE_dRMin = cms.double( 0.5 ),
semiE_ptRel = cms.double( 25.0 )
)
|
1694263
|
import requests
from json import dumps, loads
import csv
import os
import boto3
from datetime import datetime
from requests.auth import HTTPBasicAuth
import collections.abc
import ast
platform_dict = {
"ios": "https://api.appbot.co/api/v2/apps/2411517/ratings/historical?country=&end=" + datetime.today().strftime(
'%Y-%m-%d') + "&start=2020-09-19",
"android": "https://api.appbot.co/api/v2/apps/2411517/ratings/historical?end=" + datetime.today().strftime(
'%Y-%m-%d') + "&start=2020-09-19"}
def get_secrets():
secretsmanager = boto3.client('secretsmanager')
api_key = secretsmanager.get_secret_value(SecretId="/appbot/api_key")
api_password = secretsmanager.get_secret_value(SecretId="/appbot/api_password")
return {"api_key": api_key, "api_password": <PASSWORD>_password}
def flatten(d):
items = []
for k, v in d.items():
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten(v).items())
else:
items.append((k, v))
return dict(items)
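# Added note (not in the original source): flatten() hoists nested keys without
# prefixing them, e.g. flatten({"a": 1, "b": {"c": 2}}) returns {"a": 1, "c": 2},
# so duplicate key names at different nesting depths would overwrite each other.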
def request_data(platform):
secrets = get_secrets()
r = requests.get(platform_dict[platform],
auth=HTTPBasicAuth(loads(secrets["api_key"]["SecretString"])["/appbot/api_key"],loads(secrets["api_password"]["SecretString"])["/appbot/api_password"]))
body = loads(r.content.decode())["all_time"]
csv_columns = [key for key in flatten(body[0])]
csv_columns.remove('version')
csv_file = f"analytics-{platform}-lookup.csv"
lambda_path = "/tmp/" + csv_file
try:
with open(lambda_path, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
writer.writeheader()
for data in body:
del data['version']
writer.writerow(flatten(data))
with open(lambda_path) as f:
string = f.read()
encoded_string = string.encode("utf-8")
env_name = os.environ["env"]
bucket_name = env_name + "-analytics-" + platform + "-historical-ratings"
s3_path = csv_file
s3 = boto3.resource("s3")
s3.Bucket(bucket_name).put_object(Key=s3_path, Body=encoded_string)
except IOError:
print("I/O error")
def handler(event, context):
request_data("android")
request_data("ios")
handler("","")
|
1694283
|
from qibo.models import Circuit
from qibo import gates
import numpy as np
from scipy.optimize import minimize
def ansatz(p=0):
"""Ansatz for driving a random state into its up-tp-phases canonical form.
Args:
p (float): probability of occuring a single-qubit depolarizing error
Returns:
Qibo circuit implementing the variational ansatz.
"""
C = Circuit(3, density_matrix=p > 0)
for i in range(3):
C.add(gates.RZ(i, theta=0))
C.add(gates.RY(i, theta=0))
C.add(gates.RZ(i, theta=0))
if p > 0:
C.add(gates.PauliNoiseChannel(i, px=p/3, py=p/3, pz=p/3))
for i in range(3):
if p > 0:
C.add(gates.PauliNoiseChannel(i, px=10 * p))
C.add(gates.M(i))
return C
def cost_function(theta, state, circuit, shots=1000):
"""Cost function encoding the difference between a state and its up-to-phases canonical form
Args:
theta (array): parameters of the unitary rotations.
state (cplx array): three-qubit random state.
circuit (models.Circuit): Qibo variational circuit.
shots (int): Shots used for measuring every circuit.
Returns:
float, cost function
"""
circuit.set_parameters(theta)
measurements = circuit(state, nshots=shots).frequencies(binary=False)
return (measurements[1] + measurements[2] + measurements[3]) / shots
def canonize(state, circuit, shots=1000):
"""Function to transform a given state into its up-to-phases canonical form
Args:
state (cplx array): three-qubit random state.
circuit (models.Circuit): Qibo variational circuit.
shots (int): Shots used for measuring every circuit.
Returns:
Value cost function, parameters to canonize the given state
"""
theta = np.zeros(9)
result = minimize(cost_function, theta, args=(state, circuit, shots),
method='powell')
return result.fun, result.x
def canonical_tangle(state, theta, circuit, shots=1000, post_selection=True):
"""Tangle of a canonized quantum state
Args:
state (cplx array): three-qubit random state.
theta (array): parameters of the unitary rotations.
circuit (models.Circuit): Qibo variational circuit.
shots (int): Shots used for measuring every circuit.
post_selection (bool): whether post selection is applied or not
Returns:
tangle
"""
circuit.set_parameters(theta)
result = circuit(state, nshots=shots).frequencies(binary=False)
measures = np.zeros(8)
for i, r in result.items():
measures[i] = result[i] / shots
if post_selection:
measures[1] = 0
measures[2] = 0
measures[3] = 0
measures = measures / np.sum(measures)
return 4*opt_hyperdeterminant(measures)
def hyperdeterminant(state):
"""Hyperdeterminant of any quantum state
Args:
state (cplx array): three-qubit random state
Returns:
Hyperdeterminant
"""
indices = [(1, [(0, 0, 7, 7), (1, 1, 6, 6), (2, 2, 5, 5), (3, 3, 4, 4)]),
(-2, [(0, 7, 3, 4), (0, 7, 5, 2), (0, 7, 6, 1), (3, 4, 5, 2), (3, 4, 6, 1), (5, 2, 6, 1)]),
(4, [(0, 6, 5, 3), (7, 1, 2, 4)])]
hyp = sum(coeff * sum(state[i] * state[j] * state[k] * state[l] for i, j, k, l in ids)
for coeff, ids in indices)
return hyp
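# Worked check (added comment, not in the original file): for the GHZ state
# (|000> + |111>)/sqrt(2) only state[0] and state[7] are non-zero (both
# 1/sqrt(2)), so the only surviving term above is
# 1 * state[0]*state[0]*state[7]*state[7] = 1/4, and the tangle computed as
# 4*|hyperdeterminant| equals 1, the expected maximal value.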
def opt_hyperdeterminant(measures):
"""Hyperdeterminant of a canonized quantum state from its outcomes
Args:
measures (array): outcomes of the canonized state
Returns:
Hyperdeterminant
"""
hyp = measures[0] * measures[7]
return hyp
def create_random_state(seed):
"""Function to create a random quantum state from sees
Args:
seed (int): random seed
Returns:
Random quantum state
"""
np.random.seed(seed)
state = (np.random.rand(8) - .5) + 1j*(np.random.rand(8) - .5)
state = state / np.linalg.norm(state)
return state
def compute_random_tangle(seed):
"""Function to compute the tangle of a randomly created random quantum state from seed
Args:
seed (int): random seed
Returns:
Tangle
"""
state = create_random_state(seed)
return 4 * np.abs(hyperdeterminant(state))
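
if __name__ == "__main__":
    # Minimal usage sketch (added; not part of the original module). It ties
    # the functions above together: build the noiseless ansatz, canonize a
    # random state, and compare the measured tangle against the exact value.
    seed = 0
    state = create_random_state(seed)
    circuit = ansatz(p=0)
    cost, theta = canonize(state, circuit, shots=1000)
    tangle_measured = canonical_tangle(state, theta, circuit, shots=1000)
    tangle_exact = compute_random_tangle(seed)
    print(f"final cost: {cost:.4f}")
    print(f"measured tangle: {tangle_measured:.4f}, exact tangle: {tangle_exact:.4f}")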
|
1694284
|
from krun import ABS_TIME_FORMAT, UNKNOWN_TIME_DELTA, UNKNOWN_ABS_TIME
import datetime
class TimeEstimateFormatter(object):
def __init__(self, seconds):
"""Generates string representations of time estimates.
Args:
seconds -- estimated seconds into the future. None for unknown.
"""
self.start = datetime.datetime.now()
if seconds is not None:
self.delta = datetime.timedelta(seconds=seconds)
self.finish = self.start + self.delta
else:
self.delta = None
self.finish = None
@property
def start_str(self):
return str(self.start.strftime(ABS_TIME_FORMAT))
@property
def finish_str(self):
if self.finish is not None:
return str(self.finish.strftime(ABS_TIME_FORMAT))
else:
return UNKNOWN_ABS_TIME
@property
def delta_str(self):
if self.delta is not None:
return str(self.delta).split(".")[0]
else:
return UNKNOWN_TIME_DELTA
def now_str():
"""Just return the time now (formatted)"""
return str(datetime.datetime.now().strftime(ABS_TIME_FORMAT))
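
if __name__ == "__main__":
    # Minimal usage sketch (added; not part of the original module); assumes
    # the krun package providing ABS_TIME_FORMAT etc. is importable.
    tef = TimeEstimateFormatter(90)
    print("start :", tef.start_str)
    print("finish:", tef.finish_str)  # 90 seconds from now
    print("delta :", tef.delta_str)   # e.g. "0:01:30"
    print("now   :", now_str())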
|
1694315
|
import os
import sys
sys.path.insert(0, os.getcwd())
import numpy as np
import subprocess
import imageio
import shutil
import json
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
import tqdm
import matplotlib.pyplot as plt
import matplotlib.ticker
import matplotlib.cm
import matplotlib.colors
import matplotlib.patches as patches
import matplotlib.cbook
import seaborn as sns
import itertools
from typing import NamedTuple, Tuple, List, Optional, Any, Union
import common.utils as utils
import pyrenderer
from volnet.inference import LoadedModel
from volnet.network_gradients import NetworkGradientTransformer
from losses.lossbuilder import LossBuilder
from volnet.sampling import PlasticSampler
BASE_PATH = 'volnet/results/eval_GradientNetworks1_v2'
BEST_GRID_RESOLUTION = 32
BEST_GRID_CHANNELS = 16 #32
BEST_NETWORK_LAYERS = 4 #6
BEST_NETWORK_CHANNELS = 32
BEST_ACTIVATION = "SnakeAlt:1"
BEST_FOURIER_STD = -1 # NERF
BEST_FOURIER_COUNT = 14 # to fit within 32 channels
DEFAULT_NUM_SAMPLES = "512**3"
DEFAULT_NUM_EPOCHS = 300
DEFAULT_STEPSIZE = 1 / 1024 #1 / 512
GRADIENT_WEIGHT_RANGE_MAX = -2
GRADIENT_WEIGHT_RANGE_MIN = -10
GRADIENT_WEIGHT_SCALE = 0.5
GRADIENT_WEIGHT_DEFAULT_VALUE = -6
# only use cosine similarity on gradients longer than this value
EVAL_WORLD_NUM_POINTS = 256**3 #512**3
EVAL_SCREEN_SIZE = 1024
EVAL_LENGTH_THRESHOLDS = [0.0, 0.01, 0.1, 1.0]
EVAL_LENGTH_THRESHOLDS_IDX_PLOT = 1
EVAL_SCREEN_FD_SCALES = [(1, '*1', '_x1')]
EVAL_WORLD_FD_SCALES = EVAL_SCREEN_FD_SCALES
EVAL_WORLD_AD_SCALES = [(4, '*4')]
class Config(NamedTuple):
name: str
human_name: str
settings: str
grid_size: int
overwrite_layers: Optional[int] = None
overwrite_samples: Optional[str] = None
overwrite_epochs: Optional[int] = None
synthetic: bool = False
use_in_teaser: bool = False
configX = [
Config(
name = "Blobby",
human_name = "Blobby",
settings = "config-files/implicit-Blobby.json",
grid_size = 128,
synthetic = True
),
Config(
name = "MarschnerLobb",
human_name = "Marschner~Lobb",
settings = "config-files/implicit-MarschnerLobb.json",
grid_size = 256,
synthetic = True
),
Config(
name = "Jet",
human_name = "Jet",
settings = "config-files/LuBerger-Jet-v3-shaded.json",
grid_size = 512,
use_in_teaser = True
),
Config(
name = "Ejecta1024",
human_name = "Ejecta",
settings = "config-files/ejecta1024-v7-shaded.json",
grid_size = 1024,
overwrite_samples = "1024**3",
overwrite_epochs=100
),
]
def main():
cfgs = []
for config in configX:
print("\n==========================================")
print(config.name)
print("==========================================")
train(config)
statistics_file = eval(config)
cfgs.append((config, statistics_file))
print("\n==========================================")
print("MAKE PLOTS")
print("==========================================")
make_plots(cfgs)
def _gradient_weight(index: int):
"""
Converts from the weight index in [GRADIENT_WEIGHT_RANGE_MIN, GRADIENT_WEIGHT_RANGE_MAX]
to the actual gradient weight in [0,1]
"""
return np.tanh(GRADIENT_WEIGHT_SCALE*index)*0.5 + 0.5
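# Added note (not in the original script): with GRADIENT_WEIGHT_SCALE = 0.5 the
# mapping is weight = tanh(0.5 * index) * 0.5 + 0.5, so the swept indices map to
# roughly 4.5e-5 (index -10), 0.0025 (default index -6) and 0.119 (index -2).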
def _run_name(config: Config, gradient_weight: Optional[int]):
"""
Returns the name of the run for the given config and gradient weight index.
If the gradient weight index is None, the network is trained without gradients.
:param config:
:param gradient_weight:
:return: the run name
"""
if gradient_weight is None:
return config.name + "-NoGradient"
else:
return config.name + "-Gradient%+02d"%gradient_weight
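# Added note (not in the original script): e.g. _run_name(cfg, None) yields
# "<cfg.name>-NoGradient" and _run_name(cfg, -6) yields "<cfg.name>-Gradient-6";
# this string is also the stem of the .hdf5/.volnet files written by train().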
def train(config: Config):
best_network_layers = config.overwrite_layers or BEST_NETWORK_LAYERS
training_samples = config.overwrite_samples or DEFAULT_NUM_SAMPLES
epochs = config.overwrite_epochs or DEFAULT_NUM_EPOCHS
common_args = [
sys.executable, "volnet/train_volnet.py",
config.settings,
"--train:mode", "world",
"--train:samples", training_samples,
'--rebuild_dataset', '51',
'--rebuild_importance', '0.1',
"--val:copy_and_split",
"--layers", ':'.join([str(BEST_NETWORK_CHANNELS)] * (best_network_layers - 1)), # -1 because last layer is implicit
"--train:batchsize", "64*64*128",
"--activation", BEST_ACTIVATION,
'--fouriercount', str(BEST_FOURIER_COUNT),
'--fourierstd', str(BEST_FOURIER_STD),
'--volumetric_features_channels', str(BEST_GRID_CHANNELS),
'--volumetric_features_resolution', str(BEST_GRID_RESOLUTION),
"-l1", "1",
'-lr', '0.01',
"--lr_step", "100",
"-i", str(epochs),
"--logdir", BASE_PATH + '/log',
"--modeldir", BASE_PATH + '/model',
"--hdf5dir", BASE_PATH + '/hdf5',
'--save_frequency', '20',
]
def args_no_grad():
return [
"--outputmode", "density:direct",
"--lossmode", "density",
]
def args_with_grad(weight_index: int):
return [
"--outputmode", "densitygrad:direct",
"--lossmode", "densitygrad",
"--gradient_weighting", str(_gradient_weight(weight_index)),
"--gradient_l1", "0",
"--gradient_l2", "1",
]
def run(args, filename):
args2 = args + ["--name", filename]
if os.path.exists(os.path.join(BASE_PATH, 'hdf5', filename+".hdf5")):
print("Skipping", filename)
else:
print("\n=====================================\nRun", filename)
subprocess.run(args2, check=True)
run(common_args + args_no_grad(), _run_name(config, None))
for i in range(GRADIENT_WEIGHT_RANGE_MIN, GRADIENT_WEIGHT_RANGE_MAX+1):
run(common_args + args_with_grad(i), _run_name(config, i))
class NetworkWrapperExtractDensity(nn.Module):
"""
Wraps a densitygrad-network and returns only the density
"""
def __init__(self, net: nn.Module):
super().__init__()
self._net = net
def forward(self, x, *args, **kwargs):
y = self._net(x, *args, **kwargs)
return y[...,:1]
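# Added note (not in the original script): wrapping a network whose output is,
# e.g., an (N, 4) tensor with the density in channel 0 makes it behave like a
# density-only network returning shape (N, 1).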
class VolumeEvaluation(nn.Module):
def __init__(self, vol: pyrenderer.IVolumeInterpolation):
super().__init__()
self._vol = vol
def forward(self, x, *args, **kwargs):
return self._vol.evaluate(x)
class VolumeEvaluationWithGradient(nn.Module):
def __init__(self, vol: pyrenderer.IVolumeInterpolation):
super().__init__()
self._vol = vol
def forward(self, x, *args, **kwargs):
densities, gradients = self._vol.evaluate_with_gradients(x)
return torch.cat((densities, gradients), dim=-1)
def use_direction(self):
return False
def _eval_world(interp_or_net: Union[pyrenderer.VolumeInterpolationNetwork, torch.nn.Module],
dataloader: torch.utils.data.DataLoader,
network_args: Any = None,
no_gradients: bool = False,
input16bit: bool = False):
device = torch.device('cuda')
dtype32 = torch.float32
dtype16 = torch.float16
density_l1 = None
density_l2 = None
gradient_l1 = None
gradient_l2 = None
gradient_length_l1 = None
gradient_cosine_simX = [None] * len(EVAL_LENGTH_THRESHOLDS)
weights = None
time_seconds = 0
timer = pyrenderer.GPUTimer()
def append(out, v):
v = v.cpu().numpy()
if out is None: return v
return np.concatenate((out, v), axis=0)
with torch.no_grad():
#if not isinstance(interp_or_net, torch.nn.Module):
# scene_network = interp_or_net.current_network()
# old_box_min = scene_network.box_min
# old_box_size = scene_network.box_size
# scene_network.clear_gpu_resources() # so that changing the box has an effect
# scene_network.box_min = pyrenderer.float3(0, 0, 0)
# scene_network.box_size = pyrenderer.float3(1, 1, 1)
warmup = True
for locations_gt, densities_gt, gradients_gt, opacities_gt in tqdm.tqdm(dataloader):
locations_gt = locations_gt[0].to(device=device)
densities_gt = densities_gt[0].to(device=device)
gradients_gt = gradients_gt[0].to(device=device)
opacities_gt = opacities_gt[0].to(device=device)
if isinstance(interp_or_net, torch.nn.Module):
# Native Pytorch
if warmup:
if input16bit:
prediction = interp_or_net(locations_gt.to(dtype=torch.float16), *network_args)
else:
prediction = interp_or_net(locations_gt, *network_args)
warmup = False
timer.start()
if input16bit:
prediction = interp_or_net(locations_gt.to(dtype=torch.float16), *network_args)
else:
prediction = interp_or_net(locations_gt, *network_args)
timer.stop()
time_seconds += timer.elapsed_milliseconds()/1000.0
densities_pred = prediction[:,:1]
if not no_gradients:
gradients_pred = prediction[:,1:]
else:
gradients_pred = None
else:
# Custom TensorCore Implementation
if warmup:
if no_gradients:
densities_pred = interp_or_net.evaluate(locations_gt)
else:
densities_pred, gradients_pred = interp_or_net.evaluate_with_gradients(locations_gt)
warmup = False
timer.start()
if no_gradients:
densities_pred = interp_or_net.evaluate(locations_gt)
gradients_pred = None
else:
densities_pred, gradients_pred = interp_or_net.evaluate_with_gradients(locations_gt)
timer.stop()
time_seconds += timer.elapsed_milliseconds() / 1000.0
density_l1 = append(density_l1, torch.abs(densities_gt-densities_pred)[:,0])
density_l2 = append(density_l2, F.mse_loss(densities_gt, densities_pred, reduction='none')[:,0])
if not no_gradients:
weights = append(weights, opacities_gt[:,0])
gradient_l1 = append(gradient_l1,
torch.mean(torch.abs(gradients_gt - gradients_pred), dim=1))
gradient_l2 = append(gradient_l2,
torch.mean(F.mse_loss(gradients_gt, gradients_pred, reduction='none'), dim=1))
len_gt = torch.linalg.norm(gradients_gt, dim=1, keepdim=True)
len_pred = torch.linalg.norm(gradients_pred, dim=1, keepdim=True)
gradient_length_l1 = append(gradient_length_l1, torch.abs(len_gt - len_pred)[:,0])
len_gt = torch.clip(len_gt, min=1e-5)
len_pred = torch.clip(len_pred, min=1e-5)
N = gradients_gt.shape[0]
cosine_sim = torch.bmm((gradients_gt / len_gt).reshape(N, 1, 3),
(gradients_pred / len_pred).reshape(N, 3, 1))
cosine_sim = cosine_sim[:,0,0]
len_gt = len_gt[:,0]
for i in range(len(EVAL_LENGTH_THRESHOLDS)):
length_mask = len_gt >= EVAL_LENGTH_THRESHOLDS[i]
cosine_sim_filtered = torch.masked_select(cosine_sim, length_mask)
gradient_cosine_simX[i] = append(gradient_cosine_simX[i], cosine_sim_filtered)
#if not isinstance(interp_or_net, torch.nn.Module):
# scene_network = interp_or_net.current_network()
# scene_network.box_min = old_box_min
# scene_network.box_size = old_box_size
# scene_network.clear_gpu_resources() # for reset
def extract_stat(v, weights=None):
# create histogram
frequencies, bin_edges = np.histogram(v, bins=50, weights=weights)
# create boxplot stats
bxpstats = matplotlib.cbook.boxplot_stats(v)
for d in bxpstats:
d['fliers'] = list() # delete fliers (too big)
# fill dictionary
avg = np.average(v, weights=weights)
if weights is None:
std = np.std(v)
else:
std = np.sqrt(np.average((v-avg)**2, weights=weights))
return {
'min': float(np.min(v)),
'max': float(np.max(v)),
'mean': float(avg),
'median': float(np.median(v)),
'std': float(std),
'histogram': {"frequencies": list(map(int, frequencies)), "bin_edges": list(map(float, bin_edges))},
'bxpstats': bxpstats
}
if no_gradients:
return {
'density_l1': extract_stat(density_l1),
'density_l2': extract_stat(density_l2),
'total_time_seconds': float(time_seconds)
}
else:
return {
'density_l1': extract_stat(density_l1),
'density_l2': extract_stat(density_l2),
'gradient_l1': extract_stat(gradient_l1),
'gradient_l2': extract_stat(gradient_l2),
'length_l1': extract_stat(gradient_length_l1),
'cosine_similarity': [
{'threshold': EVAL_LENGTH_THRESHOLDS[i], 'data': extract_stat(gradient_cosine_simX[i])}
for i in range(len(EVAL_LENGTH_THRESHOLDS))
],
'gradient_l1_weighted': extract_stat(gradient_l1, weights=weights),
'gradient_l2_weighted': extract_stat(gradient_l2, weights=weights),
'length_l1_weighted': extract_stat(gradient_length_l1, weights=weights),
'cosine_similarity_weighted': [
{'threshold': 0.0, 'data': extract_stat(gradient_cosine_simX[0], weights=weights)}
],
'total_time_seconds': float(time_seconds)
}
def eval(config: Config):
"""
Evaluates the networks in world- and screen-space
:param config:
:return:
"""
print("Evaluate")
statistics_file = os.path.join(BASE_PATH, 'stats-%s.json' % config.name)
if os.path.exists(statistics_file):
print("Statistics file already exists!")
return statistics_file
timer = pyrenderer.GPUTimer()
device = torch.device('cuda')
dtype = torch.float32
#world
num_points = EVAL_WORLD_NUM_POINTS #256**3 #512**3
batch_size = min(EVAL_WORLD_NUM_POINTS, 128**3)
num_batches = num_points // batch_size
#screen
width = EVAL_SCREEN_SIZE
height = EVAL_SCREEN_SIZE
stepsize = DEFAULT_STEPSIZE
ssim_loss = LossBuilder(device).ssim_loss(4)
lpips_loss = LossBuilder(device).lpips_loss(4, 0.0, 1.0)
grid_encoding = pyrenderer.SceneNetwork.LatentGrid.ByteLinear #.Float
rendering_mode = LoadedModel.EvaluationMode.TENSORCORES_MIXED
output_stats = {
"name": config.name,
"settings": config.settings,
}
# Load networks
torch.cuda.empty_cache()
def load_and_save(i: Optional[int]):
filename = _run_name(config, i)
filename = os.path.abspath(os.path.join(BASE_PATH, 'hdf5', filename+".hdf5"))
if not os.path.exists(filename):
print("File not found:", filename, file=sys.stderr)
raise ValueError("File not found: "+filename)
try:
ln = LoadedModel(filename, force_config_file=config.settings,
grid_encoding=grid_encoding)
volnet_filename = filename.replace('.hdf5', '.volnet')
ln.save_compiled_network(volnet_filename)
volnet_filesize = os.path.getsize(volnet_filename)
return ln, filename, volnet_filesize
except Exception as e:
print("Unable to load '%s':" % filename, e)
raise ValueError("Unable to load '%s': %s" % (filename, e))
lns = dict()
lns['nograd'] = load_and_save(None)
for i in range(GRADIENT_WEIGHT_RANGE_MIN, GRADIENT_WEIGHT_RANGE_MAX + 1):
lns[i] = load_and_save(i)
base_ln: LoadedModel = lns['nograd'][0]
network_fd = NetworkGradientTransformer.finite_differences(
base_ln.get_network_pytorch()[0], 1/config.grid_size)
network_ad = NetworkGradientTransformer.autodiff(
base_ln.get_network_pytorch()[0])
# EVALUATE SCREEN
print("-- EVALUATE SCREEN SPACE --")
image_folder = os.path.join(BASE_PATH, "images")
os.makedirs(image_folder, exist_ok=True)
camera = base_ln.get_default_camera()
reference_image = base_ln.render_reference(
camera, width, height, timer=None, stepsize_world=stepsize,
channel=pyrenderer.IImageEvaluator.Color) # warmup
base_ln.render_reference(
camera, width, height, timer=timer, stepsize_world=stepsize,
channel=pyrenderer.IImageEvaluator.Color) # timing
reference_feature = base_ln.get_image_evaluator().volume.volume().get_feature(0)
channels = reference_feature.channels()
resolution = reference_feature.base_resolution()
bytes_per_voxel = pyrenderer.Volume.bytes_per_type(reference_feature.type())
reference_volume_size = bytes_per_voxel * channels * \
resolution.x * resolution.y * resolution.z
output_stats['reference_volume_size'] = reference_volume_size
screen_time_reference = timer.elapsed_milliseconds()/1000.0
imageio.imwrite(
os.path.join(image_folder, '%s-color-reference.png' % config.name),
LoadedModel.convert_image(reference_image))
reference_normal_image = base_ln.render_reference(
camera, width, height, timer=None, stepsize_world=stepsize,
channel=pyrenderer.IImageEvaluator.Normal)
imageio.imwrite(
os.path.join(image_folder, '%s-normal-reference.png' % config.name),
LoadedModel.convert_image(reference_normal_image))
output_stats_screen = {}
def _eval_screen(ln, name, mode, override_network=None):
with torch.no_grad():
ln.render_network(
camera, width, height, mode, stepsize,
override_network=override_network, timer=None,
channel=pyrenderer.IImageEvaluator.Color) # warmup
current_image = ln.render_network(
camera, width, height, mode, stepsize,
override_network=override_network, timer=timer,
channel=pyrenderer.IImageEvaluator.Color) # actual rendering
imgname = os.path.join(image_folder, '%s-color-%s.png' % (config.name, name))
imageio.imwrite(
imgname,
LoadedModel.convert_image(current_image))
# normal image
normal_image = ln.render_network(
camera, width, height, mode, stepsize,
override_network=override_network, timer=None,
channel=pyrenderer.IImageEvaluator.Normal)
normal_imgname = os.path.join(image_folder, '%s-normal-%s.png' % (config.name, name))
imageio.imwrite(
normal_imgname,
LoadedModel.convert_image(normal_image))
# return stats
return {
"time_seconds": timer.elapsed_milliseconds()/1000.0,
"ssim-color": ssim_loss(current_image, reference_image).item(),
"lpips-color": lpips_loss(current_image, reference_image).item(),
"ssim-normal": ssim_loss(normal_image, reference_normal_image).item(),
"lpips-normal": lpips_loss(normal_image, reference_normal_image).item(),
'color_image_path': imgname,
'normal_image_path': normal_imgname
}
# baseline methods
print("Evaluate baselines")
volume_interp_network = base_ln.get_volume_interpolation_network()
volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.FINITE_DIFFERENCES
for scale, name, imgname in EVAL_SCREEN_FD_SCALES:
print("evaluate FD with scale", scale)
volume_interp_network.finite_differences_stepsize = 1 / (scale * config.grid_size)
output_stats_screen['FD%s'%name] = _eval_screen(
base_ln, 'FD%s'%imgname, LoadedModel.EvaluationMode.TENSORCORES_MIXED)
print("evaluate AD")
volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.ADJOINT_METHOD
output_stats_screen['AD'] = _eval_screen(
base_ln, 'AD', LoadedModel.EvaluationMode.TENSORCORES_MIXED)
# no-grad network
print("evaluate no-grad network")
volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.OFF_OR_DIRECT
output_stats_screen['nograd'] = _eval_screen(
base_ln, 'nograd', LoadedModel.EvaluationMode.TENSORCORES_MIXED)
output_stats_screen['nograd']['compressed_size'] = lns['nograd'][2]
output_stats_screen['nograd']['compression'] = \
reference_volume_size / lns['nograd'][2]
# densitygrad networks
for i in range(GRADIENT_WEIGHT_RANGE_MIN, GRADIENT_WEIGHT_RANGE_MAX + 1):
print("evaluate network", i)
ln: LoadedModel = lns[i][0]
volume_interp_network = ln.get_volume_interpolation_network()
volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.OFF_OR_DIRECT
output_stats_screen['network%+02d' % i] = _eval_screen(
ln, 'network%+02d'%i, LoadedModel.EvaluationMode.TENSORCORES_MIXED)
output_stats_screen['network%+02d' % i]['compressed_size'] = lns[i][2]
output_stats_screen['network%+02d' % i]['compression'] = \
reference_volume_size / lns[i][2]
output_stats_screen['reference'] = {'time_seconds': screen_time_reference}
output_stats['screen'] = output_stats_screen
torch.cuda.empty_cache()
# EVALUATE WORLD
print("-- EVALUATE WORLD SPACE --")
# create dataset
dataset = []
volume_interpolation = base_ln.get_image_evaluator().volume
ray_evaluator = base_ln.get_image_evaluator().ray_evaluator
min_density = ray_evaluator.min_density
max_density = ray_evaluator.max_density
tf_evaluator = ray_evaluator.tf
sampler = PlasticSampler(3)
for i in tqdm.trange(num_batches):
indices = np.arange(i*batch_size, (i+1)*batch_size, dtype=np.int32)
locations = sampler.sample(indices).astype(np.float32)
locations_gpu = torch.from_numpy(locations).to(device=device)
densities, gradients = volume_interpolation.evaluate_with_gradients(locations_gpu)
colors = tf_evaluator.evaluate(
densities, min_density, max_density, gradients=gradients)
opacities = colors[:, 3:4]
dataset.append((
locations,
torch.clamp(densities, 0.0, 1.0).cpu().numpy(),
gradients.cpu().numpy(),
opacities.cpu().numpy()))
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=1, shuffle=False)
tf_index = torch.full((batch_size,), 0, dtype=torch.int32, device=device)
time_index = torch.full((batch_size,), 0, dtype=torch.float32, device=device)
ensemble_index = torch.full((batch_size,), 0, dtype=torch.float32, device=device)
network_args = [tf_index, time_index, ensemble_index, 'screen']
output_stats_world = {}
# no-gradient for performance
print("No gradients for performance")
output_stats_world['Forward-PyTorch32'] = _eval_world(
base_ln.get_network_pytorch()[0], dataloader, network_args, no_gradients=True)
output_stats_world['Forward-PyTorch16'] = _eval_world(
base_ln.get_network_pytorch()[1], dataloader, network_args, no_gradients=True, input16bit=True)
volume_interp_network = base_ln.get_volume_interpolation_network()
volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.OFF_OR_DIRECT
output_stats_world['Forward-TensorCores-NoSaving'] = _eval_world(
volume_interp_network, dataloader, no_gradients=True)
volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.ADJOINT_METHOD
output_stats_world['Forward-TensorCores-WithSaving'] = _eval_world(
volume_interp_network, dataloader, no_gradients=True)
# baseline methods
print("Evaluate baselines")
output_stats_world['FD-PyTorch'] = _eval_world(
network_fd, dataloader, network_args)
volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.FINITE_DIFFERENCES
for scale, name, _ in EVAL_WORLD_FD_SCALES:
volume_interp_network.finite_differences_stepsize = 1 / (scale * config.grid_size)
output_stats_world['FD-TensorCores%s'%name] = _eval_world(
volume_interp_network, dataloader)
output_stats_world['AD-PyTorch'] = _eval_world(
network_ad, dataloader, network_args)
volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.ADJOINT_METHOD
for scale, name in EVAL_WORLD_AD_SCALES:
volume_interp_network.adjoint_latent_grid_central_differences_stepsize_scale = scale
output_stats_world['AD-TensorCores%s'%name] = _eval_world(
volume_interp_network, dataloader)
# densitygrad networks
for i in range(GRADIENT_WEIGHT_RANGE_MIN, GRADIENT_WEIGHT_RANGE_MAX + 1):
print("evaluate network", i)
ln: LoadedModel = lns[i][0]
volume_interp_network = ln.get_volume_interpolation_network()
volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.OFF_OR_DIRECT
output_stats_world['network%+02d'%i] = _eval_world(
volume_interp_network, dataloader)
output_stats['world'] = output_stats_world
torch.cuda.empty_cache()
# save statistics
print("\n===================================== Done, save statistics")
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, (np.float32, np.float64)):
return float(obj)
return json.JSONEncoder.default(self, obj)
with open(statistics_file, "w") as f:
json.dump(output_stats, f, cls=NumpyEncoder)
return statistics_file
def make_plots(cfgs: List[Tuple[Config, str]]):
#load stats
cfgs2 = []
for row, (cfg, statfile) in enumerate(cfgs):
with open(statfile, "r") as f:
stats = json.load(f)
cfgs2.append((cfg, stats))
#_make_adjoint_table(cfgs2)
#_make_fd_table(cfgs2)
_make_performance_table(cfgs2)
_make_synthetic_error_plots(cfgs2)
_make_big_error_table(cfgs2)
_make_teaser(cfgs2)
def _make_adjoint_table(cfgs: List[Tuple[Config, dict]]):
"""
Table to analyze the stepsize for the latent grid derivative in world space
"""
def format_stat(stat, key):
s = stat['world'][key]['gradient_l1']['mean']
return "%.3e"%s, s
print("Write adjoint table table")
with open(os.path.join(BASE_PATH, "AdjointTable.tex"), "w") as f:
f.write("\\begin{tabular}{@{}c|c%s@{}}\n"%("c"*len(EVAL_WORLD_AD_SCALES)))
f.write("\\toprule\n")
f.write(" & \\multicolumn{%d}{c}{AD Grid Stepsize - Mean Gradient L1}\\\\\n"%(1+len(EVAL_WORLD_AD_SCALES)))
f.write("Dataset & Torch & %s\\\\\n" % " & ".join([name for scale,name in EVAL_WORLD_AD_SCALES]))
f.write("\\midrule\n")
for cfg, stats in cfgs:
f.write(cfg.name)
f.write(" & ")
f.write(format_stat(stats, 'AD-PyTorch')[0])
best_stat_index = np.argmin([format_stat(stats, 'AD-TensorCores%s'%name)[1] for scale,name in EVAL_WORLD_AD_SCALES])
for i,(scale,name) in enumerate(EVAL_WORLD_AD_SCALES):
f.write(" & ")
s,v = format_stat(stats, 'AD-TensorCores%s'%name)
if i==best_stat_index:
f.write("\\textbf{"+s+"}")
else:
f.write(s)
f.write("\\\\\n")
f.write("\\bottomrule\n")
f.write("\\end{tabular}\n")
def _make_fd_table(cfgs: List[Tuple[Config, dict]]):
"""
Table to analyze the stepsize for the finite differences in world space
"""
def format_stat(stat, key):
s = stat['world'][key]['gradient_l1']['mean']
return "%.3e"%s, s
def format_scale(scale):
if scale < 1:
return "%d/R"%int(1/scale)
elif scale==1:
return "1/R"
else:
return "1/%dR"%int(scale)
print("Write finite difference table")
with open(os.path.join(BASE_PATH, "FiniteDifferenceTable.tex"), "w") as f:
f.write("\\begin{tabular}{@{}c|%s@{}}\n"%("c"*len(EVAL_WORLD_AD_SCALES)))
f.write("\\toprule\n")
f.write(" & \\multicolumn{%d}{c}{FD Stepsize - Mean Gradient L1}\\\\\n"%(len(EVAL_WORLD_FD_SCALES)))
f.write("Dataset & %s\\\\\n" % " & ".join([format_scale(scale) for scale,name,_ in EVAL_WORLD_FD_SCALES]))
f.write("\\midrule\n")
for cfg, stats in cfgs:
f.write(cfg.name)
best_stat_index = np.argmin([format_stat(stats, 'FD-TensorCores%s'%name)[1] for scale,name,_ in EVAL_WORLD_FD_SCALES])
for i,(scale,name,_) in enumerate(EVAL_WORLD_FD_SCALES):
f.write(" & ")
s,v = format_stat(stats, 'FD-TensorCores%s'%name)
if i==best_stat_index:
f.write("\\textbf{"+s+"}")
else:
f.write(s)
f.write("\\\\\n")
f.write("\\bottomrule\n")
f.write("\\end{tabular}\n")
def _make_performance_table(cfgs: List[Tuple[Config, dict]]):
"""
Table comparing rendering and evaluation times in screen and world space
"""
def format_stat(stat, key1, key2, base=None):
s = stat[key1][key2]
if 'total_time_seconds' in s:
s = s['total_time_seconds']
else:
s = s['time_seconds']
if base is None:
return "$%.3f$"%s, s
else:
return "$%.3f$ ($\\times %.2f$)"%(s,s/base), s
print("Write performance table")
with open(os.path.join(BASE_PATH, "PerformanceTable.tex"), "w") as f:
f.write("\\begin{tabular}{@{}c|ccc}\n")
f.write("\\toprule\n")
f.write(" & \\multicolumn{3}{c}{Time in seconds for an image of $%d^2$ pixels}\\\\\n" % (EVAL_SCREEN_SIZE))
f.write("Dataset & Direct & FD & Adjoint\\\\\n")
f.write("\\midrule\n")
for cfg, stats in cfgs:
f.write(cfg.name)
s, base = format_stat(stats, 'screen', 'network-7')
f.write(" & " + s)
f.write(" & " + format_stat(stats, 'screen', 'FD*1', base)[0])
f.write(" & " + format_stat(stats, 'screen', 'AD', base)[0])
f.write("\\\\\n")
f.write("\\bottomrule\n")
f.write("\\end{tabular}%\n\\\\%\n")
f.write("\\begin{tabular}{@{}ccccc}\n")
f.write("\\toprule\n")
f.write("\\multicolumn{5}{c|}{Time in seconds for $2^{%d}$ points}\\\\\n"%(np.log2(EVAL_WORLD_NUM_POINTS)))
f.write("Forward & Forward w/ saving & Direct & FD & Adjoint\\\\\n")
f.write("\\midrule\n")
for cfg, stats in cfgs:
s, base = format_stat(stats, 'world', 'Forward-TensorCores-NoSaving')
f.write(s)
f.write(" & " + format_stat(stats, 'world', 'Forward-TensorCores-WithSaving', base)[0])
f.write(" & " + format_stat(stats, 'world', 'network-7', base)[0])
f.write(" & " + format_stat(stats, 'world', 'FD-TensorCores*1', base)[0])
f.write(" & " + format_stat(stats, 'world', 'AD-TensorCores*4', base)[0])
f.write("\\\\\n")
f.write("\\bottomrule\n")
f.write("\\end{tabular}%\n")
def _make_synthetic_error_plots(cfgs: List[Tuple[Config, dict]]):
print("Write small statistics for synthetic tests")
PLOT = "boxplot" # "errorbar", "violinplot", "boxplot"
YAXIS = "linear" # log, linear
cfgs_filtered = list(filter(lambda x: x[0].synthetic, cfgs))
num_classes = len(cfgs_filtered)
cm = matplotlib.cm.get_cmap('viridis')
class_colors = [
cm(f) for f in np.linspace(0, 1, num_classes)
]
X = XticksMajor = np.array([0, 1, 2])
Xclass = 4
Xall = np.concatenate([X + Xclass*i for i in range(num_classes)])
Xlabels = ["FD", "Adjoint", "Direct"]
XlabelsAll = np.concatenate([
["FD\n ", f"Adjoint\n$\\bf{{{cfg.human_name}}}$", "Direct\n "]
for cfg,s in cfgs_filtered])
violin_width = 0.8
violin_alpha = 0.5
marker_size = 8
def errorbar(ax, x, s, color):
y = np.array([s['median']]) if PLOT == 'boxplot' else np.array([s['mean']])
if PLOT == "errorbar":
yerr = np.array([s['std']])
ax.errorbar([x], y, yerr=yerr, elinewidth=0.5*violin_width, color='black')
ax.plot([x], y, color=color, marker='o', markersize=marker_size)
elif PLOT == "violinplot":
# simulate data
frequencies = np.array(s['histogram']['frequencies'])
bin_edges = np.array(s['histogram']['bin_edges'])
MAX_POINTS = 10000
current_points = np.sum(frequencies, dtype=np.int64)
frequencies = (frequencies * (MAX_POINTS / current_points)).astype(np.int32)
x1 = np.random.uniform(np.repeat(bin_edges[:-1], frequencies), np.repeat(bin_edges[1:], frequencies))
# plot
parts = ax.violinplot([x1], positions=[x], widths=violin_width,
showmeans=False, showmedians=False, showextrema=False)
for pc in parts['bodies']:
pc.set_facecolor(color)
pc.set_edgecolor('black')
pc.set_alpha(violin_alpha)
# show mean
ax.plot([x], y, color=color, marker='o', markersize=marker_size)
elif PLOT == 'boxplot':
bxpstats = s['bxpstats']
ax.bxp(bxpstats, positions=[x], widths=violin_width, showfliers=False)
#ax.boxplot([x1], positions=[x], widths=violin_width)
# annotate
if PLOT != "boxplot":
ax.annotate("%.4f"%y, (x, y),
xytext=(0, 4),
textcoords='offset points',
ha='center', va='bottom')
def plot(ax: plt.Axes, stat, lossX, color, offX):
if not isinstance(lossX, (list, tuple)):
lossX = [lossX]
def get_loss(key):
s = stat['world'][key]
for l in lossX:
s = s[l]
return s
s = get_loss('FD-TensorCores*1')
errorbar(ax, offX + X[0], s, color=color)
s = get_loss('AD-TensorCores*4')
errorbar(ax, offX + X[1], s, color=color)
s = get_loss('network%+d'%GRADIENT_WEIGHT_DEFAULT_VALUE)
errorbar(ax, offX + X[2], s, color=color)
fig, axes = plt.subplots(nrows=1, ncols=2, squeeze=True, figsize=(9, 2.5))
for dset, (cfg, stats) in enumerate(cfgs_filtered):
plot(axes[0], stats, 'length_l1', class_colors[dset], dset*Xclass)
if YAXIS=='log':
axes[0].set_yscale("symlog", linthresh=0.2)
axes[0].set_xticks(Xall)
axes[0].set_xticklabels(XlabelsAll)
axes[0].set_title("Gradient Magnitude Error $\downarrow$")
for dset, (cfg, stats) in enumerate(cfgs_filtered):
plot(axes[1], stats, ['cosine_similarity', 0, 'data'], class_colors[dset], dset*Xclass)
if YAXIS=='log':
zero_threshold = 1e-2
max_y = 1.01
axes[1].set_ylim(0.0, max_y) #(-1.5, max_y)
axes[1].set_yscale("functionlog", functions=[
lambda x: np.maximum(zero_threshold, max_y - x),
lambda y: np.where(y > zero_threshold, max_y - y, max_y - zero_threshold)
])
axes[1].set_yticks(list(np.arange(10) * 0.1) + list(np.arange(10) * 0.01 + 0.9), minor=True)
axes[1].set_yticks([0, 0.5, 0.9, 1], minor=False) #([-1, -0.5, 0, 0.5, 0.9, 1], minor=False)
axes[1].set_yticklabels(["0", "0.5", "0.9", "1"]) #(["-1", "-0.5", "0", "0.5", "0.9", "1"])
axes[1].set_yticklabels([], minor=True)
else:
axes[1].invert_yaxis()
axes[1].set_xticks(Xall)
axes[1].set_xticklabels(XlabelsAll)
axes[1].set_title("Gradient Cosine Similarity $\downarrow$")
fig.tight_layout()
output_filename = os.path.join(BASE_PATH, 'GradientsAnalyticDatasets.pdf')
fig.savefig(output_filename, bbox_inches='tight')
print("Done, saved to", output_filename)
# copy files
OUT_PATH = os.path.join(BASE_PATH, "images-out")
os.makedirs(OUT_PATH, exist_ok=True)
IMAGE_KEYS = [
"reference", "FD_x1", "AD",
"network%+d"%GRADIENT_WEIGHT_DEFAULT_VALUE
]
IMAGE_NAMES = [
"Ref.", "FD", "Adjoint", "Direct"
]
STAT_KEYS = [
None,
'FD*1',
'AD',
'network%+d' % GRADIENT_WEIGHT_DEFAULT_VALUE
]
for cfg, stats in cfgs_filtered:
for k in IMAGE_KEYS:
filename = "%s-color-%s.png"%(cfg.name, k)
in_path = os.path.join(BASE_PATH, "images", filename)
out_path = os.path.join(OUT_PATH, filename)
shutil.copy2(in_path, out_path)
# make table
LATEX_IMAGE_PREFIX = "figures/analytic/" #"images-out/"
LATEX_IMAGE_SIZE = "%.3f\\linewidth"%(0.9/len(IMAGE_KEYS))
with open(os.path.join(BASE_PATH, "GradientsAnalyticDatasetsImages.tex"), "w") as f:
f.write("""
\\setlength{\\tabcolsep}{2pt}%
\\renewcommand{\\arraystretch}{0.4}%
""")
f.write("\\begin{tabular}{%s}%%\n" % ("rl" * len(IMAGE_KEYS)))
for row, (cfg, stats) in enumerate(cfgs_filtered):
if row>0: f.write("\\\\%\n")
# Images
for col, k in enumerate(IMAGE_KEYS):
filename = "%s-color-%s_lens.png" % (cfg.name, k)
if col>0: f.write("&%\n")
f.write("\\multicolumn{2}{c}{\\includegraphics[width=%s]{%s}}" % (
LATEX_IMAGE_SIZE, LATEX_IMAGE_PREFIX+filename))
# stats
for i, (stat, fmt) in enumerate([
('ssim-color', "SSIM: %.3f"),
('lpips-color', "LPIPS: %.3f")]):
f.write("\\\\%\n")
for col, (k,n,sk) in enumerate(zip(IMAGE_KEYS, IMAGE_NAMES,STAT_KEYS)):
if col > 0: f.write("&%\n")
if i==0:
f.write("\multirow{2}{*}{%s}%%\n"%n)
if sk is None:
f.write("&~%\n")
else:
f.write("&{\\tiny " + (fmt % stats['screen'][sk][stat]) + "}%\n")
f.write("\\end{tabular}%\n")
print("Latex file written")
def _make_big_error_table(cfgs: List[Tuple[Config, dict]]):
print("Write big error table")
plot_type = 'violin' # 'errorbar', 'plot', 'violin'
scale_x = 'linear'  # one of 'only_one', 'linear', 'like_weights'
if scale_x == 'only_one':
weight_indices = [GRADIENT_WEIGHT_DEFAULT_VALUE]
else:
weight_indices = list(range(GRADIENT_WEIGHT_RANGE_MIN, GRADIENT_WEIGHT_RANGE_MAX+1))
weight_values = [_gradient_weight(i) for i in weight_indices]
if scale_x == 'like_weights':
XoffWeights = 0.2
X = np.array([0, 0.1] + [XoffWeights + w for w in weight_values])
XticksMajor = [0, 0.1] + [XoffWeights + w for w in weight_values[::5]]
XticksMinor = [XoffWeights + w for w in weight_values]
Xlabels = ["FD", "AD"] + ["%.2f"%w for w in weight_values[::5]]
elif scale_x == 'linear':
#assert GRADIENT_WEIGHT_RANGE_MAX == -4, "GRADIENT_WEIGHT_RANGE_MAX changed, also change plot x indexing"
#assert GRADIENT_WEIGHT_RANGE_MIN == -8, "GRADIENT_WEIGHT_RANGE_MIN changed, also change plot x indexing"
range_weight_values = list(range(len(weight_values)))
X = np.array([0, 1.5] + [3 + i for i in range_weight_values])
XticksMajor = X
XticksMinor = X
Xlabels = ["FD", "AD"] + ["%.4f" % w for w in weight_values]
else: # only one example for gradient weights
assert len(weight_indices)==1
X = XticksMajor = np.array([0, 1, 2])
XticksMinor = []
Xlabels = ["FD", "AD", "ours"]
violin_width = 0.6
violin_alpha = 0.4
marker_size = 8
def errorbar(ax, x, sx, color, clip=False, plot_type=plot_type):
y = np.array([s['mean'] for s in sx])
yerr = np.array([s['std'] for s in sx])
if plot_type == 'violin':
for i, s in enumerate(sx):
# simulate data
frequencies = np.array(s['histogram']['frequencies'])
bin_edges = np.array(s['histogram']['bin_edges'])
MAX_POINTS = 10000
current_points = np.sum(frequencies, dtype=np.int64)
frequencies = (frequencies * (MAX_POINTS / current_points)).astype(np.int32)
x1 = np.random.uniform(np.repeat(bin_edges[:-1], frequencies), np.repeat(bin_edges[1:], frequencies))
# plot
parts = ax.violinplot([x1], positions=x[i:i+1], widths=violin_width,
showmeans=False, showmedians=False, showextrema=False)
for pc in parts['bodies']:
pc.set_facecolor(color)
pc.set_edgecolor('black')
pc.set_alpha(violin_alpha)
# show mean
ax.plot(x, y, color=color, marker='o', markersize=marker_size)
elif plot_type == 'errorbar':
if clip:
# clip error to avoid negative numbers
yerr2 = np.copy(yerr)
yerr2[yerr >= y] = y[yerr >= y] * .999999
yerr = yerr2
ax.errorbar(x, y, yerr=yerr, color=color, marker='o', markersize=marker_size)
elif plot_type == 'plot':
ax.plot(x, y, color=color, marker='o', markersize=marker_size)
else:
raise ValueError("Unknown plot type: " + plot_type)
#fig, axes = plt.subplots(len(cfgs), 7, squeeze=False, sharey='col', figsize=(7*5, 4*len(cfgs)))
fig, axes = plt.subplots(len(cfgs), 7, squeeze=False, figsize=(7 * 5, 4 * len(cfgs)))
for row, (cfg, stats) in enumerate(cfgs):
ax0 = axes[row, 0]
ax5 = axes[row, 1]
ax6 = axes[row, 2]
ax1 = axes[row, 3]
ax2 = axes[row, 4] # ax2 = ax1.twinx()
ax3 = axes[row, 5] # ax3 = ax1.twinx()
ax4 = axes[row, 6]
ax0.set_ylabel(cfg.name, fontsize='xx-large')
if row==0:
ax0.set_title("Reference Rendering")
ax5.set_title("Adjoint Method")
ax6.set_title("Best Direct Prediction")
ax1.set_title("Density L1 $\downarrow$")
ax2.set_title("Gradient Length L1 $\downarrow$")
ax3.set_title("Gradient Cosine Similarity $\downarrow$")
ax4.set_title("Image LPIPS $\downarrow$")
if row==len(cfgs)-1:
ax1.set_xlabel("network gradient weight")
ax2.set_xlabel("network gradient weight")
ax3.set_xlabel("network gradient weight")
ax4.set_xlabel("network gradient weight")
img = imageio.imread(os.path.join(BASE_PATH, "images", "%s-color-reference.png"%cfg.name))
ax0.imshow(img)
ax0.get_xaxis().set_visible(False)
plt.setp(ax0.get_yticklabels(), visible=False)
ax0.tick_params(axis='both', which='both', length=0)
for spine in ['top', 'right', 'bottom', 'left']:
ax0.spines[spine].set_visible(False)
img = imageio.imread(os.path.join(BASE_PATH, "images", "%s-color-AD.png" % cfg.name))
ax5.imshow(img)
ax5.get_xaxis().set_visible(False)
plt.setp(ax5.get_yticklabels(), visible=False)
ax5.tick_params(axis='both', which='both', length=0)
for spine in ['top', 'right', 'bottom', 'left']:
ax5.spines[spine].set_visible(False)
best_lpips_index = np.argmin([stats['screen']['network%+d'%i]['lpips-color'] for i in weight_indices])
best_lpips_index = weight_indices[best_lpips_index]
img = imageio.imread(os.path.join(BASE_PATH, "images", "%s-color-network%+d.png" % (cfg.name, best_lpips_index)))
ax6.imshow(img)
ax6.get_xaxis().set_visible(False)
plt.setp(ax6.get_yticklabels(), visible=False)
ax6.tick_params(axis='both', which='both', length=0)
for spine in ['top', 'right', 'bottom', 'left']:
ax6.spines[spine].set_visible(False)
cm = matplotlib.cm.get_cmap('viridis')
color1 = cm(0)
color2 = cm(0.33)
color3 = cm(0.66)
color4 = cm(0.99)
def plot(ax: plt.Axes, stat, lossX, color, offx, clip=False, plot_type=plot_type):
if not isinstance(lossX, (list, tuple)):
lossX = [lossX]
def get_loss(key):
s = stat['world'][key]
for l in lossX:
s = s[l]
return s
s = get_loss('FD-TensorCores*1')
errorbar(ax, [X[0]+offx], [s], color=color, clip=clip, plot_type=plot_type)
s = get_loss('AD-TensorCores*4')
errorbar(ax, [X[1]+offx], [s], color=color, clip=clip, plot_type=plot_type)
sx = []
for i in weight_indices:
sx.append(get_loss('network%+d'%i))
errorbar(ax, X[2:]+offx, sx, color=color, clip=clip, plot_type=plot_type)
return color
for ax in [ax1, ax2, ax3, ax4]:
ax.set_xticks(XticksMajor, minor=False)
ax.set_xticks(XticksMinor, minor=True)
ax.set_xticklabels(Xlabels, minor=False)
#ax1.set_ylabel("density L1")
plot(ax1, stats, 'density_l1', color1, 0)
ax1.yaxis.label.set_color(color1)
ax1.set_yscale("symlog", linthresh=0.1)
#ax2.set_ylabel("gradient length L1")
plot(ax2, stats, 'length_l1_weighted', color2, 0)
ax2.set_yscale("symlog", linthresh=0.2)
ax2.yaxis.label.set_color(color2)
#ax3.set_ylabel("gradient cosine sim. $\epsilon=%.2f$"%
# EVAL_LENGTH_THRESHOLDS[EVAL_LENGTH_THRESHOLDS_IDX_PLOT])
#plot(ax3, stats, ['cosine_similarity', EVAL_LENGTH_THRESHOLDS_IDX_PLOT, 'data'], color3, 0)
plot(ax3, stats, ['cosine_similarity_weighted', 0, 'data'], color3, 0)
#ax3.invert_yaxis()
ax3.yaxis.label.set_color(color3)
#ax3.spines['right'].set_position(('outward', 60))
zero_threshold = 1e-2
max_y = 1.01
ax3.set_ylim(-1.5, max_y)
ax3.set_yscale("functionlog", functions=[
lambda x: np.maximum(zero_threshold, max_y-x),
lambda y: np.where(y>zero_threshold, max_y-y, max_y-zero_threshold)
])
ax3.set_yticks(list(np.arange(10)*0.1) + list(np.arange(10)*0.01+0.9), minor=True)
ax3.set_yticks([-1, -0.5, 0, 0.5, 0.9, 1], minor=False)
ax3.set_yticklabels(["-1", "-0.5", "0", "0.5", "0.9", "1"])
ax3.set_yticklabels([], minor=True)
ax4.plot([X[0]], [stats['screen']['FD*1']['lpips-color']], color=color4, marker='o', markersize=marker_size)
ax4.plot([X[1]], [stats['screen']['AD']['lpips-color']], color=color4, marker='o', markersize=marker_size)
y4 = [stats['screen']['network%+d'%i]['lpips-color'] for i in weight_indices]
ax4.plot(X[2:], y4, color=color4, marker='o', markersize=marker_size)
ax4.set_yscale('log')
fig.tight_layout()
output_filename = os.path.join(BASE_PATH, 'GradientNetworks.pdf')
fig.savefig(output_filename, bbox_inches='tight')
#plt.show()
print("Done, saved to", output_filename)
def _make_teaser(cfgs: List[Tuple[Config, dict]]):
print("Write teaser")
IMAGE_FOLDER = os.path.join(BASE_PATH, "Teaser")
LATEX_IMAGE_SIZE = "height=3.5cm"
HEATMAP_SIZE = "width=7cm"
COLUMNS = 2 # number of columns of datasets
# filter for teaser datasets
cfgs_filtered = list(filter(lambda x: x[0].use_in_teaser, cfgs))
num_dsets = len(cfgs_filtered)
assert num_dsets%COLUMNS==0
# find best network for each dataset based on LPIPS score
best_index = [None] * num_dsets
best_index_raw = [None] * num_dsets
weight_indices = list(range(GRADIENT_WEIGHT_RANGE_MIN, GRADIENT_WEIGHT_RANGE_MAX + 1))
weight_indices_names = ["$w$="+str(w) for w in weight_indices]
for i in range(num_dsets):
cfg, stats = cfgs_filtered[i]
best_lpips_index = np.argmin([stats['screen']['network%+d'%i]['lpips-color'] for i in weight_indices])
best_index[i] = weight_indices[best_lpips_index]
best_index_raw[i] = best_lpips_index
print(f"Dataset {cfg.name}, best weight index: {best_index[i]}, default: {GRADIENT_WEIGHT_DEFAULT_VALUE}")
# write LaTeX and Images
os.makedirs(IMAGE_FOLDER, exist_ok=True)
with open(os.path.join(IMAGE_FOLDER, "GradientTeaser-v1.tex"), "w") as f:
f.write("""
\\documentclass[10pt,a4paper]{standalone}
\\usepackage{graphicx}
\\usepackage{multirow}
\\begin{document}
\\newcommand{\\timesize}{0.2}%
\\setlength{\\tabcolsep}{1pt}%
\\renewcommand{\\arraystretch}{0.4}%
""")
f.write("\\begin{tabular}{%s}%%\n" % ("rl" * (4*COLUMNS)))
# header
NAMES = ["a) Reference", "b) Finite Differences", "c) Adjoint", "d) Direct"]
NAMES = [v for i in range(COLUMNS) for v in NAMES]
for i, n in enumerate(NAMES):
if i > 0: f.write(" & ")
f.write("\\multicolumn{2}{c}{%s}" % n)
# statistic declaration
STATS = [
# key, name, value-lambda
('time_seconds', 'Rendering:',
lambda v: ("%.3fs" % v) if v < 40 else ("%dm %02ds" % (int(v / 60), int(v) % 60))),
('ssim-color', 'SSIM {\\tiny $\\uparrow$}:', lambda v: "%.3f" % v),
('lpips-color', 'LPIPS {\\tiny $\\downarrow$}:', lambda v: "%.3f" % v)
]
# each dataset gets its own row
for row in range(num_dsets//COLUMNS):
name_cols = [
cfgs_filtered[r][0].name for r in range(row*COLUMNS, (row+1)*COLUMNS)
]
stats_cols = [
cfgs_filtered[r][1] for r in range(row*COLUMNS, (row+1)*COLUMNS)
]
best_lpips_index_cols = [
best_index[r] for r in range(row*COLUMNS, (row+1)*COLUMNS)
]
f.write("\\\\%\n")
# image + stat names
IMAGE_NAMES_cols = [[
"%s-color-reference" % name_cols[c],
"%s-color-FD_x1" % name_cols[c],
"%s-color-AD" % name_cols[c],
"%s-color-network%+d" % (name_cols[c], best_lpips_index_cols[c]),
# extra names, needed later for the detailed stats
"%s-color-network%+d" % (name_cols[c], GRADIENT_WEIGHT_DEFAULT_VALUE),
"%s-normal-reference" % name_cols[c],
"%s-normal-network%+d" % (name_cols[c], best_lpips_index_cols[c]),
"%s-normal-network%+d" % (name_cols[c], GRADIENT_WEIGHT_DEFAULT_VALUE),
] for c in range(COLUMNS)]
STAT_NAMES_cols = [[
"reference",
"FD*1",
"AD",
"network%+d"%best_lpips_index_cols[c]
] for c in range(COLUMNS)]
# images
for col1 in range(COLUMNS):
for col2 in range(4):
shutil.copy2(os.path.join(BASE_PATH, "images", IMAGE_NAMES_cols[col1][col2]+".png"),
os.path.join(IMAGE_FOLDER, IMAGE_NAMES_cols[col1][col2]+".png"))
img = "%s_lens.png" % IMAGE_NAMES_cols[col1][col2]
if not (col1==0 and col2==0):
f.write(" &%\n")
f.write("\\multicolumn{2}{c}{\\includegraphics[%s]{%s}}%%\n" % (LATEX_IMAGE_SIZE, img))
# extra copy for the detailed statistics
for col2 in range(4, len(IMAGE_NAMES_cols[col1])):
shutil.copy2(os.path.join(BASE_PATH, "images", IMAGE_NAMES_cols[col1][col2] + ".png"),
os.path.join(IMAGE_FOLDER, IMAGE_NAMES_cols[col1][col2] + ".png"))
# statistics
for stat_key, stat_name, stat_value in STATS:
f.write("\\\\%\n")
for col1 in range(COLUMNS):
for col2 in range(4):
if not (col1 == 0 and col2 == 0):
f.write(" &%\n")
net_name = STAT_NAMES_cols[col1][col2]
if (net_name is not None) and (stat_key in stats_cols[col1]['screen'][net_name]):
v = stats_cols[col1]['screen'][net_name][stat_key]
f.write("{\\footnotesize %s} & {\\footnotesize %s}%%\n" % (stat_name, stat_value(v)))
else:
f.write(" & %\n")
f.write("\\end{tabular}%\n")
f.write("\\end{document}")
# create heatmap images
default_weight_index = weight_indices.index(GRADIENT_WEIGHT_DEFAULT_VALUE)
def make_heatmap(cfg: Config, stats:dict, colornormal:str, best_index: int, humanname: str):
values_lpips = np.array([stats['screen']['network%+d' % i]['lpips-'+colornormal] for i in weight_indices])
values_ssim = np.array([stats['screen']['network%+d' % i]['ssim-' + colornormal] for i in weight_indices])
cmap = "rocket_r"
fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(14, 1.3))
# make heatmap - SSIM
g = sns.heatmap(values_ssim[np.newaxis,:], ax=axes[0],
cmap='mako',
annot=True, fmt='.3f',
annot_kws={'fontsize': 8},
linewidths=1, square=True,
xticklabels=weight_indices_names,
yticklabels=[f"SSIM {humanname}:"],
cbar=False)
g.set_yticklabels(g.get_yticklabels(), rotation=0)
g.set_xticklabels(g.get_xticklabels(), rotation=0, fontsize=8)
# make heatmap - LPIPS
g = sns.heatmap(values_lpips[np.newaxis, :], ax=axes[1],
cmap='rocket_r',
annot=True, fmt='.3f',
annot_kws={'fontsize': 8},
linewidths=1, square=True,
xticklabels=weight_indices_names,
yticklabels=[f"LPIPS {humanname}:"],
cbar=False)
g.set_yticklabels(g.get_yticklabels(), rotation=0)
g.set_xticklabels(g.get_xticklabels(), rotation=0, fontsize=8)
# annotate
def annotate(x, c):
rect = patches.Rectangle((x+0.05, 0.05), 0.9, 0.9, linewidth=2, edgecolor=c, fill=False)
rect.set_clip_on(False)
axes[1].add_patch(rect)
annotate(default_weight_index, 'green')
annotate(best_index, 'red')
# save
fig.tight_layout()
plt.subplots_adjust(hspace=0.01)
output_filename = f'Heatmap_{cfg.name}_{colornormal}.pdf'
fig.savefig(os.path.join(IMAGE_FOLDER, output_filename), bbox_inches='tight')
plt.close(fig)
return output_filename
with open(os.path.join(IMAGE_FOLDER, "GradientTeaserDetailed-v1.tex"), "w") as f:
f.write("""
\\documentclass[10pt,a4paper]{standalone}
\\usepackage{graphicx}
\\usepackage{xcolor}
\\usepackage[export]{adjustbox}
\\usepackage{multirow}
\\begin{document}
\\newcommand{\\timesize}{0.2}%
\\setlength{\\tabcolsep}{1pt}%
\\renewcommand{\\arraystretch}{0.4}%
\\begin{tabular}{rcccccc}%
""")
for i in range(num_dsets):
cfg, stats = cfgs_filtered[i]
if i>0: f.write("\\\\[2em]%\n")
# name of the dataset
f.write("\\multirow{3}{*}{\\rotatebox[origin=c]{90}{\\textbf{%s}}}%%\n"%cfg.human_name)
# first row: heatmap
for key,name in [("color", "Color"), ("normal", "Normal")]:
fn = make_heatmap(cfg, stats, key, best_index_raw[i], name)
f.write(" & \\multicolumn{3}{c}{\\includegraphics[%s]{%s}}%%\n"%(
HEATMAP_SIZE, fn))
# second row: images
f.write("\\\\%\n")
for suffix, extra in [
("-color-reference", ",cfbox=black 1pt 1pt"),
("-color-network%+d" % GRADIENT_WEIGHT_DEFAULT_VALUE, ",cfbox=green!50!black 1pt 1pt"),
("-color-network%+d" % best_index[i], ",cfbox=red 1pt 1pt"),
("-normal-reference", ",cfbox=black 1pt 1pt"),
("-normal-network%+d" % GRADIENT_WEIGHT_DEFAULT_VALUE, ",cfbox=green!50!black 1pt 1pt"),
("-normal-network%+d" % best_index[i], ",cfbox=red 1pt 1pt")
]:
f.write(" & \\includegraphics[%s%s]{%s.png}%%\n"%(
LATEX_IMAGE_SIZE, extra, cfg.name+suffix))
# third row: stats
f.write("\\\\%\n")
wx = [GRADIENT_WEIGHT_DEFAULT_VALUE, best_index[i]]
for key in ["color", "normal"]:
f.write(" &\n") # empty reference
for j in range(2):
f.write(" & \\begin{tabular}{rl}")
network_key = "network%+d" % wx[j]
w = wx[j]
alpha = _gradient_weight(w)
f.write("$\\alpha$ =&$%.4f$\\\\"%alpha)
f.write("SSIM =&$%.3f$\\\\"%stats['screen'][network_key]['ssim-'+key])
f.write("LPIPS =&$%.3f$" % stats['screen'][network_key]['lpips-' + key])
f.write("\\end{tabular}\n")
f.write("\\end{tabular}%\n")
f.write("\\end{document}\n")
print("Latex files written")
def test():
ln = LoadedModel('volnet/results/hdf5/gradient-Sphere-w02.hdf5')
N = 2 ** 10
torch.manual_seed(42)
np.random.seed(42)
positions = torch.rand((N, 3), dtype=ln._dtype, device=ln._device)
tf_index = torch.full((positions.shape[0],), 0, dtype=torch.int32, device=ln._device)
time_index = torch.full((positions.shape[0],), 0, dtype=torch.float32, device=ln._device)
ensemble_index = torch.full((positions.shape[0],), 0, dtype=torch.float32, device=ln._device)
network_args = [tf_index, time_index, ensemble_index, 'world']
image_evaluator = ln.get_image_evaluator()
volume_interpolation = image_evaluator.volume
network = ln.get_network_pytorch()[0]
network_only_density = NetworkWrapperExtractDensity(network)
grad_network_fd = NetworkGradientTransformer.finite_differences(network_only_density, h=1e-2)
grad_network_ad = NetworkGradientTransformer.autodiff(network_only_density)
grad_volume_fd = NetworkGradientTransformer.finite_differences(VolumeEvaluation(volume_interpolation), h=1e-4)
with torch.no_grad():
# ground truth
densities_gt, gradients_gt = volume_interpolation.evaluate_with_gradients(positions)
# network
tmp = network(positions, *network_args)
densities_network = tmp[...,:1]
gradients_network = tmp[...,1:]
_, gradients_fd = grad_network_fd(positions, *network_args)
_, gradients_ad = grad_network_ad(positions, *network_args)
densities_grid, gradients_fd_grid = grad_volume_fd(positions, *network_args)
def density_difference(a, b):
diff = torch.abs(a-b)
return f"absolute difference: min={torch.min(diff).item():.4f}, " \
f"max={torch.max(diff).item():.4f}, mean={torch.mean(diff).item():.4f}"
def gradient_difference(a, b):
diff_abs = torch.abs(a-b)
len_a = torch.linalg.norm(a, dim=1, keepdim=True)
len_b = torch.linalg.norm(b, dim=1, keepdim=True)
diff_length = torch.abs(len_a - len_b)
len_a = torch.clip(len_a, min=1e-5)
len_b = torch.clip(len_b, min=1e-5)
N = a.shape[0]
cosine_sim = torch.bmm((a/len_a).reshape(N, 1, 3), (b/len_b).reshape(N, 3, 1))
return f"difference absolute: min={torch.min(diff_abs).item():.4f}, " \
f"max={torch.max(diff_abs).item():.4f}, mean={torch.mean(diff_abs).item():.4f}; " \
f"length: min={torch.min(diff_length).item():.4f}, " \
f"max={torch.max(diff_length).item():.4f}, mean={torch.mean(diff_length).item():.4f}; " \
f"cosine sim.: min={torch.min(cosine_sim).item():.4f}, " \
f"max={torch.max(cosine_sim).item():.4f}, mean={torch.mean(cosine_sim).item():.4f}"
print()
print("densities GT<->Network: ", density_difference(densities_gt, densities_network))
print("gradients GT<->Network: ", gradient_difference(gradients_gt, gradients_network))
print("gradients GT<->FD: ", gradient_difference(gradients_gt, gradients_fd))
print("gradients GT<->AutoGrad:", gradient_difference(gradients_gt, gradients_ad))
print("densities GT<->Grid: ", density_difference(densities_gt, densities_grid))
print("gradients GT<->FD-Grid: ", gradient_difference(gradients_gt, gradients_fd_grid))
ad_diff = torch.abs(gradients_gt-gradients_ad)
max_error_pos = torch.argmax(ad_diff).item()//3
print()
print("Max error at index", max_error_pos)
print(" Position:", positions[max_error_pos].cpu().numpy())
print(" Density GT:", densities_gt[max_error_pos].cpu().numpy())
print(" Density Network:", densities_network[max_error_pos].cpu().numpy())
print(" Gradient GT:", gradients_gt[max_error_pos].cpu().numpy())
print(" Gradient Network:", gradients_network[max_error_pos].cpu().numpy())
print(" Gradient FD:", gradients_fd[max_error_pos].cpu().numpy())
print(" Gradient AD:", gradients_ad[max_error_pos].cpu().numpy())
#_ = grad_network_fd(positions[max_error_pos:max_error_pos+1,:], *network_args)
# Render images
ref_camera = ln.get_default_camera()
ref = ln.render_reference(ref_camera, 512, 512)
imageio.imwrite('test-reference.png', LoadedModel.convert_image(ref))
stepsize = 0.002
img_network = ln.render_network(ref_camera, 512, 512, LoadedModel.EvaluationMode.PYTORCH32, stepsize)
imageio.imwrite('test-network.png', LoadedModel.convert_image(img_network))
img_grid = ln.render_network(ref_camera, 512, 512, LoadedModel.EvaluationMode.PYTORCH32, stepsize,
override_network=VolumeEvaluationWithGradient(volume_interpolation))
imageio.imwrite('test-grid.png', LoadedModel.convert_image(img_grid))
print("Done")
if __name__ == '__main__':
main()
#test()
|
1694342
|
import _plotly_utils.basevalidators
class CopyZstyleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name='copy_zstyle',
parent_name='scatter3d.error_x',
**kwargs
):
super(CopyZstyleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
role=kwargs.pop('role', 'style'),
**kwargs
)
|
1694375
|
import datetime
import pytest
import requests
import responses
from app.internal.weather_forecast import get_weather_data
HISTORY_URL = "https://visual-crossing-weather.p.rapidapi.com/history"
FORECAST_URL = "https://visual-crossing-weather.p.rapidapi.com/forecast"
RESPONSE_FROM_MOCK = {"locations": {"Tel Aviv": {"values": [
{"mint": 6, "maxt": 17.2, "conditions": "Partially cloudy"}]}}}
ERROR_RESPONSE_FROM_MOCK = {"message": "Error Text"}
DATA_GET_WEATHER = [
pytest.param(2020, "tel aviv", 0, marks=pytest.mark.xfail,
id="invalid input type"),
pytest.param(datetime.datetime(day=4, month=4, year=2070), "tel aviv", 0,
marks=pytest.mark.xfail, id="year out of range"),
pytest.param(datetime.datetime(day=4, month=4, year=2020),
"tel aviv", 0, id="basic historical test"),
pytest.param(datetime.datetime(day=1, month=1, year=2030), "tel aviv", 0,
id="basic historical forecast test - prior in current year"),
pytest.param(datetime.datetime(day=31, month=12, year=2030),
"tel aviv", 0, id="basic historical forecast test - future"),
pytest.param(datetime.datetime(day=29, month=2, year=2024), "tel aviv",
0, id="basic historical forecast test"),
]
@pytest.mark.parametrize('requested_date, location, expected',
DATA_GET_WEATHER)
def test_get_weather_data(requested_date, location, expected, requests_mock):
requests_mock.get(HISTORY_URL, json=RESPONSE_FROM_MOCK)
output = get_weather_data(requested_date, location)
assert output['Status'] == expected
def test_get_forecast_weather_data(requests_mock):
temp_date = datetime.datetime.now() + datetime.timedelta(days=2)
response_from_mock = RESPONSE_FROM_MOCK
response_from_mock["locations"]["Tel Aviv"]["values"][0]["datetimeStr"] = \
temp_date.isoformat()
requests_mock.get(FORECAST_URL, json=response_from_mock)
output = get_weather_data(temp_date, "tel aviv")
assert output['Status'] == 0
def test_location_not_found(requests_mock):
requested_date = datetime.datetime(day=10, month=1, year=2020)
requests_mock.get(HISTORY_URL, json=ERROR_RESPONSE_FROM_MOCK)
output = get_weather_data(requested_date, "neo")
assert output['Status'] == -1
@responses.activate
def test_historical_no_response_from_api():
requested_date = datetime.datetime(day=11, month=1, year=2020)
responses.add(responses.GET, HISTORY_URL, status=500)
requests.get(HISTORY_URL)
output = get_weather_data(requested_date, "neo")
assert output['Status'] == -1
@responses.activate
def test_historical_exception_from_api():
requested_date = datetime.datetime(day=12, month=1, year=2020)
with pytest.raises(requests.exceptions.ConnectionError):
requests.get(HISTORY_URL)
output = get_weather_data(requested_date, "neo")
assert output['Status'] == -1
@responses.activate
def test_forecast_exception_from_api():
requested_date = datetime.datetime.now() + datetime.timedelta(days=3)
with pytest.raises(requests.exceptions.ConnectionError):
requests.get(FORECAST_URL)
output = get_weather_data(requested_date, "neo")
assert output['Status'] == -1
|
1694392
|
import math
import numpy
from fudge.core.math.pdf import UnivariatePDF, WignerDistribution, PoissonDistribution, \
BrodyDistribution, GOEDistribution
from xData import XYs
"""
Collection of fake level sequence generators, including the One True Generator: getGOEFakeLevelSequence
"""
fakeLevelStyles = ['wigner', 'picket fence', 'poisson', 'brody', 'goe']
def getFakeLevelSequence(E0=0.0, aveD=None, numLevels=None, style='goe', BrodyW=0.5, levelDensity=None):
"""
wrapper function
    :param E0: see documentation for individual styles; note: for GOE it is best to use E0=0.0
:param aveD: see documentation for individual styles
:param numLevels: see documentation for individual styles
:param style: one of ['wigner','picket fence', 'poisson', 'brody', 'goe']
:param BrodyW: see documentation for individual styles
:param levelDensity: see documentation for individual styles
:return:
"""
# To generate non-GOE levels, we need to know the average level spacing, the starting energy and the number
# of levels to build. We can get this information a number of different ways. These are the options:
# * Easiest way: aveD, E0 and numLevels
# * aveD, E0 and upperBound, compute numLevels = 1+(upperBound - E0)/D
# * levelDensity and E0. Internally getFakeLevelSequence converts this to aveD and numLevels.
    if style != 'goe':
        # only fall back to the levelDensity when a value was not given explicitly
        if aveD is None:
            if levelDensity is not None:
                aveD = 1 / levelDensity.evaluate(0.5 * (levelDensity.domainMin + levelDensity.domainMax))
            else:
                raise ValueError("Not enough information to determine aveD")
        if numLevels is None:
            if levelDensity is not None:
                numLevels = int(1 + (levelDensity.domainMax - E0) / aveD)
            else:
                raise ValueError("Not enough information to determine numLevels")
# To generate GOE levels, we need basically the same information, but the GOE routine uses the levelDensity
# instead of the aveD for a more finely tuned reproduction of the fake level scheme.
if style == 'goe':
if levelDensity is None:
raise ValueError("For GOE style, need a level density")
if numLevels is None:
numLevels = numpy.random.poisson(levelDensity.integrate().value)
print("setting numLevels to", numLevels)
if style == 'wigner':
return getWignerFakeLevelSequence(E0, 1/levelDensity)
elif style == 'picket fence':
return getPicketFenceFakeLevelSequence(E0, aveD, numLevels)
elif style == 'poisson':
return getPoissonFakeLevelSequence(E0, aveD, numLevels)
elif style == 'brody':
return getBrodyFakeLevelSequence(E0, aveD, numLevels, BrodyW)
elif style == 'goe':
return getGOEFakeLevelSequence(E0, numLevels, levelDensity)
else:
raise ValueError("style must be one of " + str(fakeLevelStyles))
def sample_goe_matrix(ndim, scale=1.0):
"""
Create a ndim x ndim GOE "Hamiltonian" matrix following <NAME>'s algorithm.
:param ndim: an int, the dimension of the matrix
:param scale: an energy scale factor. This sets the variance of the Gaussian used to generate the GOE matrix
:return: a numpy ndim x ndim GOE
"""
goe = numpy.random.normal(loc=0.0, scale=scale, size=(ndim, ndim))
goe = (goe + goe.T) / math.sqrt(2.0)
for i in range(ndim):
goe[i, i] *= math.sqrt(2.0)
return goe
def sample_goe_eigenvalues(ndim, normalize=True):
"""
Generate a GOE spectrum by making a GOE "Hamiltonian" and then diagonalizing it
Note: on Dave's MacBook Pro, we can't handle too many levels (<500 safer). Don't know why
:param ndim: number of eigenvalues (equivalently the size of the GOE "Hamiltonian")
:param normalize: Ensure the eigenmodes are on the interval [-1,1] instead of [-ndim/2,ndim/2]
:return:list of eigenvalues
"""
if normalize:
scale = 0.5 / math.sqrt(ndim)
else:
scale = 1.0
sample = numpy.linalg.eigvals(sample_goe_matrix(ndim, scale=scale))
sample.sort()
return sample
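# --- Illustrative usage sketch (not part of the original module) ---
# Draw a small normalized GOE spectrum; with normalize=True the scale is chosen so the
# eigenvalues fall roughly on [-1, 1] (the semi-circle support). ndim=50 is an assumption.
def _example_goe_spectrum(ndim=50):
    return sample_goe_eigenvalues(ndim, normalize=True)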
def getWignerFakeLevelSequence(E0, levelSpacing):
"""
Random Matrix Theory (RMT) predicts that the Nearest Neighbor Spacing Distribution (NNSD) will have the
shape of a Wigner distribution. If you make levels by drawing spacings from a Wigner distribution,
by construction you have the correct NNSD.
:param E0: first level of the sequence
:param levelSpacing: energy-dependent level spacing, assumed to be in same units as E0
:return: the list of level energies
"""
result = [E0]
WD = WignerDistribution()
domainMax = levelSpacing.domainMax
while True:
s = WD.drawSample()
result.append(result[-1] + s * levelSpacing.evaluate(result[-1]))
if result[-1] > domainMax: break
result.pop() # last point is beyond the levelSpacing domain
return result
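# --- Illustrative usage sketch (not part of the original module) ---
# The generator only needs an object with evaluate() and domainMax, so a constant-spacing
# stand-in suffices for a quick check; a real fudge XYs1d level spacing would normally be used.
class _ConstantSpacing:
    def __init__(self, spacing, domainMax):
        self.spacing = spacing
        self.domainMax = domainMax
    def evaluate(self, energy):
        return self.spacing

def _example_wigner_sequence():
    return getWignerFakeLevelSequence(E0=0.0, levelSpacing=_ConstantSpacing(0.1, 5.0))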
def getModifiedWignerFakeLevelSequence(E0, levelSpacing):
"""
Because creating levels using the getWignerFakeLevelSequence above gets the right NNSD,
but fails to create the longer range spectral correlations (e.g. spectral stiffness),
I kludged together a scheme that builds some stiffness into the generated sequence.
:param E0: first level of the sequence
:param levelSpacing: energy-dependent level spacing, assumed to be in same units as E0
:return: the list of level energies
"""
result = [E0]
WD = WignerDistribution()
domainMax = levelSpacing.domainMax
while True:
s = WD.drawSample()
result.append(result[-1] + s * levelSpacing.evaluate(result[-1]))
if result[-1] > domainMax: break
result.append(result[-1] + (1. - s) * levelSpacing.evaluate(result[-1]))
if result[-1] > domainMax: break
result.pop() # last point is beyond the levelSpacing domain
return result
def getPicketFenceFakeLevelSequence(E0, aveD, numLevels):
"""
An evenly spaced set of fake resonances, separated by energy aveD. This gets the
level repulsion right, but otherwise it is so so wrong.
:param E0: first level of the sequence
:param aveD: average level spacing, assumed to be in same units as E0
:param numLevels: number of levels to manufacture
:return: the list of level energies
"""
return [E0 + s * aveD for s in range(numLevels)]
def getPoissonFakeLevelSequence(E0, aveD, numLevels):
"""
A Poisson distribution keeps the energies positive, but ignores the level repulsion built into sampling
from a Wigner distribution.
:param E0: first level of the sequence
:param aveD: average level spacing, assumed to be in same units as E0
:param numLevels: number of levels to manufacture
:return: the list of level energies
"""
result = [E0]
for s in PoissonDistribution().drawSample(numLevels - 1):
result.append(result[-1] + s * aveD)
return result
def getBrodyFakeLevelSequence(E0, aveD, numLevels, BrodyW=0.5):
"""
Brody's scheme to interpolate between Wigner and Poisson distributions (need reference, I lost it)
:param E0: first level of the sequence
:param aveD: average level spacing, assumed to be in same units as E0
:param BrodyW: trade-off parameter
:param numLevels: number of levels to manufacture
:return: the list of level energies
"""
result = [E0]
for s in BrodyDistribution(w=BrodyW).drawSample(numLevels - 1):
result.append(result[-1] + s * aveD)
return result
def getCLDInverse(levelDensity):
if not isinstance(levelDensity, XYs.XYs1d):
raise TypeError("For GOE, levelDensity must be a XYs instance")
DOPLOTS = False
# Normalized level density as a PDF
totalNumLevels = int(float(levelDensity.integrate().value))
fakePDF = levelDensity / totalNumLevels
fakePDF.axes[0].label = "PDF(E)"
if DOPLOTS:
fakePDF.plot(title='fake PDF')
# Convert it to a CDF
fakeCDF = fakePDF.indefiniteIntegral()
fakeCDF.axes[0].unit = ""
fakeCDF.axes[0].label = "CDF(E)"
if DOPLOTS:
fakeCDF.plot(title="fake CDF")
# Invert it to make a probability -> energy converter
return fakeCDF.inverse()
def getGOEFakeLevelSequence(E0, totalNumLevels, levelDensity, paddingNumLevels=100, keepLevelsAboveDomainMax=False,
DOPLOTS=False):
"""
This generates a sequence of energies that is both consistent with the GOE of RMT and has the correct
secular variation contained in the levelDensity
:param E0: Levels are generated with E0 as an energy offset. E0=0 is recommended for GOE realizations
:param totalNumLevels: Number of fake levels to generate
:param levelDensity: Level density we're trying to emulate with a fake GOE inspired level scheme.
Should be an XYs1d
:param paddingNumLevels: We want to grab eigenvalues from the center of the GOE spectrum to avoid edge effects.
This is the number of extra eigenvalues to generate to pad the ends of the GOE spectrum.
:param keepLevelsAboveDomainMax: If True, keep any levels above the domainMax of the levelDensity.
If False, the resulting level scheme may (or may not) have the same number of
levels as totalNumLevels.
:param DOPLOTS: If True, make some plots
:return: the list of level energies
"""
# Get the GOE single eigenvalue distribution as a PDF, this is the secular variation of
# the eigenvalues of the full GOE
if E0 != 0:
print("WARNING: non-zero offset is discouraged when using GOE realizations")
a = 1.0
# Sample a GOE set of "energies"
dimension = totalNumLevels + 2*paddingNumLevels
goeSample = numpy.linalg.eigvalsh( sample_goe_matrix( dimension, scale=a/2.0/math.sqrt(dimension) ) )
# Because we only have a finite number of levels, the GOE distribution never completely matches the
# Wigner semi-circle law (encoded in the GOEDistribution class). The biggest deviations from the semi-circle
# law happen on the fringes. To combat this, we discard paddingNumLevels from either end of the
# simulated spectrum. We also have to discard the same region of the semi-circle distribution.
if paddingNumLevels > 0:
goeSample = goeSample[paddingNumLevels:-paddingNumLevels]
goeSingleLevels = GOEDistribution(a, domainMin=goeSample[0], domainMax=goeSample[-1])
goeSingleLevels = goeSingleLevels.normalize()
goeSingleLevels = UnivariatePDF(XYs1d=goeSingleLevels)
else:
goeSingleLevels = GOEDistribution(a, domainMin=-a, domainMax=a)
if DOPLOTS:
goeSingleLevels.plot()
goeSingleLevels.cdf.plot()
    # The list of x's has the full correlations of the GOE in it, except the gross secular variation.
    # We'll add that back using the real level density; this "refolds" the correct secular variation back in.
result = unfoldThenRefoldLevelSequence(
originalSequence=goeSample,
originalSequenceSecularVariationDistribution=goeSingleLevels,
finalSecularVariationFunction=levelDensity,
offset=E0,
DOPLOTS=DOPLOTS)
# This is Monte Carlo, so we can accidentally sample things that are too high. Let's get rid of them
if not keepLevelsAboveDomainMax:
result = [x for x in result if x <= levelDensity.domainMax]
return result
def unfoldThenRefoldLevelSequence(
originalSequence,
originalSequenceSecularVariationDistribution,
finalSecularVariationFunction,
offset=0.0,
DOPLOTS=False):
"""
    Unfold an original energy sequence's secular variation, then add back a different secular variation.
This is useful for taking say a GOE level sequence and then stretching it to match a known experimental one.
The final level sequence then has the large energy scale secular variation of the known experimental one with the
short range statistical variation of the original sequence.
:param originalSequence: Original sequence of "energies" whose secular variance is given by
originalSequenceSecularVariationDistribution
:param originalSequenceSecularVariationDistribution: a distribution inheriting from fudge.core.pdf.UnivariatePDF
:param finalSecularVariationFunction: A level density like object that encodes the final variation.
:param offset: add an energy offset to the final list of energies
:param DOPLOTS: Flag to trigger plotting of intermediate distributions (useful for debugging)
:return:
"""
# Check types of inputs
if not isinstance(originalSequenceSecularVariationDistribution, UnivariatePDF):
raise TypeError("Original sequence's secular variation must be given by an instance of UnivariatePDF")
if DOPLOTS:
originalSequenceSecularVariationDistribution.plot()
originalSequenceSecularVariationDistribution.cdf.plot()
# Get the original "energies" and remove the secular variation; this is the "unfolding" step
xList = list(map(originalSequenceSecularVariationDistribution.cdf.evaluate, originalSequence))
# Need to invert the cumulative level distribution for refolding
invFakeCDF = getCLDInverse(finalSecularVariationFunction)
if DOPLOTS:
invFakeCDF.plot() # title='lookup table')
    # We'll add that back using the real level density; this "refolds" the correct secular variation back in.
result = [offset + invFakeCDF.evaluate(x) for x in xList]
result.sort()
return result
|
1694403
|
import unittest
import os
import zipfile
from collections import OrderedDict
from ddt import ddt, data, unpack
from testfixtures import tempdir
from testfixtures import TempDirectory
import provider.article_processing as article_processing
@ddt
class TestArticleProcessing(unittest.TestCase):
def setUp(self):
self.directory = TempDirectory()
self.file_name_map_19405 = {
"elife-19405-inf1-v1": "elife-19405-inf1",
"elife-19405-fig1-v1": "elife-19405-fig1",
"elife-19405-v1.pdf": "elife-19405.pdf",
"elife-19405-v1.xml": "elife-19405.xml",
}
def tearDown(self):
TempDirectory.cleanup_all()
    # input: s3 archive zip file names (name) and last-modified dates
    # expected output: the file name with the highest version (the -v[number]- part),
    # breaking ties by the latest last-modified date/time
@unpack
@data(
{
"input": [
{
"name": "elife-16747-vor-v1-20160831000000.zip",
"last_modified": "2017-05-18T09:04:11.000Z",
},
{
"name": "elife-16747-vor-v1-20160831132647.zip",
"last_modified": "2016-08-31T06:26:56.000Z",
},
],
"expected": "elife-16747-vor-v1-20160831000000.zip",
},
{
"input": [
{
"name": "elife-16747-vor-v1-20160831000000.zip",
"last_modified": "2017-05-18T09:04:11.000Z",
},
{
"name": "elife-16747-vor-v1-20160831132647.zip",
"last_modified": "2016-08-31T06:26:56.000Z",
},
{
"name": "elife-16747-vor-v2-20160831000000.zip",
"last_modified": "2015-01-05T00:20:50.000Z",
},
],
"expected": "elife-16747-vor-v2-20160831000000.zip",
},
)
def test_latest_archive_zip_revision(self, input, expected):
output = article_processing.latest_archive_zip_revision(
"16747", input, "elife", "vor"
)
self.assertEqual(output, expected)
@unpack
@data(
{
"input": [
{
"name": "elife-16747-vor-v2-20160831000000.zip",
"last_modified": "this_is_junk_for_testing",
}
],
"expected": None,
}
)
    def test_latest_archive_zip_revision_exception(self, input, expected):
        output = article_processing.latest_archive_zip_revision(
            "16747", input, "elife", "vor"
        )
        # a junk last_modified value means no valid revision can be determined
        self.assertEqual(output, expected)
def test_convert_xml(self):
xml_file = "elife-19405-v1.xml"
file_name_map = self.file_name_map_19405
expected_xml_contains = "elife-19405.pdf"
with open("tests/test_data/pmc/" + xml_file, "rb") as fp:
path = self.directory.write(xml_file, fp.read())
xml_file_path = os.path.join(self.directory.path, xml_file)
article_processing.convert_xml(
xml_file=xml_file_path, file_name_map=file_name_map
)
with open(xml_file_path, "r") as fp:
xml_content = fp.read()
self.assertTrue(expected_xml_contains in xml_content)
def test_convert_xml_extra_xml(self):
xml_file = "tests/test_data/xml_sample_with_directive.xml"
file_name_map = {}
with open(xml_file, "rb") as open_file:
expected = open_file.read()
path = self.directory.write(xml_file, expected)
xml_file_path = path
article_processing.convert_xml(
xml_file=xml_file_path, file_name_map=file_name_map
)
with open(xml_file_path, "rb") as open_file:
xml_string = open_file.read()
self.assertEqual(xml_string, expected)
def test_verify_rename_files(self):
(
verified,
renamed_list,
not_renamed_list,
) = article_processing.verify_rename_files(self.file_name_map_19405)
self.assertTrue(verified)
self.assertEqual(len(renamed_list), 4)
self.assertEqual(len(not_renamed_list), 0)
def test_verify_rename_files_not_renamed(self):
(
verified,
renamed_list,
not_renamed_list,
) = article_processing.verify_rename_files({"elife-19405-v1.xml": None})
self.assertFalse(verified)
self.assertEqual(len(renamed_list), 0)
self.assertEqual(len(not_renamed_list), 1)
@data(
(
[
"elife-99999.xml",
"elife-99999-fig1-v1.tif",
"elife-99999-video1.mp4",
"elife-99999-video2.mp4",
],
OrderedDict(
[
("elife-99999.xml", "elife-99999.xml"),
("elife-99999-fig1-v1.tif", "elife-99999-fig1.tif"),
("elife-99999-video1.mp4", "elife-99999-video1.mp4"),
("elife-99999-video2.mp4", "elife-99999-video2.mp4"),
]
),
),
)
@unpack
def test_stripped_file_name_map(self, file_names, expected_file_name_map):
file_name_map = article_processing.stripped_file_name_map(file_names)
self.assertEqual(file_name_map, expected_file_name_map)
def test_rename_files_remove_version_number(self):
zip_file = "elife-19405-vor-v1-20160802113816.zip"
zip_file_path = "tests/test_data/pmc/" + zip_file
files_dir = "tmp_dir"
output_dir = "output_didr"
# create and set directories
self.directory.makedir(output_dir)
self.directory.makedir(files_dir)
files_dir_path = os.path.join(self.directory.path, files_dir)
output_dir_path = os.path.join(self.directory.path, output_dir)
# unzip the test data
with zipfile.ZipFile(zip_file_path, "r") as zip_file:
zip_file.extractall(files_dir_path)
# now can run the function we are testing
article_processing.rename_files_remove_version_number(
files_dir_path, output_dir_path
)
@unpack
@data(
("elife", "1", "7", None, "elife-01-00007.zip"),
("elife", "1", "7", "1", "elife-01-00007.r1.zip"),
)
def test_new_pmc_zip_filename(self, journal, volume, fid, revision, expected):
self.assertEqual(
article_processing.new_pmc_zip_filename(journal, volume, fid, revision),
expected,
)
if __name__ == "__main__":
unittest.main()
|
1694415
|
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
handler404 = 'public_api_site.api.views.default'
urlpatterns = patterns('',
# Documentation says location vs locations - adding until it's figured out
# Judas (ston) 6/29
(r'^api/location[/]$', 'public_api_site.api.views.locations'),
(r'^api/speakers[/]$', 'public_api_site.api.views.speakers'),
(r'^api/talks[/]$', 'public_api_site.api.views.talks'),
(r'^api/interests[/]$', 'public_api_site.api.views.interests'),
(r'^api/stats[/]$', 'public_api_site.api.views.stats'),
(r'^api/users[/]$', 'public_api_site.api.views.users'),
(r'^$','public_api_site.api.views.default'),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/(.*)', admin.site.root),
)
|
1694429
|
from urllib.parse import urlsplit, SplitResult, urlunsplit
from more_itertools import flatten
def parse_links(funding: dict):
if not funding:
return None
def parse_element(platform: str, element: str):
if platform == 'github':
return {'href': f'https://github.com/sponsors/{element}', 'text': f'{element} on Github'}
elif platform == 'patreon':
return {'href': f'https://www.patreon.com/{element}', 'text': f'{element} on Patreon'}
elif platform == 'ko_fi':
return {'href': f'https://ko-fi.com/{element}', 'text': f'{element} on Ko-fi'}
elif platform == 'custom':
comps = urlsplit(element)
if not comps.scheme:
netloc, path = (comps.path.split('/', 1) + [''])[:2]
comps = SplitResult(scheme='https', netloc=netloc, path=path, query=comps.query,
fragment=comps.fragment)
return {'href': urlunsplit(comps), 'text': f'{comps.netloc}/{comps.path}'.rstrip('/')}
else:
return None
def parse_item(platform, value):
        if isinstance(value, str):
return [parse_element(platform, value)]
return map(lambda e: parse_element(platform, e), value)
return list(filter(lambda x: x, flatten(map(lambda item: parse_item(item[0], item[1]), funding.items()))))
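# --- Illustrative usage sketch (not part of the original module) ---
# The funding dict keys (github/patreon/ko_fi/custom) appear to mirror GitHub's FUNDING.yml
# platforms; the handle and URL below are placeholders for demonstration only.
def _example_parse_links():
    funding = {'github': ['octocat'], 'custom': ['example.org/donate']}
    return parse_links(funding)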
|
1694448
|
import json
from requests import HTTPError
class MailjetError(Exception):
def __init__(self, *args, **kwargs):
self.email_message = kwargs.pop('email_message', None)
self.payload = kwargs.pop('payload', None)
if isinstance(self, HTTPError):
self.response = kwargs.get('response', None)
else:
self.response = kwargs.pop('response', None)
super(MailjetError, self).__init__(*args, **kwargs)
def __str__(self):
parts = [
" ".join([str(arg) for arg in self.args]),
self.describe_send(),
self.describe_response(),
]
return "\n".join(filter(None, parts))
def describe_send(self):
if self.payload is None:
return None
description = "Sending a message"
try:
to_emails = [to['email'] for to in self.payload['message']['to']]
description += " to %s" % ','.join(to_emails)
except KeyError:
pass
try:
description += " from %s" % self.payload['message']['from_email']
except KeyError:
pass
return description
def describe_response(self):
if self.response is None:
return None
description = "Mailjet API response %d: %s" % (self.response.status_code, self.response.reason)
try:
json_response = self.response.json()
description += "\n" + json.dumps(json_response, indent=2)
except (AttributeError, KeyError, ValueError):
try:
description += " " + self.response.text
except AttributeError:
pass
return description
class MailjetAPIError(MailjetError, HTTPError):
def __init__(self, *args, **kwargs):
super(MailjetAPIError, self).__init__(*args, **kwargs)
if self.response is not None:
self.status_code = self.response.status_code
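# --- Illustrative usage sketch (not part of the original module) ---
# The payload layout (message/to/from_email) is inferred from describe_send() above,
# not from Mailjet documentation; the addresses are placeholders.
if __name__ == '__main__':
    err = MailjetError(
        "Sending failed",
        payload={'message': {'to': [{'email': 'recipient@example.com'}],
                             'from_email': 'sender@example.com'}})
    print(err)  # prints the args, then a one-line description of the attempted send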
|
1694490
|
from django.test import TestCase
from stave_backend.models import Document, Project
class DocumentTestCase(TestCase):
def setUp(self):
a = Project.objects.create(name="project1",
ontology='I am ontology')
Document.objects.create(name="doc1",
textPack='I am text pack',
project = a)
def test_project_relationship(self):
"""test project relationship"""
doc1 = Document.objects.get(name="doc1")
project1 = Project.objects.get(name="project1")
project1.documents.first()
self.assertEqual(doc1, project1.documents.first())
self.assertEqual(project1, doc1.project)
|
1694551
|
from axltoolkit import AxlToolkit
from credentials import user, password, platform_user, platform_password
# Be sure to update the credentials.py file with your AXL User and Platform User credentials
# Put the IP address of your UCM Publisher
ucm_ip = '172.18.106.58'
axl = AxlToolkit(username=user, password=password, server_ip=ucm_ip, tls_verify=False, version='12.5')
# Example of using Thick AXL to retrieve User Info
# Replace this with a valid User ID from your UCM cluster:
userid = 'pgiralt'
result = axl.get_user(userid)
print(result)
userdata = result['return']['user']
print("Your name is " + userdata['firstName'])
# Example of using thin AXL to retrieve User Info:
query = "select * from enduser where userid = 'pgiralt'"
result = axl.run_sql_query(query)
print(result)
result = axl.list_phone(name='CSF%')
print(result)
|
1694576
|
from django import test
from django.core import mail
from subscriptions import models
from subscriptions.tests.factories import SubscriptionFactory
from subscriptions.utils import send_notifications
class SendNotificationsTest(test.TestCase):
def setUp(self):
SubscriptionFactory.create_batch(size=10)
def test_notifications_sent(self):
send_notifications(subscription_type=models.Subscription.DISCREPANCIES)
self.assertEqual(len(mail.outbox), 10)
|
1694581
|
from os.path import dirname, join
from pybamview.tests import __file__ as test_directory
def test_data(path):
return join(dirname(test_directory), 'data', path)
|
1694584
|
from conans import ConanFile, CMake
class AversivePlusPlusConanModule(ConanFile):
name = "teensy"
version = "0.1"
exports = "CMakeLists.txt"
settings = "os", "compiler", "arch", "target"
requires = "cmake-toolchain/0.1@AversivePlusPlus/dev"
generators = "cmake"
def imports(self):
self.copy("toolchain.cmake")
def source(self):
self.run("git clone https://github.com/PaulStoffregen/cores.git --depth 1")
def build(self):
cmake = CMake(self.settings)
toolchain = '-DCMAKE_TOOLCHAIN_FILE=toolchain.cmake'
self.run('cmake "%s" %s %s' % (self.conanfile_directory, cmake.command_line, toolchain))
self.run('cmake --build . %s' % cmake.build_config)
def package(self):
self.copy("*.hpp", src="cores/teensy3", dst="include")
self.copy("*.h", src="cores/teensy3", dst="include")
self.copy("*.a", src="lib", dst="lib")
self.copy("mk20dx256.ld", src="cores/teensy3", dst="linker")
def package_info(self):
self.cpp_info.libs = ["teensy"]
self.cpp_info.defines += ["F_CPU=48000000", "USB_SERIAL", "LAYOUT_US_ENGLISH", "USING_MAKEFILE"]
self.cpp_info.defines += ["__MK20DX256__", "ARDUINO=10613", "TEENSYDUINO=132"]
self.cpp_info.cflags += ["-Wall", "-g", "-Os"]
self.cpp_info.cppflags += ["-Wall", "-g", "-Os", "-std=gnu++0x", "-felide-constructors", "-fno-exceptions", "-fno-rtti"]
self.cpp_info.exelinkflags += ["-Wl,--gc-sections,--defsym=__rtc_localtime=0", "--specs=nano.specs"]
self.cpp_info.exelinkflags += ["-T{}/linker/mk20dx256.ld".format(self.package_folder)]
|
1694639
|
from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.conf import settings
from django.urls import reverse
from donor.models import *
def donor_details(request, donor_id):
context = {}
donor = Donor.objects.get(id=donor_id)
context['donor'] = donor
context['contribs2018'] = donor.contributions_2018.filter(active=True).order_by('-contribution_amount')
context['contribs2020'] = donor.contributions_2020.filter(active=True).order_by('-contribution_amount')
return render(request, 'donor/donor_details.html', context)
|
1694678
|
import pyutilib.misc
import pyutilib.component.core
import os
import sys
import logging
currdir = sys.argv[-2] + os.sep
logging.basicConfig(level=logging.DEBUG)
pyutilib.component.core.PluginGlobals.get_env().load_services(
path=currdir + "plugins1")
pyutilib.misc.setup_redirect(currdir + "load1.out")
if sys.argv[-1] == "json":
pyutilib.component.core.PluginGlobals.pprint(json=True)
else:
pyutilib.component.core.PluginGlobals.pprint()
pyutilib.misc.reset_redirect()
|
1694721
|
# read n (the count, not otherwise needed) and a line of integers; print them sorted
n = int(input())
a = sorted(int(x) for x in input().split())
print(' '.join(str(x) for x in a))
|
1694748
|
import FWCore.ParameterSet.Config as cms
import RecoTracker.MkFit.mkFitGeometryESProducer_cfi as mkFitGeometryESProducer_cfi
import RecoTracker.MkFit.mkFitSiPixelHitConverter_cfi as mkFitSiPixelHitConverter_cfi
import RecoTracker.MkFit.mkFitSiStripHitConverter_cfi as mkFitSiStripHitConverter_cfi
import RecoTracker.MkFit.mkFitEventOfHitsProducer_cfi as mkFitEventOfHitsProducer_cfi
import RecoTracker.MkFit.mkFitSeedConverter_cfi as mkFitSeedConverter_cfi
import RecoTracker.MkFit.mkFitIterationConfigESProducer_cfi as mkFitIterationConfigESProducer_cfi
import RecoTracker.MkFit.mkFitProducer_cfi as mkFitProducer_cfi
import RecoTracker.MkFit.mkFitOutputConverter_cfi as mkFitOutputConverter_cfi
import RecoLocalTracker.SiStripRecHitConverter.SiStripRecHitConverter_cfi as SiStripRecHitConverter_cfi
def customizeHLTIter0ToMkFit(process):
# mkFit needs all clusters, so switch off the on-demand mode
process.hltSiStripRawToClustersFacility.onDemand = False
process.hltSiStripRecHits = SiStripRecHitConverter_cfi.siStripMatchedRecHits.clone(
ClusterProducer = "hltSiStripRawToClustersFacility",
StripCPE = "hltESPStripCPEfromTrackAngle:hltESPStripCPEfromTrackAngle",
doMatching = False,
)
# Use fourth hit if one is available
process.hltIter0PFLowPixelSeedsFromPixelTracks.includeFourthHit = cms.bool(True)
process.hltMkFitGeometryESProducer = mkFitGeometryESProducer_cfi.mkFitGeometryESProducer.clone()
process.hltIter0PFlowCkfTrackCandidatesMkFitSiPixelHits = mkFitSiPixelHitConverter_cfi.mkFitSiPixelHitConverter.clone(
hits = "hltSiPixelRecHits",
ttrhBuilder = ":hltESPTTRHBWithTrackAngle",
)
process.hltIter0PFlowCkfTrackCandidatesMkFitSiStripHits = mkFitSiStripHitConverter_cfi.mkFitSiStripHitConverter.clone(
rphiHits = "hltSiStripRecHits:rphiRecHit",
stereoHits = "hltSiStripRecHits:stereoRecHit",
ttrhBuilder = ":hltESPTTRHBWithTrackAngle",
minGoodStripCharge = dict(refToPSet_ = 'HLTSiStripClusterChargeCutLoose'),
)
process.hltIter0PFlowCkfTrackCandidatesMkFitEventOfHits = mkFitEventOfHitsProducer_cfi.mkFitEventOfHitsProducer.clone(
beamSpot = "hltOnlineBeamSpot",
pixelHits = "hltIter0PFlowCkfTrackCandidatesMkFitSiPixelHits",
stripHits = "hltIter0PFlowCkfTrackCandidatesMkFitSiStripHits",
)
process.hltIter0PFlowCkfTrackCandidatesMkFitSeeds = mkFitSeedConverter_cfi.mkFitSeedConverter.clone(
seeds = "hltIter0PFLowPixelSeedsFromPixelTracks",
ttrhBuilder = ":hltESPTTRHBWithTrackAngle",
)
process.hltIter0PFlowTrackCandidatesMkFitConfig = mkFitIterationConfigESProducer_cfi.mkFitIterationConfigESProducer.clone(
ComponentName = 'hltIter0PFlowTrackCandidatesMkFitConfig',
config = 'RecoTracker/MkFit/data/mkfit-phase1-initialStep.json',
)
process.hltIter0PFlowCkfTrackCandidatesMkFit = mkFitProducer_cfi.mkFitProducer.clone(
pixelHits = "hltIter0PFlowCkfTrackCandidatesMkFitSiPixelHits",
stripHits = "hltIter0PFlowCkfTrackCandidatesMkFitSiStripHits",
eventOfHits = "hltIter0PFlowCkfTrackCandidatesMkFitEventOfHits",
seeds = "hltIter0PFlowCkfTrackCandidatesMkFitSeeds",
config = ('', 'hltIter0PFlowTrackCandidatesMkFitConfig'),
minGoodStripCharge = dict(refToPSet_ = 'HLTSiStripClusterChargeCutLoose'),
)
process.hltIter0PFlowCkfTrackCandidates = mkFitOutputConverter_cfi.mkFitOutputConverter.clone(
seeds = "hltIter0PFLowPixelSeedsFromPixelTracks",
mkFitEventOfHits = "hltIter0PFlowCkfTrackCandidatesMkFitEventOfHits",
mkFitPixelHits = "hltIter0PFlowCkfTrackCandidatesMkFitSiPixelHits",
mkFitStripHits = "hltIter0PFlowCkfTrackCandidatesMkFitSiStripHits",
mkFitSeeds = "hltIter0PFlowCkfTrackCandidatesMkFitSeeds",
tracks = "hltIter0PFlowCkfTrackCandidatesMkFit",
ttrhBuilder = ":hltESPTTRHBWithTrackAngle",
propagatorAlong = ":PropagatorWithMaterialParabolicMf",
propagatorOpposite = ":PropagatorWithMaterialParabolicMfOpposite",
)
process.HLTDoLocalStripSequence += process.hltSiStripRecHits
replaceWith = (process.hltIter0PFlowCkfTrackCandidatesMkFitSiPixelHits +
process.hltIter0PFlowCkfTrackCandidatesMkFitSiStripHits +
process.hltIter0PFlowCkfTrackCandidatesMkFitEventOfHits +
process.hltIter0PFlowCkfTrackCandidatesMkFitSeeds +
process.hltIter0PFlowCkfTrackCandidatesMkFit +
process.hltIter0PFlowCkfTrackCandidates)
process.HLTIterativeTrackingIteration0.replace(process.hltIter0PFlowCkfTrackCandidates, replaceWith)
process.HLT_IsoTrackHB_v4.replace(process.hltIter0PFlowCkfTrackCandidates, replaceWith)
process.HLT_IsoTrackHE_v4.replace(process.hltIter0PFlowCkfTrackCandidates, replaceWith)
return process
|
1694768
|
import torch
import torch.nn as nn
from torch import Tensor as Tensor
import torch._C as _C
class BoundedTensor(Tensor):
@staticmethod
# We need to override the __new__ method since Tensor is a C class
def __new__(cls, x, ptb, *args, **kwargs):
if isinstance(x, Tensor):
tensor = super().__new__(cls, [], *args, **kwargs)
tensor.data = x.data
tensor.requires_grad = x.requires_grad
return tensor
else:
return super().__new__(cls, x, *args, **kwargs)
def __init__(self, x, ptb):
self.ptb = ptb
def __repr__(self):
if hasattr(self, 'ptb') and self.ptb is not None:
return '<BoundedTensor: {}, {}>'.format(super().__repr__(), self.ptb.__repr__())
else:
return '<BoundedTensor: {}, no ptb>'.format(super().__repr__())
def clone(self, *args, **kwargs):
tensor = BoundedTensor(super().clone(*args, **kwargs), self.ptb)
return tensor
def _func(self, func, *args, **kwargs):
temp = func(*args, **kwargs)
new_obj = BoundedTensor([], self.ptb)
new_obj.data = temp.data
new_obj.requires_grad = temp.requires_grad
return new_obj
# Copy to other devices with perturbation
def to(self, *args, **kwargs):
return self._func(super().to, *args, **kwargs)
@classmethod
def _convert(cls, ret):
if cls is Tensor:
return ret
if isinstance(ret, Tensor):
if True:
# The current implementation does not seem to need non-leaf BoundedTensor
return ret
else:
# Enable this branch if non-leaf BoundedTensor should be kept
ret = ret.as_subclass(cls)
if isinstance(ret, tuple):
ret = tuple(cls._convert(r) for r in ret)
return ret
if torch.__version__ >= '1.7':
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if not all(issubclass(cls, t) for t in types):
return NotImplemented
with _C.DisableTorchFunction():
ret = func(*args, **kwargs)
return cls._convert(ret)
class BoundedParameter(nn.Parameter):
def __new__(cls, data, ptb, requires_grad=True):
return BoundedTensor._make_subclass(cls, data, requires_grad)
def __init__(self, data, ptb, requires_grad=True):
self.ptb = ptb
self.requires_grad = requires_grad
def __deepcopy__(self, memo):
if id(self) in memo:
return memo[id(self)]
else:
result = type(self)(self.data.clone(), self.ptb, self.requires_grad)
memo[id(self)] = result
return result
def __repr__(self):
return 'BoundedParameter containing:\n{}\n{}'.format(
self.data.__repr__(), self.ptb.__repr__())
def __reduce_ex__(self, proto):
raise NotImplementedError
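# --- Illustrative usage sketch (not part of the original module) ---
# `ptb` can be any object describing the perturbation; a SimpleNamespace stands in here
# for a real perturbation specification (e.g. an Lp-ball description).
if __name__ == '__main__':
    from types import SimpleNamespace
    x = torch.randn(2, 3)
    bt = BoundedTensor(x, SimpleNamespace(eps=0.1))
    print(bt)               # the wrapped data plus the attached ptb
    print(bt.clone().ptb)   # clone() keeps the perturbation object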
|
1694769
|
import numpy as np
from torch.optim.lr_scheduler import _LRScheduler
class LRSchedulerWithRestart(_LRScheduler):
"""Proxy learning scheduler with restarts: learning rate follows input scheduler strategy but
the strategy can restart when passed a defined number of epochs. Ideas are taken from SGDR paper.
Args:
scheduler (_LRScheduler): input lr scheduler
restart_every (int): restart input lr scheduler every `restart_every` epoch.
restart_factor (float): factor to rescale `restart_every` after each restart.
For example, if `restart_factor=0.5` then next restart occurs in half of `restart_every` epochs.
init_lr_factor (float): factor to rescale base lr after each restart.
For example, if base lr of the input scheduler is 0.01 and `init_lr_factor=0.5`, then after the restart
base lr of the input scheduler will be `0.01 * 0.5`.
Learning rate strategy formula:
```
    t[-1] = 0 # Internal epoch timer, dependent on the global epoch value
...
t[e] = t[e-1] + 1
if t[e] % restart_every == 0:
t[e] = 0
restart_every *= restart_factor
scheduler.base_lrs = scheduler.base_lrs * init_lr_factor
scheduler.last_epoch = t[e]
lr[e] = scheduler.get_lr()
```
"""
def __init__(self, scheduler, restart_every, restart_factor=1.0, init_lr_factor=1.0, verbose=False):
self.scheduler = scheduler
self.restart_every = restart_every
self.restart_factor = restart_factor
self.init_lr_factor = init_lr_factor
self._t = -1
self.verbose = verbose
# Do not call super method as optimizer is already setup by input scheduler
# super(LRSchedulerWithRestart, self).__init__(optimizer, last_epoch)
def get_lr(self):
return self.scheduler.get_lr()
def step(self, epoch=None):
self._t += 1
if self.restart_every > 0 and self.scheduler.last_epoch > 0 and \
self._t % self.restart_every == 0:
self._t = 0
self.restart_every = int(self.restart_every * self.restart_factor)
self.scheduler.base_lrs = [lr * self.init_lr_factor for lr in self.scheduler.base_lrs]
if self.verbose:
print("LRSchedulerWithRestart: restart lr at epoch %i, next restart at %i"
% (self.scheduler.last_epoch, self.scheduler.last_epoch + self.restart_every))
self.scheduler.step(self._t)
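# --- Illustrative usage sketch (not part of the original module) ---
# Wraps torch's StepLR so its schedule restarts every 10 epochs with the base lr halved;
# the toy model, optimizer and epoch count are assumptions for demonstration.
if __name__ == '__main__':
    import torch
    from torch.optim.lr_scheduler import StepLR
    model = torch.nn.Linear(2, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = LRSchedulerWithRestart(StepLR(optimizer, step_size=3, gamma=0.5),
                                       restart_every=10, init_lr_factor=0.5, verbose=True)
    for epoch in range(25):
        optimizer.step()
        scheduler.step()
        print(epoch, optimizer.param_groups[0]['lr'])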
|
1694771
|
import os
import sys
import shutil
import tools
import subprocess
import helpers
sys.path.insert(0,"../..")
def list_to_str(l):
ans=""
if len(l)==0:
return ans
for i in range(0,len(l)-1):
ans+=l[i]+"\n"
ans+=l[len(l)-1]
return ans
def test():
print "test"
def pwd():
return os.getcwd()
def cd(param):
#print "arg:"+arg
#print "param:"+param
#print(d[0])
#print len(d)
print "change current directory to "+param
os.chdir(param)
def ls(*d):
if(len(d)==0):
cur=os.getcwd()
ret=os.listdir(cur)
else:
ret=os.listdir(d[0])
return list_to_str(ret)
def cat(param):
if not os.path.exists(param):
raise Exception("No such file exists!")
f=open(param)
lines=f.readlines()
ans=""
if len(lines)==0:
return ans
for i in range(0,len(lines)-1):
ans+=lines[i]
ans+=lines[len(lines)-1]
return ans
def rm(path):
    if '*' in path:
        # treat the pattern as a shell-style glob, not a regular expression,
        # and remove matches inside the pattern's own directory
        import fnmatch
        path_dir=os.path.dirname(path) or '.'
        file_name=os.path.basename(path)
        for fn in os.listdir(path_dir):
            if fnmatch.fnmatch(fn,file_name):
                os.remove(os.path.join(path_dir,fn))
        return
    if os.path.isdir(path):
        shutil.rmtree(path)
    else:
        os.remove(path)
def grep(match,source):
ans=[]
for s in source.split('\n'):
if match in s:
ans.append(s)
return list_to_str(ans)
def redirect(source,destination):
#if os.path.isfile(destination):
f=open(destination,"w")
f.write(source)
#else:
# f=open(destination,'w')
# raise Exception("No such file "+destination)
def wc(s):
return len(s.split('\n'))
def mkdir(path):
if os.path.exists(path):
raise Exception("Directory already existed!")
else:
os.mkdir(path)
return "Directory "+path+" created!"
def touch(fn):
    if os.path.exists(fn):
        raise Exception("file "+fn+" already exists!")
    else:
        f=open(fn,"w")
        f.close()
    return "File created: "+fn
def cp(source,destination):
if not os.path.exists(source):
raise Exception("No such file exists!")
if os.path.isfile(source):
shutil.copy(source,destination)
else:
if(os.path.exists(destination) and os.path.isfile(destination)):
raise Exception("File "+destination+" exists, please specify another directory name for dir copy!")
if(not os.path.exists(destination)):
mkdir(destination)
for f in os.listdir(source):
abf=source+'\\'+f
if os.path.isdir(abf):
cp(abf,destination+'\\'+f)
else:
shutil.copy(abf,destination)
def mv(source,destination):
shutil.move(source,destination)
def sh(fn):
f=open(fn,"r")
for l in f:
tools.parse(l.strip())
def echo(*ss):
ans=""
for s in ss:
ans+=s.replace('"','')+" "
return ans
def diff(fn1,fn2):
    ans=[]
    f1=open(fn1,"r")
    f2=open(fn2,"r")
    list1=f1.readlines()
    list2=f2.readlines()
    max_len=max([len(list1),len(list2)])
    min_len=min([len(list1),len(list2)])
    for i in range(0,min_len):
        s1=list1[i]
        s2=list2[i]
        if s1!=s2:
            ans.append("line "+str(i+1)+": "+s1 +"\t"+s2)
    # report lines that exist only in the longer of the two files
    for i in range(min_len,max_len):
        if len(list1)>len(list2):
            ans.append("line "+str(i+1)+": "+list1[i]+"\t")
        else:
            ans.append("line "+str(i+1)+": "+"\t"+list2[i])
    return list_to_str(ans)
def read(*ss):
ans=""
for s in ss:
ans+=s.replace('"','')+" "
raw_input(ans)
def call(*p):
ans=""
for cmd in p:
ans+=cmd+" "
os.system(ans)
def findinfile(p,d=os.getcwd()):
ans=""
for f in os.listdir(d):
absf=d+"\\"+f
if os.path.isdir(absf):
ans=ans+findinfile(p,absf)
else:
if helpers.istext(absf):
content=open(absf,'r').readlines()
#print "search in file "+(d+f)
#contains=False
for s in content:
if p in s:
ans+=absf+"\n"
break
return ans
|
1694801
|
from tests import app
@app.route("/error-assert-newline")
def error_assert_newline():
return '<p>Hello</p>\n\n'
|
1694847
|
from pygments.lexer import RegexLexer, bygroups
from pygments.token import *
__all__ = ['shellLexer']
class shellLexer(RegexLexer):
name = 'shellLexer'
aliases = ['sL', 'lexer']
filenames = ['*.sL', '*.lexer']
tokens = {
'root': [
(r'\s+', Text),
(r'(help|unset|show|start)(.+)',
bygroups(Keyword, Name.Attribute)),
(r'(^set)(\s*)',
bygroups(Keyword, Text), 'set'),
],
'set': [
(r'(.+)(\s*)(=)(\s*)([+a-zA-Z\.\-0-9]+)',
bygroups(Name.Attribute, Text, Operator, Text, String))
]
}
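# --- Illustrative usage sketch (not part of the original module) ---
# Runs the lexer over a couple of assumed sample commands; NullFormatter just echoes
# the text, so this only exercises the tokenizer.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import NullFormatter
    sample = "set timeout = 30\nshow options\n"
    print(highlight(sample, shellLexer(), NullFormatter()))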
|
1694850
|
import os, collections
import berserker
from berserker.ext import tokenization
class BERT_Tokenizer(tokenization.FullTokenizer):
def is_cleaned(self, ch):
return ch.isspace() or len(self.basic_tokenizer._clean_text(ch)) == 0
_BERT_TOKENIZER = BERT_Tokenizer(
vocab_file=os.path.join(berserker.ASSETS_PATH, 'vocab.txt'),
do_lower_case=False
)
def convert_ids_to_token(ids):
    return _BERT_TOKENIZER.convert_ids_to_tokens(ids)
def compute_mapping(char_list, bert_tokens):
assert len(char_list) >= len(bert_tokens)
i = 0 # Loop index for bert_tokens
j = 0 # Loop index for char_list
matched_len = [0] # A list storing the number of char matched for each bert tokens, the first element is a dummy
while i < len(bert_tokens):
# Loop invariant:
assert j == sum(matched_len)
assert len(matched_len) == 1 or matched_len[-1] > 0
assert j >= 0 and j < len(char_list), (j, len(char_list), i, len(bert_tokens))
# Invisible token will be mapped to the last available bert_tokens
if _BERT_TOKENIZER.is_cleaned(char_list[j]):
matched_len[-1] += 1
j = j + 1
continue
# If current bert_token is '[UNK]'
if bert_tokens[i] == '[UNK]':
matched_len.append(1)
j = j + matched_len[-1]
i = i + 1
continue
# bert_tokens[i] is not '[UNK]' AND char_list[j] is not cleaned char,
# len(bert_tokens[i]) maybe greater than 1
bert_token = bert_tokens[i][2:] if bert_tokens[i][:2] == '##' else bert_tokens[i]
text_segment = char_list[j]
# Fetch the next len(bert_token) characters from text, ignoring bert cleaned char
l = 1
while len(text_segment) < len(bert_token):
assert j+l < len(char_list), (char_list, bert_token, j, l)
if not _BERT_TOKENIZER.is_cleaned(char_list[j+l]):
text_segment += char_list[j+l]
l += 1
if bert_token == text_segment:
matched_len.append(l)
j = j + matched_len[-1]
i = i + 1
continue
# This is an mismatch, perform a roll back until the last '[UNK]' bert token
while True:
i -= 1
if bert_tokens[i] == '[UNK]':
last_len = matched_len.pop()
matched_len[-1] += 1
j = sum(matched_len)
break
matched_len.pop()
# Match the remaining char to the last bert token
if sum(matched_len) < len(char_list):
matched_len[-1] = len(char_list) - sum(matched_len[:-1])
assert len(char_list) == sum(matched_len)
# Convert matched_len to final mapping
j = 0
i = 0
mapping = {}
for j in range(len(matched_len)):
for k in range(matched_len[j]):
mapping[i] = j if j == 0 else j-1
i += 1
return mapping
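# --- Illustrative usage sketch (not part of the original module) ---
# compute_mapping pairs every character index of the raw text with the index of the
# word-piece token that covers it; the token list below is a hand-made stand-in for
# real tokenizer output, chosen so the alignment is easy to follow.
def _example_compute_mapping():
    chars = list("ab cd")            # raw text, including the space
    tokens = ["ab", "cd"]            # pretend word-piece tokens
    return compute_mapping(chars, tokens)   # -> {0: 0, 1: 0, 2: 0, 3: 1, 4: 1}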
def _to_unpadded_bert_inputs(text, truth):
assert len(text) == len(truth)
    bert_tokens = _BERT_TOKENIZER.tokenize(text)
# Reconstruct a mapping on how each character in the text map to the
# output of bert tokenizer
#
# With this mapping, we construct a bert truth from raw text truth
# This mapping is also used for postprocessing, where we map bert output
# to prediction values for all character in the text
def _backward_map(mapping, outputs):
max_index = {}
for i, o in mapping.items():
max_index[o] = max(max_index[o], i) if o in max_index else i
inputs = [0.] * len(set(mapping.keys()))
for o, i in max_index.items():
inputs[i] = outputs[o]
return inputs
def _forward_map(mapping, inputs):
outputs = [0.] * len(set(mapping.values()))
for i in range(len(inputs)):
outputs[mapping[i]] = inputs[i]
return outputs
def _unpad_bert_outputs(bert_input, bert_output):
length = sum(bert_input["input_mask"]) - 2
bert_tokens = _BERT_TOKENIZER.convert_ids_to_tokens(bert_input["input_ids"][1:1+length])
bert_preds = bert_output["predictions"][1:1+length]
return bert_tokens, bert_preds
def _pad_bert_inputs(tokens_a, tokens_a_truth, max_seq_length):
assert len(tokens_a) == len(tokens_a_truth)
assert len(tokens_a) <= max_seq_length - 2 # Account for [CLS] and [SEP] with "- 2"
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
truths = []
tokens.append("[CLS]")
truths.append(0.)
for token, truth in zip(tokens_a, tokens_a_truth):
tokens.append(token)
truths.append(truth)
tokens.append("[SEP]")
truths.append(0.)
input_ids = _BERT_TOKENIZER.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
truths.append(0.)
segment_ids = [0] * max_seq_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(truths) == max_seq_length
return {
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids,
"truths": truths
}
def batch_preprocess(texts, max_seq_length, batch_size):
fields = ["input_ids", "input_mask", "segment_ids", "truths"]
bert_inputs = {f: [] for f in fields}
mappings = []
sizes = []
for text in texts:
bert_input, mapping, size = preprocess(text, max_seq_length)
for f in fields:
bert_inputs[f] += bert_input[f]
mappings.append(mapping)
sizes.append(size)
# pad to batch_size
while len(bert_inputs["input_ids"]) % batch_size != 0:
for f in fields:
bert_inputs[f].append([0] * max_seq_length)
return bert_inputs, mappings, sizes
# Input: single text, Output: multiple bert_inputs
# 1. Unify input from training data and test data (with or without spaces)
# 2. Convert to BERT tokens
# 3. Compute a mapping from input to bert input
# 4. Chunk by max_seq_length into multiple bert inputs
def preprocess(text, max_seq_length, truths=None):
if truths is None:
truths = [0.] * len(text)
bert_tokens = _BERT_TOKENIZER.tokenize(text)
mapping = compute_mapping([ch for ch in text], bert_tokens)
bert_truths = _forward_map(mapping, truths)
assert len(bert_tokens) == len(bert_truths)
# chunking to batch input
SEQ_LENGTH = max_seq_length - 2
bert_inputs = []
while len(bert_tokens) > 0 or len(bert_inputs) == 0:
bert_inputs.append(_pad_bert_inputs(
bert_tokens[:SEQ_LENGTH],
bert_truths[:SEQ_LENGTH],
max_seq_length
))
bert_tokens = bert_tokens[SEQ_LENGTH:]
bert_truths = bert_truths[SEQ_LENGTH:]
return {
"input_ids": [bi["input_ids"] for bi in bert_inputs],
"input_mask": [bi["input_mask"] for bi in bert_inputs],
"segment_ids": [bi["segment_ids"] for bi in bert_inputs],
"truths": [bi["truths"] for bi in bert_inputs]
}, mapping, len(bert_inputs)
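# Rough example of the chunking above (assumed numbers): with max_seq_length=8,
# SEQ_LENGTH is 6, so 14 wordpiece tokens become three padded inputs covering
# tokens 0-5, 6-11 and 12-13; the returned size is then 3 and `mapping` still
# indexes positions in the full, unchunked token sequence.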
def batch_postprocess(texts, mappings, sizes, bert_inputs, bert_outputs, max_seq_length, threshold=0.5):
assert len(bert_inputs["input_ids"]) == len(bert_outputs), (len(bert_inputs["input_ids"]), len(bert_outputs))
results = []
i = 0
for text, mapping, size in zip(texts, mappings, sizes):
bi = [{k: bert_inputs[k][i+j] for k in bert_inputs.keys()} for j in range(size)]
bo = bert_outputs[i:i+size]
results.append(postprocess(text, mapping, bi, bo, threshold))
i += size
return results
def postprocess(text, mapping, bert_inputs, bert_outputs, threshold=0.5):
assert len(bert_inputs) == len(bert_outputs), (len(bert_inputs), len(bert_outputs))
bert_preds = []
for bert_input, bert_output in zip(bert_inputs, bert_outputs):
bert_token, bert_pred = _unpad_bert_outputs(bert_input, bert_output)
bert_preds += bert_pred.tolist()
assert len(bert_preds) == len(set(mapping.values())), (len(bert_preds), len(set(mapping.values())))
preds = _backward_map(mapping, bert_preds)
assert len(text) == len(preds), (text, preds)
result = ""
for ch, pred in zip(text, preds):
result += ch
if pred >= threshold:
result += " "
return list(filter(None, result.split(" ")))
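# Small worked example of the thresholding above (hypothetical values): with
# text="abcd", preds=[0.1, 0.9, 0.2, 0.6] and threshold=0.5 the loop builds "ab cd "
# and the final split/filter returns ["ab", "cd"], i.e. a space is inserted after
# every character whose prediction reaches the threshold.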
|
1694854
|
from denoiseg.models import DenoiSeg, DenoiSegConfig
from skimage import io
import csv
import numpy as np
import pickle
import os
from os.path import join, exists
from os import makedirs as mkdir
from denoiseg.utils.seg_utils import *
from denoiseg.utils.compute_precision_threshold import measure_precision, measure_seg
import argparse
import json
def main():
os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'
parser = argparse.ArgumentParser(description="Noise2Seg headless score-on-validation-data-script.")
parser.add_argument('--temp_conf')
args = parser.parse_args()
with open(args.temp_conf) as f:
conf = json.load(f)
# load data
trainval_data = np.load(conf['train_data_path'])
val_images = trainval_data['X_val'].astype(np.float32)
val_masks = trainval_data['Y_val']
print("Shape of val_images: ", val_images.shape, ", Shape of val_masks: ", val_masks.shape)
print("Validation Data \n..................")
X_val, Y_val_masks = val_images, val_masks
# one-hot-encoding
X_val = X_val[...,np.newaxis]
Y_val = convert_to_oneHot(Y_val_masks)
print("Shape of validation images: ", X_val.shape, ", Shape of validation masks: ", Y_val.shape)
# load model
n2s_model = DenoiSeg(None, conf['model_name'], conf['basedir'])
# compute AP results
ap_threshold, validation_ap_score = n2s_model.optimize_thresholds(val_images, Y_val_masks, measure=measure_precision())
print("Average precision over all validation images at IOU = 0.5 with threshold = {}: ".format(ap_threshold), validation_ap_score)
# use ap-threshold to compute SEG-scores
predicted_ap_seg_images, ap_seg_result = n2s_model.predict_label_masks(val_images, Y_val_masks, ap_threshold,
measure=measure_seg())
print("SEG score over all validation images at IOU = 0.5 with ap-threshold = {}: ".format(ap_threshold), ap_seg_result)
# compute SEG results
seg_threshold, validation_seg_score = n2s_model.optimize_thresholds(val_images, Y_val_masks, measure=measure_seg())
print("SEG over all validation images at IOU = 0.5 with threshold = {}: ".format(seg_threshold), validation_seg_score)
with open(join(conf['basedir'], "validation_scores.csv"), mode='w') as f:
writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(['AP', validation_ap_score])
writer.writerow(['SEG', validation_seg_score])
writer.writerow(['SEG optimized for AP', ap_seg_result])
if __name__=="__main__":
main()
|
1694859
|
import torch.nn as nn
from torch_geometric.nn import RGCNConv, GraphConv
class GCN(nn.Module):
def __init__(self, g_dim, h1_dim, h2_dim, args):
super(GCN, self).__init__()
self.num_relations = 2 * args.n_speakers ** 2
self.conv1 = RGCNConv(g_dim, h1_dim, self.num_relations, num_bases=30)
self.conv2 = GraphConv(h1_dim, h2_dim)
def forward(self, node_features, edge_index, edge_norm, edge_type):
x = self.conv1(node_features, edge_index, edge_type, edge_norm=edge_norm)
x = self.conv2(x, edge_index)
return x
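# Hedged usage sketch (shapes only; the argument values are illustrative):
#   gcn = GCN(g_dim=100, h1_dim=64, h2_dim=64, args=args)  # args.n_speakers assumed set
#   out = gcn(node_features,  # [num_nodes, 100]
#             edge_index,     # [2, num_edges]
#             edge_norm,      # [num_edges]
#             edge_type)      # [num_edges], values in [0, 2 * n_speakers ** 2)
#   # out has shape [num_nodes, 64]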
|
1694872
|
import os
def ctx_run(ctx, *args, **kwargs):
kwargs["pty"] = os.name == "posix"
return ctx.run(*args, **kwargs)
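# Hedged usage note: with an invoke task context `ctx`,
#   ctx_run(ctx, "pytest -q")
# behaves like ctx.run("pytest -q", pty=True) on Linux/macOS and pty=False on
# Windows, where pseudo-terminals are not supported.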
|
1695005
|
from collections import OrderedDict
import copy
from functools import reduce
from operator import mul, itemgetter
import torch
import pyro.util
import pyro.infer.autoguide.guides
import pyro.nn.module as pyromodule
def deep_hasattr(obj, name):
try:
pyro.util.deep_getattr(obj, name)
return True
except AttributeError:
return False
deep_setattr = pyro.infer.autoguide.guides._deep_setattr
deep_getattr = pyro.util.deep_getattr
def to_pyro_module_(m, name="", recurse=True):
"""
Same as `pyro.nn.modules.to_pyro_module_` except that it also accepts a name argument and returns the modified
module following the convention in pytorch for inplace functions.
"""
if not isinstance(m, torch.nn.Module):
raise TypeError("Expected an nn.Module instance but got a {}".format(type(m)))
if isinstance(m, pyromodule.PyroModule):
if recurse:
for name, value in list(m._modules.items()):
to_pyro_module_(value)
setattr(m, name, value)
return
# Change m's type in-place.
m.__class__ = pyromodule.PyroModule[m.__class__]
m._pyro_name = name
m._pyro_context = pyromodule._Context()
m._pyro_params = OrderedDict()
m._pyro_samples = OrderedDict()
# Reregister parameters and submodules.
for name, value in list(m._parameters.items()):
setattr(m, name, value)
for name, value in list(m._modules.items()):
if recurse:
to_pyro_module_(value)
setattr(m, name, value)
return m
def to_pyro_module(m, name="", recurse=True):
return to_pyro_module_(copy.deepcopy(m), name, recurse)
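# Hedged usage sketch (assumes torch and pyro as imported above):
#   lin = torch.nn.Linear(3, 2)
#   pyro_lin = to_pyro_module(lin, name="lin")  # works on a deep copy; `lin` is unchanged
#   to_pyro_module_(lin)                        # in-place variant; `lin` is now a PyroModule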
def named_pyro_samples(pyro_module, prefix='', recurse=True):
yield from pyro_module._named_members(lambda module: module._pyro_samples.items(), prefix=prefix, recurse=recurse)
def pyro_sample_sites(pyro_module, prefix='', recurse=True):
yield from map(itemgetter(0), named_pyro_samples(pyro_module, prefix=prefix, recurse=recurse))
def prod(iterable, initial_value=1):
return reduce(mul, iterable, initial_value)
def fan_in_fan_out(weight):
# this holds for linear and conv layers, but check e.g. transposed conv
fan_in = prod(weight.shape[1:])
fan_out = weight.shape[0]
return fan_in, fan_out
def calculate_prior_std(method, weight, gain=1., mode="fan_in"):
fan_in, fan_out = fan_in_fan_out(weight)
if method == "radford":
std = fan_in ** -0.5
elif method == "xavier":
std = gain * (2 / (fan_in + fan_out)) ** 0.5
elif method == "kaiming":
fan = fan_in if mode == "fan_in" else fan_out
std = gain * fan ** -0.5
else:
raise ValueError(f"Invalid method: '{method}'. Must be one of ('radford', 'xavier', 'kaiming'.")
return torch.tensor(std, device=weight.device)
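# Worked example (follows directly from the formulas above): a torch.nn.Linear(100, 50)
# weight has shape (50, 100), so fan_in=100 and fan_out=50, giving
#   "radford": std = 100 ** -0.5 = 0.1
#   "xavier" : std = (2 / 150) ** 0.5 ≈ 0.115   (gain=1)
#   "kaiming": std = 100 ** -0.5 = 0.1          (gain=1, mode="fan_in")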
|
1695022
|
from flask import url_for
from ward import test
from tests.fixtures import test_client
@test("GET /users")
def _(client=test_client):
resp = client.get(url_for("users.index", id=1))
assert resp.status_code == 200
assert "users" in resp.get_data(as_text=True)
@test("GET /users/1")
def _(client=test_client):
resp = client.get(url_for("users.show", id=1))
assert resp.status_code == 200
assert "user 1" in resp.get_data(as_text=True)
@test("GET /users/new")
def _(client=test_client):
resp = client.get(url_for("users.new"))
assert resp.status_code == 200
assert "form" in resp.get_data(as_text=True)
@test("POST /users")
def _(client=test_client):
resp = client.post(url_for("users.create"))
assert resp.status_code == 201
assert "user created" in resp.get_data(as_text=True)
@test("GET /users/1/edit")
def _(client=test_client):
resp = client.get(url_for("users.edit", id=1))
assert resp.status_code == 200
assert "form" in resp.get_data(as_text=True)
@test("PUT /users/1")
def _(client=test_client):
resp = client.put(url_for("users.update", id=1))
assert resp.status_code == 200
assert "updated user: 1" in resp.get_data(as_text=True)
@test("DELETE /users/1")
def _(client=test_client):
resp = client.delete(url_for("users.delete", id=1))
assert resp.status_code == 200
assert "delete user: 1" in resp.get_data(as_text=True)
|
1695065
|
from django.db import models
from django.db.models import permalink
from django.conf import settings
from basic.people.models import Person
class Genre(models.Model):
"""Genre model"""
title = models.CharField(max_length=100)
slug = models.SlugField(unique=True)
class Meta:
db_table = 'movie_genres'
ordering = ('title',)
def __unicode__(self):
return '%s' % self.title
@permalink
def get_absolute_url(self):
return ('movie_genre_detail', None, { 'slug': self.slug })
class Studio(models.Model):
"""Studio model"""
title = models.CharField(max_length=100)
prefix = models.CharField(max_length=20, blank=True)
slug = models.SlugField(unique=True)
website = models.URLField(blank=True)
class Meta:
db_table = 'movie_studios'
ordering = ('title',)
def __unicode__(self):
return '%s' % self.full_title
@property
def full_title(self):
return '%s %s' % (self.prefix, self.title)
@permalink
def get_absolute_url(self):
return ('movie_studio_detail', None, { 'slug': self.slug })
class Movie(models.Model):
"""Movie model"""
title = models.CharField(max_length=255)
prefix = models.CharField(max_length=20, blank=True)
subtitle = models.CharField(blank=True, max_length=255)
slug = models.SlugField(unique=True)
directors = models.ManyToManyField(Person, limit_choices_to={'person_types__slug__exact': 'director'}, blank=True)
studio = models.ForeignKey(Studio, blank=True, null=True)
released = models.DateField(blank=True, null=True)
asin = models.CharField(blank=True, max_length=100)
cover = models.FileField(upload_to='films', blank=True)
review = models.TextField(blank=True)
genre = models.ManyToManyField(Genre, blank=True)
class Meta:
db_table = 'movies'
ordering = ('title',)
def __unicode__(self):
return '%s' % self.full_title
@property
def full_title(self):
return '%s %s' % (self.prefix, self.title)
@permalink
def get_absolute_url(self):
return ('movie_detail', None, { 'slug': self.slug })
@property
def amazon_url(self):
try:
return 'http://www.amazon.com/dp/%s/?%s' % (self.asin, settings.AMAZON_AFFILIATE_EXTENTION)
except AttributeError:
return 'http://www.amazon.com/dp/%s/' % self.asin
@property
def cover_url(self):
return '%s%s' % (settings.MEDIA_URL, self.cover)
|
1695081
|
import cloudscraper
import asyncio
import aiohttp
from cloudscraper.exceptions import (
CloudflareCode1020,
CloudflareIUAMError,
CloudflareChallengeError,
CloudflareCaptchaProvider
)
import re
from copy import deepcopy
from urllib.parse import urlparse, urljoin
import sys
from cloudscraper.user_agent import User_Agent
from cloudscraper import CipherSuiteAdapter
import copyreg
import ssl
class AsyncCloudScraper(cloudscraper.CloudScraper):
def __init__(self, *args, **kwargs):
self.s = None
self.debug = kwargs.pop('debug', False)
self.delay = kwargs.pop('delay', None)
self.cipherSuite = kwargs.pop('cipherSuite', None)
self.ssl_context = kwargs.pop('ssl_context', None)
self.interpreter = kwargs.pop('interpreter', 'native')
self.recaptcha = kwargs.pop('recaptcha', {})
self.requestPreHook = kwargs.pop('requestPreHook', None)
self.requestPostHook = kwargs.pop('requestPostHook', None)
self.allow_brotli = kwargs.pop(
'allow_brotli',
True if 'brotli' in sys.modules.keys() else False
)
self.user_agent = User_Agent(
allow_brotli=self.allow_brotli,
browser=kwargs.pop('browser', None)
)
self._solveDepthCnt = 0
self.solveDepth = kwargs.pop('solveDepth', 3)
super(AsyncCloudScraper, self).__init__(*args, **kwargs)
# pylint: disable=E0203
if 'requests' in self.headers['User-Agent']:
# ------------------------------------------------------------------------------- #
# Set a random User-Agent if no custom User-Agent has been set
# ------------------------------------------------------------------------------- #
self.headers = self.user_agent.headers
if not self.cipherSuite:
self.cipherSuite = self.user_agent.cipherSuite
if isinstance(self.cipherSuite, list):
self.cipherSuite = ':'.join(self.cipherSuite)
self.mount(
'https://',
CipherSuiteAdapter(
cipherSuite=self.cipherSuite,
ssl_context=self.ssl_context
)
)
# purely to allow us to pickle dump
copyreg.pickle(ssl.SSLContext, lambda obj: (obj.__class__, (obj.protocol,)))
async def make_session_if_not_exists(self):
if not self.s:
self.s = aiohttp.ClientSession()
async def request(self, method, url, *args, **kwargs):
await self.make_session_if_not_exists()
# pylint: disable=E0203
if kwargs.get('proxies') and kwargs.get('proxies') != self.proxies:
self.proxies = kwargs.get('proxies')
# ------------------------------------------------------------------------------- #
# Pre-Hook the request via user defined function.
# ------------------------------------------------------------------------------- #
if self.requestPreHook:
(method, url, args, kwargs) = self.requestPreHook(
self,
method,
url,
*args,
**kwargs
)
# ------------------------------------------------------------------------------- #
# Make the request via aiohttp.
# ------------------------------------------------------------------------------- #
# response = await self.decodeBrotli(
# super(AsyncCloudScraper, self).request(method, url, *args, **kwargs)
# )
response = await self.s.request(method, url, *args, **kwargs)
# ------------------------------------------------------------------------------- #
# Debug the request via the Response object.
# ------------------------------------------------------------------------------- #
if self.debug:
self.debugRequest(response)
# ------------------------------------------------------------------------------- #
# Post-Hook the request aka Post-Hook the response via user defined function.
# ------------------------------------------------------------------------------- #
if self.requestPostHook:
response = self.requestPostHook(self, response)
if self.debug:
self.debugRequest(response)
# Check if Cloudflare anti-bot is on
if await self.is_Challenge_Request(response):
# ------------------------------------------------------------------------------- #
# Try to solve the challenge and send it back
# ------------------------------------------------------------------------------- #
if self._solveDepthCnt >= self.solveDepth:
_ = self._solveDepthCnt
self.simpleException(
cloudscraper.CloudflareLoopProtection,
"!!Loop Protection!! We have tried to solve {} time(s) in a row.".format(_)
)
self._solveDepthCnt += 1
response = await self.Challenge_Response(response, **kwargs)
else:
if response.status not in {429, 503}:
self._solveDepthCnt = 0
return response
async def Challenge_Response(self, resp, **kwargs):
if self.is_Captcha_Challenge(resp):
# ------------------------------------------------------------------------------- #
# double down on the request as some websites are only checking
# if cfuid is populated before issuing Captcha.
# ------------------------------------------------------------------------------- #
resp = self.decodeBrotli(
super(AsyncCloudScraper, self).request(resp.method, str(resp.url), **kwargs)
)
if not self.is_Captcha_Challenge(resp):
return resp
# ------------------------------------------------------------------------------- #
# if no Captcha provider raise a runtime error.
# ------------------------------------------------------------------------------- #
if not self.recaptcha or not isinstance(self.recaptcha, dict) or not self.recaptcha.get('provider'):
self.simpleException(
CloudflareCaptchaProvider,
"Cloudflare Captcha detected, unfortunately you haven't loaded an anti Captcha provider "
"correctly via the 'recaptcha' parameter."
)
# ------------------------------------------------------------------------------- #
# if provider is return_response, return the response without doing anything.
# ------------------------------------------------------------------------------- #
if self.recaptcha.get('provider') == 'return_response':
return resp
self.recaptcha['proxies'] = self.proxies
submit_url = self.Captcha_Challenge_Response(
self.recaptcha.get('provider'),
self.recaptcha,
await resp.text(),
str(resp.url)
)
else:
# ------------------------------------------------------------------------------- #
# Cloudflare requires a delay before solving the challenge
# ------------------------------------------------------------------------------- #
if not self.delay:
try:
delay = float(
re.search(
r'submit\(\);\r?\n\s*},\s*([0-9]+)',
await resp.text()
).group(1)
) / float(1000)
if isinstance(delay, (int, float)):
self.delay = delay
except (AttributeError, ValueError):
self.simpleException(
CloudflareIUAMError,
"Cloudflare IUAM possibility malformed, issue extracing delay value."
)
await asyncio.sleep(self.delay)
# ------------------------------------------------------------------------------- #
body = await resp.text()
submit_url = self.IUAM_Challenge_Response(
body,
str(resp.url),
self.interpreter
)
# ------------------------------------------------------------------------------- #
# Send the Challenge Response back to Cloudflare
# ------------------------------------------------------------------------------- #
if submit_url:
def updateAttr(obj, name, newValue):
try:
obj[name].update(newValue)
return obj[name]
except (AttributeError, KeyError):
obj[name] = {}
obj[name].update(newValue)
return obj[name]
cloudflare_kwargs = deepcopy(kwargs)
cloudflare_kwargs['allow_redirects'] = False
cloudflare_kwargs['data'] = updateAttr(
cloudflare_kwargs,
'data',
submit_url['data']
)
urlParsed = urlparse(str(resp.url))
cloudflare_kwargs['headers'] = updateAttr(
cloudflare_kwargs,
'headers',
{
'Origin': '{}://{}'.format(urlParsed.scheme, urlParsed.netloc),
'Referer': str(resp.url)
}
)
challengeSubmitResponse = await self.request(
'POST',
submit_url['url'],
**cloudflare_kwargs
)
# ------------------------------------------------------------------------------- #
# Return response if Cloudflare is doing content pass through instead of 3xx
# else request with redirect URL also handle protocol scheme change http -> https
# ------------------------------------------------------------------------------- #
if not str(challengeSubmitResponse.status)[0] == '3':
return challengeSubmitResponse
else:
cloudflare_kwargs = deepcopy(kwargs)
cloudflare_kwargs['headers'] = updateAttr(
cloudflare_kwargs,
'headers',
{'Referer': str(challengeSubmitResponse.url)}
)
if not urlparse(challengeSubmitResponse.headers['Location']).netloc:
redirect_location = urljoin(
str(challengeSubmitResponse.url),
challengeSubmitResponse.headers['Location']
)
else:
redirect_location = challengeSubmitResponse.headers['Location']
return await self.request(
resp.method,
redirect_location,
**cloudflare_kwargs
)
# ------------------------------------------------------------------------------- #
# We shouldn't be here...
# Re-request the original query and/or process again....
# ------------------------------------------------------------------------------- #
return await self.request(resp.method, str(resp.url), **kwargs)
# ------------------------------------------------------------------------------- #
async def is_Challenge_Request(self, resp):
if self.is_Firewall_Blocked(resp):
self.simpleException(
CloudflareCode1020,
'Cloudflare has blocked this request (Code 1020 Detected).'
)
if self.is_New_IUAM_Challenge(resp):
self.simpleException(
CloudflareChallengeError,
'Detected the new Cloudflare challenge.'
)
if self.is_Captcha_Challenge(resp) or await self.is_IUAM_Challenge(resp):
if self.debug:
print('Detected Challenge.')
return True
return False
@staticmethod
async def is_IUAM_Challenge(resp):
try:
return (
resp.headers.get('Server', '').startswith('cloudflare')
and resp.status in [429, 503]
and re.search(
r'<form .*?="challenge-form" action="/.*?__cf_chl_jschl_tk__=\S+"',
await resp.text(),
re.M | re.S
)
)
except AttributeError:
pass
return False
|
1695107
|
import numpy as np
import numpy.testing as npt
from stumpy import stamp, core
import pytest
import naive
test_data = [
(
np.array([9, 8100, -60, 7], dtype=np.float64),
np.array([584, -11, 23, 79, 1001, 0, -19], dtype=np.float64),
),
(
np.random.uniform(-1000, 1000, [8]).astype(np.float64),
np.random.uniform(-1000, 1000, [64]).astype(np.float64),
),
]
substitution_values = [np.nan, np.inf]
substitution_locations = [(slice(0, 0), 0, -1, slice(1, 3), [0, 3])]
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_stamp_mass_PI(T_A, T_B):
m = 3
trivial_idx = 2
zone = int(np.ceil(m / 2))
Q = T_B[trivial_idx : trivial_idx + m]
M_T, Σ_T = core.compute_mean_std(T_B, m)
ref_P, ref_I, ref_left_I, ref_right_I = naive.mass(
Q, T_B, m, trivial_idx=trivial_idx, excl_zone=zone, ignore_trivial=True
)
comp_P, comp_I = stamp._mass_PI(
Q, T_B, M_T, Σ_T, trivial_idx=trivial_idx, excl_zone=zone
)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
comp_left_P, comp_left_I = stamp._mass_PI(
Q, T_B, M_T, Σ_T, trivial_idx=trivial_idx, excl_zone=zone, left=True
)
npt.assert_almost_equal(ref_left_I, comp_left_I)
comp_right_P, comp_right_I = stamp._mass_PI(
Q, T_B, M_T, Σ_T, trivial_idx=trivial_idx, excl_zone=zone, right=True
)
npt.assert_almost_equal(ref_right_I, comp_right_I)
def test_stamp_int_input():
with pytest.raises(TypeError):
T = np.arange(10)
stamp(T, T, 5, ignore_trivial=True)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_stamp_self_join(T_A, T_B):
m = 3
zone = int(np.ceil(m / 2))
ref_mp = naive.stamp(T_B, m, exclusion_zone=zone)
comp_mp = stamp.stamp(T_B, T_B, m, ignore_trivial=True)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp[:, :2], comp_mp)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_stamp_A_B_join(T_A, T_B):
m = 3
ref_mp = naive.stamp(T_A, m, T_B=T_B)
comp_mp = stamp.stamp(T_A, T_B, m)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp[:, :2], comp_mp)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("substitute_B", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_stamp_nan_inf_self_join(T_A, T_B, substitute_B, substitution_locations):
m = 3
T_B_sub = T_B.copy()
for substitution_location_B in substitution_locations:
T_B_sub[:] = T_B[:]
T_B_sub[substitution_location_B] = substitute_B
zone = int(np.ceil(m / 2))
ref_mp = naive.stamp(T_B_sub, m, exclusion_zone=zone)
comp_mp = stamp.stamp(T_B_sub, T_B_sub, m, ignore_trivial=True)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp[:, :2], comp_mp)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("substitute_A", substitution_values)
@pytest.mark.parametrize("substitute_B", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_stamp_nan_inf_A_B_join(
T_A, T_B, substitute_A, substitute_B, substitution_locations
):
m = 3
T_A_sub = T_A.copy()
T_B_sub = T_B.copy()
for substitution_location_B in substitution_locations:
for substitution_location_A in substitution_locations:
T_A_sub[:] = T_A[:]
T_B_sub[:] = T_B[:]
T_A_sub[substitution_location_A] = substitute_A
T_B_sub[substitution_location_B] = substitute_B
ref_mp = naive.stamp(T_A_sub, m, T_B=T_B_sub)
comp_mp = stamp.stamp(T_A_sub, T_B_sub, m)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp[:, :2], comp_mp)
def test_stamp_nan_zero_mean_self_join():
T = np.array([-1, 0, 1, np.inf, 1, 0, -1])
m = 3
zone = int(np.ceil(m / 2))
ref_mp = naive.stamp(T, m, exclusion_zone=zone)
comp_mp = stamp.stamp(T, T, m, ignore_trivial=True)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp[:, :2], comp_mp)
|
1695146
|
from ..theming.Theme import Theme
from ..theming.SimpleColorScale import SimpleColorScale
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import numpy as np
def figure(values, theme: Theme, color_scale: SimpleColorScale):
""" Show a map of convolutional filters """
if values.shape[2] < 12:
num_maps = values.shape[2]
title = 'Filters'
values_to_show = values
titles = [str(i) for i in range(num_maps)]
else:
num_maps = 12
title = 'Filters (top %d out of %d)' % (num_maps, values.shape[2])
values_var = np.var(values.reshape(-1, values.shape[2]), axis=0)
topn_idx = np.argpartition(values_var, -num_maps, axis=0)[-1:-(num_maps+1):-1]
values_to_show = values[:, :, topn_idx]
titles = [str(i) for i in topn_idx]
# Number of columns on map depending on the kernel size
if values_to_show.shape[1] < 4:
num_cols = 3
elif values_to_show.shape[1] < 6:
num_cols = 2
else:
num_cols = 1
num_rows = max(num_cols, int(np.ceil(num_maps / num_cols)))
fig = make_subplots(rows=num_rows, cols=num_cols, subplot_titles=titles,
shared_xaxes=True, shared_yaxes=True,
horizontal_spacing=0.02, vertical_spacing=0.06)
# Draw filters as subplots
for i in range(num_maps):
fig.add_trace(go.Heatmap(z=values_to_show[:, :, i], coloraxis="coloraxis"),
row=(i // num_cols) + 1, col=(i % num_cols) + 1)
fig.update_layout(margin=theme.bottom_figure_margins,
title=dict(text=title, font=dict(size=14)),
coloraxis=color_scale.as_dict(),
template=theme.plotly,
font=dict(size=12))
return fig
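# Illustrative walk-through (assumed tensor, not from the original): for filters of
# shape (5, 5, 32) there are more than 12 maps, so the 12 highest-variance filters are
# selected; a kernel width of 5 gives num_cols=2 and num_rows=max(2, ceil(12/2))=6,
# i.e. a 6x2 grid of heatmaps sharing one color axis.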
|
1695152
|
from __future__ import print_function
import keras.backend as K
import keras.losses as losses
import keras.optimizers as optimizers
import numpy as np
from keras.callbacks import ModelCheckpoint
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Input, RepeatVector, Reshape, Flatten, Add
from keras.layers.embeddings import Embedding
from keras.layers.merge import Concatenate, Multiply
from keras.losses import binary_crossentropy
from keras.models import Model, Sequential
from keras.layers.pooling import GlobalAveragePooling2D
from keras.optimizers import Adam
from matplotlib import pyplot as plt
from .callbacks import *
from .pretrain_image_gan import *
from .planner import *
class ConditionalImageGan(PretrainImageGan):
'''
Version of the sampler that only produces results conditioned on a
particular action; this version does not bother trying to learn a separate
distribution for each possible state.
This one generates:
- image
- arm command
- gripper command
'''
def __init__(self, *args, **kwargs):
'''
As in the other models, we call super() to parse arguments from the
command line and set things like our optimizer and learning rate.
Parameters:
-----------
taskdef: definition of the problem used to create a task model
'''
super(ConditionalImageGan, self).__init__(*args, **kwargs)
self.PredictorCb = ImageWithFirstCb
self.rep_size = 256
self.num_transforms = 3
self.do_all = True
self.save_encoder_decoder = self.retrain
self.noise_iters = 2
def _makePredictor(self, features):
# =====================================================================
# Create many different image decoders
(images, arm, gripper) = features
img_shape, image_size, arm_size, gripper_size = self._sizes(
images,
arm,
gripper)
# =====================================================================
# Load the image decoders
img_in = Input(img_shape,name="predictor_img_in")
img0_in = Input(img_shape,name="predictor_img0_in")
arm_in = Input((arm_size,))
gripper_in = Input((gripper_size,))
arm_gripper = Concatenate()([arm_in, gripper_in])
label_in = Input((1,))
next_option_in = Input((1,), name="next_option_in")
next_option2_in = Input((1,), name="next_option2_in")
ins = [img0_in, img_in, next_option_in, next_option2_in]
encoder = self._makeImageEncoder(img_shape, perm_drop=True)
decoder = self._makeImageDecoder(self.hidden_shape, perm_drop=True)
LoadEncoderWeights(self, encoder, decoder, gan=True)
# create input for controlling noise output if that's what we decide
# that we want to do
if self.use_noise:
z1 = Input((self.noise_dim,), name="z1_in")
z2 = Input((self.noise_dim,), name="z2_in")
ins += [z1, z2]
h = encoder([img0_in, img_in])
# =====================================================================
# Actually get the right outputs
y = Flatten()(OneHot(self.num_options)(next_option_in))
y2 = Flatten()(OneHot(self.num_options)(next_option2_in))
x = h
tform = self._makeTransform(perm_drop=True)
l = [h, y, z1] if self.use_noise else [h, y]
x = tform(l)
l = [x, y2, z2] if self.use_noise else [x, y2]
x2 = tform(l)
image_out, image_out2 = decoder([x]), decoder([x2])
# =====================================================================
# Save
self.transform_model = tform
# =====================================================================
# Make the discriminator
image_discriminator = self._makeImageDiscriminator(img_shape)
self.discriminator = image_discriminator
image_discriminator.trainable = False
is_fake = image_discriminator([
img0_in, img_in,
next_option_in, next_option2_in,
image_out, image_out2])
# =====================================================================
# Create generator model to train
lfn = self.loss
predictor = Model(ins, [image_out, image_out2])
predictor.compile(
loss=[lfn, lfn], # ignored since we don't train G
optimizer=self.getOptimizer())
self.generator = predictor
# =====================================================================
# And adversarial model
loss = wasserstein_loss if self.use_wasserstein else "binary_crossentropy"
weights = [0.1, 0.1, 1.] if self.use_wasserstein else [100., 100., 1.]
model = Model(ins, [image_out, image_out2, is_fake])
model.compile(
loss=['mae', 'mae', loss],
loss_weights=weights,
optimizer=self.getOptimizer())
self.model = model
self.discriminator.summary()
self.model.summary()
return predictor, model, model, ins, h
def _getData(self, *args, **kwargs):
features, targets = GetAllMultiData(self.num_options, *args, **kwargs)
[I, q, g, oin, label, q_target, g_target,] = features
tt, o1, v, qa, ga, I_target = targets
# Create the next image including input image
I0 = I[0,:,:,:]
length = I.shape[0]
I0 = np.tile(np.expand_dims(I0,axis=0),[length,1,1,1])
# Extract the next goal
I_target2, o2 = GetNextGoal(I_target, o1)
if not self.validate:
return [I0, I, o1, o2], [ I_target, I_target2 ]
else:
features = [I0, I, o1, o2, oin]
o1_1h = ToOneHot(o1, self.num_options)
o2_1h = ToOneHot(o2, self.num_options)
return (features,
[I_target, I_target2, o1_1h, v, qa, ga, o2_1h])
def _makeImageDiscriminator(self, img_shape):
'''
Create the image-only discriminator that judges current/goal image pairs.
Params:
-------
img_shape: shape of the image to encode
'''
img0 = Input(img_shape,name="img0_encoder_in")
img = Input(img_shape,name="img_encoder_in")
img_goal = Input(img_shape,name="goal_encoder_in")
img_goal2 = Input(img_shape,name="goal2_encoder_in")
option = Input((1,),name="disc_options")
option2 = Input((1,),name="disc2_options")
ins = [img0, img, option, option2, img_goal, img_goal2]
dr = self.dropout_rate
# common arguments
kwargs = { "dropout_rate" : dr,
"padding" : "same",
"lrelu" : True,
"bn" : False,
"perm_drop" : True,
}
x0 = AddConv2D(img0, 64, [4,4], 1, **kwargs)
xobs = AddConv2D(img, 64, [4,4], 1, **kwargs)
xg1 = AddConv2D(img_goal, 64, [4,4], 1, **kwargs)
xg2 = AddConv2D(img_goal2, 64, [4,4], 1, **kwargs)
#x1 = Add()([x0, xobs, xg1])
#x2 = Add()([x0, xg1, xg2])
x1 = Add()([xobs, xg1])
x2 = Add()([xg1, xg2])
# -------------------------------------------------------------
y = OneHot(self.num_options)(option)
y = AddDense(y, 64, "lrelu", dr, perm_drop=True)
x1 = TileOnto(x1, y, 64, (64,64), add=True)
x1 = AddConv2D(x1, 64, [4,4], 2, **kwargs)
# -------------------------------------------------------------
y = OneHot(self.num_options)(option2)
y = AddDense(y, 64, "lrelu", dr, perm_drop=True)
x2 = TileOnto(x2, y, 64, (64,64), add=True)
x2 = AddConv2D(x2, 64, [4,4], 2, **kwargs)
#x = Concatenate()([x1, x2])
x = x2
x = AddConv2D(x, 128, [4,4], 2, **kwargs)
x = AddConv2D(x, 256, [4,4], 2, **kwargs)
if self.use_wasserstein:
x = Flatten()(x)
x = AddDense(x, 1, "linear", 0., output=True, bn=False)
else:
x = AddConv2D(x, 1, [1,1], 1, 0., "same", activation="sigmoid",
bn=False)
x = GlobalAveragePooling2D()(x)
#x = Flatten()(x)
#x = AddDense(x, 1, "sigmoid", 0., output=True, bn=False, perm_drop=True)
discrim = Model(ins, x, name="image_discriminator")
self.lr *= 2.
loss = wasserstein_loss if self.use_wasserstein else "binary_crossentropy"
discrim.compile(loss=loss, optimizer=self.getOptimizer())
self.lr *= 0.5
self.image_discriminator = discrim
return discrim
|
1695155
|
from copy import copy
from dataclasses import dataclass, fields, is_dataclass
from typing import TypeVar, Any, Generic, List, Optional, Tuple, Union, cast
T = TypeVar("T")
class Empty:
def __repr__(self) -> str:
return "<empty>"
empty = Empty()
class ImmerframeError(RuntimeError):
pass
class NoAttributeToCallError(ImmerframeError):
pass
class ProduceError(ImmerframeError):
pass
class HandleTypeError(ImmerframeError):
pass
@dataclass(frozen=True)
class El:
type: str # getattr|getitem|setattr|call
key: Any = empty
value: Any = empty
args: Any = empty
kwargs: Any = empty
class Path(List[El]):
def __init__(self) -> None:
self.op: Union[str, Empty] = empty
self.other: Any = empty
super().__init__()
class Proxy(Generic[T]):
def __init__(self, value: T = None) -> None:
self._value = value
self._return_value: T = empty
if value is not None:
self._return_value = copy(self._value)
self._paths: List[Path] = []
self._current_path = Path()
def __repr__(self) -> str:
return f"<Proxy of: {self._value}>"
def __enter__(self) -> Tuple[T, T]: # the typing here is a lie on-purpose
return cast(T, self), self._return_value
def __exit__(self, type, value, tb):
final_value = produce(self)
v = self._return_value
if isinstance(v, list):
v.clear()
v.extend(final_value)
elif isinstance(v, (dict, set)):
v.clear()
v.update(final_value)
elif is_dataclass(v):
for field in fields(v):
value = getattr(final_value, field.name)
setattr(v, field.name, value)
else: # assume attrs
import attr  # imported lazily here; only needed for attrs-based classes
for field in attr.fields(v.__class__):
value = getattr(final_value, field.name)
setattr(v, field.name, value)
def _terminate_current_path(self) -> None:
self._paths.append(self._current_path)
self._current_path = Path()
def __getattr__(self, key: str) -> "Proxy":
self._current_path.append(El(type="getattr", key=key))
return self
def __getitem__(self, key: Any) -> "Proxy":
self._current_path.append(El(type="getitem", key=key))
return self
def __setattr__(self, key: str, value: Any) -> None:
if key in {
"_value",
"_return_value",
"_paths",
"_current_path",
"_terminate_current_path",
}:
self.__dict__[key] = value
return
self._current_path.append(El(type="getattr", key=key))
self._current_path.append(El(type="setattr", value=value))
self._terminate_current_path()
def __setitem__(self, key: Any, value: Any) -> None:
self._current_path.append(El(type="getitem", key=key))
self._current_path.append(El(type="setitem", value=value))
self._terminate_current_path()
def __call__(self, *args: Any, **kwargs: Any) -> None:
if not self._current_path:
raise NoAttributeToCallError("cannot call an unmodified Proxy object")
prev_path = self._current_path.pop()
if prev_path.type != "getattr":
raise NoAttributeToCallError("can only call methods on known attributes")
el = El(type="call", key=prev_path.key, args=args, kwargs=kwargs)
self._current_path.append(el)
self._terminate_current_path()
# TODO: fill in all the magic methods
def __add__(self, other: Any) -> None:
self._current_path.pop()
self._current_path.op = "__add__"
self._current_path.other = other
def __sub__(self, other: Any) -> None:
self._current_path.pop()
self._current_path.op = "__sub__"
self._current_path.other = other
def __mul__(self, other: Any) -> None:
self._current_path.pop()
self._current_path.op = "__mul__"
self._current_path.other = other
def __truediv__(self, other: Any) -> None:
self._current_path.pop()
self._current_path.op = "__truediv__"
self._current_path.other = other
def _safe_getitem(obj: Any, key: Any) -> Any:
try:
return obj[key]
except (KeyError, IndexError):
return empty
def _get(obj: Any, el: El) -> Any:
gets = {"getattr": getattr, "getitem": _safe_getitem}
return gets[el.type](obj, el.key)
def produce(proxy: Proxy, obj: Optional[T] = None) -> T:
if obj is None:
obj = proxy._value
for path_ in proxy._paths:
op, other = path_.op, path_.other
*path, final = path_
chain = [obj]
for el in path:
*_, tip = chain
chain.append(_get(tip, el))
tip = chain.pop()
if final.type in {"setattr", "setitem"}:
if op is empty:
value = final.value
else:
value = getattr(tip, op)(other)
elif final.type == "call":
# shallow copy, then run whatever mutatey function
value = copy(tip)
getattr(value, final.key)(*final.args, **final.kwargs)
else:
raise ProduceError("final path appears no have no effect")
for inner_obj, el in reversed(list(zip(chain, path))):
value = _copy_and_set(inner_obj, el, value)
obj = value
return obj
def _copy_and_set(obj: T, el: El, value: Any) -> T:
new = copy(obj)
if isinstance(obj, (dict, list)):
new[el.key] = value
return new
setattr(new, el.key, value)
return new
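# Hedged usage sketch of the draft/produce workflow defined above:
#   data = [1, 2, 3]
#   with Proxy(data) as (draft, result):
#       draft.append(4)
#   # `result` now equals [1, 2, 3, 4]; the list passed to Proxy is left untouched
#   # because produce() applies the recorded operations to shallow copies.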
|
1695209
|
from dataclasses import dataclass
from imix.utils.registry import Registry
from .default import root_path
import os
MODEL_CFG = Registry('model_cfg')
@dataclass
class ModelConfig:
model: dict
task: list
@dataclass
@MODEL_CFG.register_module()
class LXMERT(ModelConfig):
model = dict(type='LxmertBot', weight=os.path.join(root_path, 'model_pth/lxmert_vqa.pth'))
task = [
dict(type='vqa', answer_table='/home/datasets/mix_data/lxmert/vqa/trainval_label2ans.json'),
]
|
1695210
|
from amadeus.client.decorator import Decorator
class FlightAvailabilities(Decorator, object):
def post(self, body):
'''
Get available seats in different fare classes
.. code-block:: python
amadeus.shopping.availability.flight_availabilities.post(body)
:param body: the parameters to send to the API
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed
'''
return self.client.post(
'/v1/shopping/availability/flight-availabilities', body)
|
1695250
|
from copy import deepcopy
class callback(object):
def __init__(self, var_names):
self.store = {v: [] for v in var_names}
def __call__(self, params):
for p in self.store.keys():
if p in params.keys():
self.store[p].append(deepcopy(params[p]))
def __getitem__(self, key):
return self.store[key]
def get_keys(self):
return self.store.keys()
|
1695280
|
from flask import Flask
from app.routes.routes import blueprint
from app.auth.auth import auth_blueprint
from app.fine_tune.fine_tune import fine_tune_blueprint
from app.select_tracks.select_tracks import select_blueprint
from app.result.result import result_blueprint
from app.home.home import home_blueprint
from app.loading.loading import loading_blueprint
from app.error.error import error_blueprint
def create_app():
"""
Creating and returning the app
"""
app = Flask(__name__)
app.config['SECRET_KEY'] = 'JUSTARANDOMKEY'
app.register_blueprint(blueprint)
app.register_blueprint(home_blueprint)
app.register_blueprint(auth_blueprint)
app.register_blueprint(fine_tune_blueprint)
app.register_blueprint(select_blueprint)
app.register_blueprint(result_blueprint)
app.register_blueprint(loading_blueprint)
app.register_blueprint(error_blueprint)
return app
|
1695283
|
import codecs
import sys
from antlr4.InputStream import InputStream
class StdinStream(InputStream):
def __init__(self, encoding:str='ascii', errors:str='strict') -> None:
bytes = sys.stdin.buffer.read()
data = codecs.decode(bytes, encoding, errors)
super().__init__(data)
|
1695284
|
import json
import oandapyV20
import oandapyV20.endpoints.accounts as accounts
import oandapyV20.endpoints.instruments as instruments
import oandapyV20.endpoints.orders as orders
import oandapyV20.endpoints.positions as positions
import oandapyV20.endpoints.pricing as pricing
import oandapyV20.endpoints.trades as trades
import oandapyV20.endpoints.transactions as transactions
from oandapyV20.contrib.requests import (LimitOrderRequest, MarketOrderRequest,
MITOrderRequest, StopLossDetails,
StopLossOrderRequest,
StopOrderRequest, TakeProfitDetails,
TakeProfitOrderRequest,
TrailingStopLossDetails,
TrailingStopLossOrderRequest)
from oandapyV20.exceptions import StreamTerminated
from retry import retry
class OandaAPI(object):
def __init__(self, accountID, access_token):
self.access_token = access_token
self.accountID = accountID
self.client = oandapyV20.API(access_token=access_token)
######################### Account #########################
@retry(tries=20, delay=0.1)
def get_accountID(self, access_token):
return self.accountID
@retry(tries=20, delay=0.1)
def get_AccountDetails(self):
r = accounts.AccountDetails(accountID=self.accountID)
return self.client.request(r)
@retry(tries=20, delay=0.1)
def get_AccountSummary(self):
r = accounts.AccountSummary(accountID=self.accountID)
return self.client.request(r)
######################### Order #########################
@retry(tries=20, delay=0.1)
def get_OrderList(self, ticker):
"""
Get the pending orders for a specific ticker.
"""
r = orders.OrderList(accountID=self.accountID,
params={"instrument": ticker})
return self.client.request(r)
@retry(tries=20, delay=0.1)
def get_OrdersPending(self):
r = orders.OrdersPending(accountID=self.accountID)
return self.client.request(r)
@retry(tries=20, delay=0.1)
def OrderCreate_mkt(self, ticker, size, takeprofit=None, stoploss=None,
trailingstop=None):
"""
Create a market order.
requesttype:
MarketOrder
"""
d = dict(instrument=ticker, units=size)
if takeprofit:
d['takeProfitOnFill'] = TakeProfitDetails(price=takeprofit).data
if stoploss:
d['stopLossOnFill'] = StopLossDetails(price=stoploss).data
if trailingstop:
d['trailingStopLossOnFill'] = TrailingStopLossDetails(
distance=trailingstop).data
Order = MarketOrderRequest(**d).data
r = orders.OrderCreate(accountID=self.accountID, data=Order)
return self.client.request(r)
@retry(tries=20, delay=0.1)
def OrderCreate_pending(self, ticker, size, price, takeprofit=None,
stoploss=None, trailingstop=None,
requesttype='MarketIfTouchedOrder'):
"""
Create a pending order.
requesttype:
LimitOrder, StopOrder, MarketIfTouchedOrder,
"""
d = dict(instrument=ticker, units=size, price=price)
if takeprofit:
d['takeProfitOnFill'] = TakeProfitDetails(price=takeprofit).data
if stoploss:
d['stopLossOnFill'] = StopLossDetails(price=stoploss).data
if trailingstop:
d['trailingStopLossOnFill'] = TrailingStopLossDetails(
distance=trailingstop).data
if requesttype == 'MarketIfTouchedOrder':
Order = MITOrderRequest(**d).data
elif requesttype == 'LimitOrder':
Order = LimitOrderRequest(**d).data
elif requesttype == 'StopOrder':
Order = StopOrderRequest(**d).data
r = orders.OrderCreate(accountID=self.accountID, data=Order)
return self.client.request(r)
@retry(tries=20, delay=0.1)
def cancel_all_OrdersPending(self, ordertype, long_short=None):
"""
Cancel all pending orders.
ordertype: LIMIT, STOP, MARKET_IF_TOUCHED,
long_short: LONG, SHORT
"""
rv = self.get_OrdersPending()
rv = [dict(id=i.get('id'), units=i.get('units'))
for i in rv['orders'] if i['type'] in ordertype and i.get('units')]
if long_short == 'LONG':
idsToCancel = [order.get('id') for order in rv
if float(order['units']) > 0]
elif long_short == 'SHORT':
idsToCancel = [order.get('id') for order in rv
if float(order['units']) < 0]
elif long_short is None:
idsToCancel = [order.get('id') for order in rv]
for orderID in idsToCancel:
r = orders.OrderCancel(accountID=self.accountID, orderID=orderID)
rv = self.client.request(r)
@retry(tries=20, delay=0.1)
def cancel_all_TSTOrder(self, ticker, ordertype):
"""
Cancel all take-profit, stop-loss and trailing-stop-loss orders.
ordertype: TAKE_PROFIT, STOP_LOSS, TRAILING_STOP_LOSS
"""
rv = self.get_OrderList(ticker)
idsToCancel = [order.get('id')
for order in rv['orders'] if order['type'] in ordertype]
for orderID in idsToCancel:
r = orders.OrderCancel(accountID=self.accountID, orderID=orderID)
rv = self.client.request(r)
# def OrderCreate_TakeProfit(self,ticker,long_short,price):
# """
# Add a take-profit to every trade; raises an error if one already exists, so this function is unused for now
# long_short: LONG, SHORT
# """
# rv = self.get_tradeslist(ticker)
#
# if long_short is 'LONG':
# idsToCreate = [trade.get('id') for trade in rv['trades']
# if float(trade['currentUnits']) > 0]
# elif long_short is 'SHORT':
# idsToCreate = [trade.get('id') for trade in rv['trades']
# if float(trade['currentUnits']) < 0]
# for tradeID in idsToCreate:
# Order = TakeProfitOrderRequest(tradeID=tradeID,price=price).data
# r = orders.OrderCreate(accountID = self.accountID, data=Order)
# rv = self.client.request(r)
#
######################### Trades #########################
@retry(tries=20, delay=0.1)
def get_all_open_trades(self):
r = trades.OpenTrades(accountID=self.accountID)
return self.client.request(r)
@retry(tries=20, delay=0.1)
def get_tradeslist(self, ticker):
r = trades.TradesList(accountID=self.accountID, params={
'instrument': ticker})
return self.client.request(r)
@retry(tries=20, delay=0.1)
def get_trade_details(self, tradeID):
r = trades.TradeDetails(accountID=self.accountID, tradeID=tradeID)
return self.client.request(r)
@retry(tries=20, delay=0.1)
def Exitall_trades(self):
rv = self.get_all_open_trades()
idsToClose = [trade.get('id') for trade in rv['trades']]
for tradeID in idsToClose:
r = trades.TradeClose(accountID=self.accountID, tradeID=tradeID)
self.client.request(r)
######################### Positions #########################
@retry(tries=20, delay=0.1)
def close_all_position(self, ticker, closetype='long'):
"""
closetype: long, short
"""
if closetype == 'long':
d = dict(longUnits='ALL')
elif closetype == 'short':
d = dict(shortUnits='ALL')
r = positions.PositionClose(accountID=self.accountID,
instrument=ticker,
data=d)
return self.client.request(r)
@retry(tries=20, delay=0.1)
def get_positions(self):
r = positions.OpenPositions(accountID=self.accountID)
return self.client.request(r)
@retry(tries=20, delay=0.1)
def get_tickstream(self, ticker):
r = pricing.PricingStream(accountID=self.accountID, params={
"instruments": ticker})
n = 0
# effectively never terminate the stream automatically (lower stopAfter to stop after N ticks)
stopAfter = 999999999999999999999999999
try:
# the stream requests returns a generator so we can do ...
for tick in self.client.request(r):
print(json.dumps(tick, indent=2))
if n >= stopAfter:
r.terminate()
n += 1
except StreamTerminated as err:
print(
"Stream processing ended because we made it stop after {} ticks".format(n))
######################### Transactions #########################
@retry(tries=20, delay=0.1)
def get_TransactionsSinceID(self, transactionID):
"""TransactionsSinceID.
Get a range of Transactions for an Account starting at (but not including)
a provided Transaction ID.
"""
r = transactions.TransactionsSinceID(accountID=self.accountID,
params={'id': transactionID})
return self.client.request(r)
@retry(tries=20, delay=0.1)
def get_TransactionDetails(self, transactionID):
r = transactions.TransactionDetails(accountID=self.accountID,
transactionID=transactionID)
return self.client.request(r)
######################### ticker #########################
@retry(tries=20, delay=0.1)
def get_candlestick_list(self, ticker, granularity, count=50,
fromdate=None, todate=None, price='M',
smooth=False, includeFirst=None):
"""
See http://developer.oanda.com/rest-live-v20/instrument-ep/
date: 'YYYY-MM-DDTHH-mm:ssZ'
instrument: Name of the Instrument [required]
price: str, 'M' or 'B' or 'A'
granularity: (S5, S10, S30, M1, M2, M4, M5) <- BAD Interval
M10, M15, M30
H1, H2, H3, H4, H6, H8, H12,
D, W, M
count: number of candle, default=50, maximum=5000
fromdate: format '2017-01-01'
todate: format '2017-01-01'
smooth: A smoothed candlestick uses the previous candle’s close
price as its open price, while an unsmoothed candlestick
uses the first price from its time range as its open price.
includeFirst: A flag that controls whether the candlestick that
is covered by the from time should be included
in the results.
"""
params = dict(granularity=granularity,
count=count,
price=price,
smooth=smooth,
includeFirst=includeFirst)
if fromdate:
# fromdate += 'T00:00:00Z'
params.update({'from': fromdate})
if todate:
# todate += 'T00:00:00Z'
params.update({'to': todate})
r = instruments.InstrumentsCandles(instrument=ticker,
params=params)
return self.client.request(r)
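# Hedged usage sketch (illustrative parameters; response fields per the v20 docs
# referenced above):
#   candles = oanda.get_candlestick_list('EUR_USD', 'H1', count=100)
#   # candles['candles'] is expected to be a list of dicts with 'time', 'volume'
#   # and mid ('M') open/high/low/close prices.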
# Misc
@retry(tries=20, delay=0.01)
def get_pricinginfo(self, ticker):
r = pricing.PricingInfo(accountID=self.accountID,
params={"instruments": ticker})
return self.client.request(r)
if __name__ == "__main__":
from oandakey import access_token, accountID
from OnePy.utils.awesome_func import run_multithreading
oanda = OandaAPI(accountID, access_token)
instrument = "EUR_USD"
#
n = 0
# for i in range(200):
# n += 1
# print(n)
# data = oanda.OrderCreate_mkt('EUR_USD', 100)
def submit(a):
data = oanda.OrderCreate_mkt('EUR_USD', 100)
# run_multithreading(submit, [i for i in range(100)], 100)
# data = oanda.close_all_position('EUR_USD', 'long')
# data = oanda.Exitall_trades()
# data = oanda.OrderCreate_mkt('EUR_USD', -100, takeprofit=1.4)
data = oanda.OrderCreate_mkt('EUR_USD', 100)
# data = oanda.OrderCreate_mkt('EUR_USD',100)
# data = oanda.OrderCreate_mkt('EUR_USD',100)
# data = oanda.OrderCreate_mkt('EUR_USD',100)
# data = oanda.OrderCreate_mkt('EUR_USD',100)
# data = oanda.OrderCreate_mkt('EUR_USD',100)
# data = oanda.OrderCreate_mkt('EUR_USD',100)
# data = oanda.OrderCreate_mkt('EUR_USD',100)
# data = oanda.OrderCreate_mkt('EUR_USD', 10, trailingstop=0.002)
#
# data = oanda.OrderCreate_mkt('EUR_GBP',-10)
# data = oanda.OrderCreate_mkt('USD_JPY', 10)
# data = oanda.OrderCreate_pending('EUR_USD',200,1.0,requesttype='LimitOrder')
# data = oanda.OrderCreate_pending(
# 'EUR_USD', 200, 1.2, trailingstop=1, takeprofit=1.3, requesttype='StopOrder')
# data = oanda.cancel_all_OrdersPending('MARKET_IF_TOUCHED', 'LONG')
# data = oanda.cancel_all_OrdersPending('MARKET_IF_TOUCHED', 'SHORT')
# data = oanda.cancel_all_OrdersPending('LIMIT', 'LONG')
# data = oanda.cancel_all_OrdersPending('LIMIT', 'SHORT')
# data = oanda.cancel_all_OrdersPending('STOP', 'SHORT')
# data = oanda.cancel_all_OrdersPending('STOP', 'LONG')
# data = oanda.cancel_all_TSTOrder('EUR_USD', 'TAKE_PROFIT')
# data = oanda.get_OrdersPending()
# oanda.get_tickstream([instrument])
# data = oanda.get_candlestick_list(
# 'EUR_USD', 'S5', count=5, fromdate="2015-01-08T07:00:00Z")
# data = oanda.get_tradeslist(instrument)
# data = oanda.get_AccountDetails()
# print(data)
# data = oanda.get_AccountSummary()
# data = oanda.close_all_position('EUR_USD','long')
# data= oanda.get_TransactionsSinceID(1)
# data = oanda.get_positions()
# print(json.dumps(data['candles'], indent=2))
# print(len(data['positions'][0]['long']['tradeIDs'])) # number of open trades
# Check interval test
# from datetime import timedelta
# def check_candles_interval(data, interval):
# bar_list = []
# for i in range(len(data['candles'])):
# gg = arrow.get(data['candles'][i+1]['time']) - \
# arrow.get(data['candles'][i]['time'])
# if gg != timedelta(interval/60/60/24) and gg != timedelta(interval/60/60/24 + 2):
# print(gg)
# print(data['candles'][i+1]['time'])
# print(arrow.get(data['candles'][i]['time']))
# # check_candles_interval(data,30)
print(json.dumps(data, indent=2))
|
1695306
|
from .hred import HRED
from .cvae import CVAE
from .seq2seq import Seq2Seq
from .seq2seq_attention import Seq2SeqAttention
from .transformer import Transformer
|
1695327
|
import csv
import glob
import os
from collections import namedtuple
Split = namedtuple('Split', ['filename', 'weight'])
def write_list(out_filename, dataset):
with open(out_filename, 'w') as fout:
for line in dataset:
fout.write(line)
fout.write("\n")
def write_splits(out_directory, snippets, splits):
total_weight = sum(split.weight for split in splits)
divs = []
subtotal = 0.0
for split in splits:
divs.append(int(len(snippets) * subtotal / total_weight))
subtotal = subtotal + split.weight
divs.append(len(snippets))
for i, split in enumerate(splits):
filename = os.path.join(out_directory, split.filename)
print("Writing {}:{} to {}".format(divs[i], divs[i+1], filename))
write_list(filename, snippets[divs[i]:divs[i+1]])
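# Illustrative example (assumed numbers): with 100 snippets and
# splits = [Split('train.txt', 8), Split('dev.txt', 1), Split('test.txt', 1)],
# total_weight is 10 and the cut points become [0, 80, 90, 100], so train.txt
# receives snippets[0:80], dev.txt snippets[80:90] and test.txt snippets[90:100].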
def clean_tokenized_tweet(line):
line = list(line)
if len(line) > 3 and line[0] == 'RT' and line[1][0] == '@' and line[2] == ':':
line = line[3:]
elif len(line) > 4 and line[0] == 'RT' and line[1] == '@' and line[3] == ':':
line = line[4:]
elif line[0][0] == '@':
line = line[1:]
for i in range(len(line)):
if line[i][0] == '@' or line[i][0] == '#':
line[i] = line[i][1:]
if line[i].startswith("http:") or line[i].startswith("https:"):
line[i] = ' '
return line
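# Small worked example (made-up tokens): the input
#   ['RT', '@user', ':', 'hello', '#fun', 'https://x']
# first drops the leading retweet marker, then strips the '#' from '#fun' and blanks
# the URL, yielding ['hello', 'fun', ' '].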
def get_scare_snippets(nlp, csv_dir_path, text_id_map, filename_pattern="*.csv"):
num_short_items = 0
snippets = []
csv_files = glob.glob(os.path.join(csv_dir_path, filename_pattern))
for csv_filename in csv_files:
with open(csv_filename, newline='') as fin:
cin = csv.reader(fin, delimiter='\t', quotechar='"')
lines = list(cin)
for line in lines:
ann_id, begin, end, sentiment = [line[i] for i in [1, 2, 3, 6]]
begin = int(begin)
end = int(end)
if sentiment.lower() == 'unknown':
continue
elif sentiment.lower() == 'positive':
sentiment = 2
elif sentiment.lower() == 'neutral':
sentiment = 1
elif sentiment.lower() == 'negative':
sentiment = 0
else:
raise ValueError("Tell John he screwed up and this is why he can't have Mox Opal: {}".format(sentiment))
if ann_id not in text_id_map:
print("Found snippet which can't be found: {}-{}".format(csv_filename, ann_id))
continue
snippet = text_id_map[ann_id][begin:end]
doc = nlp(snippet)
text = " ".join(" ".join(token.text for token in sentence.tokens) for sentence in doc.sentences)
num_tokens = sum(len(sentence.tokens) for sentence in doc.sentences)
if num_tokens < 4:
num_short_items = num_short_items + 1
snippets.append("%d %s" % (sentiment, text))
print("Number of short items: {}".format(num_short_items))
return snippets
|
1695329
|
import os
import sys
import copy
import logging
from checker import *
from .ofp import register_ofp_creators
from .ofp import OfpBase
from .ofp_match import SCE_MATCH
from .ofp_match import OfpMatchCreator
# YAML:
# flow_removed:
# cookie: 0
# priority: 0
# reason: 0
# table_id: 0
# idle_timeout: 0
# hard_timeout: 0
# packet_count: 0
# byte_count: 0
# match:
# in_port: 1
# eth_dst: "ff:ff:ff:ff:ff:ff"
SCE_FLOW_REMOVED = "flow_removed"
SCE_FLOW_STATS_BODY = "body"
@register_ofp_creators(SCE_FLOW_REMOVED)
class OfpFlowRemovedCreator(OfpBase):
@classmethod
def create(cls, test_case_obj, dp, ofproto, ofp_parser, params):
# FlowRemoved.
kws = copy.deepcopy(params)
# match.
match = None
if SCE_MATCH in params:
match = OfpMatchCreator.create(test_case_obj,
dp, ofproto,
ofp_parser,
params[SCE_MATCH])
kws[SCE_MATCH] = match
# create FlowRemoved.
msg = ofp_parser.OFPFlowRemoved(dp, **kws)
msg._set_targets(["cookie", "priority", "reason",
"table_id", "idle_timeout",
"hard_timeout", "packet_count",
"byte_count", "match"])
return msg
|
1695389
|
import os
import asyncio
import logging
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.lib.cell as s_cell
import synapse.lib.coro as s_coro
import synapse.lib.nexus as s_nexus
import synapse.lib.jsonstor as s_jsonstor
import synapse.lib.lmdbslab as s_lmdbslab
logger = logging.getLogger(__name__)
class AhaApi(s_cell.CellApi):
async def getAhaUrls(self):
return self.cell.conf.get('aha:urls', ())
async def getAhaSvc(self, name):
'''
Return an AHA service description dictionary for a fully qualified service name.
'''
svcinfo = await self.cell.getAhaSvc(name)
if svcinfo is None:
return None
svcnetw = svcinfo.get('svcnetw')
await self._reqUserAllowed(('aha', 'service', 'get', svcnetw))
return svcinfo
async def getAhaSvcs(self, network=None):
'''
Yield AHA svcinfo dictionaries.
Args:
network (str): Optionally specify a network to filter on.
'''
if network is None:
await self._reqUserAllowed(('aha', 'service', 'get'))
else:
await self._reqUserAllowed(('aha', 'service', 'get', network))
async for info in self.cell.getAhaSvcs(network=network):
yield info
async def addAhaSvc(self, name, info, network=None):
'''
Register a service with the AHA discovery server.
NOTE: In order for the service to remain marked "up" a caller
must maintain the telepath link.
'''
svcname, svcnetw, svcfull = self.cell._nameAndNetwork(name, network)
await self._reqUserAllowed(('aha', 'service', 'add', svcnetw, svcname))
# don't disclose the real session...
sess = s_common.guid(self.sess.iden)
info['online'] = sess
if self.link.sock is not None:
host, port = self.link.sock.getpeername()
urlinfo = info.get('urlinfo', {})
urlinfo.setdefault('host', host)
async def fini():
if self.cell.isfini: # pragma: no cover
mesg = f'{self.cell.__class__.__name__} is fini. Unable to set {name}@{network} as down.'
logger.warning(mesg)
return
logger.debug(f'AhaCellApi fini, tearing down [{name}]')
coro = self.cell.setAhaSvcDown(name, sess, network=network)
self.cell.schedCoro(coro) # this will eventually execute or get cancelled.
self.onfini(fini)
return await self.cell.addAhaSvc(name, info, network=network)
async def delAhaSvc(self, name, network=None):
'''
Remove an AHA service entry.
'''
svcname, svcnetw, svcfull = self.cell._nameAndNetwork(name, network)
await self._reqUserAllowed(('aha', 'service', 'del', svcnetw, svcname))
return await self.cell.delAhaSvc(name, network=network)
async def getCaCert(self, network):
await self._reqUserAllowed(('aha', 'ca', 'get'))
return await self.cell.getCaCert(network)
async def genCaCert(self, network):
await self._reqUserAllowed(('aha', 'ca', 'gen'))
return await self.cell.genCaCert(network)
async def signHostCsr(self, csrtext, signas=None, sans=None):
await self._reqUserAllowed(('aha', 'csr', 'host'))
return await self.cell.signHostCsr(csrtext, signas=signas, sans=sans)
async def signUserCsr(self, csrtext, signas=None):
await self._reqUserAllowed(('aha', 'csr', 'user'))
return await self.cell.signUserCsr(csrtext, signas=signas)
class AhaCell(s_cell.Cell):
cellapi = AhaApi
confdefs = {
'aha:urls': {
'description': 'A list of all available AHA server URLs.',
'type': ['string', 'array'],
'items': {'type': 'string'},
},
}
async def initServiceStorage(self):
# TODO plumb using a remote jsonstor?
dirn = s_common.gendir(self.dirn, 'slabs', 'jsonstor')
slab = await s_lmdbslab.Slab.anit(dirn)
self.jsonstor = await s_jsonstor.JsonStor.anit(slab, 'aha') # type: s_jsonstor.JsonStor
async def fini():
await self.jsonstor.fini()
await slab.fini()
self.onfini(fini)
async def initServiceRuntime(self):
self.addActiveCoro(self._clearInactiveSessions)
async def _clearInactiveSessions(self):
async for svc in self.getAhaSvcs():
if svc.get('svcinfo', {}).get('online') is None:
continue
current_sessions = {s_common.guid(iden) for iden in self.dmon.sessions.keys()}
svcname = svc.get('svcname')
network = svc.get('svcnetw')
linkiden = svc.get('svcinfo').get('online')
if linkiden not in current_sessions:
logger.debug(f'AhaCell activecoro tearing down [{svcname}.{network}]')
await self.setAhaSvcDown(svcname, linkiden, network=network)
# Wait until we are cancelled or the cell is fini.
await self.waitfini()
async def getAhaSvcs(self, network=None):
path = ('aha', 'services')
if network is not None:
path = path + (network,)
async for path, item in self.jsonstor.getPathObjs(path):
yield item
def _nameAndNetwork(self, name, network):
if network is None:
svcfull = name
try:
svcname, svcnetw = name.split('.', 1)
except ValueError:
raise s_exc.BadArg(name=name, arg='name',
mesg='Name must contain at least one "."') from None
else:
svcname = name
svcnetw = network
svcfull = f'{name}.{network}'
return svcname, svcnetw, svcfull
@s_nexus.Pusher.onPushAuto('aha:svc:add')
async def addAhaSvc(self, name, info, network=None):
svcname, svcnetw, svcfull = self._nameAndNetwork(name, network)
full = ('aha', 'svcfull', svcfull)
path = ('aha', 'services', svcnetw, svcname)
unfo = info.get('urlinfo')
logger.debug(f'Adding service [{svcfull}] from [{unfo.get("scheme")}://{unfo.get("host")}:{unfo.get("port")}]')
svcinfo = {
'name': svcfull,
'svcname': svcname,
'svcnetw': svcnetw,
'svcinfo': info,
}
await self.jsonstor.setPathObj(path, svcinfo)
await self.jsonstor.setPathLink(full, path)
# mostly for testing...
await self.fire('aha:svcadd', svcinfo=svcinfo)
@s_nexus.Pusher.onPushAuto('aha:svc:del')
async def delAhaSvc(self, name, network=None):
svcname, svcnetw, svcfull = self._nameAndNetwork(name, network)
full = ('aha', 'svcfull', svcfull)
path = ('aha', 'services', svcnetw, svcname)
await self.jsonstor.delPathObj(path)
await self.jsonstor.delPathObj(full)
# mostly for testing...
await self.fire('aha:svcdel', svcname=svcname, svcnetw=svcnetw)
async def setAhaSvcDown(self, name, linkiden, network=None):
svcname, svcnetw, svcfull = self._nameAndNetwork(name, network)
path = ('aha', 'services', svcnetw, svcname)
svcinfo = await self.jsonstor.getPathObjProp(path, 'svcinfo')
if svcinfo.get('online') is None:
return
await self._push('aha:svc:down', name, linkiden, network=network)
@s_nexus.Pusher.onPush('aha:svc:down')
async def _setAhaSvcDown(self, name, linkiden, network=None):
svcname, svcnetw, svcfull = self._nameAndNetwork(name, network)
path = ('aha', 'services', svcnetw, svcname)
await self.jsonstor.cmpDelPathObjProp(path, 'svcinfo/online', linkiden)
# Check if we have any links which may need to be removed
current_sessions = {s_common.guid(iden): sess for iden, sess in self.dmon.sessions.items()}
sess = current_sessions.get(linkiden)
if sess is not None:
for link in [lnk for lnk in self.dmon.links if lnk.get('sess') is sess]:
await link.fini()
await self.fire('aha:svcdown', svcname=svcname, svcnetw=svcnetw)
logger.debug(f'Set [{svcfull}] offline.')
async def getAhaSvc(self, name):
path = ('aha', 'svcfull', name)
svcinfo = await self.jsonstor.getPathObj(path)
if svcinfo is not None:
return svcinfo
async def genCaCert(self, network):
path = self.certdir.getCaCertPath(network)
if path is not None:
with open(path, 'rb') as fd:
return fd.read().decode()
logger.info(f'Generating CA certificate for {network}')
fut = s_coro.executor(self.certdir.genCaCert, network, save=False)
pkey, cert = await fut
cakey = self.certdir._pkeyToByts(pkey).decode()
cacert = self.certdir._certToByts(cert).decode()
# nexusify storage..
await self.saveCaCert(network, cakey, cacert)
return cacert
async def getCaCert(self, network):
path = self.certdir.getCaCertPath(network)
if path is None:
return None
with open(path, 'rb') as fd:
return fd.read().decode()
@s_nexus.Pusher.onPushAuto('aha:ca:save')
async def saveCaCert(self, name, cakey, cacert):
# manually save the files to a certpath compatible location
with s_common.genfile(self.dirn, 'certs', 'cas', f'{name}.key') as fd:
fd.write(cakey.encode())
with s_common.genfile(self.dirn, 'certs', 'cas', f'{name}.crt') as fd:
fd.write(cacert.encode())
async def signHostCsr(self, csrtext, signas=None, sans=None):
xcsr = self.certdir._loadCsrByts(csrtext.encode())
hostname = xcsr.get_subject().CN
hostpath = s_common.genpath(self.dirn, 'certs', 'hosts', f'{hostname}.crt')
if os.path.isfile(hostpath):
os.unlink(hostpath)
if signas is None:
signas = hostname.split('.', 1)[1]
logger.info(f'Signing host CSR for [{hostname}], signas={signas}, sans={sans}')
pkey, cert = self.certdir.signHostCsr(xcsr, signas=signas, sans=sans)
return self.certdir._certToByts(cert).decode()
async def signUserCsr(self, csrtext, signas=None):
xcsr = self.certdir._loadCsrByts(csrtext.encode())
username = xcsr.get_subject().CN
userpath = s_common.genpath(self.dirn, 'certs', 'users', f'{username}.crt')
if os.path.isfile(userpath):
os.unlink(userpath)
if signas is None:
signas = username.split('@', 1)[1]
logger.info(f'Signing user CSR for [{username}], signas={signas}')
pkey, cert = self.certdir.signUserCsr(xcsr, signas=signas)
return self.certdir._certToByts(cert).decode()
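# --- Hedged example (editor addition, not part of the original module) ---
# AhaCell._nameAndNetwork splits a fully qualified service name into its parts:
#   _nameAndNetwork('00.cryo.vertex.link', None)  -> ('00', 'cryo.vertex.link', '00.cryo.vertex.link')
#   _nameAndNetwork('00', 'cryo.vertex.link')     -> ('00', 'cryo.vertex.link', '00.cryo.vertex.link')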
|
1695420
|
import wrapt
from thundra.config import config_names
from thundra.config.config_provider import ConfigProvider
from thundra.integrations.sqlalchemy import SqlAlchemyIntegration
def _wrapper(wrapped, instance, args, kwargs):
engine = wrapped(*args, **kwargs)
SqlAlchemyIntegration(engine)
return engine
def patch():
if not ConfigProvider.get(config_names.THUNDRA_TRACE_INTEGRATIONS_SQLALCHEMY_DISABLE):
try:
from sqlalchemy.event import listen
from sqlalchemy.engine.interfaces import ExecutionContext
wrapt.wrap_function_wrapper(
'sqlalchemy',
'create_engine',
_wrapper
)
wrapt.wrap_function_wrapper(
'sqlalchemy.engine',
'create_engine',
_wrapper
)
        except Exception:  # sqlalchemy missing or wrapping failed; skip the integration
pass
|
1695466
|
import os
from pathlib import Path
from shutil import copyfile, copytree, rmtree
def copy_setup_files():
# The root directory is where the workspace will be created, setup files are stored locally
root = os.getcwd()
install_dir = os.path.dirname(os.path.realpath(__file__))
# Create temp directory
tmp_dir = Path(root) / 'tmp'
tmp_dir.mkdir(exist_ok=True)
# Copy over basic files
copyfile(install_dir + '/.gitignore', root + '/.gitignore')
copyfile(install_dir + '/sourceme.sh', root + '/sourceme.sh')
copytree(install_dir + '/../examples' + '/tech', root + '/example_tech')
def copy_test_files():
# The root directory is where the workspace will be created, setup files are stored locally
root = os.getcwd()
install_dir = os.path.dirname(os.path.realpath(__file__))
# Copy over basic files
rmtree(root + '/bpg_test_suite', ignore_errors=True)
copytree(install_dir + '/../../tests', root + '/bpg_test_suite')
|
1695474
|
from flask import Flask, request, jsonify
import redis
import random
import os
app = Flask(__name__)
@app.route('/adduser')
def adduser():
port = random.randint(50000, 60000)
if os.system(f"redis-server --port {port} --daemonize yes --protected-mode no") == 0:
return str(port), 200
else:
return "0", 500
@app.route('/getuser/<port>', methods=["GET"])
def getuser(port):
r = redis.Redis(port=port)
res = []
for key in r.scan_iter("*"):
res.append({key.decode(): r.get(key).decode()})
return jsonify(res)
@app.route('/putuser/<port>', methods=["POST"])
def putuser(port):
r = redis.Redis(port=port)
r.mset(request.json)
return "", 200
@app.route("/bio/<port>", methods=["POST", "GET"])
def bio(port):
if request.method == "GET":
if os.path.exists(f"/tmp/{port}.txt"):
with open(f"/tmp/{port}.txt") as f:
return f.read()
else:
return ""
elif request.method == "POST":
with open(f"/tmp/{port}.txt", 'w') as f:
f.write(request.json.get("bio"))
return ""
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
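# --- Hedged usage sketch (editor addition): one possible flow against this service ---
# The port value below is illustrative only; /adduser returns whatever port was chosen.
#   curl http://localhost:5000/adduser
#       -> spawns a per-user redis-server and returns its port, e.g. "51234"
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"name": "alice"}' http://localhost:5000/putuser/51234
#   curl http://localhost:5000/getuser/51234
#       -> [{"name": "alice"}]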
|
1695491
|
import random
import numpy as np
import torch
from IPython.display import display, HTML
from rdkit import rdBase
def set_seed(seed=42):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
def clear_torch(model=None):
if model:
del model
torch.cuda.empty_cache()
def disable_rdkit_log():
rdBase.DisableLog('rdApp.*')
def enable_rdkit_log():
rdBase.EnableLog('rdApp.*')
def header_str(a_str, n=80):
"""Returns a string formatted as a header."""
return '{{:=^{:d}}}'.format(n).format(' ' + a_str + ' ')
def header_html(a_str, level=1):
"""Returns a string formatted as a header."""
return display(HTML(f'<h{level}>{a_str}</h{level}>'))
def subset_list(alist, indices):
return [alist[index] for index in indices]
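# --- Hedged usage sketch (editor addition, not part of the original module) ---
if __name__ == '__main__':
    # header_str centers the title inside '=' padding for a total width of n.
    assert header_str('Results', n=20) == '===== Results ======'
    assert subset_list(['a', 'b', 'c', 'd'], [0, 2]) == ['a', 'c']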
|
1695505
|
import doctest
from importlib import import_module
import pytest
@pytest.mark.parametrize('module_name', [
'document.tree',
])
def test_doctests(module_name):
_, test_count = doctest.testmod(
import_module(module_name),
report=True,
verbose=True,
raise_on_error=True,
optionflags=doctest.NORMALIZE_WHITESPACE,
)
assert test_count > 0
|
1695536
|
from dolfin import *
from finmag.demag import solver_base as sb
import numpy as np
import progressbar as pb
##class FKSolver(sb.DeMagSolver):
## """Class containing methods shared by FK solvers"""
## def __init__(self, problem, degree=1):
## super(FKSolver, self).__init__(problem, degree)
class FKSolverTrunc(sb.TruncDeMagSolver):
"""FK Solver using domain truncation"""
###Only partially implemented at the moment
def __init__(self,problem, degree = 1):
self.problem = problem
self.degree = degree
def solve(self):
#Set up spaces,functions, measures etc.
V = FunctionSpace(self.problem.mesh,"CG",self.degree)
if self.problem.mesh.topology().dim() == 1:
Mspace = FunctionSpace(self.problem.mesh,"DG",self.degree)
else:
Mspace = VectorFunctionSpace(self.problem.mesh,"DG",self.degree)
phi0 = Function(V)
phi1 = Function(V)
dxC = self.problem.dxC
dSC = self.problem.dSC
N = FacetNormal(self.problem.coremesh)
#Define the magnetisation
M = interpolate(Expression(self.problem.M),Mspace)
########################################
#Solve for phi0
########################################
## #A boundary point used to specify the pure neumann problem
r = self.problem.r
class BoundPoint(SubDomain):
def inside(self, x, on_boundary):
return near(x[0], 0.5 - r)
dbc1 = DirichletBC(V, 0.0, BoundPoint())
#Forms for Neumann Poisson Equation for phi0
u = TrialFunction(V)
v = TestFunction(V)
a = dot(grad(u),grad(v))*dxC
f = (div(M)*v)*dxC #Source term in core
f += (dot(M,N)*v)('-')*dSC #Neumann Conditions on edge of core
A = assemble(a,cell_domains = self.problem.corefunc, interior_facet_domains = self.problem.coreboundfunc)
F = assemble(f,cell_domains = self.problem.corefunc, interior_facet_domains = self.problem.coreboundfunc)
dbc1.apply(A,F)
A.ident_zeros()
        print(A.array())
solve(A,phi0.vector(),F)
########################################
#Solve for phi1
########################################
L = FunctionSpace(self.problem.mesh,"CG",self.degree)
VD = FunctionSpace(self.problem.mesh,"DG",self.degree)
W = MixedFunctionSpace((V,L))
u,l = TrialFunctions(W)
v,q = TestFunctions(W)
sol = Function(W)
#Forms for phi1
a = dot(grad(u),grad(v))*dx
f = q('-')*phi0('-')*dSC
a += q('-')*jump(u)*dSC #Jump in solution on core boundary
a += (l*v)('-')*dSC
#Dirichlet BC at our approximate boundary
dbc = DirichletBC(W.sub(0),0.0,"on_boundary")
A = assemble(a,cell_domains = self.problem.corefunc, interior_facet_domains = self.problem.coreboundfunc)
F = assemble(f,cell_domains = self.problem.corefunc, interior_facet_domains = self.problem.coreboundfunc)
dbc.apply(A)
dbc.apply(F)
A.ident_zeros()
solve(A, sol.vector(),F)
solphi,sollag = sol.split()
phi1.assign(solphi)
phitot = Function(V)
        print(phi0.vector().array())
        print(phi1.vector().array())
phitot.vector()[:] = phi0.vector() + phi1.vector()
#Store Variables for outside testing
self.V = V
self.phitot = phitot
self.phi0 = phi0
self.phi1 = phi1
self.sol = sol
self.M = M
self.Mspace = Mspace
return phitot
|
1695556
|
def word_search(documents, keyword):
# list to hold the indices of matching documents
indices = []
# Iterate through the indices (i) and elements (doc) of documents
for i, doc in enumerate(documents):
# Split the string doc into a list of words (according to whitespace)
tokens = doc.split()
# Make a transformed list where we 'normalize' each word to facilitate matching.
# Periods and commas are removed from the end of each word, and it's set to all lowercase.
normalized = [token.rstrip('.,').lower() for token in tokens]
# Is there a match? If so, update the list of matching indices.
if keyword.lower() in normalized:
indices.append(i)
return indices
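# --- Hedged usage sketch (editor addition, not part of the original) ---
if __name__ == '__main__':
    docs = ["The Learn Python Challenge Casino.", "They bought a car", "Casinoville"]
    # Only the first document contains "casino" as a standalone word once
    # trailing punctuation is stripped and case is normalized.
    assert word_search(docs, 'casino') == [0]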
|
1695557
|
from dask.distributed import Client, LocalCluster
from jmetal.lab.visualization import Plot
from jmetal.util.observer import ProgressBarObserver
from jmetal.util.termination_criterion import StoppingByEvaluations
from pymsa.core.score import SumOfPairs, PercentageOfTotallyConservedColumns
from sequoya.algorithm.multiobjective.nsgaii import DistributedNSGAII
from sequoya.operator import SPXMSA, ShiftClosedGapGroups
from sequoya.problem import BAliBASE
from sequoya.util.solution import get_representative_set
from sequoya.util.visualization import MSAPlot
if __name__ == '__main__':
# setup Dask client (web interface will be initialized at http://127.0.0.1:8787/workers)
cluster = LocalCluster(processes=True)
client = Client(cluster)
ncores = sum(client.ncores().values())
print(f'{ncores} cores available')
# creates the problem
problem = BAliBASE(instance='BB50011', path='../resources',
score_list=[SumOfPairs(), PercentageOfTotallyConservedColumns()])
# creates the algorithm
max_evaluations = 25000
algorithm = DistributedNSGAII(
problem=problem,
population_size=100,
mutation=ShiftClosedGapGroups(probability=0.4),
crossover=SPXMSA(probability=0.7),
termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations),
number_of_cores=ncores,
client=client
)
algorithm.observable.register(observer=ProgressBarObserver(max=max_evaluations))
algorithm.run()
front = algorithm.get_result()
# plot front
plot_front = Plot(title='Pareto front approximation', axis_labels=['%SOP', '%TC'])
plot_front.plot(front, label='NSGAII-BB50011', filename='NSGAII-BB50011')
plot_front = MSAPlot(title='Pareto front approximation', axis_labels=['%SOP', '%TC'])
plot_front.plot(front, label='NSGAII-BB50011', filename='NSGAII-BB50011', format='HTML')
# find extreme solutions
solutions = get_representative_set(front)
for solution in solutions:
print(solution.objectives)
print('Computing time: ' + str(algorithm.total_computing_time))
|
1695559
|
from django.test import override_settings
from django_otp import user_has_device
from django_otp.middleware import OTPMiddleware as _OTPMiddleware
from wagtail.admin.menu import MenuItem
from wagtail_2fa.wagtail_hooks import remove_menu_if_unverified
class TestHooks:
def test_remove_menu_if_unverified(self, user, rf):
with override_settings(WAGTAIL_2FA_REQUIRED=True):
request = rf.get("/cms/")
request.user = user
middleware = _OTPMiddleware()
user = middleware._verify_user(request, user)
assert not user_has_device(user)
assert user.is_authenticated
menu_items = [
MenuItem("Dummy item 1", "/stub1/"),
MenuItem("Dummy item 2", "/stub2/"),
MenuItem("Dummy item 3", "/stub3/"),
]
remove_menu_if_unverified(request, menu_items)
assert len(menu_items) == 1
assert menu_items[0].label == "2FA Setup"
assert menu_items[0].url == "/cms/2fa/devices/1"
def test_do_not_remove_menu_if_verified(self, verified_user, rf):
with override_settings(WAGTAIL_2FA_REQUIRED=True):
request = rf.get("/cms/")
request.user = verified_user
menu_items = [
MenuItem("Dummy item 1", "/stub1/"),
MenuItem("Dummy item 2", "/stub2/"),
MenuItem("Dummy item 3", "/stub3/"),
]
remove_menu_if_unverified(request, menu_items)
            assert len(menu_items) == 3  # menu is left untouched for verified users
def test_do_not_remove_menu_if_2fa_required_is_false(self, user, rf):
with override_settings(WAGTAIL_2FA_REQUIRED=False):
request = rf.get("/cms/")
# Use a regular user here to make sure the menu still works
# even when the middleware is not loaded and the user does not have the
# enable_2fa permission.
request.user = user
assert getattr(request.user, "enable_2fa", None) is None
menu_items = [
MenuItem("Dummy item 1", "/stub1/"),
MenuItem("Dummy item 2", "/stub2/"),
MenuItem("Dummy item 3", "/stub3/"),
]
remove_menu_if_unverified(request, menu_items)
            assert len(menu_items) == 3  # menu is left untouched when 2FA is not required
def test_do_not_remove_menu_if_2fa_required_is_false_for_verified_user(
self, verified_user, rf
):
with override_settings(WAGTAIL_2FA_REQUIRED=False):
request = rf.get("/cms/")
request.user = verified_user
menu_items = [
MenuItem("Dummy item 1", "/stub1/"),
MenuItem("Dummy item 2", "/stub2/"),
MenuItem("Dummy item 3", "/stub3/"),
]
remove_menu_if_unverified(request, menu_items)
            assert len(menu_items) == 3  # menu is left untouched
|
1695579
|
import pytest
create_user = True
@pytest.mark.usefixtures("testapp")
class TestLogin:
def test_login(self, testapp):
""" Tests if the login form functions """
rv = testapp.post('/login', data=dict(
email='<EMAIL>',
password="<PASSWORD>"
), follow_redirects=True)
assert rv.status_code == 200
assert 'Logged in successfully.' in str(rv.data)
def test_login_bad_email(self, testapp):
""" Tests if the login form rejects invalid email """
rv = testapp.post('/login', data=dict(
email='admin',
password=""
), follow_redirects=True)
assert rv.status_code == 200
assert 'Invalid email address' in str(rv.data)
def test_login_bad_password(self, testapp):
""" Tests if the login form fails correctly """
rv = testapp.post('/login', data=dict(
email='<EMAIL>',
password="<PASSWORD>"
), follow_redirects=True)
assert rv.status_code == 200
assert 'Invalid email or password' in str(rv.data)
|
1695587
|
import sbol3
class DataSheet(sbol3.CustomIdentified):
TYPE_URI = 'http://example.org/sbol3#DataSheet'
RATE_URI = 'http://example.org/sbol3#txRate'
def __init__(self, rate=None):
super().__init__(type_uri=DataSheet.TYPE_URI)
self.transcription_rate = sbol3.FloatProperty(self,
DataSheet.RATE_URI,
0, 1,
initial_value=rate)
class Analysis(sbol3.CustomTopLevel):
TYPE_URI = 'http://example.org/sbol3#Analysis'
MODEL_URI = 'http://example.org/sbol3#fittedModel'
DATA_SHEET_URI = 'http://example.org/sbol3#dataSheet'
def __init__(self, identity=None, model=None):
super().__init__(identity=identity,
type_uri=Analysis.TYPE_URI)
self.fitted_model = sbol3.ReferencedObject(self,
Analysis.MODEL_URI,
0, 1,
initial_value=model)
self.data_sheet = sbol3.OwnedObject(self,
Analysis.DATA_SHEET_URI,
0, 1,
type_constraint=DataSheet)
# Register the constructor with the parser
sbol3.Document.register_builder(DataSheet.TYPE_URI, DataSheet)
sbol3.Document.register_builder(Analysis.TYPE_URI, Analysis)
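# --- Hedged usage sketch (editor addition, not part of the original module) ---
# The namespace, identity, and rate below are illustrative assumptions only.
if __name__ == '__main__':
    sbol3.set_namespace('http://example.org/demo')
    analysis = Analysis(identity='analysis1')
    analysis.data_sheet = DataSheet(rate=2.5)
    doc = sbol3.Document()
    doc.add(analysis)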
|
1695598
|
from .exchangeability import exch_test
from .goodness_of_fit import gof_copula
from .radial_symmetry import rad_sym_test
|
1695636
|
import logging
import os
import asyncio
import aioredis
import uvicorn
from starlette.applications import Starlette
from starlette.middleware.gzip import GZipMiddleware
from starlette.responses import HTMLResponse, PlainTextResponse, RedirectResponse, UJSONResponse
from starlette.schemas import SchemaGenerator, OpenAPIResponse
from starlette.staticfiles import StaticFiles
logger = logging.getLogger()
logger.setLevel(logging.INFO)
loop = asyncio.get_event_loop()
editors = [
'emacs',
'vim',
'vscode',
'sublimetext',
]
app = Starlette(debug=('DEV' in os.environ), template_directory='templates')
app.schema_generator = SchemaGenerator(
{"openapi": "3.0.0", "info": {"title": "Leaderboard", "version": "1.0"}}
)
app.template_env.auto_reload = 'DEV' in os.environ
app.mount('/static', StaticFiles(directory="static"))
redis = None
@app.on_event('startup')
async def create_pool():
global redis
redis = await aioredis.create_redis_pool(
'redis://redis:{}'.format(os.environ.get('PORT', 3306)),
minsize=0,
maxsize=10,
loop=loop,
encoding='utf-8')
@app.on_event('shutdown')
async def close_pool():
redis.close()
await redis.wait_closed()
@app.route('/')
async def index(request):
return HTMLResponse(app.get_template('index.html').render(
request=request,
editors=editors,
votes=await redis.hgetall('editors')))
@app.route('/vote')
async def get_board(request):
return UJSONResponse({
'status': 'ok',
'users': await redis.hgetall('editors'),
})
@app.route('/vote/{editor}/minus')
async def down_vote(request):
editor = request.path_params.get('editor')
val = await redis.hincrby('editors', editor, -1)
if val < 0:
await redis.hset('editors', editor, 0)
return RedirectResponse(url='/')
@app.route('/vote/{editor}/plus')
async def up_vote(request):
await redis.hincrby(
'editors', request.path_params.get('editor'))
return RedirectResponse(url='/')
# Swagger spec
@app.route("/schema", methods=["GET"], include_in_schema=False)
def schema(request):
return OpenAPIResponse(app.schema)
|
1695699
|
import logging
from ..utils import json
from .basic import BasicChatbaseObject
logger = logging.getLogger(f'chatbase.{__name__}')
class Click(BasicChatbaseObject):
def __init__(self, api_key, url, platform, user_id=None, version=None, session=None):
"""
:param api_key: the Chatbase ID of the bot
:type api_key: str
:param url: The full URL to redirect to.
:type url: str
:param user_id: the id of the user who clicked the link
:type user_id: str
:param platform: valid values "Facebook", "SMS", "Web", "Android", "iOS", "Actions", "Alexa", "Cortana", "Kik",
"Skype", "Twitter", "Viber", "Telegram", "Slack", "WhatsApp", "WeChat", "Line", "Kakao"
or a custom name like "Workplace" or "OurPlatform"
:type platform: str
:param version: set for user and bot messages the version of the bot processing the message
:type version: str
"""
# aiohttp
self.session = session
# required
self.api_key = api_key
self.url = url
self.platform = platform
# optional
self.user_id = str(user_id) if user_id else None
self.version = str(version) if version else None
        self._api_url = "https://chatbase.com/api/click"
def to_json(self):
""" Return a JSON version for use with the Chatbase API """
data = {
'api_key': self.api_key,
'url': self.url,
'platform': self.platform,
'user_id': self.user_id or '',
'version': self.version or '',
}
return json.dumps(data)
async def send(self):
result = await self._send(self.session)
if result.get('status') == 200:
return True
|
1695715
|
from algorithms.backtrack import (
add_operators,
permute,
permute_iter,
anagram,
array_sum_combinations,
unique_array_sum_combinations,
combination_sum,
get_factors,
recursive_get_factors,
find_words,
generate_abbreviations,
generate_parenthesis_v1,
generate_parenthesis_v2,
letter_combinations,
palindromic_substrings,
pattern_match,
permute_unique,
permute,
permute_recursive,
subsets_unique,
subsets,
subsets_v2,
)
import unittest
class TestAddOperator(unittest.TestCase):
def test_add_operators(self):
# "123", 6 -> ["1+2+3", "1*2*3"]
s = "123"
target = 6
self.assertEqual(add_operators(s, target), ["1+2+3", "1*2*3"])
# "232", 8 -> ["2*3+2", "2+3*2"]
s = "232"
target = 8
self.assertEqual(add_operators(s, target), ["2+3*2", "2*3+2"])
s = "123045"
target = 3
answer = ['1+2+3*0*4*5',
'1+2+3*0*45',
'1+2-3*0*4*5',
'1+2-3*0*45',
'1-2+3+0-4+5',
'1-2+3-0-4+5',
'1*2+3*0-4+5',
'1*2-3*0-4+5',
'1*23+0-4*5',
'1*23-0-4*5',
'12+3*0-4-5',
'12-3*0-4-5']
self.assertEqual(add_operators(s, target), answer)
class TestPermuteAndAnagram(unittest.TestCase):
def test_permute(self):
perms = ['abc', 'bac', 'bca', 'acb', 'cab', 'cba']
self.assertEqual(perms, permute("abc"))
def test_permute_iter(self):
it = permute_iter("abc")
perms = ['abc', 'bac', 'bca', 'acb', 'cab', 'cba']
for i in range(len(perms)):
self.assertEqual(perms[i], next(it))
def test_angram(self):
self.assertTrue(anagram('apple', 'pleap'))
self.assertFalse(anagram("apple", "cherry"))
class TestArrayCombinationSum(unittest.TestCase):
def test_array_sum_combinations(self):
A = [1, 2, 3, 3]
B = [2, 3, 3, 4]
C = [2, 3, 3, 4]
target = 7
answer = [[1, 2, 4], [1, 3, 3], [1, 3, 3], [1, 3, 3],
[1, 3, 3], [1, 4, 2], [2, 2, 3], [2, 2, 3],
[2, 3, 2], [2, 3, 2], [3, 2, 2], [3, 2, 2]]
answer.sort()
self.assertListEqual(sorted(array_sum_combinations(A, B, C, target)), answer)
def test_unique_array_sum_combinations(self):
A = [1, 2, 3, 3]
B = [2, 3, 3, 4]
C = [2, 3, 3, 4]
target = 7
answer = [(2, 3, 2), (3, 2, 2), (1, 2, 4),
(1, 4, 2), (2, 2, 3), (1, 3, 3)]
answer.sort()
self.assertListEqual(sorted(unique_array_sum_combinations(A, B, C, target)), answer)
class TestCombinationSum(unittest.TestCase):
def check_sum(self, nums, target):
if sum(nums) == target:
return (True, nums)
else:
return (False, nums)
def test_combination_sum(self):
candidates1 = [2, 3, 6, 7]
target1 = 7
answer1 = [
[2, 2, 3],
[7]
]
self.assertEqual(combination_sum(candidates1, target1), answer1)
candidates2 = [2, 3, 5]
target2 = 8
answer2 = [
[2, 2, 2, 2],
[2, 3, 3],
[3, 5]
]
self.assertEqual(combination_sum(candidates2, target2), answer2)
class TestFactorCombinations(unittest.TestCase):
def test_get_factors(self):
target1 = 32
answer1 = [
[2, 16],
[2, 2, 8],
[2, 2, 2, 4],
[2, 2, 2, 2, 2],
[2, 4, 4],
[4, 8]
]
self.assertEqual(sorted(get_factors(target1)), sorted(answer1))
target2 = 12
answer2 = [
[2, 6],
[2, 2, 3],
[3, 4]
]
self.assertEqual(sorted(get_factors(target2)), sorted(answer2))
self.assertEqual(sorted(get_factors(1)), [])
self.assertEqual(sorted(get_factors(37)), [])
def test_recursive_get_factors(self):
target1 = 32
answer1 = [
[2, 16],
[2, 2, 8],
[2, 2, 2, 4],
[2, 2, 2, 2, 2],
[2, 4, 4],
[4, 8]
]
self.assertEqual(sorted(recursive_get_factors(target1)), sorted(answer1))
target2 = 12
answer2 = [
[2, 6],
[2, 2, 3],
[3, 4]
]
self.assertEqual(sorted(recursive_get_factors(target2)), sorted(answer2))
self.assertEqual(sorted(recursive_get_factors(1)), [])
self.assertEqual(sorted(recursive_get_factors(37)), [])
class TestFindWords(unittest.TestCase):
def test_normal(self):
board = [
['o', 'a', 'a', 'n'],
['e', 't', 'a', 'e'],
['i', 'h', 'k', 'r'],
['i', 'f', 'l', 'v']
]
words = ["oath", "pea", "eat", "rain"]
self.assertEqual(find_words(board, words).sort(),
['oath', 'eat'].sort())
def test_none(self):
board = [
['o', 'a', 'a', 'n'],
['e', 't', 'a', 'e'],
['i', 'h', 'k', 'r'],
['i', 'f', 'l', 'v']
]
words = ["chicken", "nugget", "hello", "world"]
self.assertEqual(find_words(board, words), [])
def test_empty(self):
board = []
words = []
self.assertEqual(find_words(board, words), [])
def test_uneven(self):
board = [
['o', 'a', 'a', 'n'],
['e', 't', 'a', 'e']
]
words = ["oath", "pea", "eat", "rain"]
self.assertEqual(find_words(board, words), ['eat'])
def test_repeat(self):
board = [
['a', 'a', 'a'],
['a', 'a', 'a'],
['a', 'a', 'a']
]
words = ["a", "aa", "aaa", "aaaa", "aaaaa"]
self.assertTrue(len(find_words(board, words)) == 5)
class TestGenerateAbbreviations(unittest.TestCase):
def test_generate_abbreviations(self):
word1 = "word"
answer1 = ['word', 'wor1', 'wo1d', 'wo2', 'w1rd', 'w1r1', 'w2d', 'w3',
'1ord', '1or1', '1o1d', '1o2', '2rd', '2r1', '3d', '4']
self.assertEqual(sorted(generate_abbreviations(word1)), sorted(answer1))
word2 = "hello"
answer2 = ['hello', 'hell1', 'hel1o', 'hel2', 'he1lo', 'he1l1', 'he2o',
'he3', 'h1llo', 'h1ll1', 'h1l1o', 'h1l2', 'h2lo', 'h2l1', 'h3o', 'h4',
'1ello', '1ell1', '1el1o', '1el2', '1e1lo', '1e1l1', '1e2o', '1e3',
'2llo', '2ll1', '2l1o', '2l2', '3lo', '3l1', '4o', '5']
self.assertEqual(sorted(generate_abbreviations(word2)), sorted(answer2))
class TestPatternMatch(unittest.TestCase):
def test_pattern_match(self):
pattern1 = "abab"
string1 = "redblueredblue"
pattern2 = "aaaa"
string2 = "asdasdasdasd"
pattern3 = "aabb"
string3 = "xyzabcxzyabc"
self.assertTrue(pattern_match(pattern1, string1))
self.assertTrue(pattern_match(pattern2, string2))
self.assertFalse(pattern_match(pattern3, string3))
class TestGenerateParenthesis(unittest.TestCase):
def test_generate_parenthesis(self):
self.assertEqual(generate_parenthesis_v1(2), ['()()', '(())'])
self.assertEqual(generate_parenthesis_v1(3), ['()()()', '()(())', '(())()', '(()())', '((()))'])
self.assertEqual(generate_parenthesis_v2(2), ['(())', '()()'])
self.assertEqual(generate_parenthesis_v2(3), ['((()))', '(()())', '(())()', '()(())', '()()()'])
class TestLetterCombinations(unittest.TestCase):
def test_letter_combinations(self):
digit1 = "23"
answer1 = ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"]
self.assertEqual(sorted(letter_combinations(digit1)), sorted(answer1))
digit2 = "34"
answer2 = ['dg', 'dh', 'di', 'eg', 'eh', 'ei', 'fg', 'fh', 'fi']
self.assertEqual(sorted(letter_combinations(digit2)), sorted(answer2))
class TestPalindromicSubstrings(unittest.TestCase):
def test_palindromic_substrings(self):
string1 = "abc"
answer1 = [['a', 'b', 'c']]
self.assertEqual(palindromic_substrings(string1), sorted(answer1))
string2 = "abcba"
answer2 = [['abcba'], ['a', 'bcb', 'a'], ['a', 'b', 'c', 'b', 'a']]
self.assertEqual(sorted(palindromic_substrings(string2)), sorted(answer2))
string3 = "abcccba"
answer3 = [['abcccba'], ['a', 'bcccb', 'a'], ['a', 'b', 'ccc', 'b', 'a'],
['a', 'b', 'cc', 'c', 'b', 'a'], ['a', 'b', 'c', 'cc', 'b', 'a'],
['a', 'b', 'c', 'c', 'c', 'b', 'a']]
self.assertEqual(sorted(palindromic_substrings(string3)), sorted(answer3))
class TestPermuteUnique(unittest.TestCase):
def test_permute_unique(self):
nums1 = [1, 1, 2]
answer1 = [[2, 1, 1], [1, 2, 1], [1, 1, 2]]
self.assertEqual(sorted(permute_unique(nums1)), sorted(answer1))
nums2 = [1, 2, 1, 3]
answer2 = [[3, 1, 2, 1], [1, 3, 2, 1], [1, 2, 3, 1], [1, 2, 1, 3], [3, 2, 1, 1],
[2, 3, 1, 1], [2, 1, 3, 1], [2, 1, 1, 3], [3, 1, 1, 2], [1, 3, 1, 2], [1, 1, 3, 2], [1, 1, 2, 3]]
self.assertEqual(sorted(permute_unique(nums2)), sorted(answer2))
nums3 = [1, 2, 3]
answer3 = [[3, 2, 1], [2, 3, 1], [2, 1, 3], [3, 1, 2], [1, 3, 2], [1, 2, 3]]
self.assertEqual(sorted(permute_unique(nums3)), sorted(answer3))
class TestPermute(unittest.TestCase):
def test_permute(self):
nums1 = [1, 2, 3, 4]
answer1 = [[1, 2, 3, 4], [2, 1, 3, 4], [2, 3, 1, 4], [2, 3, 4, 1], [1, 3, 2, 4],
[3, 1, 2, 4], [3, 2, 1, 4], [3, 2, 4, 1], [1, 3, 4, 2], [3, 1, 4, 2],
[3, 4, 1, 2], [3, 4, 2, 1], [1, 2, 4, 3], [2, 1, 4, 3], [2, 4, 1, 3],
[2, 4, 3, 1], [1, 4, 2, 3], [4, 1, 2, 3], [4, 2, 1, 3], [4, 2, 3, 1],
[1, 4, 3, 2], [4, 1, 3, 2], [4, 3, 1, 2], [4, 3, 2, 1]]
self.assertEqual(sorted(permute(nums1)), sorted(answer1))
nums2 = [1, 2, 3]
answer2 = [[3, 2, 1], [2, 3, 1], [2, 1, 3], [3, 1, 2], [1, 3, 2], [1, 2, 3]]
self.assertEqual(sorted(permute(nums2)), sorted(answer2))
def test_permute_recursive(self):
nums1 = [1, 2, 3, 4]
answer1 = [[1, 2, 3, 4], [2, 1, 3, 4], [2, 3, 1, 4], [2, 3, 4, 1], [1, 3, 2, 4],
[3, 1, 2, 4], [3, 2, 1, 4], [3, 2, 4, 1], [1, 3, 4, 2], [3, 1, 4, 2],
[3, 4, 1, 2], [3, 4, 2, 1], [1, 2, 4, 3], [2, 1, 4, 3], [2, 4, 1, 3],
[2, 4, 3, 1], [1, 4, 2, 3], [4, 1, 2, 3], [4, 2, 1, 3], [4, 2, 3, 1],
[1, 4, 3, 2], [4, 1, 3, 2], [4, 3, 1, 2], [4, 3, 2, 1]]
self.assertEqual(sorted(permute_recursive(nums1)), sorted(answer1))
nums2 = [1, 2, 3]
answer2 = [[3, 2, 1], [2, 3, 1], [2, 1, 3], [3, 1, 2], [1, 3, 2], [1, 2, 3]]
self.assertEqual(sorted(permute_recursive(nums2)), sorted(answer2))
class TestSubsetsUnique(unittest.TestCase):
def test_subsets_unique(self):
nums1 = [1, 2, 2]
answer1 = [(1, 2), (1,), (1, 2, 2), (2,), (), (2, 2)]
self.assertEqual(sorted(subsets_unique(nums1)), sorted(answer1))
nums2 = [1, 2, 3, 4]
answer2 = [(1, 2), (1, 3), (1, 2, 3, 4), (1,), (2,), (3,), (1, 4), (1, 2, 3),
(4,), (), (2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4), (3, 4), (2, 4)]
self.assertEqual(sorted(subsets_unique(nums2)), sorted(answer2))
class TestSubsets(unittest.TestCase):
def test_subsets(self):
nums1 = [1, 2, 3]
answer1 = [[1, 2, 3], [1, 2], [1, 3], [1], [2, 3], [2], [3], []]
self.assertEqual(sorted(subsets(nums1)), sorted(answer1))
nums2 = [1, 2, 3, 4]
answer2 = [[1, 2, 3, 4], [1, 2, 3], [1, 2, 4], [1, 2], [1, 3, 4],
[1, 3], [1, 4], [1], [2, 3, 4], [2, 3], [2, 4], [2], [3, 4], [3], [4], []]
self.assertEqual(sorted(subsets(nums2)), sorted(answer2))
def test_subsets_v2(self):
nums1 = [1, 2, 3]
answer1 = [[1, 2, 3], [1, 2], [1, 3], [1], [2, 3], [2], [3], []]
self.assertEqual(sorted(subsets_v2(nums1)), sorted(answer1))
nums2 = [1, 2, 3, 4]
answer2 = [[1, 2, 3, 4], [1, 2, 3], [1, 2, 4], [1, 2], [1, 3, 4],
[1, 3], [1, 4], [1], [2, 3, 4], [2, 3], [2, 4], [2], [3, 4], [3], [4], []]
self.assertEqual(sorted(subsets_v2(nums2)), sorted(answer2))
if __name__ == '__main__':
unittest.main()
|
1695733
|
import subprocess
import sublime, sublime_plugin
import re
# RailsCopyMigrationVersion
# ========
#
# A Sublime Text 2 plugin that provides a `copy_migration_version` command to copy the migration version of the current migration file.
#
class CopyMigrationVersionCommand(sublime_plugin.TextCommand):
def run(self, edit):
        match = re.search(r'\d{14}', self.view.file_name())
if match:
version = match.group()
sublime.set_clipboard(version)
sublime.status_message("copied: %s" % version)
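# --- Hedged example (editor addition, not part of the plugin) ---
# Rails migration filenames embed a 14-digit timestamp, e.g.
# "db/migrate/20140102030405_create_users.rb"; the regex above copies
# "20140102030405" to the clipboard.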
|
1695757
|
from __future__ import absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy as _
from dynamic_search.classes import SearchModel
from .permissions import permission_metadata_type_view
metadata_type_search = SearchModel(
app_label='metadata', model_name='MetadataType',
permission=permission_metadata_type_view,
serializer_string='metadata.serializers.MetadataTypeSerializer'
)
metadata_type_search.add_model_field(
field='name', label=_('Name')
)
metadata_type_search.add_model_field(
field='label', label=_('Label')
)
metadata_type_search.add_model_field(
field='default', label=_('Default')
)
metadata_type_search.add_model_field(
field='lookup', label=_('Lookup')
)
metadata_type_search.add_model_field(
field='validation', label=_('Validator')
)
metadata_type_search.add_model_field(
field='parser', label=_('Parser')
)
|
1695811
|
import numpy as np
from pygesture.pipeline import PipelineBlock
class FeatureExtractor(PipelineBlock):
def __init__(self, features, n_channels):
super(FeatureExtractor, self).__init__()
self.features = features
self.n_channels = n_channels
self.n_features = n_channels*sum(
[f.dim_per_channel for f in self.features])
self.output = np.zeros(self.n_channels*self.n_features)
def process(self, data):
# TODO use pre-allocated output array instead of hstack
return np.hstack([f.compute(data) for f in self.features])
def __repr__(self):
return "%s.%s(%s)" % (
self.__class__.__module__,
self.__class__.__name__,
str([str(f) for f in self.features])
)
class Feature(object):
def __repr__(self):
return "%s.%s()" % (
self.__class__.__module__,
self.__class__.__name__
)
class MAV(Feature):
"""
Calculates the mean absolute value of a signal.
"""
def __init__(self):
self.dim_per_channel = 1
def compute(self, x):
y = np.mean(np.absolute(x), axis=0)
return y
class WL(Feature):
"""
Calculates the waveform length of a signal. Waveform length is just the
sum of the absolute value of all deltas (between adjacent taps) of a
signal.
"""
def __init__(self):
self.dim_per_channel = 1
def compute(self, x):
y = np.sum(np.absolute(np.diff(x, axis=0)), axis=0)
return y
class ZC(Feature):
"""
Calculates the number of zero crossings in a signal, subject to a threshold
for discarding noisy fluctuations above and below zero.
Parameters
----------
thresh : float (default=0.0)
The threshold for discriminating true zero crossings from those caused
by noise.
use_sm : bool (default=False)
Specifies if spectral moments should be used for the computation. This
is much faster, but the threshold is not taken into account, making it
potentially affected by noise.
"""
def __init__(self, thresh=0.0, use_sm=False):
self.dim_per_channel = 1
self.thresh = thresh
self.use_sm = use_sm
def compute(self, x):
if self.use_sm:
y = np.sqrt(
SpectralMoment(2).compute(x) / SpectralMoment(0).compute(x))
else:
xrows, xcols = x.shape
y = np.zeros(xcols)
for i in range(xcols):
for j in range(1, xrows):
if ((x[j, i] > 0 and x[j-1, i] < 0) or
(x[j, i] < 0 and x[j-1, i] > 0)):
if np.absolute(x[j, i] - x[j-1, i]) > self.thresh:
y[i] += 1
return y
class SSC(Feature):
"""
Calculates the number of slope sign changes in a signal, subject to a
threshold for discarding noisy fluctuations.
Parameters
----------
thresh : float (default=0.0)
The threshold for discriminating true slope sign changes from those
caused by noise.
    use_sm : bool (default=False)
Specifies if spectral moments should be used for the computation. This
is much faster, but the threshold is not taken into account, making it
potentially affected by noise.
"""
def __init__(self, thresh=0.0, use_sm=False):
self.dim_per_channel = 1
self.thresh = thresh
self.use_sm = use_sm
def compute(self, x):
if self.use_sm:
y = np.sqrt(
SpectralMoment(4).compute(x) / SpectralMoment(2).compute(x))
else:
xrows, xcols = x.shape
y = np.zeros(xcols)
for i in range(xcols):
for j in range(1, xrows-1):
if ((x[j, i] > x[j-1, i] and x[j, i] > x[j+1, i]) or
(x[j, i] < x[j-1, i] and x[j, i] < x[j+1, i])):
if (np.absolute(x[j, i]-x[j-1, i]) > self.thresh or
np.absolute(x[j, i]-x[j+1, i]) > self.thresh):
y[i] += 1
return y
class SpectralMoment(Feature):
"""
Calculates the nth-order spectral moment.
Parameters
----------
n : int
The spectral moment order. Should be even and greater than or equal to
zero.
"""
def __init__(self, n):
self.dim_per_channel = 1
self.n = n
def compute(self, x):
xrows, xcols = x.shape
y = np.zeros(xcols)
if self.n % 2 != 0:
return y
# special case, zeroth order moment is just the power
if self.n == 0:
y = np.sum(np.multiply(x, x), axis=0)
else:
y = SpectralMoment(0).compute(np.diff(x, int(self.n/2), axis=0))
return y
class KhushabaSet(Feature):
"""
    Calculates a set of 5 features introduced by Khushaba et al. at ISCIT 2012.
(see reference [1]). They are:
1. log of the 0th order spectral moment
2. log of normalized 2nd order spectral moment (m2 / m0^u)
3. log of normalized 4th order spectral moment (m4 / m0^(u+2))
4. log of the sparseness (see paper)
5. log of the irregularity factor / waveform length (see paper)
Parameters
----------
u : int (default=0)
        Used in the exponent of m0 for normalizing higher-order moments
References
----------
.. [1] `<NAME>, <NAME>, and <NAME>, "Time-dependent spectral
features for limb position invariant myoelectric pattern recognition,"
Communications and Information Technologies (ISCIT), 2012 International
Symposium on, 2012.`
"""
def __init__(self, u=0):
self.dim_per_channel = 5
self.u = u
def compute(self, x):
xrows, xcols = x.shape
# TODO fill this instead of using hstack
# y = np.zeros(self.dim_per_channel*xcols)
m0 = SpectralMoment(0).compute(x)
m2 = SpectralMoment(2).compute(x)
m4 = SpectralMoment(4).compute(x)
S = m0 / np.sqrt(np.abs((m0-m2)*(m0-m4)))
IF = np.sqrt(m2**2 / (m0*m4))
return np.hstack((
np.log(m0),
np.log(m2 / m0**2),
np.log(m4 / m0**4),
np.log(S),
np.log(IF / WL().compute(x))))
class SampEn(Feature):
"""
Calculates the sample entropy of time series data. See reference [1].
The basic idea is to take all possible m-length subsequences of the
time series and count the number of these subsequences whose Chebyshev
distance from all other subsequences is less than the tolerance parameter,
r (self-matches excluded). This is repeated for (m+1)-length subsequences,
and SampEn is given by the log of the number of m-length matches divided
by the number of (m+1)-length matches.
This feature can have some issues if the tolerance r is too low and/or the
subsequence length m is too high. A typical value for r is apparently
0.2*std(x).
Parameters
----------
m : int
Length of sequences to compare (>1)
r : float
Tolerance for counting matches.
References
----------
.. [1] `<NAME> and <NAME>, "Physiological time series
analysis using approximate entropy and sample entropy," American
Journal of Physiology -- Heart and Circulatory Physiology, vol. 278
no. 6, 2000.`
"""
def __init__(self, m, r):
self.dim_per_channel = 1
self.m = m
self.r = r
def compute(self, x):
xrows, xcols = x.shape
y = np.zeros(xcols)
m = self.m
N = xrows
for c in range(xcols):
correl = np.zeros(2) + np.finfo(np.float).eps
xmat = np.zeros((m+1, N-m+1))
for i in range(m):
xmat[i, :] = x[i:N-m+i+1, c]
# handle last row separately
xmat[m, :-1] = x[m:N, c]
xmat[-1, -1] = 10*np.max(xmat) # something that won't get matched
for mc in [m, m+1]:
count = 0
for i in range(N-mc-1):
dist = np.max(
np.abs(xmat[:mc, i+1:] - xmat[:mc, i][:, np.newaxis]),
axis=0)
count += np.sum(dist <= self.r)
correl[mc-m] = count
y[c] = np.log(correl[0] / correl[1])
return y
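# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Assumes a windowed multichannel signal shaped (samples, channels).
if __name__ == '__main__':
    window = np.random.randn(100, 4)                    # 100 samples, 4 channels
    extractor = FeatureExtractor([MAV(), WL()], n_channels=4)
    out = extractor.process(window)
    assert out.shape == (8,)                            # one MAV and one WL value per channel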
|
1695816
|
import datetime
import decimal
import unittest
from typing import Sequence
from yabc import basis
from yabc import coinpool
from yabc import transaction
from yabc.formats import bybit
class BybitXLSXTest(unittest.TestCase):
def setUp(self) -> None:
bybit_parser = bybit.BybitPNLParser(
open("testdata/bybit/assets_history_account.xlsx", "br")
)
self.txs = list(bybit_parser) # type: Sequence[transaction.Transaction]
bp = basis.BasisProcessor(coinpool.PoolMethod.FIFO, self.txs)
self.reports = bp.process()
bybit_parser.cleanup()
def test_reports(self):
self.assertEqual(len(self.reports), 2)
r1 = self.reports[0]
gain = 33
self.assertEqual(r1.get_gain_or_loss(), gain)
self.assertEqual(r1.basis, 0)
self.assertEqual(r1.proceeds, gain)
loss = 4
r2 = self.reports[1]
self.assertEqual(r2.get_gain_or_loss(), -4)
self.assertEqual(r2.basis, loss)
self.assertEqual(r2.proceeds, 0)
for r in self.reports:
self.assertIn("BTCUSD perpetual", r.description())
def test_bybit_gain(self):
self.assertEqual(len(self.txs), 2)
gain = self.txs[0]
self.assertEqual(gain.date.date(), datetime.date(2019, 3, 2))
self.assertEqual(gain.operation, transaction.Operation.PERPETUAL_PNL)
self.assertEqual(gain.quantity_received, decimal.Decimal("0.00839186"))
self.assertEqual(gain.quantity_traded, 0)
self.assertEqual(gain.symbol_traded, "BTCUSD")
def test_bybit_loss(self):
self.assertEqual(len(self.txs), 2)
loss = self.txs[1]
self.assertEqual(loss.date.date(), datetime.date(2019, 10, 10))
self.assertEqual(loss.operation, transaction.Operation.PERPETUAL_PNL)
self.assertEqual(loss.quantity_received, decimal.Decimal("-0.00047"))
self.assertEqual(loss.quantity_traded, 0)
self.assertEqual(loss.symbol_traded, "BTCUSD")
|
1695835
|
import torch
from torch import nn
import torchvision.models
from torchvision.models.resnet import BasicBlock, Bottleneck
import argparse
import numpy as np
import json
import sys
from types import SimpleNamespace
from cdfsl import make_cdfsl_loader
from core import EvaluateFewShot
from datasets import ImagenetBasedDataset, MiniImageNet
from hebb import stage_two, hebb_rule
from res12 import resnet12
from res10 import res10
from models import conv64
from utils import prepare_meta_batch, make_task_loader
def basic_block_forward(layer, x):
identity = x[-1]
x.append(layer.conv1(x[-1]))
x.append(layer.bn1(x[-1]))
x.append(layer.relu(x[-1]))
x.append(layer.conv2(x[-1]))
x.append(layer.bn2(x[-1]))
if layer.downsample is not None:
identity = layer.downsample(identity)
x.append(x[-1] + identity)
#print('basic_block_forward', len(x)-1)
x.append(layer.relu(x[-1]))
return x
def bottleneck_forward(layer, x):
identity = x[-1]
x.append(layer.conv1(x[-1]))
x.append(layer.bn1(x[-1]))
x.append(layer.relu(x[-1]))
x.append(layer.conv2(x[-1]))
x.append(layer.bn2(x[-1]))
x.append(layer.relu(x[-1]))
x.append(layer.conv3(x[-1]))
x.append(layer.bn3(x[-1]))
if layer.downsample is not None:
identity = layer.downsample(identity)
x.append(x[-1] + identity)
x.append(layer.relu(x[-1]))
return x
def recursive_forward(module, x):
if isinstance(module, BasicBlock):
x = basic_block_forward(module, x)
return x
elif isinstance(module, Bottleneck):
x = bottleneck_forward(module, x)
return x
elif isinstance(module, nn.AdaptiveAvgPool2d):
x.append(module.forward(x[-1]).flatten(1))
return x
elif hasattr(module, '_modules') and module._modules:
for m in module._modules.values():
x = recursive_forward(m, x)
return x
else:
x.append(module.forward(x[-1]))
return x
class ModelWrapper(nn.Module):
def __init__(self, embed):
super(ModelWrapper, self).__init__()
self.embed = embed
self.feature_index = [-1]
def forward(self, x, output_layer=True):
self.x = [x]
self.x = recursive_forward(self.embed, self.x)
self.features = [self.x[fi].flatten(1) for fi in self.feature_index]
#[print(f.shape) for f in self.features]
#exit()
return self.x[-1]
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
parser.add_argument('config', type=str)
parser.add_argument('--dataset', type=str)
parser.add_argument('--n', type=int)
parser.add_argument('--k', type=int)
parser.add_argument('--q', type=int)
parser.add_argument('--eval-batches', type=int)
parser.add_argument('--gpu', type=int, nargs='+')
parser.add_argument('--num-workers', type=int)
parser.add_argument('--hebb-lr', type=float)
parser.add_argument('--inner-val-steps', type=int)
parser.add_argument('--meta-batch-size', type=int)
parser.add_argument('--seed', type=int)
parser.add_argument('--feature-index', type=int, nargs='+')
# res18 ablation: -1 59 52 45 38 31
args = parser.parse_args()
with open(args.config) as f:
config = json.load(f)
# override config with cmd line args
config.update(vars(args))
args = SimpleNamespace(**config)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
assert(torch.cuda.is_available())
device = torch.device(args.gpu[0])
eval_few_shot_args = {
'num_tasks': args.eval_batches,
'n_shot': args.n,
'k_way': args.k,
'q_queries': args.q,
'prepare_batch': prepare_meta_batch(
args.n, args.k, args.q, args.meta_batch_size, 2, device),
'inner_train_steps': args.inner_val_steps,
'hebb_lr': args.hebb_lr,
'device': device,
'xdom': hasattr(args, 'dataset') and args.dataset not in ('mini', 'tier'),
}
model = torchvision.models.resnet18(pretrained=True)
#model = torchvision.models.resnet34(pretrained=True)
#model = torchvision.models.resnet50(pretrained=True)
#model = torchvision.models.resnet101(pretrained=True)
#model = torchvision.models.resnet152(pretrained=True)
#model_orig = model # FIXME integrity check
model = ModelWrapper(model)
model.feature_index = args.feature_index #[-1, -2, -3, -8]
model = nn.DataParallel(model, device_ids=args.gpu)
model = model.to(device, dtype=torch.double)
model.eval()
# FIXME integrity check
#model_orig = model_orig.to(device, dtype=torch.double)
#x = torch.rand(1, 3, 224, 244).to(device, dtype=torch.double)
#print((model(x)-model_orig(x)).sum())
#exit()
if (not hasattr(args, 'dataset')) or args.dataset == 'mini':
#test_loader = make_task_loader(MiniImageNet('test', small=False),
# args, train=False, meta=True)
test_loader = make_task_loader(ImagenetBasedDataset('test', small=False),
args, train=False, meta=True)
elif args.dataset == 'tier':
test_loader = make_task_loader(ImagenetBasedDataset('test', small=False, tier=True),
args, train=False, meta=True)
else:
test_loader = make_cdfsl_loader(args.dataset,
args.eval_batches,
args.n,
args.k,
args.q,
small=False)
loss_fn = nn.CrossEntropyLoss().to(device)
evaluator = EvaluateFewShot(eval_fn=hebb_rule,
taskloader=test_loader,
**eval_few_shot_args)
#logs = {'dummy': 0} # it's important to have logs be non-empty
logs = {
'dataset': args.dataset if hasattr(args, 'dataset') else 'miniImagenet',
'feature_index': args.feature_index,
}
evaluator.model = {'sys1': model}
evaluator.optimiser = None
evaluator.loss_fn = loss_fn
evaluator.on_epoch_end(0, logs)
print(logs)
feature_index = 'ensemble' if len(args.feature_index) > 1 else args.feature_index
print(f'res18,{args.dataset},{args.n},{feature_index},{logs[evaluator.metric_name]}')
|
1695838
|
import os
import sys
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..'))
from utils.tqdm_op import tqdm_range
import numpy as np
import copy
def np_softmax(x):
'''
Args:
x - Numpy 2D array
'''
x_softmax = np.zeros_like(x)
ndata, nfeature = x.shape
for idx in range(ndata):
tmp_max = np.max(x[idx])
tmp_exp = np.exp(x[idx] - tmp_max)
x_softmax[idx] = tmp_exp/np.sum(tmp_exp)
return x_softmax
def get_ginni_variance_conti(array):
''' FactorVAE https://arxiv.org/pdf/1802.05983.pdf
Args:
array - Numpy 1D array
'''
ndata = array.shape[0]
return ndata/(ndata-1)*np.var(array)
def get_ginni_variance_discrete(array):
''' FactorVAE https://arxiv.org/pdf/1802.05983.pdf
Args: array - Numpy 1D array, argmax index
'''
array = array.astype(int)
ndata = array.shape[0]
count = np.zeros([np.max(array)+1])
for idx in range(ndata): count[array[idx]]+=1
count = count.astype(float)
return (ndata*ndata - np.sum(np.square(count)))/(2*ndata*(ndata-1))
def zero_padding2nmul(inputs, mul):
'''Add zero padding to inputs to be multiple of mul
Args:
inputs - np array
mul - int
Return:
np array (inputs + zero_padding)
int original input size
'''
input_shape = list(inputs.shape)
ndata = input_shape[0]
if ndata%mul==0: return inputs, ndata
input_shape[0] = mul-ndata%mul
return np.concatenate([inputs, np.zeros(input_shape)], axis=0), ndata
def np_random_crop_4d(imgs, size):
'''
Args:
imgs - 4d image NHWC
size - list (rh, rw)
'''
rh, rw = size
on, oh, ow, oc = imgs.shape
cropped_imgs = np.zeros([on, rh, rw, oc])
ch = np.random.randint(low=0, high=oh-rh, size=on)
cw = np.random.randint(low=0, high=ow-rw, size=on)
for idx in range(on): cropped_imgs[idx] = imgs[idx,ch[idx]:ch[idx]+rh,cw[idx]:cw[idx]+rw]
return cropped_imgs
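# --- Hedged usage sketch (editor addition, not part of the original module) ---
if __name__ == '__main__':
    logits = np.array([[1.0, 2.0, 3.0], [0.0, 0.0, 0.0]])
    assert np.allclose(np_softmax(logits).sum(axis=1), 1.0)    # each row is a distribution
    padded, orig_n = zero_padding2nmul(np.ones([5, 3]), mul=4)
    assert padded.shape == (8, 3) and orig_n == 5              # padded up to a multiple of 4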
|
1695842
|
from django.apps import AppConfig
class CutoutConfig(AppConfig):
name = 'daiquiri.cutout'
label = 'daiquiri_cutout'
verbose_name = 'Cutout'
|
1695853
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from . import resnet, resnext
try:
from lib.nn import SynchronizedBatchNorm2d
except ImportError:
from torch.nn import BatchNorm2d as SynchronizedBatchNorm2d
class SegmentationModuleBase(nn.Module):
def __init__(self):
super(SegmentationModuleBase, self).__init__()
@staticmethod
def pixel_acc(pred, label, ignore_index=-1):
_, preds = torch.max(pred, dim=1)
valid = (label != ignore_index).long()
acc_sum = torch.sum(valid * (preds == label).long())
pixel_sum = torch.sum(valid)
acc = acc_sum.float() / (pixel_sum.float() + 1e-10)
return acc
@staticmethod
def part_pixel_acc(pred_part, gt_seg_part, gt_seg_object, object_label, valid):
mask_object = (gt_seg_object == object_label)
_, pred = torch.max(pred_part, dim=1)
acc_sum = mask_object * (pred == gt_seg_part)
acc_sum = torch.sum(acc_sum.view(acc_sum.size(0), -1), dim=1)
acc_sum = torch.sum(acc_sum * valid)
pixel_sum = torch.sum(mask_object.view(mask_object.size(0), -1), dim=1)
pixel_sum = torch.sum(pixel_sum * valid)
return acc_sum, pixel_sum
@staticmethod
def part_loss(pred_part, gt_seg_part, gt_seg_object, object_label, valid):
mask_object = (gt_seg_object == object_label)
loss = F.nll_loss(pred_part, gt_seg_part * mask_object.long(), reduction='none')
loss = loss * mask_object.float()
loss = torch.sum(loss.view(loss.size(0), -1), dim=1)
nr_pixel = torch.sum(mask_object.view(mask_object.shape[0], -1), dim=1)
sum_pixel = (nr_pixel * valid).sum()
loss = (loss * valid.float()).sum() / torch.clamp(sum_pixel, 1).float()
return loss
class SegmentationModule(SegmentationModuleBase):
def __init__(self, net_enc, net_dec, labeldata, loss_scale=None):
super(SegmentationModule, self).__init__()
self.encoder = net_enc
self.decoder = net_dec
self.crit_dict = nn.ModuleDict()
if loss_scale is None:
self.loss_scale = {"object": 1, "part": 0.5, "scene": 0.25, "material": 1}
else:
self.loss_scale = loss_scale
# criterion
self.crit_dict["object"] = nn.NLLLoss(ignore_index=0) # ignore background 0
self.crit_dict["material"] = nn.NLLLoss(ignore_index=0) # ignore background 0
self.crit_dict["scene"] = nn.NLLLoss(ignore_index=-1) # ignore unlabelled -1
# Label data - read from json
self.labeldata = labeldata
object_to_num = {k: v for v, k in enumerate(labeldata['object'])}
part_to_num = {k: v for v, k in enumerate(labeldata['part'])}
self.object_part = {object_to_num[k]:
[part_to_num[p] for p in v]
for k, v in labeldata['object_part'].items()}
self.object_with_part = sorted(self.object_part.keys())
self.decoder.object_part = self.object_part
self.decoder.object_with_part = self.object_with_part
def forward(self, feed_dict, *, seg_size=None):
if seg_size is None: # training
if feed_dict['source_idx'] == 0:
output_switch = {"object": True, "part": True, "scene": True, "material": False}
elif feed_dict['source_idx'] == 1:
output_switch = {"object": False, "part": False, "scene": False, "material": True}
else:
raise ValueError
pred = self.decoder(
self.encoder(feed_dict['img'], return_feature_maps=True),
output_switch=output_switch
)
# loss
loss_dict = {}
if pred['object'] is not None: # object
loss_dict['object'] = self.crit_dict['object'](pred['object'], feed_dict['seg_object'])
if pred['part'] is not None: # part
part_loss = 0
for idx_part, object_label in enumerate(self.object_with_part):
part_loss += self.part_loss(
pred['part'][idx_part], feed_dict['seg_part'],
feed_dict['seg_object'], object_label, feed_dict['valid_part'][:, idx_part])
loss_dict['part'] = part_loss
if pred['scene'] is not None: # scene
loss_dict['scene'] = self.crit_dict['scene'](pred['scene'], feed_dict['scene_label'])
if pred['material'] is not None: # material
loss_dict['material'] = self.crit_dict['material'](pred['material'], feed_dict['seg_material'])
loss_dict['total'] = sum([loss_dict[k] * self.loss_scale[k] for k in loss_dict.keys()])
# metric
metric_dict= {}
if pred['object'] is not None:
metric_dict['object'] = self.pixel_acc(
pred['object'], feed_dict['seg_object'], ignore_index=0)
if pred['material'] is not None:
metric_dict['material'] = self.pixel_acc(
pred['material'], feed_dict['seg_material'], ignore_index=0)
if pred['part'] is not None:
acc_sum, pixel_sum = 0, 0
for idx_part, object_label in enumerate(self.object_with_part):
acc, pixel = self.part_pixel_acc(
pred['part'][idx_part], feed_dict['seg_part'], feed_dict['seg_object'],
object_label, feed_dict['valid_part'][:, idx_part])
acc_sum += acc
pixel_sum += pixel
metric_dict['part'] = acc_sum.float() / (pixel_sum.float() + 1e-10)
if pred['scene'] is not None:
metric_dict['scene'] = self.pixel_acc(
pred['scene'], feed_dict['scene_label'], ignore_index=-1)
return {'metric': metric_dict, 'loss': loss_dict}
else: # inference
output_switch = {"object": True, "part": True, "scene": True, "material": True}
pred = self.decoder(self.encoder(feed_dict['img'], return_feature_maps=True),
output_switch=output_switch, seg_size=seg_size)
return pred
def conv3x3(in_planes, out_planes, stride=1, has_bias=False):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=has_bias)
def conv3x3_bn_relu(in_planes, out_planes, stride=1):
return nn.Sequential(
conv3x3(in_planes, out_planes, stride),
SynchronizedBatchNorm2d(out_planes),
nn.ReLU(inplace=True),
)
class ModelBuilder:
def __init__(self):
pass
# custom weights initialization
@staticmethod
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1.)
m.bias.data.fill_(1e-4)
#elif classname.find('Linear') != -1:
# m.weight.data.normal_(0.0, 0.0001)
def build_encoder(self, arch='resnet50_dilated8', fc_dim=512, weights=''):
pretrained = True if len(weights) == 0 else False
if arch == 'resnet50':
orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
net_encoder = Resnet(orig_resnet)
elif arch == 'resnet101':
orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained)
net_encoder = Resnet(orig_resnet)
elif arch == 'resnext101':
orig_resnext = resnext.__dict__['resnext101'](pretrained=pretrained)
net_encoder = Resnet(orig_resnext) # we can still use class Resnet
else:
raise Exception('Architecture undefined!')
# net_encoder.apply(self.weights_init)
if len(weights) > 0:
# print('Loading weights for net_encoder')
net_encoder.load_state_dict(
torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
return net_encoder
def build_decoder(self, nr_classes,
arch='ppm_bilinear_deepsup', fc_dim=512,
weights='', use_softmax=False):
if arch == 'upernet_lite':
net_decoder = UPerNet(
nr_classes=nr_classes,
fc_dim=fc_dim,
use_softmax=use_softmax,
fpn_dim=256)
elif arch == 'upernet':
net_decoder = UPerNet(
nr_classes=nr_classes,
fc_dim=fc_dim,
use_softmax=use_softmax,
fpn_dim=512)
else:
raise Exception('Architecture undefined!')
net_decoder.apply(self.weights_init)
if len(weights) > 0:
# print('Loading weights for net_decoder')
net_decoder.load_state_dict(
torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
return net_decoder
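# A minimal end-to-end sketch of how ModelBuilder is typically wired together
# (illustrative only; the class counts and fc_dim below are placeholders, not
# values taken from this file):
#   builder = ModelBuilder()
#   encoder = builder.build_encoder(arch='resnet50', fc_dim=2048)
#   decoder = builder.build_decoder(
#       nr_classes={'object': 100, 'part': 50, 'scene': 10, 'material': 8},
#       arch='upernet', fc_dim=2048)
#   feats = encoder(images, return_feature_maps=True)
#   pred = decoder(feats, output_switch={'object': True, 'part': False,
#                                        'scene': True, 'material': False})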
class Resnet(nn.Module):
def __init__(self, orig_resnet):
super(Resnet, self).__init__()
# take pretrained resnet, except AvgPool and FC
self.conv1 = orig_resnet.conv1
self.bn1 = orig_resnet.bn1
self.relu1 = orig_resnet.relu1
self.conv2 = orig_resnet.conv2
self.bn2 = orig_resnet.bn2
self.relu2 = orig_resnet.relu2
self.conv3 = orig_resnet.conv3
self.bn3 = orig_resnet.bn3
self.relu3 = orig_resnet.relu3
self.maxpool = orig_resnet.maxpool
self.layer1 = orig_resnet.layer1
self.layer2 = orig_resnet.layer2
self.layer3 = orig_resnet.layer3
self.layer4 = orig_resnet.layer4
def forward(self, x, return_feature_maps=False):
conv_out = []
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.maxpool(x)
x = self.layer1(x); conv_out.append(x);
x = self.layer2(x); conv_out.append(x);
x = self.layer3(x); conv_out.append(x);
x = self.layer4(x); conv_out.append(x);
if return_feature_maps:
return conv_out
return [x]
# upernet
class UPerNet(nn.Module):
def __init__(self, nr_classes, fc_dim=4096,
use_softmax=False, pool_scales=(1, 2, 3, 6),
fpn_inplanes=(256,512,1024,2048), fpn_dim=256):
# Lazy import so that compilation isn't needed if not being used.
from .prroi_pool import PrRoIPool2D
super(UPerNet, self).__init__()
self.use_softmax = use_softmax
# PPM Module
self.ppm_pooling = []
self.ppm_conv = []
for scale in pool_scales:
# we use the feature map size instead of input image size, so down_scale = 1.0
self.ppm_pooling.append(PrRoIPool2D(scale, scale, 1.))
self.ppm_conv.append(nn.Sequential(
nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
SynchronizedBatchNorm2d(512),
nn.ReLU(inplace=True)
))
self.ppm_pooling = nn.ModuleList(self.ppm_pooling)
self.ppm_conv = nn.ModuleList(self.ppm_conv)
self.ppm_last_conv = conv3x3_bn_relu(fc_dim + len(pool_scales)*512, fpn_dim, 1)
# FPN Module
self.fpn_in = []
for fpn_inplane in fpn_inplanes[:-1]: # skip the top layer
self.fpn_in.append(nn.Sequential(
nn.Conv2d(fpn_inplane, fpn_dim, kernel_size=1, bias=False),
SynchronizedBatchNorm2d(fpn_dim),
nn.ReLU(inplace=True)
))
self.fpn_in = nn.ModuleList(self.fpn_in)
self.fpn_out = []
for i in range(len(fpn_inplanes) - 1): # skip the top layer
self.fpn_out.append(nn.Sequential(
conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
))
self.fpn_out = nn.ModuleList(self.fpn_out)
self.conv_fusion = conv3x3_bn_relu(len(fpn_inplanes) * fpn_dim, fpn_dim, 1)
# background included. if ignore in loss, output channel 0 will not be trained.
self.nr_scene_class, self.nr_object_class, self.nr_part_class, self.nr_material_class = \
nr_classes['scene'], nr_classes['object'], nr_classes['part'], nr_classes['material']
# input: PPM out, input_dim: fpn_dim
self.scene_head = nn.Sequential(
conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(fpn_dim, self.nr_scene_class, kernel_size=1, bias=True)
)
# input: Fusion out, input_dim: fpn_dim
self.object_head = nn.Sequential(
conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
nn.Conv2d(fpn_dim, self.nr_object_class, kernel_size=1, bias=True)
)
# input: Fusion out, input_dim: fpn_dim
self.part_head = nn.Sequential(
conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
nn.Conv2d(fpn_dim, self.nr_part_class, kernel_size=1, bias=True)
)
# input: FPN_2 (P2), input_dim: fpn_dim
self.material_head = nn.Sequential(
conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
nn.Conv2d(fpn_dim, self.nr_material_class, kernel_size=1, bias=True)
)
def forward(self, conv_out, output_switch=None, seg_size=None):
output_dict = {k: None for k in output_switch.keys()}
conv5 = conv_out[-1]
input_size = conv5.size()
roi = [] # fake rois, just used for pooling
for i in range(input_size[0]): # batch size
roi.append(torch.Tensor([i, 0, 0, input_size[3], input_size[2]]).view(1, -1)) # b, x0, y0, x1, y1
roi = torch.cat(roi, dim=0).type_as(conv5)
ppm_out = [conv5]
for pool_scale, pool_conv in zip(self.ppm_pooling, self.ppm_conv):
ppm_out.append(pool_conv(F.interpolate(
pool_scale(conv5, roi.detach()),
(input_size[2], input_size[3]),
mode='bilinear', align_corners=False)))
ppm_out = torch.cat(ppm_out, 1)
f = self.ppm_last_conv(ppm_out)
if output_switch['scene']: # scene
output_dict['scene'] = self.scene_head(f)
if output_switch['object'] or output_switch['part'] or output_switch['material']:
fpn_feature_list = [f]
for i in reversed(range(len(conv_out) - 1)):
conv_x = conv_out[i]
conv_x = self.fpn_in[i](conv_x) # lateral branch
f = F.interpolate(
f, size=conv_x.size()[2:], mode='bilinear', align_corners=False) # top-down branch
f = conv_x + f
fpn_feature_list.append(self.fpn_out[i](f))
fpn_feature_list.reverse() # [P2 - P5]
# material
if output_switch['material']:
output_dict['material'] = self.material_head(fpn_feature_list[0])
if output_switch['object'] or output_switch['part']:
output_size = fpn_feature_list[0].size()[2:]
fusion_list = [fpn_feature_list[0]]
for i in range(1, len(fpn_feature_list)):
fusion_list.append(F.interpolate(
fpn_feature_list[i],
output_size,
mode='bilinear', align_corners=False))
fusion_out = torch.cat(fusion_list, 1)
x = self.conv_fusion(fusion_out)
if output_switch['object']: # object
output_dict['object'] = self.object_head(x)
if output_switch['part']:
output_dict['part'] = self.part_head(x)
if self.use_softmax: # is True during inference
# inference scene
x = output_dict['scene']
x = x.squeeze(3).squeeze(2)
x = F.softmax(x, dim=1)
output_dict['scene'] = x
# inference object, material
for k in ['object', 'material']:
x = output_dict[k]
x = F.interpolate(x, size=seg_size, mode='bilinear', align_corners=False)
x = F.softmax(x, dim=1)
output_dict[k] = x
# inference part
x = output_dict['part']
x = F.interpolate(x, size=seg_size, mode='bilinear', align_corners=False)
part_pred_list, head = [], 0
for idx_part, object_label in enumerate(self.object_with_part):
n_part = len(self.object_part[object_label])
_x = F.interpolate(x[:, head: head + n_part], size=seg_size, mode='bilinear', align_corners=False)
_x = F.softmax(_x, dim=1)
part_pred_list.append(_x)
head += n_part
output_dict['part'] = part_pred_list
else: # Training
# object, scene, material
for k in ['object', 'scene', 'material']:
if output_dict[k] is None:
continue
x = output_dict[k]
x = F.log_softmax(x, dim=1)
if k == "scene": # for scene
x = x.squeeze(3).squeeze(2)
output_dict[k] = x
if output_dict['part'] is not None:
part_pred_list, head = [], 0
for idx_part, object_label in enumerate(self.object_with_part):
n_part = len(self.object_part[object_label])
x = output_dict['part'][:, head: head + n_part]
x = F.log_softmax(x, dim=1)
part_pred_list.append(x)
head += n_part
output_dict['part'] = part_pred_list
return output_dict
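# Inference sketch for the decoder above (illustrative; as in SegmentationModule
# earlier in this file, the part-hierarchy lookups must be attached to the decoder
# before part predictions can be unpacked):
#   decoder.use_softmax = True
#   decoder.object_part = object_part                       # {object_label: [part labels]}
#   decoder.object_with_part = sorted(object_part.keys())
#   pred = decoder(encoder(img, return_feature_maps=True),
#                  output_switch={'object': True, 'part': True,
#                                 'scene': True, 'material': True},
#                  seg_size=(height, width))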
|
1695865
|
from credmark.cmf.model import Model
from credmark.cmf.types import (
Token,
Account,
Accounts,
Portfolio,
NativeToken,
NativePosition,
TokenPosition
)
from credmark.cmf.types.ledger import TokenTransferTable
@Model.describe(
slug="account.portfolio",
version="1.0",
display_name="Account Portfolio",
description="All of the token holdings for an account",
developer="Credmark",
input=Account,
output=Portfolio)
class WalletInfoModel(Model):
def run(self, input: Account) -> Portfolio:
token_addresses = self.context.ledger.get_erc20_transfers(
columns=[TokenTransferTable.Columns.TOKEN_ADDRESS],
where=' '.join(
[f"{TokenTransferTable.Columns.FROM_ADDRESS}='{input.address}'",
"or",
f"{TokenTransferTable.Columns.TO_ADDRESS}='{input.address}'"]))
positions = []
positions.append(
NativePosition(
amount=self.context.web3.eth.get_balance(input.address),
asset=NativeToken()
)
)
for t in list(dict.fromkeys([t['token_address'] for t in token_addresses])):
try:
token = Token(address=t)
balance = float(token.functions.balanceOf(input.address).call())
if balance > 0.0:
positions.append(
TokenPosition(asset=token, amount=balance))
except Exception as _err:
# TODO: currently skip NFTs
pass
return Portfolio(
positions=positions
)
@Model.describe(
slug="account.portfolio-aggregate",
version="1.0",
display_name="Account Portfolios for a list of Accounts",
description="All of the token holdings for an account",
developer="Credmark",
input=Accounts,
output=Portfolio)
class AccountsPortfolio(Model):
def run(self, input: Accounts) -> Portfolio:
token_addresses = []
native_balance = 0.0
for a in input:
token_addresses += self.context.ledger.get_erc20_transfers(
columns=[TokenTransferTable.Columns.TOKEN_ADDRESS],
where=' '.join(
[f"{TokenTransferTable.Columns.FROM_ADDRESS}='{a.address}'",
"or",
f"{TokenTransferTable.Columns.TO_ADDRESS}='{a.address}'"]))
native_balance += self.context.web3.eth.get_balance(a.address)
positions = []
        for t in list(dict.fromkeys([t['token_address'] for t in token_addresses])):
try:
token = Token(address=t)
balance = sum([float(token.functions.balanceOf(a.address).call()) for a in input])
if balance > 0.0:
found = False
for p in positions:
if p.asset.address == token.address:
p.amount += balance
found = True
break
if not found:
positions.append(
TokenPosition(asset=token, amount=balance))
except Exception as _err:
# TODO: currently skip NFTs
pass
positions.append(
NativePosition(
amount=native_balance,
asset=NativeToken()
)
)
return Portfolio(
positions=positions
)
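# Note on the address de-duplication used in both models above: dict.fromkeys()
# drops duplicate token addresses while preserving first-seen order, e.g.
#   list(dict.fromkeys(['0xa', '0xb', '0xa'])) == ['0xa', '0xb']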
|
1695917
|
from logging import Logger
from character import Character
from engine import Engine
class Runner:
def __init__(self, engine: Engine, logger: Logger):
"""Create a new test runner."""
self.engine = engine
self.logger = logger
def run(self, character: Character):
"""Run tests."""
self.logger.debug(f"Running tests for character: {character.name}")
self.engine.load_url(character.url)
self.engine.expect_toast("Connected to VTT Bridge v.*?!")
self._check_left_panel()
self.logger.debug("Moving forwards through tabs")
self._check_combat_tab(character)
self._check_proficiencies_tab(character)
self._check_features_tab(character)
self._check_spells_tab(character)
self._check_equipment_tab(character)
self.logger.debug("Moving backwards through tabs")
self._check_equipment_tab(character)
self._check_spells_tab(character)
self._check_features_tab(character)
self._check_proficiencies_tab(character)
self._check_combat_tab(character)
self.logger.debug("Checking user interaction")
self._check_roll_strength_button()
for spell in character.tested_spells:
self._check_spell_expansion(spell)
def _toggle_visibility(self):
self.logger.debug("Toggling visibility")
self.engine.find(".vtt-toggle-visibility")[0].click()
def _check_left_panel(self):
self.logger.debug("Checking left panel")
ability_score = self.engine.find(".vtt-roll-ability-score")
assert len(ability_score) == 6, "Wrong number of roll ability score buttons"
skill = self.engine.find(".vtt-roll-skill")
assert len(skill) == 18, "Wrong number of roll skill buttons"
saving_throw = self.engine.find(".vtt-roll-saving-throw")
assert len(saving_throw) == 6, "Wrong number of roll saving throw buttons"
def _check_combat_tab(self, c: Character):
self.logger.debug("Checking combat tab")
self.engine.select_tab_by_index(0)
initiative = self.engine.find(".vtt-roll-initiative")
assert len(initiative) == 1, "Wrong number of roll initiative buttons"
attack = self.engine.find(".vtt-attack-with-weapon")
assert (
len(attack) == c.num_total_weapons
), "Wrong number of attack with weapon buttons"
damage = self.engine.find(".vtt-roll-weapon-damage")
assert (
len(damage) == c.num_total_weapons + c.num_versatile_weapons
), "Wrong number of roll weapon damage buttons"
def _check_proficiencies_tab(self, c: Character):
self.logger.debug("Checking proficiencies tab")
self.engine.select_tab_by_index(1)
proficiency = self.engine.find(".vtt-roll-proficiency")
assert (
len(proficiency) == 18 + c.num_tools
), "Wrong number of roll proficiency buttons"
def _check_spells_tab(self, c: Character):
self.logger.debug("Checking spells tab")
self.engine.select_tab_by_index(2)
attack = self.engine.find(".vtt-attack-with-spell")
assert len(attack) == c.num_spells, "Wrong number of attack with spell buttons"
def _check_features_tab(self, c: Character):
self.logger.debug("Checking features tab")
self.engine.select_tab_by_index(3)
feature = self.engine.find(".vtt-use-feature")
assert len(feature) == c.num_features, "Wrong number of use feature buttons"
def _check_equipment_tab(self, c: Character):
self.logger.debug("Checking equipment tab")
self.engine.select_tab_by_index(4)
attack = self.engine.find(".vtt-attack-with-weapon")
assert (
len(attack) == c.num_total_weapons
), "Wrong number of attack with weapon buttons"
damage = self.engine.find(".vtt-roll-weapon-damage")
assert (
len(damage) == c.num_total_weapons + c.num_versatile_weapons
), "Wrong number of roll weapon damage buttons"
def _check_roll_strength_button(self):
self.logger.debug("Checking roll strength button")
self.engine.select_tab_by_index(0)
ability_score = self.engine.find(".vtt-roll-ability-score")
strength = ability_score[0]
strength.click()
self.engine.expect_toast(".*? rolled STR check!")
self.engine.control_click(strength)
self.engine.expect_toast(".*? rolled STR check with advantage!")
self.engine.shift_click(strength)
self.engine.expect_toast(".*? rolled STR check with disadvantage!")
self._toggle_visibility()
strength.click()
self.engine.expect_toast(r".*? rolled STR check! \(hidden\)")
self.engine.control_click(strength)
self.engine.expect_toast(r".*? rolled STR check with advantage! \(hidden\)")
self.engine.shift_click(strength)
self.engine.expect_toast(r".*? rolled STR check with disadvantage! \(hidden\)")
self._toggle_visibility()
def _check_spell_expansion(self, spell: str):
self.logger.debug("Checking spell expansion")
self.engine.select_tab_by_index(2)
pointer_cells = self.engine.find(".spell.pointer td")
for cell in pointer_cells:
if cell.get_attribute("innerText").strip() == spell:
cell.click()
break
else:
raise ValueError(f"Can't find spell: {spell}")
cast = self.engine.find(".vtt-cast-spell")
assert len(cast) == 1, "Wrong number of cast spell buttons"
cast[0].click()
self.engine.expect_toast(f".*? cast {spell}!")
cell.click() # Cleanup expansion
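# Wiring sketch (hypothetical: the Engine and Character constructors shown here are
# placeholders, not the actual signatures defined in engine.py / character.py):
#   import logging
#   runner = Runner(engine=Engine(...), logger=logging.getLogger("vtt-tests"))
#   runner.run(character)   # character: a Character describing the sheet under test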
|
1695925
|
import pytest
from api.base.settings.defaults import API_BASE
from api_tests.requests.mixins import PreprintRequestTestMixin
@pytest.mark.django_db
class TestPreprintProviderWithdrawalRequestList(PreprintRequestTestMixin):
def url(self, provider):
return '/{}providers/preprints/{}/withdraw_requests/'.format(API_BASE, provider._id)
def test_list(self, app, admin, moderator, write_contrib, noncontrib, pre_mod_provider, post_mod_provider, pre_mod_preprint, post_mod_preprint, pre_request, post_request):
# test_no_perms
res = app.get(self.url(pre_mod_provider), auth=admin.auth, expect_errors=True) # preprint admin, not reviews admin
assert res.status_code == 403
res = app.get(self.url(pre_mod_provider), auth=write_contrib.auth, expect_errors=True)
assert res.status_code == 403
res = app.get(self.url(pre_mod_provider), auth=noncontrib.auth, expect_errors=True)
assert res.status_code == 403
res = app.get(self.url(pre_mod_provider), expect_errors=True)
assert res.status_code == 401
# test_moderator_can_view
res = app.get(self.url(pre_mod_provider), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == pre_request._id
res = app.get(self.url(post_mod_provider), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == post_request._id
# test_embed
res = app.get('{}?embed=target'.format(self.url(pre_mod_provider)), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == pre_request._id
assert res.json['data'][0]['embeds']['target']['data']['id'] == pre_mod_preprint._id
res = app.get('{}?embed=target'.format(self.url(post_mod_provider)), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == post_request._id
assert res.json['data'][0]['embeds']['target']['data']['id'] == post_mod_preprint._id
# test_filter
res = app.get('{}?filter[target]={}'.format(self.url(pre_mod_provider), pre_mod_preprint._id), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == pre_request._id
res = app.get('{}?filter[target]={}'.format(self.url(post_mod_provider), post_mod_preprint._id), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == post_request._id
|
1695934
|
from models.discriminators.GeneralDiscriminator import GeneralDiscriminator
import torch.nn as nn
import torch
from utils.architecture_utils import Flatten
class PixelDiscriminator(GeneralDiscriminator):
    def __init__(self, imsize, n_channels_in=3, n_hidden=64, n_layers=3, use_dropout: bool = False,
                 device: str = "cpu", **kwargs):
super(PixelDiscriminator, self).__init__(n_channels_in, device, **kwargs)
        # No normalization layers are used in this discriminator, so the usual
        # "skip the bias when a norm layer follows" rule does not apply; the strided
        # hidden convolutions below are still built without bias.
        use_bias = False
layers = []
out = []
# Add input block auditor
layers += [nn.ReflectionPad2d(1)]
layers += [nn.Conv2d(n_channels_in, n_hidden, kernel_size=3, stride=1, bias=True)]
layers += [nn.LeakyReLU(0.2, inplace=True)]
# Add hidden blocks auditor
for i in range(2):
layers += [nn.ReflectionPad2d(1)]
layers += [nn.Conv2d(n_hidden, n_hidden, kernel_size=3, stride=1, bias=True)]
layers += [nn.LeakyReLU(0.2, inplace=True)]
layers += [nn.ReflectionPad2d(1)]
layers += [nn.Conv2d(n_hidden, n_hidden, kernel_size=4, stride=2, padding=1)]
layers += [nn.LeakyReLU(0.2, inplace=True)]
layers += [nn.Dropout(0.1 * int(use_dropout))]
# Set factor of change for input and output channels for hidden layers
mult_in = 1
mult_out = 1
# Add hidden layers
for i in range(1, n_layers + 1):
mult_in = mult_out
mult_out = min(2 ** i, 8)
if i == n_layers:
layers += [nn.Conv2d(n_hidden * mult_in, n_hidden * mult_out, kernel_size=4, stride=1, padding=1,
bias=use_bias)] # stride = 1
else:
layers += [nn.Conv2d(n_hidden * mult_in, n_hidden * mult_out, kernel_size=4, stride=2, padding=1,
bias=use_bias)] # stride = 2
layers += [nn.LeakyReLU(0.2, inplace=True)]
layers += [nn.Dropout(0.1*int(use_dropout))]
# output layer (1 channel prediction map)
layers += [nn.Conv2d(n_hidden * mult_out, n_hidden, kernel_size=4, stride=1, padding=1)]
layers += [nn.LeakyReLU(0.2, inplace=True)]
layers += [nn.Dropout(0.1*int(use_dropout))]
out += [Flatten()]
out += [nn.Linear(n_hidden * ((imsize[0]//2**n_layers) - 2) * ((imsize[1]//2**n_layers) - 2), 1)]
out += [nn.Dropout(0.1*int(use_dropout))]
out += [nn.Sigmoid()]
# Save model
self.model = nn.Sequential(*layers)
self.out = nn.Sequential(*out)
def forward(self, x) -> torch.Tensor:
assert not torch.isnan(x).any(), "Discriminator input is NaN"
assert not torch.isinf(x).any(), "Discriminator input is inf"
feats = self.model(x)
y = self.out(feats).clamp(min=1e-7)
assert not torch.isnan(y).any(), "Discriminator output is NaN"
assert not torch.isinf(y).any(), "Discriminator output is inf"
return y.squeeze()
if __name__ == '__main__':
# Test if working
dummy_batch = torch.rand((32, 71, 256, 256))
D = PixelDiscriminator(imsize=(256, 256), n_channels_in=71)
score = D(dummy_batch)
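    # Quick sanity check (a sketch, assuming the default config built above):
    # the discriminator returns one scalar realness score per sample.
    print(score.shape)  # expected: torch.Size([32])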
|