| id | content |
|---|---|
1705910
|
import os
import itertools
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils as tutils
import torchvision.transforms as transforms
import numpy as np
from tqdm import tqdm
from fsgan.utils.obj_factory import obj_factory
from fsgan.utils.tensorboard_logger import TensorBoardLogger
from fsgan.utils import utils, img_utils
from fsgan.utils.seg_utils import blend_seg_pred, blend_seg_label
from fsgan.utils.iou_metric import IOUMetric
from fsgan.datasets import img_landmarks_transforms
class IOUBenchmark(IOUMetric):
def __init__(self, num_classes, normalized=False, ignore_index=None):
super(IOUBenchmark, self).__init__(num_classes, normalized, ignore_index)
def to(self, device):
return self
def __call__(self, pred, target):
self.add(pred, target)
_, miou = self.value()
return {'iou': miou}
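# A minimal usage sketch (hypothetical shapes, assuming 3 classes): the benchmark
# takes raw (N, C, H, W) class scores and (N, H, W) integer labels, mirroring how
# process_epoch calls it below.
def _iou_benchmark_demo():
    bench = IOUBenchmark(num_classes=3)
    pred = torch.randn(2, 3, 8, 8)             # unnormalized class scores
    target = torch.randint(0, 3, (2, 8, 8))    # ground-truth labels
    return bench(pred, target)                 # -> {'iou': <mean IoU so far>}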
def main(
# General arguments
exp_dir, resume_dir=None, start_epoch=None, epochs=(90,), iterations=None, resolutions=(128, 256),
learning_rate=(1e-1,), gpus=None, workers=4, batch_size=(64,), seed=None, log_freq=20,
# Data arguments
train_dataset='fsgan.image_seg_dataset.ImageSegDataset', val_dataset=None, numpy_transforms=None,
tensor_transforms=('img_landmarks_transforms.ToTensor()',
'transforms.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])'),
# Training arguments
optimizer='optim.SGD(momentum=0.9,weight_decay=1e-4)', scheduler='lr_scheduler.StepLR(step_size=30,gamma=0.1)',
criterion='nn.CrossEntropyLoss', model='fsgan.models.simple_unet.UNet(n_classes=3,feature_scale=1)',
pretrained=False, benchmark='fsgan.train_segmentation.IOUBenchmark(3)'
):
    def process_epoch(dataset_loader, train=True):
stage = 'TRAINING' if train else 'VALIDATION'
total_iter = len(dataset_loader) * dataset_loader.batch_size * epoch
pbar = tqdm(dataset_loader, unit='batches')
# Set networks training mode
model.train(train)
# Reset logger
logger.reset(prefix='{} {}X{}: Epoch: {} / {}; LR: {:.0e}; '.format(
stage, res, res, epoch + 1, res_epochs, scheduler.get_lr()[0]))
# For each batch in the training data
for i, (input, target) in enumerate(pbar):
# Prepare input
input = input.to(device)
target = target.to(device)
with torch.no_grad():
target = target.argmax(dim=1)
# Execute model
pred = model(input)
# Calculate loss
loss_total = criterion(pred, target)
# Run benchmark
benchmark_res = benchmark(pred, target) if benchmark is not None else {}
if train:
# Update generator weights
optimizer.zero_grad()
loss_total.backward()
optimizer.step()
logger.update('losses', total=loss_total)
logger.update('bench', **benchmark_res)
total_iter += dataset_loader.batch_size
# Batch logs
pbar.set_description(str(logger))
if train and i % log_freq == 0:
logger.log_scalars_val('%dx%d/batch' % (res, res), total_iter)
# Epoch logs
logger.log_scalars_avg('%dx%d/epoch/%s' % (res, res, 'train' if train else 'val'), epoch)
if not train:
# Log images
seg_pred = blend_seg_pred(input, pred)
seg_gt = blend_seg_label(input, target)
grid = img_utils.make_grid(input, seg_pred, seg_gt)
logger.log_image('%dx%d/vis' % (res, res), grid, epoch)
return logger.log_dict['losses']['total'].avg
#################
# Main pipeline #
#################
# Validation
resolutions = resolutions if isinstance(resolutions, (list, tuple)) else [resolutions]
learning_rate = learning_rate if isinstance(learning_rate, (list, tuple)) else [learning_rate]
epochs = epochs if isinstance(epochs, (list, tuple)) else [epochs]
batch_size = batch_size if isinstance(batch_size, (list, tuple)) else [batch_size]
iterations = iterations if iterations is None or isinstance(iterations, (list, tuple)) else [iterations]
learning_rate = learning_rate * len(resolutions) if len(learning_rate) == 1 else learning_rate
epochs = epochs * len(resolutions) if len(epochs) == 1 else epochs
batch_size = batch_size * len(resolutions) if len(batch_size) == 1 else batch_size
if iterations is not None:
iterations = iterations * len(resolutions) if len(iterations) == 1 else iterations
iterations = utils.str2int(iterations)
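    # e.g. resolutions=(128, 256) with learning_rate=(1e-1,) broadcasts the single
    # value to [1e-1, 1e-1]: one learning rate per training resolution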
if not os.path.isdir(exp_dir):
raise RuntimeError('Experiment directory was not found: \'' + exp_dir + '\'')
assert len(learning_rate) == len(resolutions)
assert len(epochs) == len(resolutions)
assert len(batch_size) == len(resolutions)
assert iterations is None or len(iterations) == len(resolutions)
# Seed
utils.set_seed(seed)
# Check CUDA device availability
device, gpus = utils.set_device(gpus)
# Initialize loggers
logger = TensorBoardLogger(log_dir=exp_dir)
# Initialize datasets
numpy_transforms = obj_factory(numpy_transforms) if numpy_transforms is not None else []
tensor_transforms = obj_factory(tensor_transforms) if tensor_transforms is not None else []
img_transforms = img_landmarks_transforms.Compose(numpy_transforms + tensor_transforms)
train_dataset = obj_factory(train_dataset, transform=img_transforms)
if val_dataset is not None:
val_dataset = obj_factory(val_dataset, transform=img_transforms)
# Create networks
arch = utils.get_arch(model, num_classes=len(train_dataset.classes))
model = obj_factory(model, num_classes=len(train_dataset.classes)).to(device)
# Resume from a checkpoint or initialize the networks weights randomly
checkpoint_dir = exp_dir if resume_dir is None else resume_dir
model_path = os.path.join(checkpoint_dir, 'model_latest.pth')
best_loss = 1e6
curr_res = resolutions[0]
optimizer_state = None
if os.path.isfile(model_path):
print("=> loading checkpoint from '{}'".format(checkpoint_dir))
# model
checkpoint = torch.load(model_path)
if 'resolution' in checkpoint:
curr_res = checkpoint['resolution']
start_epoch = checkpoint['epoch'] if start_epoch is None else start_epoch
# else:
# curr_res = resolutions[1] if len(resolutions) > 1 else resolutions[0]
best_loss_key = 'best_loss_%d' % curr_res
best_loss = checkpoint[best_loss_key] if best_loss_key in checkpoint else best_loss
model.apply(utils.init_weights)
model.load_state_dict(checkpoint['state_dict'], strict=False)
optimizer_state = checkpoint['optimizer']
else:
print("=> no checkpoint found at '{}'".format(checkpoint_dir))
if not pretrained:
print("=> randomly initializing networks...")
model.apply(utils.init_weights)
    # Losses
criterion = obj_factory(criterion).to(device)
# Benchmark
benchmark = obj_factory(benchmark).to(device)
# Support multiple GPUs
if gpus and len(gpus) > 1:
model = nn.DataParallel(model, gpus)
# For each resolution
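    # Resolutions grow by powers of two, so the resume index is the log2
    # distance between the checkpoint resolution and the first resolution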
start_res_ind = int(np.log2(curr_res)) - int(np.log2(resolutions[0]))
start_epoch = 0 if start_epoch is None else start_epoch
for ri in range(start_res_ind, len(resolutions)):
res = resolutions[ri]
res_lr = learning_rate[ri]
res_epochs = epochs[ri]
res_iterations = iterations[ri] if iterations is not None else None
res_batch_size = batch_size[ri]
# Optimizer and scheduler
optimizer = obj_factory(optimizer, model.parameters(), lr=res_lr)
scheduler = obj_factory(scheduler, optimizer)
if optimizer_state is not None:
optimizer.load_state_dict(optimizer_state)
# Initialize data loaders
if res_iterations is None:
train_sampler = tutils.data.sampler.WeightedRandomSampler(train_dataset.weights, len(train_dataset))
else:
train_sampler = tutils.data.sampler.WeightedRandomSampler(train_dataset.weights, res_iterations)
train_loader = tutils.data.DataLoader(train_dataset, batch_size=res_batch_size, sampler=train_sampler,
num_workers=workers, pin_memory=True, drop_last=True, shuffle=False)
if val_dataset is not None:
if res_iterations is None:
val_sampler = tutils.data.sampler.WeightedRandomSampler(val_dataset.weights, len(val_dataset))
else:
val_iterations = (res_iterations * len(val_dataset)) // len(train_dataset)
val_sampler = tutils.data.sampler.WeightedRandomSampler(val_dataset.weights, val_iterations)
val_loader = tutils.data.DataLoader(val_dataset, batch_size=res_batch_size, sampler=val_sampler,
num_workers=workers, pin_memory=True, drop_last=True, shuffle=False)
else:
val_loader = None
# For each epoch
for epoch in range(start_epoch, res_epochs):
            total_loss = process_epoch(train_loader, train=True)
if val_loader is not None:
with torch.no_grad():
                    total_loss = process_epoch(val_loader, train=False)
if hasattr(benchmark, 'reset'):
benchmark.reset()
# Schedulers step (in PyTorch 1.1.0+ it must follow after the epoch training and validation steps)
if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
scheduler.step(total_loss)
else:
scheduler.step()
# Save models checkpoints
is_best = total_loss < best_loss
best_loss = min(best_loss, total_loss)
utils.save_checkpoint(exp_dir, 'model', {
'resolution': res,
'epoch': epoch + 1,
'state_dict': model.module.state_dict() if gpus and len(gpus) > 1 else model.state_dict(),
'optimizer': optimizer.state_dict(),
'best_loss_%d' % res: best_loss,
'arch': arch,
}, is_best)
        # Reset start epoch to 0: it should only affect the first training resolution
start_epoch = 0
best_loss = 1e6
if __name__ == "__main__":
# Parse program arguments
import argparse
parser = argparse.ArgumentParser('train_segmentation_ces')
general = parser.add_argument_group('general')
general.add_argument('exp_dir', metavar='DIR',
help='path to experiment directory')
general.add_argument('-rd', '--resume_dir', metavar='DIR',
help='path to resume directory (default: None)')
general.add_argument('-se', '--start-epoch', metavar='N',
help='manual epoch number (useful on restarts)')
general.add_argument('-e', '--epochs', default=90, type=int, nargs='+', metavar='N',
help='number of total epochs to run')
general.add_argument('-i', '--iterations', nargs='+', metavar='N',
help='number of iterations per resolution to run')
general.add_argument('-r', '--resolutions', default=(128, 256), type=int, nargs='+', metavar='N',
help='the training resolutions list (must be power of 2)')
general.add_argument('-lr', '--learning-rate', default=(1e-1,), type=float, nargs='+', metavar='F',
help='initial learning rate per resolution')
general.add_argument('--gpus', nargs='+', type=int, metavar='N',
help='list of gpu ids to use (default: all)')
general.add_argument('-w', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
general.add_argument('-b', '--batch-size', default=(64,), type=int, nargs='+', metavar='N',
help='mini-batch size (default: 64)')
general.add_argument('--seed', type=int, metavar='N',
help='random seed')
general.add_argument('-lf', '--log_freq', default=20, type=int, metavar='N',
help='number of steps between each loss plot')
data = parser.add_argument_group('data')
data.add_argument('-td', '--train_dataset', default='fsgan.image_seg_dataset.ImageSegDataset',
help='train dataset object')
data.add_argument('-vd', '--val_dataset',
help='val dataset object')
data.add_argument('-nt', '--numpy_transforms', nargs='+',
help='Numpy transforms')
data.add_argument('-tt', '--tensor_transforms', nargs='+', help='tensor transforms',
default=('img_landmarks_transforms.ToTensor()',
'transforms.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])'))
training = parser.add_argument_group('training')
training.add_argument('-o', '--optimizer', default='optim.SGD(momentum=0.9,weight_decay=1e-4)',
help='network\'s optimizer object')
training.add_argument('-s', '--scheduler', default='lr_scheduler.StepLR(step_size=30,gamma=0.1)',
help='scheduler object')
training.add_argument('-c', '--criterion', default='nn.CrossEntropyLoss',
help='criterion object')
training.add_argument('-m', '--model', default='fsgan.models.simple_unet.UNet(n_classes=3,feature_scale=1)',
help='model object')
training.add_argument('-p', '--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
training.add_argument('-be', '--benchmark', default='fsgan.train_segmentation.IOUBenchmark(3)',
help='benchmark object')
main(**vars(parser.parse_args()))
|
1705951
|
from uio.utils import fix_ctypes_struct
import ctypes
from ctypes import c_uint8 as ubyte, c_uint16 as ushort, c_uint32 as uint
from .eirq import EIrq
pos_t = uint # position counter
tim_t = uint # unit timer
wdt_t = ushort # watchdog timer
imt_t = ushort # interval measurement timer
@fix_ctypes_struct
class EQep( ctypes.Structure ):
_fields_ = [
#-------- position counter --------------------------------------------
#
("position", pos_t), #<w
("ld_position", pos_t), #rw loaded into position on strobe/index if enabled
("maximum", pos_t), #rw = period - 1
("compare", pos_t), #rw (may be ld_compare depending on config)
("index_latch", pos_t), #r-
("strobe_latch", pos_t), #r-
("timer_latch", pos_t), #r-
#-------- unit timer (frequency measurement base) ---------------------
#
("timer_counter", tim_t), #rw
("timer_maximum", tim_t), #rw = period - 1
#-------- watchdog timer (motor stall detection) ----------------------
#
# increments on module clock / 64.
# resets on every position counter change.
#
("wdog_counter", wdt_t), #rw
("wdog_compare", wdt_t), #rw
#-------- configuration -----------------------------------------------
#
("io_config", ushort),
# bits 0- 4 z-
# bit 5 rw invert strobe input
# bit 6 rw invert index input
#
# bit 7 rw invert input B
# bit 8 rw invert input A
#
# bit 9 rw gate index using strobe
#
# bit 10 rw (qd) swap A/B inputs (invert direction)
# bit 11 rw (non-qd) count on: 0 both edges 1 rising edge
#
# bit 12 rw compare-output to: 0 index pin 1 strobe pin
# bit 13 rw compare-output enabled
#
# bits 14-15 rw counter mode:
# 0 quadrature decoding of A/B
# 1 up(B=1)/down(B=0) counting on A
# 2 up counting on A
# 3 down counting on A
#
# In quadrature mode, the decoder issues up/down events based on
# transitions of inputs A/B. Bit 11 has no effect in this mode.
        # Bits 7, 8, and 10 each have the equivalent effect of inverting the count direction.
#
# The non-quadrature modes combine an event source:
# 0 << 11 any edge of input A
# 1 << 11 | 0 << 8 rising edge of input A
# 1 << 11 | 1 << 8 falling edge of input A
# with a direction:
# 1 << 14 | 0 << 7 down if B=0, up if B=1
# 1 << 14 | 1 << 7 up if B=0, down if B=1
# 2 << 14 up regardless of B
# 3 << 14 down regardless of B
#
# Bit 10 has no effect in non-quadrature modes.
("ctr_config", ushort),
# bit 0 rw watchdog timer enabled
# bit 1 rw unit timer enabled
# bit 2 rw latch imt on: 0 position read 1 unit period
# bit 3 rw position counter enabled
# bits 4- 5 rw latch position on index mode
# 1 latch position on rising edge of index
# 2 latch position on falling edge of index
# 3 latch position on index marker
# bit 6 rw latch position on strobe edge
# 0 rising edge
# 1 direction-dependent
# bit 7 rw load position now (XXX does it auto-clear?)
# bit 8 rw load position on index edge: 0 rising 1 falling
# bit 9 rw load position on index enabled
# bit 10 rw load position on strobe edge: (see latch on strobe)
# bit 11 rw load position on strobe enabled
# bits 12-13 rw reset position on
# 0 index event
# 1 (disabled)
# 2 first index event
# 3 unit timer event
# bit 14 rw emu soft suspend (counters run till zero)
# bit 15 rw emu-free (bit 14 ignored)
("imt_config", ushort),
# bits 0- 3 rw imt position event prescaler, log2 (0..11)
# bits 4- 6 rw imt counter prescaler, log2
# bit 15 rw imt enabled
("cmp_config", ushort),
# bits 0-11 rw output pulse width (cycles / 4 - 1)
# bit 12 rw position-compare enabled
# bit 13 rw invert output
# bit 14 rw load compare on 0 position zero 1 compare register
# bit 15 rw load compare enabled
#-------- status / event reporting ------------------------------------
#
("irq", EIrq),
# bit 0 (pending) irq active / (clear) eoi
#
# bit 1 position counter error
# bit 2 quadrature decoder error (A and B toggled at same time)
# bit 3 direction change event
# bit 4 watchdog timeout
# bit 5 position counter underflow
# bit 6 position counter overflow
# bit 7 compare load event
# bit 8 compare event
# bit 9 strobe latch event
# bit 10 index latch event
# bit 11 timer event
("status", ushort),
# bit 0 r- position counter error (updated on index)
# bit 1 rc first index event
# bit 2 rc direction change event
# bit 3 rc imt overflow event
# bit 4 r- direction of last index
# bit 5 r- direction of last position change
# bit 6 r- direction of first index
# bit 7 rc imt capture event
#-------- interval measurement timer (aka "capture timer") ------------
#
# increments on prescaled module clock.
# counter is captured and reset on prescaled position event.
# counter and capture are latched on position read or unit timer event.
#
("imt_counter", imt_t), #rw
("imt_capture", imt_t), #r-
("imt_counter_latch", imt_t), #r-
("imt_capture_latch", imt_t), #r-
("", ubyte * (0x5c - 0x42)),
#-------- identification ----------------------------------------------
#
("ident", uint), #r-
#
# 0x4_4d3_11_03 (v1.3.2 on subarctic 2.1)
]
def reset( self ):
self.irq.enabled = 0
self.imt_config = 0
self.ctr_config = 0
self.io_config = 0
self.cmp_config = 0
self.status = 0xffff
self.irq.clear( 0xffff )
self.position = 0
self.ld_position = 0
self.maximum = 0xffffffff
self.compare = 0
self.timer_counter = 0
self.timer_maximum = 0xffffffff
self.wdog_counter = 0
self.wdog_compare = 0
self.imt_counter = 0
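# 'compare' and 'ld_compare' name the same register: with shadow loading enabled
# (cmp_config bit 15) a write lands in the load register and is transferred to
# the active compare value on the configured event, hence the second name.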
EQep.ld_compare = EQep.compare
assert ctypes.sizeof(EQep) == 0x60
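# A minimal configuration sketch (assumes `qep` is an EQep instance mapped over
# the peripheral registers, e.g. via py-uio; bit meanings per the field
# comments above):
def setup_quadrature(qep, max_count=0xffffffff):
    qep.reset()
    qep.maximum = max_count     # position wraps at max_count + 1
    qep.io_config = 0           # counter mode 0: quadrature decoding of A/B
    qep.ctr_config = 1 << 3     # bit 3: position counter enabled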
|
1705970
|
from django.conf import settings
from django.views.generic import TemplateView
class HelpView(TemplateView):
template_name = 'devilry_help/help.django.html'
def get_context_data(self, **kwargs):
context = super(HelpView, self).get_context_data(**kwargs)
context['official_help_url'] = settings.DEVILRY_OFFICIAL_HELP_URL
context['organization_specific_documentation_url'] = getattr(
settings, 'DEVILRY_ORGANIZATION_SPECIFIC_DOCUMENTATION_URL', None)
context['organization_specific_documentation_text'] = getattr(
settings, 'DEVILRY_ORGANIZATION_SPECIFIC_DOCUMENTATION_TEXT', None)
return context
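# Settings consumed above (a sketch; example values are hypothetical):
#   DEVILRY_OFFICIAL_HELP_URL = 'https://devilry.readthedocs.io'    # required
#   DEVILRY_ORGANIZATION_SPECIFIC_DOCUMENTATION_URL = None          # optional
#   DEVILRY_ORGANIZATION_SPECIFIC_DOCUMENTATION_TEXT = None         # optional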
|
1705989
|
import os
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.geos import Point
from django.db import transaction
from geotrek.core.models import Topology
from geotrek.trekking.models import POI, POIType
class Command(BaseCommand):
help = 'Load a layer with point geometries in a model\n'
can_import_settings = True
counter = 0
def add_arguments(self, parser):
parser.add_argument('point_layer')
parser.add_argument('--encoding', '-e', action='store', dest='encoding', default='utf-8', help='File encoding, default utf-8')
parser.add_argument('--name-field', '-n', action='store', dest='name_field', help='Name of the field that contains the name attribute. Required or use --name-default instead.')
parser.add_argument('--type-field', '-t', action='store', dest='type_field', help='Name of the field that contains the POI Type attribute. Required or use --type-default instead.')
parser.add_argument('--description-field', '-d', action='store', dest='description_field', help='Name of the field that contains the description of the POI (optional)')
parser.add_argument('--name-default', action='store', dest='name_default', help='Default value for POI name. Use only if --name-field is not set')
parser.add_argument('--type-default', action='store', dest='type_default', help='Default value for POI Type. Use only if --type-field is not set')
def handle(self, *args, **options):
filename = options['point_layer']
if not os.path.exists(filename):
            raise CommandError('File does not exist at: %s' % filename)
data_source = DataSource(filename, encoding=options.get('encoding'))
verbosity = options.get('verbosity')
field_name = options.get('name_field')
field_poitype = options.get('type_field')
field_description = options.get('description_field')
sid = transaction.savepoint()
try:
for layer in data_source:
if verbosity >= 1:
self.stdout.write("- Layer '{}' with {} objects found".format(layer.name, layer.num_feat))
available_fields = layer.fields
if (field_name and field_name not in available_fields)\
or (not field_name and not options.get('name_default')):
self.stdout.write(self.style.ERROR(
"Field '{}' not found in data source.".format(field_name)))
self.stdout.write(self.style.ERROR(
"Set it with --name-field, or set a default value with --name-default"))
break
if (field_poitype and field_poitype not in available_fields)\
or (not field_poitype and not options.get('type_default')):
self.stdout.write(self.style.ERROR(
"Field '{}' not found in data source.".format(field_poitype)))
self.stdout.write(self.style.ERROR(
"Set it with --type-field, or set a default value with --type-default"))
break
for feature in layer:
feature_geom = feature.geom
name = feature.get(field_name) if field_name in available_fields else options.get('name_default')
poitype = feature.get(field_poitype) if field_poitype in available_fields else options.get('type_default')
description = feature.get(field_description) if field_description in available_fields else ""
self.create_poi(feature_geom, name, poitype, description)
if verbosity >= 2:
self.stdout.write(self.style.NOTICE("{} POI created.".format(name)))
transaction.savepoint_commit(sid)
if verbosity >= 2:
self.stdout.write(self.style.NOTICE("{} objects created.".format(self.counter)))
except Exception:
self.stdout.write(self.style.ERROR("An error occured, rolling back operations."))
transaction.savepoint_rollback(sid)
raise
def create_poi(self, geometry, name, poitype, description):
poitype, created = POIType.objects.get_or_create(label=poitype)
poi = POI.objects.create(name=name, type=poitype, description=description)
if settings.TREKKING_TOPOLOGY_ENABLED:
# Use existing topology helpers to transform a Point(x, y)
# to a path aggregation (topology)
geometry = geometry.transform(settings.API_SRID, clone=True)
geometry.coord_dim = 2
serialized = '{"lng": %s, "lat": %s}' % (geometry.x, geometry.y)
topology = Topology.deserialize(serialized)
# Move deserialization aggregations to the POI
poi.mutate(topology)
else:
if geometry.geom_type != 'Point':
raise TypeError
poi.geom = Point(geometry.x, geometry.y, srid=settings.SRID)
poi.save()
self.counter += 1
return poi
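# Example invocation (a sketch; command and file/field names are hypothetical):
#   ./manage.py <this_command> pois.shp --name-field name --type-field type \
#       --description-field descr --encoding latin-1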
|
1705992
|
import pytest
def test_ex8():
import pandas as pd
import numpy as np
from tcrdist.repertoire import TCRrep
df = pd.read_csv('dash.csv')
df = df[df.epitope.isin(['PA'])]
tr = TCRrep(cell_df=df, chains=['alpha','beta'], organism='mouse')
tr.tcrdist2(processes = 1,
metric = 'hamming',
reduce = True,
dump = False,
save = False)
    import Levenshtein
    # apply the per-CDR weights (CDR3 weighted x3, as in the tcrdist metric)
    weights = {'cdr3_b_aa': 3, 'pmhc_b_aa': 1, 'cdr2_b_aa': 1, 'cdr1_b_aa': 1}
    for cdr, w in weights.items():
        tr.add_custom_dmat(cdr=cdr, metric=Levenshtein.distance, processes=1)
    tr.pw_levenshtein_tcrdist_beta = sum(
        w * getattr(tr, 'custom_{}_pw'.format(cdr)) for cdr, w in weights.items())
|
1705995
|
from django.contrib import admin
from django.urls import path, include
from . import views
urlpatterns = [
path('owner/<int:driver_id>', views.get_driver),
path('auto/', views.CarView.as_view()),
path('owners/', views.get_drivers),
path('owner/new/', views.get_driver_form),
path('auto/new/', views.GetAutoForm.as_view(success_url="/auto/new/")),
]
|
1706004
|
import doctest
import pytest
from insights.parsers import ParseException
from insights.tests import context_wrap
from insights.parsers import sendq_recvq_socket_buffer
from insights.parsers.sendq_recvq_socket_buffer import SendQSocketBuffer, RecvQSocketBuffer
SENDQ_SOCKET_BUFFER = """
4096 16384 4194304
""".strip()
EMPTY_SENDQ_SOCKET_BUFFER = """
""".strip()
RECVQ_SOCKET_BUFFER = """
4096 87380 6291456
""".strip()
EMPTY_RECVQ_SOCKET_BUFFER = """
""".strip()
def test_empty_sendq_socket_buffer():
with pytest.raises(ParseException) as exc:
SendQSocketBuffer(context_wrap(EMPTY_SENDQ_SOCKET_BUFFER))
assert str(exc.value) == "Empty content"
def test_sendq_socket_buffer():
sendq_buffer = SendQSocketBuffer(context_wrap(SENDQ_SOCKET_BUFFER))
assert sendq_buffer.minimum == 4096
assert sendq_buffer.default == 16384
assert sendq_buffer.maximum == 4194304
assert sendq_buffer.raw == '4096 16384 4194304'
def test_empty_recvq_socket_buffer():
with pytest.raises(ParseException) as exc:
RecvQSocketBuffer(context_wrap(EMPTY_RECVQ_SOCKET_BUFFER))
assert str(exc.value) == "Empty content"
def test_recvq_socket_buffer():
recvq_buffer = RecvQSocketBuffer(context_wrap(RECVQ_SOCKET_BUFFER))
assert recvq_buffer.minimum == 4096
assert recvq_buffer.default == 87380
assert recvq_buffer.maximum == 6291456
assert recvq_buffer.raw == '4096 87380 6291456'
def test_doc():
env = {
'sendq_buffer_values': SendQSocketBuffer(context_wrap(SENDQ_SOCKET_BUFFER)),
'recvq_buffer_values': RecvQSocketBuffer(context_wrap(RECVQ_SOCKET_BUFFER)),
}
failures, tests = doctest.testmod(sendq_recvq_socket_buffer, globs=env)
assert failures == 0
|
1706013
|
import pytest
from freezegun import freeze_time
import datetime
from io import BytesIO
from aerofiles.igc import Writer
@pytest.fixture()
def output():
return BytesIO()
@pytest.fixture()
def writer(output):
return Writer(output)
def test_write_line(writer):
writer.write_line('line')
assert writer.fp.getvalue() == b'line\r\n'
@pytest.fixture(params=['XXX', 'GCS', 'FIL', 'FLA'])
def manufacturer_code(request):
return request.param
@pytest.fixture(params=['ABC', 'NG6', 'ART'])
def logger_id(request):
return request.param
def test_logger_id(writer, manufacturer_code, logger_id):
writer.write_logger_id(manufacturer_code, logger_id)
assert writer.fp.getvalue() == \
('A%s%s\r\n' % (manufacturer_code, logger_id)).encode('utf-8')
def test_logger_id_with_extension(writer, manufacturer_code, logger_id):
writer.write_logger_id(manufacturer_code, logger_id, 'FLIGHT:1')
assert writer.fp.getvalue() == \
('A%s%sFLIGHT:1\r\n' % (manufacturer_code, logger_id)).encode('utf-8')
def test_logger_id_with_invalid_manufacturer_code(writer):
with pytest.raises(ValueError):
writer.write_logger_id('x_1', 'ABC')
def test_logger_id_with_invalid_logger_id(writer):
with pytest.raises(ValueError):
writer.write_logger_id('XXX', '12345')
def test_logger_id_without_validation(writer):
writer.write_logger_id('a4%', '12345', validate=False)
assert writer.fp.getvalue() == b'Aa4%12345\r\n'
def test_invalid_header_source(writer):
with pytest.raises(ValueError) as ex:
writer.write_header('X', 'XXX', 'ABC')
assert 'Invalid source: X' in str(ex)
@pytest.fixture(params=[(1996, 12, 24), (2014, 1, 31), (2032, 8, 5)])
def date(request):
return datetime.date(*request.param)
def test_date(writer, date):
writer.write_date(date)
assert writer.fp.getvalue() == \
date.strftime('HFDTE%d%m%y\r\n').encode('utf-8')
def test_invalid_date(writer):
with pytest.raises(ValueError) as ex:
writer.write_date('0222')
assert 'Invalid date: 0222' in str(ex)
@pytest.fixture(params=[20, 500, 999])
def fix_accuracy(request):
return request.param
def test_fix_accuracy(writer, fix_accuracy):
writer.write_fix_accuracy(fix_accuracy)
assert writer.fp.getvalue() == \
('HFFXA%03d\r\n' % fix_accuracy).encode('utf-8')
def test_default_fix_accuracy(writer):
writer.write_fix_accuracy()
assert writer.fp.getvalue() == b'HFFXA500\r\n'
def test_invalid_fix_accuracy(writer):
with pytest.raises(ValueError):
writer.write_fix_accuracy(0)
with pytest.raises(ValueError):
writer.write_fix_accuracy(1000)
@pytest.fixture(params=[
'<NAME>',
'Some guy named FOO',
'Deep Thought',
])
def pilot(request):
return request.param
def test_pilot(writer, pilot):
writer.write_pilot(pilot)
assert writer.fp.getvalue() == \
('HFPLTPILOTINCHARGE:%s\r\n' % pilot).encode('utf-8')
def test_copilot(writer, pilot):
writer.write_copilot(pilot)
assert writer.fp.getvalue() == \
('HFCM2CREW2:%s\r\n' % pilot).encode('utf-8')
@pytest.fixture(params=['Hornet', 'JS1', 'ASW-22 BLE'])
def glider_type(request):
return request.param
def test_glider_type(writer, glider_type):
writer.write_glider_type(glider_type)
assert writer.fp.getvalue() == \
('HFGTYGLIDERTYPE:%s\r\n' % glider_type).encode('utf-8')
@pytest.fixture(params=['D-4449', 'N116EL', '2648'])
def glider_id(request):
return request.param
def test_glider_id(writer, glider_id):
writer.write_glider_id(glider_id)
assert writer.fp.getvalue() == \
('HFGIDGLIDERID:%s\r\n' % glider_id).encode('utf-8')
def test_gps_datum(writer):
writer.write_gps_datum(33, 'Guam-1963')
assert writer.fp.getvalue() == b'HFDTM033GPSDATUM:Guam-1963\r\n'
def test_default_gps_datum(writer):
writer.write_gps_datum()
assert writer.fp.getvalue() == b'HFDTM100GPSDATUM:WGS-1984\r\n'
@pytest.fixture(params=['6.4', 'Flarm-IGC05.09'])
def firmware_version(request):
return request.param
def test_firmware_version(writer, firmware_version):
writer.write_firmware_version(firmware_version)
assert writer.fp.getvalue() == \
('HFRFWFIRMWAREVERSION:%s\r\n' % firmware_version).encode('utf-8')
@pytest.fixture(params=['1.2', 'Flarm-IGC06'])
def hardware_version(request):
return request.param
def test_hardware_version(writer, hardware_version):
writer.write_hardware_version(hardware_version)
assert writer.fp.getvalue() == \
('HFRHWHARDWAREVERSION:%s\r\n' % hardware_version).encode('utf-8')
@pytest.fixture(params=[
'Flarm-IGC',
'FILSER,LX5000IGC-2',
'LXNAVIGATION,LX8000F',
'XCSOAR XCSOAR Android 6.4.3 Nov 1 2012',
])
def logger_type(request):
return request.param
def test_logger_type(writer, logger_type):
writer.write_logger_type(logger_type)
assert writer.fp.getvalue() == \
('HFFTYFRTYPE:%s\r\n' % logger_type).encode('utf-8')
@pytest.fixture(params=[
'uBLOX LEA-4S-2,16,max9000m',
'JRC/CCA-450',
'u-blox:LEA-4P,16,8191',
'Internal GPS (Android)',
])
def gps_receiver(request):
return request.param
def test_gps_receiver(writer, gps_receiver):
writer.write_gps_receiver(gps_receiver)
assert writer.fp.getvalue() == \
('HFGPS%s\r\n' % gps_receiver).encode('utf-8')
@pytest.fixture(params=[
'INTERSEMA,MS5534A,max10000m',
'Intersema MS5534B,8191',
])
def pressure_sensor(request):
return request.param
def test_pressure_sensor(writer, pressure_sensor):
writer.write_pressure_sensor(pressure_sensor)
assert writer.fp.getvalue() == \
('HFPRSPRESSALTSENSOR:%s\r\n' % pressure_sensor).encode('utf-8')
@pytest.fixture(params=['TH', '6H', '37', 'B', 'FUN'])
def competition_id(request):
return request.param
def test_competition_id(writer, competition_id):
writer.write_competition_id(competition_id)
assert writer.fp.getvalue() == \
('HFCIDCOMPETITIONID:%s\r\n' % competition_id).encode('utf-8')
@pytest.fixture(params=['Std', 'CLUB', '15M', 'Open'])
def competition_class(request):
return request.param
def test_competition_class(writer, competition_class):
writer.write_competition_class(competition_class)
assert writer.fp.getvalue() == \
('HFCCLCOMPETITIONCLASS:%s\r\n' % competition_class).encode('utf-8')
@pytest.fixture(params=['SFN', 'LV Aachen'])
def club(request):
return request.param
def test_club(writer, club):
writer.write_club(club)
assert writer.fp.getvalue() == \
('HFCLBCLUB:%s\r\n' % club).encode('utf-8')
def test_headers(writer):
writer.write_headers({
'manufacturer_code': 'XCS',
'logger_id': 'TBX',
'date': datetime.date(1987, 2, 24),
'fix_accuracy': 50,
'pilot': '<NAME>',
'copilot': '<NAME>',
'glider_type': 'Duo Discus',
'glider_id': 'D-KKHH',
'firmware_version': '2.2',
'hardware_version': '2',
'logger_type': 'LXNAVIGATION,LX8000F',
'gps_receiver': 'uBLOX LEA-4S-2,16,max9000m',
'pressure_sensor': 'INTERSEMA,MS5534A,max10000m',
'competition_id': '2H',
'competition_class': 'Doubleseater',
'club': 'LV Aachen',
})
assert writer.fp.getvalue() == b'\r\n'.join([
b'AXCSTBX',
b'HFDTE240287',
b'HFFXA050',
b'HFPLTPILOTINCHARGE:<NAME>',
b'HFCM2CREW2:<NAME>',
        b'HFGTYGLIDERTYPE:Duo Discus',
b'HFGIDGLIDERID:D-KKHH',
b'HFDTM100GPSDATUM:WGS-1984',
b'HFRFWFIRMWAREVERSION:2.2',
b'HFRHWHARDWAREVERSION:2',
b'HFFTYFRTYPE:LXNAVIGATION,LX8000F',
b'HFGPSuBLOX LEA-4S-2,16,max9000m',
b'HFPRSPRESSALTSENSOR:INTERSEMA,MS5534A,max10000m',
b'HFCIDCOMPETITIONID:2H',
b'HFCCLCOMPETITIONCLASS:Doubleseater',
b'HFCLBCLUB:LV Aachen',
]) + b'\r\n'
def test_default_headers(writer):
writer.write_headers({
'manufacturer_code': 'FLA',
'logger_id': '6NG',
'date': datetime.date(2013, 4, 1),
'logger_type': 'Flarm-IGC',
'gps_receiver': 'u-blox:LEA-4P,16,8191',
})
assert writer.fp.getvalue() == b'\r\n'.join([
b'AFLA6NG',
b'HFDTE010413',
b'HFFXA500',
b'HFPLTPILOTINCHARGE:',
b'HFGTYGLIDERTYPE:',
b'HFGIDGLIDERID:',
b'HFDTM100GPSDATUM:WGS-1984',
b'HFRFWFIRMWAREVERSION:',
b'HFRHWHARDWAREVERSION:',
b'HFFTYFRTYPE:Flarm-IGC',
b'HFGPSu-blox:LEA-4P,16,8191',
b'HFPRSPRESSALTSENSOR:',
]) + b'\r\n'
def test_missing_headers(writer):
with pytest.raises(ValueError):
writer.write_headers({})
def test_fix_extensions(writer):
writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
assert writer.fp.getvalue() == b'I033638FXA3940SIU4143ENL\r\n'
def test_empty_fix_extensions(writer):
writer.write_fix_extensions([])
assert writer.fp.getvalue() == b'I00\r\n'
def test_invalid_fix_extensions(writer):
with pytest.raises(ValueError) as ex:
writer.write_fix_extensions(('42', 42) * 100)
assert 'Too many extensions' in str(ex)
with pytest.raises(ValueError) as ex:
writer.write_fix_extensions([('42', 42)])
assert 'Invalid extension: 42' in str(ex)
def test_k_record_extensions(writer):
writer.write_k_record_extensions([('HDT', 5)])
assert writer.fp.getvalue() == b'J010812HDT\r\n'
def test_empty_k_record_extensions(writer):
writer.write_k_record_extensions([])
assert writer.fp.getvalue() == b'J00\r\n'
def test_invalid_k_record_extensions(writer):
with pytest.raises(ValueError):
writer.write_k_record_extensions([('42', 42)])
def test_task_metadata(writer):
writer.write_task_metadata(
datetime.datetime(2014, 4, 13, 12, 53, 2),
flight_date=datetime.date(2014, 4, 14),
task_number=42,
turnpoints=3,
text='Some more metadata',
)
assert writer.fp.getvalue() == \
b'C130414125302140414004203Some more metadata\r\n'
def test_default_task_metadata(writer):
with freeze_time("2012-01-14 03:21:34"):
writer.write_task_metadata(turnpoints=1)
assert writer.fp.getvalue() == b'C140112032134000000000101\r\n'
def test_task_metadata_with_invalid_datetime(writer):
with pytest.raises(ValueError) as ex:
writer.write_task_metadata('xxx', turnpoints=2)
assert 'Invalid declaration datetime: xxx' in str(ex)
def test_task_metadata_with_invalid_tasknumber(writer):
with pytest.raises(ValueError) as ex:
writer.write_task_metadata(task_number='xxx', turnpoints=2)
assert 'Invalid task number: xxx' in str(ex)
def test_task_metadata_with_invalid_turnpoints(writer):
with pytest.raises(ValueError) as ex:
writer.write_task_metadata()
assert 'Invalid turnpoints: None' in str(ex)
with pytest.raises(ValueError) as ex:
writer.write_task_metadata(turnpoints='xxx')
assert 'Invalid turnpoints: xxx' in str(ex)
def test_task_point(writer):
writer.write_task_point(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
assert writer.fp.getvalue() == b'C5107345N00624765EMeiersberg\r\n'
def test_task_point_with_negative_coordinates(writer):
writer.write_task_point(
latitude=-(12 + 32.112 / 60.),
longitude=-(178 + .001 / 60.),
text='TAKEOFF',
)
assert writer.fp.getvalue() == b'C1232112S17800001WTAKEOFF\r\n'
def test_task_point_with_area(writer):
writer.write_task_point(
-(12 + 32.112 / 60.),
-(178 + .001 / 60.),
'TURN AREA',
distance_min=12.0,
distance_max=32.0,
bearing1=122.0,
bearing2=182.0,
)
assert writer.fp.getvalue() == \
b'C1232112S17800001W00120000032000122000182000TURN AREA\r\n'
def test_default_task_point(writer):
writer.write_task_point()
assert writer.fp.getvalue() == b'C0000000N00000000E\r\n'
def test_task_points(writer):
writer.write_task_points([
(None, None, 'TAKEOFF'),
(51.40375, 6.41275, 'START'),
(50.38210, 8.82105, 'TURN 1'),
(50.59045, 7.03555, 'TURN 2', 0, 32.5, 0, 180),
(51.40375, 6.41275, 'FINISH'),
(None, None, 'LANDING'),
])
assert writer.fp.getvalue() == b'\r\n'.join([
b'C0000000N00000000ETAKEOFF',
b'C5124225N00624765ESTART',
b'C5022926N00849263ETURN 1',
b'C5035427N00702133E00000000032500000000180000TURN 2',
b'C5124225N00624765EFINISH',
b'C0000000N00000000ELANDING',
]) + b'\r\n'
def test_invalid_task_points(writer):
with pytest.raises(ValueError) as ex:
writer.write_task_points([
(None, None, None, None),
])
assert 'Invalid number of task point tuple items' in str(ex)
def test_security(writer):
writer.write_security('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
assert writer.fp.getvalue() == b'GABCDEFGHIJKLMNOPQRSTUVWXYZ\r\n'
def test_long_security(writer):
writer.write_security('A' * 100)
assert writer.fp.getvalue() == b'\r\n'.join([
b'G' + b'A' * 75,
b'G' + b'A' * 25,
]) + b'\r\n'
def test_custom_long_security(writer):
writer.write_security('A' * 110, bytes_per_line=25)
assert writer.fp.getvalue() == b'\r\n'.join([
b'G' + b'A' * 25,
b'G' + b'A' * 25,
b'G' + b'A' * 25,
b'G' + b'A' * 25,
b'G' + b'A' * 10,
]) + b'\r\n'
def test_fix(writer):
writer.write_fix(
datetime.time(12, 34, 56),
latitude=51.40375,
longitude=6.41275,
valid=True,
pressure_alt=1234,
gps_alt=1432,
)
assert writer.fp.getvalue() == b'B1234565124225N00624765EA0123401432\r\n'
def test_default_fix(writer):
with freeze_time("2012-01-14 03:21:34"):
writer.write_fix()
assert writer.fp.getvalue() == b'B0321340000000N00000000EV0000000000\r\n'
def test_fix_with_extensions(writer):
writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
writer.write_fix(datetime.time(2, 3, 4), extensions=['023', 13, 2])
assert writer.fp.getvalue() == b'\r\n'.join([
b'I033638FXA3940SIU4143ENL',
b'B0203040000000N00000000EV000000000002313002',
]) + b'\r\n'
def test_fix_with_missing_extensions(writer):
writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
with pytest.raises(ValueError) as ex:
writer.write_fix(datetime.time(2, 3, 4))
assert 'Invalid extensions list' in str(ex)
with pytest.raises(ValueError) as ex:
writer.write_fix(datetime.time(2, 3, 4), extensions=['023'])
assert 'Number of extensions does not match declaration' in str(ex)
def test_fix_with_missing_extensions_declaration(writer):
with pytest.raises(ValueError) as ex:
writer.write_fix(datetime.time(2, 3, 4), extensions=['023', 13, 2])
assert 'Invalid extensions list' in str(ex)
def test_fix_with_invalid_extension(writer):
writer.write_fix_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
with pytest.raises(ValueError) as ex:
writer.write_fix(datetime.time(2, 3, 4), extensions=['x', 13, 2])
assert 'Extension value has wrong length' in str(ex)
def test_fix_with_invalid_time(writer):
with pytest.raises(ValueError) as ex:
writer.write_fix('abcdef')
assert 'Invalid time: abcdef' in str(ex)
def test_fix_with_invalid_latitude(writer):
with pytest.raises(ValueError) as ex:
writer.write_fix(latitude=-112.34)
assert 'Invalid latitude:' in str(ex)
with pytest.raises(ValueError) as ex:
writer.write_fix(latitude=91.2)
assert 'Invalid latitude:' in str(ex)
def test_fix_with_invalid_longitude(writer):
with pytest.raises(ValueError) as ex:
writer.write_fix(longitude=-181)
assert 'Invalid longitude:' in str(ex)
with pytest.raises(ValueError) as ex:
writer.write_fix(longitude=215)
assert 'Invalid longitude:' in str(ex)
def test_event(writer):
writer.write_event(datetime.time(12, 34, 56), 'PEV')
assert writer.fp.getvalue() == b'E123456PEV\r\n'
def test_event_with_text(writer):
writer.write_event(datetime.time(1, 2, 3), 'PEV', 'This is a test')
assert writer.fp.getvalue() == b'E010203PEVThis is a test\r\n'
def test_event_with_default_time(writer):
with freeze_time("2012-01-14 03:21:34"):
writer.write_event('PEV')
assert writer.fp.getvalue() == b'E032134PEV\r\n'
def test_event_with_default_time_and_text(writer):
with freeze_time("2012-01-14 03:21:34"):
writer.write_event('PEV', 'Test')
assert writer.fp.getvalue() == b'E032134PEVTest\r\n'
def test_event_with_invalid_arguments(writer):
with pytest.raises(ValueError) as ex:
writer.write_event()
assert 'Invalid number of parameters received' in str(ex)
with pytest.raises(ValueError) as ex:
writer.write_event(1, 2, 3, 4)
assert 'Invalid number of parameters received' in str(ex)
def test_event_with_invalid_code(writer):
with pytest.raises(ValueError) as ex:
writer.write_event('X')
assert 'Invalid event code' in str(ex)
def test_satellites(writer):
writer.write_satellites(datetime.time(12, 34, 56), [2, 52, 33, '03'])
assert writer.fp.getvalue() == b'F12345602523303\r\n'
def test_satellites_with_default_time(writer):
with freeze_time("2012-01-14 03:21:34"):
writer.write_satellites([2, 4, 99])
assert writer.fp.getvalue() == b'F032134020499\r\n'
def test_satellites_with_invalid_id(writer):
with pytest.raises(ValueError) as ex:
writer.write_satellites(['ABCDE'])
assert 'Invalid satellite ID' in str(ex)
def test_satellites_with_invalid_arguments(writer):
with pytest.raises(ValueError) as ex:
writer.write_satellites()
assert 'Invalid number of parameters received' in str(ex)
with pytest.raises(ValueError) as ex:
writer.write_satellites(1, 2, 3)
assert 'Invalid number of parameters received' in str(ex)
def test_k_record(writer):
writer.write_k_record_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
assert writer.fp.getvalue() == b'\r\n'.join([
b'J030810FXA1112SIU1315ENL',
b'K02030402313002',
]) + b'\r\n'
def test_k_record_with_default_time(writer):
writer.write_k_record_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
with freeze_time("2012-01-14 03:21:34"):
writer.write_k_record(['023', 13, 2])
assert writer.fp.getvalue() == b'\r\n'.join([
b'J030810FXA1112SIU1315ENL',
b'K03213402313002',
]) + b'\r\n'
def test_k_record_with_missing_arguments(writer):
with pytest.raises(ValueError) as ex:
writer.write_k_record()
assert 'Invalid number of parameters received' in str(ex)
def test_k_record_with_missing_extensions(writer):
writer.write_k_record_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
with pytest.raises(ValueError) as ex:
writer.write_k_record(datetime.time(2, 3, 4), ['023'])
assert 'Number of extensions does not match declaration' in str(ex)
def test_k_record_with_missing_extensions_declaration(writer):
with pytest.raises(ValueError) as ex:
writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
assert 'Invalid extensions list' in str(ex)
def test_k_record_with_invalid_extension(writer):
writer.write_k_record_extensions([('FXA', 3), ('SIU', 2), ('ENL', 3)])
with pytest.raises(ValueError) as ex:
writer.write_k_record(datetime.time(2, 3, 4), ['x', 13, 2])
assert 'Extension value has wrong length' in str(ex)
def test_comment(writer):
writer.write_comment('PLT', 'This flight was my second 1000km attempt')
assert writer.fp.getvalue() == \
b'LPLTThis flight was my second 1000km attempt\r\n'
def test_comment_with_invalid_source(writer):
with pytest.raises(ValueError) as ex:
writer.write_comment('X', 'bla')
assert 'Invalid source' in str(ex)
def test_igc_example(writer):
writer.write_headers({
'manufacturer_code': 'XXX',
'logger_id': 'ABC',
'logger_id_extension': 'FLIGHT:1',
'date': datetime.date(2009, 7, 16),
'fix_accuracy': 35,
'pilot': 'Bloggs Bill D',
'glider_type': 'Schempp Ventus2cxa',
'glider_id': 'ABCD-1234',
'firmware_version': '6.4',
'hardware_version': '3.0',
'logger_type': 'Manufacturer, Model',
'gps_receiver': 'MarconiCanada:Superstar,12ch, max10000m',
'pressure_sensor': 'Sensyn, XYZ1111, max11000m',
'competition_id': 'XYZ-78910',
'competition_class': '15m Motor Glider',
})
writer.write_fix_extensions([
('FXA', 3),
('SIU', 2),
('ENL', 3),
])
writer.write_k_record_extensions([
('HDT', 5),
])
writer.write_task_metadata(
datetime.datetime(2001, 7, 15, 21, 38, 41),
        flight_date=datetime.date(2001, 7, 16),
turnpoints=2,
text='500K Tri',
)
writer.write_task_points([
(51.18932, -1.03165, 'Lasham Clubhouse'),
(51.16965, -1.04407, 'Lasham Start S, Start'),
(52.15153, -2.92045, 'Sarnesfield, TP1'),
        (52.50245, -0.29353, 'Norman Cross, TP2'),
(51.16965, -1.04407, 'Lasham Start S, Finish'),
(51.18932, -1.03165, 'Lasham Clubhouse'),
])
writer.write_satellites(datetime.time(16, 2, 40), [
4, 6, 9, 12, 36, 24, 22, 18, 21
])
writer.write_fix(
datetime.time(16, 2, 40),
54 + 7.121 / 60,
-2 - 49.342 / 60,
valid=True,
pressure_alt=280,
gps_alt=421,
extensions=[205, 9, 950],
)
writer.write_event(datetime.time(16, 2, 45), 'PEV')
writer.write_fix(
datetime.time(16, 2, 45),
51 + 7.126 / 60,
-1 - 49.300 / 60,
valid=True,
pressure_alt=288,
gps_alt=429,
extensions=[195, 9, 20],
)
writer.write_fix(
datetime.time(16, 2, 50),
51 + 7.134 / 60,
-1 - 49.283 / 60,
valid=True,
pressure_alt=290,
gps_alt=432,
extensions=[210, 9, 15],
)
writer.write_fix(
datetime.time(16, 2, 55),
51 + 7.140 / 60,
-1 - 49.221 / 60,
valid=True,
pressure_alt=290,
gps_alt=430,
extensions=[200, 9, 12],
)
writer.write_satellites(datetime.time(16, 3, 0), [
6, 9, 12, 36, 24, 22, 18, 21
])
writer.write_fix(
datetime.time(16, 3, 00),
51 + 7.150 / 60,
-1 - 49.202 / 60,
valid=True,
pressure_alt=291,
gps_alt=432,
extensions=[256, 8, 9],
)
writer.write_event(datetime.time(16, 3, 5), 'PEV')
writer.write_fix(
datetime.time(16, 3, 5),
51 + 7.180 / 60,
-1 - 49.185 / 60,
valid=True,
pressure_alt=291,
gps_alt=435,
extensions=[210, 8, 15],
)
writer.write_fix(
datetime.time(16, 3, 10),
51 + 7.212 / 60,
-1 - 49.174 / 60,
valid=True,
pressure_alt=293,
gps_alt=435,
extensions=[196, 8, 24],
)
writer.write_k_record(datetime.time(16, 2, 48), [90])
writer.write_fix(
datetime.time(16, 2, 48),
51 + 7.220 / 60,
-1 - 49.150 / 60,
valid=True,
pressure_alt=494,
gps_alt=436,
extensions=[190, 8, 18],
)
writer.write_fix(
datetime.time(16, 2, 52),
51 + 7.330 / 60,
-1 - 49.127 / 60,
valid=True,
pressure_alt=496,
gps_alt=439,
extensions=[195, 8, 15],
)
writer.write_comment('XXX', 'RURITANIAN STANDARD NATIONALS DAY 1')
writer.write_comment('XXX', 'FLIGHT TIME: 4:14:25, TASK SPEED:58.48KTS')
writer.write_security(
'REJNGJERJKNJKRE31895478537H43982FJN9248F942389T433T'
'JNJK2489IERGNV3089IVJE9GO398535J3894N358954983O0934'
'SKTO5427FGTNUT5621WKTC6714FT8957FGMKJ134527FGTR6751'
'K2489IERGNV3089IVJE39GO398535J3894N358954983FTGY546'
'12560DJUWT28719GTAOL5628FGWNIST78154INWTOLP7815FITN',
bytes_per_line=51
)
assert writer.fp.getvalue() == b'\r\n'.join([
b'AXXXABCFLIGHT:1',
b'HFDTE160709',
b'HFFXA035',
b'HFPLTPILOTINCHARGE:Bloggs Bill D',
b'HFGTYGLIDERTYPE:Schempp Ventus2cxa',
b'HFGIDGLIDERID:ABCD-1234',
b'HFDTM100GPSDATUM:WGS-1984',
b'HFRFWFIRMWAREVERSION:6.4',
b'HFRHWHARDWAREVERSION:3.0',
b'HFFTYFRTYPE:Manufacturer, Model',
b'HFGPSMarconiCanada:Superstar,12ch, max10000m',
b'HFPRSPRESSALTSENSOR:Sensyn, XYZ1111, max11000m',
b'HFCIDCOMPETITIONID:XYZ-78910',
b'HFCCLCOMPETITIONCLASS:15m Motor Glider',
b'I033638FXA3940SIU4143ENL',
b'J010812HDT',
b'C150701213841160701000102500K Tri',
b'C5111359N00101899WLasham Clubhouse',
b'C5110179N00102644WLasham Start S, Start',
b'C5209092N00255227WSarnesfield, TP1',
b'C5230147N00017612WNorman Cross, TP2',
b'C5110179N00102644WLasham Start S, Finish',
b'C5111359N00101899WLasham Clubhouse',
b'F160240040609123624221821',
b'B1602405407121N00249342WA002800042120509950',
b'E160245PEV',
b'B1602455107126N00149300WA002880042919509020',
b'B1602505107134N00149283WA002900043221009015',
b'B1602555107140N00149221WA002900043020009012',
b'F1603000609123624221821',
b'B1603005107150N00149202WA002910043225608009',
b'E160305PEV',
b'B1603055107180N00149185WA002910043521008015',
b'B1603105107212N00149174WA002930043519608024',
b'K16024800090',
b'B1602485107220N00149150WA004940043619008018',
b'B1602525107330N00149127WA004960043919508015',
b'LXXXRURITANIAN STANDARD NATIONALS DAY 1',
b'LXXXFLIGHT TIME: 4:14:25, TASK SPEED:58.48KTS',
b'GREJNGJERJKNJKRE31895478537H43982FJN9248F942389T433T',
b'GJNJK2489IERGNV3089IVJE9GO398535J3894N358954983O0934',
b'GSKTO5427FGTNUT5621WKTC6714FT8957FGMKJ134527FGTR6751',
        b'GK2489IERGNV3089IVJE39GO398535J3894N358954983FTGY546',
b'G12560DJUWT28719GTAOL5628FGWNIST78154INWTOLP7815FITN',
]) + b'\r\n'
|
1706015
|
import six
from exporters.transform.base_transform import BaseTransform
from exporters.python_interpreter import Interpreter, create_context
class PythonMapTransform(BaseTransform):
"""Transform implementation that maps items using Python expressions
"""
supported_options = {
"map": {'type': six.string_types},
}
def __init__(self, *args, **kwargs):
super(PythonMapTransform, self).__init__(*args, **kwargs)
self.map_expression = self.read_option('map')
self.interpreter = Interpreter()
self.interpreter.check(self.map_expression)
def _map_item(self, it):
context = create_context(item=it)
return self.interpreter.eval(expression=self.map_expression, context=context)
def transform_batch(self, batch):
return (self._map_item(it) for it in batch)
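# A minimal usage sketch (hypothetical options shape; the 'map' expression is
# evaluated once per item with `item` bound in the interpreter context):
#   transform = PythonMapTransform({'options': {'map': "item.get('price', 0) * 2"}})
#   doubled = list(transform.transform_batch([{'price': 3}, {'price': 5}]))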
|
1706043
|
import bisect
import heapq
import itertools
from heapq import heappop, heappush
from typing import List

# DP over jobs sorted by end time: dp holds [end_time, best_profit] pairs with
# non-decreasing profit; bisect finds the best profit achievable before a start.
class Solution:
def jobScheduling(self, startTime: List[int], endTime: List[int], profit: List[int]) -> int:
jobs = sorted(zip(startTime, endTime, profit), key=lambda v: v[1])
dp = [[0, 0]]
for s, e, p in jobs:
i = bisect.bisect(dp, [s + 1]) - 1
if dp[i][1] + p > dp[-1][1]:
dp.append([e, dp[i][1] + p])
return dp[-1][1]
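# Alternative: a time-ordered sweep with a min-heap. A start entry
# (start, 1, end, profit) spawns a completion entry (end, 0, res + profit);
# completions fold the accumulated profit into `res` before later starts.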
class Solution:
def jobScheduling(self, startTime: List[int], endTime: List[int], profit: List[int]) -> int:
H = sorted(zip(startTime, itertools.repeat(1), endTime, profit))
res = 0
while H:
t = heapq.heappop(H)
if t[1]:
heapq.heappush(H, (t[2], 0, res + t[3]))
else:
res = max(res, t[2])
return res
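# Alternative: iterate jobs by start time with a min-heap of (end_time,
# profit-if-taken); jobs that ended at or before the current start are folded
# into `total` first.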
class Solution:
def jobScheduling(self, startTime: List[int], endTime: List[int], profit: List[int]) -> int:
jobs = sorted(zip(startTime, endTime, profit), key=lambda v: v[0])
hp = []
total = 0
for s,e,p in jobs:
while hp and hp[0][0] <= s:
popd = heappop(hp)
total = max(total, popd[1])
heappush(hp, (e, p + total))
while hp:
popd = heappop(hp)
total = max(total, popd[1])
return total
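# Quick check (example from the problem statement): jobs (1,3,50), (2,4,10),
# (3,5,40), (3,6,70) -> take jobs 1 and 4 for a best profit of 120.
#   Solution().jobScheduling([1, 2, 3, 3], [3, 4, 5, 6], [50, 10, 40, 70])  # 120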
|
1706070
|
import copy
from typing import List, Union, Tuple, Dict
import statistics
from itertools import chain, repeat, islice
from .node import Node, NodeType
def pad_list(iterable, size, padding=None):
return list(islice(chain(iterable, repeat(padding)), size))
class Connection(object):
connections: Dict['Connection', int] = {}
global_innovation: int = 0
def __init__(self, in_node: Node, out_node: Node, weight: float = 1.0,
dummy: bool = False):
self.in_node = in_node
self.out_node = out_node
self.enabled = True
self.dummy = dummy
if dummy: return
self.weight = weight
self.innovation = Connection.register_connection(self)
self.out_node.inputs.append(self)
def __gt__(self, other):
return self.innovation > other.innovation
@classmethod
def register_connection(cls, new_connection:'Connection') -> int:
if new_connection in cls.connections:
return cls.connections[new_connection]
else:
cls.global_innovation += 1
cls.connections[new_connection] = cls.global_innovation
return cls.global_innovation
def __hash__(self):
return hash(str(self.in_node.id)+str(self.out_node.id))
def __eq__(self, other):
if isinstance(other, Connection):
return ((self.in_node == other.in_node) and
(self.out_node == other.out_node))
else:
raise ValueError(f'Value type should be Connection, got {type(other)}')
def copy(self):
return copy.deepcopy(self)
def __str__(self):
string = f'{self.in_node.id} -> {self.out_node.id} '
string = f'{string}Weight: {self.weight:.3f} '
if self.dummy:
string = f'{string}Innovation No: Dummy '
else:
string = f'{string}Innovation No: {self.innovation} '
string = f'{string}Disabled: {not self.enabled} '
return string
def __repr__(self):
if self.dummy:
return 'Dummy'
else:
return str(self.innovation)
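# A minimal sketch (Node(id, type) as used below): connections with the same
# in/out pair share one global innovation number across genomes.
#   a, b = Node(1, NodeType.HIDDEN), Node(2, NodeType.HIDDEN)
#   c1, c2 = Connection(a, b, weight=0.5), Connection(a, b, weight=-0.2)
#   assert c1.innovation == c2.innovation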
def align_connections(
connections_1: List[Connection],
connections_2: List[Connection]) -> Tuple[
List[Connection],
List[Connection],
int, int, float]:
dummy_node = Node(0, NodeType.HIDDEN)
dummy_connection = Connection(dummy_node, dummy_node, dummy=True)
end = dummy_connection
iterators = [chain(i, [end]) for i in [sorted(connections_1),
sorted(connections_2)]]
values = [next(i) for i in iterators]
connections_1 = []
connections_2 = []
weights = []
excess = 0
disjoint = 0
while not all(v is end for v in values):
smallest = min(v for v in values if v is not end)
alignment = []
match = True
for v in values:
if v == smallest:
alignment.append(v)
else:
match = False
alignment.append(dummy_connection)
if values[0] is end or values[1] is end:
excess += 1
else:
disjoint += 1
connection_1, connection_2 = alignment
if match:
weights.append(abs(connection_1.weight - connection_2.weight))
connections_1.append(connection_1)
connections_2.append(connection_2)
values = [next(i) if v == smallest else v
for i, v in zip(iterators, values)]
    # guard against genomes with no matching genes (mean of an empty list raises)
    average_weight_difference = statistics.mean(weights) if weights else 0.0
    return connections_1, connections_2, disjoint, excess, average_weight_difference
|
1706093
|
import logging
logging.basicConfig(level=logging.DEBUG)
# export SLACK_API_TOKEN=<PASSWORD>-***
# python3 integration_tests/samples/readme/proxy.py
import os
from slack_sdk.web import WebClient
from ssl import SSLContext
sslcert = SSLContext()
# pip3 install proxy.py
# proxy --port 9000 --log-level d
proxyinfo = "http://localhost:9000"
client = WebClient(token=os.environ["SLACK_API_TOKEN"], ssl=sslcert, proxy=proxyinfo)
response = client.chat_postMessage(channel="#random", text="Hello World!")
print(response)
|
1706095
|
from anchore_engine.db import GrypeDBFeedMetadata
from anchore_engine.subsys import logger
class NoActiveGrypeDB(Exception):
def __init__(self):
super().__init__("No active grypedb available")
def get_most_recent_active_grypedb(session) -> GrypeDBFeedMetadata:
"""
    Queries active GrypeDBFeedMetadata records, ordered by created_at descending.
    Raises NoActiveGrypeDB if no active grypedb is present.
"""
active_db = (
session.query(GrypeDBFeedMetadata)
.filter(GrypeDBFeedMetadata.active.is_(True))
.order_by(GrypeDBFeedMetadata.created_at.desc())
.limit(1)
.all()
)
if not active_db:
logger.error("No active grypedb found")
raise NoActiveGrypeDB
else:
return active_db[0]
|
1706167
|
N, G = map(int, input().split())
# read N (rune, value) pairs into a flat lookup list: [rune, value, rune, value, ...]
runa_valor_amizade = []
for _ in range(N):
    ri, vi = input().split()
    runa_valor_amizade.append(ri)
    runa_valor_amizade.append(int(vi))
X = int(input())
recitadas = input().split()
recitadas = recitadas[:X]
soma = 0
for i in range(X):
valor_a_ser_procurado = recitadas[i]
encontrado = runa_valor_amizade.index(valor_a_ser_procurado)
soma += runa_valor_amizade[encontrado + 1]
print(soma)
if soma >= G:
print("You shall pass!")
else:
print("My precioooous")
|
1706186
|
from django.test import TestCase
from backend.models import EventModel, ScheduleModel, TenantModel, AwsEnvironmentModel
from datetime import datetime
class EventModelTestCase(TestCase):
    # Verify that no events are registered
def test_is_empty(self):
objects_all = EventModel.objects.all()
self.assertEqual(objects_all.count(), 0)
    # Verify that an event can be created
def test_create(self):
now = datetime.now()
objects_create = EventModel.objects.create(created_at=now, updated_at=now)
objects_create.save()
event_all = EventModel.all()
self.assertEqual(event_all.count(), 1)
    # Verify that a created event can be deleted
def test_delete(self):
now = datetime.now()
objects_create = EventModel.objects.create(created_at=now, updated_at=now)
objects_create.save()
event_all = EventModel.all()
self.assertEqual(event_all.count(), 1)
event_all.all().delete()
event_all = EventModel.all()
self.assertEqual(event_all.count(), 0)
    # Verify that a created event can be updated
def test_update(self):
now = datetime.now()
objects_create = EventModel.objects.create(created_at=now, updated_at=now)
objects_create.save()
event_all = EventModel.all()
self.assertEqual(event_all.count(), 1)
event = event_all[0]
event.updated_at = datetime.now()
event.save()
updated_event = EventModel.get(event.id)
self.assertNotEqual(updated_event.updated_at, now)
    # Verify that an event can be registered as a schedule
def test_schedule(self):
now = datetime.now()
tenant_model = TenantModel.objects.create(
tenant_name="test_tenant",
created_at=now,
updated_at=now)
aws_environment_model = AwsEnvironmentModel.objects.create(
name="test_aws_name",
aws_account_id="test_account",
aws_external_id="test_external",
aws_role="test_role",
tenant=tenant_model,
created_at=now,
updated_at=now)
schedule_model = ScheduleModel.objects.create(
name="test_schedule",
action="test_action",
params="test_params",
notification=True,
aws_environment=aws_environment_model,
resource_id="test_resource",
service="test_service",
region="test_region"
)
schedule_model.save()
        # When fetched as an event, the instance keeps its Schedule subclass
event_all = EventModel.all()
self.assertTrue(isinstance(event_all[0], ScheduleModel))
        # When fetched as a Schedule, it is likewise distinguishable
schedule_all = ScheduleModel.all()
self.assertTrue(isinstance(schedule_all[0], ScheduleModel))
    # Verify that deleting an AWS environment cascades to its schedules
def test_delete_cascade_aws(self):
now = datetime.now()
tenant_model = TenantModel.objects.create(
tenant_name="test_tenant",
created_at=now,
updated_at=now)
aws_environment_model = AwsEnvironmentModel.objects.create(
name="test_aws_name",
aws_account_id="test_account",
aws_external_id="test_external",
aws_role="test_role",
tenant=tenant_model,
created_at=now,
updated_at=now)
schedule_model = ScheduleModel.objects.create(
name="test_schedule",
action="test_action",
params="test_params",
notification=True,
aws_environment=aws_environment_model,
resource_id="test_resource",
service="test_service",
region="test_region"
)
schedule_model.save()
event_all = EventModel.all()
self.assertEqual(event_all.count(), 1)
aws = AwsEnvironmentModel.objects.get(name="test_aws_name")
aws.delete()
event_all = EventModel.all()
self.assertEqual(event_all.count(), 0)
|
1706216
|
import numpy as np
from sdcit.hsic import HSIC
from sdcit.utils import rbf_kernel_median, residual_kernel, residualize, columnwise_normalizes
def FCIT_noniid_K(Kx, Ky, cond_Kx, cond_Ky, Kz=None, seed=None):
if seed is not None:
np.random.seed(seed)
RX1 = residual_kernel(Kx, cond_Kx)
RY1 = residual_kernel(Ky, cond_Ky)
return FCIT_K(RX1, RY1, Kz)
def FCIT_noniid(X, Y, Cond_X, Cond_Y, Z=None, seed=None):
"""Flaxman et al. Residualization-based CI Test, X_||_Y | Z
References
----------
<NAME>., <NAME>., & <NAME>. (2016). Gaussian Processes for Independence Tests with Non-iid Data in Causal Inference.
ACM Transactions on Intelligent Systems and Technology, 7(2), 1–23.
"""
if seed is not None:
np.random.seed(seed)
RX1 = residualize(X, Cond_X)
RY1 = residualize(Y, Cond_Y)
return FCIT(RX1, RY1, Z)
def FCIT_K(Kx, Ky, Kz=None, use_expectation=True, with_gp=True, sigma_squared=1e-3, seed=None, hsic_kws=None):
if seed is not None:
np.random.seed(seed)
if hsic_kws is None:
hsic_kws = {}
if Kz is None:
return HSIC(Kx, Ky, **hsic_kws)
RX_Z = residual_kernel(Kx, Kz, use_expectation=use_expectation, with_gp=with_gp, sigma_squared=sigma_squared)
RY_Z = residual_kernel(Ky, Kz, use_expectation=use_expectation, with_gp=with_gp, sigma_squared=sigma_squared)
return HSIC(RX_Z, RY_Z, **hsic_kws)
def FCIT(X, Y, Z=None, kern=rbf_kernel_median, normalize=False, seed=None, hsic_kws=None):
"""Flaxman et al. Residualization-based CI Test, X_||_Y | Z
References
----------
<NAME>., <NAME>., & <NAME>. (2016). Gaussian Processes for Independence Tests with Non-iid Data in Causal Inference.
ACM Transactions on Intelligent Systems and Technology, 7(2), 1–23.
"""
if seed is not None:
np.random.seed(seed)
if hsic_kws is None:
hsic_kws = {}
if normalize:
X, Y, Z = columnwise_normalizes(X, Y, Z)
if Z is None:
return HSIC(kern(X), kern(Y), **hsic_kws)
e_YZ = residualize(Y, Z)
e_XZ = residualize(X, Z)
if normalize:
e_XZ, e_YZ = columnwise_normalizes(e_XZ, e_YZ)
return HSIC(kern(e_XZ), kern(e_YZ), **hsic_kws)
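# A minimal usage sketch (assumes the sdcit package is available): X and Y are
# both driven by Z, so after residualizing on Z the HSIC statistic returned by
# FCIT (presumably a p-value) should be consistent with X _||_ Y | Z. The
# synthetic data below is illustrative only.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    Z = rng.randn(200, 1)
    X = Z + 0.1 * rng.randn(200, 1)
    Y = Z + 0.1 * rng.randn(200, 1)
    print(FCIT(X, Y, Z, seed=0))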
|
1706234
|
from inspect import signature
# TODO: add interface and for checking labels at the training and testing stages (by analogy with sklearn)
# TODO: override __repr__ method by analogy with sklearn and how base Decomposition interface.
class Classifier(object):
""" General interface for all classes that describe classification algorithms.
Parameters
-------
probability : bool
Whether to enable probability estimates. This must be enabled prior
to calling ``fit``, and will slow down that method.
verbose : bool
Enable verbose output.
"""
def __init__(self, probability, verbose):
self.probability = probability
self.verbose = verbose
@property
def name(self):
""" Name of the classifier.
Returns
-------
str
"""
return self.__class__.__name__
def set_params(self, **params):
""" Set the parameters of this estimator.
Returns
-------
self
"""
# Simple optimization to gain speed (inspect is slow)
if not params:
return self
valid_params = self.get_params()
for key, value in params.items():
if key not in valid_params:
raise ValueError("Invalid parameter '{0}' for estimator {1}. "
"Check the list of available parameters "
"with `estimator.get_params().keys()`.".format(key,
self.name
)
)
            setattr(self, key, value)
return self
def get_params(self):
""" Get parameters for this estimator.
Returns
-------
params : dict
Dictionary of parameter names mapped to their values.
"""
params = dict()
for name in self._get_param_names():
value = getattr(self, name, None)
params[name] = value
return params
def fit(self, X, y):
""" Fit a classification model according to the given data. """
raise NotImplementedError('Not implemented in base ({}) class'.format(self.__class__.__name__))
def predict(self, X):
""" Predict the class labels for the provided data. """
raise NotImplementedError('Not implemented in base ({}) class'.format(self.__class__.__name__))
    def predict_proba(self, X):
        """ Compute probabilities of possible outcomes for samples in the provided data. """
        raise NotImplementedError('Not implemented in base ({}) class'.format(self.__class__.__name__))
def score(self, X, y):
""" Returns the mean accuracy on the given test data and labels. """
raise NotImplementedError('Not implemented in base ({}) class'.format(self.__class__.__name__))
@classmethod
def _get_param_names(cls):
""" Get parameter names for the estimator.
Returns
-------
param_names : list[str]
            Parameter names extracted from the constructor's signature.
"""
# Introspect the constructor arguments
init_signature = signature(cls.__init__)
# Extract the constructor positional and keyword parameters excluding 'self'
params = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind == p.POSITIONAL_OR_KEYWORD]
# Get and sort names of parameters extracted from the constructor
param_names = sorted([p.name for p in params])
return param_names
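# A minimal sketch of a concrete subclass (illustrative, not part of the
# library), showing how get_params()/set_params() pick up constructor
# arguments through signature introspection.
class MajorityClassifier(Classifier):
    """ Toy classifier that always predicts the most frequent training label. """
    def __init__(self, probability=False, verbose=False):
        super(MajorityClassifier, self).__init__(probability, verbose)
        self._label = None
    def fit(self, X, y):
        from collections import Counter
        self._label = Counter(y).most_common(1)[0][0]
        return self
    def predict(self, X):
        return [self._label] * len(X)
# MajorityClassifier(verbose=True).get_params()
# -> {'probability': False, 'verbose': True}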
|
1706243
|
import base64
import datetime
import httplib2
import requests
import time
import urllib.parse
import Crypto.Hash.SHA256 as SHA256
import Crypto.PublicKey.RSA as RSA
import Crypto.Signature.PKCS1_v1_5 as PKCS1_v1_5
class GCS:
host = 'storage.googleapis.com'
def __init__(self, login, key, bucket_ns = None):
self.__login = login
self.__key = key
if bucket_ns is not None:
self.__bucket_ns = bucket_ns + '_'
else:
self.__bucket_ns = ''
def __bucket(self, name):
return self.__bucket_ns + name
def __credentials(self):
scope = 'https://www.googleapis.com/auth/devstorage.read_write'
from oauth2client.client import SignedJwtAssertionCredentials
credentials = SignedJwtAssertionCredentials(
self.__login,
self.__key,
scope = scope)
credentials.refresh(httplib2.Http())
return credentials.access_token
def __headers(self, content_length = 0, content_type = None,
public = False):
headers = {
'Authorization' : 'Bearer ' + self.__credentials(),
'Content-Length': str(content_length),
}
if content_type is not None:
headers['Content-Type'] = content_type
if public:
headers['x-goog-acl'] = 'public-read'
return headers
def __url(self, bucket, path = ''):
'''`path` should be url-encoded.'''
return \
'https://%s.%s/%s' % (self.__bucket(bucket), GCS.host, path)
def __request(self, method, *args, **kwargs):
response = method(*args, **kwargs)
        if response.status_code // 100 != 2:
raise Exception('gcs error %s: %s' %
(response.status_code, response.content))
return response
def __sign_url(self,
bucket,
path,
expiration,
method,
content_type = None,
content_length = None,
headers = {},
):
'''`path` will be url-encoded, don't do it.'''
expiration = datetime.datetime.now() + expiration
expiration = int(time.mktime(expiration.timetuple()))
path = urllib.parse.quote(path)
chunks = []
chunks.append(method)
chunks.append('') # MD5, optional
chunks.append(content_type or '')
for k, v in headers.items():
chunks.append('%s:%s' % (k, v))
chunks.append(str(expiration))
chunks.append('/%s/%s' % (self.__bucket(bucket), path))
signature_string = '\n'.join(chunks)
shahash = SHA256.new(signature_string.encode('utf-8'))
private_key = RSA.importKey(self.__key, passphrase='<PASSWORD>')
signer = PKCS1_v1_5.new(private_key)
signature_bytes = signer.sign(shahash)
sig = base64.b64encode(signature_bytes)
params = {
'GoogleAccessId': self.__login,
'Expires': str(expiration),
'Signature': sig
}
params = urllib.parse.urlencode(params)
url = 'https://%s.%s/%s?%s' % (self.__bucket(bucket), GCS.host,
path, params)
return url
def upload(self, bucket, path, data,
content_type = None,
public = False,
verify = True):
self.__request(
requests.put,
self.__url(bucket, path),
headers = self.__headers(content_length = len(data),
content_type = content_type,
public = public),
data = data,
verify = verify,
)
def upload_url(self, bucket, path, expiration,
content_type = None,
content_length = None,
public = False):
'''`path` will be url-encoded, don't do it.'''
headers = {}
if public:
headers = {
'x-goog-acl': 'public-read',
}
return self.__sign_url(bucket, path, expiration, 'PUT',
content_type = content_type,
content_length = content_length,
headers = headers)
def download_url(self, bucket, path, expiration,
content_type = None,
content_length = None):
'''`path` will be url-encoded, don't do it.'''
return self.__sign_url(bucket, path, expiration, 'GET',
content_type = content_type,
content_length = content_length)
def storage_url(self, bucket, path):
'''A direct access to something stored in GCS. Will require
authentication to be used.
'''
        return '{host}/{bucket}/{path}'.format(
            host = 'https://storage.cloud.google.com',
            bucket = self.__bucket(bucket),
            path = path,
        )
def delete_url(self, bucket, path, expiration,
content_type = None,
content_length = None):
return self.__sign_url(bucket, path, expiration, 'DELETE',
content_length = 0)
def start_upload(self, bucket, path, content_type = None):
response = self.__request(
requests.post,
self.__url(bucket, path),
            headers = self.__headers(content_length = 0,  # initiation request has no body
                                     content_type = content_type),
)
return response.headers['location']
def delete(self, bucket, path):
self.__request(
requests.delete,
self.__url(bucket, path),
headers = self.__headers(),
)
def bucket_list(self, bucket, prefix = None):
        '''List the object names in the bucket, optionally filtered by prefix.'''
response = self.__request(
requests.get,
self.__url(bucket),
headers = self.__headers(),
params = {'prefix': prefix},
)
import xml.etree.ElementTree as ET
root = ET.fromstring(response.text)
        strip = len(prefix) + 1 if prefix else 0  # drop '<prefix>/' when a prefix was given
        return [
            n.text[strip:] for n in
            root.findall(
                './{http://doc.s3.amazonaws.com/2006-03-01}Contents/'
                '{http://doc.s3.amazonaws.com/2006-03-01}Key'
            )
        ]
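# Illustrative usage sketch (placeholder credentials; real use needs a service
# account email and its RSA private key):
#   gcs = GCS('svc-account@project.iam.gserviceaccount.com', private_key_pem)
#   gcs.upload('media', 'images/logo.png', png_bytes,
#              content_type = 'image/png', public = True)
#   url = gcs.download_url('media', 'images/logo.png',
#                          datetime.timedelta(hours = 1))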
|
1706289
|
def get_error_test_cases(errors):
return [ERRORS.get(e) for e in errors]
ERRORS = {
'invalid-parameters': {
"parameter": {
"example": [
"The example field is required"
]
},
"type": "https://www.rev.ai/api/v1/errors/invalid-parameters",
"title": "Your request parameters didn't validate",
"status": 400
},
'unauthorized': {
"title": "Authorization has been denied for this request",
"status": 401
},
'job-not-found': {
"type": "https://www.rev.ai/api/v1/errors/job-not-found",
"title": "could not find job",
"status": 404
},
'out-of-credit': {
"title": "You do not have enough credits",
"type": "https://www.rev.ai/api/v1/errors/out-of-credit",
"detail": "You have only 60 seconds remaining",
"current_balance": 60,
"status": 403
},
'invalid-job-state': {
"allowed_values": [
"transcribed"
],
"current_value": "in_progress",
"type": "https://rev.ai/api/v1/errors/invalid-job-state",
"title": "Job is in invalid state",
"detail": "Job is in invalid state to obtain the transcript",
"status": 409
}
}
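# Example: build fixtures for a pair of error responses straight from the map
# above (keys absent from ERRORS simply come back as None via dict.get).
if __name__ == '__main__':
    cases = get_error_test_cases(['unauthorized', 'job-not-found'])
    assert cases[0]['status'] == 401
    assert cases[1]['status'] == 404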
|
1706324
|
class Solution(object):
# def letterCasePermutation(self, S):
# ans = [[]]
# for char in S:
# n = len(ans)
# if char.isalpha():
# # Double the ans
# for i in xrange(n):
# ans.append(ans[i][:])
# ans[i].append(char.lower())
# ans[n + i].append(char.upper())
# else:
# # Normal append
# for i in xrange(n):
# ans[i].append(char)
# return map("".join, ans)
    def letterCasePermutation(self, S):
        # Bitmask enumeration: with B alphabetic characters there are 1 << B
        # case assignments; bit b of `bits` selects the case of the b-th letter.
        B = sum(letter.isalpha() for letter in S)
        ans = []
        for bits in range(1 << B):
            b = 0
            word = []
            for letter in S:
                if letter.isalpha():
                    if (bits >> b) & 1:
                        word.append(letter.lower())
                    else:
                        word.append(letter.upper())
                    b += 1
                else:
                    word.append(letter)
            ans.append("".join(word))
        return ans
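# Example: "a1b2" has B = 2 letters, hence 1 << 2 = 4 case permutations.
# Solution().letterCasePermutation("a1b2")
# -> ['A1B2', 'a1B2', 'A1b2', 'a1b2']  (bits = 0..3; a set bit lowercases)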
|
1706332
|
import unittest
import filecmp
import os
from clockwork.ena import submission_receipt
modules_dir = os.path.join(os.path.dirname(os.path.abspath(submission_receipt.__file__)), os.pardir)
data_dir = os.path.normpath(os.path.join(modules_dir, 'tests', 'data', 'ena', 'submission_receipt'))
class TestSubmissionReceipt(unittest.TestCase):
def test_init_success_receipt(self):
'''test __init__ with successful receipt'''
receipt_xml = os.path.join(data_dir, 'init.good_receipt.xml')
receipt = submission_receipt.SubmissionReceipt(receipt_xml)
self.assertTrue(receipt.successful)
expected_accessions = {
'SAMPLE': 'ERS1234567',
'SUBMISSION': 'ERA1234567',
}
self.assertEqual(expected_accessions, receipt.accessions)
def test_init_fail_receipt(self):
'''test __init__ with failed receipt'''
receipt_xml = os.path.join(data_dir, 'init.bad_receipt.xml')
receipt = submission_receipt.SubmissionReceipt(receipt_xml)
self.assertFalse(receipt.successful)
self.assertEqual({}, receipt.accessions)
|
1706343
|
import unittest
from accumulator.multipointer_accumulator import get_representatives, MultipointerAccumulatorFactory
from .base import BaseAccumulatorTestSuite
class GeneralizedAccumulatorTestSuite(BaseAccumulatorTestSuite, unittest.TestCase):
"""Generalized accumulator test cases."""
def test_get_representatives(self):
        # 2^9 <= n < 2^10
        # floor(log n) = 9
# ceil(floor(log n)^(1/2)) = 3
self.assertEqual(get_representatives(0b1000000001, 2), [0b1000000000])
self.assertEqual(get_representatives(0b1000111001, 2), [0b1000111000, 0b1000100000])
self.assertEqual(get_representatives(0b1110111110, 2), [0b1110111101, 0b1110111100, 0b1110110000])
self.assertEqual(get_representatives(0b1110111111, 2), [0b1110111110, 0b1110111000])
self.assertEqual(get_representatives(0b1111111111, 2), [0b1111111110, 0b1111111000, 0b1000000000])
# 2^10 <= n < 2^11
# floor(log n) = 10
# ceil(floor(log n)^(1/2)) = 4
self.assertEqual(get_representatives(0b10000000001, 2), [0b10000000000])
self.assertEqual(get_representatives(0b10011011010, 2), [0b10011011001, 0b10011011000, 0b10010000000])
self.assertEqual(get_representatives(0b11111111111, 2), [0b11111111110, 0b11111110000])
# 2^15 <= n < 2^16
# floor(log n) = 15
# ceil(floor(log n)^(1/2)) = 4
self.assertEqual(get_representatives(0b1000000000000000, 2), [0b0111111111111111])
self.assertEqual(get_representatives(0b1000000000000001, 2), [0b1000000000000000])
self.assertEqual(get_representatives(0b1111111111111111, 2), [0b1111111111111110, 0b1111111111110000])
# 2^16 <= n < 2^17
# floor(log n) = 16
# ceil(floor(log n)^(1/2)) = 4
self.assertEqual(get_representatives(0b10000000000000001, 2), [0b10000000000000000])
self.assertEqual(
get_representatives(0b11111111111111111, 2),
[0b11111111111111110, 0b11111111111110000, 0b10000000000000000]
)
# 2^8 <= n < 2^9
# floor(log n) = 8
# ceil(floor(log n)^(1/3)) = 2
self.assertEqual(get_representatives(0b100000000, 3), [0b011111111])
self.assertEqual(get_representatives(0b111111111, 3), [0b111111110, 0b111111100, 0b111110000, 0b100000000])
def get_instances(self):
factory = MultipointerAccumulatorFactory()
return factory.create_accumulator(2)
if __name__ == '__main__':
unittest.main()
|
1706346
|
import unittest
from ast2vec import Source
from ast2vec import bblfsh_roles
from snippet_ranger import utils
from snippet_ranger.tests import models
class UtilsTests(unittest.TestCase):
def test_get_func_names_bow(self):
source = Source().load(models.TEST_LIB)
bow = utils.get_func_names_bow(source)
true_bow = {
"f1": 1,
"f2": 1,
"f3": 1,
"f35": 1}
self.assertEqual(bow, true_bow)
def test_uast_to_bag(self):
source = Source().load(models.TEST_REPO)
uast = source.uasts[0]
bag = utils.uast_to_bag(uast)
true_bag = {
"test_lib": 2,
"f": 2,
"f1": 2,
"f2": 2,
"f3": 4}
self.assertEqual(bag, true_bag)
def test_get_imports(self):
source = Source().load(models.TEST_REPO)
uast = source.uasts[0]
imports = utils.get_imports(uast)
true_imports = {"f1", "test_lib"}
self.assertEqual(imports, true_imports)
source = Source().load(models.TEST_LIB)
uast = source.uasts[0]
imports = utils.get_imports(uast)
true_imports = set()
self.assertEqual(imports, true_imports)
def test_has_import(self):
source = Source().load(models.TEST_REPO)
self.assertTrue(utils.has_import("f1", source.uasts[0]))
self.assertTrue(utils.has_import("test_lib", source.uasts[0]))
source = Source().load(models.TEST_LIB)
self.assertFalse(utils.has_import("f1", source.uasts[0]))
self.assertFalse(utils.has_import("test_lib", source.uasts[0]))
if __name__ == "__main__":
unittest.main()
|
1706394
|
from ..._core import ensure_contiguous_state
from sympl import Stepper, get_constant
import logging
try:
from . import _simple_physics as phys
except ImportError as error:
logging.warning(
        'Import failed. Simple Physics is likely not compiled and will not be '
'available.'
)
print(error)
class SimplePhysics(Stepper):
"""
Interface to the simple physics package.
<NAME> Jablonowski 2012:
title = {Idealized tropical cyclone simulations of intermediate complexity: a test case for {AGCMs}}
journal = {Journal of Advances in Modeling Earth Systems}
"""
input_properties = {
'air_temperature': {
'dims': ['mid_levels', '*'],
'units': 'degK',
},
'air_pressure': {
'dims': ['mid_levels', '*'],
'units': 'Pa',
},
'air_pressure_on_interface_levels': {
'dims': ['interface_levels', '*'],
'units': 'Pa',
},
'surface_air_pressure': {
'dims': ['*'],
'units': 'Pa',
},
'surface_temperature': {
'dims': ['*'],
'units': 'degK',
},
'specific_humidity': {
'dims': ['mid_levels', '*'],
'units': 'kg/kg',
},
'northward_wind': {
'dims': ['mid_levels', '*'],
'units': 'm s^-1',
},
'eastward_wind': {
'dims': ['mid_levels', '*'],
'units': 'm s^-1',
},
'surface_specific_humidity': {
'dims': ['*'],
'units': 'kg/kg',
},
'latitude': {
'dims': ['*'],
'units': 'degrees_north',
}
}
diagnostic_properties = {
'stratiform_precipitation_rate': {
'dims': ['*'],
'units': 'm s^-1',
},
'surface_upward_latent_heat_flux': {
'dims': ['*'],
'units': 'W m^-2',
},
'surface_upward_sensible_heat_flux': {
'dims': ['*'],
'units': 'W m^-2',
},
}
output_properties = {
'air_temperature': {'units': 'degK'},
'specific_humidity': {'units': 'kg/kg'},
'northward_wind': {'units': 'm s^-1'},
'eastward_wind': {'units': 'm s^-1'},
}
def __init__(
self,
simulate_cyclone=False,
large_scale_condensation=True,
boundary_layer=True,
surface_fluxes=True,
use_external_surface_temperature=True,
use_external_surface_specific_humidity=False,
top_of_boundary_layer=85000.0,
boundary_layer_influence_height=20000.0,
drag_coefficient_heat_fluxes=0.0011,
base_momentum_drag_coefficient=0.0007,
wind_dependent_momentum_drag_coefficient=0.000065,
maximum_momentum_drag_coefficient=0.002,
**kwargs):
"""
Args:
simulate_cyclone (bool):
Option indicating whether the package must
simulate a tropical cyclone. This was the original test case this
physics package was used for.
Default value is False.
large_scale_condensation (bool):
Option indicating whether the package
must add moisture and heating tendencies due to large scale condensation.
Default value is True.
boundary_layer (bool):
Option indicating whether the package must simulate
a simple boundary layer. **It is recommended that this option remain True
unless another boundary layer component is being used**.
Default value is True.
surface_fluxes (bool):
Option indicating whether the package must calculate
surface fluxes. **It is recommended that this option remain True unless the
fluxes are being calculated by another component**.
Default value is True.
use_external_surface_temperature (bool):
Option indicating whether the package
must use surface temperature available in the model state.
If False, an internally generated surface temperature is used.
Default value is True.
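            use_external_surface_specific_humidity (bool):
                Option indicating whether the package
                must use surface specific humidity available in the model state.
                If False, an internally computed surface specific humidity is used.
                Default value is False.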
top_of_boundary_layer (float):
The nominal top of the boundary layer in :math:`Pa`.
boundary_layer_influence_height (float):
The decay of the influence of the boundary layer above
:code:`top_of_boundary_layer` in :math:`Pa`. The influence
reduces to :math:`1/e` times the boundary layer value at
a pressure given by :code:`top_of_boundary_layer+boundary_layer_influence_height`.
drag_coefficient_heat_fluxes (float):
The wind speed independent drag coefficient for latent and sensible
heat fluxes.
base_momentum_drag_coefficient (float):
The minimum drag coefficient for winds.
wind_dependent_momentum_drag_coefficient (float):
The part of the momentum drag coefficient that depends on the surface wind
speed. The total drag coefficient is given by
:code:`base_momentum_drag_coefficient + wind_dependent_momentum_drag_coefficient*u_base`,
where :code:`u_base` is the surface wind speed.
maximum_momentum_drag_coefficient (float):
This drag coefficient is used for surface wind speeds exceeding :math:`20 m/s`.
"""
self._cyclone = simulate_cyclone
self._lsc = large_scale_condensation
self._pbl = boundary_layer
self._surface_flux = surface_fluxes
self._use_ext_ts = use_external_surface_temperature
self._use_ext_qsurf = use_external_surface_specific_humidity
phys.init_simple_physics(self._cyclone, self._lsc, self._pbl,
self._surface_flux, self._use_ext_ts,
self._use_ext_qsurf)
self._Ct = drag_coefficient_heat_fluxes
self._pbl_top = top_of_boundary_layer
self._delta_pbl = boundary_layer_influence_height
self._Cd0 = base_momentum_drag_coefficient
self._Cd1 = wind_dependent_momentum_drag_coefficient
self._Cm = maximum_momentum_drag_coefficient
self._set_fortran_constants()
super(SimplePhysics, self).__init__(**kwargs)
def _set_fortran_constants(self):
self._g = get_constant('gravitational_acceleration', 'm/s^2')
self._Cpd = get_constant('heat_capacity_of_dry_air_at_constant_pressure', 'J/kg/degK')
self._Rair = get_constant('gas_constant_of_dry_air', 'J/kg/degK')
self._Rcond = get_constant('gas_constant_of_vapor_phase', 'J/kg/degK')
self._radius = get_constant('planetary_radius', 'm')
self._Omega = get_constant('planetary_rotation_rate', 's^-1')
self._Lv = get_constant('latent_heat_of_condensation', 'J/kg')
self._rho_condensible = get_constant('density_of_liquid_water', 'kg/m^3')
phys.set_physical_constants(self._g, self._Cpd, self._Rair, self._Lv,
self._Rcond, self._radius, self._Omega,
self._rho_condensible, self._pbl_top,
self._delta_pbl, self._Ct, self._Cd0,
self._Cd1, self._Cm)
@ensure_contiguous_state
def array_call(self, state, timestep):
'''
Calculate surface and boundary layer tendencies.
Args:
state (dict):
The model state dictionary
timestep (timedelta):
The model timestep
        Returns:
            diagnostics (dict), state (dict):
                * Diagnostics for Simple Physics.
                * The updated model state.
'''
self._set_fortran_constants()
(t_out, u_out, v_out, q_out, precip_out,
sensible_heat_flux, latent_heat_flux) = phys.get_new_state(
state['eastward_wind'],
state['northward_wind'],
state['air_temperature'],
state['air_pressure'],
state['air_pressure_on_interface_levels'],
state['specific_humidity'],
state['surface_air_pressure'],
state['surface_temperature'],
state['surface_specific_humidity'],
state['latitude'],
timestep.total_seconds()
)
latent_heat_flux[latent_heat_flux < 0] = 0
new_state = {
'eastward_wind': u_out,
'northward_wind': v_out,
'air_temperature': t_out,
'specific_humidity': q_out,
}
diagnostics = {
'stratiform_precipitation_rate': precip_out,
'surface_upward_sensible_heat_flux': sensible_heat_flux,
'surface_upward_latent_heat_flux': latent_heat_flux,
}
return diagnostics, new_state
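# A minimal construction sketch (requires the compiled _simple_physics
# extension). The keyword names come from __init__ above; the call pattern
# follows sympl's Stepper interface:
#   physics = SimplePhysics(surface_fluxes=False)
#   diagnostics, new_state = physics(state, timestep)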
|
1706417
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
from torch_scatter import scatter_mean
import torch_geometric.transforms as T
from torch_geometric.utils import normalized_cut, to_dense_batch
from torch_geometric.nn import MetaLayer, SplineConv, max_pool, GlobalAttention
from deepdock.utils.distributions import *
def compute_cluster_batch_index(cluster, batch):
max_prev_batch = 0
for i in range(batch.max().item()+1):
cluster[batch == i] += max_prev_batch
max_prev_batch = cluster[batch == i].max().item() + 1
return cluster
class NodeSampling(nn.Module):
def __init__(self, nodes_per_graph):
super(NodeSampling, self).__init__()
self.num = nodes_per_graph
def forward(self, x):
if self.training:
max_prev_batch = 0
idx = []
counts = torch.unique(x.batch, return_counts=True)[1]
for i in range(x.batch.max().item()+1):
idx.append(torch.randperm(counts[i])[:self.num] + max_prev_batch)
max_prev_batch += counts[i]
idx = torch.cat(idx)
x.batch = x.batch[idx]
x.pos = x.pos[idx]
x.x = x.x[idx]
return x
class ResBlock(nn.Module):
def __init__(self, in_channels, dropout_rate=0.15):
super(ResBlock, self).__init__()
self.projectDown_node = nn.Linear(in_channels, in_channels//4)
self.projectDown_edge = nn.Linear(in_channels, in_channels//4)
self.bn1_node = nn.BatchNorm1d(in_channels//4)
self.bn1_edge = nn.BatchNorm1d(in_channels//4)
self.conv = MetaLayer(edge_model=EdgeModel(in_channels//4), node_model=NodeModel(in_channels//4), global_model=None)
self.projectUp_node = nn.Linear(in_channels//4, in_channels)
self.projectUp_edge = nn.Linear(in_channels//4, in_channels)
self.dropout = nn.Dropout(p=dropout_rate)
self.bn2_node = nn.BatchNorm1d(in_channels)
nn.init.zeros_(self.bn2_node.weight)
self.bn2_edge = nn.BatchNorm1d(in_channels)
nn.init.zeros_(self.bn2_edge.weight)
def forward(self, data):
x, edge_index, edge_attr, batch = data.x, data.edge_index, data.edge_attr, data.batch
h_node = F.elu(self.bn1_node(self.projectDown_node(x)))
h_edge = F.elu(self.bn1_edge(self.projectDown_edge(edge_attr)))
h_node, h_edge, _ = self.conv(h_node, edge_index, h_edge, None, batch)
h_node = self.dropout(self.bn2_node(self.projectUp_node(h_node)))
data.x = F.elu(h_node + x)
h_edge = self.dropout(self.bn2_edge(self.projectUp_edge(h_edge)))
data.edge_attr = F.elu(h_edge + edge_attr)
return data
class EdgeModel(torch.nn.Module):
def __init__(self, in_channels):
super(EdgeModel, self).__init__()
self.edge_mlp = nn.Sequential(nn.Linear(in_channels*3, in_channels), nn.BatchNorm1d(in_channels), nn.ELU())
def forward(self, src, dest, edge_attr, u, batch):
# source, target: [E, F_x], where E is the number of edges.
# edge_attr: [E, F_e]
# u: [B, F_u], where B is the number of graphs.
# batch: [E] with max entry B - 1.
out = torch.cat([src, dest, edge_attr], 1)
return self.edge_mlp(out)
class NodeModel(torch.nn.Module):
def __init__(self, in_channels):
super(NodeModel, self).__init__()
self.node_mlp_1 = nn.Sequential(nn.Linear(in_channels*2, in_channels), nn.BatchNorm1d(in_channels), nn.ELU())
self.node_mlp_2 = nn.Sequential(nn.Linear(in_channels*2, in_channels), nn.BatchNorm1d(in_channels), nn.ELU())
def forward(self, x, edge_index, edge_attr, u, batch):
# x: [N, F_x], where N is the number of nodes.
# edge_index: [2, E] with max entry N - 1.
# edge_attr: [E, F_e]
# u: [B, F_u]
# batch: [N] with max entry B - 1.
row, col = edge_index
out = torch.cat([x[row], edge_attr], dim=1)
out = self.node_mlp_1(out)
out = scatter_mean(out, col, dim=0, dim_size=x.size(0))
out = torch.cat([x, out], dim=1)
return self.node_mlp_2(out)
class TargetNet(nn.Module):
def __init__(self, in_channels, edge_features=3, hidden_dim=128, residual_layers=20, dropout_rate=0.15):
super(TargetNet, self).__init__()
self.node_encoder = nn.Linear(in_channels, hidden_dim)
self.edge_encoder = nn.Linear(edge_features, hidden_dim)
self.conv1 = MetaLayer(edge_model=EdgeModel(hidden_dim), node_model=NodeModel(hidden_dim), global_model=None)
self.conv2 = MetaLayer(edge_model=EdgeModel(hidden_dim), node_model=NodeModel(hidden_dim), global_model=None)
self.conv3 = MetaLayer(edge_model=EdgeModel(hidden_dim), node_model=NodeModel(hidden_dim), global_model=None)
layers = [ResBlock(in_channels=hidden_dim, dropout_rate=dropout_rate) for i in range(residual_layers)]
self.resnet = nn.Sequential(*layers)
def forward(self, data):
data.edge_attr = None
data = T.Cartesian(norm=False, max_value=None, cat=False)(data)
data.x = self.node_encoder(data.x)
data.edge_attr = self.edge_encoder(data.edge_attr)
data.x, data.edge_attr, _ = self.conv1(data.x, data.edge_index, data.edge_attr, None, data.batch)
data.x, data.edge_attr, _ = self.conv2(data.x, data.edge_index, data.edge_attr, None, data.batch)
data.x, data.edge_attr, _ = self.conv3(data.x, data.edge_index, data.edge_attr, None, data.batch)
data = self.resnet(data)
return data
class LigandNet(nn.Module):
def __init__(self, in_channels, edge_features=6, hidden_dim=128, residual_layers=20, dropout_rate=0.15):
super(LigandNet, self).__init__()
self.node_encoder = nn.Linear(in_channels, hidden_dim)
self.edge_encoder = nn.Linear(edge_features, hidden_dim)
self.conv1 = MetaLayer(edge_model=EdgeModel(hidden_dim), node_model=NodeModel(hidden_dim), global_model=None)
self.conv2 = MetaLayer(edge_model=EdgeModel(hidden_dim), node_model=NodeModel(hidden_dim), global_model=None)
self.conv3 = MetaLayer(edge_model=EdgeModel(hidden_dim), node_model=NodeModel(hidden_dim), global_model=None)
layers = [ResBlock(in_channels=hidden_dim, dropout_rate=dropout_rate) for i in range(residual_layers)]
self.resnet = nn.Sequential(*layers)
def forward(self, data):
data.x = self.node_encoder(data.x)
data.edge_attr = self.edge_encoder(data.edge_attr)
data.x, data.edge_attr, _ = self.conv1(data.x, data.edge_index, data.edge_attr, None, data.batch)
data.x, data.edge_attr, _ = self.conv2(data.x, data.edge_index, data.edge_attr, None, data.batch)
data.x, data.edge_attr, _ = self.conv3(data.x, data.edge_index, data.edge_attr, None, data.batch)
data = self.resnet(data)
return data
class DeepDock(nn.Module):
def __init__(self, ligand_model, target_model, hidden_dim, n_gaussians, dropout_rate=0.15,
nodes_per_target=None, dist_threhold=1000):
super(DeepDock, self).__init__()
self.ligand_model = ligand_model
self.target_model = target_model
if nodes_per_target:
self.node_sampling = NodeSampling(nodes_per_graph=nodes_per_target)
else :
self.node_sampling = None
self.MLP = nn.Sequential(nn.Linear(256, hidden_dim), nn.BatchNorm1d(hidden_dim), nn.ELU(), nn.Dropout(p=dropout_rate))
self.z_pi = nn.Linear(hidden_dim, n_gaussians)
self.z_sigma = nn.Linear(hidden_dim, n_gaussians)
self.z_mu = nn.Linear(hidden_dim, n_gaussians)
self.atom_types = nn.Linear(128, 28)
self.bond_types = nn.Linear(256, 6)
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.dist_threhold = dist_threhold
def forward(self, data_ligand, data_target, y=None):
h_l = self.ligand_model(data_ligand)
h_t = self.target_model(data_target)
if self.node_sampling:
h_t = self.node_sampling(h_t)
h_l_x, l_mask = to_dense_batch(h_l.x, h_l.batch, fill_value=0)
h_t_x, t_mask = to_dense_batch(h_t.x, h_t.batch, fill_value=0)
h_l_pos, _ = to_dense_batch(h_l.pos, h_l.batch, fill_value=0)
h_t_pos, _ = to_dense_batch(h_t.pos, h_t.batch, fill_value=0)
assert h_l_x.size(0) == h_t_x.size(0), 'Encountered unequal batch-sizes'
(B, N_l, C_out), N_t = h_l_x.size(), h_t_x.size(1)
# Combine and mask
h_l_x = h_l_x.unsqueeze(-2)
h_l_x = h_l_x.repeat(1, 1, N_t, 1) # [B, N_l, N_t, C_out]
h_t_x = h_t_x.unsqueeze(-3)
h_t_x = h_t_x.repeat(1, N_l, 1, 1) # [B, N_l, N_t, C_out]
C = torch.cat((h_l_x, h_t_x), -1)
self.C_mask = C_mask = l_mask.view(B, N_l, 1) & t_mask.view(B, 1, N_t)
self.C = C = C[C_mask]
C = self.MLP(C)
# Get batch indexes for ligand-target combined features
C_batch = torch.tensor(range(B)).unsqueeze(-1).unsqueeze(-1)
C_batch = C_batch.repeat(1, N_l, N_t)[C_mask].to(self.device)
# Outputs
pi = F.softmax(self.z_pi(C), -1)
sigma = F.elu(self.z_sigma(C))+1.1
mu = F.elu(self.z_mu(C))+1
dist = self.compute_euclidean_distances_matrix(h_l_pos, h_t_pos)[C_mask]
atom_types = self.atom_types(h_l.x)
bond_types = self.bond_types(torch.cat([h_l.x[h_l.edge_index[0]], h_l.x[h_l.edge_index[1]]], axis=1))
return pi, sigma, mu, dist.unsqueeze(1).detach(), atom_types, bond_types, C_batch
def compute_euclidean_distances_matrix(self, X, Y):
# Based on: https://medium.com/@souravdey/l2-distance-matrix-vectorization-trick-26aa3247ac6c
# (X-Y)^2 = X^2 + Y^2 -2XY
X = X.double()
Y = Y.double()
dists = -2 * torch.bmm(X, Y.permute(0, 2, 1)) + torch.sum(Y**2, axis=-1).unsqueeze(1) + torch.sum(X**2, axis=-1).unsqueeze(-1)
return dists**0.5
def mdn_loss_fn(pi, sigma, mu, y):
normal = Normal(mu, sigma)
loglik = normal.log_prob(y.expand_as(normal.loc))
loss = -torch.logsumexp(torch.log(pi) + loglik, dim=1)
return loss
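# Sketch: evaluating the mixture-density loss on dummy tensors. Shapes mirror
# the forward() outputs (one row per ligand-target pair, n_gaussians columns),
# and the sigma/mu transforms repeat the ones used in DeepDock.forward() so
# that sigma stays strictly positive.
if __name__ == '__main__':
    n_pairs, n_gaussians = 8, 10
    pi = F.softmax(torch.randn(n_pairs, n_gaussians), -1)
    sigma = F.elu(torch.randn(n_pairs, n_gaussians)) + 1.1
    mu = F.elu(torch.randn(n_pairs, n_gaussians)) + 1
    y = torch.rand(n_pairs, 1)
    print(mdn_loss_fn(pi, sigma, mu, y).mean())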
|
1706431
|
class Metrics(object):
@staticmethod
def h_metric(design_candidate):
cores = [d * 1. for d in design_candidate if d > 0]
core_types = len(cores)
core_counts = sum(cores) * 1.
return core_types / core_counts
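# Worked example: counts [2, 0, 3] give 2 core types over 5 cores in total,
# so Metrics.h_metric([2, 0, 3]) == 2 / 5 == 0.4.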
|
1706442
|
import logging
import time
from signal import SIGTERM
from subprocess import PIPE, Popen
from threading import Thread
logger = logging.getLogger(__name__)
def is_fileobj_open(fileobj):
return fileobj and not getattr(fileobj, 'closed', False)
class NBSubprocThread(Thread):
DEFAULT_POLL_INTERVAL_SEC = 0.01
DEFAULT_SUBPROCESS_NAME = 'Subprocess'
DEFAULT_STOP_SIGNAL = SIGTERM
def __init__(
self,
args,
cwd=None,
stdin=None,
on_poll=None,
on_stdout=None,
on_stderr=None,
on_finish=None,
poll_interval=DEFAULT_POLL_INTERVAL_SEC,
quiet=False,
subprocess_name=DEFAULT_SUBPROCESS_NAME,
):
"""Non-blocking STDOUT/STDERR streaming for subprocess.Popen().
This class makes two daemonized threads for nonblocking
streaming of STDOUT/STDERR.
Note that return value of callback functions are updated
for the following properties:
- status:
Updated with return value of on_poll, on_stdout, on_stderr.
If return value is None then no update.
- returnvalue:
Updated with return value of on_finish.
If return value is None then no update.
This is useful to check status of the thread and
get the final return value of the function that this class
actually runs.
Args:
args:
List of command line arguments.
cwd:
subprocess.Popen's cwd.
stdin:
subprocess.Popen's stdin.
                Note that subprocess.Popen's stdout/stderr is fixed
                at subprocess.PIPE/subprocess.PIPE.
on_poll:
Callback on every polling.
If return value is not None then it is used for updating property `status`.
on_stdout:
Callback on every non-empty STDOUT line.
If return value is not None then it is used for updating property `status`.
This callback function should take one argument:
- stdout (str):
New incoming STDOUT line string with trailing newline (backslash n).
on_stderr:
Callback on every non-empty STDERR line.
If return value is not None then it is used for updating property `status`.
This callback function should take one argument:
- stderr (str):
New incoming STDERR line string with trailing newline (backslash n).
on_finish:
Callback on terminating/completing a thread.
If return value is not None then it is used for updating property `returnvalue`.
poll_interval (float):
Polling interval in seconds.
quiet:
No logging.
subprocess_name:
Subprocess name for logging.
"""
super().__init__(
target=self._popen,
args=(args, cwd, stdin, on_poll, on_stdout, on_stderr, on_finish),
)
self._poll_interval = poll_interval
self._quiet = quiet
self._subprocess_name = subprocess_name
self._stdout_list = []
self._stderr_list = []
self._returncode = None
self._stop_it = False
self._stop_signal = None
self._status = None
self._returnvalue = None
@property
def stdout(self):
return ''.join(self._stdout_list)
@property
def stderr(self):
return ''.join(self._stderr_list)
@property
def returncode(self):
"""Returns subprocess.Popen.returncode.
None if not completed or any general Exception occurs.
"""
return self._returncode
@property
def status(self):
"""Updated with return value of on_poll() for every polling.
Also updated with return value of on_stdout() or on_stderr()
if their return values are not None.
"""
return self._status
@property
def returnvalue(self):
"""Updated with return value of on_finish()
which is called when a thread is terminated.
None if thread is still running so that on_finish() has not been called yet.
This works like an actual return value of the function ran inside a thread.
"""
return self._returnvalue
def stop(self, stop_signal=DEFAULT_STOP_SIGNAL, wait=False):
"""Subprocess will be teminated after next polling.
Args:
wait:
Wait for a valid returncode (which is not None).
"""
self._stop_it = True
self._stop_signal = stop_signal
if wait:
if self._returncode is None:
logger.info(
'{name} stopped but waiting for graceful shutdown...'.format(
name=self._subprocess_name
)
)
while True:
if self._returncode is not None:
return
time.sleep(self._poll_interval)
def _popen(
self,
args,
cwd=None,
stdin=None,
on_poll=None,
on_stdout=None,
on_stderr=None,
on_finish=None,
):
"""Wrapper for subprocess.Popen().
"""
def read_stdout(stdout_bytes):
text = stdout_bytes.decode()
if text:
self._stdout_list.append(text)
if on_stdout:
ret_on_stdout = on_stdout(text)
if ret_on_stdout is not None:
self._status = ret_on_stdout
def read_stderr(stderr_bytes):
text = stderr_bytes.decode()
if text:
self._stderr_list.append(text)
if on_stderr:
ret_on_stderr = on_stderr(text)
if ret_on_stderr is not None:
self._status = ret_on_stderr
def read_from_stdout_obj(stdout):
if is_fileobj_open(stdout):
for line in iter(stdout.readline, b''):
read_stdout(line)
def read_from_stderr_obj(stderr):
if is_fileobj_open(stderr):
for line in iter(stderr.readline, b''):
read_stderr(line)
        self._stop_it = False
        p = None
        try:
            p = Popen(args, stdout=PIPE, stderr=PIPE, cwd=cwd, stdin=stdin)
thread_stdout = Thread(
target=read_from_stdout_obj, args=(p.stdout,), daemon=True
)
thread_stderr = Thread(
target=read_from_stderr_obj, args=(p.stderr,), daemon=True
)
thread_stdout.start()
thread_stderr.start()
while True:
if on_poll:
ret_on_poll = on_poll()
if ret_on_poll is not None:
self._status = ret_on_poll
if p.poll() is not None:
self._returncode = p.poll()
break
if self._stop_it and self._stop_signal:
p.send_signal(self._stop_signal)
break
time.sleep(self._poll_interval)
except Exception as e:
if not self._quiet:
logger.error(e, exc_info=True)
        finally:
            # Guard against Popen() itself having failed, in which case there
            # is no process to drain.
            if p is not None:
                stdout_bytes, stderr_bytes = p.communicate()
                read_stdout(stdout_bytes)
                read_stderr(stderr_bytes)
                self._returncode = p.returncode
if on_finish:
ret_on_finish = on_finish()
if ret_on_finish is not None:
self._returnvalue = ret_on_finish
if not self._quiet:
if self._returncode:
logger.error(
'{name} failed. returncode={rc}'.format(
name=self._subprocess_name, rc=self._returncode
)
)
else:
logger.info(
'{name} finished successfully.'.format(name=self._subprocess_name)
)
|
1706464
|
from __future__ import annotations
from .extension import Backends, backends
from .datasource import DataSource, Dormitory, InitContextCallable
|
1706493
|
from hypergol import BaseData
class LabelledArticle(BaseData):
def __init__(self, labelledArticleId: int, articleId: int, labelId: int):
self.labelledArticleId = labelledArticleId
self.articleId = articleId
self.labelId = labelId
def get_id(self):
return (self.labelledArticleId, )
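# Example: get_id() returns the single-field identity tuple, e.g.
# LabelledArticle(labelledArticleId=1, articleId=10, labelId=3).get_id() == (1,)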
|
1706499
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
image = cv2.imread('/home/pi/book/dataset/ruler.512.tiff', 1)
if image is None:
    raise IOError('Could not read the input image')
# OpenCV loads images as BGR; convert to RGB so matplotlib displays colours correctly
input = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
rows, cols, channels = input.shape
# Map a 400x400 source quadrilateral onto a 300x300 output square
points1 = np.float32([[0, 0], [400, 0], [0, 400], [400, 400]])
points2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
P = cv2.getPerspectiveTransform(points1, points2)
output = cv2.warpPerspective(input, P, (300, 300))
plt.subplot(121)
plt.imshow(input)
plt.title('Input Image')
plt.subplot(122)
plt.imshow(output)
plt.title('Perspective Transform')
plt.show()
|
1706514
|
import pytest
def test_envvar_parsing():
from mpi4jax._src.decorators import _is_falsy, _is_truthy
assert _is_truthy("1")
assert not _is_falsy("1")
assert not _is_truthy("false")
assert _is_falsy("false")
assert not _is_truthy("foo")
assert not _is_falsy("foo")
def test_missing_omnistaging(monkeypatch):
import jax
from mpi4jax._src.decorators import ensure_omnistaging
with monkeypatch.context() as m:
m.setattr(jax.config, "omnistaging_enabled", False)
with pytest.raises(RuntimeError) as excinfo:
ensure_omnistaging()
assert "omnistaging" in str(excinfo.value)
def test_ensure_gpu_ext(monkeypatch):
from mpi4jax._src import xla_bridge
from mpi4jax._src.decorators import ensure_gpu_ext
with monkeypatch.context() as m:
m.setattr(xla_bridge, "HAS_GPU_EXT", False)
with pytest.raises(ImportError) as excinfo:
ensure_gpu_ext()
assert "GPU extensions could not be imported" in str(excinfo.value)
|
1706533
|
import numpy
from matplotlib import pyplot
def bisection(f, interval, max_steps=100, tol=1e-10):
x_lo, x_hi = interval
x = (x_lo + x_hi)/2
f_lo = f(x_lo)
f_hi = f(x_hi)
fx = f(x)
steps = 0
while steps < max_steps and abs(fx) > tol and (x_hi - x_lo) > tol:
steps = steps + 1
if fx*f_hi < 0: # Root lies in right-hand half
x_lo = x
f_lo = fx
else: # Root lies in left-hand half
x_hi = x
f_hi = fx
x = (x_lo + x_hi) / 2
fx = f(x)
print("Nsteps", steps)
return x
if __name__=="__main__":
def f(x):
return numpy.exp(x) + x - 2
def g(x):
return numpy.sin(x**2) - 0.1*x
interval = [0,1]
s = bisection(f, interval)
print("s = ", s, "f(s) = ", f(s))
x = numpy.linspace(0, 10, 1000)
pyplot.plot(x, g(x))
pyplot.show()
s = bisection(g, [1,10])
print("s = ", s, "g(s) = ", g(s))
s = bisection(g, [1,9])
print("s = ", s, "g(s) = ", g(s))
s = bisection(g, [1,8.5])
print("s = ", s, "g(s) = ", g(s))
s = bisection(g, [1,8])
print("s = ", s, "g(s) = ", g(s))
|
1706560
|
import sys
import ctypes
def run_as_admin(argv=None, debug=False):
shell32 = ctypes.windll.shell32
if argv is None and shell32.IsUserAnAdmin():
return True
if argv is None:
argv = sys.argv
if hasattr(sys, '_MEIPASS'):
# Support pyinstaller wrapped program.
arguments = map(str, argv[1:])
else:
arguments = map(str, argv)
argument_line = u' '.join(arguments)
executable = str(sys.executable)
if debug:
        print('Command line:', executable, argument_line)
ret = shell32.ShellExecuteW(None, u"runas", executable, argument_line, None, 1)
if int(ret) <= 32:
return False
return None
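# Sketch of the intended calling pattern (Windows only; return values follow
# the function above): True means already elevated, None means an elevated
# copy was launched, False means elevation failed or was refused.
#   ret = run_as_admin()
#   if ret is None:
#       sys.exit(0)  # the elevated instance takes over
#   elif ret is False:
#       sys.exit('elevation failed')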
|
1706583
|
from pkg_resources import get_distribution
from .decorators import *
# Set package information
__version__ = get_distribution("pytorch_common").version
__author__ = "<NAME>"
|
1706592
|
import contextlib
import importlib
import os
import sys
# Resources
TESTDIR = os.path.dirname(os.path.abspath(__file__))
MAINDIR = os.path.dirname(TESTDIR)
DOCSDIR = os.path.join(MAINDIR, "docs")
DATADIR = os.path.join(TESTDIR, "data")
# Shortcut to try import modules/functions
def try_import(*paths):
for path in paths:
if path is None:
return None
with contextlib.suppress(ImportError, AttributeError):
if ":" in path:
modname, attrname = path.rsplit(":", 1)
return getattr(importlib.import_module(modname), attrname)
else:
return importlib.import_module(path)
raise ImportError(f"could not find any of the following: {', '.join(paths)}")
# Force importing the local version of the module
sys.path.insert(0, MAINDIR)
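# Example: prefer a compiled implementation and fall back to the stdlib; the
# "module:attr" form grabs an attribute. Module names here are illustrative.
#   json_impl = try_import("ujson", "json")
#   loads = try_import("ujson:loads", "json:loads")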
|
1706611
|
from vispy.scene.visuals import Compound, Line, Text
from ..filters.tracks import TracksFilter
from .clipping_planes_mixin import ClippingPlanesMixin
class TracksVisual(ClippingPlanesMixin, Compound):
"""
Compound vispy visual for Track visualization with
clipping planes functionality
Components:
- Track lines (vispy.LineVisual)
- Track IDs (vispy.TextVisual)
- Graph edges (vispy.LineVisual)
"""
def __init__(self):
self.tracks_filter = TracksFilter()
self.graph_filter = TracksFilter()
super().__init__([Line(), Text(), Line()])
self._subvisuals[0].attach(self.tracks_filter)
self._subvisuals[2].attach(self.graph_filter)
# text label properties
self._subvisuals[1].color = 'white'
self._subvisuals[1].font_size = 8
|
1706633
|
import time
import logging
import fire
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
import models
import utils
from dataset import ImageDataset
logging.getLogger().setLevel(logging.INFO)
def run(model_name, output_dir, dataname, data_dir='./data', batch_size=16, test_run=-1):
data_path = '%s/%s' % (data_dir, dataname)
logging.info('Load data from %s' % data_path)
logging.info('Using model=%s' % model_name)
ds = ImageDataset(data_path)
model = models.get_model(model_name)
data_loader = DataLoader(ds, batch_size=batch_size)
features_list = []
count = 0
iterator = tqdm(data_loader)
for batch in iterator:
output = model.forward_pass(batch.to(utils.torch_device()))
features_list.append(output.cpu().detach().numpy())
if test_run != -1 and count > test_run:
iterator.close()
break
count = count + 1
features = np.vstack(features_list)
logging.info(features.shape)
output_path = '%s/%s-%s--%s' % (output_dir, model_name, dataname, time.strftime('%Y-%m-%d-%H-%M-%S'))
np.save(output_path, features)
logging.info('save data at %s' % output_path)
if __name__ == "__main__":
fire.Fire(run)
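# Example invocation (fire maps run()'s keyword arguments onto CLI flags; the
# script filename is illustrative):
#   python extract_features.py --model_name=resnet50 --output_dir=./out \
#       --dataname=images --batch_size=32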
|
1706640
|
import numpy as np
def defaultMotionPlanners():
return {
'home': HomeMotionPlanner,
'search': SearchMotionPlanner,
'aboveTarget': AboveTargetMotionPlanner,
'approach': ApproachMotionPlanner,
'target': TargetMotionPlanner,
# 'clean': CleanMotionPlanner,
# 'rinse': RinseMotionPlanner,
'idle': IdleMotionPlanner,
}
class PipetteMotionPlanner(object):
def __init__(self, pip, position, speed, **kwds):
self.pip = pip
self.position = position
self.speed = speed
self.kwds = kwds
self.future = None
def move(self):
"""Move the pipette to the requested named position and return a Future
"""
if self.future is not None:
self.stop()
self.future = self._move()
return self.future
def stop(self):
if self.future is not None:
self.future.stop()
def _move(self):
raise NotImplementedError()
def shouldUseLinearMotion(self):
return self.pip._shouldUseLinearMovement()
_LOCAL_ORIGIN = (0, 0, 0)
def _extractionWaypoint(destLocal, pipAngle):
"""
Parameters
----------
destLocal
Destination coordinates in pipette-local frame of reference. Extraction is only needed when +z and -x from the origin.
pipAngle
The angle of the pipette in radians, oriented to be between 0 and π/2.
Returns
-------
waypoint
Local coordinates of the extraction waypoint, or None if none is needed.
"""
if pipAngle < 0 or pipAngle > np.pi / 2:
raise ValueError("Invalid pipette pitch; orient your measurement to put it between 0 and π/2")
destX = destLocal[0]
destZ = destLocal[2]
if destX > 0 or destZ < 0 or (destX, destZ) == (0, 0):
# no clear diagonal extraction to go forward or down
return None
destAngle = np.arctan2(destZ, -destX) # `-x` to match the pipAngle orientation
if destAngle > pipAngle:
dz = destX * np.tan(pipAngle)
waypoint = (destX, 0, -dz)
else:
dx = destZ / np.tan(pipAngle)
waypoint = (-dx, 0, destZ)
# sanity check, floating point errors
return np.clip(waypoint, _LOCAL_ORIGIN, destLocal)
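# Worked example: with a 45-degree pitch, destLocal = (-1, 0, 2) gives
# destAngle of about 63 degrees > pipAngle, so the waypoint is (-1, 0, 1):
# retract along the pipette axis until x = -1, then move straight up.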
class HomeMotionPlanner(PipetteMotionPlanner):
"""Extract pipette tip diagonally, then move to home position.
"""
def _move(self):
pip = self.pip
speed = self.speed
manipulator = pip.parentDevice()
manipulatorHome = manipulator.homePosition()
assert manipulatorHome is not None, "No home position defined for %s" % manipulator.name()
# how much should the pipette move in global coordinates
globalMove = np.asarray(manipulatorHome) - np.asarray(manipulator.globalPosition())
startPosGlobal = pip.globalPosition()
# where should the pipette tip end up in global coordinates
endPosGlobal = np.asarray(startPosGlobal) + globalMove
# use local coordinates to make it easier to do the boundary intersections
endPosLocal = pip.mapFromGlobal(endPosGlobal)
waypointLocal = _extractionWaypoint(endPosLocal, pip.pitchRadians())
# sensapex manipulators shouldn't need a waypoint to perform correct extraction
if waypointLocal is None or not self.shouldUseLinearMotion():
path = [(endPosGlobal, speed, False), ]
else:
waypointGlobal = pip.mapToGlobal(waypointLocal)
path = [
(waypointGlobal, speed, True),
(endPosGlobal, speed, False),
]
return pip._movePath(path)
class SearchMotionPlanner(PipetteMotionPlanner):
"""Focus the microscope 2mm above the surface, then move the electrode
tip to 500um below the focal point of the microscope.
This position is used when searching for new electrodes.
Set *distance* to adjust the search position along the pipette's x-axis. Positive values
move the tip farther from the microscope center to reduce the probability of collisions.
Negative values move the pipette past the center of the microscope to improve the
probability of seeing the tip immediately.
"""
def _move(self):
pip = self.pip
speed = self.speed
distance = self.kwds.get('distance', 0)
# Bring focus to 2mm above surface (if needed)
scope = pip.scopeDevice()
surfaceDepth = scope.getSurfaceDepth()
if surfaceDepth is None:
raise Exception("Cannot determine search position; surface depth is not defined.")
searchDepth = surfaceDepth + pip._opts['searchHeight']
cam = pip.imagingDevice()
focusDepth = cam.getFocusDepth()
# move scope such that camera will be focused at searchDepth
if focusDepth < searchDepth:
scopeFocus = scope.getFocusDepth()
scope.setFocusDepth(scopeFocus + searchDepth - focusDepth).wait(updates=True)
# Here's where we want the pipette tip in global coordinates:
globalTarget = cam.globalCenterPosition('roi')
globalTarget[2] += pip._opts['searchTipHeight'] - pip._opts['searchHeight']
# adjust for distance argument:
localTarget = pip.mapFromGlobal(globalTarget)
localTarget[0] -= distance
globalTarget = pip.mapToGlobal(localTarget)
return pip._moveToGlobal(globalTarget, speed)
class ApproachMotionPlanner(PipetteMotionPlanner):
def _move(self):
pip = self.pip
speed = self.speed
target = pip.targetPosition()
return pip._movePath(self.approachPath(target, speed))
def approachPath(self, target, speed):
"""
Describe a path that puts the pipette in-line to do straight movement along the pipette pitch to the target
Parameters
----------
target: coordinates
speed: m/s
"""
pip = self.pip
# Return steps (in global coords) needed to move to approach position
stbyDepth = pip.approachDepth()
pos = pip.globalPosition()
# steps are in global coordinates.
path = []
# If tip is below the surface, then first pull out slowly along pipette axis
if pos[2] < stbyDepth:
dz = stbyDepth - pos[2]
dx = -dz / np.tan(pip.pitchRadians())
last = np.array([dx, 0., dz])
path.append([pip.mapToGlobal(last), 100e-6, self.shouldUseLinearMotion()]) # slow removal from sample
else:
last = np.array([0., 0., 0.])
# local vector pointing in direction of electrode tip
evec = np.array([1., 0., -np.tan(pip.pitchRadians())])
evec /= np.linalg.norm(evec)
# target in local coordinates
ltarget = pip.mapFromGlobal(target)
# compute approach position (axis aligned to target, at standby depth or higher)
dz2 = max(0, stbyDepth - target[2])
dx2 = -dz2 / np.tan(pip.pitchRadians())
stby = ltarget + np.array([dx2, 0., dz2])
# compute intermediate position (point along approach axis that is closest to the current position)
targetToTip = last - ltarget
targetToStby = stby - ltarget
targetToStby /= np.linalg.norm(targetToStby)
closest = ltarget + np.dot(targetToTip, targetToStby) * targetToStby
if np.linalg.norm(stby - last) > 1e-6:
if (closest[2] > stby[2]) and (np.linalg.norm(stby - closest) > 1e-6):
path.append([pip.mapToGlobal(closest), speed, self.shouldUseLinearMotion()])
path.append([pip.mapToGlobal(stby), speed, self.shouldUseLinearMotion()])
return path
class TargetMotionPlanner(ApproachMotionPlanner):
def _move(self):
pip = self.pip
speed = self.speed
target = pip.targetPosition()
pos = pip.globalPosition()
if np.linalg.norm(np.asarray(target) - pos) < 1e-7:
return
path = self.approachPath(target, speed)
path.append([target, 100e-6, self.shouldUseLinearMotion()])
return pip._movePath(path)
class AboveTargetMotionPlanner(PipetteMotionPlanner):
"""Move the pipette tip to be centered over the target in x/y, and 100 um above
the sample surface in z.
This position is used to recalibrate the pipette immediately before going to approach.
"""
def _move(self):
pip = self.pip
speed = self.speed
scope = pip.scopeDevice()
waypoint1, waypoint2 = self.aboveTargetPath()
pfut = pip._moveToGlobal(waypoint1, speed)
sfut = scope.setGlobalPosition(waypoint2)
pfut.wait(updates=True)
pip._moveToGlobal(waypoint2, 'slow').wait(updates=True)
sfut.wait(updates=True)
return sfut
def aboveTargetPath(self):
"""Return the path to the "above target" recalibration position.
The path has 2 waypoints:
1. 100 um away from the second waypoint, on a diagonal approach. This is meant to normalize the hysteresis
at the second waypoint.
2. This position is centered on the target, a small distance above the sample surface.
"""
pip = self.pip
target = pip.targetPosition()
# will recalibrate 50 um above surface
scope = pip.scopeDevice()
surfaceDepth = scope.getSurfaceDepth()
waypoint2 = np.array(target)
waypoint2[2] = surfaceDepth + 50e-6
# Need to arrive at this point via approach angle to correct for hysteresis
lwp = pip.mapFromGlobal(waypoint2)
dz = 100e-6
lwp[2] += dz
lwp[0] -= dz / np.tan(pip.pitchRadians())
waypoint1 = pip.mapToGlobal(lwp)
return waypoint1, waypoint2
class IdleMotionPlanner(PipetteMotionPlanner):
"""Move the electrode tip to the outer edge of the recording chamber, 1mm above the sample surface.
NOTE: this method assumes that (0, 0) in global coordinates represents the center of the recording
chamber.
"""
def _move(self):
pip = self.pip
speed = self.speed
scope = pip.scopeDevice()
surface = scope.getSurfaceDepth()
if surface is None:
raise Exception("Surface depth has not been set.")
# we want to land 1 mm above sample surface
idleDepth = surface + pip._opts['idleHeight']
# If the tip is below idle depth, bring it up along the axis of the electrode.
pos = pip.globalPosition()
if pos[2] < idleDepth:
pip.advance(idleDepth, speed)
# From here, move directly to idle position
angle = pip.yawRadians()
ds = pip._opts['idleDistance'] # move to 7 mm from center
globalIdlePos = -ds * np.cos(angle), -ds * np.sin(angle), idleDepth
return pip._moveToGlobal(globalIdlePos, speed)
|
1706650
|
from pyrez.models import APIResponse
class Search(APIResponse):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.teamFounder = kwargs.get("Founder", '') or ''
self.teamName = kwargs.get("Name", '') or ''
self.players = kwargs.get("Players", 0) or 0
self.teamTag = kwargs.get("Tag", '') or ''
self.teamId = kwargs.get("TeamId", 0) or 0
|
1706654
|
import numpy as np
# from tsnecuda import TSNE
# from sklearn.manifold import TSNE
from data.IncrementalTSNE import IncrementalTSNE
from matplotlib import pyplot as plt
from scipy.spatial.distance import cdist
from fastlapjv import fastlapjv
import math
from time import time
class GridLayout(object):
def __init__(self):
super().__init__()
self.tsner = IncrementalTSNE(n_components=2, init='pca', method='barnes_hut', perplexity=30, angle=0.3, n_jobs=8, n_iter=1000, random_state = 100)
def fit(self, X: np.ndarray, labels: np.ndarray = None, constraintX: np.ndarray = None, constraintY: np.ndarray = None, constraintLabels: np.ndarray = None, init = None):
"""main fit function
Args:
X (np.ndarray): n * d, n is the number of samples, d is the dimension of a sample
labels (np.ndarray): label of each sample in X
"""
X_embedded = self.tsne(X, constraintX = constraintX, constraintY = constraintY, labels = labels, constraintLabels = constraintLabels, init = init)
grid_ass, grid_size = self.grid(X_embedded)
return X_embedded, grid_ass, grid_size
def tsne(self, X: np.ndarray, labels: np.ndarray = None, perplexity: int = 15, learning_rate: int = 3, constraintX: np.ndarray = None, constraintY: np.ndarray = None, constraintLabels: np.ndarray = None, init = None) -> np.ndarray:
# remove empty labels
labelcnt = 0
removeEmptyTransform = np.zeros((np.max(labels)+1), dtype=int)-1
for label in labels:
if removeEmptyTransform[label]==-1:
removeEmptyTransform[label]=labelcnt
labelcnt += 1
        labels = removeEmptyTransform[labels]
        if constraintLabels is not None:
            constraintLabels = removeEmptyTransform[constraintLabels]
self.tsner = IncrementalTSNE(n_components=2, init='pca' if init is None else init, method='barnes_hut', perplexity=30, angle=0.3, n_jobs=8, n_iter=1000, random_state = 100)
if constraintX is None:
X_embedded = self.tsner.fit_transform(X, constraint_X = constraintX, constraint_Y = constraintY, prev_n = 0 if constraintX is None else len(constraintX),
alpha = 0.5, labels=labels, label_alpha=0.9)
else:
self.tsner = IncrementalTSNE(n_components=2, init='pca' if init is None else init, method='barnes_hut', perplexity=5, angle=0.3, n_jobs=8, n_iter=1000, random_state = 100)
X_embedded = self.tsner.fit_transform(X, constraint_X = constraintX, constraint_Y = constraintY, constraint_labels = constraintLabels, prev_n = 0 if constraintX is None else len(constraintX),
alpha = 0.3, labels = labels, label_alpha=0.2)
return X_embedded
def grid(self, X_embedded: np.ndarray):
X_embedded -= X_embedded.min(axis=0)
X_embedded /= X_embedded.max(axis=0)
num = X_embedded.shape[0]
square_len = math.ceil(np.sqrt(num))
N = square_len * square_len
grids = np.dstack(np.meshgrid(np.linspace(0, 1 - 1.0 / square_len, square_len),
np.linspace(0, 1 - 1.0 / square_len, square_len))) \
.reshape(-1, 2)
original_cost_matrix = cdist(grids, X_embedded, "euclidean")
# knn process
dummy_points = np.ones((N - original_cost_matrix.shape[1], 2)) * 0.5
# dummy at [0.5, 0.5]
dummy_vertices = (1 - cdist(grids, dummy_points, "euclidean")) * 100
cost_matrix = np.concatenate((original_cost_matrix, dummy_vertices), axis=1)
row_asses, col_asses, info = fastlapjv(cost_matrix, k_value=50)
col_asses = col_asses[:num]
return col_asses, square_len
if __name__ == "__main__":
X = np.random.rand(500, 128)
labels = np.random.randint(10, size=500)
grid = GridLayout()
grid.fit(X, labels)
|
1706666
|
import pandas as pd
from sklearn.metrics import classification_report
import config
def generate_csv_report(y_true_inv, y_pred_inv, label_encoder, accuracy) -> None:
"""
Print and save classification report for accuracy, precision, recall and f1 score metrics.
:return: None.
"""
# Classification report.
print(classification_report(y_true_inv, y_pred_inv, target_names=label_encoder.classes_))
report_df = pd.DataFrame(classification_report(y_true_inv, y_pred_inv, target_names=label_encoder.classes_,
output_dict=True)).transpose()
# Append accuracy.
    report_df = report_df.append({'accuracy': accuracy}, ignore_index=True)
# Save report.
report_df.to_csv(
"../output/{}_dataset-{}_mammogramtype-{}_model-{}_lr-{}_b-{}_e1-{}_e2-{}_roi-{}_report.csv".format(
config.run_mode,
config.dataset,
config.mammogram_type,
config.model,
config.learning_rate,
config.batch_size,
config.max_epoch_frozen,
config.max_epoch_unfrozen,
config.is_roi),
index=False,
header=True
)
def generate_csv_metadata(runtime) -> None:
"""
Print and save CLI arguments and training runtime.
:return: None.
"""
metadata_dict = {
'dataset': config.dataset,
'mammogram_type': config.mammogram_type,
'model': config.model,
'run_mode': config.run_mode,
'learning_rate': config.learning_rate,
'batch_size': config.batch_size,
'max_epoch_frozen': config.max_epoch_frozen,
'max_epoch_unfrozen': config.max_epoch_unfrozen,
'is_roi': config.is_roi,
'experiment_name': config.name,
'training runtime (s)': runtime
}
# Convert to dataframe.
metadata_df = pd.DataFrame.from_dict(metadata_dict, orient='index')
# Save report.
metadata_df.to_csv(
"../output/{}_dataset-{}_mammogramtype-{}_model-{}_lr-{}_b-{}_e1-{}_e2-{}_roi-{}_metadata.csv".format(
config.run_mode,
config.dataset,
config.mammogram_type,
config.model,
config.learning_rate,
config.batch_size,
config.max_epoch_frozen,
config.max_epoch_unfrozen,
config.is_roi)
)
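# Usage sketch (illustrative values; assumes ../output exists and config is populated):
# generate_csv_report(y_true_inv, y_pred_inv, label_encoder, accuracy=0.91)
# generate_csv_metadata(runtime=4213.7)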
|
1706672
|
from parglare import get_collector
def test_collector_can_use_unicode_in_python_2():
action = get_collector()
def f(context, node):
return node
action('f_action')(f)
|
1706673
|
import numpy as np
import tensorflow as tf
from tensorflow.python.util import nest
def combine_flat_list(_structure, _flat_list, axis=1):
_combined = []
for i in range(len(_flat_list[0])):
        # gather the i-th leaf tensor from every element of the list
        t = [v[i] for v in _flat_list]
if len(t[0].get_shape()) == 0:
cc = tf.stack(t, axis)
else:
cc = tf.concat(t, axis)
_combined.append(cc)
return nest.pack_sequence_as(_structure, _combined)
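# Example: with _structure = (a, b) and _flat_list = [(a1, b1), (a2, b2)], the
# result is (concat([a1, a2], axis), concat([b1, b2], axis)) re-packed into the
# original nesting (stack is used instead of concat for scalar leaves).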
def to_bool(_t):
return tf.cast(_t, tf.bool)
def switch_time_and_batch_dimension(_tensor):
    rank = len(_tensor.get_shape())
    perm = np.arange(rank)
    perm[0], perm[1] = 1, 0
    # bool tensors are transposed via int64 and cast back afterwards; track the
    # original dtype explicitly so the post-transpose check actually fires
    was_bool = _tensor.dtype == tf.bool
    if was_bool:
        _tensor = tf.cast(_tensor, tf.int64)
    res = tf.transpose(_tensor, perm, name='switch_time_and_batch_dimension')
    if was_bool:
        return tf.cast(res, tf.bool)
    return res
def exp_convolve(tensor, decay, initializer=None):
with tf.name_scope('ExpConvolve'):
assert tensor.dtype in [tf.float16, tf.float32, tf.float64]
if initializer is None:
initializer = tf.zeros_like(tensor)
filtered_tensor = tf.scan(lambda a, x: a * decay + (1-decay) * x, tensor, initializer=initializer)
return filtered_tensor
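# Usage sketch (TF1 graph mode, matching the tf.scan call above; shapes illustrative):
# spikes = tf.placeholder(tf.float32, [None, 32, 100])  # [time, batch, units]
# filtered = exp_convolve(spikes, decay=0.9)            # same shape, low-passed over time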
|
1706683
|
from .Extract import (is_bearish_engulfing, is_bullish_engulfing, is_dark_cloud_cover,
is_doji, is_evening_star, is_falling_three_methods, is_hammer, is_hanging_man,
is_inverse_hammer, is_morning_star, is_piercing_line, is_rising_three_methods,
is_shooting_star, is_spinning_top, is_three_black_crows, is_three_white_soldier,
is_bullish_harami, is_bearish_harami)
from tqdm import tqdm
# hyper parameters
# Trend Parameters
window_size = 20
indicator = 'MA'
# Candle stick pattern parameters
gap_significance_level = 0.05
candle_significance_level = 0.1
doji_length_percentage = 0.01
# star
down_gap_percentage_morning_star = 0.1
up_gap_percentage_evening_star = 0.1
# hammer
percentage_of_shadow_hammer = 0.2
upper_bound_hammer_significance_level = 0.4
lower_bound_hammer_significance_level = 0.1
# hangman
percentage_of_upper_shadow = 0.2
upper_bound_hangman_significance_level = 0.4
lower_bound_hangman_significance_level = 0.1
# spinning top
spinning_top_significance = 0.1
spinning_top_doji_level = 0.3
spinning_top_offset = 0.1
# Harami
harami_gap_significance_level = 0.05
def label_candles(data):
average_range_of_candles_bodies = abs(data.close - data.open).mean()
data['label'] = "None"
data['action'] = "None"
data['%body'] = abs(data.close - data.open) / (data.high - data.low)
data['%upper-shadow'] = (data.high - data[['close', 'open']].max(axis=1)) / (data.high - data.low)
data['%lower-shadow'] = (data[['close', 'open']].min(axis=1) - data.low) / (data.high - data.low)
for i in tqdm(range(len(data))):
data['label'][i] = set()
patterns = {"hammer": [], "inverse hammer": [], "bullish engulfing": [], "piercing line": [],
"morning star": [], "three white soldiers": [], "hanging man": [], "shooting star": [],
"bearish engulfing": [], "evening star": [], "three black crows": [], "dark cloud cover": [],
"doji": [], "spanning top": [], "falling three methods": [], "rising three methods": [],
"bullish harami": [], "bearish harami": []}
find_trend(data, window_size)
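    # Single pass over the candles: each pattern test records the index in
    # `patterns` and tags the candle's label set; when the preceding trend and
    # the next close confirm the signal, a buy/sell action is written on the
    # following candle.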
for i in tqdm(range(len(data) - 1)):
if is_hammer(data.iloc[i], percentage_of_upper_shadow=percentage_of_shadow_hammer,
upper_bound_hammer_significance_level=upper_bound_hammer_significance_level,
lower_bound_hammer_significance_level=lower_bound_hammer_significance_level):
patterns["hammer"].append(i)
data['label'][i].add("hammer")
trend_history = data['trend'][i]
if trend_history and confirmation_of_the_trend(data, i) == 'up':
data['action'][i + 1] = 'buy'
if is_inverse_hammer(data.iloc[i], percentage_of_lower_shadow=percentage_of_shadow_hammer,
upper_bound_hammer_significance_level=upper_bound_hammer_significance_level,
lower_bound_hammer_significance_level=lower_bound_hammer_significance_level):
patterns["inverse hammer"].append(i)
data['label'][i].add("inverse hammer")
trend_history = data['trend'][i]
if trend_history and confirmation_of_the_trend(data, i) == 'up':
data['action'][i + 1] = 'buy'
if (i > 0) and is_bullish_engulfing(data.iloc[i - 1], data.iloc[i], average_range_of_candles_bodies,
candle_significance_level):
patterns["bullish engulfing"].append(i)
data['label'][i].add("bullish engulfing")
trend_history = data['trend'][i]
if trend_history and confirmation_of_the_trend(data, i) == 'up':
data['action'][i + 1] = 'buy'
if (i > 0) and is_piercing_line(data.iloc[i - 1], data.iloc[i],
average_range_of_candles_bodies,
significance_level=candle_significance_level,
gap_significance_level=gap_significance_level):
patterns["piercing line"].append(i)
data['label'][i].add("piercing line")
trend_history = data['trend'][i]
if trend_history and confirmation_of_the_trend(data, i) == 'up':
data['action'][i + 1] = 'buy'
if (i > 1) and is_morning_star(data.iloc[i - 2], data.iloc[i - 1], data.iloc[i],
average_range_of_candles_bodies,
doji_length_percentage=doji_length_percentage,
significance_level=candle_significance_level,
down_percentage=down_gap_percentage_morning_star):
patterns["morning star"].append(i)
data['label'][i].add("morning star")
trend_history = data['trend'][i]
if trend_history and confirmation_of_the_trend(data, i) == 'up':
data['action'][i + 1] = 'buy'
if (i > 1) and is_three_white_soldier(data.iloc[i - 2], data.iloc[i - 1], data.iloc[i],
average_range_of_candles_bodies,
candle_significance_level):
patterns["three white soldiers"].append(i)
data['label'][i].add("three white soldiers")
trend_history = data['trend'][i]
if trend_history and confirmation_of_the_trend(data, i) == 'up':
data['action'][i + 1] = 'buy'
if (i > 0) and is_bullish_harami(data.iloc[i - 1], data.iloc[i], average_range_of_candles_bodies,
candle_significance_level, harami_gap_significance_level):
patterns["bullish harami"].append(i)
data['label'][i].add("bullish harami")
trend_history = data['trend'][i]
if trend_history and confirmation_of_the_trend(data, i) == 'up':
data['action'][i + 1] = 'buy'
if is_hanging_man(data.iloc[i], percentage_of_upper_shadow=percentage_of_upper_shadow,
lower_bound_hangman_significance_level=lower_bound_hangman_significance_level,
upper_bound_hangman_significance_level=upper_bound_hangman_significance_level):
patterns["hanging man"].append(i)
data['label'][i].add("hanging man")
trend_history = data['trend'][i]
if trend_history and confirmation_of_the_trend(data, i) == 'down':
data['action'][i + 1] = 'sell'
if is_shooting_star(data.iloc[i], percentage_of_lower_shadow=percentage_of_upper_shadow,
lower_bound_hangman_significance_level=lower_bound_hangman_significance_level,
upper_bound_hangman_significance_level=upper_bound_hangman_significance_level):
patterns["shooting star"].append(i)
data['label'][i].add("shooting star")
trend_history = data['trend'][i]
if trend_history and confirmation_of_the_trend(data, i) == 'down':
data['action'][i + 1] = 'sell'
if (i > 0) and is_bearish_engulfing(data.iloc[i - 1], data.iloc[i], average_range_of_candles_bodies,
candle_significance_level):
patterns["bearish engulfing"].append(i)
data['label'][i].add("bearish engulfing")
trend_history = data['trend'][i]
if trend_history and confirmation_of_the_trend(data, i) == 'down':
data['action'][i + 1] = 'sell'
if (i > 1) and is_evening_star(data.iloc[i - 2], data.iloc[i - 1], data.iloc[i],
average_range_of_candles_bodies,
doji_length_percentage=doji_length_percentage,
significance_level=candle_significance_level,
up_percentage=up_gap_percentage_evening_star):
patterns["evening star"].append(i)
data['label'][i].add("evening star")
trend_history = data['trend'][i]
if trend_history and confirmation_of_the_trend(data, i) == 'down':
data['action'][i + 1] = 'sell'
if (i > 1) and is_three_black_crows(data.iloc[i - 2], data.iloc[i - 1], data.iloc[i],
average_range_of_candles_bodies,
significance_level=candle_significance_level):
patterns["three black crows"].append(i)
data['label'][i].add("three black crows")
trend_history = data['trend'][i]
if trend_history and confirmation_of_the_trend(data, i) == 'down':
data['action'][i + 1] = 'sell'
if (i > 0) and is_dark_cloud_cover(data.iloc[i - 1], data.iloc[i],
average_range_of_candles_bodies,
significance_level=candle_significance_level,
gap_significance_level=gap_significance_level):
patterns["dark cloud cover"].append(i)
data['label'][i].add("dark cloud cover")
trend_history = data['trend'][i]
if trend_history and confirmation_of_the_trend(data, i) == 'down':
data['action'][i + 1] = 'sell'
if (i > 0) and is_bearish_harami(data.iloc[i - 1], data.iloc[i], average_range_of_candles_bodies,
candle_significance_level, harami_gap_significance_level):
patterns["bearish harami"].append(i)
data['label'][i].add("bearish harami")
trend_history = data['trend'][i]
if trend_history and confirmation_of_the_trend(data, i) == 'down':
data['action'][i + 1] = 'sell'
if is_doji(data.iloc[i], average_range_of_candles_bodies, percentage_length=doji_length_percentage):
patterns["doji"].append(i)
data['label'][i].add("doji")
        if is_spinning_top(data.iloc[i], average_range_of_candles_bodies, significance_level=spinning_top_significance,
                           doji_level=spinning_top_doji_level,
                           offset=spinning_top_offset):
            patterns["spinning top"].append(i)
            data['label'][i].add("spinning top")
if (i > 3) and is_falling_three_methods(data.iloc[i - 4], data.iloc[i - 3], data.iloc[i - 2],
data.iloc[i - 1], data.iloc[i], average_range_of_candles_bodies,
significance_level=candle_significance_level):
patterns["falling three methods"].append(i)
data['label'][i].add("falling three methods")
if (i > 3) and is_rising_three_methods(data.iloc[i - 4], data.iloc[i - 3], data.iloc[i - 2],
data.iloc[i - 1], data.iloc[i], average_range_of_candles_bodies,
significance_level=candle_significance_level):
patterns["rising three methods"].append(i)
data['label'][i].add("rising three methods")
for i in range(len(data)):
data['label'][i] = list(data['label'][i])
return patterns
def find_trend(data, window_size=20):
data['MA'] = data.mean_candle.rolling(window_size).mean()
data['trend'] = 0
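    # A window counts as ascending when the mean of its trailing MA deltas is positive.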
for index in range(len(data)):
moving_average_history = []
if index >= window_size:
for i in range(index - window_size, index):
moving_average_history.append(data['MA'][i])
difference_moving_average = 0
for i in range(len(moving_average_history) - 1, 0, -1):
difference_moving_average += (moving_average_history[i] - moving_average_history[i - 1])
# trend = 1 means ascending, and trend = 0 means descending
data['trend'][index] = 1 if (difference_moving_average / window_size) > 0 else 0
def confirmation_of_the_trend(data, index):
    if index < len(data) - 1:
        return 'up' if data.close[index + 1] > data.close[index] else 'down'
    return None  # no next candle to confirm against
|
1706704
|
import numpy as np
import pyfastnoisesimd as fns
# Num workers does not seem to matter.
# It only seems to be an issue if the middle dimension is not divisible
# by the SIMD length?
n = fns.Noise(numWorkers=1)
shape = [27, 127, 1]
for I in range(10):
# print(I)
res = n.genAsGrid(shape)
print('Finished successfully')
|
1706795
|
import json
import numpy as np
from ..utils import *
from .. import logger
class Randomizer:
def __init__(self, randomization_config_fp='default_dr.json', default_config_fp='default.json'):
try:
with open(get_file_path('randomization/config', randomization_config_fp, 'json'), mode='r') as f:
self.randomization_config = json.load(f)
        except (IOError, OSError, ValueError):
logger.warning("Couldn't find {} in randomization/config subdirectory".format(randomization_config_fp))
self.randomization_config = dict()
with open(get_file_path('randomization/config', default_config_fp, 'json'), mode='r') as f:
self.default_config = json.load(f)
self.keys = set(list(self.randomization_config.keys()) + list(self.default_config.keys()))
def randomize(self):
"""Returns a dictionary of randomized parameters, with key: parameter name and value: randomized
value
"""
randomization_settings = dict()
for k in self.keys:
setting = None
if k in self.randomization_config:
randomization_definition = self.randomization_config[k]
if randomization_definition['type'] == 'int':
try:
low = randomization_definition['low']
high = randomization_definition['high']
size = randomization_definition.get('size', 1)
                    except KeyError:
raise IndexError("Please check your randomization definition for: {}".format(k))
setting = np.random.randint(low=low, high=high, size=size)
elif randomization_definition['type'] == 'uniform':
try:
low = randomization_definition['low']
high = randomization_definition['high']
size = randomization_definition.get('size', 1)
                    except KeyError:
raise IndexError("Please check your randomization definition for: {}".format(k))
setting = np.random.uniform(low=low, high=high, size=size)
elif randomization_definition['type'] == 'normal':
try:
loc = randomization_definition['loc']
scale = randomization_definition['scale']
size = randomization_definition.get('size', 1)
                    except KeyError:
raise IndexError("Please check your randomization definition for: {}".format(k))
setting = np.random.normal(loc=loc, scale=scale, size=size)
else:
raise NotImplementedError("You've specified an unsupported distribution type")
elif k in self.default_config:
randomization_definition = self.default_config[k]
setting = randomization_definition['default']
randomization_settings[k] = setting
return randomization_settings
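# Illustrative config shapes (example keys/values, not taken from the real files):
# randomization config: {"mass": {"type": "uniform", "low": 0.5, "high": 2.0, "size": 1},
#                        "steps": {"type": "int", "low": 1, "high": 10}}
# default config:       {"mass": {"default": 1.0}}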
|
1706799
|
import numpy as np
from keras.models import load_model
from utils.img_process import process, yolo_img_process
import utils.yolo_util as yolo_util
import tensorflow as tf
import cv2
from PIL import Image
import pickle
from deepgtav.messages import frame2numpy
import gzip
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
IMAGE_H, IMAGE_W = 416, 416
CAP_IMG_W, CAP_IMG_H = 1914, 1051
data_path = "video_drive_file/dataset.pz"
data_path = gzip.open(data_path)
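# The .pz file is a gzip-compressed stream of pickled dicts, one per captured
# frame; main() below reads them with pickle.load until EOFError.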
def drive(model, image, speed, warning):
throttle = 0
breakk = 0
roi, radar = process(image)
controls = model.predict([np.array([roi]), np.array([radar]), np.array([speed])], batch_size=1)
controls = controls[0][0]*5/3.14
    if warning:
        # obstacle warning: cut throttle and brake fully
        return "--> %.2f throttle:%.2f brake:%.2f" % (controls, 0.0, 1.0)
if speed < 5: # control speed
throttle = 1
elif speed < 20:
throttle = 0.5
elif speed > 25:
throttle = 0.0
breakk = 0.4
    direction = "-->" if controls > 0 else "<--"
    info = "%s %.2f throttle:%.2f brake:%.2f" % (direction, controls, throttle, breakk)
print(info)
return info
def main():
# load yolo v3
classes = yolo_util.read_coco_names('./files/coco/coco.names')
num_classes = len(classes)
input_tensor, output_tensors = yolo_util.read_pb_return_tensors(tf.get_default_graph(),
"./files/trained_models/yolov3.pb",
["Placeholder:0", "concat_9:0", "mul_6:0"])
print("load yolo v3 successfully!")
with tf.Session() as sess:
model = load_model("files/trained_models/main_model.h5")
print("load main_model successfully!")
while True:
try:
                data_dict = pickle.load(data_path)  # read one recorded frame from the dataset stream
speed = data_dict['speed']
frame = data_dict['frame']
frame = frame2numpy(frame,(CAP_IMG_W,CAP_IMG_H))
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = Image.fromarray(frame)
except EOFError:
print("===========end=============")
exit(0)
boxes, scores = sess.run(output_tensors, feed_dict={input_tensor: np.expand_dims(yolo_img_process(frame), axis=0)})
boxes, scores, labels = yolo_util.cpu_nms(boxes, scores, num_classes, score_thresh=0.4, iou_thresh=0.1)
image, warning = yolo_util.draw_boxes(image, boxes, scores, labels, classes, (IMAGE_H, IMAGE_W), show=False)
info = drive(model=model, image=frame, speed=speed, warning=warning)
result = np.asarray(image)
cv2.putText(result, text=info, org=(50, 70), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1, color=(255, 0, 0), thickness=2)
result = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)
            while True:
                cv2.imshow("result", result)
                key = cv2.waitKey(0) & 0xFF  # read the key once per loop iteration
                if key == 32:  # space: advance to the next frame
                    break
                elif key == ord('q'):  # q: quit the program
                    print("====================done===================")
                    exit(0)
if __name__ == '__main__':
main()
|
1706811
|
import pytest
from tests.functional.coercers.common import resolve_unwrapped_field
@pytest.mark.asyncio
@pytest.mark.ttftt_engine(
name="coercion",
resolvers={"Query.nonNullIntField": resolve_unwrapped_field},
)
@pytest.mark.parametrize(
"query,variables,expected",
[
(
"""query { nonNullIntField }""",
None,
{
"data": None,
"errors": [
{
"message": "Missing mandatory argument < param > in field < Query.nonNullIntField >.",
"path": ["nonNullIntField"],
"locations": [{"line": 1, "column": 9}],
"extensions": {
"rule": "5.4.2.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Required-Arguments",
"tag": "required-arguments",
},
}
],
},
),
(
"""query { nonNullIntField(param: null) }""",
None,
{
"data": None,
"errors": [
{
"message": "Argument < param > of non-null type < Int! > must not be null.",
"path": ["nonNullIntField"],
"locations": [{"line": 1, "column": 25}],
"extensions": {
"rule": "5.6.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Values-of-Correct-Type",
"tag": "values-of-correct-type",
},
}
],
},
),
(
"""query { nonNullIntField(param: 10) }""",
None,
{"data": {"nonNullIntField": "SUCCESS-13"}},
),
(
"""query ($param: Int!) { nonNullIntField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > of required type < Int! > was not provided.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: Int!) { nonNullIntField(param: $param) }""",
{"param": None},
{
"data": None,
"errors": [
{
"message": "Variable < $param > of non-null type < Int! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: Int!) { nonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"nonNullIntField": "SUCCESS-23"}},
),
(
"""query ($param: Int! = null) { nonNullIntField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid default value < null >.",
"path": None,
"locations": [{"line": 1, "column": 23}],
}
],
},
),
(
"""query ($param: Int! = null) { nonNullIntField(param: $param) }""",
{"param": None},
{
"data": None,
"errors": [
{
"message": "Variable < $param > of non-null type < Int! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: Int! = null) { nonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"nonNullIntField": "SUCCESS-23"}},
),
(
"""query ($param: Int! = 30) { nonNullIntField(param: $param) }""",
None,
{"data": {"nonNullIntField": "SUCCESS-33"}},
),
(
"""query ($param: Int! = 30) { nonNullIntField(param: $param) }""",
{"param": None},
{
"data": None,
"errors": [
{
"message": "Variable < $param > of non-null type < Int! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: Int! = 30) { nonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"nonNullIntField": "SUCCESS-23"}},
),
(
"""query ($param: Int!) { nonNullIntField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > of required type < Int! > was not provided.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: Int!) { nonNullIntField(param: $param) }""",
{"param": None},
{
"data": None,
"errors": [
{
"message": "Variable < $param > of non-null type < Int! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: Int!) { nonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"nonNullIntField": "SUCCESS-23"}},
),
],
)
async def test_coercion_non_null_int_field(engine, query, variables, expected):
assert await engine.execute(query, variables=variables) == expected
|
1706821
|
import tensorflow as tf
from transforms.audio import ops
NAME_TO_FUNC = {
'Identity': tf.identity,
'FreqMask': ops.freq_mask,
'TimeMask': ops.time_mask,
'FreqRescale': ops.freq_rescale,
'TimeRescale': ops.time_rescale,
'FreqWarping': ops.freq_warping,
'TimeWarping': ops.time_warping,
'Dropout': ops.mel_dropout,
'Loudness': ops.mel_loudness,
}
def _ignore_level_to_arg(level):
del level
return ()
def _mask_level_to_arg(level):
    # level is in [0, 1]; the mask factor cycles through (0, 0.2] and the number
    # of applications grows by one for each full 0.2 step of level
limit = tf.constant(0.2, tf.float32)
factor = tf.math.mod(level, limit)
factor = tf.cond(tf.equal(factor, 0.), lambda: limit, lambda: factor)
times = tf.cast(tf.math.floordiv(level, limit), tf.int32) + 1
return (
factor,
times,
)
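# Worked example: level = 0.55 gives factor = 0.55 mod 0.2 = 0.15 and
# times = floor(0.55 / 0.2) + 1 = 3, i.e. three masks of moderate width.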
def _rescale_level_to_arg(level):
# level = [0~1]
factor = level * 0.5
return (factor,)
def _warping_level_to_arg(level):
    # level is in [0, 1]; the warp factor cycles through (0, 0.2] and npoints
    # grows by one for each full 0.2 step of level
factor = tf.math.mod(level, 0.2)
factor = tf.cond(tf.equal(factor, 0.), lambda: 0.2, lambda: factor)
npoints = tf.cast(tf.math.floordiv(level, 0.2), tf.int32) + 1
return (
factor,
npoints,
)
def _dropout_level_to_arg(level):
# level = [0~1]
drop_prob = level * 0.3
return (drop_prob,)
def _loudness_level_to_arg(level):
# level = [0~1]
factor = level * 0.4
return (factor,)
LEVEL_TO_ARG = {
'Identity': _ignore_level_to_arg,
'FreqMask': _mask_level_to_arg,
'TimeMask': _mask_level_to_arg,
'FreqRescale': _rescale_level_to_arg,
'TimeRescale': _rescale_level_to_arg,
'FreqWarping': _warping_level_to_arg,
'TimeWarping': _warping_level_to_arg,
'Dropout': _dropout_level_to_arg,
'Loudness': _loudness_level_to_arg,
}
AUG_OPS = [
'Identity',
'FreqMask',
'TimeMask',
'FreqRescale',
'TimeRescale',
'FreqWarping',
'TimeWarping',
'Dropout',
'Loudness',
]
class RandAugment(object):
"""Random augment with fixed magnitude."""
def __init__(self,
num_layers: int = 2,
prob_to_apply: float = None,
num_levels: int = 10):
"""Initialized rand augment.
Args:
num_layers (int, optional): how many times to do augmentation. Defaults to 2.
prob_to_apply (float, optional): probability to apply on each layer.
If None then always apply. Defaults to None.
num_levels (int, optional): number of levels for quantization of the magnitude. Defaults to 10.
"""
self.num_layers = num_layers
self.prob_to_apply = (
float(prob_to_apply) if prob_to_apply is not None else None)
self.num_levels = int(num_levels) if num_levels else None
def _get_level(self):
level = tf.random.uniform([], 1, self.num_levels + 1, tf.int32)
return (tf.cast(level, tf.float32) / self.num_levels)
def _apply_one_layer(self, data):
"""Applies one level of augmentation to the data."""
level = self._get_level()
branch_fns = []
for augment_op_name in AUG_OPS:
augment_fn = NAME_TO_FUNC[augment_op_name]
level_to_args_fn = LEVEL_TO_ARG[augment_op_name]
def _branch_fn(data=data,
augment_fn=augment_fn,
level_to_args_fn=level_to_args_fn):
args = [data] + list(level_to_args_fn(level))
return augment_fn(*args)
branch_fns.append(_branch_fn)
branch_index = tf.random.uniform(
shape=[], maxval=len(branch_fns), dtype=tf.int32)
aug_data = tf.switch_case(branch_index, branch_fns, default=lambda: data)
if self.prob_to_apply is not None:
return tf.cond(
tf.random.uniform(shape=[], dtype=tf.float32) <
self.prob_to_apply, lambda: aug_data, lambda: data)
else:
return aug_data
def __call__(self, data: tf.Tensor, aug_key='data') -> tf.Tensor:
output_dict = {}
org_shape = data.shape
if aug_key is not None:
aug_data = data
for _ in range(self.num_layers):
aug_data = self._apply_one_layer(aug_data)
# NOTE must set shape for while_loop !
aug_data.set_shape(org_shape)
output_dict[aug_key] = aug_data
if aug_key != 'data':
output_dict['data'] = data
return output_dict
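# Minimal usage sketch (shape illustrative; assumes the transforms.audio ops are importable):
# augmenter = RandAugment(num_layers=2, prob_to_apply=0.5)
# out = augmenter(tf.zeros([80, 128]))  # {'data': augmented mel tensor}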
|
1706836
|
import os
from pathlib import Path
from setuptools import find_packages, setup
import versioneer
CODE_DIRECTORY = Path(__file__).parent
def read_file(filename):
"""Source the contents of a file"""
with open(
os.path.join(os.path.dirname(__file__), filename), encoding="utf-8"
) as file:
return file.read()
setup(
name="avionix",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=find_packages(),
long_description="Coming soon...",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
description="A package for soldifying kubernetes structure and development by "
"using objects and code rather than yaml",
python_requires=">=3.6.1",
install_requires=["pyyaml >=5.4"],
project_urls={
"Source Code": "https://github.com/zbrookle/avionix",
"Documentation": "https://github.com/zbrookle/avionix",
"Bug Tracker": "https://github.com/zbrookle/avionix/issues",
},
url="https://avionix.readthedocs.io/en/latest/index.html",
download_url="https://github.com/zbrookle/avionix/archive/master.zip",
keywords=["kubernetes", "helm", "yaml", "docker", "infrastructure", "devops"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Typing :: Typed",
"Operating System :: OS Independent",
],
long_description_content_type="text/markdown",
include_package_data=True,
package_data={"avionix": ["py.typed"]},
zip_safe=False,
)
|
1706847
|
from mock import Mock
from oauth2 import Provider
from oauth2.compatibility import json
from oauth2.error import OAuthInvalidError, OAuthInvalidNoRedirectError
from oauth2.grant import (AuthorizationCodeGrant, GrantHandler, RefreshToken,
ResourceOwnerGrant)
from oauth2.store import ClientStore
from oauth2.test import unittest
from oauth2.web import (AuthorizationCodeGrantSiteAdapter,
ResourceOwnerGrantSiteAdapter, Response)
from oauth2.web.wsgi import Request
class ProviderTestCase(unittest.TestCase):
def setUp(self):
self.client_store_mock = Mock(spec=ClientStore)
self.token_generator_mock = Mock()
self.response_mock = Mock(spec=Response)
self.response_mock.body = ""
response_class_mock = Mock(return_value=self.response_mock)
self.token_generator_mock.expires_in = {}
self.token_generator_mock.refresh_expires_in = 0
self.auth_server = Provider(access_token_store=Mock(),
auth_code_store=Mock(),
client_store=self.client_store_mock,
token_generator=self.token_generator_mock,
response_class=response_class_mock)
def test_add_grant_set_expire_time(self):
"""
Provider.add_grant() should set the expiration time on the instance of TokenGenerator
"""
self.auth_server.add_grant(
AuthorizationCodeGrant(expires_in=400,
site_adapter=Mock(spec=AuthorizationCodeGrantSiteAdapter))
)
self.auth_server.add_grant(
ResourceOwnerGrant(expires_in=500,
site_adapter=Mock(spec=ResourceOwnerGrantSiteAdapter))
)
self.auth_server.add_grant(RefreshToken(expires_in=1200))
self.assertEqual(self.token_generator_mock.expires_in[AuthorizationCodeGrant.grant_type], 400)
self.assertEqual(self.token_generator_mock.expires_in[ResourceOwnerGrant.grant_type], 500)
self.assertEqual(self.token_generator_mock.refresh_expires_in, 1200)
def test_add_grant_set_unexpired_refresh_time(self):
"""
Provider.add_grant() should set the expiration time on the instance of TokenGenerator
"""
self.auth_server.add_grant(
ResourceOwnerGrant(expires_in=0,
site_adapter=Mock(spec=ResourceOwnerGrantSiteAdapter))
)
self.auth_server.add_grant(RefreshToken(expires_in=0))
self.assertEqual(self.token_generator_mock.expires_in[ResourceOwnerGrant.grant_type], 0)
self.assertEqual(self.token_generator_mock.refresh_expires_in, 0)
def test_dispatch(self):
environ = {"session": "data"}
process_result = "response"
request_mock = Mock(spec=Request)
grant_handler_mock = Mock(spec=["process", "read_validate_params"])
grant_handler_mock.process.return_value = process_result
grant_factory_mock = Mock(return_value=grant_handler_mock)
self.auth_server.site_adapter = Mock(spec=AuthorizationCodeGrantSiteAdapter)
self.auth_server.add_grant(grant_factory_mock)
result = self.auth_server.dispatch(request_mock, environ)
grant_factory_mock.assert_called_with(request_mock, self.auth_server)
grant_handler_mock.read_validate_params.assert_called_with(request_mock)
grant_handler_mock.process.assert_called_with(request_mock, self.response_mock, environ)
self.assertEqual(result, process_result)
def test_dispatch_no_grant_type_found(self):
error_body = {
"error": "unsupported_response_type",
"error_description": "Grant not supported"
}
request_mock = Mock(spec=Request)
result = self.auth_server.dispatch(request_mock, {})
self.response_mock.add_header.assert_called_with("Content-Type", "application/json")
self.assertEqual(self.response_mock.status_code, 400)
self.assertEqual(self.response_mock.body, json.dumps(error_body))
self.assertEqual(result, self.response_mock)
def test_dispatch_no_client_found(self):
error_body = {
"error": "invalid_redirect_uri",
"error_description": "Invalid redirect URI"
}
request_mock = Mock(spec=Request)
grant_handler_mock = Mock(spec=GrantHandler)
grant_handler_mock.process.side_effect = OAuthInvalidNoRedirectError(error="")
grant_factory_mock = Mock(return_value=grant_handler_mock)
self.auth_server.add_grant(grant_factory_mock)
result = self.auth_server.dispatch(request_mock, {})
self.response_mock.add_header.assert_called_with("Content-Type", "application/json")
self.assertEqual(self.response_mock.status_code, 400)
self.assertEqual(self.response_mock.body, json.dumps(error_body))
self.assertEqual(result, self.response_mock)
def test_dispatch_general_exception(self):
request_mock = Mock(spec=Request)
grant_handler_mock = Mock(spec=GrantHandler)
grant_handler_mock.process.side_effect = KeyError
grant_factory_mock = Mock(return_value=grant_handler_mock)
self.auth_server.add_grant(grant_factory_mock)
self.auth_server.dispatch(request_mock, {})
self.assertTrue(grant_handler_mock.handle_error.called)
|
1706875
|
from templeplus.pymod import PythonModifier
from toee import *
import tpdp
import logbook
import roll_history
debug_enabled = False
def debug_print(*args):
if debug_enabled:
for arg in args:
print arg,
return
def handle_sanctuary(to_hit_eo, d20a):
tgt = d20a.target
if tgt == OBJ_HANDLE_NULL or not tgt.is_critter():
return
if d20a.query_can_be_affected_action_perform(tgt):
return
flags = to_hit_eo.attack_packet.get_flags()
if flags & D20CAF_CRITICAL:
flags &= ~D20CAF_CRITICAL
if flags & D20CAF_HIT:
flags &= ~D20CAF_HIT
to_hit_eo.bonus_list.add_zeroed(262) # Action lost due to Sanctuary
to_hit_eo.attack_packet.set_flags(flags)
return
def add_percent_chance_history_stub():
return
def mirror_image_attack_roll(d20a):
performer = d20a.performer
target = d20a.target
# Work out current Dex bonus to AC
mi_ac_evt_obj = tpdp.EventObjAttack()
mi_ac_evt_obj.attack_packet.attacker = performer
mi_ac_evt_obj.attack_packet.target = target
flags = d20a.flags
flags |= D20CAF_TOUCH_ATTACK
mi_ac_evt_obj.attack_packet.set_flags(flags)
mi_ac_evt_obj.attack_packet.action_type = d20a.action_type
mi_ac_evt_obj.dispatch(target, OBJ_HANDLE_NULL, ET_OnGetAC, EK_NONE)
full_bonus = mi_ac_evt_obj.bonus_list
tgt_ac = full_bonus.get_sum()
dex = target.stat_level_get(stat_dexterity)
    dex_mod = -((dex - 10) / 2)  # negate the (floored) Dex modifier to strip it from AC
full_bonus.modify(dex_mod, 3, 104)
tgt_ac_mod = full_bonus.get_sum()
dex_ac_bonus = tgt_ac - tgt_ac_mod
size_offset = target.stat_level_get(stat_size) - STAT_SIZE_MEDIUM
size_bonus = 0
if size_offset < 0:
size_offset = - size_offset
size_bonus = 1 << (size_offset-1)
elif size_offset > 0:
size_bonus = (-1) << (size_offset-1)
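    # AC size modifier doubles per category away from Medium: +1/+2/+4 for
    # smaller-than-Medium targets, -1/-2/-4 for larger ones.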
image_bonus = tpdp.BonusList()
image_bonus.add(10, 1, 102)
image_bonus.add(dex_ac_bonus, 3, 104)
image_bonus.add(size_bonus, 0, 115)
image_ac = image_bonus.get_sum()
#Performer to Hit Bonus
to_hit = tpdp.EventObjAttack()
to_hit.dispatch(performer, OBJ_HANDLE_NULL, ET_OnToHitBonus2, EK_NONE)
to_hit_dice = dice_new("1d20")
to_hit_roll = to_hit_dice.roll()
to_hit_bonus = to_hit.bonus_list.get_sum()
roll_id = tpdp.create_history_attack_roll(
performer, target, to_hit_roll, to_hit.bonus_list,
image_bonus, to_hit.attack_packet.get_flags())
result = to_hit_roll - image_ac + to_hit_bonus
d20a.roll_id_0 = roll_id
return result
def hitMirrorImage(d20a, numberOfMirrorImages):
#Check if real target was hit
#A roll of 1 indicates hit on real target
mirrorDice = dice_new("1d{}".format(numberOfMirrorImages+1) )
mirrorRoll = mirrorDice.roll()
if mirrorRoll == 1:
return False
performer = d20a.performer
target = d20a.target
#Get spell_id and spellName
spell_id = target.d20_query_get_data(Q_Critter_Has_Mirror_Image,0)
roll_result = mirror_image_attack_roll(d20a)
if roll_result >= 0:
target.d20_send_signal(S_Spell_Mirror_Image_Struck, spell_id, 0)
target.float_mesfile_line('mes\\combat.mes', 109)
game.create_history_from_pattern(10, performer, target)
return True
def getDefenderConcealment(d20a):
target = d20a.target
defenderConcealment = tpdp.EventObjAttack()
defenderConcealment.attack_packet.set_flags(d20a.flags)
defenderConcealment.attack_packet.target = target
defenderConcealment.attack_packet.attacker = d20a.performer
return defenderConcealment.dispatch(target, OBJ_HANDLE_NULL, ET_OnGetDefenderConcealmentMissChance, EK_NONE)
def getAttackerConcealment(performer):
performerConcealment = tpdp.EventObjObjectBonus()
performerConcealment.dispatch(performer, ET_OnGetAttackerConcealmentMissChance, EK_NONE)
return performerConcealment.bonus_list.get_highest()
def getSuppressConcealment(performer, target):
#suppressingConditions can be easily expanded with new conditions if necessary
suppressingConditions = [tpdp.get_condition_ref("sp-True Strike"), tpdp.get_condition_ref("Weapon Seeking")]
if any(performer.d20_query_with_data(Q_Critter_Has_Condition, conRef, 0) for conRef in suppressingConditions):
return True
elif performer.can_blindsee(target):
return True
elif performer.d20_query("Ignore Concealment"): #Example for Arcane Archer; not implemented in AA
return True
return False
def rollConcealment(concealmentMissChance):
concealmentDice = dice_new("1d100")
concealmentDiceRoll = concealmentDice.roll()
if concealmentDiceRoll > concealmentMissChance:
return True, concealmentDiceRoll
return False, concealmentDiceRoll
def toHitResult(performerToHit, targetAc):
toHitDice = dice_new("1d20")
toHitRoll = toHitDice.roll()
if toHitRoll == 1:
return False, toHitRoll
elif toHitRoll == 20:
return True, toHitRoll
elif toHitRoll + performerToHit >= targetAc:
return True, toHitRoll
return False, toHitRoll
def to_hit_processing(d20a):
performer = d20a.performer #auto performer = d20a.d20APerformer;
d20Data = d20a.data1 #auto d20Data = d20a.data1;
target = d20a.target #auto tgt = d20a.d20ATarget;
if not target:
return
#Mirror Image
numberOfMirrorImages = target.d20_query(Q_Critter_Has_Mirror_Image)
if numberOfMirrorImages:
if hitMirrorImage(d20a, numberOfMirrorImages):
return
#Concealment
debug_print("Concealment")
targetConcealment = getDefenderConcealment(d20a)
performerCanSuppressConcealment = getSuppressConcealment(performer, target)
if performerCanSuppressConcealment:
targetConcealment = 0
concealmentMissChance = max(targetConcealment, getAttackerConcealment(performer))
if concealmentMissChance > 0:
is_success, miss_chance_roll = rollConcealment(concealmentMissChance)
if is_success:
roll_id = roll_history.add_percent_chance_roll(performer, target, concealmentMissChance, 60, miss_chance_roll, 194, 193)
d20a.roll_id_1 = roll_id
else: # concealment miss
roll_id = roll_history.add_percent_chance_roll(performer, target, concealmentMissChance, 60, miss_chance_roll, 195, 193)
d20a.roll_id_1 = roll_id
# Blind fight - give second chance
if not performer.has_feat(feat_blind_fight):
return
is_success, miss_chance_roll = rollConcealment(concealmentMissChance)
if not is_success:
roll_id = roll_history.add_percent_chance_roll(performer, target, concealmentMissChance, 61, miss_chance_roll, 195, 193)
return
roll_id = roll_history.add_percent_chance_roll(performer, target, concealmentMissChance, 61, miss_chance_roll, 194, 193)
d20a.roll_id_2 = roll_id
#ToHitBonus Actions
debug_print("To Hit")
to_hit_eo = tpdp.EventObjAttack()
to_hit_eo.attack_packet.set_flags(d20a.flags)
to_hit_eo.attack_packet.target = target
to_hit_eo.attack_packet.action_type = d20a.action_type #dispIoToHitBon.attackPacket.d20ActnType = d20a.action_type
to_hit_eo.attack_packet.attacker = performer
to_hit_eo.attack_packet.event_key = d20Data #dispIoToHitBon.attackPacket.dispKey = d20Data
unarmed = OBJ_HANDLE_NULL
if to_hit_eo.attack_packet.get_flags() & D20CAF_TOUCH_ATTACK:
to_hit_eo.attack_packet.set_weapon_used(unarmed)
elif to_hit_eo.attack_packet.get_flags() & D20CAF_SECONDARY_WEAPON:
offhandItem = performer.item_worn_at(item_wear_weapon_secondary)
if offhandItem == OBJ_HANDLE_NULL or offhandItem.type != obj_t_weapon:
to_hit_eo.attack_packet.set_weapon_used(unarmed)
else:
to_hit_eo.attack_packet.set_weapon_used(offhandItem)
else:
mainhandItem = performer.item_worn_at(item_wear_weapon_primary)
if mainhandItem == OBJ_HANDLE_NULL or mainhandItem.type != obj_t_weapon:
to_hit_eo.attack_packet.set_weapon_used(unarmed)
else:
to_hit_eo.attack_packet.set_weapon_used(mainhandItem)
to_hit_eo.attack_packet.ammo_item = performer.get_ammo_used()
flags = to_hit_eo.attack_packet.get_flags()
flags |= D20CAF_FINAL_ATTACK_ROLL
to_hit_eo.attack_packet.set_flags(flags)
to_hit_eo.dispatch(performer, OBJ_HANDLE_NULL, ET_OnGetBucklerAcPenalty , EK_NONE)
to_hit_eo.dispatch(performer, OBJ_HANDLE_NULL, ET_OnToHitBonus2, EK_NONE) # // note: the "Global" condition has ToHitBonus2 hook that dispatches the ToHitBonusBase
to_hit_bon_final = to_hit_eo.dispatch(target, OBJ_HANDLE_NULL, ET_OnToHitBonusFromDefenderCondition, EK_NONE)
#targetAc Actions
debug_print("Target AC")
target_ac_eo = to_hit_eo.__copy__()
target_ac_eo.bonus_list.reset()
target_ac_eo.dispatch(target, OBJ_HANDLE_NULL, ET_OnGetAC, EK_NONE)
tgt_ac_final = target_ac_eo.dispatch(performer, OBJ_HANDLE_NULL, ET_OnGetAcModifierFromAttacker, EK_NONE)
#Check if attacks hits
attackDidHit, toHitRoll = toHitResult(to_hit_bon_final, tgt_ac_final)
critAlwaysCheat = cheats.critical #Note: changed behavior from vanilla (this used to toggle the property)
#Check for special hit conditions
if not attackDidHit:
if to_hit_eo.attack_packet.get_flags() & D20CAF_ALWAYS_HIT:
attackDidHit = True
elif critAlwaysCheat:
attackDidHit = True
else:
#Reroll Check
if performer.d20_query(Q_RerollAttack):
tpdp.create_history_attack_roll(performer, target, toHitRoll, to_hit_eo.bonus_list, target_ac_eo.bonus_list, to_hit_eo.attack_packet.get_flags() )
rerollDidHit, toHitRoll = toHitResult(to_hit_bon_final, tgt_ac_final)
flags = to_hit_eo.attack_packet.get_flags()
flags |= D20CAF_REROLL
to_hit_eo.attack_packet.set_flags(flags)
if not rerollDidHit:
logbook.inc_misses(performer)
else:
attackDidHit = True
if not attackDidHit:
debug_print("Missed")
roll_id = tpdp.create_history_attack_roll(performer, target, toHitRoll, to_hit_eo.bonus_list, target_ac_eo.bonus_list, to_hit_eo.attack_packet.get_flags() )
d20a.roll_id_0 = roll_id
return
#We have a hit sir!
debug_print("Scored a hit")
flags = to_hit_eo.attack_packet.get_flags()
flags |= D20CAF_HIT
to_hit_eo.attack_packet.set_flags(flags)
logbook.inc_hits(performer)
#Check if attack was a critical hit
performerCritRange = to_hit_eo.__copy__()
performerCritRange.bonus_list.reset()
critRange = 21 - performerCritRange.dispatch(performer, OBJ_HANDLE_NULL, ET_OnGetCriticalHitRange, EK_NONE)
if target.d20_query(Q_Critter_Is_Immune_Critical_Hits):
isCritical = False
elif toHitRoll == 20:
isCritical = True
elif toHitRoll >= critRange:
isCritical = True
elif critAlwaysCheat:
isCritical = True
else:
isCritical = False
#Check to Confirm Critical Hit
crit_hit_roll = -1
if isCritical:
debug_print("Confirm critical:")
to_hit_bon_final += to_hit_eo.dispatch(performer, OBJ_HANDLE_NULL, ET_OnConfirmCriticalBonus, EK_NONE)
critConfirmed, crit_hit_roll = toHitResult(to_hit_bon_final, tgt_ac_final)
#Check for special confirm conditions
if not critConfirmed:
if performer.d20_query("Always Confirm Criticals"):
critConfirmed = True
elif critAlwaysCheat:
critConfirmed = True
else:
if performer.d20_query(Q_RerollCritical):
tpdp.create_history_attack_roll(performer, target, toHitRoll, to_hit_eo.bonus_list, target_ac_eo.bonus_list, to_hit_eo.attack_packet.get_flags(), crit_hit_roll )
critConfirmed, crit_hit_roll = toHitResult(to_hit_bon_final, tgt_ac_final)
#no reroll flag seems to be added in original code
if critConfirmed:
debug_print("Crit confirm")
flags = to_hit_eo.attack_packet.get_flags()
flags |= D20CAF_CRITICAL
to_hit_eo.attack_packet.set_flags(flags)
#Deflect Arrows
#Unsure why it is done after confirm crit,
#If done before, history window for normal attack
#could be done earlier
#dispIoToHitBon.Dispatch(dispIoToHitBon.attackPacket.victim, objHndl::null, dispTypeDeflectArrows, DK_NONE)
#unsure why it is not simply tgt, will copy it
to_hit_eo.dispatch(to_hit_eo.attack_packet.target, OBJ_HANDLE_NULL, ET_OnDeflectArrows, EK_NONE)
handle_sanctuary(to_hit_eo, d20a)
#Set flags
debug_print("Final")
d20a.flags = to_hit_eo.attack_packet.get_flags()
roll_id = tpdp.create_history_attack_roll(performer, target, toHitRoll, to_hit_eo.bonus_list, target_ac_eo.bonus_list, to_hit_eo.attack_packet.get_flags(), crit_hit_roll )
d20a.roll_id_0 = roll_id
return
|
1706892
|
import sublime
import sublime_plugin
from ..libs.global_vars import *
from ..libs import cli, logger
import os
class TypescriptOpenPluginDefaultSettingFile(sublime_plugin.WindowCommand):
def run(self):
default_plugin_setting_path = os.path.join(PLUGIN_DIR, "Preferences.sublime-settings")
sublime.active_window().open_file(default_plugin_setting_path)
class TypescriptOpenTsDefaultSettingFile(sublime_plugin.WindowCommand):
def run(self):
default_ts_setting_path = os.path.join(PLUGIN_DIR, "TypeScript.sublime-settings")
sublime.active_window().open_file(default_ts_setting_path)
class TypescriptOpenTsreactDefaultSettingFile(sublime_plugin.WindowCommand):
def run(self):
default_tsreact_setting_path = os.path.join(PLUGIN_DIR, "TypeScriptReact.sublime-settings")
sublime.active_window().open_file(default_tsreact_setting_path)
|
1706917
|
def test_args(x,y):
"""
Number * Number -> Number
Hyp : empty
return the sum of x and y
"""
return x+y
assert test_args(1,2) == 3
assert test_args(1.5,22.5) == 24
|
1707043
|
from __future__ import print_function, division
import sys,os
qspin_path = os.path.join(os.getcwd(),"../")
sys.path.insert(0,qspin_path)
from quspin.basis import spin_basis_general
from quspin.basis.transformations import square_lattice_trans
from quspin.operators import hamiltonian
import numpy as np
from itertools import product
def test(S,Lx,Ly):
N = Lx*Ly
nmax = int(eval("2*"+S))
sps = nmax+1
tr = square_lattice_trans(Lx,Ly)
basis_dict = {}
Nups=range(nmax*N+1)
for Nup in Nups:
basis_blocks=[]
pcon_basis = spin_basis_general(N,Nup=Nup,S=S)
Ns_block = 0
for blocks in tr.allowed_blocks_spin_inversion_iter(Nup,sps):
basis = spin_basis_general(N,Nup=Nup,S=S,**blocks)
Ns_block += basis.Ns
basis_blocks.append(basis)
try:
assert(Ns_block == pcon_basis.Ns)
except AssertionError:
print(Nup,Ns_block,pcon_basis.Ns)
raise AssertionError("reduced blocks don't sum to particle sector.")
basis_dict[Nup] = (pcon_basis,basis_blocks)
J = [[1.0,i,tr.T_x[i]] for i in range(N)]
J.extend([[1.0,i,tr.T_y[i]] for i in range(N)])
static = [["zz",J],["+-",J],["-+",J]]
E_symm = {}
for Nb,(pcon_basis,basis_blocks) in basis_dict.items():
H_pcon = hamiltonian(static,[],basis=pcon_basis,dtype=np.float64)
if H_pcon.Ns>0:
E_pcon = np.linalg.eigvalsh(H_pcon.todense())
else:
E_pcon = np.array([])
E_block = []
for basis in basis_blocks:
H = hamiltonian(static,[],basis=basis,dtype=np.complex128)
if H.Ns>0:
E_block.append(np.linalg.eigvalsh(H.todense()))
E_block = np.hstack(E_block)
E_block.sort()
np.testing.assert_allclose(E_pcon,E_block,atol=1e-13)
print("passed Nb={} sector".format(Nb))
test("1/2",3,3)
test("1",3,3)
test("1/2",3,2)
test("1",3,2)
|
1707053
|
import torch
import torch.nn as nn
from pytorch_wavelets.dwt.lowlevel import *
def _SFB2D(low, highs, g0_row, g1_row, g0_col, g1_col, mode):
mode = int_to_mode(mode)
lh, hl, hh = torch.unbind(highs, dim=2)
lo = sfb1d(low, lh, g0_col, g1_col, mode=mode, dim=2)
hi = sfb1d(hl, hh, g0_col, g1_col, mode=mode, dim=2)
y = sfb1d(lo, hi, g0_row, g1_row, mode=mode, dim=3)
return y
class DWTInverse(nn.Module):
""" Performs a 2d DWT Inverse reconstruction of an image
Args:
wave (str or pywt.Wavelet): Which wavelet to use
C: deprecated, will be removed in future
"""
def __init__(self, wave='db1', mode='zero', trace_model=False):
super().__init__()
if isinstance(wave, str):
wave = pywt.Wavelet(wave)
if isinstance(wave, pywt.Wavelet):
g0_col, g1_col = wave.rec_lo, wave.rec_hi
g0_row, g1_row = g0_col, g1_col
else:
if len(wave) == 2:
g0_col, g1_col = wave[0], wave[1]
g0_row, g1_row = g0_col, g1_col
elif len(wave) == 4:
g0_col, g1_col = wave[0], wave[1]
g0_row, g1_row = wave[2], wave[3]
# Prepare the filters
filts = prep_filt_sfb2d(g0_col, g1_col, g0_row, g1_row)
self.register_buffer('g0_col', filts[0])
self.register_buffer('g1_col', filts[1])
self.register_buffer('g0_row', filts[2])
self.register_buffer('g1_row', filts[3])
self.mode = mode
self.trace_model = trace_model
def forward(self, coeffs):
"""
Args:
coeffs (yl, yh): tuple of lowpass and bandpass coefficients, where:
yl is a lowpass tensor of shape :math:`(N, C_{in}, H_{in}',
W_{in}')` and yh is a list of bandpass tensors of shape
:math:`list(N, C_{in}, 3, H_{in}'', W_{in}'')`. I.e. should match
the format returned by DWTForward
Returns:
Reconstructed input of shape :math:`(N, C_{in}, H_{in}, W_{in})`
Note:
:math:`H_{in}', W_{in}', H_{in}'', W_{in}''` denote the correctly
downsampled shapes of the DWT pyramid.
Note:
Can have None for any of the highpass scales and will treat the
values as zeros (not in an efficient way though).
"""
yl, yh = coeffs
ll = yl
mode = mode_to_int(self.mode)
# Do a multilevel inverse transform
for h in yh[::-1]:
if h is None:
h = torch.zeros(ll.shape[0], ll.shape[1], 3, ll.shape[-2],
ll.shape[-1], device=ll.device)
# 'Unpad' added dimensions
if ll.shape[-2] > h.shape[-2]:
ll = ll[...,:-1,:]
if ll.shape[-1] > h.shape[-1]:
ll = ll[...,:-1]
if not self.trace_model:
ll = SFB2D.apply(ll, h, self.g0_col, self.g1_col, self.g0_row, self.g1_row, mode)
else:
ll = _SFB2D(ll, h, self.g0_col, self.g1_col, self.g0_row, self.g1_row, mode)
return ll
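# Usage sketch (pairs with pytorch_wavelets' DWTForward; shapes illustrative):
# xfm = DWTForward(J=2, wave='db1', mode='zero')
# yl, yh = xfm(torch.randn(1, 3, 64, 64))
# ifm = DWTInverse(wave='db1', mode='zero')
# rec = ifm((yl, yh))  # shape (1, 3, 64, 64)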
|
1707055
|
import pytest
import numpy as np
import torch
from torch import nn
import torch.optim as optim
import nics_fix_pt as nfp
# When module_cfg's nf_fix_params is set, it means scale=-1, bitwidth=2, method=FIX_AUTO; see the default config in the conftest module_cfg fixture.
@pytest.mark.parametrize(
"module_cfg, case",
[
(
{"input_num": 3},
{
"inputs": [1, 1, 0],
"data": [0.2513, -0.52, 0],
"out_scale": 1,
"result": 0,
"output": [0.5, -0.5, 0], # quantized parameters, step 0.5
},
),
(
{"input_num": 3},
{
"inputs": [1, 1, 0],
"data": [0.2513, -0.5, 0],
"out_scale": 0.5,
"result": -0.25,
"output": [0.25, -0.5, 0], # quantized parameters, step 0.25
},
),
],
indirect=["module_cfg"],
)
def test_fix_forward_auto(module_cfg, case):
module, cfg, _ = module_cfg
if "data" in case:
module.param[0, :] = torch.tensor(case["data"])
with torch.no_grad():
res = module.forward(torch.tensor(case["inputs"]).float())
assert np.isclose(res, case["result"]) # calc output
assert np.isclose(module.param, case["output"]).all() # quantized parameter
assert cfg["param"]["scale"] == case["out_scale"] # scale
@pytest.mark.parametrize(
"module_cfg, case",
[
(
{"input_num": 3},
{
"inputs": [[1, 1, 0], [1, 2, 0]],
"data": [0.2513, -0.52, 0],
"out_scale": 1,
"result": [[0], [-0.5]],
"output": [0.5, -0.5, 0], # quantized parameters, step 0.5
},
),
(
{"input_num": 3},
{
"inputs": [[1, 1, 0], [1, 1, 0]],
"data": [0.2513, -0.52, 0],
"out_scale": 1,
"result": [[0], [0]],
"output": [0.5, -0.5, 0], # quantized parameters, step 0.5
},
),
(
{"input_num": 3},
{
"inputs": [[1, 1, 0], [1, 1, 0]],
"data": [0.2513, -0.5, 0],
"out_scale": 0.5,
"result": [[-0.25], [-0.25]],
"output": [0.25, -0.5, 0], # quantized parameters, step 0.25
},
),
],
indirect=["module_cfg"],
)
def test_fix_forward_parallel_gpu(module_cfg, case):
module, cfg, _ = module_cfg
if "data" in case:
module.param.data[0, :] = torch.tensor(case["data"])
model = nn.DataParallel(module.cuda(), [0, 1])
with torch.no_grad():
res = model(torch.tensor(case["inputs"]).float().cuda())
assert cfg["param"]["scale"] == case["out_scale"] # scale
assert np.isclose(res.cpu(), case["result"]).all() # calc output
# assert np.isclose(module.param.cpu(), case["output"]).all() # quantized parameter
# this will not change,
# but the gradient will still be accumulated in module_parameters[name].grad
@pytest.mark.parametrize(
"module_cfg, case",
[
(
{"input_num": 3, "grad_cfg": {"method": nfp.FIX_AUTO}},
{
"inputs": [0.52, -0.27, 0],
"data": [0, 0, 0],
"grad_scale": 1,
"output": [0.5, -0.5, 0],
},
),
(
{"input_num": 3, "grad_cfg": {"method": nfp.FIX_AUTO}},
{
"inputs": [0.5, -0.27, 0],
"data": [0, 0, 0],
"grad_scale": 0.5,
"output": [0.5, -0.25, 0], # quantized gradients
},
),
],
indirect=["module_cfg"],
)
def test_fix_backward_auto(module_cfg, case):
module, _, cfg = module_cfg
if "data" in case:
module.param.data[0, :] = torch.tensor(case["data"])
res = module.forward(torch.tensor(case["inputs"]).float())
res.backward()
assert np.isclose(
module._parameters["param"].grad, case["output"]
).all() # quantized gradient
assert cfg["param"]["scale"] == case["grad_scale"] # scale
@pytest.mark.parametrize(
"module_cfg, case",
[
(
{"input_num": 3, "data_cfg": {"method": nfp.FIX_NONE},
"grad_cfg": {"method": nfp.FIX_AUTO}},
{
"inputs": [[0.52, -0.27, 0], [0.52, -0.27, 0]],
"data": [0, 0, 0],
"grad_scale": 1,
"output": [0.5, -0.5, 0],
},
),
(
{"input_num": 3, "grad_cfg": {"method": nfp.FIX_AUTO}},
{
"inputs": [[0.5, -0.27, 0], [0.5, -0.27, 0]],
"data": [0, 0, 0],
"grad_scale": 0.5,
"output": [0.5, -0.25, 0], # quantized gradients
},
),
],
indirect=["module_cfg"],
)
def test_fix_backward_parallel_gpu(module_cfg, case):
module, _, cfg = module_cfg
if "data" in case:
module.param.data[0, :] = torch.tensor(case["data"])
model = nn.DataParallel(module.cuda(), [0, 1])
res = torch.sum(model(torch.tensor(case["inputs"]).float().cuda()))
res.backward()
assert np.isclose(
module._parameters["param"].grad.cpu(), 2 * np.array(case["output"])
).all() # quantized gradient, 2 batch, grad x 2
assert cfg["param"]["scale"] == case["grad_scale"] # scale
@pytest.mark.parametrize(
"module_cfg, case",
[
(
{"input_num": 3, "grad_cfg": {"method": nfp.FIX_AUTO}},
{
"inputs": [0.52, -0.27, 0],
"data": [0, 0, 0],
"grad_scale": 1,
"output": [0.5, -0.5, 0],
},
),
(
{"input_num": 3, "grad_cfg": {"method": nfp.FIX_AUTO}},
{
"inputs": [0.5, -0.27, 0],
"data": [0, 0, 0],
"grad_scale": 0.5,
"output": [0.5, -0.25, 0], # quantized gradients
},
),
],
indirect=["module_cfg"],
)
def test_fix_update_auto(module_cfg, case):
module, _, cfg = module_cfg
if "data" in case:
module.param.data[0, :] = torch.tensor(case["data"])
optimizer = optim.SGD(module.parameters(), lr=1.0, momentum=0)
res = module.forward(torch.tensor(case["inputs"]).float())
res.backward()
optimizer.step()
assert np.isclose(
-module._parameters["param"].detach(), case["output"]
).all() # updated parameter should be - lr * gradient
assert cfg["param"]["scale"] == case["grad_scale"] # scale
def test_ConvBN_fix():
from nics_fix_pt.nn_fix import ConvBN_fix
# float forward and combine forward get the same results
module = ConvBN_fix(3, 32, nf_fix_params={}).cuda()
module.train()
data = torch.tensor(np.random.rand(128, 3, 32, 32).astype(np.float32)).cuda()
comb_out = module(data)
float_out = module.bn(module.conv(data))
    assert ((float_out - comb_out).abs() < 1e-3).all()
    # eval mode: construct the module first, then switch it to eval
    module = ConvBN_fix(3, 32, nf_fix_params={}).cuda()
    module.eval()
    data = torch.tensor(np.random.rand(128, 3, 32, 32).astype(np.float32)).cuda()
comb_out = module(data)
float_out = module.bn(module.conv(data))
    assert ((float_out - comb_out).abs() < 1e-3).all()
|
1707089
|
import tensorflow as tf
import os.path as osp
op_file = 'roi_pooling_op_gpu_cuda8.so' # for CUDA 8
#op_file = 'roi_pooling_op_gpu.so' # CUDA 7.5
filename = osp.join(osp.dirname(__file__), op_file)
_roi_pooling_module = tf.load_op_library(filename)
roi_pool = _roi_pooling_module.roi_pool
roi_pool_grad = _roi_pooling_module.roi_pool_grad
|
1707139
|
import torch, os, numpy as np, copy
import cv2
import glob
from .map import GeometricMap
class preprocess(object):
def __init__(self, data_root, seq_name, parser, log, split='train', phase='training'):
self.parser = parser
self.dataset = parser.dataset
self.data_root = data_root
self.past_frames = parser.past_frames
self.future_frames = parser.future_frames
self.frame_skip = parser.get('frame_skip', 1)
self.min_past_frames = parser.get('min_past_frames', self.past_frames)
self.min_future_frames = parser.get('min_future_frames', self.future_frames)
self.traj_scale = parser.traj_scale
self.past_traj_scale = parser.traj_scale
self.load_map = parser.get('load_map', False)
self.map_version = parser.get('map_version', '0.1')
self.seq_name = seq_name
self.split = split
self.phase = phase
self.log = log
if parser.dataset == 'nuscenes_pred':
label_path = os.path.join(data_root, 'label/{}/{}.txt'.format(split, seq_name))
delimiter = ' '
elif parser.dataset in {'eth', 'hotel', 'univ', 'zara1', 'zara2'}:
label_path = f'{data_root}/{parser.dataset}/{seq_name}.txt'
delimiter = ' '
else:
assert False, 'error'
self.gt = np.genfromtxt(label_path, delimiter=delimiter, dtype=str)
        frames = self.gt[:, 0].astype(np.float32).astype(int)
fr_start, fr_end = frames.min(), frames.max()
self.init_frame = fr_start
self.num_fr = fr_end + 1 - fr_start
if self.load_map:
self.load_scene_map()
else:
self.geom_scene_map = None
self.class_names = class_names = {'Pedestrian': 1, 'Car': 2, 'Cyclist': 3, 'Truck': 4, 'Van': 5, 'Tram': 6, 'Person': 7, \
'Misc': 8, 'DontCare': 9, 'Traffic_cone': 10, 'Construction_vehicle': 11, 'Barrier': 12, 'Motorcycle': 13, \
'Bicycle': 14, 'Bus': 15, 'Trailer': 16, 'Emergency': 17, 'Construction': 18}
for row_index in range(len(self.gt)):
self.gt[row_index][2] = class_names[self.gt[row_index][2]]
self.gt = self.gt.astype('float32')
self.xind, self.zind = 13, 15
def GetID(self, data):
id = []
for i in range(data.shape[0]):
id.append(data[i, 1].copy())
return id
def TotalFrame(self):
return self.num_fr
    def PreData(self, frame):
        DataList = []
        for i in range(self.past_frames):
            if frame - i * self.frame_skip < self.init_frame:
                data = []  # frame predates the recording; mark as missing
            else:
                data = self.gt[self.gt[:, 0] == (frame - i * self.frame_skip)]
            DataList.append(data)
        return DataList
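    # Frames that predate the recording are plain empty lists; get_valid_id()
    # relies on isinstance(data, list) to treat them as missing.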
def FutureData(self, frame):
DataList = []
for i in range(1, self.future_frames + 1):
data = self.gt[self.gt[:, 0] == (frame + i * self.frame_skip)]
DataList.append(data)
return DataList
def get_valid_id(self, pre_data, fut_data):
cur_id = self.GetID(pre_data[0])
valid_id = []
for idx in cur_id:
exist_pre = [(False if isinstance(data, list) else (idx in data[:, 1])) for data in pre_data[:self.min_past_frames]]
exist_fut = [(False if isinstance(data, list) else (idx in data[:, 1])) for data in fut_data[:self.min_future_frames]]
if np.all(exist_pre) and np.all(exist_fut):
valid_id.append(idx)
return valid_id
def get_pred_mask(self, cur_data, valid_id):
        pred_mask = np.zeros(len(valid_id), dtype=int)
for i, idx in enumerate(valid_id):
pred_mask[i] = cur_data[cur_data[:, 1] == idx].squeeze()[-1]
return pred_mask
def get_heading(self, cur_data, valid_id):
heading = np.zeros(len(valid_id))
for i, idx in enumerate(valid_id):
heading[i] = cur_data[cur_data[:, 1] == idx].squeeze()[16]
return heading
def load_scene_map(self):
map_file = f'{self.data_root}/map_{self.map_version}/{self.seq_name}.png'
map_vis_file = f'{self.data_root}/map_{self.map_version}/vis_{self.seq_name}.png'
map_meta_file = f'{self.data_root}/map_{self.map_version}/meta_{self.seq_name}.txt'
self.scene_map = np.transpose(cv2.imread(map_file), (2, 0, 1))
self.scene_vis_map = np.transpose(cv2.cvtColor(cv2.imread(map_vis_file), cv2.COLOR_BGR2RGB), (2, 0, 1))
self.meta = np.loadtxt(map_meta_file)
self.map_origin = self.meta[:2]
self.map_scale = scale = self.meta[2]
homography = np.array([[scale, 0., 0.], [0., scale, 0.], [0., 0., scale]])
self.geom_scene_map = GeometricMap(self.scene_map, homography, self.map_origin)
self.scene_vis_map = GeometricMap(self.scene_vis_map, homography, self.map_origin)
def PreMotion(self, DataTuple, valid_id):
motion = []
mask = []
for identity in valid_id:
mask_i = torch.zeros(self.past_frames)
box_3d = torch.zeros([self.past_frames, 2])
for j in range(self.past_frames):
past_data = DataTuple[j] # past_data
if len(past_data) > 0 and identity in past_data[:, 1]:
found_data = past_data[past_data[:, 1] == identity].squeeze()[[self.xind, self.zind]] / self.past_traj_scale
box_3d[self.past_frames-1 - j, :] = torch.from_numpy(found_data).float()
mask_i[self.past_frames-1 - j] = 1.0
elif j > 0:
box_3d[self.past_frames-1 - j, :] = box_3d[self.past_frames - j, :] # if none, copy from previous
else:
raise ValueError('current id missing in the first frame!')
motion.append(box_3d)
mask.append(mask_i)
return motion, mask
def FutureMotion(self, DataTuple, valid_id):
motion = []
mask = []
for identity in valid_id:
mask_i = torch.zeros(self.future_frames)
pos_3d = torch.zeros([self.future_frames, 2])
for j in range(self.future_frames):
fut_data = DataTuple[j] # cur_data
if len(fut_data) > 0 and identity in fut_data[:, 1]:
found_data = fut_data[fut_data[:, 1] == identity].squeeze()[[self.xind, self.zind]] / self.traj_scale
pos_3d[j, :] = torch.from_numpy(found_data).float()
mask_i[j] = 1.0
elif j > 0:
pos_3d[j, :] = pos_3d[j - 1, :] # if none, copy from previous
else:
raise ValueError('current id missing in the first frame!')
motion.append(pos_3d)
mask.append(mask_i)
return motion, mask
def __call__(self, frame):
assert frame - self.init_frame >= 0 and frame - self.init_frame <= self.TotalFrame() - 1, 'frame is %d, total is %d' % (frame, self.TotalFrame())
pre_data = self.PreData(frame)
fut_data = self.FutureData(frame)
valid_id = self.get_valid_id(pre_data, fut_data)
if len(pre_data[0]) == 0 or len(fut_data[0]) == 0 or len(valid_id) == 0:
return None
if self.dataset == 'nuscenes_pred':
pred_mask = self.get_pred_mask(pre_data[0], valid_id)
heading = self.get_heading(pre_data[0], valid_id)
else:
pred_mask = None
heading = None
pre_motion_3D, pre_motion_mask = self.PreMotion(pre_data, valid_id)
fut_motion_3D, fut_motion_mask = self.FutureMotion(fut_data, valid_id)
data = {
'pre_motion_3D': pre_motion_3D,
'fut_motion_3D': fut_motion_3D,
'fut_motion_mask': fut_motion_mask,
'pre_motion_mask': pre_motion_mask,
'pre_data': pre_data,
'fut_data': fut_data,
'heading': heading,
'valid_id': valid_id,
'traj_scale': self.traj_scale,
'pred_mask': pred_mask,
'scene_map': self.geom_scene_map,
'seq': self.seq_name,
'frame': frame
}
return data
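# A minimal usage sketch (hypothetical paths; the parser/log objects come from the
# surrounding training pipeline):
#
#   pre = preprocess('datasets/eth_ucy', 'eth', parser, log, split='train')
#   sample = pre(pre.init_frame + pre.past_frames)  # dict of pre/fut motion, masks, ids
#   if sample is not None:
#       print(len(sample['valid_id']), 'agents at frame', sample['frame'])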
|
1707149
|
from typing import List, Tuple
class Solution:
row_state = [[False for i in range(10)] for _ in range(9)]
column_state = [[False for i in range(10)] for _ in range(9)]
box_state = [[False for i in range(10)] for _ in range(9)]
board = []
def solveSudoku(self, board: List[List[str]]) -> None:
self.row_state = [[False for i in range(10)] for _ in range(9)]
self.column_state = [[False for i in range(10)] for _ in range(9)]
self.box_state = [[False for i in range(10)] for _ in range(9)]
self.board = board
for i in range(9):
for j in range(9):
if self.board[i][j] != ".":
num = int(self.board[i][j])
self.row_state[i][num] = True
self.column_state[j][num] = True
                    self.box_state[(i // 3) * 3 + j // 3][num] = True
        x, y = self.get_max_possible_coordinate()
        self.recursive_place_number(x, y)
    def recursive_place_number(self, row: int, column: int) -> bool:
        if row == -1 and column == -1:
            # no empty cell is left, so the board is solved
            return True
        if self.board[row][column] != ".":
            return False
        for i in range(1, 10):
            if (
                self.row_state[row][i]
                or self.column_state[column][i]
                or self.box_state[(row // 3) * 3 + column // 3][i]
            ):
                continue
            self.place_number(row, column, i)
            x, y = self.get_max_possible_coordinate()
            if self.recursive_place_number(x, y):
                return True
            self.undo_number_placement(row, column, i)
        return False
    def place_number(self, row: int, column: int, i: int) -> None:
self.row_state[row][i] = True
self.column_state[column][i] = True
self.box_state[(row // 3) * 3 + column // 3][i] = True
self.board[row][column] = str(i)
    def undo_number_placement(self, row: int, column: int, i: int) -> None:
self.row_state[row][i] = False
self.column_state[column][i] = False
self.box_state[(row // 3) * 3 + column // 3][i] = False
self.board[row][column] = "."
    def get_max_possible_coordinate(self) -> Tuple[int, int]:
x, y, min_count = -1, -1, 9
for i in range(9):
for j in range(9):
if self.board[i][j] != ".":
continue
tmp_count = 9
                for k in range(1, 10):  # digits live at indices 1..9
if (
self.row_state[i][k]
or self.column_state[j][k]
or self.box_state[(i // 3) * 3 + j // 3][k]
):
tmp_count -= 1
if tmp_count == 1:
return i, j
if min_count > tmp_count:
min_count = tmp_count
x = i
y = j
return x, y
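# A minimal demo of the solver above (classic solvable puzzle; any valid board with
# "." for blanks works):
if __name__ == "__main__":
    puzzle = [
        ["5", "3", ".", ".", "7", ".", ".", ".", "."],
        ["6", ".", ".", "1", "9", "5", ".", ".", "."],
        [".", "9", "8", ".", ".", ".", ".", "6", "."],
        ["8", ".", ".", ".", "6", ".", ".", ".", "3"],
        ["4", ".", ".", "8", ".", "3", ".", ".", "1"],
        ["7", ".", ".", ".", "2", ".", ".", ".", "6"],
        [".", "6", ".", ".", ".", ".", "2", "8", "."],
        [".", ".", ".", "4", "1", "9", ".", ".", "5"],
        [".", ".", ".", ".", "8", ".", ".", "7", "9"],
    ]
    Solution().solveSudoku(puzzle)  # solves in place
    for row in puzzle:
        print(" ".join(row))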
|
1707166
|
import os, json
from urllib.request import urlopen
def download(url, typ, rewrite=False):
start = url.index('.com/')
name = 'data/' + typ + '/' + url[start+5:].replace('/', '-')
if not os.path.exists(name) or rewrite:
try:
resource = urlopen(url)
        except Exception:
            print('Does not exist!', url)
            return 1
out = open(name, 'wb')
out.write(resource.read())
out.close()
return 1
return 0
def attachments(url):
    previous = []  # re-download the previous file if the run was aborted and it got corrupted
last = 0
for i in os.listdir(url):
if '-' not in i: continue
with open(url+i, 'r') as file:
messages = json.loads(file.read())
for mes in messages:
if 'attachments' in mes:
for attachment in mes['attachments']:
next_ = ()
if attachment['type'] == 'image':
next_ = (attachment['url'], 'images')
elif attachment['type'] == 'document':
next_ = (attachment['url'], 'documents')
elif attachment['type'] == 'voice':
next_ = (attachment['url'], 'voices')
elif attachment['type'] == 'sticker':
next_ = (attachment['url'], 'stickers')
if len(next_):
new_ = download(*next_)
if last > new_:
print(previous)
if len(previous):
download(previous[0], previous[1], True)
previous = next_
last = new_
if __name__ == '__main__':
for i in ('data/dialogs/', 'data/chats/'):
attachments(i)
|
1707192
|
import sys
import os
UTILS_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'utils')
sys.path.insert(1, UTILS_DIR)
from training import train, test
if __name__ == '__main__':
"""
This assumes a pretrained actor at the actor-path.
"""
BREAK_EARLY = False
BATCH_SIZE = 500
for data_subdir in ['ml-20m', 'netflix-prize', 'msd']:
actor_path = "WMFVAE_ACTOR_TRAIN_{}".format(data_subdir)
train(
model_class='wmf_vae',
data_subdir=data_subdir,
n_epochs_pred_only=100,
n_epochs_ac_only=0,
n_epochs_pred_and_ac=0,
max_kl=0.05,
ac_reg_loss_scaler=0.0,
actor_reg_loss_scaler=0.0001,
evaluation_metric="NDCG",
logging_frequency=50,
batch_size=BATCH_SIZE,
break_early=BREAK_EARLY,
verbose=False,
positive_weights=5.0,
version_tag="FULL_RUN_ON_OTHER_DATASETS",
path_to_save_actor=actor_path,
log_critic_training_error=False,
)
print("Now, hopefully on to testing...")
test(
model_class='wmf_vae',
data_subdir=data_subdir,
n_epochs_pred_only=100,
n_epochs_ac_only=0,
n_epochs_pred_and_ac=0,
max_kl=0.05,
ac_reg_loss_scaler=0.0,
actor_reg_loss_scaler=0.0001,
evaluation_metric="NDCG",
batch_size=BATCH_SIZE,
break_early=BREAK_EARLY,
verbose=False,
positive_weights=5.0,
version_tag="FULL_RUN_ON_OTHER_DATASETS",
)
print("On to round 2! Now we'll do the critic.")
train(
model_class='wmf_vae',
data_subdir=data_subdir,
n_epochs_pred_only=0,
n_epochs_ac_only=50,
n_epochs_pred_and_ac=50,
max_kl=0.05,
ac_reg_loss_scaler=0.0,
actor_reg_loss_scaler=0.0001,
evaluation_metric="NDCG",
logging_frequency=50,
batch_size=BATCH_SIZE,
break_early=BREAK_EARLY,
verbose=False,
positive_weights=5.0,
version_tag="FULL_RUN_ON_OTHER_DATASETS",
restore_trained_actor_path=actor_path,
)
print("Now, hopefully on to testing...")
test(
model_class='wmf_vae',
data_subdir=data_subdir,
n_epochs_pred_only=0,
n_epochs_ac_only=50,
n_epochs_pred_and_ac=50,
max_kl=0.05,
ac_reg_loss_scaler=0.0,
actor_reg_loss_scaler=0.0001,
evaluation_metric="NDCG",
batch_size=BATCH_SIZE,
break_early=BREAK_EARLY,
verbose=False,
positive_weights=5.0,
version_tag="FULL_RUN_ON_OTHER_DATASETS",
restore_trained_actor_path=actor_path,
)
print("Bye bye")
exit()
# train(
# # model_class="wmf",
# # model_class='multi_dae',
# model_class="wmf_vae",
# # model_class='warp_encoder',
# n_epochs_pred_only=200,
# n_epochs_ac_only=0,
# n_epochs_pred_and_ac=0,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=0.05,
# # ac_reg_loss_scaler=0.0005,
# ac_reg_loss_scaler=0.0,
# # actor_reg_loss_scaler=0.00001,
# # actor_reg_loss_scaler=0.01,
# positive_weights=5.0,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="FULL_WMFVAE_RUN_JUST_ACTOR",
# path_to_save_actor="200_EPOCHS_WMFVAE_AT_0.05_KL_JUST_ACTOR",
# # path_to_save_last_actor="LAST_ACTOR_OF_200_epochs_HIS_KL_annealing",
# # restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# train(
# # model_class="wmf",
# # model_class='multi_dae',
# model_class="wmf_vae",
# # model_class='warp_encoder',
# n_epochs_pred_only=0,
# n_epochs_ac_only=50,
# n_epochs_pred_and_ac=100,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=0.05,
# # ac_reg_loss_scaler=0.0005,
# ac_reg_loss_scaler=0.0,
# # actor_reg_loss_scaler=0.00001,
# # actor_reg_loss_scaler=0.01,
# positive_weights=5.0,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="FULL_WMFVAE_RUN_WITH_CRITIC",
# # path_to_save_actor="200_EPOCHS_WMFVAE_AT_0.05_KL",
# # path_to_save_last_actor="LAST_ACTOR_OF_200_epochs_HIS_KL_annealing",
# restore_trained_actor_path="200_EPOCHS_WMFVAE_AT_0.05_KL_JUST_ACTOR",
# )
# print("Now that we've done the thing we really care about, let's have some fun with hyperparameters")
# for max_kl in [0.4, 0.2, 0.1, 0.01]:
# train(
# # model_class="wmf",
# # model_class='multi_dae',
# model_class="wmf_vae",
# # model_class='warp_encoder',
# n_epochs_pred_only=100,
# n_epochs_ac_only=0,
# n_epochs_pred_and_ac=0,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=max_kl,
# # ac_reg_loss_scaler=0.0005,
# ac_reg_loss_scaler=0.0,
# # actor_reg_loss_scaler=0.00001,
# actor_reg_loss_scaler=0.01,
# positive_weights=5.0,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="TESTING_HYPERPARAMETERS",
# # path_to_save_actor="200_epochs_HIS_DAE",
# # path_to_save_last_actor="LAST_ACTOR_OF_200_epochs_HIS_KL_annealing",
# # restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# print("On to the next one...")
# exit()
# print("Now, hopefully on to testing...")
# test(
# # model_class="wmf",
# model_class='multi_dae',
# # model_class='warp_encoder',
# n_epochs_pred_only=10,
# n_epochs_ac_only=0,
# n_epochs_pred_and_ac=0,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# # max_kl=0.2,
# # ac_reg_loss_scaler=0.0,
# # actor_reg_loss_scaler=0.01,
# positive_weights=5.0,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# # logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="TRAINING_DAE",
# # path_to_save_actor="200_epochs_HIS_DAE",
# # restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# print("On to round 2! Now we'll do the critic.")
# train(
# # model_class="wmf",
# model_class='multi_dae',
# # model_class='warp_encoder',
# n_epochs_pred_only=0,
# n_epochs_ac_only=50,
# n_epochs_pred_and_ac=100,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=0.2,
# # ac_reg_loss_scaler=0.0005,
# ac_reg_loss_scaler=0.0,
# # actor_reg_loss_scaler=0.00001,
# actor_reg_loss_scaler=0.01,
# # positive_weights=positive_weights,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="TRAINING_DAE",
# # path_to_save_actor="200_epochs_HIS_DAE",
# # path_to_save_last_actor="LAST_ACTOR_OF_200_epochs_HIS_KL_annealing",
# restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# print("Now, hopefully on to testing...")
# test(
# # model_class="wmf",
# model_class='multi_dae',
# # model_class='warp_encoder',
# n_epochs_pred_only=0,
# n_epochs_ac_only=50,
# n_epochs_pred_and_ac=100,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=0.2,
# ac_reg_loss_scaler=0.0,
# actor_reg_loss_scaler=0.01,
# # positive_weights=positive_weights,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# # logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="TRAINING_DAE",
# # path_to_save_actor="200_epochs_HIS_DAE",
# restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# print("Bye bye")
# exit()
|
1707197
|
from fastapi import APIRouter
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
from httpx import HTTPError
from app.core.utils import has_inquest_api_key, has_virustotal_api_key
from app.schemas.eml import Attachment
from app.schemas.submission import SubmissionResult
from app.submitters.inquest import InQuestSubmitter
from app.submitters.virustotal import VirusTotalSubmitter
router = APIRouter()
@router.post(
"/inquest",
response_model=SubmissionResult,
response_description="Return a submission result",
summary="Submit an attachment to InQuest",
description="Submit an attachment to InQuest",
status_code=200,
)
async def submit_to_inquest(attachment: Attachment) -> SubmissionResult:
if not has_inquest_api_key():
return JSONResponse(
status_code=403,
content=jsonable_encoder({"detail": "You don't have the InQuest API key"}),
)
# check ext type
valid_types = ["doc", "docx", "ppt", "pptx", "xls", "xlsx"]
if attachment.extension not in valid_types:
return JSONResponse(
status_code=415,
content=jsonable_encoder(
{"detail": f"{attachment.extension} is not supported."}
),
)
submitter = InQuestSubmitter(attachment)
try:
return await submitter.submit()
except HTTPError as e:
detail = f"Something went wrong with InQuest submission: {str(e)}"
return JSONResponse(
status_code=500, content=jsonable_encoder({"detail": detail})
)
@router.post(
"/virustotal",
response_model=SubmissionResult,
response_description="Return a submission result",
summary="Submit an attachment to VirusTotal",
description="Submit an attachment to VirusTotal",
status_code=200,
)
async def submit_to_virustotal(attachment: Attachment) -> SubmissionResult:
if not has_virustotal_api_key():
return JSONResponse(
status_code=403,
content=jsonable_encoder(
{"detail": "You don't have the VirusTotal API key"}
),
)
submitter = VirusTotalSubmitter(attachment)
try:
return await submitter.submit()
except HTTPError as e:
detail = f"Something went wrong with VirusTotal submission: {str(e)}"
return JSONResponse(
status_code=500, content=jsonable_encoder({"detail": detail})
)
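# A minimal usage sketch (hypothetical host/port and payload; the real Attachment
# fields are defined by app.schemas.eml, and the final path depends on where this
# router is mounted):
#
#   curl -X POST http://localhost:8000/inquest \
#        -H 'Content-Type: application/json' \
#        -d '{"extension": "docx", ...}'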
|
1707248
|
import pymel.core as pm
import logging
log = logging.getLogger("ui")
class BaseTemplate(pm.ui.AETemplate):
def addControl(self, control, label=None, **kwargs):
pm.ui.AETemplate.addControl(self, control, label=label, **kwargs)
def beginLayout(self, name, collapse=True):
pm.ui.AETemplate.beginLayout(self, name, collapse=collapse)
class AEkrayNodeTemplate(BaseTemplate):
def __init__(self, nodeName):
        BaseTemplate.__init__(self, nodeName)
self.thisNode = None
self.node = pm.PyNode(self.nodeName)
self.buildBody(nodeName)
log.debug("AEkrayNodeTemplate")
def buildKrayTemplates(self, nodeName):
self.thisNode = pm.PyNode(nodeName)
if self.thisNode.type() == "camera":
log.debug("buildKrayTemplates:build camera AE")
self.beginLayout("Kray" ,collapse=1)
self.endLayout()
def buildBody(self, nodeName):
self.buildKrayTemplates(nodeName)
|
1707261
|
import tensorflow as tf
from easy_rec.python.core.metrics import metric_learning_average_precision_at_k
from easy_rec.python.core.metrics import metric_learning_recall_at_k
from easy_rec.python.layers import dnn
from easy_rec.python.layers.common_layers import gelu
from easy_rec.python.layers.common_layers import highway
from easy_rec.python.loss.circle_loss import circle_loss
from easy_rec.python.loss.multi_similarity import ms_loss
from easy_rec.python.model.easy_rec_model import EasyRecModel
from easy_rec.python.protos.loss_pb2 import LossType
from easy_rec.python.utils.proto_util import copy_obj
from easy_rec.python.core.distribute_metrics import distribute_metric_learning_average_precision_at_k # NOQA
from easy_rec.python.core.distribute_metrics import distribute_metric_learning_recall_at_k # NOQA
from easy_rec.python.protos.collaborative_metric_learning_pb2 import CoMetricLearningI2I as MetricLearningI2IConfig # NOQA
if tf.__version__ >= '2.0':
tf = tf.compat.v1
class CoMetricLearningI2I(EasyRecModel):
def __init__(
self,
model_config, # pipeline.model_config
feature_configs, # pipeline.feature_configs
features, # same as model_fn input
labels=None,
is_training=False):
super(CoMetricLearningI2I, self).__init__(model_config, feature_configs,
features, labels, is_training)
model = self._model_config.WhichOneof('model')
assert model == 'metric_learning', 'invalid model config: %s' % model
self._loss_type = self._model_config.loss_type
loss_type_name = LossType.Name(self._loss_type).lower()
self._model_config = self._model_config.metric_learning
assert isinstance(self._model_config, MetricLearningI2IConfig)
model_loss = self._model_config.WhichOneof('loss').lower()
assert model_loss == loss_type_name, 'invalid loss type: %s' % model_loss
if self._loss_type == LossType.CIRCLE_LOSS:
self.loss = self._model_config.circle_loss
elif self._loss_type == LossType.MULTI_SIMILARITY_LOSS:
self.loss = self._model_config.multi_similarity_loss
else:
raise ValueError('unsupported loss type: %s' %
LossType.Name(self._loss_type))
self._highway_features = {}
self._highway_num = len(self._model_config.highway)
for _id in range(self._highway_num):
highway_cfg = self._model_config.highway[_id]
highway_feature, _ = self._input_layer(self._feature_dict,
highway_cfg.input)
self._highway_features[highway_cfg.input] = highway_feature
self.input_features = []
if self._model_config.HasField('input'):
input_feature, _ = self._input_layer(self._feature_dict,
self._model_config.input)
self.input_features.append(input_feature)
self.dnn = copy_obj(self._model_config.dnn)
if self._labels is not None:
if self._model_config.HasField('session_id'):
self.session_ids = self._labels.pop(self._model_config.session_id)
else:
self.session_ids = None
assert len(self._labels) > 0
self.labels = list(self._labels.values())[0]
if self._model_config.HasField('sample_id'):
self.sample_id = self._model_config.sample_id
else:
self.sample_id = None
def build_predict_graph(self):
for _id in range(self._highway_num):
highway_cfg = self._model_config.highway[_id]
highway_fea = tf.layers.batch_normalization(
self._highway_features[highway_cfg.input],
training=self._is_training,
trainable=True,
name='highway_%s_bn' % highway_cfg.input)
highway_fea = highway(
highway_fea,
highway_cfg.emb_size,
activation=gelu,
scope='highway_%s' % _id)
print('highway_fea: ', highway_fea)
self.input_features.append(highway_fea)
feature = tf.concat(self.input_features, axis=1)
num_dnn_layer = len(self.dnn.hidden_units)
last_hidden = self.dnn.hidden_units.pop()
dnn_net = dnn.DNN(self.dnn, self._l2_reg, 'dnn', self._is_training)
net_output = dnn_net(feature)
tower_emb = tf.layers.dense(
inputs=net_output,
units=last_hidden,
kernel_regularizer=self._l2_reg,
name='dnn/dnn_%d' % (num_dnn_layer - 1))
if self._model_config.output_l2_normalized_emb:
norm_emb = tf.nn.l2_normalize(tower_emb, axis=-1)
self._prediction_dict['norm_emb'] = norm_emb
self._prediction_dict['norm_embedding'] = tf.reduce_join(
tf.as_string(norm_emb), axis=-1, separator=',')
self._prediction_dict['float_emb'] = tower_emb
self._prediction_dict['embedding'] = tf.reduce_join(
tf.as_string(tower_emb), axis=-1, separator=',')
if self.sample_id is not None and self.sample_id in self._feature_dict:
self._prediction_dict['sample_id'] = tf.identity(
self._feature_dict[self.sample_id])
return self._prediction_dict
def build_loss_graph(self):
emb = self._prediction_dict['float_emb']
emb_normed = self._model_config.output_l2_normalized_emb
norm_emb = self._prediction_dict['norm_emb'] if emb_normed else emb
if self._loss_type == LossType.CIRCLE_LOSS:
self._loss_dict['circle_loss'] = circle_loss(
norm_emb,
self.labels,
self.session_ids,
self.loss.margin,
self.loss.gamma,
embed_normed=emb_normed)
elif self._loss_type == LossType.MULTI_SIMILARITY_LOSS:
self._loss_dict['ms_loss'] = ms_loss(
norm_emb,
self.labels,
self.session_ids,
self.loss.alpha,
self.loss.beta,
self.loss.lamb,
self.loss.eps,
embed_normed=emb_normed)
else:
raise ValueError('invalid loss type: %s' % LossType.Name(self._loss_type))
return self._loss_dict
def get_outputs(self):
outputs = ['embedding', 'float_emb']
if self.sample_id is not None and 'sample_id' in self._prediction_dict:
outputs.append('sample_id')
if self._model_config.output_l2_normalized_emb:
outputs.append('norm_embedding')
outputs.append('norm_emb')
return outputs
def build_metric_graph(self, eval_config):
metric_dict = {}
recall_at_k = []
precision_at_k = []
for metric in eval_config.metrics_set:
if metric.WhichOneof('metric') == 'recall_at_topk':
recall_at_k.append(metric.recall_at_topk.topk)
elif metric.WhichOneof('metric') == 'precision_at_topk':
precision_at_k.append(metric.precision_at_topk.topk)
emb = self._prediction_dict['float_emb']
if len(recall_at_k) > 0:
metric_dict.update(
metric_learning_recall_at_k(recall_at_k, emb, self.labels,
self.session_ids))
if len(precision_at_k) > 0:
metric_dict.update(
metric_learning_average_precision_at_k(precision_at_k, emb,
self.labels, self.session_ids))
return metric_dict
def build_distribute_metric_graph(self, eval_config):
metric_dict = {}
recall_at_k = []
precision_at_k = []
for metric in eval_config.metrics_set:
if metric.WhichOneof('metric') == 'recall_at_topk':
recall_at_k.append(metric.recall_at_topk.topk)
elif metric.WhichOneof('metric') == 'precision_at_topk':
precision_at_k.append(metric.precision_at_topk.topk)
emb = self._prediction_dict['float_emb']
if len(recall_at_k) > 0:
metric_dict.update(
distribute_metric_learning_recall_at_k(recall_at_k, emb, self.labels,
self.session_ids))
if len(precision_at_k) > 0:
metric_dict.update(
distribute_metric_learning_average_precision_at_k(
precision_at_k, emb, self.labels, self.session_ids))
return metric_dict
|
1707268
|
from decorator import decorate
def _deprecated(func, *args, **kw):
return func(*args, **kw)
def deprecated(func):
return decorate(func, _deprecated)
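# A minimal usage sketch: the decorator is currently a transparent pass-through
# (decorator.decorate preserves the wrapped function's signature), so call sites
# can be marked as deprecated without changing behavior.
#
#   @deprecated
#   def legacy_sum(a, b):  # hypothetical function
#       return a + b
#
#   legacy_sum(1, 2)  # -> 3, exactly as if undecorated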
|
1707273
|
import os
import numpy as np
import cv2
from paddle.fluid.io import Dataset
class LaneDataSet(Dataset):
def __init__(self, dataset_path, data_list='train', transform=None, is_val=False):
self.img = os.listdir(os.path.join(dataset_path, data_list))
self.is_val = is_val
self.is_testing = 'test' in data_list
if not self.is_testing:
self.sky = ['10011130', '10010014', '10024306', '10010116', '10008480', '10016709', '10016688', '10012704',
'10016634', '10010679', '10024403', '10013078', '10010443', '10016355', '10014527', '10020544']
print("{} images loaded.".format(len(self.img)))
self.img_list = [os.path.join(dataset_path, data_list, x) for x in self.img]
if 'train' in data_list:
self.label_list = [x.replace(data_list, 'train_label').replace('jpg', 'png') for x in self.img_list]
self.transform = transform
def __len__(self):
return len(self.img_list)
def __getitem__(self, idx):
image = cv2.imread(self.img_list[idx])
# im_copy = np.copy(image)
size = image.shape
if not self.is_testing:
label = cv2.imread(self.label_list[idx], cv2.IMREAD_UNCHANGED)
if not self.is_val:
crop_height = int(size[0] * 1 / 3)
if self.img[idx][:8] in self.sky:
# h = np.random.randint(crop_height + 1)
# image = image[h:h + size[0] - crop_height]
# label = label[h:h + size[0] - crop_height]
image = image[:(size[0] - crop_height)]
label = label[:(size[0] - crop_height)]
else:
image = image[crop_height:]
label = label[crop_height:]
if self.transform:
if self.is_testing:
for transform in self.transform:
image = transform(image)
# import matplotlib.pyplot as plt
# image += np.array([103.939, 116.779, 123.68])
# image = image[:, :, ::-1].astype(np.uint8)
# plt.imshow(image)
# plt.show()
return np.transpose(image, (2, 0, 1)).astype('float32'), self.img[idx], size
else:
for transform in self.transform:
image, label = transform((image, label))
# if (label == 17).any() or (label == 16).any() or (label == 9).any() or (label == 10).any():
# import matplotlib.pyplot as plt
# image += np.array([103.939, 116.779, 123.68])
# image = image[:, :, ::-1].astype(np.uint8)
# plt.imshow(im_copy[:, :, ::-1].astype(np.uint8))
# plt.show()
# plt.imshow(image)
# plt.show()
# plt.imshow((label * 10).astype(np.uint8))
# plt.show()
return np.transpose(image, (2, 0, 1)).astype('float32'), label.astype('int64')
def collate_fn(batch):
img = [x[0] for x in batch]
name = np.array([int(x[1].replace('.jpg', '')) for x in batch])
size = np.array([x[2] for x in batch])
img = np.stack(img, axis=0)
return [img, name, size]
|
1707315
|
import numpy as np
import unittest
from algorithm_ncs.benchmark import benchmark_func
from algorithm_ncs.problem import load_problem
def load_test_data(file_path):
with open(file_path, "r") as f:
lines_data = f.readlines()
lines = []
for data in lines_data:
line = []
for d in data.split(" "):
if d != "":
line.append(float(d))
lines.append(line)
x = np.asarray(lines[0:10])
v = np.asarray(lines[10:]).reshape(10)
return x, v
def test_func(problem_index):
file_path = "datasets_ncs/test_data_func{}.txt".format(problem_index)
x, v = load_test_data(file_path)
para = load_problem(problem_index, 50)
res = benchmark_func(x, problem_index, para)
return v, res
class FuncTest(unittest.TestCase):
def setUp(self) -> None:
pass
def tearDown(self) -> None:
pass
def test_fun6(self):
v, res = test_func(6)
for i in range(len(res)):
self.assertTrue(abs((v[i]-res[i])/v[i]) < 0.001)
def test_fun12(self):
v, res = test_func(12)
for i in range(len(res)):
self.assertTrue(abs((v[i]-res[i])/v[i]) < 0.001)
def test_fun9(self):
v, res = test_func(9)
for i in range(len(res)):
self.assertTrue(abs((v[i]-res[i])/v[i]) < 0.001)
def test_fun10(self):
v, res = test_func(10)
for i in range(len(res)):
self.assertTrue(abs((v[i]-res[i])/v[i]) < 0.001)
if __name__ == '__main__':
unittest.main()
|
1707335
|
import os
import sys
import random
import argparse
sys.path.insert(1, os.path.join(sys.path[0], '../..'))
from utils.utilities import (mkdir, write_lst)
random.seed(1234)
instr_tags = "vn,vc,va,fl,cl,sax,tpt,tbn,bn,hn,tba,db,ob"
instrs = "Violin,Cello,Viola,Flute,Clarinet,Saxophone,Trumpet,Trombone,Bassoon,Horn,Tuba,Double_Bass,Oboe"
tag2instr = {}
seen = "Violin,Cello,Viola,Flute,Clarinet,Saxophone,Trumpet,Trombone"
unseen = "Horn,Tuba,Double_Bass,Bassoon,Oboe"
skips = ""
instr_tags = instr_tags.split(',')
instrs = instrs.split(',')
seen = seen.split(',')
unseen = unseen.split(',')
skips = skips.split(',')
for i, tag in enumerate(instr_tags):
tag2instr[tag] = instrs[i]
def get_all_audios(folder):
audios = {}
tracks_num = 0
sample_folders = os.listdir(folder)
for sample in sample_folders:
sample_path = os.path.join(folder, sample)
tracks = os.listdir(sample_path)
if len(sample.split('_')) < 2:
continue
sampleName = sample.split('_')[1]
sample_instrs = sample.split('_')[2:]
if sampleName not in audios:
audios[sampleName] = {}
for track in tracks:
            if not track.endswith("ref.txt"):
                continue
            track = track.replace("_ref.txt", ".h5")
#track = str.replace(track, "_TRAIN.h5", "_TEST.h5")
track_path = os.path.join(sample_path, track)
track_name = track.split("_")[1]
instr = tag2instr[track.split("_")[2]]
if instr not in audios[sampleName]:
audios[sampleName][instr] = {}
if track_name not in audios[sampleName][instr]:
tracks_num += 1
audios[sampleName][instr][track_name] = track_path
seen_audios = []
unseen_audios = []
for songName in audios:
for instr in audios[songName]:
if instr in seen:
seen_audios.append(songName)
else:
unseen_audios.append(songName)
train_lst = {}
test_lst = {}
for songName in audios:
if songName in unseen_audios:
instrs = {}
instrs_num = 0
for instr in audios[songName]:
if instr not in instrs:
instrs[instr] = []
for track in audios[songName][instr]:
instrs[instr].append(audios[songName][instr][track])
instrs_num += len(instrs[instr])
instrs = sorted(instrs.items(), key=lambda d: -len(d[1]))
show = [{instr[0]:len(instr[1])} for instr in instrs]
print(show)
data_lst = []
for instr in instrs:
if len(instr[1]) > instrs_num // 2:
print("aaaaaaaaaaaaaaaaaaaaaaaah")
for track in instr[1]:
data_lst.append([instr[0], track])
total = len(data_lst)
pairs = []
        for i, track in enumerate(data_lst):
            j = total - 1 - i
            if j == i:
                j = 0
            pairs.append([track[0], data_lst[j][0], track[1], data_lst[j][1]])
            if i + 1 >= (total + 1) // 2:
                break
test_lst[songName] = {"test" : pairs, "query" : []}
else:
for instr in audios[songName]:
if instr not in train_lst:
train_lst[instr] = []
for track in audios[songName][instr]:
                    train_lst[instr].append(audios[songName][instr][track].replace("_TEST.h5", ".h5"))
print("\nseen:\n")
compute_instr_samples(audios, songNames=None, skipNames=unseen_audios)
print("\nunseen:\n")
compute_instr_samples(audios, songNames=unseen_audios)
print("\nall:\n")
compute_instr_samples(audios)
query_lst = []
songs_lst = []
songs_num = len(test_lst)
for test in test_lst:
songs_lst.append(test)
for i, test in enumerate(test_lst):
for pair in test_lst[test]["test"]:
query = []
query += pair[:2]
for j in range(2):
path = None
while path is None:
song_id = random.randint(0, songs_num - 1)
if song_id == i:
continue
query_pairs = test_lst[songs_lst[song_id]]["test"]
for query_pair in query_pairs:
for k in range(2):
if query_pair[k] == pair[j] and not query_pair[k + 2] == pair[j + 2]:
path = query_pair[k + 2]
query.append(path)
break
if path is not None:
break
test_lst[test]["query"] += [query]
return audios, train_lst, test_lst
def compute_instr_samples(audios, songNames=None, skipNames=None):
samples = {}
num = 0
for songName in audios:
if songNames is not None and songName not in songNames:
continue
if skipNames is not None and songName in skipNames:
continue
for instr in audios[songName]:
if instr not in samples:
samples[instr] = 0
num += len(audios[songName][instr])
samples[instr] += len(audios[songName][instr])
total_num = 0
for instr in samples:
total_num += samples[instr]
print(instr, samples[instr])
print(total_num, num)
return samples
def save_train_lst(data, output_folder):
for instr in data:
instr_folder = os.path.join(output_folder, instr)
mkdir(instr_folder)
path = os.path.join(instr_folder, "train.lst")
write_lst(path, data[instr])
def save_test_lst(data, output_folder):
testset_folder = os.path.join(output_folder, "testset")
mkdir(testset_folder)
test_lst = []
query_lst = []
for songName in data:
test_lst += data[songName]["test"]
query_lst += data[songName]["query"]
test_lst = [f"{t[0]},{t[1]}\t{t[2]},{t[3]}" for t in test_lst]
query_lst = [f"{t[0]},{t[1]}\t{t[2]},{t[3]}" for t in query_lst]
print("test set", len(test_lst))
test_lst_path = os.path.join(testset_folder, "test.lst")
query_lst_path = os.path.join(testset_folder, "query.lst")
write_lst(test_lst_path, test_lst)
write_lst(query_lst_path, query_lst)
if __name__=="__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('--feature_dir', type=str, required=True, help='Directory of generated dataset.')
parser.add_argument('--data_dir', type=str, required=True, help='Directory to store generated files.')
args = parser.parse_args()
folder = args.feature_dir
output_folder = args.data_dir
audios, train_lst, test_lst = get_all_audios(folder)
save_train_lst(train_lst, output_folder)
save_test_lst(test_lst, output_folder)
instr_samples = compute_instr_samples(audios)
|
1707366
|
from unittest import TestCase
import re
import triegex
class TriegexTest(TestCase):
def findall(self, triegex, string):
return re.findall(triegex.to_regex(), string)
def test_basic(self):
t = triegex.Triegex('Jon')
        self.assertListEqual(self.findall(t, 'Jon Snow'), ['Jon'])
def test_empty_triegex_matches_nothing(self):
t = triegex.Triegex()
self.assertListEqual(self.findall(t, 'foo'), [], 'Should match nothing: {}'.format(t.to_regex()))
def test_multiple_words(self):
t = triegex.Triegex('Jon', 'Tyrion', 'Sam', 'Bran')
self.assertListEqual(self.findall(t, 'Jon & Sam'), ['Jon', 'Sam'])
def test_word_boundary_is_handled(self):
t = triegex.Triegex('Sam')
self.assertListEqual([], self.findall(t, 'Samwell'))
self.assertListEqual(['Sam'], self.findall(t, 'Sam` Tarly'))
def test_optimized(self):
t = triegex.Triegex('Jon', 'Jorah')
self.assertEqual(r'(?:Jo(?:n\b|rah\b)|~^(?#match nothing))', t.to_regex())
class TriegexMutableSetInterfaceTest(TestCase):
def test_iter(self):
self.assertListEqual(list(triegex.Triegex('foo')), ['foo'])
def test_contains(self):
self.assertIn('Jaime', triegex.Triegex('Jaime', 'Lannister'))
self.assertNotIn('Stannis', triegex.Triegex('Kings Landing'))
def test_len(self):
t = triegex.Triegex()
self.assertEqual(len(t), 0)
t.add('Sansa')
self.assertEqual(len(t), 1)
    def test_discard(self):
t = triegex.Triegex()
t.add('Hound')
self.assertIn('Hound', t)
t.discard('Hound')
self.assertNotIn('Hound', t)
|
1707415
|
import functools
import os
import pickle
import torch
from torch import distributed as dist
def is_main_process():
rank, _ = get_dist_info()
return rank == 0
def get_dist_info():
if dist.is_available() and dist.is_initialized():
rank = dist.get_rank()
world_size = dist.get_world_size()
else:
rank = 0
world_size = 1
return rank, world_size
def init_dist(backend='nccl', **kwargs):
rank = int(os.environ['RANK'])
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(rank % num_gpus)
dist.init_process_group(backend=backend, **kwargs)
def master_only(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if is_main_process():
return func(*args, **kwargs)
return wrapper
def collect_results_gpu(result_part, size):
rank, world_size = get_dist_info()
if world_size == 1:
return result_part
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [part_tensor.new_zeros(shape_max) for _ in range(world_size)]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
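# A minimal usage sketch (assumes an initialized process group and one CUDA device
# per rank): every rank passes in its partial results, and only rank 0 receives the
# merged, size-truncated list.
#
#   results = collect_results_gpu(my_part, size=len(dataset))
#   if is_main_process():
#       evaluate(results)  # 'evaluate' is a hypothetical consumer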
|
1707422
|
const_T = Hyper()
const_M = Hyper()
@Runtime([const_M, const_M, const_M], const_M)
def Update(prev, cur, offset):
return (prev + cur + offset) % 2
offset = Param(const_M)
do_anything = Param(2)
initial_tape = Input(const_M)[2]
tape = Var(const_M)[const_T]
for t in range(2):
tape[t].set_to(initial_tape[t])
for t in range(2, const_T):
if do_anything == 1:
tape[t].set_to(Update(tape[t - 2], tape[t - 1], offset))
elif do_anything == 0:
tape[t].set_to(tape[t - 1])
final_tape = Output(const_M)
final_tape.set_to(tape[const_T - 1])
|
1707423
|
import itertools
import vim
from pathfinder.debytes import debytes
last_output = None
def strtrans(string):
"""Convert special characters like '' to '^D'."""
escaped_string = string.replace("'", "\\'").replace("\\", "\\\\")
return vim.eval(f"strtrans('{escaped_string}')")
def get_count(motion, count):
"""Build a string like 'k', 'hh', '15w'"""
motion_str = strtrans(motion.motion + (motion.argument or ""))
if count == 1:
return motion_str
elif count == 2 and len(motion_str) == 1:
# It's easier to press a single-character motion twice
# than to type a 2 before it
        return motion_str * 2
return str(count) + motion_str
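# For illustration (assuming Motion objects with .motion and .argument, and a live
# Vim session for strtrans): a count of 1 yields 'j', a count of 2 on a one-key
# motion yields 'jj' (doubling beats typing the count), and larger counts yield
# strings like '15w'.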
def compact_motions(motions):
"""
Return the given motion sequence in single-line form.
e.g. 2* 5j $
"""
return " ".join(
[
get_count(motion, len(list(group)))
for motion, group in itertools.groupby(motions)
]
)
def get_description(motion, repetitions):
description = debytes(vim.vars["pf_descriptions"][motion.motion])
description = description.replace("{count}", str(repetitions))
if motion.argument is not None:
description = description.replace("{argument}", motion.argument)
return description
def explained_motions(motions):
"""
Yield each motion in the form "motion <padding> help"
e.g. ['5j Down 5 lines', '$ To the end of the line']
"""
for motion, group in itertools.groupby(motions):
repetitions = len(list(group))
yield (
get_count(motion, repetitions) + " " + get_description(motion, repetitions)
)
|
1707468
|
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto import Random
# travis encrypt PASSWORD=password -a -x
sha = SHA256.new()
sha.update(input('Password:').encode())
key = sha.hexdigest()[:AES.block_size * 2].encode()  # 32 hex chars -> 32-byte AES-256 key
text = open('emails.txt', 'rb').read()
iv = text[:AES.block_size]
cipher = text[AES.block_size:]
aes = AES.new(key, AES.MODE_CBC, iv)
plain = aes.decrypt(cipher).strip().decode()
print('Decrypted: "' + plain + '"')
plain += ',' + input('Append data:')
plain += ' ' * (AES.block_size - len(plain) % AES.block_size)  # pad with spaces to a block boundary
iv = Random.new().read(AES.block_size)
aes = AES.new(key, AES.MODE_CBC, iv)
cipher = aes.encrypt(plain.encode())
open('emails.txt', 'wb').write(iv + cipher)
|
1707489
|
params = dict()
params['num_classes'] = 101
params['dataset'] = '/home/Dataset/UCF-101-origin'
#params['dataset'] = '/home/Dataset/hmdb'
params['epoch_num'] = 150  # 600
params['batch_size'] = 8
params['step'] = 10
params['num_workers'] = 4
params['learning_rate'] = 0.001
params['momentum'] = 0.9
params['weight_decay'] = 0.0005
params['display'] = 100  # 10
params['pretrained'] = None
params['gpu'] = [0]
params['log'] = 'log'
#params['save_path'] = 'UCF101'
params['save_path_base']='/home/Workspace/PRP/outputs/'
params['data']='UCF-101'
|
1707541
|
import requests
APEX_VALUES = ['172.16.31.10']
CNAME_VALUE = ["domains.tumblr.com"]
RESPONSE_FINGERPRINT = "Whatever you were looking for doesn't currently exist at this address."
def detector(domain, ip, cname):
    if ip in APEX_VALUES:
        return True
    if any(value in cname for value in CNAME_VALUE):
        return True
    try:
        # last resort: fetch the page and look for the takeover fingerprint
        if RESPONSE_FINGERPRINT in requests.get('http://%s' % domain).text:
            return True
    except Exception:
        pass
    return False
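# A minimal usage sketch (hypothetical domain; the fingerprint check performs a
# live HTTP request):
#
#   detector('blog.example.com', '172.16.31.10', '')               # True via apex IP
#   detector('blog.example.com', '1.2.3.4', 'domains.tumblr.com')  # True via CNAME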
|
1707558
|
import pytest
from tasktiger import Task, TaskNotFound
from .tasks import simple_task
from .utils import get_tiger
class TestTaskFromId:
@pytest.fixture
def tiger(self):
return get_tiger()
@pytest.fixture
def queued_task(self, tiger):
return tiger.delay(simple_task)
def test_task_found(self, tiger, queued_task):
task = Task.from_id(tiger, "default", "queued", queued_task.id)
assert queued_task.id == task.id
def test_task_wrong_state(self, tiger, queued_task):
with pytest.raises(TaskNotFound):
Task.from_id(tiger, "default", "active", queued_task.id)
def test_task_wrong_queue(self, tiger, queued_task):
with pytest.raises(TaskNotFound):
Task.from_id(tiger, "other", "active", queued_task.id)
|
1707569
|
import numpy
from PIL import Image
pascal_colormap = [
0 , 0, 0,
0.5020, 0, 0,
0, 0.5020, 0,
0.5020, 0.5020, 0,
0, 0, 0.5020,
0.5020, 0, 0.5020,
0, 0.5020, 0.5020,
0.5020, 0.5020, 0.5020,
0.2510, 0, 0,
0.7529, 0, 0,
0.2510, 0.5020, 0,
0.7529, 0.5020, 0,
0.2510, 0, 0.5020,
0.7529, 0, 0.5020,
0.2510, 0.5020, 0.5020,
0.7529, 0.5020, 0.5020,
0, 0.2510, 0,
0.5020, 0.2510, 0,
0, 0.7529, 0,
0.5020, 0.7529, 0,
0, 0.2510, 0.5020,
0.5020, 0.2510, 0.5020,
0, 0.7529, 0.5020,
0.5020, 0.7529, 0.5020,
0.2510, 0.2510, 0,
0.7529, 0.2510, 0,
0.2510, 0.7529, 0,
0.7529, 0.7529, 0,
0.2510, 0.2510, 0.5020,
0.7529, 0.2510, 0.5020,
0.2510, 0.7529, 0.5020,
0.7529, 0.7529, 0.5020,
0, 0, 0.2510,
0.5020, 0, 0.2510,
0, 0.5020, 0.2510,
0.5020, 0.5020, 0.2510,
0, 0, 0.7529,
0.5020, 0, 0.7529,
0, 0.5020, 0.7529,
0.5020, 0.5020, 0.7529,
0.2510, 0, 0.2510,
0.7529, 0, 0.2510,
0.2510, 0.5020, 0.2510,
0.7529, 0.5020, 0.2510,
0.2510, 0, 0.7529,
0.7529, 0, 0.7529,
0.2510, 0.5020, 0.7529,
0.7529, 0.5020, 0.7529,
0, 0.2510, 0.2510,
0.5020, 0.2510, 0.2510,
0, 0.7529, 0.2510,
0.5020, 0.7529, 0.2510,
0, 0.2510, 0.7529,
0.5020, 0.2510, 0.7529,
0, 0.7529, 0.7529,
0.5020, 0.7529, 0.7529,
0.2510, 0.2510, 0.2510,
0.7529, 0.2510, 0.2510,
0.2510, 0.7529, 0.2510,
0.7529, 0.7529, 0.2510,
0.2510, 0.2510, 0.7529,
0.7529, 0.2510, 0.7529,
0.2510, 0.7529, 0.7529,
0.7529, 0.7529, 0.7529,
0.1255, 0, 0,
0.6275, 0, 0,
0.1255, 0.5020, 0,
0.6275, 0.5020, 0,
0.1255, 0, 0.5020,
0.6275, 0, 0.5020,
0.1255, 0.5020, 0.5020,
0.6275, 0.5020, 0.5020,
0.3765, 0, 0,
0.8784, 0, 0,
0.3765, 0.5020, 0,
0.8784, 0.5020, 0,
0.3765, 0, 0.5020,
0.8784, 0, 0.5020,
0.3765, 0.5020, 0.5020,
0.8784, 0.5020, 0.5020,
0.1255, 0.2510, 0,
0.6275, 0.2510, 0,
0.1255, 0.7529, 0,
0.6275, 0.7529, 0,
0.1255, 0.2510, 0.5020,
0.6275, 0.2510, 0.5020,
0.1255, 0.7529, 0.5020,
0.6275, 0.7529, 0.5020,
0.3765, 0.2510, 0,
0.8784, 0.2510, 0,
0.3765, 0.7529, 0,
0.8784, 0.7529, 0,
0.3765, 0.2510, 0.5020,
0.8784, 0.2510, 0.5020,
0.3765, 0.7529, 0.5020,
0.8784, 0.7529, 0.5020,
0.1255, 0, 0.2510,
0.6275, 0, 0.2510,
0.1255, 0.5020, 0.2510,
0.6275, 0.5020, 0.2510,
0.1255, 0, 0.7529,
0.6275, 0, 0.7529,
0.1255, 0.5020, 0.7529,
0.6275, 0.5020, 0.7529,
0.3765, 0, 0.2510,
0.8784, 0, 0.2510,
0.3765, 0.5020, 0.2510,
0.8784, 0.5020, 0.2510,
0.3765, 0, 0.7529,
0.8784, 0, 0.7529,
0.3765, 0.5020, 0.7529,
0.8784, 0.5020, 0.7529,
0.1255, 0.2510, 0.2510,
0.6275, 0.2510, 0.2510,
0.1255, 0.7529, 0.2510,
0.6275, 0.7529, 0.2510,
0.1255, 0.2510, 0.7529,
0.6275, 0.2510, 0.7529,
0.1255, 0.7529, 0.7529,
0.6275, 0.7529, 0.7529,
0.3765, 0.2510, 0.2510,
0.8784, 0.2510, 0.2510,
0.3765, 0.7529, 0.2510,
0.8784, 0.7529, 0.2510,
0.3765, 0.2510, 0.7529,
0.8784, 0.2510, 0.7529,
0.3765, 0.7529, 0.7529,
0.8784, 0.7529, 0.7529,
0, 0.1255, 0,
0.5020, 0.1255, 0,
0, 0.6275, 0,
0.5020, 0.6275, 0,
0, 0.1255, 0.5020,
0.5020, 0.1255, 0.5020,
0, 0.6275, 0.5020,
0.5020, 0.6275, 0.5020,
0.2510, 0.1255, 0,
0.7529, 0.1255, 0,
0.2510, 0.6275, 0,
0.7529, 0.6275, 0,
0.2510, 0.1255, 0.5020,
0.7529, 0.1255, 0.5020,
0.2510, 0.6275, 0.5020,
0.7529, 0.6275, 0.5020,
0, 0.3765, 0,
0.5020, 0.3765, 0,
0, 0.8784, 0,
0.5020, 0.8784, 0,
0, 0.3765, 0.5020,
0.5020, 0.3765, 0.5020,
0, 0.8784, 0.5020,
0.5020, 0.8784, 0.5020,
0.2510, 0.3765, 0,
0.7529, 0.3765, 0,
0.2510, 0.8784, 0,
0.7529, 0.8784, 0,
0.2510, 0.3765, 0.5020,
0.7529, 0.3765, 0.5020,
0.2510, 0.8784, 0.5020,
0.7529, 0.8784, 0.5020,
0, 0.1255, 0.2510,
0.5020, 0.1255, 0.2510,
0, 0.6275, 0.2510,
0.5020, 0.6275, 0.2510,
0, 0.1255, 0.7529,
0.5020, 0.1255, 0.7529,
0, 0.6275, 0.7529,
0.5020, 0.6275, 0.7529,
0.2510, 0.1255, 0.2510,
0.7529, 0.1255, 0.2510,
0.2510, 0.6275, 0.2510,
0.7529, 0.6275, 0.2510,
0.2510, 0.1255, 0.7529,
0.7529, 0.1255, 0.7529,
0.2510, 0.6275, 0.7529,
0.7529, 0.6275, 0.7529,
0, 0.3765, 0.2510,
0.5020, 0.3765, 0.2510,
0, 0.8784, 0.2510,
0.5020, 0.8784, 0.2510,
0, 0.3765, 0.7529,
0.5020, 0.3765, 0.7529,
0, 0.8784, 0.7529,
0.5020, 0.8784, 0.7529,
0.2510, 0.3765, 0.2510,
0.7529, 0.3765, 0.2510,
0.2510, 0.8784, 0.2510,
0.7529, 0.8784, 0.2510,
0.2510, 0.3765, 0.7529,
0.7529, 0.3765, 0.7529,
0.2510, 0.8784, 0.7529,
0.7529, 0.8784, 0.7529,
0.1255, 0.1255, 0,
0.6275, 0.1255, 0,
0.1255, 0.6275, 0,
0.6275, 0.6275, 0,
0.1255, 0.1255, 0.5020,
0.6275, 0.1255, 0.5020,
0.1255, 0.6275, 0.5020,
0.6275, 0.6275, 0.5020,
0.3765, 0.1255, 0,
0.8784, 0.1255, 0,
0.3765, 0.6275, 0,
0.8784, 0.6275, 0,
0.3765, 0.1255, 0.5020,
0.8784, 0.1255, 0.5020,
0.3765, 0.6275, 0.5020,
0.8784, 0.6275, 0.5020,
0.1255, 0.3765, 0,
0.6275, 0.3765, 0,
0.1255, 0.8784, 0,
0.6275, 0.8784, 0,
0.1255, 0.3765, 0.5020,
0.6275, 0.3765, 0.5020,
0.1255, 0.8784, 0.5020,
0.6275, 0.8784, 0.5020,
0.3765, 0.3765, 0,
0.8784, 0.3765, 0,
0.3765, 0.8784, 0,
0.8784, 0.8784, 0,
0.3765, 0.3765, 0.5020,
0.8784, 0.3765, 0.5020,
0.3765, 0.8784, 0.5020,
0.8784, 0.8784, 0.5020,
0.1255, 0.1255, 0.2510,
0.6275, 0.1255, 0.2510,
0.1255, 0.6275, 0.2510,
0.6275, 0.6275, 0.2510,
0.1255, 0.1255, 0.7529,
0.6275, 0.1255, 0.7529,
0.1255, 0.6275, 0.7529,
0.6275, 0.6275, 0.7529,
0.3765, 0.1255, 0.2510,
0.8784, 0.1255, 0.2510,
0.3765, 0.6275, 0.2510,
0.8784, 0.6275, 0.2510,
0.3765, 0.1255, 0.7529,
0.8784, 0.1255, 0.7529,
0.3765, 0.6275, 0.7529,
0.8784, 0.6275, 0.7529,
0.1255, 0.3765, 0.2510,
0.6275, 0.3765, 0.2510,
0.1255, 0.8784, 0.2510,
0.6275, 0.8784, 0.2510,
0.1255, 0.3765, 0.7529,
0.6275, 0.3765, 0.7529,
0.1255, 0.8784, 0.7529,
0.6275, 0.8784, 0.7529,
0.3765, 0.3765, 0.2510,
0.8784, 0.3765, 0.2510,
0.3765, 0.8784, 0.2510,
0.8784, 0.8784, 0.2510,
0.3765, 0.3765, 0.7529,
0.8784, 0.3765, 0.7529,
0.3765, 0.8784, 0.7529,
0.8784, 0.8784, 0.7529]
def save_with_pascal_colormap(filename, arr):
colmap = (numpy.array(pascal_colormap) * 255).round().astype("uint8")
palimage = Image.new('P', (16, 16))
palimage.putpalette(colmap)
im = Image.fromarray(numpy.squeeze(arr.astype("uint8")))
im2 = im.quantize(palette=palimage)
im2.save(filename)
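if __name__ == '__main__':
    # A minimal sketch: write a random label map as an indexed PNG using the PASCAL palette.
    labels = numpy.random.randint(0, 21, size=(64, 64))
    save_with_pascal_colormap('demo_labels.png', labels)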
|
1707592
|
import unittest
import numpy as np
import SimpleITK as sitk
import pymia.filtering.filter as pymia_fltr
import pymia.filtering.preprocessing as pymia_fltr_prep
class TestNormalizeZScore(unittest.TestCase):
def setUp(self):
# set up image
image = sitk.Image((4, 1), sitk.sitkUInt8)
image.SetPixel((0, 0), 1)
image.SetPixel((1, 0), 2)
image.SetPixel((2, 0), 3)
image.SetPixel((3, 0), 4)
self.image = image
        # test_case = [1, 2, 3, 4]
        # expected z-scores computed by hand with the population standard deviation:
        # (test_case[i] - mean(test_case)) / sqrt(var(test_case) * 3/4), where var() is the sample variance
self.desired = np.array([[-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999]], np.float64)
def test_normalization(self):
dut = pymia_fltr_prep.NormalizeZScore()
out = dut.execute(self.image)
out_arr = sitk.GetArrayFromImage(out)
np.testing.assert_array_almost_equal(self.desired, out_arr, decimal=12)
def test_normalization_with_param(self):
dut = pymia_fltr_prep.NormalizeZScore()
out = dut.execute(self.image, pymia_fltr.FilterParams())
out_arr = sitk.GetArrayFromImage(out)
np.testing.assert_array_almost_equal(self.desired, out_arr, decimal=12)
def test_image_properties(self):
dut = pymia_fltr_prep.NormalizeZScore()
out = dut.execute(self.image)
self.assertEqual(self.image.GetSize(), out.GetSize())
self.assertEqual(self.image.GetOrigin(), out.GetOrigin())
self.assertEqual(self.image.GetSpacing(), out.GetSpacing())
self.assertEqual(self.image.GetDirection(), out.GetDirection())
self.assertEqual(self.image.GetDimension(), out.GetDimension())
self.assertEqual(self.image.GetNumberOfComponentsPerPixel(), out.GetNumberOfComponentsPerPixel())
self.assertEqual(sitk.sitkFloat64, out.GetPixelID())
|
1707593
|
from DRecPy.Evaluation.Processes import predictive_evaluation
import pytest
import pandas as pd
from DRecPy.Dataset import InteractionDataset
from DRecPy.Recommender.Baseline import UserKNN
from DRecPy.Evaluation.Metrics import RMSE
from DRecPy.Evaluation.Metrics import MSE
@pytest.fixture(scope='module')
def train_interactions_ds():
df = pd.DataFrame([
[1, 2, 3],
[1, 4, 5],
[1, 5, 2],
[2, 2, 5],
[2, 3, 2],
[3, 2, 2],
[3, 5, 5],
[3, 1, 1],
], columns=['user', 'item', 'interaction'])
return InteractionDataset.read_df(df)
@pytest.fixture(scope='module')
def test_interactions_ds():
df = pd.DataFrame([
[1, 1, 2],
[2, 4, 5],
[3, 3, 3],
[3, 6, 1],
], columns=['user', 'item', 'interaction'])
return InteractionDataset.read_df(df)
@pytest.fixture(scope='module')
def model(train_interactions_ds):
item_knn = UserKNN(k=3, m=0, sim_metric='cosine', aggregation='weighted_mean',
shrinkage=100, use_averages=False)
item_knn.fit(train_interactions_ds, verbose=False)
return item_knn
def test_predictive_evaluation_0(model, test_interactions_ds):
"""Evaluation without counting None predictions."""
assert predictive_evaluation(model, test_interactions_ds, count_none_predictions=False,
n_test_predictions=None, skip_errors=True) == {'MSE': 0.6667, 'RMSE': 0.8165}
def test_predictive_evaluation_1(model, test_interactions_ds):
"""Evaluation counting None predictions."""
assert predictive_evaluation(model, test_interactions_ds, count_none_predictions=True,
n_test_predictions=None, skip_errors=True) == {'MSE': 0.75, 'RMSE': 0.866}
def test_predictive_evaluation_2(model, test_interactions_ds):
"""Evaluation without skip errors."""
try:
        predictive_evaluation(model, test_interactions_ds, count_none_predictions=False,
                              n_test_predictions=None, skip_errors=False)
assert False
except Exception as e:
assert str(e) == 'Item 6 was not found.'
def test_predictive_evaluation_3(model, test_interactions_ds):
"""Evaluation without skip errors."""
try:
predictive_evaluation(model, test_interactions_ds, count_none_predictions=True,
n_test_predictions=None, skip_errors=False) == {'MSE': 0.75, 'RMSE': 0.866}
assert False
except Exception as e:
assert str(e) == 'Item 6 was not found.'
def test_predictive_evaluation_4(model, test_interactions_ds):
"""Evaluation using the RMSE metric only."""
assert predictive_evaluation(model, test_interactions_ds, count_none_predictions=False,
n_test_predictions=None, skip_errors=True, metrics=[RMSE()]) == {'RMSE': 0.8165}
def test_predictive_evaluation_5(model, test_interactions_ds):
"""Evaluation using the MSE metric only."""
assert predictive_evaluation(model, test_interactions_ds, count_none_predictions=False,
n_test_predictions=None, skip_errors=True, metrics=[MSE()]) == {'MSE': 0.6667}
def test_predictive_evaluation_6(model, test_interactions_ds):
"""Evaluation on the first test prediction."""
assert predictive_evaluation(model, test_interactions_ds, count_none_predictions=False,
n_test_predictions=1, skip_errors=True) == {'MSE': 1.0, 'RMSE': 1.0}
def test_predictive_evaluation_7(model, test_interactions_ds):
"""Evaluation on the first 2 test predictions."""
assert predictive_evaluation(model, test_interactions_ds, count_none_predictions=False,
n_test_predictions=2, skip_errors=True) == {'MSE': 0.5, 'RMSE': 0.7071}
def test_predictive_evaluation_8(model):
"""Evaluation on the training set."""
assert predictive_evaluation(model, count_none_predictions=False,
n_test_predictions=None, skip_errors=True) == {'MSE': 5.2485, 'RMSE': 2.291}
def test_predictive_evaluation_9(model, test_interactions_ds):
"""Invalid n_test_predictions value (0)."""
try:
predictive_evaluation(model, test_interactions_ds, count_none_predictions=False,
n_test_predictions=0, skip_errors=True)
assert False
except Exception as e:
assert str(e) == 'The number of test users (0) should be > 0.'
def test_predictive_evaluation_10(model, test_interactions_ds):
"""Invalid n_test_predictions value (< 0)."""
try:
predictive_evaluation(model, test_interactions_ds, count_none_predictions=False,
n_test_predictions=-1, skip_errors=True)
assert False
except Exception as e:
assert str(e) == 'The number of test users (-1) should be > 0.'
def test_predictive_evaluation_11(model, test_interactions_ds):
"""Invalid metrics value (not a list)."""
try:
predictive_evaluation(model, test_interactions_ds, count_none_predictions=False,
n_test_predictions=None, skip_errors=True, metrics={})
assert False
except Exception as e:
assert str(e) == 'Expected "metrics" argument to be a list and found <class \'dict\'>. ' \
'Should contain instances of PredictiveMetricABC.'
def test_predictive_evaluation_12(model, test_interactions_ds):
"""Invalid metrics value (list with non-PredictiveMetricABC instances)."""
fun = lambda x: 1
try:
predictive_evaluation(model, test_interactions_ds, count_none_predictions=False,
n_test_predictions=None, skip_errors=True, metrics=[fun])
assert False
except Exception as e:
assert str(e) == f'Expected metric {fun} to be an instance of type PredictiveMetricABC.'
|
1707595
|
import pickle
import os
import numpy as np
class Params(object):
""" A simple dictionary that has its keys as attributes available. """
def __init__(self):
pass
def __str__(self):
s = ""
for name in sorted(self.__dict__.keys()):
s += "%-18s %s\n" % (name + ":", self.__dict__[name])
return s
def __repr__(self):
return self.__str__()
def save(path, var):
"""
Saves the variable ``var`` to the given path. The file format depends on the file extension.
List of supported file types:
- .pkl: pickle
- .npy: numpy
- .txt: text file, one element per line. ``var`` must be a string or list of strings.
"""
if path.endswith(".pkl"):
with open(path, 'wb') as f:
pickle.dump(var, f, 2)
elif path.endswith(".npy"):
np.save(path, var)
elif path.endswith(".txt"):
with open(path, 'w') as f:
            if isinstance(var, str):
f.write(var)
else:
for i in var:
f.write(i)
f.write('\n')
else:
raise NotImplementedError("Unknown extension: " + os.path.splitext(path)[1])
def load(path):
"""
Loads the content of a file. It is mainly a convenience function to
avoid adding the ``open()`` contexts. File type detection is based on extensions.
Can handle the following types:
- .pkl: pickles
- .txt: text files, result is a list of strings ending whitespace removed
:param path: path to the file
"""
if path.endswith('.pkl'):
with open(path, 'rb') as f:
return pickle.load(f)
elif path.endswith('.txt'):
with open(path, 'r') as f:
return [x.rstrip('\n\r') for x in list(f)]
else:
raise NotImplementedError("Unknown extension: " + os.path.splitext(path)[1])
def ensuredir(path):
"""
Creates a folder if it doesn't exist.
:param path: path to the folder to create
"""
if not os.path.exists(path):
os.makedirs(path)
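if __name__ == '__main__':
    # A minimal round-trip sketch (writes demo files into the working directory).
    save('demo.txt', ['alpha', 'beta'])
    print(load('demo.txt'))  # ['alpha', 'beta']
    save('demo.pkl', {'answer': 42})
    print(load('demo.pkl'))  # {'answer': 42}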
|
1707619
|
from agent.pipeline.config.stages.base import Stage
from agent import pipeline, source
class JDBCSource(Stage):
def get_config(self) -> dict:
return {
'query': pipeline.jdbc.query.Builder(self.pipeline).build(),
            **self.get_connection_configs()
}
def get_connection_configs(self):
conf = {'hikariConfigBean.connectionString': 'jdbc:' + self.pipeline.source.config[
source.JDBCSource.CONFIG_CONNECTION_STRING]}
if self.pipeline.source.config.get(source.JDBCSource.CONFIG_USERNAME):
conf['hikariConfigBean.useCredentials'] = True
conf['hikariConfigBean.username'] = self.pipeline.source.config[source.JDBCSource.CONFIG_USERNAME]
conf['hikariConfigBean.password'] = self.pipeline.source.config[source.JDBCSource.CONFIG_PASSWORD]
return conf
|
1707653
|
import sys, os
import argparse
import numpy as np
import json
import heapq
import random
import numbers
# utils
def flatten(l):
""" Merges a list of lists into a single list. """
return [item for sublist in l for item in sublist]
class AveragePrecisionCalculator(object):
"""Calculate the average precision and average precision at n."""
def __init__(self, top_n=None):
"""Construct an AveragePrecisionCalculator to calculate average precision.
This class is used to calculate the average precision for a single label.
Args:
top_n: A positive Integer specifying the average precision at n, or
None to use all provided data points.
Raises:
ValueError: An error occurred when the top_n is not a positive integer.
"""
    if not ((isinstance(top_n, int) and top_n > 0) or top_n is None):
      raise ValueError("top_n must be a positive integer or None.")
self._top_n = top_n # average precision at n
self._total_positives = 0 # total number of positives have seen
self._heap = [] # max heap of (prediction, actual)
@property
def heap_size(self):
"""Gets the heap size maintained in the class."""
return len(self._heap)
@property
def num_accumulated_positives(self):
"""Gets the number of positive samples that have been accumulated."""
return self._total_positives
def accumulate(self, predictions, actuals, num_positives=None):
"""Accumulate the predictions and their ground truth labels.
After the function call, we may call peek_ap_at_n to actually calculate
the average precision.
Note predictions and actuals must have the same shape.
Args:
predictions: a list storing the prediction scores.
actuals: a list storing the ground truth labels. Any value
larger than 0 will be treated as positives, otherwise as negatives.
      num_positives: If the 'predictions' and 'actuals' inputs aren't complete,
        then it's possible some true positives were missed in them. In that case,
        you can provide 'num_positives' in order to accurately track recall.
Raises:
ValueError: An error occurred when the format of the input is not the
numpy 1-D array or the shape of predictions and actuals does not match.
"""
if len(predictions) != len(actuals):
raise ValueError("the shape of predictions and actuals does not match.")
    if num_positives is not None:
      if not isinstance(num_positives, numbers.Number) or num_positives < 0:
        raise ValueError(
            "'num_positives' was provided but it wasn't a nonnegative number.")
      self._total_positives += num_positives
    else:
      self._total_positives += np.size(np.where(actuals > 0))
topk = self._top_n
heap = self._heap
for i in range(np.size(predictions)):
if topk is None or len(heap) < topk:
heapq.heappush(heap, (predictions[i], actuals[i]))
else:
if predictions[i] > heap[0][0]: # heap[0] is the smallest
heapq.heappop(heap)
heapq.heappush(heap, (predictions[i], actuals[i]))
def clear(self):
"""Clear the accumulated predictions."""
self._heap = []
self._total_positives = 0
def peek_ap_at_n(self):
"""Peek the non-interpolated average precision at n.
Returns:
The non-interpolated average precision at n (default 0).
If n is larger than the length of the ranked list,
the average precision will be returned.
"""
if self.heap_size <= 0:
return 0
predlists = np.array(list(zip(*self._heap)))
ap = self.ap_at_n(predlists[0],
predlists[1],
n=self._top_n,
total_num_positives=self._total_positives)
return ap
@staticmethod
def ap(predictions, actuals):
"""Calculate the non-interpolated average precision.
Args:
predictions: a numpy 1-D array storing the sparse prediction scores.
actuals: a numpy 1-D array storing the ground truth labels. Any value
larger than 0 will be treated as positives, otherwise as negatives.
Returns:
The non-interpolated average precision at n.
If n is larger than the length of the ranked list,
the average precision will be returned.
Raises:
ValueError: An error occurred when the format of the input is not the
numpy 1-D array or the shape of predictions and actuals does not match.
"""
return AveragePrecisionCalculator.ap_at_n(predictions,
actuals,
n=None)
@staticmethod
def ap_at_n(predictions, actuals, n=20, total_num_positives=None):
"""Calculate the non-interpolated average precision.
Args:
predictions: a numpy 1-D array storing the sparse prediction scores.
actuals: a numpy 1-D array storing the ground truth labels. Any value
larger than 0 will be treated as positives, otherwise as negatives.
n: the top n items to be considered in ap@n.
total_num_positives : (optionally) you can specify the number of total
positive
in the list. If specified, it will be used in calculation.
Returns:
The non-interpolated average precision at n.
If n is larger than the length of the ranked list,
the average precision will be returned.
Raises:
ValueError: An error occurred when
1) the format of the input is not the numpy 1-D array;
2) the shape of predictions and actuals does not match;
3) the input n is not a positive integer.
"""
if len(predictions) != len(actuals):
raise ValueError("the shape of predictions and actuals does not match.")
if n is not None:
if not isinstance(n, int) or n <= 0:
raise ValueError("n must be 'None' or a positive integer."
" It was '%s'." % n)
ap = 0.0
predictions = np.array(predictions)
actuals = np.array(actuals)
# add a shuffler to avoid overestimating the ap
predictions, actuals = AveragePrecisionCalculator._shuffle(predictions,
actuals)
sortidx = sorted(
range(len(predictions)),
key=lambda k: predictions[k],
reverse=True)
if total_num_positives is None:
numpos = np.size(np.where(actuals > 0))
else:
numpos = total_num_positives
if numpos == 0:
return 0
if n is not None:
numpos = min(numpos, n)
delta_recall = 1.0 / numpos
poscount = 0.0
# calculate the ap
r = len(sortidx)
if n is not None:
r = min(r, n)
for i in range(r):
if actuals[sortidx[i]] > 0:
poscount += 1
ap += poscount / (i + 1) * delta_recall
return ap
@staticmethod
def _shuffle(predictions, actuals):
random.seed(0)
suffidx = random.sample(range(len(predictions)), len(predictions))
predictions = predictions[suffidx]
actuals = actuals[suffidx]
return predictions, actuals
@staticmethod
def _zero_one_normalize(predictions, epsilon=1e-7):
"""Normalize the predictions to the range between 0.0 and 1.0.
For some predictions like SVM predictions, we need to normalize them before
calculate the interpolated average precision. The normalization will not
change the rank in the original list and thus won't change the average
precision.
Args:
predictions: a numpy 1-D array storing the sparse prediction scores.
epsilon: a small constant to avoid denominator being zero.
Returns:
The normalized prediction.
"""
    denominator = np.max(predictions) - np.min(predictions)
    ret = (predictions - np.min(predictions)) / np.maximum(denominator, epsilon)
    return ret
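# --- Usage sketch (not part of the original module): driving the calculator
# directly. The toy score/label arrays are illustrative assumptions.
def _demo_average_precision():
  calc = AveragePrecisionCalculator(top_n=None)
  calc.accumulate(np.array([0.9, 0.1, 0.8]), np.array([1, 0, 0]))
  return calc.peek_ap_at_n()  # 1.0: the single positive is ranked first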
def calculate_gap(predictions, actuals, top_k=6):
gap_calculator = AveragePrecisionCalculator()
sparse_predictions, sparse_labels, num_positives = top_k_by_class(predictions, actuals, top_k)
gap_calculator.accumulate(flatten(sparse_predictions), flatten(sparse_labels), sum(num_positives))
return gap_calculator.peek_ap_at_n()
def top_k_by_class(predictions, labels, k=20):
if k <= 0:
raise ValueError("k must be a positive integer.")
k = min(k, predictions.shape[1])
num_classes = predictions.shape[1]
prediction_triplets= []
for video_index in range(predictions.shape[0]):
prediction_triplets.extend(top_k_triplets(predictions[video_index],labels[video_index], k))
out_predictions = [[] for v in range(num_classes)]
out_labels = [[] for v in range(num_classes)]
for triplet in prediction_triplets:
out_predictions[triplet[0]].append(triplet[1])
out_labels[triplet[0]].append(triplet[2])
out_true_positives = [np.sum(labels[:,i]) for i in range(num_classes)]
return out_predictions, out_labels, out_true_positives
def top_k_triplets(predictions, labels, k=20):
"""Get the top_k for a 1-d numpy array. Returns a sparse list of tuples in
(prediction, class) format"""
m = len(predictions)
k = min(k, m)
indices = np.argpartition(predictions, -k)[-k:]
return [(index, predictions[index], labels[index]) for index in indices]
def get_tag_id_dict(tag_id_file):
tag_id_dict={}
with open(tag_id_file, 'r') as lnf:
for line in lnf:
tag, idx = line.strip().split('\t')
tag_id_dict[tag] = int(idx)
return tag_id_dict
def convert_to_hot(tag_list, scores, tag_dict):
hot_list = np.zeros(len(tag_dict))
for i in range(len(tag_list)):
hot_list[int(tag_dict[tag_list[i]])] = float(scores[i])
return hot_list
def parse_gt_json(gt_json, tag_dict):
gt_dict = {}
with open(gt_json, "r", encoding='utf-8') as f:
gts = json.load(f)
for key in gts:
x = []
for ann in gts[key]["annotations"]:
x.extend(ann['labels'])
x = list(set(x))
gt_dict[key] = convert_to_hot(x, np.ones(len(x)), tag_dict)
return gt_dict
def parse_input_json(input_json, tag_dict):
pred_dict = {}
videos_list = []
with open(input_json, "r", encoding='utf-8') as f:
pred_result = json.load(f)
for video in pred_result:
videos_list.append(video)
pred_dict[video] = convert_to_hot(pred_result[video]["result"][0]["labels"],
pred_result[video]["result"][0]["scores"],tag_dict)
return pred_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--pred_json', type=str, default="test100_pred.json")
parser.add_argument('--tag_id_file', type=str, default="tag-id-tagging.txt")
parser.add_argument('--gt_json', type=str, default="test100.json")
parser.add_argument('--top_k', type=int, default=20)
args = parser.parse_args()
assert os.path.exists(args.tag_id_file), "dict file {} not found".format(args.tag_id_file)
tag_dict = get_tag_id_dict(args.tag_id_file)
pred_dict = parse_input_json(args.pred_json, tag_dict)
gt_dict = parse_gt_json(args.gt_json, tag_dict)
assert(pred_dict.keys() == gt_dict.keys())
preds, labels = [], []
for k in pred_dict:
preds.append(pred_dict[k])
labels.append(gt_dict[k])
preds = np.stack(preds)
labels = np.stack(labels)
gap = calculate_gap(preds, labels, top_k = args.top_k)
print("The GAP result is {:.3f}".format(gap))
|
1707674
|
import math
import logging
from typing import Optional, TypeVar
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn import Module
from .. import functional as F
from .. import _reduction as _Reduction
from ...distributed import gather
# See https://mypy.readthedocs.io/en/latest/generics.html#generic-methods-and-generic-self for the use
# of `T` to annotate `self`. Many methods of `Module` return `self` and we want those return values to be
# the type of the subclass, not the looser type of `Module`.
T = TypeVar('T', bound='Module')
class _Loss(Module):
reduction: str
def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
super(_Loss, self).__init__()
if size_average is not None or reduce is not None:
self.reduction = _Reduction.legacy_get_string(size_average, reduce)
else:
self.reduction = reduction
class _WeightedLoss(_Loss):
def __init__(self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = 'mean') -> None:
super(_WeightedLoss, self).__init__(size_average, reduce, reduction)
self.register_buffer('weight', weight)
class ParallelCrossEntropyLoss(_WeightedLoss):
"""
Parallel version of :class::`torch.nn.CrossEntropy`.
Arguments are similar to :class:`torch.nn.CrossEntropy`. Extra arguments:
Args:
None.
Returns:
:class::`torch.Tensor`: loss values.
"""
__constants__ = ['ignore_index', 'reduction']
ignore_index: int
def __init__(self, weight: Optional[Tensor] = None, size_average=None, ignore_index: int = -100,
reduce=None, reduction: str = 'mean') -> None:
super(ParallelCrossEntropyLoss, self).__init__(weight, size_average, reduce, reduction)
self.ignore_index = ignore_index
def forward(self, input: Tensor, target: Tensor) -> Tensor:
assert self.weight is None or isinstance(self.weight, Tensor)
return F.parallel_cross_entropy(input, target, weight=self.weight,
ignore_index=self.ignore_index, reduction=self.reduction)
|
1707711
|
import unittest
from buildFeats import BuildFeatsV2
import os
class TestBuildFeatsV2(unittest.TestCase):
def setUp(self):
unittest.TestLoader.sortTestMethodsUsing = None
self.func = BuildFeatsV2()
def test(self):
self.assertTrue(True)
def test_load_Feats(self):
directory_path = os.getcwd()
self.assertGreater(len(self.func.pf.load_csv(directory_path+"/featscsv/RadGridExport-1.csv")), 0)
def test_norm_name(self):
self.assertEqual(self.func.pf.norm_link("<u><a href=\"Feats.aspx?ID=2516\">Aberration Kinship</a></u>"), "Aberration Kinship")
def test_norm_pfs(self):
        self.assertEqual(self.func.pf.norm_pfs("<img alt=\"PFS Standard\" title=\"PFS Standard\" style=\"height:18px; padding:2px 10px 0px 2px\" src=\"Images\\Icons\\PFS_Standard.png\">"), "PFS Standard")
def test_norm_pfs_neg(self):
self.assertEqual(self.func.pf.norm_pfs("-"), "Excluded")
def test_norm_source(self):
self.assertEqual(self.func.pf.norm_link("<u><a href=\"Sources.aspx?ID=74\" title=\"Ancestry Guide\">Ancestry Guide</a></u>"), "Ancestry Guide")
def test_norm_rarity(self):
self.assertEqual(self.func.pf.norm_link("<u><a href=\"Traits.aspx?ID=28\">Common</a></u>"), "Common")
def test_norm_traits(self):
self.assertEqual(self.func.pf.norm_traits("<u><a href=\"Traits.aspx?ID=338\">Fleshwarp</a></u>"), ['Fleshwarp'])
def test_norm_multi_traits(self):
self.assertEqual(self.func.pf.norm_traits("<u><a href=\"Traits.aspx?ID=215\">Dhampir</a></u>, <u><a href=\"Traits.aspx?ID=317\">Lineage</a></u>"), ['Dhampir', 'Lineage'])
def test_normurl(self):
self.assertEqual(self.func.pf.norm_url("<u><a href=\"Feats.aspx?ID=2516\">Aberration Kinship</a></u>"), "https://2e.aonprd.com/Feats.aspx?ID=2516")
def test_norm_prereqs(self):
self.assertEqual(self.func.pf.norm_prereqs("trained in <u><a href=\"Skills.aspx?ID=8\">Lore</u></a>"), "trained in Lore")
def test_get_details(self):
#print(self.func.get_details("https://2e.aonprd.com/Feats.aspx?ID=779"))
self.assertIsNotNone(self.func.get_details("https://2e.aonprd.com/Feats.aspx?ID=779"))
def test_normalize_feat_data(self):
directory_path = os.getcwd()
self.assertGreater(len(self.func.normalize_feat_data(self.func.pf.load_csv(directory_path+"/featscsv/RadGridExport-1.csv"))), 0)
#def test_build_feats(self):
#self.assertGreater(len(self.func.build_feats()), 0)
#def test_save_feats(self):
##directory_path = os.getcwd()
#self.func.save_feats(self.func.build_feats())
##file = open(directory_path+"/feats-pf2-v3.json", "r")
##self.assertIsNotNone(file)
if __name__ == '__main__':
unittest.main()
|
1707714
|
from numpy import array
from sympy import sin, cos, pi, exp
def flux(u, q, w, v, x, t, mu, eta):
    z = x[0]
    r = x[1]
    f = mu*r*q
    return f
def source(u, q, w, v, x, t, mu, eta):
    z = x[0]
    r = x[1]
    s = array([sin(r)/exp(z)])
    return s
def fbou(u, q, w, v, x, t, mu, eta, uhat, n, tau):
    f = flux(u, q, w, v, x, t, mu, eta)
    tm = f[0]*n[0] + f[1]*n[1] + tau[0]*(u[0]-uhat[0])
    fb = array([0.0, tm, tm, tm])
    return fb
def ubou(u, q, w, v, x, t, mu, eta, uhat, n, tau):
    z = x[0]
    r = x[1]
    uexact = exp(-z)*cos(r)
    ub = array([u, uexact, uexact, uexact])
    return ub
def initu(x, mu, eta):
    u0 = array([0.0])
    return u0
|
1707715
|
import json
from ariadne import graphql_sync
from ariadne.constants import PLAYGROUND_HTML
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from .graphql import schema
@csrf_exempt
def graphql_view(request):
if request.method == "GET":
return HttpResponse(PLAYGROUND_HTML)
if request.method != "POST":
return HttpResponseBadRequest()
if request.content_type != "application/json":
return HttpResponseBadRequest()
try:
data = json.loads(request.body)
except ValueError:
return HttpResponseBadRequest()
# Execute the query
success, result = graphql_sync(
schema,
data,
context_value=request, # expose request as info.context
debug=settings.DEBUG,
)
status_code = 200 if success else 400
# Send response to client
return JsonResponse(result, status=status_code)
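# --- Usage sketch (not part of the original module): exercising the view,
# assuming it is routed at /graphql/ in urls.py (an assumption, not shown here):
#   curl -X POST http://localhost:8000/graphql/ \
#        -H "Content-Type: application/json" \
#        -d '{"query": "{ __typename }"}'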
|
1707723
|
import torch
import torch.nn as nn
class Discriminator(nn.Module):
"""Discriminator Network"""
def __init__(self):
super(Discriminator, self).__init__()
self.in_channel = 3
self.ndf = 64
self.out_channel = 1
self.main = nn.Sequential(
nn.Conv2d(self.in_channel, self.ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(self.ndf, self.ndf*2, 4, 2, 1, bias=False),
nn.BatchNorm2d(self.ndf*2),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(self.ndf*2, self.ndf*4, 4, 2, 1, bias=False),
nn.BatchNorm2d(self.ndf*4),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(self.ndf*4, self.ndf*8, 4, 2, 1, bias=False),
nn.BatchNorm2d(self.ndf*8),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(self.ndf*8, self.out_channel, 4, 1, 0, bias=False)
)
def forward(self, x):
out = self.main(x)
out = out.view(-1, 1).squeeze(1)
return out
class Generator(nn.Module):
"""Generator Network"""
def __init__(self):
super(Generator, self).__init__()
self.nz = 100
self.ngf = 64
self.out_channel = 3
self.main = nn.Sequential(
nn.ConvTranspose2d(self.nz, self.ngf*8, 4, 1, 0, bias=False),
nn.BatchNorm2d(self.ngf*8),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(self.ngf*8, self.ngf*4, 4, 2, 1, bias=False),
nn.BatchNorm2d(self.ngf*4),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(self.ngf*4, self.ngf*2, 4, 2, 1, bias=False),
nn.BatchNorm2d(self.ngf*2),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(self.ngf*2, self.ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(self.ngf),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(self.ngf, self.out_channel, 4, 2, 1, bias=False),
nn.Tanh()
)
def forward(self, x):
out = self.main(x)
return out
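# --- Usage sketch (not part of the original module): shape check for the
# 64x64 DCGAN pair defined above. Batch size 2 is an arbitrary assumption.
def _demo_shapes():
    g, d = Generator(), Discriminator()
    z = torch.randn(2, 100, 1, 1)   # latent vectors, nz=100
    fake = g(z)                     # -> torch.Size([2, 3, 64, 64])
    logits = d(fake)                # -> torch.Size([2])
    return fake.shape, logits.shape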
|
1707729
|
class SimpleTrainer:
"""Orchestrates training of an RL algorithm.
This trainer is "simple" in that it doesn't manager distributed sampling.
"""
def __init__(self, sampler, agent, logger):
"""
Args:
sampler: A class that samples trajectories from an environment.
agent: The agent that interacts with the environment and learns from experience.
logger: Class for logging information about the training.
"""
self.sampler = sampler
self.agent = agent
self.logger = logger
def train(self, max_steps):
for step in range(max_steps):
trajs = self.sampler.sample(self.agent)
info = self.agent.learn(trajs)
self.logger.log(step, trajs, info, self.sampler.env)
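# --- Usage sketch (not part of the original module): the duck-typed interfaces
# SimpleTrainer expects. The stub logger below is a hypothetical placeholder.
class _PrintLogger:
    def log(self, step, trajs, info, env):
        print(f"step {step}: sampled {len(trajs)} trajectories")
# SimpleTrainer(sampler=my_sampler, agent=my_agent, logger=_PrintLogger()).train(100)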
|
1707737
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("ANALYSIS")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
process.source = cms.Source (
"PoolSource",
fileNames = cms.untracked.vstring(
'file:display.root'
),
secondaryFileNames = cms.untracked.vstring(),
noEventSort = cms.untracked.bool(True),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck')
)
process.pfCandidateAnalyzer = cms.EDAnalyzer("PFCandidateAnalyzer",
PFCandidates = cms.InputTag("particleFlow"),
verbose = cms.untracked.bool(True),
printBlocks = cms.untracked.bool(False)
)
process.load("FastSimulation.Configuration.EventContent_cff")
process.aod = cms.OutputModule("PoolOutputModule",
process.AODSIMEventContent,
fileName = cms.untracked.string('aod.root')
)
process.outpath = cms.EndPath(process.aod )
process.p = cms.Path(process.pfCandidateAnalyzer)
|
1707738
|
from World.Object.model import Object
from World.Object.Constants.UpdateObjectFields import ObjectField
from World.WorldPacket.UpdatePacket.Constants.ObjectUpdateType import ObjectUpdateType
from World.WorldPacket.UpdatePacket.Builders.UpdatePacketBuilder import UpdatePacketBuilder
from DB.Connection.RealmConnection import RealmConnection
class ObjectManager(object):
def __init__(self, **kwargs):
self.update_packet_builder = None
self.fields = {}
self.object_update_type = ObjectUpdateType.CREATE_OBJECT.value
self.world_object = Object()
def find(self, **kwargs):
self.world_object = self.session.query(self.__class__).filter_by(**kwargs).first()
return self
def find_all(self, **kwargs):
return self.session.query(self.__class__).filter_by(**kwargs).all()
def save(self):
self.session.add(self.world_object)
self.session.commit()
return self
def delete(self, **kwargs):
self.session.query(Object).filter_by(**kwargs).delete()
return self
    def get_object_field(self, field) -> int:
        return self.fields.get(field, 0)
def set_object_field(self, field, value) -> None:
self.fields[field] = value
def add_object_fields(self) -> None:
self.set_object_field(ObjectField.GUID, self.world_object.guid)
self.set_object_field(ObjectField.TYPE, self.world_object.type_mask)
self.set_object_field(ObjectField.ENTRY, self.world_object.entry)
self.set_object_field(ObjectField.SCALE_X, self.world_object.scale_x)
def set_object_update_type(self, object_update_type: ObjectUpdateType):
self.object_update_type = object_update_type.value
def create_batch(self, fields: list) -> bytes:
for field in fields:
self.update_packet_builder.add_field(field, self.get_object_field(field))
return self.update_packet_builder.create_batch(send_packed_guid=True)
def add_batch(self, batch: bytes):
        # this method can also be used for adding batches from other managers
self.update_packet_builder.add_batch(batch)
return self
def add_field(self, field, value, offset=0):
self.update_packet_builder.add_field(field, value, offset)
return self
def build_update_packet(self):
self.update_packet_builder.build()
return self
def set_update_flags(self, update_flags: int):
self.update_packet_builder.set_update_flags(update_flags)
def get_update_packets(self):
return self.update_packet_builder.get_packets()
def init_update_packet_builder(self, **kwargs):
object_update_type = kwargs.pop('object_update_type')
update_flags = kwargs.pop('update_flags')
update_object = kwargs.pop('update_object')
self.set_object_update_type(object_update_type=object_update_type)
self.set(update_object)
self.prepare().set_update_flags(update_flags)
# overridable
def load(self, **kwargs):
id = kwargs.pop('id')
self.world_object = self.session.query(Object).filter_by(id=id).first()
return self
# overridable
def new(self, **kwargs):
return self
def set(self, world_object: Object):
self.world_object = world_object
return self
# inheritable
def prepare(self):
# init data for UpdatePacket
self.add_object_fields()
self.update_packet_builder = UpdatePacketBuilder(
update_object=self.world_object,
update_type=self.object_update_type,
object_type=self.world_object.object_type
)
return self
# enter/exit are safe, should be used instead of __del__
def __enter__(self):
connection = RealmConnection()
self.session = connection.session
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.session.close()
# https://stackoverflow.com/a/58590249/5397119
return False
|
1707791
|
import json
import os
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(_THIS_DIR, "chars_to_jyutping.json"), encoding="utf8") as f:
CHARS_TO_JYUTPING = json.load(f)
with open(os.path.join(_THIS_DIR, "lettered.json"), encoding="utf8") as f:
LETTERED = json.load(f)
|
1707798
|
import os
import pytest
from topo_processor.metadata.metadata_validators.metadata_validator_tiff import MetadataValidatorTiff
from topo_processor.stac import Asset, Item
def test_check_validity():
source_path = os.path.join(os.getcwd(), "test_data", "tiffs", "SURVEY_1", "CONTROL.tiff")
asset = Asset(source_path)
item = Item("item_id")
item.add_asset(asset)
item.properties.update({"linz:photo_type": "COLOUR"})
validator = MetadataValidatorTiff()
assert validator.is_applicable(item)
with pytest.raises(Exception, match=r"Wrong photo type of gray"):
validator.validate_metadata(item)
|
1707799
|
import numpy as np
import numpy.linalg as la
import scipy
import skimage
import PIL
from PIL import Image as PILImage
import TimestampedPacketMotionData_pb2
import argparse
import os
import google.protobuf.json_format
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import TimestampedImage_pb2
import Pose3d_pb2
import cv2
import PoseSequenceLabel_pb2
import bisect
import FrameId_pb2
import Vector3dStamped_pb2
import scipy.interpolate
import deepracing.protobuf_utils as protobuf_utils
import deepracing.pose_utils as pose_utils
import deepracing.arma_utils
import deepracing
def sortKey(packet):
return packet.udp_packet.m_header.m_sessionTime
parser = argparse.ArgumentParser()
parser.add_argument("motion_data_dir", help="Path to motion data to generate trackfile from", type=str)
parser.add_argument("--trackfileout", help="Path to an ARMA format matrix file", type=str, default="track.arma")
parser.add_argument("--json", action="store_true", help="Look for json files in motion_data_dir instead of binary .pb files")
args = parser.parse_args()
motion_data_dir = args.motion_data_dir
use_json = args.json
trackfileout = args.trackfileout
motion_packets = sorted(protobuf_utils.getAllMotionPackets(motion_data_dir, args.json), key=sortKey)
#print(motion_packets)
car_index = 0
poses = [ protobuf_utils.extractPose(p.udp_packet, car_index = car_index) for p in motion_packets]
t = np.array([sortKey(p) for p in motion_packets])
X = np.array([ pose[0] for pose in poses])
Xdot = np.array([protobuf_utils.extractVelocity(p.udp_packet, car_index = car_index) for p in motion_packets])
_,unique_indices = np.unique(t,return_index=True)
t = t[unique_indices]
X = X[unique_indices]
Xdot = Xdot[unique_indices]
Xdotnorm = Xdot.copy()
for i in range(Xdotnorm.shape[0]):
Xdotnorm[i,:] = Xdotnorm[i,:]/la.norm(Xdotnorm[i,:])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X[:,0], X[:,1], X[:,2], c='r', marker='o', s = np.ones_like(X[:,0]))
ax.quiver(X[:,0], X[:,1], X[:,2], Xdotnorm[:,0], Xdotnorm[:,1], Xdotnorm[:,2])
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
trackfile_dir = os.path.dirname(trackfileout)
if trackfile_dir and not os.path.isdir(trackfile_dir):
    os.makedirs(trackfile_dir)
deepracing.arma_utils.writeArmaFile(trackfileout,t,X,Xdot)
|
1707834
|
import os
import time
import unittest
import uuid
import warnings
from lib.config import gen_datadog_agent_config
from lib.const import CSPM_RUNNING_DOCKER_CHECK_LOG, CSPM_START_LOG
from lib.cspm.api import wait_for_compliance_event, wait_for_finding
from lib.cspm.finding import is_expected_docker_finding, parse_output_and_extract_findings
from lib.docker import DockerHelper
from lib.log import wait_agent_log
from lib.stepper import Step
class TestE2EDocker(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore", category=ResourceWarning)
warnings.simplefilter("ignore", category=UserWarning)
self.docker_helper = DockerHelper()
def tearDown(self):
self.docker_helper.close()
def test_privileged_container(self):
print("")
test_id = str(uuid.uuid4())[:4]
agent_name = "security-agent"
with Step(msg="create privileged container", emoji=":construction:"):
pc = self.docker_helper.client.containers.run(
"ubuntu:latest",
command="sleep 7200",
detach=True,
remove=True,
privileged=True,
)
self.container_id = pc.id
with Step(msg="check agent start", emoji=":man_running:"):
image = os.getenv("DD_AGENT_IMAGE")
hostname = f"host_{test_id}"
self.datadog_agent_config = gen_datadog_agent_config(
hostname=hostname, log_level="DEBUG", tags=["tag1", "tag2"]
)
self.container = self.docker_helper.start_cspm_agent(
image,
datadog_agent_config=self.datadog_agent_config,
)
self.assertIsNotNone(self.container, msg="unable to start container")
self.docker_helper.wait_agent_container()
wait_agent_log(agent_name, self.docker_helper, CSPM_START_LOG)
with Step(msg="check agent event", emoji=":check_mark_button:"):
_, output = self.container.exec_run("security-agent compliance check --report")
findings = parse_output_and_extract_findings(output.decode(), [CSPM_RUNNING_DOCKER_CHECK_LOG])
self.finding = None
for f in findings:
if is_expected_docker_finding(f, self.container_id):
self.finding = f
if self.finding is None:
raise LookupError(f"{agent_name} | {CSPM_RUNNING_DOCKER_CHECK_LOG}")
with Step(msg="wait for intake (~1m)", emoji=":alarm_clock:"):
time.sleep(1 * 60)
with Step(msg="check app compliance event", emoji=":SOON_arrow:"):
wait_for_compliance_event(f"resource_id:*{self.container_id}")
with Step(msg="wait for finding generation (~1m)", emoji=":alarm_clock:"):
time.sleep(1 * 60)
with Step(msg="check app finding", emoji=":chart_increasing_with_yen:"):
wait_for_finding(f"@resource_type:docker_container @container_id:{self.container_id}")
def main():
unittest.main()
if __name__ == "__main__":
main()
|
1707929
|
import argparse
import pathlib
import siml
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'settings_yaml',
type=pathlib.Path,
help='YAML file name of settings.')
parser.add_argument(
'-o', '--out-dir',
type=pathlib.Path,
default=None,
help='Output directory name')
parser.add_argument(
'-g', '--gpu-id',
type=int,
default=-1,
help='GPU ID [-1, meaning CPU]')
parser.add_argument(
'-r', '--restart-dir',
type=pathlib.Path,
default=None,
help='Restart directory name')
parser.add_argument(
'-p', '--pretrained-directory',
type=pathlib.Path,
default=None,
help='Pretrained directory name')
args = parser.parse_args()
main_setting = siml.setting.MainSetting.read_settings_yaml(
args.settings_yaml)
if args.out_dir is not None:
        main_setting.trainer.out_dir = args.out_dir
main_setting.trainer.gpu_id = args.gpu_id
if args.restart_dir is not None:
main_setting.trainer.restart_directory = args.restart_dir
if args.pretrained_directory is not None:
main_setting.trainer.pretrain_directory = args.pretrained_directory
trainer = siml.trainer.Trainer(main_setting)
trainer.train()
if __name__ == '__main__':
main()
|
1707944
|
import math
import torch
import torch.nn as nn
from .modules.legacy import *
class ConvLayer(nn.Sequential):
def __init__(
self,
in_channel,
out_channel,
kernel_size,
downsample=False,
blur_kernel=[1, 3, 3, 1],
bias=True,
activate=True,
):
layers = []
if downsample:
factor = 2
p = (len(blur_kernel) - factor) + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
stride = 2
self.padding = 0
else:
stride = 1
self.padding = kernel_size // 2
layers.append(
EqualConv2d(
in_channel,
out_channel,
kernel_size,
padding=self.padding,
stride=stride,
bias=bias and not activate,
)
)
if activate:
if bias:
layers.append(FusedLeakyReLU(out_channel))
else:
layers.append(ScaledLeakyReLU(0.2))
super().__init__(*layers)
class ResBlock(nn.Module):
def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
super().__init__()
self.conv1 = ConvLayer(in_channel, in_channel, 3)
self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
self.skip = ConvLayer(
in_channel, out_channel, 1, downsample=True, activate=False, bias=False
)
def forward(self, input):
out = self.conv1(input)
out = self.conv2(out)
skip = self.skip(input)
out = (out + skip) / math.sqrt(2)
return out
class Discriminator(nn.Module):
def __init__(self, size, channels_in=3, channel_multiplier=2, blur_kernel=[1, 3, 3, 1], activate=True):
super().__init__()
channels = {
4: 512,
8: 512,
16: 512,
32: 512,
64: 256 * channel_multiplier,
128: 128 * channel_multiplier,
256: 64 * channel_multiplier,
512: 32 * channel_multiplier,
1024: 16 * channel_multiplier,
}
convs = [ConvLayer(channels_in, channels[size], 1)]
log_size = int(math.log(size, 2))
in_channel = channels[size]
for i in range(log_size, 2, -1):
out_channel = channels[2 ** (i - 1)]
convs.append(ResBlock(in_channel, out_channel, blur_kernel))
in_channel = out_channel
self.convs = nn.Sequential(*convs)
self.stddev_group = 4
self.stddev_feat = 1
self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
self.final_linear = nn.Sequential(
EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'),
EqualLinear(channels[4], 1),
)
self.activate = activate
def forward(self, x):
out = self.convs(x)
out = self.minibatch_discrimination(out, self.stddev_group, self.stddev_feat)
out = self.final_conv(out)
out = out.view(out.size(0), -1)
out = self.final_linear(out)
if self.activate:
out = out.sigmoid()
return {"out": out}
@staticmethod
def minibatch_discrimination(x, stddev_group, stddev_feat):
out = x
batch, channel, height, width = out.shape
group = min(batch, stddev_group)
stddev = out.view(group, -1, stddev_feat, channel // stddev_feat, height, width)
stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
stddev = stddev.repeat(group, 1, height, width)
out = torch.cat([out, stddev], 1)
return out
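# --- Usage sketch (not part of the original module): shape check for a
# 256x256 discriminator; relies on the legacy modules imported above.
def _demo_discriminator():
    d = Discriminator(size=256)
    x = torch.randn(4, 3, 256, 256)
    return d(x)["out"].shape  # torch.Size([4, 1])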
|
1707952
|
from openpredict.openpredict_model import get_predictions, get_similarities, load_similarity_embeddings, load_treatment_classifier, load_treatment_embeddings
import requests
import os
import re
def is_accepted_id(id_to_check):
    return id_to_check.lower().startswith(('omim', 'drugbank'))
biolinkVersion = os.getenv('BIOLINK_VERSION', '2.2.3')
def get_biolink_parents(concept):
concept_snakecase = concept.replace('biolink:', '')
concept_snakecase = re.sub(r'(?<!^)(?=[A-Z])', '_', concept_snakecase).lower()
try:
resolve_curies = requests.get('https://bl-lookup-sri.renci.org/bl/' + concept_snakecase + '/ancestors',
params={'version': biolinkVersion})
resp = resolve_curies.json()
resp.append(concept)
return resp
except Exception as e:
print('Error querying https://bl-lookup-sri.renci.org, using the original IDs')
return [concept]
def resolve_ids_with_nodenormalization_api(resolve_ids_list, resolved_ids_object):
resolved_ids_list = []
ids_to_normalize = []
for id_to_resolve in resolve_ids_list:
if is_accepted_id(id_to_resolve):
resolved_ids_list.append(id_to_resolve)
resolved_ids_object[id_to_resolve] = id_to_resolve
else:
ids_to_normalize.append(id_to_resolve)
# Query Translator NodeNormalization API to convert IDs to OMIM/DrugBank IDs
if len(ids_to_normalize) > 0:
try:
resolve_curies = requests.get('https://nodenormalization-sri.renci.org/get_normalized_nodes',
params={'curie': ids_to_normalize})
# Get corresponding OMIM IDs for MONDO IDs if match
resp = resolve_curies.json()
for resolved_id, alt_ids in resp.items():
for alt_id in alt_ids['equivalent_identifiers']:
if is_accepted_id(str(alt_id['identifier'])):
resolved_ids_list.append(str(alt_id['identifier']))
resolved_ids_object[str(alt_id['identifier'])] = resolved_id
except Exception as e:
print('Error querying the NodeNormalization API, using the original IDs')
return resolved_ids_list, resolved_ids_object
def resolve_id(id_to_resolve, resolved_ids_object):
if id_to_resolve in resolved_ids_object.keys():
return resolved_ids_object[id_to_resolve]
return id_to_resolve
def resolve_trapi_query(reasoner_query, app):
"""Convert an array of predictions objects to ReasonerAPI format
Run the get_predict to get the QueryGraph edges and nodes
{disease: OMIM:1567, drug: DRUGBANK:DB0001, score: 0.9}
:param: reasoner_query Query from Reasoner API
:return: Results as ReasonerAPI object
"""
# Example TRAPI message: https://github.com/NCATSTranslator/ReasonerAPI/blob/master/examples/Message/simple.json
query_graph = reasoner_query["message"]["query_graph"]
# Default query_options
model_id = 'openpredict-baseline-omim-drugbank'
n_results = None
min_score = None
max_score = None
    if 'query_options' in reasoner_query.keys():
        query_options = reasoner_query["query_options"]
        if 'n_results' in query_options:
            n_results = int(query_options["n_results"])
        if 'min_score' in query_options:
            min_score = float(query_options["min_score"])
        if 'max_score' in query_options:
            max_score = float(query_options["max_score"])
query_plan = {}
resolved_ids_object = {}
# if not similarity_embeddings or similarity_embeddings == {}:
# similarity_embeddings = None
# treatment_embeddings = None
# Parse the query_graph to build the query plan
for edge_id, qg_edge in query_graph["edges"].items():
# Build dict with all infos of associations to predict
query_plan[edge_id] = {
# 'predicates': qg_edge['predicates'],
# 'qedge_subjects': qg_edge['subject'],
'qg_source_id': qg_edge['subject'],
'qg_target_id': qg_edge['object']
}
if 'predicates' in qg_edge.keys():
query_plan[edge_id]['predicates'] = qg_edge['predicates']
else:
# Quick fix: in case no relation is provided
query_plan[edge_id]['predicates'] = ['biolink:treats']
# If single value provided for predicate: make it an array
# if not isinstance(query_plan[edge_id]['predicate'], list):
# query_plan[edge_id]['predicate'] = [ query_plan[edge_id]['predicate'] ]
# Get the nodes infos in the query plan object
for node_id, node in query_graph["nodes"].items():
# for node in query_graph['nodes']:
if node_id == qg_edge['subject'] or node_id == qg_edge['object']:
# if node_id == qg_edge['subject']:
if 'ids' in node and 'from_qg_id' not in query_plan[edge_id].keys():
# TOREMOVE: If single values provided for id or category: make it an array
# if not isinstance(node['id'], list):
# node['id'] = [ node['id'] ]
# Resolve the curie provided with the NodeNormalization API
query_plan[edge_id]['from_kg_id'], resolved_ids_object = resolve_ids_with_nodenormalization_api(node['ids'], resolved_ids_object)
query_plan[edge_id]['from_qg_id'] = node_id
if 'categories' in node.keys():
query_plan[edge_id]['from_type'] = node['categories']
else:
query_plan[edge_id]['from_type'] = 'biolink:NamedThing'
# TOREMOVE: handling of single values
# if not isinstance(query_plan[edge_id]['from_type'], list):
# query_plan[edge_id]['from_type'] = [ query_plan[edge_id]['from_type'] ]
elif 'to_qg_id' not in query_plan[edge_id].keys():
# The node without curie is the association's "to"
query_plan[edge_id]['to_qg_id'] = node_id
if 'ids' in node.keys():
query_plan[edge_id]['to_kg_id'], resolved_ids_object = resolve_ids_with_nodenormalization_api(node['ids'], resolved_ids_object)
if 'categories' in node.keys():
query_plan[edge_id]['to_type'] = node['categories']
else:
query_plan[edge_id]['to_type'] = ['biolink:NamedThing']
if not isinstance(query_plan[edge_id]['to_type'], list):
query_plan[edge_id]['to_type'] = [ query_plan[edge_id]['to_type'] ]
knowledge_graph = {'nodes': {}, 'edges': {}}
node_dict = {}
query_results = []
kg_edge_count = 0
# supportedCategories = ['biolink:Drug', 'biolink:Disease', 'biolink:NamedThing', 'biolink:ChemicalSubstance']
# Now iterates the query plan to execute each query
for edge_qg_id in query_plan.keys():
# print('Resolve similar_to for ' + str(edge_qg_id))
similar_parents = get_biolink_parents('biolink:similar_to')
if any(i in similar_parents for i in query_plan[edge_qg_id]['predicates']):
if 'from_kg_id' in query_plan[edge_qg_id]:
for id_to_predict in query_plan[edge_qg_id]['from_kg_id']:
try:
# TODO: make it dynamic by passing the TRAPI app object with all models
# currently using default models for drug and disease similarity
similarity_model_id = 'drugs_fp_embed.txt'
if 'biolink:Disease' in query_plan[edge_qg_id]['from_type'] or query_plan[edge_qg_id]['from_type'] == 'biolink:Disease':
similarity_model_id = 'disease_hp_embed.txt'
# similarity_model_id = model_id
emb_vectors = app.similarity_embeddings[similarity_model_id]
similarity_json, source_target_predictions = get_similarities(
query_plan[edge_qg_id]['from_type'],
id_to_predict,
emb_vectors, min_score, max_score, n_results
)
# [
# {
# "id": "DRUGBANK:DB00390",
# "label": "Digoxin",
# "score": 0.9826133251190186,
# "type": "drug"
# },
# {
# "id": "DRUGBANK:DB00396",
# "label": "Progesterone",
# "score": 0.9735659956932068,
# "type": "drug"
# },
for hit in similarity_json:
source_node_id = resolve_id(id_to_predict, resolved_ids_object)
target_node_id = resolve_id(hit['id'], resolved_ids_object)
node_dict[source_node_id] = {
'type': query_plan[edge_qg_id]['from_type']
}
node_dict[target_node_id] = {
'type': hit['type']
}
if 'label' in hit.keys():
node_dict[target_node_id]['label'] = hit['label']
edge_kg_id = 'e' + str(kg_edge_count)
association_score = str(hit['score'])
# See attributes examples: https://github.com/NCATSTranslator/Evidence-Provenance-Confidence-Working-Group/blob/master/attribute_epc_examples/COHD_TRAPI1.1_Attribute_Example_2-3-21.yml
edge_dict = {
# TODO: not required anymore? 'association_type': edge_association_type,
# 'relation': relation,
# More details on attributes: https://github.com/NCATSTranslator/ReasonerAPI/blob/master/docs/reference.md#attribute-
'attributes': [
{
"description": "model_id",
"attribute_type_id": "EDAM:data_1048",
"value": similarity_model_id
},
{
# TODO: use has_confidence_level?
"description": "score",
"attribute_type_id": "EDAM:data_1772",
"value": association_score
# https://www.ebi.ac.uk/ols/ontologies/edam/terms?iri=http%3A%2F%2Fedamontology.org%2Fdata_1772&viewMode=All&siblings=false
},
{
'attribute_type_id': 'biolink:aggregator_knowledge_source',
'value': 'infores:openpredict',
'value_type_id': 'biolink:InformationResource',
'attribute_source': 'infores:openpredict',
'value_url': 'https://openpredict.semanticscience.org/query'
},
{
'attribute_type_id': 'biolink:supporting_data_source',
'value': 'infores:cohd',
'value_type_id': 'biolink:InformationResource',
'attribute_source': 'infores:openpredict',
'value_url': 'https://openpredict.semanticscience.org'
},
]
}
edge_dict['subject'] = source_node_id
edge_dict['object'] = target_node_id
edge_dict['predicate'] = 'biolink:similar_to'
knowledge_graph['edges'][edge_kg_id] = edge_dict
# Add the bindings to the results object
result = {'edge_bindings': {}, 'node_bindings': {}}
result['edge_bindings'][edge_qg_id] = [
{
"id": edge_kg_id
}
]
result['node_bindings'][query_plan[edge_qg_id]['from_qg_id']] = [
{
"id": source_node_id
}
]
result['node_bindings'][query_plan[edge_qg_id]['to_qg_id']] = [
{
"id": target_node_id
}
]
query_results.append(result)
kg_edge_count += 1
if kg_edge_count == n_results:
break
except Exception as e:
print('Error running similarity search')
print(e)
## Resolve treats/treated_by (slightly different object returned by get_predictions)
# print('Resolve treats/treated_by')
treats_parents = get_biolink_parents('biolink:treats') + get_biolink_parents('biolink:treated_by')
if any(i in treats_parents for i in query_plan[edge_qg_id]['predicates']):
# Resolve when asking for treats prediction
drugdisease_parents = get_biolink_parents('biolink:Drug') + get_biolink_parents('biolink:Disease')
if any(i in drugdisease_parents for i in query_plan[edge_qg_id]['from_type']) and any(i in drugdisease_parents for i in query_plan[edge_qg_id]['to_type']):
# Iterate over the list of ids provided
for id_to_predict in query_plan[edge_qg_id]['from_kg_id']:
try:
# Run OpenPredict to get predictions
bte_response, prediction_json = get_predictions(
id_to_predict, model_id, app,
min_score, max_score, n_results=None
)
                    except Exception:
prediction_json = []
for association in prediction_json:
# id/type of nodes are registered in a dict to avoid duplicate in knowledge_graph.nodes
# Build dict of node ID : label
source_node_id = resolve_id(association['source']['id'], resolved_ids_object)
target_node_id = resolve_id(association['target']['id'], resolved_ids_object)
# If the target ID is given, we filter here from the predictions
if 'to_kg_id' in query_plan[edge_qg_id] and target_node_id not in query_plan[edge_qg_id]['to_kg_id']:
pass
else:
edge_kg_id = 'e' + str(kg_edge_count)
# Get the ID of the predicted entity in result association
# based on the type expected for the association "to" node
# node_dict[id_to_predict] = query_plan[edge_qg_id]['from_type']
# node_dict[association[query_plan[edge_qg_id]['to_type']]] = query_plan[edge_qg_id]['to_type']
node_dict[source_node_id] = {
'type': association['source']['type']
}
if 'label' in association['source'] and association['source']['label']:
node_dict[source_node_id]['label'] = association['source']['label']
node_dict[target_node_id] = {
'type': association['target']['type']
}
if 'label' in association['target'] and association['target']['label']:
node_dict[target_node_id]['label'] = association['target']['label']
# edge_association_type = 'biolink:ChemicalToDiseaseOrPhenotypicFeatureAssociation'
source = 'OpenPredict'
relation = 'RO:0002434'
# relation = 'OBOREL:0002606'
association_score = str(association['score'])
# See attributes examples: https://github.com/NCATSTranslator/Evidence-Provenance-Confidence-Working-Group/blob/master/attribute_epc_examples/COHD_TRAPI1.1_Attribute_Example_2-3-21.yml
edge_dict = {
# TODO: not required anymore? 'association_type': edge_association_type,
# 'relation': relation,
# More details on attributes: https://github.com/NCATSTranslator/ReasonerAPI/blob/master/docs/reference.md#attribute-
'attributes': [
{
"description": "model_id",
"attribute_type_id": "EDAM:data_1048",
"value": model_id
},
{
# TODO: use has_confidence_level?
"description": "score",
"attribute_type_id": "EDAM:data_1772",
"value": association_score
# https://www.ebi.ac.uk/ols/ontologies/edam/terms?iri=http%3A%2F%2Fedamontology.org%2Fdata_1772&viewMode=All&siblings=false
},
{
'attribute_type_id': 'biolink:aggregator_knowledge_source',
'value': 'infores:openpredict',
'value_type_id': 'biolink:InformationResource',
'attribute_source': 'infores:openpredict',
'value_url': 'https://openpredict.semanticscience.org/query'
},
{
'attribute_type_id': 'biolink:supporting_data_source',
'value': 'infores:cohd',
'value_type_id': 'biolink:InformationResource',
'attribute_source': 'infores:openpredict',
'value_url': 'https://openpredict.semanticscience.org'
},
]
}
# Map the source/target of query_graph to source/target of association
# if association['source']['type'] == query_plan[edge_qg_id]['from_type']:
edge_dict['subject'] = source_node_id
edge_dict['object'] = target_node_id
# Define the predicate depending on the association source type returned by OpenPredict classifier
if association['source']['type'] == 'drug':
# and 'biolink:Drug' in query_plan[edge_qg_id]['predicates']: ?
edge_dict['predicate'] = 'biolink:treats'
else:
edge_dict['predicate'] = 'biolink:treated_by'
# Add the association in the knowledge_graph as edge
# Use the type as key in the result association dict (for IDs)
knowledge_graph['edges'][edge_kg_id] = edge_dict
# Add the bindings to the results object
result = {'edge_bindings': {}, 'node_bindings': {}}
result['edge_bindings'][edge_qg_id] = [
{
"id": edge_kg_id
}
]
result['node_bindings'][query_plan[edge_qg_id]['from_qg_id']] = [
{
"id": source_node_id
}
]
result['node_bindings'][query_plan[edge_qg_id]['to_qg_id']] = [
{
"id": target_node_id
}
]
query_results.append(result)
kg_edge_count += 1
if kg_edge_count == n_results:
break
else:
print('BioLink category not parents of Drug or Disease, no results returned')
prediction_json = []
else:
prediction_json = []
# Generate kg nodes from the dict of nodes + result from query to resolve labels
for node_id, properties in node_dict.items():
node_category = properties['type']
if isinstance(node_category, str) and not node_category.startswith('biolink:'):
node_category = 'biolink:' + node_category.capitalize()
if isinstance(node_category, str):
node_category = [ node_category ]
node_to_add = {
'categories': node_category ,
}
if 'label' in properties and properties['label']:
node_to_add['name'] = properties['label']
knowledge_graph['nodes'][node_id] = node_to_add
return {"message": {'knowledge_graph': knowledge_graph, 'query_graph': query_graph, 'results': query_results}}
example_trapi = {
"message": {
"query_graph": {
"edges": {
"e01": {
"object": "n1",
"predicates": ["biolink:treated_by", "biolink:treats"],
"subject": "n0"
}
},
"nodes": {
"n0": {
"categories": ["biolink:Disease", "biolink:Drug"],
"ids": ["OMIM:246300", "DRUGBANK:DB00394"]
},
"n1": {
"categories": ["biolink:Drug", "biolink:Disease"]
}
}
}
},
"query_options": {
"max_score": 1,
"min_score": 0.5
}
}
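# --- Usage sketch (not part of the original module): running the example query
# above. `app` is a hypothetical application object exposing the loaded models
# (it must provide `similarity_embeddings`, as used in resolve_trapi_query).
# result = resolve_trapi_query(example_trapi, app)
# print(len(result["message"]["results"]), "results")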
|
1707956
|
import string
import numpy as np
from scipy.spatial import distance
def softmax(array):
"""Returns the numerically stable softmax of a given array"""
return (np.exp(array-np.max(array)))/np.sum(np.exp(array-np.max(array)))
def cosine_similarity(a, b):
"""Custom cosine similarity"""
return np.dot(a, b)/(np.linalg.norm(a)*np.linalg.norm(b))
def norm_hamming(string1,string2):
"""Custom Normalized Hamming Distance"""
return distance.hamming(list(string1), list(string2))
def jaccard_binary(x,y):
"""Returns the similarity between two binary vectors"""
intersection = np.logical_and(x, y)
union = np.logical_or(x, y)
similarity = intersection.sum() / float(union.sum())
return similarity
def jaccard_similarity(doc1, doc2):
# List the unique words in a document
words_doc1 = set(doc1.lower().split())
words_doc2 = set(doc2.lower().split())
# Find the intersection of words list of doc1 & doc2
intersection = words_doc1.intersection(words_doc2)
# Find the union of words list of doc1 & doc2
union = words_doc1.union(words_doc2)
# Calculate Jaccard similarity score
# using length of intersection set divided by length of union set
return float(len(intersection)) / len(union)
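# --- Usage sketch (not part of the original module): expected values for the
# helpers above, on small illustrative inputs.
def _demo_metrics():
    print(softmax(np.array([1.0, 2.0, 3.0])).sum())   # ~1.0 (probabilities)
    print(cosine_similarity([1, 0], [0, 1]))          # 0.0 (orthogonal)
    print(jaccard_similarity("a b c", "b c d"))       # 0.5 (2 shared / 4 total)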
|
1708006
|
import opendbpy as odb
import os
def createSimpleDB():
db = odb.dbDatabase.create()
tech = odb.dbTech.create(db)
L1 = odb.dbTechLayer_create(tech, 'L1', 'ROUTING')
lib = odb.dbLib.create(db, "lib")
odb.dbChip.create(db)
#Creating Master and2 and or2
and2 = createMaster2X1(lib, 'and2', 1000, 1000, 'a', 'b', 'o')
or2 = createMaster2X1(lib, 'or2', 500, 500, 'a', 'b', 'o')
return db, lib
def createMultiLayerDB():
db = odb.dbDatabase.create()
tech = odb.dbTech.create(db)
m1 = odb.dbTechLayer_create(tech, "M1", 'ROUTING')
m1.setWidth(2000)
m2 = odb.dbTechLayer_create(tech, "M2", 'ROUTING')
m2.setWidth(2000)
m3 = odb.dbTechLayer_create(tech, "M3", 'ROUTING')
m3.setWidth(2000)
v12 = odb.dbTechVia_create(tech, "VIA12")
odb.dbBox_create(v12, m1, 0, 0, 2000, 2000)
odb.dbBox_create(v12, m2, 0, 0, 2000, 2000)
v23 = odb.dbTechVia_create(tech, "VIA23")
odb.dbBox_create(v23, m2, 0, 0, 2000, 2000)
odb.dbBox_create(v23, m3, 0, 0, 2000, 2000)
return db, tech, m1, m2, m3, v12, v23
#logical expr OUT = (IN1&IN2)
#
# (n1) +-----
# IN1--------|a \ (n3)
# (n2) | (i1)o|-----------OUT
# IN2--------|b /
# +-----
def create1LevelBlock(db, lib, parent):
blockName = '1LevelBlock'
block = odb.dbBlock_create(parent, blockName, ',')
#Creating Master and2 and instance inst
and2 = lib.findMaster('and2')
inst = odb.dbInst.create(block, and2, "inst")
#creating our nets
n1 = odb.dbNet.create(block, "n1")
n2 = odb.dbNet.create(block, "n2")
n3 = odb.dbNet.create(block, "n3")
IN1 = odb.dbBTerm.create(n1, 'IN1')
IN1.setIoType('INPUT')
IN2 = odb.dbBTerm.create(n2, 'IN2')
IN2.setIoType('INPUT')
OUT = odb.dbBTerm.create(n3, 'OUT')
OUT.setIoType('OUTPUT')
#connecting nets
odb.dbITerm.connect(inst, n1, inst.getMaster().findMTerm('a'))
odb.dbITerm.connect(inst, n2, inst.getMaster().findMTerm('b'))
odb.dbITerm.connect(inst, n3, inst.getMaster().findMTerm('o'))
return block
#logical expr OUT = (IN1&IN2) | (IN3&IN4)
# (n1) +-----
# IN1--------|a \ (n5)
# (n2) | (i1)o|-----------+
# IN2--------|b / | +-------
# +----- +--------\a \ (n7)
# ) (i3)o|---------------OUT
# (n3) +----- +--------/b /
# IN3--------|a \ (n6) | +-------
# (n4) | (i2)o|-----------+
# IN4--------|b /
# +-----
def create2LevelBlock(db, lib, parent):
blockName = '2LevelBlock'
block = odb.dbBlock_create(parent, blockName, ',')
and2 = lib.findMaster('and2')
or2 = lib.findMaster('or2')
#creating instances
i1 = odb.dbInst.create(block, and2, "i1")
i2 = odb.dbInst.create(block, and2, "i2")
i3 = odb.dbInst.create(block, or2, "i3")
#creating nets and block terms
n1 = odb.dbNet.create(block, "n1")
n2 = odb.dbNet.create(block, "n2")
n3 = odb.dbNet.create(block, "n3")
n4 = odb.dbNet.create(block, "n4")
n5 = odb.dbNet.create(block, "n5")
n6 = odb.dbNet.create(block, "n6")
n7 = odb.dbNet.create(block, "n7")
IN1 = odb.dbBTerm.create(n1, 'IN1')
IN1.setIoType('INPUT')
IN2 = odb.dbBTerm.create(n2, 'IN2')
IN2.setIoType('INPUT')
IN3 = odb.dbBTerm.create(n3, 'IN3')
IN3.setIoType('INPUT')
IN4 = odb.dbBTerm.create(n4, 'IN4')
IN4.setIoType('INPUT')
OUT = odb.dbBTerm.create(n7, 'OUT')
OUT.setIoType('OUTPUT')
#connecting nets
odb.dbITerm.connect(i1, n1, i1.getMaster().findMTerm('a'))
odb.dbITerm.connect(i1, n2, i1.getMaster().findMTerm('b'))
odb.dbITerm.connect(i1, n5, i1.getMaster().findMTerm('o'))
odb.dbITerm.connect(i2, n3, i2.getMaster().findMTerm('a'))
odb.dbITerm.connect(i2, n4, i2.getMaster().findMTerm('b'))
odb.dbITerm.connect(i2, n6, i2.getMaster().findMTerm('o'))
odb.dbITerm.connect(i3, n5, i3.getMaster().findMTerm('a'))
odb.dbITerm.connect(i3, n6, i3.getMaster().findMTerm('b'))
odb.dbITerm.connect(i3, n7, i3.getMaster().findMTerm('o'))
P1 = odb.dbBPin_create(IN1)
P2 = odb.dbBPin_create(IN2)
P3 = odb.dbBPin_create(IN3)
P4 = odb.dbBPin_create(IN4)
P5 = odb.dbBPin_create(OUT)
return block
# +-----
# |in1 \
# out|
# |in2 /
# +-----
def createMaster2X1(lib, name, width, height, in1, in2, out):
master = odb.dbMaster_create(lib, name)
master.setWidth(width)
master.setHeight(height)
master.setType('CORE')
odb.dbMTerm.create(master, in1, 'INPUT')
odb.dbMTerm.create(master, in2, 'INPUT')
odb.dbMTerm.create(master, out, 'OUTPUT')
master.setFrozen()
return master
def createMaster3X1(lib, name, width, height, in1, in2, in3, out):
master = odb.dbMaster_create(lib, name)
master.setWidth(width)
master.setHeight(height)
master.setType('CORE')
odb.dbMTerm.create(master, in1, 'INPUT')
odb.dbMTerm.create(master, in2, 'INPUT')
odb.dbMTerm.create(master, in3, 'INPUT')
odb.dbMTerm.create(master, out, 'OUTPUT')
master.setFrozen()
return master
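# --- Usage sketch (not part of the original module): building the two-level
# block on top of the simple DB. Passing the chip as the parent mirrors how
# these helpers are typically called from tests (an assumption, not shown here).
def _demo_build():
    db, lib = createSimpleDB()
    block = create2LevelBlock(db, lib, db.getChip())
    return block.getName()  # '2LevelBlock'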
|