code stringlengths 101 5.91M |
|---|
_materialize('core')
class Slice(UnaryOpBase):
    """Symbolic tensor slice op: start:end:step along one randomly chosen axis.

    start/end/step may be concrete ints or z3 symbolic values; the slice
    axis and 'region' are drawn once and cached in ``extra_attrs``.
    """
    in_dtypes = [(i,) for i in DTYPE_GEN_ALL]
    # sentinel values: INT_MAX marks "slice to the end of the axis"
    INT_MAX = ((2 ** 63) - 1)
    INT_MIN = (- (2 ** 63))
    def __init__(self, start, end, step):
        super().__init__()
        # applies to any tensor of rank >= 1; rank is preserved
        self.inp_ranks = [rank_from(1)]
        self.out_ranks = [rank_from(1)]
        self.start = start
        self.end = end
        self.step = step
    def __str__(self) -> str:
        """Readable form listing whichever attributes are concretized."""
        if ('axis' in self.extra_attrs):
            tail = {'axis': self.extra_attrs['axis'], 'region': self.extra_attrs['region']}
        else:
            tail = {}
        if isinstance(self.start, int):
            tail['start'] = self.start
        if isinstance(self.end, int):
            tail['end'] = self.end
        if isinstance(self.step, int):
            tail['step'] = self.step
        return ((self.name() + ' ') + str(tail).replace(':', '='))
    def _get_attrs(self, ndims):
        """Lazily pick (and cache) the slice axis/region for rank ``ndims``."""
        ConstraintCheck.true((ndims > 0))
        if ('axis' not in self.extra_attrs):
            self.extra_attrs['ndims'] = ndims
            self.extra_attrs['axis'] = random.randint(0, (ndims - 1))
            self.extra_attrs['region'] = random.choice(['left', 'mid', 'right'])
            # occasionally force "slice to the end" via the INT_MAX sentinel
            if (random.uniform(0, 1) < 0.1):
                self.end = self.INT_MAX
        return self.extra_attrs['axis']
    def requires(self, input_shapes: List[AbsTensor]):
        """Emit z3 constraints making the slice valid for the input shape."""
        inp = input_shapes[0]
        axis = self._get_attrs(inp.ndims)
        # NOTE(review): reg is currently unused in this method
        reg = self.extra_attrs['region']
        cons = []
        dim_s = inp.shape[axis]
        # start in [0, dim-1]; end in [0, dim]
        (l, r) = (0, nnsmith_sub(dim_s, 1))
        (ll, rr) = (0, dim_s)
        assert (not isinstance(self.start, int))
        cons.append(z3.And(nnsmith_ge(self.start, l), nnsmith_le(self.start, r)))
        if (not isinstance(self.end, int)):
            cons.append(z3.And(nnsmith_ge(self.end, ll), nnsmith_le(self.end, rr)))
            cons.append(nnsmith_gt(self.end, self.start))
        else:
            # only the INT_MAX sentinel is allowed as a concrete end
            assert (self.end == self.INT_MAX)
        cons.append(nnsmith_ge(self.step, 1))
        cons.append(nnsmith_le(self.step, dim_s))
        return cons
    def type_transfer(self, input_shapes: List[AbsTensor]) -> List[AbsTensor]:
        """Output shape: ceil((end - start) / step) along the sliced axis."""
        inp = input_shapes[0]
        axis = self._get_attrs(inp.ndims)
        s = list(inp.shape)
        end = self.end
        if (self.end == Slice.INT_MAX):
            end = inp.shape[axis]
        # ceiling division expressed with the nnsmith symbolic arithmetic helpers
        s[axis] = nnsmith_div(nnsmith_add(nnsmith_sub(end, self.start), nnsmith_sub(self.step, 1)), self.step)
        return [AbsTensor(s, input_shapes[0].dtype)]
    def deduct_inp_ranks_and_dtype(self, out_abs_tensor: List[AbsTensor]) -> List[Tuple[(int, DType)]]:
        """Input rank/dtype equal the output's (slicing preserves both)."""
        return [(out_abs_tensor[0].ndims, out_abs_tensor[0].dtype)]
def train(model, device, train_loader, sm_loader, criterion, optimizer, epoch, args, writer):
    """One epoch of certified-robust training with CROWN-IBP bounds.

    Wraps ``model`` in a BoundSequential, ramps epsilon per optimizer step,
    blends the CROWN backward bound with the IBP interval bound via ``beta``,
    and optimizes the robust cross-entropy. Progress goes to stdout and the
    tensorboard ``writer``.
    """
    num_class = 10
    # sa[i] enumerates the (num_class-1) wrong labels for true label i
    sa = np.zeros((num_class, (num_class - 1)), dtype=np.int32)
    for i in range(sa.shape[0]):
        for j in range(sa.shape[1]):
            if (j < i):
                sa[i][j] = j
            else:
                sa[i][j] = (j + 1)
    sa = torch.LongTensor(sa)
    batch_size = (args.batch_size * 2)
    schedule_start = 0
    num_steps_per_epoch = len(train_loader)
    # linear epsilon ramp measured in optimizer steps
    eps_scheduler = EpsilonScheduler('linear', args.schedule_start, (((args.schedule_start + args.schedule_length) - 1) * num_steps_per_epoch), args.starting_epsilon, args.epsilon, num_steps_per_epoch)
    end_eps = eps_scheduler.get_eps((epoch + 1), 0)
    start_eps = eps_scheduler.get_eps(epoch, 0)
    print(' ->->->->->->->->->-> One epoch with CROWN-IBP ({:.6f}-{:.6f}) <-<-<-<-<-<-<-<-<-<-'.format(start_eps, end_eps))
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4f')
    ibp_losses = AverageMeter('IBP_Loss', ':.4f')
    top1 = AverageMeter('Acc_1', ':6.2f')
    ibp_acc1 = AverageMeter('IBP1', ':6.2f')
    progress = ProgressMeter(len(train_loader), [batch_time, data_time, losses, ibp_losses, top1, ibp_acc1], prefix='Epoch: [{}]'.format(epoch))
    # wrap the network so interval/backward bound passes become available
    model = BoundSequential.convert(model, {'same-slope': False, 'zero-lb': False, 'one-lb': False}).to(device)
    model.train()
    end = time.time()
    dataloader = (train_loader if (sm_loader is None) else zip(train_loader, sm_loader))
    for (i, data) in enumerate(dataloader):
        if sm_loader:
            # two loaders zipped together: concatenate both sub-batches
            (images, target) = (torch.cat([d[0] for d in data], 0).to(device), torch.cat([d[1] for d in data], 0).to(device))
        else:
            (images, target) = (data[0].to(device), data[1].to(device))
        if (i == 0):
            print(images.shape, target.shape, f'Batch_size from args: {args.batch_size}', 'lr: {:.5f}'.format(optimizer.param_groups[0]['lr']))
            print(f'Training images range: {[torch.min(images), torch.max(images)]}')
        output = model(images, method_opt='forward')
        ce = nn.CrossEntropyLoss()(output, target)
        eps = eps_scheduler.get_eps(epoch, i)
        # c[k] holds the rows e_y - e_j comparing the true class against
        # every other class (margin specification matrix)
        c = (torch.eye(num_class).type_as(images)[target].unsqueeze(1) - torch.eye(num_class).type_as(images).unsqueeze(0))
        I = (~ (target.unsqueeze(1) == torch.arange(num_class).to(device).type_as(target).unsqueeze(0)))
        c = c[I].view(images.size(0), (num_class - 1), num_class).to(device)
        sa_labels = sa[target].to(device)
        lb_s = torch.zeros(images.size(0), num_class).to(device)
        ub_s = torch.zeros(images.size(0), num_class).to(device)
        # L-inf ball around the batch, clipped to the observed value range
        data_ub = torch.min((images + eps), images.max()).to(device)
        data_lb = torch.max((images - eps), images.min()).to(device)
        (ub, ilb, relu_activity, unstable, dead, alive) = model(norm=np.inf, x_U=data_ub, x_L=data_lb, eps=eps, C=c, method_opt='interval_range')
        crown_final_beta = 0.0
        # beta blends CROWN (tight, slow) with IBP (loose, fast); it decays
        # toward crown_final_beta as eps approaches args.epsilon
        beta = ((args.epsilon - (eps * (1.0 - crown_final_beta))) / args.epsilon)
        if (beta < 1e-05):
            lb = ilb
        else:
            (_, _, clb, bias) = model(norm=np.inf, x_U=data_ub, x_L=data_lb, eps=eps, C=c, method_opt='backward_range')
            lb = ((clb * beta) + (ilb * (1 - beta)))
        # scatter margin lower bounds back to class positions for the loss
        lb = lb_s.scatter(1, sa_labels, lb)
        robust_ce = criterion((- lb), target)
        racc = accuracy((- lb), target, topk=(1,))
        loss = robust_ce
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        top1.update(acc1[0].item(), images.size(0))
        losses.update(ce.item(), images.size(0))
        ibp_losses.update(robust_ce.item(), images.size(0))
        ibp_acc1.update(racc[0].item(), images.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            progress.display(i)
            progress.write_to_tensorboard(writer, 'train', ((epoch * len(train_loader)) + i))
        if (i == 0):
            writer.add_image('training-images', torchvision.utils.make_grid(images[0:(len(images) // 4)]))
def _check_dt_is_sorted(df, dt_col):
import numpy as np
import warnings
df = df.copy()
try:
res = (np.diff(df[dt_col].values.astype(np.float32)) >= 0).all()
if (not res):
from bigdl.nano.utils.common import invalidInputError
invalidInputError(False, f'{dt_col} must be sorted.')
except (ValueError, TypeError):
warnings.warn(f'{dt_col} may not be sorted.', Warning) |
def test_ExponentialEps():
    """ExponentialEps should reproduce a log-spaced schedule from 10 down to 5."""
    n_iter = 5
    expected = np.logspace(np.log10(10), np.log10(5), n_iter)
    eps = abcpmc.ExponentialEps(n_iter, expected[0], expected[-1])
    last = None
    for last, want in zip(eps, expected):
        assert last == want
    # the iterator must end exactly on the final threshold
    assert last == expected[-1]
_model
def ig_resnext101_32x8d(pretrained=True, **kwargs):
    """ResNeXt-101 32x8d with Instagram weakly-supervised pretraining."""
    arch_kwargs = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs)
    return _create_resnet('ig_resnext101_32x8d', pretrained, **arch_kwargs)
def setup_training_loop_kwargs(gpus=None, snap=None, seed=None, data=None, video_balance=None, sg2_pkl=None, noise_mode=None, cfg=None, kimg=None, batch=None, optim=None, resume=None, allow_tf32=None, nobench=None, workers=None, suffix=None):
    """Translate CLI options into the EasyDict consumed by the training loop.

    Every argument is optional except ``data`` and ``sg2_pkl``; None picks
    the documented default. Returns ``(desc, args, dataset_size)`` where
    ``desc`` names the run. Raises UserError for invalid user-facing values.
    """
    args = dnnlib.EasyDict()
    # ----- hardware / snapshot cadence / seed -----
    if (gpus is None):
        gpus = 1
    assert isinstance(gpus, int)
    if (not ((gpus >= 1) and ((gpus & (gpus - 1)) == 0))):
        raise UserError('--gpus must be a power of two')
    args.num_gpus = gpus
    if (snap is None):
        snap = 50
    assert isinstance(snap, int)
    if (snap < 1):
        raise UserError('--snap must be at least 1')
    args.image_snapshot_ticks = snap
    args.network_snapshot_ticks = snap
    if (seed is None):
        seed = 0
    assert isinstance(seed, int)
    args.random_seed = seed
    # ----- dataset -----
    assert (data is not None)
    assert isinstance(data, str)
    if (video_balance is None):
        video_balance = False
    args.training_set_kwargs = dnnlib.EasyDict(class_name='training.dataset.PSPDataset', source_root=data, target_root=data, label_nc=0, transform_type='train', video_balance=video_balance)
    args.test_set_kwargs = dnnlib.EasyDict(class_name='training.dataset.PSPDataset', source_root=data, target_root=data, label_nc=0, transform_type='test', video_balance=video_balance)
    args.train_loader_kwargs = dnnlib.EasyDict(pin_memory=True, num_workers=3, prefetch_factor=2)
    args.test_loader_kwargs = dnnlib.EasyDict(pin_memory=True, num_workers=3, prefetch_factor=2, shuffle=False, drop_last=True)
    try:
        # instantiate once to validate the path and capture size/name, then discard
        training_set = dnnlib.util.construct_class_by_name(**args.training_set_kwargs)
        test_set = dnnlib.util.construct_class_by_name(**args.test_set_kwargs)
        dataset_size = len(training_set.target_paths)
        desc = training_set.name
        del training_set
        del test_set
    except IOError as err:
        raise UserError(f'--data: {err}')
    # ----- base configuration preset -----
    if (cfg is None):
        cfg = 'psp_paper'
    assert isinstance(cfg, str)
    desc += f'-{cfg}'
    cfg_specs = {'psp_paper': dict(ref_gpus=8, kimg=4000, mb=64, mbstd=8, lrate=0.0001, id_lambda=0.1, l2_lambda=1, lpips_lambda=0.8), 'psp_auto': dict(ref_gpus=(- 1), kimg=4000, mb=(- 1), mbstd=8, lrate=0.0001, id_lambda=0.1, l2_lambda=1, lpips_lambda=0.8), 'psp_sky_auto': dict(ref_gpus=(- 1), kimg=4000, mb=(- 1), mbstd=8, lrate=0.0001, id_lambda=0, l2_lambda=1, lpips_lambda=0.8)}
    assert (cfg in cfg_specs)
    spec = dnnlib.EasyDict(cfg_specs[cfg])
    if ('auto' in cfg):
        # 'auto' presets (ref_gpus/mb == -1) scale batch size to available GPUs
        spec.ref_gpus = args.num_gpus
        spec.mb = (args.num_gpus * spec.mbstd)
    # ----- generator pickle / noise mode / pSp encoder -----
    assert (sg2_pkl is not None)
    args.G_pkl = sg2_pkl
    if (noise_mode is None):
        noise_mode = 'const'
    args.noise_mode = noise_mode
    irse50 = psp_model_paths['ir_se50']
    pSp_opts = dnnlib.EasyDict(output_size=256, encoder_type='BackboneEncoderUsingLastLayerIntoW', irse50=irse50, start_from_latent_avg=True, learn_in_w=True, input_nc=3)
    args.PSP_kwargs = dnnlib.EasyDict(class_name='training.networks_psp.pSp', opts=pSp_opts)
    # auxiliary perceptual/identity losses
    lpips_kwargs = dnnlib.EasyDict(class_name='auxiliary.lpips.lpips.LPIPS', net_type='alex')
    id_kwargs = dnnlib.EasyDict(class_name='auxiliary.id_loss.IDLoss')
    args.auxiliary_kwargs = dnnlib.EasyDict(lpips_kwargs=lpips_kwargs, id_kwargs=id_kwargs)
    # ----- optimizer / loss / schedule -----
    if (optim is None):
        optim = 'Ranger'
    assert ((optim is not None) and (optim in ['Ranger', 'Adam']))
    if (optim == 'Adam'):
        args.PSP_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate)
    else:
        args.PSP_opt_kwargs = dnnlib.EasyDict(class_name='auxiliary.ranger.Ranger', lr=spec.lrate)
    args.loss_kwargs = dnnlib.EasyDict(class_name='training.loss_psp.pSpLoss', lpips_lambda=spec.lpips_lambda, l2_lambda=spec.l2_lambda, id_lambda=spec.id_lambda)
    args.total_kimg = spec.kimg
    args.batch_size = spec.mb
    args.batch_gpu = (spec.mb // spec.ref_gpus)
    # explicit --kimg/--batch override the preset values
    if (kimg is not None):
        assert isinstance(kimg, int)
        if (not (kimg >= 1)):
            raise UserError('--kimg must be at least 1')
        desc += f'-kimg{kimg:d}'
        args.total_kimg = kimg
    if (batch is not None):
        assert isinstance(batch, int)
        if (not ((batch >= 1) and ((batch % gpus) == 0))):
            raise UserError('--batch must be at least 1 and divisible by --gpus')
        desc += f'-batch{batch}'
        args.batch_size = batch
        args.batch_gpu = (batch // gpus)
    args.train_loader_kwargs.batch_size = args.test_loader_kwargs.batch_size = args.batch_gpu
    # ----- resume / perf toggles / workers -----
    assert ((resume is None) or isinstance(resume, str))
    if (resume is None):
        resume = 'noresume'
    elif (resume == 'noresume'):
        desc += '-noresume'
    else:
        desc += '-resumecustom'
    args.resume_pkl = resume
    if (nobench is None):
        nobench = False
    assert isinstance(nobench, bool)
    if nobench:
        args.cudnn_benchmark = False
    if (allow_tf32 is None):
        allow_tf32 = False
    assert isinstance(allow_tf32, bool)
    if allow_tf32:
        args.allow_tf32 = True
    if (workers is not None):
        assert isinstance(workers, int)
        if (not (workers >= 1)):
            raise UserError('--workers must be at least 1')
        args.train_loader_kwargs.num_workers = workers
        args.test_loader_kwargs.num_workers = workers
    if (suffix is None):
        suffix = ''
    # NOTE(review): a trailing '_' is appended even when suffix is empty —
    # confirm that is intentional for run naming.
    desc += f'_{suffix}'
    return (desc, args, dataset_size)
class TestQKVLinear(unittest.TestCase):
    """Checks that a fused QKV linear matches three separate Q/K/V
    projections, and that AmpFusedDense matches nn.Linear under autocast."""
    # (device, dtype) pairs to exercise; cuda cases are skipped at call
    # time when no GPU is available (see the skipIf wrappers below)
    device_dtype_combine = [('cpu', torch.float32), ('cuda', torch.float32), ('cuda', torch.float16)]
    def setUp(self) -> None:
        torch.manual_seed(1241)  # deterministic weights/inputs per test
        return super().setUp()
    def test_qkv_fused(self):
        """Driver: runs each private check per (device, dtype) sub-test."""
        for (device, dtype) in self.device_dtype_combine:
            with self.subTest('test qkv fused', dtypes=dtype, device=device):
                # skipIf(...)(fn)(args): apply the skip decorator ad hoc
                unittest.skipIf(((not torch.cuda.is_available()) and (device == 'cuda')), 'skip cuda test')(self.__test_qkv_fused)(dtype, device)
            if (device == 'cuda'):
                with self.subTest('test amp fused', dtypes=dtype, device=device):
                    unittest.skipIf(((not torch.cuda.is_available()) and (device == 'cuda')), 'skip cuda test')(self.__test_amp_fused)(dtype, device=device)
                with self.subTest('test 2d', dtypes=dtype, device=device):
                    unittest.skipIf(((not torch.cuda.is_available()) and (device == 'cuda')), 'skip cuda test')(self.__test_amp_2d)(dtype, device=device)
    def __test_qkv_fused(self, dtype, device):
        """Fused W_qkv(x) split into thirds must equal separate Wq/Wk/Wv(x)."""
        (bs, seq_length, embed_dim) = (10, 500, 128)
        x = torch.randn(bs, seq_length, embed_dim, dtype=dtype, device=device)
        (wq, wk, wv) = (torch.nn.Linear(embed_dim, embed_dim), torch.nn.Linear(embed_dim, embed_dim), torch.nn.Linear(embed_dim, embed_dim))
        wq.to(device).to(dtype)
        wk.to(device).to(dtype)
        wv.to(device).to(dtype)
        q_origin = wq(x)
        k_origin = wk(x)
        v_origin = wv(x)
        # stack the three (E, E) projections into one (3E, E) weight
        concat_weight = torch.cat([wq.weight.data, wk.weight, wv.weight], dim=0)
        wqkv = torch.nn.Linear(embed_dim, (embed_dim * 3))
        wqkv.to(device).to(dtype)
        wqkv.weight.data.copy_(concat_weight)
        concat_bias = torch.cat([wq.bias.data, wk.bias, wv.bias], dim=0)
        wqkv.bias.data.copy_(concat_bias)
        qkv = wqkv(x)
        (q, k, v) = torch.split(qkv, embed_dim, dim=(- 1))
        assert_close(q, q_origin)
        assert_close(k, k_origin)
        assert_close(v, v_origin)
    def __test_amp_fused(self, dtype, device):
        """AmpFusedDense must match nn.Linear under cuda autocast (3-D input)."""
        (bs, seq_length, embed_dim) = (10, 500, 128)
        x = torch.randn(bs, seq_length, embed_dim, device=device, dtype=dtype)
        wqkv = torch.nn.Linear(embed_dim, (embed_dim * 3))
        wqkv.to(device)
        fused_linear = AmpFusedDense(wqkv.in_features, wqkv.out_features, bias=True)
        fused_linear.to(device)
        # copy weights so both layers compute the same function
        fused_linear.weight.data.copy_(wqkv.weight)
        fused_linear.bias.data.copy_(wqkv.bias)
        with torch.cuda.amp.autocast(dtype=dtype):
            qkv = wqkv(x)
            out = fused_linear(x)
        assert_close(qkv, out)
    def __test_amp_2d(self, dtype, device):
        """Same as __test_amp_fused but with a flattened 2-D input."""
        (bs, seq_length, embed_dim) = (10, 500, 128)
        x = torch.randn((bs * seq_length), embed_dim, device=device, dtype=dtype)
        wqkv = torch.nn.Linear(embed_dim, (embed_dim * 3))
        wqkv.to(device)
        fused_linear = AmpFusedDense(wqkv.in_features, wqkv.out_features, bias=True)
        fused_linear.to(device)
        fused_linear.weight.data.copy_(wqkv.weight)
        fused_linear.bias.data.copy_(wqkv.bias)
        with torch.cuda.amp.autocast(dtype=dtype):
            qkv = wqkv(x)
            out = fused_linear(x)
        assert_close(qkv, out)
def send_message(text):
    """Send ``text`` to the configured Telegram chat, prefixed with the hostname.

    No-op unless the module-level NOTIFY flag is set.
    """
    if not NOTIFY:
        return
    body = 'News from ' + socket.gethostname() + ': \n' + text
    updater.bot.send_message(chat_id=CHAT_ID, text=body)
class _UpProjection(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_UpProjection, self).__init__()
self.conv1 = nn.Conv2d(num_input_features, num_output_features, kernel_size=5, stride=1, padding=2, bias=False)
self.bn1 = nn.BatchNorm2d(num_output_features)
self.relu = nn.ReLU(inplace=True)
self.conv1_2 = nn.Conv2d(num_output_features, num_output_features, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1_2 = nn.BatchNorm2d(num_output_features)
self.conv2 = nn.Conv2d(num_input_features, num_output_features, kernel_size=5, stride=1, padding=2, bias=False)
self.bn2 = nn.BatchNorm2d(num_output_features)
def forward(self, x, size):
x = F.upsample(x, size=size, mode='bilinear')
x_conv1 = self.relu(self.bn1(self.conv1(x)))
bran1 = self.bn1_2(self.conv1_2(x_conv1))
bran2 = self.bn2(self.conv2(x))
out = self.relu((bran1 + bran2))
return out |
def setup(opt):
    """Instantiate the captioning model selected by ``opt.caption_model``.

    Optionally warm-starts from ``opt.start_from``, a checkpoint directory
    that must contain ``infos_<id>.pkl`` and ``model.pth``.
    """
    builders = {
        'fc': FCModel,
        'language_model': LMModel,
        'newfc': NewFCModel,
        'show_tell': ShowTellModel,
        'att2in': Att2inModel,
        'att2in2': Att2in2Model,
        'att2all2': Att2all2Model,
        'adaatt': AdaAttModel,
        'adaattmo': AdaAttMOModel,
        'topdown': TopDownModel,
        'stackatt': StackAttModel,
        'denseatt': DenseAttModel,
        'transformer': TransformerModel,
        'sat': SAT,
    }
    if opt.caption_model not in builders:
        raise Exception('Caption model not supported: {}'.format(opt.caption_model))
    model = builders[opt.caption_model](opt)
    if vars(opt).get('start_from', None) is not None:
        assert os.path.isdir(opt.start_from), (' %s must be a a path' % opt.start_from)
        assert os.path.isfile(os.path.join(opt.start_from, (('infos_' + opt.id) + '.pkl'))), ('infos.pkl file does not exist in path %s' % opt.start_from)
        model.load_state_dict(torch.load(os.path.join(opt.start_from, 'model.pth')))
    return model
.parametrize('hidden_dims, static_dim, static_in_all_layers', [([10, 5, 20], False, False), ([10, 100, 10], True, True), ([10, 50, 30, 40], True, False)])
def test_stacked(hidden_dims, static_dim, static_in_all_layers):
    """Smoke-test StackedNeuralCDE: a forward pass must produce no NaNs.

    NOTE(review): the parametrized ``static_dim`` (a bool in the parameter
    list) is immediately overwritten with 5 below, so the False/True cases
    all run with static_dim=5 — confirm whether this shadowing is intended.
    """
    input_dim = 50
    static_dim = 5
    (data, labels) = make_time_series_problem(n_channels=input_dim, static_dim=static_dim)
    model = ncde.StackedNeuralCDE(input_dim=input_dim, hidden_dims=hidden_dims, static_dim=static_dim, output_dim=1, static_in_all_layers=static_in_all_layers)
    out = model(data)
    assert (~ torch.any(torch.isnan(out)))
def IEMOCAPUnbalanced_train(sample):
    """Hyper-parameter space for unbalanced IEMOCAP training.

    When ``sample`` is truthy, each value is a lambda drawing from a random
    search space (it receives a random state ``r``); otherwise the lambdas
    return the fixed default configuration.
    """
    if sample:
        return {
            'class_balance': lambda r: True,
            'lr': lambda r: 10 ** r.uniform(-3, -5),
            'weight_decay': lambda r: 0.0,
            'batch_size': lambda r: int(2 ** r.uniform(4, 6)),
        }
    return {
        'class_balance': lambda r: True,
        'lr': lambda r: 0.0001,
        'weight_decay': lambda r: 1e-05,
        'batch_size': lambda r: 30,
    }
def check_current_planes(realfen, planes):
    """Cross-check an encoded chess plane stack against its source FEN.

    planes[0:12] are one-hot piece planes (ordered per pieces_order),
    planes[12:16] castling rights, planes[16] the fifty-move counter and
    planes[17] the en-passant square. Asserts that the auxiliary planes
    match the FEN fields and returns True iff the board reconstructed
    from the piece planes equals the white-perspective FEN board string.
    """
    cur = planes[0:12]
    assert (cur.shape == (12, 8, 8))
    fakefen = (['1'] * 64)
    for i in range(12):
        for rank in range(8):
            for file in range(8):
                if (cur[i][rank][file] == 1):
                    # each square may be claimed by at most one piece plane
                    assert (fakefen[((rank * 8) + file)] == '1')
                    fakefen[((rank * 8) + file)] = pieces_order[i]
    castling = planes[12:16]
    fiftymove = planes[16][0][0]
    ep = planes[17]
    castlingstring = ''
    for i in range(4):
        # each castling plane is constant; one corner cell suffices
        if (castling[i][0][0] == 1):
            castlingstring += castling_order[i]
    if (len(castlingstring) == 0):
        castlingstring = '-'
    epstr = '-'
    for rank in range(8):
        for file in range(8):
            if (ep[rank][file] == 1):
                epstr = coord_to_alg((rank, file))
    # planes are always from white's perspective, so flip the FEN if needed
    realfen = maybe_flip_fen(realfen, flip=is_black_turn(realfen))
    realparts = realfen.split(' ')
    assert (realparts[1] == 'w')
    assert (realparts[2] == castlingstring)
    assert (realparts[3] == epstr)
    assert (int(realparts[4]) == fiftymove)
    return (''.join(fakefen) == replace_tags_board(realfen))
def redirect(graph, node1, node2):
    """Re-point every edge incident to ``node1`` at ``node2``, then unlink ``node1``.

    Accepts Node objects or graph keys. For each edge touching node1 whose
    other endpoint is not already node2, a redirected copy is added; node1
    is finally detached from the graph.

    NOTE(review): _add_edge_copy mutates graph.edges while this loop
    iterates it — safe only if graph.edges returns a snapshot/copy; verify
    against the graph implementation.
    """
    if (not isinstance(node1, Node)):
        node1 = graph[node1]
    if (not isinstance(node2, Node)):
        node2 = graph[node2]
    for e in graph.edges:
        if (node1 in (e.node1, e.node2)):
            if ((e.node1 == node1) and (e.node2 != node2)):
                graph._add_edge_copy(e, node1=node2, node2=e.node2)
            if ((e.node2 == node1) and (e.node1 != node2)):
                graph._add_edge_copy(e, node1=e.node1, node2=node2)
    unlink(graph, node1)
class QuantizableInvertedResidual(shufflenetv2.InvertedResidual):
    """InvertedResidual whose concatenation goes through FloatFunctional so
    it can be observed and quantized."""

    def __init__(self, *args, **kwargs):
        super(QuantizableInvertedResidual, self).__init__(*args, **kwargs)
        # quantization-aware replacement for torch.cat
        self.cat = nn.quantized.FloatFunctional()

    def forward(self, x):
        if self.stride != 1:
            merged = self.cat.cat((self.branch1(x), self.branch2(x)), dim=1)
        else:
            left, right = x.chunk(2, dim=1)
            merged = self.cat.cat((left, self.branch2(right)), dim=1)
        return shufflenetv2.channel_shuffle(merged, 2)
def _check_col_within(df, col_name):
    """Raise (via bigdl's invalidInputError) when ``col_name`` is not a column of ``df``."""
    from bigdl.nano.utils.common import invalidInputError
    present = col_name in df.columns
    invalidInputError(present, f'{col_name} is expected in dataframe while not found')
def reconstruction_error(S1, S2, reduction='mean'):
    """Per-sample reconstruction error after Procrustes-aligning S1 onto S2.

    Computes the mean Euclidean point distance for each sample, then applies
    ``reduction``: 'mean', 'sum', or anything else for no reduction.
    """
    aligned = compute_similarity_transform_batch(S1, S2)
    per_sample = np.sqrt(((aligned - S2) ** 2).sum(axis=-1)).mean(axis=-1)
    if reduction == 'mean':
        return per_sample.mean()
    if reduction == 'sum':
        return per_sample.sum()
    return per_sample
class LookupValidation():
    """Accumulates named validation errors inside a ValidationResult."""
    data: ValidationResult

    def __init__(self):
        self.data = ValidationResult()

    def has_error(self, name: str) -> bool:
        """True when at least one error was recorded under ``name``."""
        return name in self.data.errors

    def get_error_count(self) -> int:
        return self.data.error_count

    def set_error_status(self) -> None:
        """Write a human-readable summary into the result's status field."""
        self.data.status = f'Found {self.data.error_count} errors'

    def add_error(self, name: str, error: str) -> None:
        """Append ``error`` under ``name`` and bump the running error count."""
        self.data.errors.setdefault(name, []).append(error)
        self.data.error_count = self.data.error_count + 1

    def to_dict(self):
        return self.data.dict()
class AgentState(EntityState):
    """Physical entity state extended with a communication utterance."""
    def __init__(self):
        super().__init__()
        # communication signal; unset until the agent acts
        self.c = None
def custom_draw_geometry_with_camera_trajectory(pcd, render_option_path, camera_trajectory_path):
    """Replay a pinhole-camera trajectory over ``pcd``, saving color/depth frames.

    Uses attributes on the function object itself (index/trajectory/vis) as
    shared state for the animation callback. Frames are written to
    <test_data_path>/image and <test_data_path>/depth.
    """
    custom_draw_geometry_with_camera_trajectory.index = (- 1)
    custom_draw_geometry_with_camera_trajectory.trajectory = o3d.io.read_pinhole_camera_trajectory(camera_trajectory_path)
    custom_draw_geometry_with_camera_trajectory.vis = o3d.visualization.Visualizer()
    image_path = os.path.join(test_data_path, 'image')
    if (not os.path.exists(image_path)):
        os.makedirs(image_path)
    depth_path = os.path.join(test_data_path, 'depth')
    if (not os.path.exists(depth_path)):
        os.makedirs(depth_path)
    def move_forward(vis):
        # Animation callback: capture the current frame (if any), then step
        # to the next camera pose; unregisters itself when the trajectory ends.
        ctr = vis.get_view_control()
        glb = custom_draw_geometry_with_camera_trajectory
        if (glb.index >= 0):
            print('Capture image {:05d}'.format(glb.index))
            depth = vis.capture_depth_float_buffer(False)
            image = vis.capture_screen_float_buffer(False)
            plt.imsave(os.path.join(depth_path, '{:05d}.png'.format(glb.index)), np.asarray(depth), dpi=1)
            plt.imsave(os.path.join(image_path, '{:05d}.png'.format(glb.index)), np.asarray(image), dpi=1)
        glb.index = (glb.index + 1)
        if (glb.index < len(glb.trajectory.parameters)):
            ctr.convert_from_pinhole_camera_parameters(glb.trajectory.parameters[glb.index], allow_arbitrary=True)
        else:
            custom_draw_geometry_with_camera_trajectory.vis.register_animation_callback(None)
        return False
    vis = custom_draw_geometry_with_camera_trajectory.vis
    vis.create_window()
    vis.add_geometry(pcd)
    vis.get_render_option().load_from_json(render_option_path)
    vis.register_animation_callback(move_forward)
    vis.run()
    vis.destroy_window()
def test_is_terminated(phantom_env):
    """The env terminates only once every required agent ID has terminated."""
    phantom_env._terminations = set()
    assert not phantom_env.is_terminated()
    phantom_env._terminations = {'A'}
    assert not phantom_env.is_terminated()
    phantom_env._terminations = {'A', 'B'}
    assert phantom_env.is_terminated()
class OutputTransition(nn.Module):
    """Final V-Net stage: maps ``inChans`` feature maps to per-voxel 2-class
    (log-)probabilities, flattened to shape (num_voxels, 2).

    ``nll`` selects log_softmax (for NLLLoss) vs plain softmax.
    """
    def __init__(self, inChans, elu, nll):
        super(OutputTransition, self).__init__()
        self.conv1 = nn.Conv3d(inChans, 2, kernel_size=5, padding=2)
        self.bn1 = ContBatchNorm3d(2)
        self.conv2 = nn.Conv3d(2, 2, kernel_size=1)
        self.relu1 = ELUCons(elu, 2)
        if nll:
            self.softmax = F.log_softmax
        else:
            self.softmax = F.softmax

    def forward(self, x):
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.conv2(out)
        # (N, 2, D, H, W) -> (N, D, H, W, 2) -> (num_voxels, 2)
        out = out.permute(0, 2, 3, 4, 1).contiguous()
        out = out.view(out.numel() // 2, 2)
        # Pass dim explicitly: calling F.(log_)softmax without ``dim`` is
        # deprecated; for this 2-D tensor the implicit choice was already
        # dim=1, so behavior is unchanged.
        out = self.softmax(out, dim=1)
        return out
def evaluate(model, data_in, data_out, metrics, samples_perc_per_epoch=1, batch_size=500):
    """Score ``model`` on (data_in -> data_out) with the given ranking metrics.

    ``metrics`` is a list of dicts each carrying 'metric' (callable) and
    'k'; one aggregated score per dict is returned. Relies on module-level
    ``generate`` and ``device``. When evaluating held-out data
    (data_in is not data_out), items already present in the input are
    masked with -inf so they cannot be re-recommended.
    """
    metrics = deepcopy(metrics)  # don't mutate the caller's metric dicts
    model.eval()
    for m in metrics:
        m['score'] = []
    for batch in generate(batch_size=batch_size, device=device, data_in=data_in, data_out=data_out, samples_perc_per_epoch=samples_perc_per_epoch):
        ratings_in = batch.get_ratings_to_dev()
        ratings_out = batch.get_ratings(is_out=True)
        ratings_pred = model(ratings_in, calculate_loss=False).cpu().detach().numpy()
        if (not (data_in is data_out)):
            # mask already-rated items out of the ranking
            ratings_pred[batch.get_ratings().nonzero()] = (- np.inf)
        for m in metrics:
            m['score'].append(m['metric'](ratings_pred, ratings_out, k=m['k']))
    for m in metrics:
        m['score'] = np.concatenate(m['score']).mean()
    return [x['score'] for x in metrics]
def demo_basic(local_world_size, local_rank):
    """Distributed FB15K link-prediction training demo (one process per rank).

    Loads and processes FB15K, builds a ComplEx model with uniform negative
    sampling, and runs the Trainer under DistributedSampler. Downloads the
    dataset on first use.
    """
    init_seed((1 + local_rank))  # distinct RNG seed per rank
    torch.cuda.set_device(local_rank)
    # NOTE(review): device is hard-coded to cuda:0 although set_device uses
    # local_rank — confirm the Trainer relies on set_device, not ``device``.
    device = torch.device('cuda:0')
    loader = FB15KLoader(dataset_path='../../dataset', download=True)
    (train_data, valid_data, test_data) = loader.load_all_data()
    (node_lut, relation_lut) = loader.load_all_lut()
    processor = FB15KProcessor(node_lut, relation_lut, reprocess=True)
    train_dataset = processor.process(train_data)
    valid_dataset = processor.process(valid_data)
    test_dataset = processor.process(test_data)
    (node_lut, relation_lut) = processor.process_lut()
    # one DistributedSampler per split so each rank sees a disjoint shard
    train_sampler = DistributedSampler(train_dataset)
    valid_sampler = DistributedSampler(valid_dataset)
    test_sampler = DistributedSampler(test_dataset)
    model = ComplEx(entity_dict_len=len(node_lut), relation_dict_len=len(relation_lut), embedding_dim=50, penalty_weight=0.1)
    loss = NegLogLikehoodLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=0)
    metric = Link_Prediction(link_prediction_raw=True, link_prediction_filt=False, batch_size=50000, reverse=True)
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=3, threshold_mode='abs', threshold=5, factor=0.5, min_lr=1e-09, verbose=True)
    negative_sampler = UnifNegativeSampler(triples=train_dataset, entity_dict_len=len(node_lut), relation_dict_len=len(relation_lut))
    trainer = Trainer(train_dataset=train_dataset, valid_dataset=valid_dataset, test_dataset=test_dataset, train_sampler=train_sampler, valid_sampler=valid_sampler, test_sampler=test_sampler, model=model, loss=loss, optimizer=optimizer, negative_sampler=negative_sampler, device=device, output_path='../../dataset', lookuptable_E=node_lut, lookuptable_R=relation_lut, metric=metric, trainer_batch_size=256, total_epoch=1000, lr_scheduler=lr_scheduler, apex=True, dataloaderX=True, num_workers=4, pin_memory=True, use_tensorboard_epoch=100, use_matplotlib_epoch=100, use_savemodel_epoch=100, use_metric_epoch=50, rank=local_rank)
    # wait for every rank to finish preprocessing before training starts
    dist.barrier()
    trainer.train()
def distributed_init(cfg: FairseqConfig):
    """Initialize torch.distributed (or XLA on TPU) for this process.

    Returns the distributed rank. Also downgrades logging on non-master
    ranks and, when model parallelism is enabled, boots megatron's
    model-parallel groups and adjusts the checkpoint suffix.
    """
    if isinstance(cfg, Namespace):
        from fairseq.dataclass.utils import convert_namespace_to_omegaconf
        cfg = convert_namespace_to_omegaconf(cfg)
    if (not cfg.common.tpu):
        if (torch.distributed.is_available() and torch.distributed.is_initialized()):
            warnings.warn('Distributed is already initialized, cannot initialize twice!')
        else:
            logger.info('distributed init (rank {}): {}'.format(cfg.distributed_training.distributed_rank, cfg.distributed_training.distributed_init_method))
            dist.init_process_group(backend=cfg.distributed_training.distributed_backend, init_method=cfg.distributed_training.distributed_init_method, world_size=cfg.distributed_training.distributed_world_size, rank=cfg.distributed_training.distributed_rank)
            logger.info('initialized host {} as rank {}'.format(socket.gethostname(), cfg.distributed_training.distributed_rank))
            if torch.cuda.is_available():
                # dummy all-reduce to warm up the NCCL communicator
                dist.all_reduce(torch.zeros(1).cuda())
        cfg.distributed_training.distributed_rank = torch.distributed.get_rank()
    else:
        # TPU path: torch_xla supplies world size / ordinals
        assert (xm.xrt_world_size() == cfg.distributed_training.distributed_world_size)
        global _USE_XLA
        _USE_XLA = True
        cfg.distributed_training.device_id = xm.get_local_ordinal()
        cfg.distributed_training.distributed_rank = xm.get_ordinal()
        xm.rendezvous('distributed_init')
    if is_master(cfg.distributed_training):
        logging.getLogger().setLevel(logging.INFO)
    else:
        logging.getLogger().setLevel(logging.WARNING)
    if (cfg.common.model_parallel_size > 1):
        try:
            from fairseq.model_parallel.megatron.mpu import initialize_model_parallel, model_parallel_cuda_manual_seed
        except ImportError:
            raise ImportError('\n\nPlease install the megatron submodule:\n\n git submodule update --init fairseq/model_parallel/megatron')
        global _USE_MEGATRON
        _USE_MEGATRON = True
        initialize_model_parallel(cfg.common.model_parallel_size)
        model_parallel_cuda_manual_seed(cfg.common.seed)
        model_part_number = get_model_parallel_rank()
        cfg.checkpoint.checkpoint_suffix += '-model_part-{0}'.format(model_part_number)
    if (getattr(cfg.model, 'base_layers', 0) > 0):
        # NOTE(review): '=' (not '+=') discards any '-model_part-N' suffix
        # appended above — confirm this overwrite is intentional.
        cfg.checkpoint.checkpoint_suffix = f'-rank-{cfg.distributed_training.distributed_rank}'
    return cfg.distributed_training.distributed_rank
class OSSPath():
    """pathlib-style wrapper around an S3/OSS object: a boto3 client, a
    bucket name, and the object key split into path components."""
    __slots__ = ('_client', 'bucket', '_key_parts')
    def __new__(cls, s3url: Optional[str]=None, endpoint_url=OSS_ENDPOINT):
        # NOTE(review): boto3 and OSS_ENDPOINT come from the enclosing
        # module (not visible in this chunk).
        _client = boto3.client('s3', endpoint_url=endpoint_url)
        (bucket, parts) = cls._parse_s3url(s3url)
        return cls._create(_client, bucket, parts)
def _parse_s3url(cls, s3url: Optional[str]=None):
if (s3url is None):
return ('', ())
if (not s3url.startswith('s3://')):
raise ValueError("s3url must be formated as 's3://<bucket_name>/path/to/object'")
r = urlparse(s3url)
assert (r.scheme == 's3')
key = r.path.lstrip('/')
parts = PosixPath(key).parts
return (r.netloc, parts)
def _create(cls, client, bucket: str, key_parts: Tuple[str]):
assert isinstance(key_parts, tuple)
self = object.__new__(cls)
self._client = client
self.bucket = bucket
self._key_parts = key_parts
return self
def key(self) -> str:
return '/'.join(self._key_parts)
def parent(self):
if (not len(self._key_parts)):
return self
return self._create(self._client, self.bucket, self._key_parts[:(- 1)])
def root(self):
return self._create(self._client, self.bucket, key_parts=())
def name(self):
if (len(self._key_parts) < 1):
return ''
return self._key_parts[(- 1)]
def suffix(self):
name = self.name
i = name.rfind('.')
if (0 < i < (len(name) - 1)):
return name[i:]
else:
return ''
def suffixes(self):
name = self.name
if name.endswith('.'):
return []
name = name.lstrip('.')
return [('.' + suffix) for suffix in name.split('.')[1:]]
def stem(self):
name = self.name
i = name.rfind('.')
if (0 < i < (len(name) - 1)):
return name[:i]
else:
return name
def parts(self):
return self._key_parts
def __str__(self) -> str:
return 's3://{}/{}'.format(self.bucket, self.key)
def __eq__(self, other):
if (not isinstance(other, OSSPath)):
return False
return ((self.bucket == other.bucket) and (self.key == other.key))
    def __hash__(self):
        # Hash the canonical URL so hashing stays consistent with __eq__.
        return hash(str(self))
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, str(self))
def __lt__(self, other):
if (not isinstance(other, OSSPath)):
raise NotImplementedError()
return (str(self) < str(other))
def __le__(self, other):
if (not isinstance(other, OSSPath)):
raise NotImplementedError()
return (str(self) <= str(other))
def __gt__(self, other):
if (not isinstance(other, OSSPath)):
raise NotImplementedError()
return (str(self) > str(other))
def __ge__(self, other):
if (not isinstance(other, OSSPath)):
raise NotImplementedError()
return (str(self) >= str(other))
def with_name(self, name):
if (not self.name):
raise ValueError(('%r has an empty name' % (self,)))
r = urlparse(name)
if (not (((r.scheme == '') and (r.netloc == '')) or ('/' in name))):
raise ValueError(('invalid name %r' % name))
return self._create(self._client, self.bucket, (self._key_parts[:(- 1)] + (name,)))
def with_suffix(self, suffix):
if ('/' in suffix):
raise ValueError(('Invalid suffix %r' % (suffix,)))
if ((suffix and (not suffix.startswith('.'))) or (suffix == '.')):
raise ValueError(('Invalid suffix %r' % suffix))
name = self.name
if (not name):
raise ValueError(('%r has an empty name' % (self,)))
old_suffix = self.suffix
if (not old_suffix):
name = (name + suffix)
else:
name = (name[:(- len(old_suffix))] + suffix)
return self._create(self._client, self.bucket, (self._key_parts[:(- 1)] + (name,)))
def with_bucket(self, bucket):
if (not isinstance(bucket, str)):
raise ValueError('bucket be string')
bucket = bucket.strip('/')
if (not bucket):
raise ValueError('bucket must not be empty')
if ('/' in bucket):
raise ValueError("bucket_name must not contain '/'")
return self._create(self._client, bucket, self._key_parts)
    def _make_child(self, args: Iterable[str]):
        """Append path components from ``args`` to this path.

        If no bucket is set yet, the first component supplies the bucket
        name and the remaining parts are appended recursively. Each arg
        must be a non-empty string; a leading '/' is stripped and args may
        themselves contain '/' separators.
        """
        if (not self.bucket):
            (bucket, *rest_args) = args
            bucket = bucket.lstrip('/')
            (bucket, *rest_parts) = PosixPath(bucket).parts
            # recurse with the bucket bound and any leftover parts prepended
            return self.with_bucket(bucket)._make_child((rest_parts + rest_args))
        parts = [p for p in self._key_parts]
        for item in args:
            if (not isinstance(item, str)):
                raise ValueError('child must be string')
            item = item.lstrip('/')
            if (not item):
                raise ValueError('child must not be empty')
            for p in PosixPath(item).parts:
                parts.append(p)
        return self._create(self._client, self.bucket, tuple(parts))
    def joinpath(self, *args):
        """Join one or more path components onto this path (cf. PurePath.joinpath)."""
        return self._make_child(args)
    def __truediv__(self, key):
        # Support the `path / 'child'` operator.
        return self._make_child((key,))
def __rtruediv__(self, key):
raise NotImplemented
def is_dir(self):
if (not self.bucket):
return False
if (not self.key):
try:
self._client.head_bucket(Bucket=self.bucket)
return True
except ClientError as e:
if (e.response['Error']['Code'] == '404'):
return False
prefix = self.key
if (prefix[(- 1)] != '/'):
prefix = (prefix + '/')
resp = self._client.list_objects(Bucket=self.bucket, Delimiter='/', Prefix=prefix)
return (('CommonPrefixes' in resp) or ('Contents' in resp))
def is_file(self):
if (not self.bucket):
return False
if (not self.key):
return False
try:
self._client.head_object(Bucket=self.bucket, Key=self.key)
return True
except ClientError as e:
if (e.response['Error']['Code'] == '404'):
return False
def exists(self):
if (not self.bucket):
return False
if self.is_dir():
return True
elif self.is_file():
return True
return False
def get_size(self):
if (not self.bucket):
return (- 1)
if self.is_dir():
return 0
if (not self.is_file()):
return (- 1)
key = self.key.lstrip('/')
return self._client.head_object(Bucket=self.bucket, Key=key)['ContentLength']
def list_all(self, batch_size=1000):
if (not self.is_dir()):
return
if (batch_size > 1000):
print('At most 1000 keys can be operated at once. Clipping batch_size to 1000.')
batch_size = 1000
prefix = self.key
if (prefix[(- 1)] != '/'):
prefix = (prefix + '/')
marker = None
while True:
request = dict(Bucket=self.bucket, Delimiter='', Prefix=prefix, MaxKeys=batch_size)
if marker:
request['Marker'] = marker
resp = self._client.list_objects(**request)
for p in resp.get('Contents', []):
(yield (self.root / p['Key']))
if (not resp['IsTruncated']):
break
print('More than {} objects are found under {}, you should avoid putting too many small objects!'.format(batch_size, self))
marker = resp['NextMarker']
    def walk(self, topdown=True, recursive=True, batch_size=1000):
        """Yield (dir, subdirs, files) triples over S3 prefixes, like os.walk.

        topdown controls whether a directory is yielded before or after its
        children; recursive=False restricts the walk to this directory only.
        """
        if (not self.is_dir()):
            return
        if (batch_size > 1000):
            print('At most 1000 keys can be operated at once. Clipping batch_size to 1000.')
            batch_size = 1000
        prefix = self.key
        # NOTE(review): when self.key is '' (bucket root, which is_dir()
        # accepts) prefix[-1] raises IndexError — confirm whether walking a
        # bucket root is a supported call.
        if (prefix[(- 1)] != '/'):
            prefix = (prefix + '/')
        (dirs, files) = ([], [])
        marker = None
        while True:
            # Delimiter='/' groups immediate children: CommonPrefixes are the
            # sub-"directories", Contents the objects directly under prefix.
            request = dict(Bucket=self.bucket, Delimiter='/', Prefix=prefix, MaxKeys=batch_size)
            if marker:
                request['Marker'] = marker
            resp = self._client.list_objects(**request)
            dirs += [(self.root / p['Prefix']) for p in resp.get('CommonPrefixes', [])]
            files += [(self.root / p['Key']) for p in resp.get('Contents', [])]
            if (not resp['IsTruncated']):
                break
            print('More than {} objects are found under {}, you should avoid putting too many small objects!'.format(batch_size, self))
            # NextMarker is available here because a Delimiter was supplied.
            marker = resp['NextMarker']
        if topdown:
            (yield (self, dirs, files))
        if recursive:
            # Passing recursive=True is correct: once we recurse, the whole
            # subtree is walked.
            for subdir in dirs:
                (yield from subdir.walk(recursive=True, topdown=topdown, batch_size=batch_size))
        if (not topdown):
            (yield (self, dirs, files))
def iterdir(self, batch_size=1000):
for (root, dirs, files) in self.walk(batch_size=batch_size, recursive=False):
(yield from dirs)
(yield from files)
def download(self, encoding=None) -> Optional[io.IOBase]:
if (not self.is_file()):
raise FileNotFoundError('{!r} is not an existing object.'.format(self))
r = self._client.get_object(Bucket=self.bucket, Key=self.key)
b = r['Body']
if (encoding is not None):
b = codecs.getreader(encoding)(b)
return b
def put(self, bytes_or_file) -> bool:
if ((not self.bucket) or (not self.key)):
raise ValueError('Invalid path to put object: {!r}'.format(self))
if self.key.endswith('/'):
raise ValueError('Object key cannot endswith "/": {}'.format(self.key))
r = self._client.put_object(Body=bytes_or_file, Bucket=self.bucket, Key=self.key)
return (r['ResponseMetadata']['HTTPStatusCode'] == 200)
def delete(self) -> bool:
if (not self.is_file()):
return True
r = self._client.delete_object(Bucket=self.bucket, Key=self.key)
return (r['ResponseMetadata']['HTTPStatusCode'] == 204)
def rmtree(self, batch_size=1000) -> List[str]:
if (not self.is_dir()):
if self.is_file():
raise ValueError('{!r} is not a directory'.format(self))
return True
if (batch_size > 1000):
print('At most 1000 keys can be operated at once. Clipping batch_size to 1000.')
batch_size = 1000
prefix = self.key
if (prefix[(- 1)] != '/'):
prefix = (prefix + '/')
ret = []
while True:
lr = self._client.list_objects(Bucket=self.bucket, Delimiter='', Prefix=prefix, MaxKeys=batch_size)
dr = self._client.delete_objects(Bucket=self.bucket, Delete={'Objects': [{'Key': i['Key']} for i in lr.get('Contents', [])]})
for i in dr['Deleted']:
ret.append('s3://{}/{}'.format(self.bucket, i['Key']))
if (not lr['IsTruncated']):
break
print('More than {} objects are found under {}, you should avoid putting too many small objects!'.format(batch_size, self))
return ret |
class OrderedSet(OrderedDict, MutableSet):
    """A set that preserves insertion order, implemented on OrderedDict keys.

    Elements are stored as dict keys (values are always None); MutableSet
    supplies the set-operator machinery on top of add/discard.
    """

    def update(self, *args, **kwargs):
        """Add the elements of every iterable argument, in order."""
        if kwargs:
            raise TypeError('update() takes no keyword arguments')
        for iterable in args:
            for element in iterable:
                self.add(element)

    def add(self, elem):
        # Membership is encoded as key presence; the value is irrelevant.
        self[elem] = None

    def discard(self, elem):
        # pop() with a default so removing a missing element is a no-op.
        self.pop(elem, None)

    def __le__(self, other):
        # Subset-or-equal: every element must appear in `other`.
        for element in self:
            if element not in other:
                return False
        return True

    def __lt__(self, other):
        return (self <= other) and (self != other)

    def __ge__(self, other):
        # Superset-or-equal: every element of `other` must appear in self.
        for element in other:
            if element not in self:
                return False
        return True

    def __gt__(self, other):
        return (self >= other) and (self != other)

    def __repr__(self):
        return 'OrderedSet([%s])' % ', '.join(map(repr, self.keys()))

    def __str__(self):
        return '{%s}' % ', '.join(map(repr, self.keys()))

    # set-API aliases, delegating to the operators MutableSet provides.
    difference = property(lambda self: self.__sub__)
    difference_update = property(lambda self: self.__isub__)
    intersection = property(lambda self: self.__and__)
    intersection_update = property(lambda self: self.__iand__)
    issubset = property(lambda self: self.__le__)
    issuperset = property(lambda self: self.__ge__)
    symmetric_difference = property(lambda self: self.__xor__)
    symmetric_difference_update = property(lambda self: self.__ixor__)
    union = property(lambda self: self.__or__)
class StochasticBottleneck(nn.Module):
    """Wrap a bottleneck block `m`, applying StochasticDepth to its residual branch.

    Re-runs m's conv/bn/relu stages explicitly so the stochastic-depth drop
    can be inserted just before the shortcut addition.
    """

    def __init__(self, m, stochastic_depth_p=0.2, stochastic_depth_mode='row'):
        super(StochasticBottleneck, self).__init__()
        self.m = m
        self.sd = StochasticDepth(stochastic_depth_p, mode=stochastic_depth_mode)

    def forward(self, x):
        # Residual branch: conv1/bn1 -> relu -> conv2/bn2 -> relu -> conv3/bn3.
        branch = self.m.relu(self.m.bn1(self.m.conv1(x)))
        branch = self.m.relu(self.m.bn2(self.m.conv2(branch)))
        branch = self.m.bn3(self.m.conv3(branch))
        # Randomly drop the residual branch (stochastic depth).
        branch = self.sd(branch)
        # Shortcut path, projected when the wrapped block downsamples.
        shortcut = self.m.downsample(x) if self.m.downsample is not None else x
        return self.m.relu(branch + shortcut)
class DumpBeams(InferenceTask):
    """Inference task that accumulates beam-search tensors and saves them as .npz."""

    def __init__(self, params):
        super(DumpBeams, self).__init__(params)
        # One list per tensor kind; appended per example in after_run.
        self._beam_accum = {'predicted_ids': [], 'beam_parent_ids': [], 'scores': [], 'log_probs': []}
        if not self.params['file']:
            raise ValueError('Must specify file for DumpBeams')

    @staticmethod
    def default_params():
        """Default parameters; `file` is the output .npz path (required).

        Marked @staticmethod: the original had no `self` parameter, so calling
        it on an instance raised TypeError (the decorator was likely stripped).
        """
        params = {}
        params.update({'file': ''})
        return params

    def before_run(self, _run_context):
        """Request the four beam-search output tensors for each session step."""
        fetches = {}
        fetches['beam_search_output.predicted_ids'] = self._predictions['beam_search_output.predicted_ids']
        fetches['beam_search_output.beam_parent_ids'] = self._predictions['beam_search_output.beam_parent_ids']
        fetches['beam_search_output.scores'] = self._predictions['beam_search_output.scores']
        fetches['beam_search_output.log_probs'] = self._predictions['beam_search_output.log_probs']
        return tf.train.SessionRunArgs(fetches)

    def after_run(self, _run_context, run_values):
        """Unbatch the fetched tensors and append them example by example."""
        fetches_batch = run_values.results
        for fetches in unbatch_dict(fetches_batch):
            self._beam_accum['predicted_ids'].append(fetches['beam_search_output.predicted_ids'])
            self._beam_accum['beam_parent_ids'].append(fetches['beam_search_output.beam_parent_ids'])
            self._beam_accum['scores'].append(fetches['beam_search_output.scores'])
            self._beam_accum['log_probs'].append(fetches['beam_search_output.log_probs'])

    def end(self, _session):
        """Write all accumulated arrays to the configured .npz file."""
        np.savez(self.params['file'], **self._beam_accum)
class Params():
    """Training configuration read from the [DEFAULT] section of an INI file."""

    def __init__(self, path):
        assert os.path.exists(path), 'Cannot find configuration file: {}'.format(path)
        self.path = path
        config = configparser.ConfigParser()
        config.read(self.path)
        params = config['DEFAULT']
        # ISSIA CNR dataset is optional; camera splits only exist when set.
        self.issia_path = params.get('issia_path', None)
        if self.issia_path is not None:
            temp = params.get('issia_train_cameras', '1, 2, 3, 4')
            self.issia_train_cameras = [int(e) for e in temp.split(',')]
            temp = params.get('issia_val_cameras', '5, 6')
            self.issia_val_cameras = [int(e) for e in temp.split(',')]
        # SoccerPlayerDetection_bmvc17 dataset is optional as well.
        self.spd_path = params.get('spd_path', None)
        if self.spd_path is not None:
            temp = params.get('spd_set', '1, 2')
            self.spd_set = [int(e) for e in temp.split(',')]
        self.num_workers = params.getint('num_workers', 0)
        self.batch_size = params.getint('batch_size', 4)
        self.epochs = params.getint('epochs', 20)
        self.lr = params.getfloat('lr', 0.001)
        self.model = params.get('model', 'fb1')
        self.model_name = 'model_{}_{}'.format(self.model, get_datetime())
        self._check_params()

    def _check_params(self):
        """Validate configured dataset paths and camera/set numbers.

        Bug fix: both dataset blocks are optional in __init__, but this method
        previously dereferenced issia_*/spd_* attributes unconditionally and
        crashed (TypeError on os.path.exists(None) / AttributeError) whenever
        a dataset path was omitted from the config.
        """
        if self.issia_path is not None:
            assert os.path.exists(self.issia_path), 'Cannot access ISSIA CNR dataset: {}'.format(self.issia_path)
            for c in self.issia_train_cameras:
                assert (1 <= c <= 6), 'ISSIA CNR camera number must be between 1 and 6. Is: {}'.format(c)
            for c in self.issia_val_cameras:
                assert (1 <= c <= 6), 'ISSIA CNR camera number must be between 1 and 6. Is: {}'.format(c)
        if self.spd_path is not None:
            assert os.path.exists(self.spd_path), 'Cannot access SoccerPlayerDetection_bmvc17 dataset: {}'.format(self.spd_path)
            for c in self.spd_set:
                assert ((c == 1) or (c == 2)), 'SPD dataset number must be 1 or 2. Is: {}'.format(c)

    def print(self):
        """Print every attribute of this configuration, one per line."""
        print('Parameters:')
        param_dict = vars(self)
        for e in param_dict:
            print('{}: {}'.format(e, param_dict[e]))
        print('')
def define_D(input_nc, ndf, use_sigmoid=True, gpu_ids=None):
    """Build the discriminator network, optionally moving it to the first GPU.

    Bug fix: `use_sigmoid` was accepted but ignored (hard-coded to True);
    it is now forwarded to the Discriminator.
    NOTE(review): `input_nc` and `ndf` are still unused — in_channels stays
    hard-coded to 7; confirm whether they should be forwarded as well.
    """
    if gpu_ids is None:
        gpu_ids = []
    use_gpu = len(gpu_ids) > 0
    if use_gpu:
        assert torch.cuda.is_available()
    netD = Discriminator(in_channels=7, use_sigmoid=use_sigmoid)
    if use_gpu:
        netD.cuda(gpu_ids[0])
    return netD
def simxSetUISlider(clientID, uiHandle, uiButtonID, position, operationMode):
    """Set the position of a slider in a V-REP/CoppeliaSim UI element.

    Thin wrapper: arguments are forwarded verbatim to c_SetUISlider
    (presumably the ctypes binding of the remote-API call — confirm) and its
    return code is returned unchanged.
    """
    return c_SetUISlider(clientID, uiHandle, uiButtonID, position, operationMode)
@torch.no_grad()  # was a mangled bare `_grad()`; inference-only, so gradients are disabled
def get_predictions(p, dataloader, model, return_features=False):
    """Run `model` over `dataloader` and collect per-head predictions.

    Returns one dict per head containing predictions, probabilities and
    targets (plus neighbor indices for a NeighborsDataset); when
    return_features is True, also returns the backbone features.
    """
    model.eval()
    predictions = [[] for _ in range(p['num_heads'])]
    probs = [[] for _ in range(p['num_heads'])]
    targets = []
    if return_features:
        ft_dim = get_feature_dimensions_backbone(p)
        features = torch.zeros((len(dataloader.sampler), ft_dim)).cuda()
    # NeighborsDataset batches carry the image under 'anchor' and also supply
    # candidate neighbor indices.
    if isinstance(dataloader.dataset, NeighborsDataset):
        key_ = 'anchor'
        include_neighbors = True
        neighbors = []
    else:
        key_ = 'image'
        include_neighbors = False
    ptr = 0
    for (batch, _) in dataloader:
        images = batch[key_].cuda(non_blocking=True)
        bs = images.shape[0]
        res = model(images, forward_pass='return_all')
        output = res['output']
        if return_features:
            # Fill the preallocated feature buffer batch by batch.
            features[ptr:(ptr + bs)] = res['features']
            ptr += bs
        for (i, output_i) in enumerate(output):
            predictions[i].append(torch.argmax(output_i, dim=1))
            probs[i].append(F.softmax(output_i, dim=1))
        targets.append(batch['target'])
        if include_neighbors:
            neighbors.append(batch['possible_neighbors'])
    predictions = [torch.cat(pred_, dim=0).cpu() for pred_ in predictions]
    probs = [torch.cat(prob_, dim=0).cpu() for prob_ in probs]
    targets = torch.cat(targets, dim=0)
    if include_neighbors:
        neighbors = torch.cat(neighbors, dim=0)
        out = [{'predictions': pred_, 'probabilities': prob_, 'targets': targets, 'neighbors': neighbors} for (pred_, prob_) in zip(predictions, probs)]
    else:
        out = [{'predictions': pred_, 'probabilities': prob_, 'targets': targets} for (pred_, prob_) in zip(predictions, probs)]
    if return_features:
        return (out, features)
    else:
        return out
class Data_MIONet_Cartesian(Data):
    # MIONet data container: the last entry of X_train is shared by every
    # sample of a batch, while the leading entries and y_train are subsampled.
    def __init__(self, X_train=None, y_train=None, X_test=None, y_test=None):
        super(Data_MIONet_Cartesian, self).__init__(X_train, y_train, X_test, y_test)
    def get_batch(self, batch_size):
        # NOTE(review): the bare `_elementwise` lines below look like
        # decorators whose '@' (and name prefix) was stripped by whatever
        # mangled this file; as written they are no-op name expressions, and
        # `batch(self.X_train[:-1], mask)` then indexes a list with an array,
        # which cannot work. Recover the original decorator before relying on
        # this method.
        _elementwise
        def batch_mask(X, num):
            # Sample `num` row indices without replacement; X.size(0) suggests
            # a torch tensor — confirm.
            return np.random.choice(X.size(0), num, replace=False)
        _elementwise
        def batch(X, mask):
            return X[mask]
        mask = batch_mask(self.y_train, batch_size)
        return ((*batch(self.X_train[:(- 1)], mask), self.X_train[(- 1)]), batch(self.y_train, mask))
def parse_range(range_str):
    """Build a numpy range from a comma-separated string.

    '5' -> arange(5.0); '0,3' -> arange(0.0, 3.0); '1,2,0.5' -> arange(1.0, 2.0, 0.5).
    """
    bounds = [float(token) for token in range_str.split(',')]
    return np.arange(*bounds)
def get_default_config_with_chosen_model(model_type, use_det_resnet=None, determinant_fn_mode=None, explicit_antisym_subtype=None, use_products_covariance=None):
    """Build the default config, select `model_type`, and apply any overrides.

    Each keyword that is not None is written into every model sub-config that
    understands it before the model-specific sub-config is chosen.
    """
    cfg = default_config.get_default_config()
    cfg.model.type = model_type
    if use_det_resnet is not None:
        cfg.model.ferminet.use_det_resnet = use_det_resnet
        cfg.model.embedded_particle_ferminet.use_det_resnet = use_det_resnet
    if determinant_fn_mode is not None:
        cfg.model.ferminet.determinant_fn_mode = determinant_fn_mode
        cfg.model.embedded_particle_ferminet.determinant_fn_mode = determinant_fn_mode
    if explicit_antisym_subtype is not None:
        cfg.model.explicit_antisym.antisym_type = explicit_antisym_subtype
    if use_products_covariance is not None:
        cfg.model.orbital_cofactor_net.use_products_covariance = use_products_covariance
        cfg.model.per_particle_dets_net.use_products_covariance = use_products_covariance
    # Choose/prune the per-model sub-config last, after all overrides landed.
    cfg.model = default_config.choose_model_type_in_model_config(cfg.model)
    return cfg
class MobileNetV3RCNN(MobileNetV3):
    """MobileNetV3 backbone variant for RCNN detectors (PaddlePaddle static graph).

    Collects intermediate feature maps into self.end_points at the levels
    listed in `feature_maps` and returns them as an OrderedDict from __call__.
    """
    def __init__(self, scale=1.0, model_name='large', conv_decay=0.0, norm_type='bn', norm_decay=0.0, freeze_norm=True, feature_maps=[2, 3, 4, 5], lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0]):
        # NOTE(review): `freeze_norm` is accepted but neither stored nor
        # forwarded to the parent — confirm whether it should be passed through.
        super(MobileNetV3RCNN, self).__init__(scale=scale, model_name=model_name, conv_decay=conv_decay, norm_type=norm_type, norm_decay=norm_decay, lr_mult_list=lr_mult_list, feature_maps=feature_maps)
        self.curr_stage = 0  # index of the residual unit being built
        self.block_stride = 1  # cumulative stride; log2 gives the feature level
    def _residual_unit(self, input, num_in_filter, num_mid_filter, num_out_filter, stride, filter_size, act=None, use_se=False, name=None):
        """Inverted-residual unit: 1x1 expand -> depthwise -> (SE) -> 1x1 linear."""
        input_data = input
        conv0 = self._conv_bn_layer(input=input, filter_size=1, num_filters=num_mid_filter, stride=1, padding=0, if_act=True, act=act, name=(name + '_expand'))
        feature_level = int(np.log2(self.block_stride))
        # Capture the pre-downsampling activation as a backbone output.
        if ((feature_level in self.feature_maps) and (stride == 2)):
            self.end_points.append(conv0)
        conv1 = self._conv_bn_layer(input=conv0, filter_size=filter_size, num_filters=num_mid_filter, stride=stride, padding=int(((filter_size - 1) // 2)), if_act=True, act=act, num_groups=num_mid_filter, use_cudnn=False, name=(name + '_depthwise'))
        if use_se:
            conv1 = self._se_block(input=conv1, num_out_filter=num_mid_filter, name=(name + '_se'))
        conv2 = self._conv_bn_layer(input=conv1, filter_size=1, num_filters=num_out_filter, stride=1, padding=0, if_act=False, name=(name + '_linear'))
        # Residual shortcut only when shapes match (same channels, stride 1).
        if ((num_in_filter != num_out_filter) or (stride != 1)):
            return conv2
        else:
            return fluid.layers.elementwise_add(x=input_data, y=conv2, act=None)
    def __call__(self, input):
        """Build the full backbone graph and return the collected feature maps."""
        scale = self.scale
        inplanes = self.inplanes
        cfg = self.cfg
        conv = self._conv_bn_layer(input, filter_size=3, num_filters=self._make_divisible((inplanes * scale)), stride=2, padding=1, num_groups=1, if_act=True, act='hard_swish', name='conv1')
        i = 0
        inplanes = self._make_divisible((inplanes * scale))
        for layer_cfg in cfg:
            # layer_cfg: [filter_size, mid_filters, out_filters, use_se, act, stride]
            self.block_stride *= layer_cfg[5]
            conv = self._residual_unit(input=conv, num_in_filter=inplanes, num_mid_filter=self._make_divisible((scale * layer_cfg[1])), num_out_filter=self._make_divisible((scale * layer_cfg[2])), act=layer_cfg[4], stride=layer_cfg[5], filter_size=layer_cfg[0], use_se=layer_cfg[3], name=('conv' + str((i + 2))))
            inplanes = self._make_divisible((scale * layer_cfg[2]))
            i += 1
            self.curr_stage += 1
        if (np.max(self.feature_maps) >= 5):
            # Extra 1x1 head when the deepest feature level is requested.
            conv = self._conv_bn_layer(input=conv, filter_size=1, num_filters=self._make_divisible((scale * cfg[(- 1)][1])), stride=1, padding=0, num_groups=1, if_act=True, act='hard_swish', name='conv_last')
            self.end_points.append(conv)
            i += 1
        # NOTE(review): `feat_idx` is unused and end_points is indexed by the
        # dense enumeration index, not by the configured level — confirm this
        # labeling is intended for sparse feature_maps.
        res = OrderedDict([('mv3_{}'.format(idx), self.end_points[idx]) for (idx, feat_idx) in enumerate(self.feature_maps)])
        return res
# Restored requirement decorators: the original bare `_torch` / `_pytesseract`
# expressions are decorators whose '@' and name prefix were stripped
# (assumed to be transformers.testing_utils.require_torch / require_pytesseract
# — confirm against the file's imports).
@require_torch
@require_pytesseract
class LayoutLMv2ProcessorIntegrationTests(unittest.TestCase):
    """Integration tests for LayoutLMv2Processor across its five input modes."""

    # Restored @property (was a mangled bare `_property`): every call site
    # reads `self.get_images` / `self.get_tokenizers` without parentheses.
    @property
    def get_images(self):
        from datasets import load_dataset
        # Two fixture document images from the DocVQA test split.
        ds = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')
        image_1 = Image.open(ds[0]['file']).convert('RGB')
        image_2 = Image.open(ds[1]['file']).convert('RGB')
        return (image_1, image_2)

    @property
    def get_tokenizers(self):
        # Exercise every test with both the slow and the fast tokenizer.
        slow_tokenizer = LayoutLMv2Tokenizer.from_pretrained('microsoft/layoutlmv2-base-uncased')
        fast_tokenizer = LayoutLMv2TokenizerFast.from_pretrained('microsoft/layoutlmv2-base-uncased')
        return [slow_tokenizer, fast_tokenizer]

    def test_processor_case_1(self):
        """Case 1: image(s) only, OCR enabled by the feature extractor."""
        feature_extractor = LayoutLMv2FeatureExtractor()
        tokenizers = self.get_tokenizers
        images = self.get_images
        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            input_feat_extract = feature_extractor(images[0], return_tensors='pt')
            input_processor = processor(images[0], return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids', 'token_type_ids']
            actual_keys = sorted(list(input_processor.keys()))
            self.assertListEqual(actual_keys, expected_keys)
            self.assertAlmostEqual(input_feat_extract['pixel_values'].sum(), input_processor['image'].sum(), delta=0.01)
            expected_decoding = '[CLS] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president introductory remarks lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]'
            decoding = tokenizer.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            input_feat_extract = feature_extractor(images, return_tensors='pt')
            input_processor = processor(images, padding=True, return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids', 'token_type_ids']
            actual_keys = sorted(list(input_processor.keys()))
            self.assertListEqual(actual_keys, expected_keys)
            self.assertAlmostEqual(input_feat_extract['pixel_values'].sum(), input_processor['image'].sum(), delta=0.01)
            expected_decoding = "[CLS] 7 itc limited report and accounts 2013 itc s brands : an asset for the nation the consumer needs and aspirations they fulfil, the benefit they generate for millions across itc s value chains, the future - ready capabilities that support them, and the value that they create for the country, have made itc s brands national assets, adding to india s competitiveness. it is itc s aspiration to be the no 1 fmcg player in the country, driven by its new fmcg businesses. a recent nielsen report has highlighted that itc's new fmcg businesses are the fastest growing among the top consumer goods companies operating in india. itc takes justifiable pride that, along with generating economic value, these celebrated indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. di wills * ; love delightfully soft skin? aia ans source : : / / www. industrydocuments. ucsf. edu / docs / snbx0223 [SEP] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD]"
            decoding = tokenizer.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

    def test_processor_case_2(self):
        """Case 2: image(s) + pretokenized words/boxes, OCR disabled."""
        feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
        tokenizers = self.get_tokenizers
        images = self.get_images
        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            words = ['hello', 'world']
            boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
            input_processor = processor(images[0], words, boxes=boxes, return_tensors='pt')
            expected_keys = ['input_ids', 'bbox', 'token_type_ids', 'attention_mask', 'image']
            actual_keys = list(input_processor.keys())
            for key in expected_keys:
                self.assertIn(key, actual_keys)
            expected_decoding = '[CLS] hello world [SEP]'
            decoding = tokenizer.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            words = [['hello', 'world'], ['my', 'name', 'is', 'niels']]
            boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
            input_processor = processor(images, words, boxes=boxes, padding=True, return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids', 'token_type_ids']
            actual_keys = sorted(list(input_processor.keys()))
            self.assertListEqual(actual_keys, expected_keys)
            expected_decoding = '[CLS] hello world [SEP] [PAD] [PAD] [PAD]'
            decoding = tokenizer.decode(input_processor.input_ids[0].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            expected_bbox = [[0, 0, 0, 0], [3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000]]
            self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)

    def test_processor_case_3(self):
        """Case 3: words/boxes with word-level labels (token classification)."""
        feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
        tokenizers = self.get_tokenizers
        images = self.get_images
        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            words = ['weirdly', 'world']
            boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
            word_labels = [1, 2]
            input_processor = processor(images[0], words, boxes=boxes, word_labels=word_labels, return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids', 'labels', 'token_type_ids']
            actual_keys = sorted(list(input_processor.keys()))
            self.assertListEqual(actual_keys, expected_keys)
            expected_decoding = '[CLS] weirdly world [SEP]'
            decoding = tokenizer.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            expected_labels = [(- 100), 1, (- 100), 2, (- 100)]
            self.assertListEqual(input_processor.labels.squeeze().tolist(), expected_labels)
            words = [['hello', 'world'], ['my', 'name', 'is', 'niels']]
            boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
            word_labels = [[1, 2], [6, 3, 10, 2]]
            input_processor = processor(images, words, boxes=boxes, word_labels=word_labels, padding=True, return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids', 'labels', 'token_type_ids']
            actual_keys = sorted(list(input_processor.keys()))
            self.assertListEqual(actual_keys, expected_keys)
            expected_decoding = '[CLS] my name is niels [SEP]'
            decoding = tokenizer.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            expected_bbox = [[0, 0, 0, 0], [3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000]]
            self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
            expected_labels = [(- 100), 6, 3, 10, 2, (- 100), (- 100)]
            self.assertListEqual(input_processor.labels[1].tolist(), expected_labels)

    def test_processor_case_4(self):
        """Case 4: visual question answering with OCR-extracted context."""
        feature_extractor = LayoutLMv2FeatureExtractor()
        tokenizers = self.get_tokenizers
        images = self.get_images
        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            question = "What's his name?"
            input_processor = processor(images[0], question, return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids', 'token_type_ids']
            actual_keys = sorted(list(input_processor.keys()))
            self.assertListEqual(actual_keys, expected_keys)
            expected_decoding = "[CLS] what's his name? [SEP] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president introductory remarks lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]"
            decoding = tokenizer.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            questions = ['How old is he?', "what's the time"]
            input_processor = processor(images, questions, padding='max_length', max_length=20, truncation=True, return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids', 'token_type_ids']
            actual_keys = sorted(list(input_processor.keys()))
            self.assertListEqual(actual_keys, expected_keys)
            expected_decoding = "[CLS] what's the time [SEP] 7 itc limited report and accounts 2013 itc s [SEP]"
            decoding = tokenizer.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [372, 59, 407, 66], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [1000, 1000, 1000, 1000]]
            self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)

    def test_processor_case_5(self):
        """Case 5: visual question answering with pretokenized words/boxes."""
        feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
        tokenizers = self.get_tokenizers
        images = self.get_images
        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            question = "What's his name?"
            words = ['hello', 'world']
            boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
            input_processor = processor(images[0], question, words, boxes, return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids', 'token_type_ids']
            actual_keys = sorted(list(input_processor.keys()))
            self.assertListEqual(actual_keys, expected_keys)
            expected_decoding = "[CLS] what's his name? [SEP] hello world [SEP]"
            decoding = tokenizer.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            questions = ['How old is he?', "what's the time"]
            words = [['hello', 'world'], ['my', 'name', 'is', 'niels']]
            boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
            input_processor = processor(images, questions, words, boxes, padding=True, return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids', 'token_type_ids']
            actual_keys = sorted(list(input_processor.keys()))
            self.assertListEqual(actual_keys, expected_keys)
            expected_decoding = '[CLS] how old is he? [SEP] hello world [SEP] [PAD] [PAD] [PAD]'
            decoding = tokenizer.decode(input_processor.input_ids[0].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            expected_decoding = "[CLS] what's the time [SEP] my name is niels [SEP]"
            decoding = tokenizer.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            expected_bbox = [[6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000]]
            self.assertListEqual(input_processor.bbox[1].tolist()[(- 5):], expected_bbox)
_module()
class MSELoss(nn.Module):
    """Weighted mean-squared-error loss.

    When `negative` is set, the weighted loss L is mapped to exp(-L).
    NOTE(review): the bare `_module()` call above looks like a registry
    decorator whose '@' and registry-name prefix were stripped during a file
    transformation; confirm the original before use.
    """

    def __init__(self, reduction='mean', loss_weight=1.0, negative=False):
        super().__init__()
        self.reduction = reduction      # forwarded to mse_loss
        self.loss_weight = loss_weight  # scalar multiplier on the raw loss
        self.negative = negative        # toggles the exp(-loss) transform

    def forward(self, pred, target, weight=None, avg_factor=None):
        raw = mse_loss(pred, target, weight, reduction=self.reduction, avg_factor=avg_factor)
        weighted = self.loss_weight * raw
        if not self.negative:
            return weighted
        return torch.exp((- 1.0) * weighted)
def test_invalid_runs_data(invalid_runs_raw_data: Dict[(str, Dict[(str, Any)])]) -> None:
    """DiagnoseData must flag only `valid_runs` as False for run-mismatched raw data."""
    data_diag_tools = DiagnoseData(raw_data=invalid_runs_raw_data)
    # check_data() reports per environment; the fixture presumably contains
    # 'env_1' — confirm against the invalid_runs_raw_data fixture.
    check_data_results = data_diag_tools.check_data()['env_1']
    assert (check_data_results == {'valid_algorithms': True, 'valid_algorithm_names': True, 'valid_runs': False, 'valid_steps': True, 'valid_metrics': True})
def retrieve_top(args):
    """Encode queries and documents, then write top-k nearest docs per query.

    Side effects: runs query/doc inference into memmap files, builds a flat
    FAISS index (moved to GPU when available unless args.not_faiss_cuda),
    and writes one "<qid> <pid> <rank>" line per retrieved document to
    args.output_rank_file.
    """
    config = RobertaConfig.from_pretrained(args.model_path, gradient_checkpointing=False)
    model = RobertaDot.from_pretrained(args.model_path, config=config)
    output_embedding_size = model.output_embedding_size
    model = model.to(args.device)
    query_inference(model, args, output_embedding_size)
    doc_inference(model, args, output_embedding_size)
    # Drop the model before loading the (potentially huge) embedding memmaps.
    model = None
    torch.cuda.empty_cache()
    doc_embeddings = np.memmap(args.doc_memmap_path, dtype=np.float32, mode='r')
    doc_ids = np.memmap(args.docid_memmap_path, dtype=np.int32, mode='r')
    doc_embeddings = doc_embeddings.reshape((- 1), output_embedding_size)
    query_embeddings = np.memmap(args.query_memmap_path, dtype=np.float32, mode='r')
    query_embeddings = query_embeddings.reshape((- 1), output_embedding_size)
    query_ids = np.memmap(args.queryids_memmap_path, dtype=np.int32, mode='r')
    index = construct_flatindex_from_embeddings(doc_embeddings, doc_ids)
    if (torch.cuda.is_available() and (not args.not_faiss_cuda)):
        index = convert_index_to_gpu(index, list(range(args.n_gpu)), False)
    else:
        faiss.omp_set_num_threads(32)
    # Retrieves topk + 10 neighbors — presumably headroom for downstream
    # filtering; confirm against the consumer of output_rank_file.
    nearest_neighbors = index_retrieve(index, query_embeddings, (args.topk + 10), batch=320)
    with open(args.output_rank_file, 'w') as outputfile:
        for (qid, neighbors) in zip(query_ids, nearest_neighbors):
            for (idx, pid) in enumerate(neighbors):
                outputfile.write(f'''{qid} {pid} {(idx + 1)}
''')
_SAMPLERS.register_module()
# NOTE(review): the line above looks like a registry decorator whose leading
# '@' (and registry-name prefix) was stripped; as written it is a bare call
# that does NOT register the class. Recover the original decorator.
class RandomSampler(BaseSampler):
    """Proposal sampler that picks positive/negative indices uniformly at random."""
    def __init__(self, num, pos_fraction, neg_pos_ub=(- 1), add_gt_as_proposals=True, **kwargs):
        from mmdet.core.bbox import demodata
        super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)
        # Optional 'rng' kwarg seeds the helper RNG.
        self.rng = demodata.ensure_rng(kwargs.get('rng', None))
    def random_choice(self, gallery, num):
        """Randomly pick `num` elements from `gallery` without replacement.

        Returns a tensor when given a tensor, otherwise a numpy array.
        """
        assert (len(gallery) >= num)
        is_tensor = isinstance(gallery, torch.Tensor)
        if (not is_tensor):
            # NOTE(review): assumes a current CUDA device exists when a
            # non-tensor gallery is passed; fails on CPU-only setups.
            gallery = torch.tensor(gallery, dtype=torch.long, device=torch.cuda.current_device())
        perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
        rand_inds = gallery[perm]
        if (not is_tensor):
            rand_inds = rand_inds.cpu().numpy()
        return rand_inds
    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Return at most `num_expected` indices with a positive gt assignment."""
        pos_inds = torch.nonzero((assign_result.gt_inds > 0), as_tuple=False)
        if (pos_inds.numel() != 0):
            pos_inds = pos_inds.squeeze(1)
        if (pos_inds.numel() <= num_expected):
            return pos_inds
        else:
            return self.random_choice(pos_inds, num_expected)
    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Return at most `num_expected` indices assigned to background (gt_inds == 0)."""
        neg_inds = torch.nonzero((assign_result.gt_inds == 0), as_tuple=False)
        if (neg_inds.numel() != 0):
            neg_inds = neg_inds.squeeze(1)
        if (len(neg_inds) <= num_expected):
            return neg_inds
        else:
            return self.random_choice(neg_inds, num_expected)
class ListToTensor(object):
    """Transform for (image, payload) pairs: converts only the image to a tensor.

    Applies `ToTensor` to element 0 of the input pair and passes element 1
    through untouched, returning them as a two-item list.
    """
    def __init__(self):
        # Single converter instance reused across all calls.
        self.totensor = ToTensor()
    def __call__(self, img_rp):
        image = img_rp[0]
        payload = img_rp[1]
        return [self.totensor(image), payload]
class MobileNetV1OnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV1.

    Declares the model's dynamic axes (only the batch dimension) for inputs
    and outputs, and the numeric tolerance used when validating the export.
    """
    torch_onnx_minimum_version = version.parse('1.11')
    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        # Single image input; axis 0 (batch) is dynamic.
        return OrderedDict([('pixel_values', {0: 'batch'})])
    def outputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        # Classification head exposes logits; the bare model exposes the
        # hidden state and pooled output instead.
        if (self.task != 'image-classification'):
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])
        return OrderedDict([('logits', {0: 'batch'})])
    def atol_for_validation(self) -> float:
        # Absolute tolerance for comparing ONNX vs. framework outputs.
        return 0.0001
def main():
    """Entry point: configure, build the trainer, and run the mixed-precision
    training loop, saving a checkpoint after every epoch.

    Relies on module-level `parse_args`, `cfg`, `Trainer`, `cudnn` and `amp`
    (presumably torch.cuda.amp — confirm imports at top of file).
    """
    args = parse_args()
    cfg.set_args(args.gpu_ids, args.continue_train, exp_dir=args.exp_dir)
    cudnn.benchmark = True
    if args.cfg:
        cfg.update(args.cfg)
    trainer = Trainer()
    trainer._make_batch_generator()
    trainer._make_model()
    # GradScaler is a no-op when mixed precision is disabled (enabled=False).
    scaler = amp.GradScaler(init_scale=args.init_scale, enabled=args.use_mixed_precision)
    for epoch in range(trainer.start_epoch, cfg.end_epoch):
        trainer.set_lr(epoch)
        trainer.tot_timer.tic()
        trainer.read_timer.tic()
        for (itr, (inputs, targets, meta_info)) in enumerate(trainer.batch_generator):
            trainer.read_timer.toc()
            trainer.gpu_timer.tic()
            trainer.optimizer.zero_grad()
            # Forward under autocast; the model returns a dict of loss tensors.
            with amp.autocast(args.use_mixed_precision):
                loss = trainer.model(inputs, targets, meta_info, 'train')
                loss = {k: loss[k].mean() for k in loss}
            _loss = sum((loss[k] for k in loss))
            # Backward/step outside autocast, on the scaled total loss.
            with amp.autocast(False):
                _loss = scaler.scale(_loss)
                _loss.backward()
                scaler.step(trainer.optimizer)
                # NOTE(review): passing init_scale to update() resets the loss
                # scale every iteration, defeating GradScaler's dynamic scaling
                # (update() normally takes no argument) — confirm intended.
                scaler.update(args.init_scale)
            trainer.gpu_timer.toc()
            screen = [('Epoch %d/%d itr %d/%d:' % (epoch, cfg.end_epoch, itr, trainer.itr_per_epoch)), ('lr: %g' % trainer.get_lr()), ('speed: %.2f(%.2fs r%.2f)s/itr' % (trainer.tot_timer.average_time, trainer.gpu_timer.average_time, trainer.read_timer.average_time)), ('%.2fh/epoch' % ((trainer.tot_timer.average_time / 3600.0) * trainer.itr_per_epoch))]
            screen += [('%s: %.4f' % (('loss_' + k), v.detach())) for (k, v) in loss.items()]
            trainer.logger.info(' '.join(screen))
            trainer.tot_timer.toc()
            trainer.tot_timer.tic()
            trainer.read_timer.tic()
        trainer.save_model({'epoch': epoch, 'network': trainer.model.state_dict(), 'optimizer': trainer.optimizer.state_dict()}, epoch)
def squared_norm(x, axis=None, keepdims=False):
    """Return the sum of squared elements of `x` along `axis`.

    Works on any array-like exposing `**` and `.sum(axis=..., keepdims=...)`
    (e.g. numpy arrays). With axis=None the full squared L2 norm is returned.
    """
    squared = (x ** 2)
    return squared.sum(axis=axis, keepdims=keepdims)
def download(path, base_url=''):
    """Download `base_url + path` into the local file `path`, creating parent dirs.

    Fix over the original: the line ``url = (' + path)`` was a broken
    (unterminated) string literal — the URL prefix was evidently lost from
    the source. The prefix is now the backward-compatible `base_url`
    parameter (default empty). NOTE(review): restore the real mirror root
    as the default before use.

    Args:
        path: relative path of the file; also used as the local destination.
        base_url: URL prefix prepended to `path` to form the download URL.
    """
    url = (base_url + path)
    print(url)
    # `target_dir` instead of `dir` — avoid shadowing the builtin.
    target_dir = os.path.dirname(path)
    os.makedirs(target_dir, exist_ok=True)
    wget.download(url, path)
class PyClassnameExceptionRaiser():
    """Mixin that raises a PyImarisWriterException tagged with the concrete
    subclass name, so error messages identify which object failed to build."""
    def raise_creating_clex(self, message):
        detail = 'Error creating {}: {}'.format(self.__class__.__name__, message)
        raise PyImarisWriterException(detail)
# NOTE(review): looks like a stripped `@add_arg_scope` decorator — confirm
# against the upstream tf.contrib.layers source.
_arg_scope
def convolution3d(inputs, num_outputs, kernel_size, stride=1, padding='SAME', data_format=None, rate=1, activation_fn=nn.relu, normalizer_fn=None, normalizer_params=None, weights_initializer=initializers.xavier_initializer(), weights_regularizer=None, biases_initializer=init_ops.zeros_initializer(), biases_regularizer=None, reuse=None, variables_collections=None, outputs_collections=None, trainable=True, scope=None):
    """3-D convolution layer: thin wrapper forwarding every argument to the
    generic `convolution` with `conv_dims=3` pinned."""
    return convolution(inputs, num_outputs, kernel_size, stride, padding, data_format, rate, activation_fn, normalizer_fn, normalizer_params, weights_initializer, weights_regularizer, biases_initializer, biases_regularizer, reuse, variables_collections, outputs_collections, trainable, scope, conv_dims=3)
def find_best(restraints):
    """Return a config built from the filtered run whose final row has the
    highest 'best_val_f1' value.

    Only strictly-positive scores are considered (the running maximum starts
    at 0), so `best` stays None when nothing beats that baseline.
    """
    (table, header) = load()
    candidates = filter_data(table, header, restraints)
    metric_col = header['best_val_f1']
    best_score = 0
    best_row = None
    for run in candidates:
        final = run[(- 1)]
        if (final[metric_col] > best_score):
            best_score = final[metric_col]
            best_row = final
    return gen_config(restraints, header, best_row)
class CLUEWSC2020(CLSProcessor):
    """Processor for the CLUE WSC2020 coreference task.

    Bug fix over the original: `get_examples` appended to an undefined name
    `examples` (NameError on the first line read) and implicitly returned
    None; it now accumulates into a local list and returns it.
    """
    def __init__(self):
        super().__init__(labels_origin=['false', 'true'], labels_mapped=['', ''])
    def get_examples(self, data_dir, split):
        """Read `<split>.json` (one JSON object per line) from `data_dir` and
        return the list of InputExamples built from it."""
        examples = []
        path = os.path.join(data_dir, f'{split}.json')
        with open(path, encoding='utf8') as f:
            for line in f:
                example_json = json.loads(line)
                example = InputExample(meta={'that': example_json['target']['span1_text'], 'it': example_json['target']['span2_text'], 'text': example_json['text'], 'options': self.labels_mapped}, tgt_text=self.get_label(example_json['label']))
                examples.append(example)
        return examples
    def get_templates(self):
        """Prompt template; placeholders are filled from each example's meta."""
        return [':{text} :,{it} "{that}" ?{options}']
def augment_dictionary(dictionary: Dictionary, language_list: List[str], lang_tok_style: str, langtoks_specs: Sequence[str]=(LangTokSpec.main.value,), extra_data: Optional[Dict[(str, str)]]=None) -> None:
    """Add the language tokens for every (spec, language) pair to `dictionary`,
    plus the '<mask>' symbol when required.

    '<mask>' is added for mbart-style language tokens, or when monolingual
    denoising data ('mono_dae') is present in `extra_data`.
    """
    for spec in langtoks_specs:
        for lang in language_list:
            token = get_lang_tok(lang=lang, lang_tok_style=lang_tok_style, spec=spec)
            dictionary.add_symbol(token)
    needs_mask = (lang_tok_style == LangTokStyle.mbart.value)
    if ((not needs_mask) and (extra_data is not None)):
        needs_mask = (LangTokSpec.mono_dae.value in extra_data)
    if needs_mask:
        dictionary.add_symbol('<mask>')
def parse_args():
    """Parse CLI arguments for the pointnet2 ffi build script.

    Exactly one of --build / --clean must be given (enforced by the trailing
    assert); --objs optionally lists object files.
    """
    parser = argparse.ArgumentParser(description='Arguments for building pointnet2 ffi extension')
    parser.add_argument('--objs', nargs='*')
    mode = parser.add_mutually_exclusive_group()
    mode.add_argument('--build', dest='build', action='store_true')
    mode.add_argument('--clean', dest='clean', action='store_true')
    parser.set_defaults(build=False, clean=False)
    parsed = parser.parse_args()
    # One of the two modes is mandatory.
    assert (parsed.build or parsed.clean)
    return parsed
def dice_coeff(input, target):
    """Average per-sample Dice coefficient over a batch.

    Iterates paired samples from `input` and `target`, sums DiceCoeff results,
    and divides by the number of pairs.

    NOTE(review): if the batch is empty the loop never runs and `i` is
    unbound, so the final division raises NameError — confirm callers never
    pass empty batches.
    """
    # Accumulator on the same device as the input.
    if input.is_cuda:
        s = torch.FloatTensor(1).cuda().zero_()
    else:
        s = torch.FloatTensor(1).zero_()
    for (i, c) in enumerate(zip(input, target)):
        s = (s + DiceCoeff().forward(c[0], c[1]))
    # (i + 1) is the number of pairs processed (i leaks from the loop).
    return (s / (i + 1))
# NOTE(review): looks like a stripped `@require_torch` decorator — confirm.
_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-test harness for DecisionTransformerModel.

    Most generic test toggles are disabled — presumably because the
    trajectory-based forward signature (states/actions/rewards/...) does not
    fit the generic text-model harness; confirm against upstream.
    """
    all_model_classes = ((DecisionTransformerModel,) if is_torch_available() else ())
    all_generative_model_classes = ()
    pipeline_model_mapping = ({'feature-extraction': DecisionTransformerModel} if is_torch_available() else {})
    # Flags opting this model out of generic harness checks.
    test_generate_without_input_ids = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_from_pretrained(self):
        # Only the first archive entry is exercised, to keep the test fast.
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        """Pin the leading positional parameter names of forward()."""
        (config, _) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['states', 'actions', 'rewards', 'returns_to_go', 'timesteps', 'attention_mask']
            self.assertListEqual(arg_names[:len(expected_arg_names)], expected_arg_names)
class ONNXRTITFilters(object):
    """Holds a per-instance copy of the module-level ONNXRT_IT_FILTERS table,
    so registrations on one instance cannot mutate the shared mapping."""
    def __init__(self):
        # dict(...) copies the module-level table, equivalent to {} + update().
        self.filters = dict(ONNXRT_IT_FILTERS)
def get_fullD(model_config):
    """Build the full-image discriminator: a 5-layer NLayerDiscriminator using
    the norm layer named in `model_config['norm_layer']`, without sigmoid."""
    norm = get_norm_layer(norm_type=model_config['norm_layer'])
    return NLayerDiscriminator(n_layers=5, norm_layer=norm, use_sigmoid=False)
def load_model_weights(model, model_name, dataset, classes, include_top, **kwargs):
    """Locate pretrained weights for (model_name, dataset, include_top), fetch
    them via keras' cached downloader, and load them into `model`.

    Raises ValueError when no matching weights exist, or when `include_top`
    is set but the stored class count differs from `classes`.
    """
    (_, _, _, keras_utils) = get_submodules_from_kwargs(kwargs)
    candidates = _find_weights(model_name, dataset, include_top)
    # Guard clause: nothing matched this configuration.
    if not candidates:
        raise ValueError((('There is no weights for such configuration: ' + 'model = {}, dataset = {}, '.format(model.name, dataset)) + 'classes = {}, include_top = {}.'.format(classes, include_top)))
    record = candidates[0]
    if (include_top and (record['classes'] != classes)):
        raise ValueError('If using `weights` and `include_top` as true, `classes` should be {}'.format(record['classes']))
    weights_path = keras_utils.get_file(record['name'], record['url'], cache_subdir='models', md5_hash=record['md5'])
    model.load_weights(weights_path)
class TFDeiTForImageClassificationWithTeacher(metaclass=DummyObject):
    """Placeholder emitted when TensorFlow is unavailable: any instantiation
    raises via requires_backends instead of importing the real model.
    (Auto-generated dummy-object pattern; keep in sync with its generator.)"""
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def nopeak_mask(size):
    """Return a (1, size, size) boolean causal mask wrapped in a Variable.

    Entries on and below the diagonal are True (visible); entries strictly
    above the diagonal are False, so position i cannot attend to j > i.
    """
    # Ones strictly above the diagonal mark the forbidden "future" positions.
    future = np.triu(np.ones((1, size, size)), k=1).astype('uint8')
    allowed = (torch.from_numpy(future) == 0)
    return Variable(allowed)
class ScenarioLoaderV2():
    """Loads a network-pentest scenario from a version-2 YAML file.

    `load()` parses each YAML section in order into instance attributes,
    then `_construct_scenario()` bundles them into a `Scenario`.

    NOTE(review): `_check_scenario_sections_valid` and
    `_validate_sensitive_hosts` are defined but never invoked by `load()` —
    confirm whether they should be wired in.
    NOTE(review): several fields (`address_space_bounds`, host/firewall
    addresses) are parsed with `eval` on YAML strings — safe only for
    trusted scenario files.
    """
    def load(self, file_path, name=None):
        """Parse the YAML at `file_path` and return the constructed Scenario."""
        self.yaml_dict = u.load_yaml(file_path)
        if (name is None):
            name = u.get_file_name(file_path)
        self.name = name
        # Section parse order matters: later parsers read attributes set by
        # earlier ones (e.g. host configs use subnets, os, services).
        self._parse_subnets()
        self._parse_topology()
        self._parse_os()
        self._parse_services()
        self._parse_processes()
        self._parse_sensitive_hosts()
        self._parse_exploits()
        self._parse_privescs()
        self._parse_scan_costs()
        self._parse_host_configs()
        self._parse_firewall()
        self._parse_hosts()
        self._parse_step_limit()
        # NOTE(review): eval on a YAML string — trusted input only.
        self.address_space_bounds = eval(self.yaml_dict['address_space_bounds'])
        return self._construct_scenario()
    def _construct_scenario(self):
        """Assemble the parsed attributes into a Scenario object."""
        scenario_dict = dict()
        scenario_dict[u.SUBNETS] = self.subnets
        scenario_dict[u.TOPOLOGY] = self.topology
        scenario_dict[u.OS] = self.os
        scenario_dict[u.SERVICES] = self.services
        scenario_dict[u.PROCESSES] = self.processes
        scenario_dict[u.SENSITIVE_HOSTS] = self.sensitive_hosts
        scenario_dict[u.EXPLOITS] = self.exploits
        scenario_dict[u.PRIVESCS] = self.privescs
        scenario_dict[u.OS_SCAN_COST] = self.os_scan_cost
        scenario_dict[u.SERVICE_SCAN_COST] = self.service_scan_cost
        scenario_dict[u.SUBNET_SCAN_COST] = self.subnet_scan_cost
        scenario_dict[u.PROCESS_SCAN_COST] = self.process_scan_cost
        scenario_dict[u.FIREWALL] = self.firewall
        scenario_dict[u.HOSTS] = self.hosts
        scenario_dict[u.STEP_LIMIT] = self.step_limit
        scenario_dict['address_space_bounds'] = self.address_space_bounds
        return Scenario(scenario_dict, name=self.name, generated=False)
    def _check_scenario_sections_valid(self):
        """Check every YAML key is known and its value has the expected type."""
        assert (len(self.yaml_dict) >= len(VALID_CONFIG_KEYS)), f'Too few config file keys: {len(self.yaml_dict)} < {len(VALID_CONFIG_KEYS)}'
        for (k, v) in self.yaml_dict.items():
            assert ((k in VALID_CONFIG_KEYS) or (k in OPTIONAL_CONFIG_KEYS)), f'{k} not a valid config file key'
            if (k in VALID_CONFIG_KEYS):
                expected_type = VALID_CONFIG_KEYS[k]
            else:
                expected_type = OPTIONAL_CONFIG_KEYS[k]
            assert isinstance(v, expected_type), f"{v} invalid type for config file key '{k}': {type(v)} != {expected_type}"
    def _parse_subnets(self):
        """Resolve subnet sizes (supporting 'min-max' random ranges) and
        prepend the internet subnet of size 1."""
        subnets = self.yaml_dict[u.SUBNETS]
        for (s_id, subnet) in enumerate(subnets):
            if (type(subnet) is str):
                # "min-max" string: draw a random size in [min, max].
                (size_min, size_max) = subnet.split('-')
                size = random.randint(int(size_min), int(size_max))
                subnets[s_id] = size
        self._validate_subnets(subnets)
        # Subnet 0 is the internet (single external "host").
        subnets.insert(0, 1)
        self.subnets = subnets
        self.num_hosts = (sum(subnets) - 1)
    def _validate_subnets(self, subnets):
        assert (len(subnets) > 0), 'Subnets cannot be empty list'
        for subnet_size in subnets:
            assert ((type(subnet_size) is int) and (subnet_size > 0)), f'{subnet_size} invalid subnet size, must be positive int'
    def _parse_topology(self):
        topology = self.yaml_dict[u.TOPOLOGY]
        self._validate_topology(topology)
        self.topology = topology
    def _validate_topology(self, topology):
        """Topology must be a square 0/1 adjacency matrix over all subnets."""
        assert (len(topology) == len(self.subnets)), f'Number of rows in topology adjacency matrix must equal number of subnets: {len(topology)} != {len(self.subnets)}'
        for row in topology:
            assert isinstance(row, list), 'topology must be 2D adjacency matrix (i.e. list of lists)'
            assert (len(row) == len(self.subnets)), f'Number of columns in topology matrix must equal number of subnets: {len(topology)} != {len(self.subnets)}'
            for col in row:
                assert (isinstance(col, int) and ((col == 1) or (col == 0))), f'Subnet_connections adjaceny matrix must contain only 1 (connected) or 0 (not connected): {col} invalid'
    def _parse_os(self):
        os = self.yaml_dict[u.OS]
        self._validate_os(os)
        self.os = os
    def _validate_os(self, os):
        assert (len(os) > 0), f'{len(os)}. Invalid number of OSs, must be >= 1'
        assert (len(os) == len(set(os))), f'{os}. OSs must not contain duplicates'
    def _parse_services(self):
        services = self.yaml_dict[u.SERVICES]
        self._validate_services(services)
        self.services = services
    def _validate_services(self, services):
        assert (len(services) > 0), f'{len(services)}. Invalid number of services, must be > 0'
        assert (len(services) == len(set(services))), f'{services}. Services must not contain duplicates'
    def _parse_processes(self):
        processes = self.yaml_dict[u.PROCESSES]
        self._validate_processes(processes)
        self.processes = processes
    def _validate_processes(self, processes):
        assert (len(processes) >= 1), f'{len(processes)}. Invalid number of services, must be > 0'
        assert (len(processes) == len(set(processes))), f'{processes}. Processes must not contain duplicates'
    def _parse_sensitive_hosts(self):
        """Randomly mark hosts as sensitive (fixed value 100.0).

        NOTE(review): this indexes the YAML `sensitive_hosts` entry by subnet
        id as a per-subnet probability, while `_validate_sensitive_hosts`
        (unused) expects an {address: value} mapping — confirm which schema
        is current.
        """
        sensitive_hosts = self.yaml_dict[u.SENSITIVE_HOSTS]
        self.sensitive_hosts = dict()
        for (s_id, subnet_size) in enumerate(self.subnets):
            if (s_id == 0):
                continue
            sensitive_prob = sensitive_hosts[s_id]
            for host_id in range(subnet_size):
                if (random.random() < sensitive_prob):
                    self.sensitive_hosts[(s_id, host_id)] = 100.0
    def _validate_sensitive_hosts(self, sensitive_hosts):
        """Validate an {address-string: value} sensitive-hosts mapping.
        NOTE(review): not called by load() — see class docstring."""
        assert (len(sensitive_hosts) > 0), f'Number of sensitive hosts must be >= 1: {len(sensitive_hosts)} not >= 1'
        assert (len(sensitive_hosts) <= self.num_hosts), f'Number of sensitive hosts must be <= total number of hosts: {len(sensitive_hosts)} not <= {self.num_hosts}'
        for (address, value) in sensitive_hosts.items():
            (subnet_id, host_id) = eval(address)
            assert self._is_valid_subnet_ID(subnet_id), f'Invalid sensitive host tuple: subnet_id must be a valid subnet: {subnet_id} != non-negative int less than {(len(self.subnets) + 1)}'
            assert self._is_valid_host_address(subnet_id, host_id), f'Invalid sensitive host tuple: host_id must be a valid int: {host_id} != non-negative int less than {self.subnets[subnet_id]}'
            assert (isinstance(value, (float, int)) and (value > 0)), f'Invalid sensitive host tuple: invalid value: {value} != a positive int or float'
        for (i, m) in enumerate(sensitive_hosts.keys()):
            h1_addr = eval(m)
            for (j, n) in enumerate(sensitive_hosts.keys()):
                if (i == j):
                    continue
                h2_addr = eval(n)
                assert (h1_addr != h2_addr), f'Sensitive hosts list must not contain duplicate host addresses: {m} == {n}'
    def _is_valid_subnet_ID(self, subnet_ID):
        """True when subnet_ID is an int in [1, len(subnets)] (0 = internet)."""
        if ((type(subnet_ID) is not int) or (subnet_ID < 1) or (subnet_ID > len(self.subnets))):
            return False
        return True
    def _is_valid_host_address(self, subnet_ID, host_ID):
        """True when (subnet_ID, host_ID) identifies an existing host."""
        if (not self._is_valid_subnet_ID(subnet_ID)):
            return False
        if ((type(host_ID) is not int) or (host_ID < 0) or (host_ID >= self.subnets[subnet_ID])):
            return False
        return True
    def _parse_exploits(self):
        exploits = self.yaml_dict[u.EXPLOITS]
        self._validate_exploits(exploits)
        self.exploits = exploits
    def _validate_exploits(self, exploits):
        for (e_name, e) in exploits.items():
            self._validate_single_exploit(e_name, e)
    def _validate_single_exploit(self, e_name, e):
        """Validate one exploit dict; normalizes 'none' OS and string access
        levels in place."""
        assert isinstance(e, dict), f'{e_name}. Exploit must be a dict.'
        for (k, t) in EXPLOIT_KEYS.items():
            assert (k in e), f"{e_name}. Exploit missing key: '{k}'"
            assert isinstance(e[k], t), f"{e_name}. Exploit '{k}' incorrect type. Expected {t}"
        assert (e[u.EXPLOIT_SERVICE] in self.services), f"{e_name}. Exploit target service invalid: '{e[u.EXPLOIT_SERVICE]}'"
        if (str(e[u.EXPLOIT_OS]).lower() == 'none'):
            e[u.EXPLOIT_OS] = None
        assert ((e[u.EXPLOIT_OS] is None) or (e[u.EXPLOIT_OS] in self.os)), f"{e_name}. Exploit target OS is invalid. '{e[u.EXPLOIT_OS]}'. Should be None or one of the OS in the os list."
        assert (0 <= e[u.EXPLOIT_PROB] <= 1), f"{e_name}. Exploit probability, '{e[u.EXPLOIT_PROB]}' not a valid probability"
        assert (e[u.EXPLOIT_COST] > 0), f'{e_name}. Exploit cost must be > 0.'
        assert (e[u.EXPLOIT_ACCESS] in VALID_ACCESS_VALUES), f"{e_name}. Exploit access value '{e[u.EXPLOIT_ACCESS]}' invalid. Must be one of {VALID_ACCESS_VALUES}"
        if isinstance(e[u.EXPLOIT_ACCESS], str):
            # Normalize access-level names to their numeric codes.
            e[u.EXPLOIT_ACCESS] = ACCESS_LEVEL_MAP[e[u.EXPLOIT_ACCESS]]
    def _parse_privescs(self):
        self.privescs = self.yaml_dict[u.PRIVESCS]
        self._validate_privescs(self.privescs)
    def _validate_privescs(self, privescs):
        for (pe_name, pe) in privescs.items():
            self._validate_single_privesc(pe_name, pe)
    def _validate_single_privesc(self, pe_name, pe):
        """Validate one privilege-escalation dict; mirrors exploit validation."""
        s_name = 'Priviledge Escalation'
        assert isinstance(pe, dict), f'{pe_name}. {s_name} must be a dict.'
        for (k, t) in PRIVESC_KEYS.items():
            assert (k in pe), f"{pe_name}. {s_name} missing key: '{k}'"
            assert (isinstance(pe[k], t) or (pe[k] is None)), f"{pe_name}. {s_name} '{k}' incorrect type. Expected {t}"
        assert (pe[u.PRIVESC_PROCESS] in self.processes), f"{pe_name}. {s_name} target process invalid: '{pe[u.PRIVESC_PROCESS]}'"
        if (str(pe[u.PRIVESC_OS]).lower() == 'none'):
            pe[u.PRIVESC_OS] = None
        assert ((pe[u.PRIVESC_OS] is None) or (pe[u.PRIVESC_OS] in self.os)), f"{pe_name}. {s_name} target OS is invalid. '{pe[u.PRIVESC_OS]}'. Should be None or one of the OS in the os list."
        assert (0 <= pe[u.PRIVESC_PROB] <= 1.0), f"{pe_name}. {s_name} probability, '{pe[u.PRIVESC_PROB]}' not a valid probability"
        assert (pe[u.PRIVESC_COST] > 0), f'{pe_name}. {s_name} cost must be > 0.'
        assert (pe[u.PRIVESC_ACCESS] in VALID_ACCESS_VALUES), f"{pe_name}. {s_name} access value '{pe[u.PRIVESC_ACCESS]}' invalid. Must be one of {VALID_ACCESS_VALUES}"
        if isinstance(pe[u.PRIVESC_ACCESS], str):
            pe[u.PRIVESC_ACCESS] = ACCESS_LEVEL_MAP[pe[u.PRIVESC_ACCESS]]
    def _parse_scan_costs(self):
        self.os_scan_cost = self.yaml_dict[u.OS_SCAN_COST]
        self.service_scan_cost = self.yaml_dict[u.SERVICE_SCAN_COST]
        self.subnet_scan_cost = self.yaml_dict[u.SUBNET_SCAN_COST]
        self.process_scan_cost = self.yaml_dict[u.PROCESS_SCAN_COST]
        for (n, c) in [('OS', self.os_scan_cost), ('Service', self.service_scan_cost), ('Subnet', self.subnet_scan_cost), ('Process', self.process_scan_cost)]:
            self._validate_scan_cost(n, c)
    def _validate_scan_cost(self, scan_name, scan_cost):
        assert (scan_cost >= 0), f'{scan_name} Scan Cost must be >= 0.'
    def _parse_host_configs(self):
        """Build per-host configs; '_random' generates them, anything else is
        taken verbatim from the YAML and validated."""
        def is_for_os(x, os):
            # Names are assumed to embed their OS as "<name>_<os>", with
            # 'any' matching every OS. NOTE(review): confirm naming scheme.
            if (x is None):
                return True
            x_os = x.split('_')[1]
            return ((x_os == os) or (x_os == 'any'))
        if (self.yaml_dict[u.HOST_CONFIGS] == '_random'):
            self.host_configs = {}
            for (s_id, subnet_size) in enumerate(self.subnets):
                if (s_id == 0):
                    continue
                for host_id in range(subnet_size):
                    os = random.choice(self.os)
                    all_services = [x for x in self.services if is_for_os(x, os)]
                    sensitive_services = [x for x in self.yaml_dict['sensitive_services'] if (x in all_services)]
                    non_sensitive_services = [x for x in all_services if (x not in sensitive_services)]
                    processes = [x for x in self.processes if is_for_os(x, os)]
                    host_config = {'os': os, 'services': random.sample(non_sensitive_services, random.randint(1, len(non_sensitive_services))), 'processes': random.sample(processes, random.randint(1, len(processes)))}
                    # Sensitive hosts always run a sensitive service; other
                    # hosts get one with 10% probability (as decoys).
                    if ((s_id, host_id) in self.sensitive_hosts):
                        assert (len(sensitive_services) > 0)
                        service_to_add = random.choice(sensitive_services)
                        host_config['services'].append(service_to_add)
                    elif (random.random() < 0.1):
                        service_to_add = random.choice(sensitive_services)
                        host_config['services'].append(service_to_add)
                    self.host_configs[(s_id, host_id)] = host_config
        else:
            self.host_configs = self.yaml_dict[u.HOST_CONFIGS]
            # Only explicit (YAML-provided) configs are validated.
            self._validate_host_configs(self.host_configs)
    def _validate_host_configs(self, host_configs):
        assert (len(host_configs) == self.num_hosts), f'Number of host configurations must match the number of hosts in network: {len(host_configs)} != {self.num_hosts}'
        assert self._has_all_host_addresses(host_configs.keys()), 'Host configurations must have no duplicates and have an address for each host on network.'
        for (addr, cfg) in host_configs.items():
            self._validate_host_config(addr, cfg)
    def _has_all_host_addresses(self, addresses):
        """True when every (subnet, host) address appears (as str or tuple)."""
        for (s_id, s_size) in enumerate(self.subnets[1:]):
            for m in range(s_size):
                if ((str(((s_id + 1), m)) not in addresses) and (((s_id + 1), m) not in addresses)):
                    return False
        return True
    def _validate_host_config(self, addr, cfg):
        """Validate one host config; fills in an empty firewall when absent."""
        err_prefix = f'Host {addr}'
        assert (isinstance(cfg, dict) and (len(cfg) >= len(HOST_CONFIG_KEYS))), f'{err_prefix} configurations must be a dict of length >= {len(HOST_CONFIG_KEYS)}. {cfg} is invalid'
        for k in HOST_CONFIG_KEYS:
            assert (k in cfg), f'{err_prefix} configuration missing key: {k}'
        host_services = cfg[u.HOST_SERVICES]
        for service in host_services:
            assert (service in self.services), f'{err_prefix} Invalid service in configuration services list: {service}'
        assert (len(host_services) == len(set(host_services))), f'{err_prefix} configuration services list cannot contain duplicates'
        host_processes = cfg[u.HOST_PROCESSES]
        for process in host_processes:
            assert (process in self.processes), f'{err_prefix} invalid process in configuration processes list: {process}'
        assert (len(host_processes) == len(set(host_processes))), f'{err_prefix} configuation processes list cannot contain duplicates'
        host_os = cfg[u.HOST_OS]
        assert (host_os in self.os), f'{err_prefix} invalid os in configuration: {host_os}'
        fw_err_prefix = f'{err_prefix} {u.HOST_FIREWALL}'
        if (u.HOST_FIREWALL in cfg):
            firewall = cfg[u.HOST_FIREWALL]
            assert isinstance(firewall, dict), f'{fw_err_prefix} must be a dictionary, with host addresses as keys and a list of denied services as values. {firewall} is invalid.'
            for (addr, srv_list) in firewall.items():
                addr = self._validate_host_address(addr, err_prefix)
                assert self._is_valid_firewall_setting(srv_list), f'{fw_err_prefix} setting must be a list, contain only valid services and contain no duplicates: {srv_list} is not valid'
        else:
            cfg[u.HOST_FIREWALL] = dict()
        v_err_prefix = f'{err_prefix} {u.HOST_VALUE}'
        if (u.HOST_VALUE in cfg):
            host_value = cfg[u.HOST_VALUE]
            assert isinstance(host_value, (int, float)), f'{v_err_prefix} must be an integer or float value. {host_value} is invalid'
            if (addr in self.sensitive_hosts):
                sh_value = self.sensitive_hosts[addr]
                assert math.isclose(host_value, sh_value), f'{v_err_prefix} for a sensitive host must either match the value specified in the {u.SENSITIVE_HOSTS} section or be excluded the host config. The value {host_value} is invalid as it does not match value {sh_value}.'
    def _validate_host_address(self, addr, err_prefix=''):
        """Assert `addr` evals to a valid (subnet, host) tuple; returns True.
        NOTE(review): eval on a YAML string — trusted input only."""
        try:
            addr = eval(addr)
        except Exception:
            raise AssertionError(f'{err_prefix} address invalid. Must be (subnet, host) tuple of integers. {addr} is invalid.')
        assert (isinstance(addr, tuple) and (len(addr) == 2) and all([isinstance(a, int) for a in addr])), f'{err_prefix} address invalid. Must be (subnet, host) tuple of integers. {addr} is invalid.'
        assert (0 < addr[0] < len(self.subnets)), f'{err_prefix} address invalid. Subnet address must be in range 0 < subnet addr < {len(self.subnets)}. {addr[0]} is invalid.'
        assert (0 <= addr[1] < self.subnets[addr[0]]), f'{err_prefix} address invalid. Host address must be in range 0 < host addr < {self.subnets[addr[0]]}. {addr[1]} is invalid.'
        return True
    def _parse_firewall(self):
        """Build the inter-subnet firewall; '_subnets' allows everything on
        every topology edge, '_all' entries expand to the full service list."""
        firewall = self.yaml_dict[u.FIREWALL]
        if (firewall == '_subnets'):
            firewall = {}
            for (src, row) in enumerate(self.topology):
                for (dest, col) in enumerate(row):
                    if (src == dest):
                        continue
                    if (col == 1):
                        firewall[str((src, dest))] = ['_all']
        self._validate_firewall(firewall)
        self.firewall = {}
        for (connect, v) in firewall.items():
            if ('_all' in v):
                self.firewall[eval(connect)] = self.services
            else:
                self.firewall[eval(connect)] = v
    def _validate_firewall(self, firewall):
        assert self._contains_all_required_firewalls(firewall), 'Firewall dictionary must contain two entries for each subnet connection in network (including from outside) as defined by network topology matrix'
        for f in firewall.values():
            assert self._is_valid_firewall_setting(f), f'Firewall setting must be a list, contain only valid services and contain no duplicates: {f} is not valid'
    def _contains_all_required_firewalls(self, firewall):
        """Each connected subnet pair needs entries in both directions."""
        for (src, row) in enumerate(self.topology):
            for (dest, col) in enumerate(row):
                if (src == dest):
                    continue
                if ((col == 1) and ((str((src, dest)) not in firewall) or (str((dest, src)) not in firewall))):
                    return False
        return True
    def _is_valid_firewall_setting(self, f):
        """A setting is a duplicate-free list of known services (or '_all')."""
        if (type(f) != list):
            return False
        for service in f:
            if (service not in (self.services + ['_all'])):
                return False
        for (i, x) in enumerate(f):
            for (j, y) in enumerate(f):
                if ((i != j) and (x == y)):
                    return False
        return True
    def _parse_hosts(self):
        """Instantiate a Host object per parsed host configuration."""
        hosts = dict()
        for (address, h_cfg) in self.host_configs.items():
            (os_cfg, srv_cfg, proc_cfg) = self._construct_host_config(h_cfg)
            value = self._get_host_value(address, h_cfg)
            hosts[address] = Host(address=address, os=os_cfg, services=srv_cfg, processes=proc_cfg, firewall=h_cfg[u.HOST_FIREWALL], value=value)
        self.hosts = hosts
    def _construct_host_config(self, host_cfg):
        """Expand a host config into boolean presence maps over all known
        OSs, services and processes."""
        os_cfg = {}
        for os_name in self.os:
            os_cfg[os_name] = (os_name == host_cfg[u.HOST_OS])
        services_cfg = {}
        for service in self.services:
            services_cfg[service] = (service in host_cfg[u.HOST_SERVICES])
        processes_cfg = {}
        for process in self.processes:
            processes_cfg[process] = (process in host_cfg[u.HOST_PROCESSES])
        return (os_cfg, services_cfg, processes_cfg)
    def _get_host_value(self, address, host_cfg):
        """Sensitive-host value wins; otherwise the config value or default."""
        if (address in self.sensitive_hosts):
            return float(self.sensitive_hosts[address])
        return float(host_cfg.get(u.HOST_VALUE, u.DEFAULT_HOST_VALUE))
    def _parse_step_limit(self):
        """Step limit is optional; when present it must be a positive int."""
        if (u.STEP_LIMIT not in self.yaml_dict):
            step_limit = None
        else:
            step_limit = self.yaml_dict[u.STEP_LIMIT]
            assert (step_limit > 0), f'Step limit must be positive int: {step_limit} is invalid'
        self.step_limit = step_limit
class Object():
    """Mutable bag of training hyper-parameters populated from parsed CLI args.

    NOTE(review): the initializer is named `init`, not `__init__` —
    presumably invoked explicitly after construction; confirm call sites.
    """
    def init(self, args):
        """Populate all hyper-parameters from `args` plus fixed defaults."""
        # RL / loss coefficients
        self.gamma = 0.99
        self.batch = args.batch
        self.epoch = args.epoch
        self.alpha_v = 0.1
        self.alpha_h = args.alpha_h
        self.target_rho = 0.005
        self.mp_iterations = args.mp_iterations
        self.seed = args.seed
        # Hardware placement
        self.device = get_device(args.device)
        self.cpus = get_cpu_count(args.cpus)
        # Optimizer settings
        self.opt_lr = args.lr
        self.opt_l2 = 0.0001
        self.opt_max_norm = 3.0
        # LR / entropy-coefficient schedules (rates are in epochs)
        self.sched_lr_factor = 0.5
        self.sched_lr_min = (self.opt_lr / 30)
        self.sched_lr_rate = (20 * self.epoch)
        self.sched_alpha_h_factor = 1.0
        self.sched_alpha_h_min = (self.alpha_h / 2)
        self.sched_alpha_h_rate = (1 * self.epoch)
        # Environment settings
        self.box_num_obj = args.boxes
        self.box_max_steps = 100
        self.q_range = ((- 15), 15)
        # Run control / evaluation / checkpointing
        self.max_epochs = args.max_epochs
        self.log_rate = (1 * self.epoch)
        self.eval_problems = 1000
        self.eval_batch = 64
        self.load_model = args.load_model
    def __str__(self):
        # Dump every attribute for logging.
        return str(vars(self))
def get_document_parse_tree_and_str(inp: List[str]) -> (List[TreeNode], List[str]):
    """Parse every sentence of a document.

    Returns a pair of parallel lists: the parse tree for each sentence and
    the space-joined token text recovered from that tree.
    """
    trees = []
    texts = []
    for sentence in inp:
        node = read_single_parse_tree(sentence)
        trees.append(node)
        texts.append(' '.join(node.text))
    return (trees, texts)
class TestNet(spaic.Network):
    """Two-layer LIF spiking network trained with STCA.

    Pipeline: Poisson encoder -> LIF hidden layer -> LIF output layer ->
    spike-count decoder, with full (dense) connections between stages.
    Relies on module-level `node_num` and `label_num` for the layer sizes.
    """
    def __init__(self):
        super(TestNet, self).__init__()
        # Poisson spike encoding of the input vector.
        self.input = spaic.Encoder(num=node_num, coding_method='poisson')
        self.layer1 = spaic.NeuronGroup(node_num, neuron_model='lif')
        self.layer2 = spaic.NeuronGroup(label_num, neuron_model='lif')
        # Decode class scores from output-layer spike counts.
        self.output = spaic.Decoder(num=label_num, dec_target=self.layer2, coding_method='spike_counts')
        self.connection1 = spaic.Connection(self.input, self.layer1, link_type='full')
        self.connection2 = spaic.Connection(self.layer1, self.layer2, link_type='full')
        # STCA learning rule over the whole network, optimized with Adam.
        self.learner = spaic.Learner(trainable=self, algorithm='STCA')
        self.learner.set_optimizer('Adam', 0.001)
class ConvLSTMPeephole3D(Layer):
    """Python wrapper for the BigDL ConvLSTMPeephole3D layer: forwards all
    constructor arguments unchanged to the backend layer (presumably via the
    JVM bridge implied by `bigdl_type` — confirm against BigDL docs)."""
    def __init__(self, input_size, output_size, kernel_i, kernel_c, stride=1, padding=(- 1), wRegularizer=None, uRegularizer=None, bRegularizer=None, cRegularizer=None, with_peephole=True, bigdl_type='float'):
        super(ConvLSTMPeephole3D, self).__init__(None, bigdl_type, input_size, output_size, kernel_i, kernel_c, stride, padding, wRegularizer, uRegularizer, bRegularizer, cRegularizer, with_peephole)
def init_weights(model, conv='kaiming', batchnorm='normal', linear='kaiming', lstm='kaiming'):
    """Initialize conv / batchnorm / linear / LSTM parameters of `model` in place.

    Each keyword selects a scheme per layer family:
      conv, linear, lstm: 'kaiming' or 'xavier' (normal variants);
      batchnorm: 'normal' (N(1.0, 0.02)) or 'constant' (1.0).
    Raises ValueError for any unrecognized scheme name. Biases are zeroed.
    """
    for m in model.modules():
        if isinstance(m, _ConvNd):
            if (conv == 'kaiming'):
                initer.kaiming_normal_(m.weight)
            elif (conv == 'xavier'):
                initer.xavier_normal_(m.weight)
            else:
                raise ValueError('init type of conv error.\n')
            if (m.bias is not None):
                initer.constant_(m.bias, 0)
        elif isinstance(m, _BatchNorm):
            if (batchnorm == 'normal'):
                initer.normal_(m.weight, 1.0, 0.02)
            elif (batchnorm == 'constant'):
                initer.constant_(m.weight, 1.0)
            else:
                raise ValueError('init type of batchnorm error.\n')
            initer.constant_(m.bias, 0.0)
        elif isinstance(m, nn.Linear):
            if (linear == 'kaiming'):
                initer.kaiming_normal_(m.weight)
            elif (linear == 'xavier'):
                initer.xavier_normal_(m.weight)
            else:
                raise ValueError('init type of linear error.\n')
            if (m.bias is not None):
                initer.constant_(m.bias, 0)
        elif isinstance(m, nn.LSTM):
            # LSTM weights/biases come as named parameters, not attributes.
            for (name, param) in m.named_parameters():
                if ('weight' in name):
                    if (lstm == 'kaiming'):
                        initer.kaiming_normal_(param)
                    elif (lstm == 'xavier'):
                        initer.xavier_normal_(param)
                    else:
                        raise ValueError('init type of lstm error.\n')
                elif ('bias' in name):
                    initer.constant_(param, 0)
class HyperParams():
    """Class-level default constants for the conversation/search loop."""
    # Max times a single state may be revisited.
    MaxStateVisitCount = 5
    # Hard cap on the number of conversation rounds.
    MaxNumConversationRounds = 100
    # Default sampling temperature (presumably for the model API — confirm).
    DefaultTemperature = 1
    # Default per-request token budget.
    DefaultMaxTokens = 2000
def ReadFileGS(x_axis, batchInterval, NUM_ITEMS, NUM_ACCESS, key_skewness, overlap_ratio, abort_ratio, isCyclic, complexity):
    """Collect throughput series for three schedulers across `x_axis`.

    For each x point exactly one workload knob (access count, key skewness,
    abort ratio or item count) is randomized, then the first line of each
    result file ("<label>: <throughput>") is parsed. Returns `y`, three
    series: y[0] OPGSA/GSA (depending on `isCyclic`), y[1] TStream, y[2] PAT.

    Fixes over the original: result files are read via context managers so
    handles are closed instead of leaking, and the identical read/parse code
    is factored into a helper. Relies on module-level `tthread`, `getPathGS`
    and `random`.
    """
    def _read_throughput(path):
        # First line has the form "<label>: <value>"; return the value.
        with open(path) as f:
            return float(f.readline().split(': ')[1])
    y = [[] for _ in range(3)]
    NUM_ACCESS_range = [2, 4, 6]
    key_skewness_range = [25, 50, 75]
    abort_ratio_range = [1, 10, 100]
    NUM_ITEMS_range = [12288, 122880, 1228800]
    random_setting = ['NUM_ACCESS', 'key_skewness', 'abort_ratio', 'NUM_ITEMS']
    for punctuation_interval in x_axis:
        # Start from the caller-supplied knobs, then randomize exactly one.
        new_NUM_ACCESS = NUM_ACCESS
        new_key_skewness = key_skewness
        new_abort_ratio = abort_ratio
        new_NUM_ITEMS = NUM_ITEMS
        setting = random.choice(random_setting)
        if (setting == 'NUM_ACCESS'):
            new_NUM_ACCESS = random.choice(NUM_ACCESS_range)
        elif (setting == 'key_skewness'):
            new_key_skewness = random.choice(key_skewness_range)
        elif (setting == 'abort_ratio'):
            new_abort_ratio = random.choice(abort_ratio_range)
        elif (setting == 'NUM_ITEMS'):
            new_NUM_ITEMS = random.choice(NUM_ITEMS_range)
        inputEvents = (tthread * batchInterval)
        if (isCyclic == 'true'):
            op_gs_path = getPathGS('OPGSA', inputEvents, tthread, new_NUM_ITEMS, new_NUM_ACCESS, new_key_skewness, overlap_ratio, new_abort_ratio, isCyclic, complexity)
            print(op_gs_path)
            y[0].append(_read_throughput(op_gs_path))
        elif (isCyclic == 'false'):
            op_gs_path = getPathGS('GSA', inputEvents, tthread, new_NUM_ITEMS, new_NUM_ACCESS, new_key_skewness, overlap_ratio, new_abort_ratio, isCyclic, complexity)
            y[0].append(_read_throughput(op_gs_path))
        else:
            print('error')
        y[1].append(_read_throughput(getPathGS('TStream', inputEvents, tthread, new_NUM_ITEMS, new_NUM_ACCESS, new_key_skewness, overlap_ratio, new_abort_ratio, isCyclic, complexity)))
        y[2].append(_read_throughput(getPathGS('PAT', inputEvents, tthread, new_NUM_ITEMS, new_NUM_ACCESS, new_key_skewness, overlap_ratio, new_abort_ratio, isCyclic, complexity)))
    print(y)
    return y
def show_colorful_images(prediction, palettes):
    """Render an integer label map as a color image and display it.

    prediction: label array; extra singleton dims are squeezed and values
        are cast to uint8 before indexing the palette.
    palettes: color lookup table indexed by label — presumably an
        (num_classes, 3) uint8 array; TODO confirm against callers.
    """
    im = Image.fromarray(palettes[prediction.astype('uint8').squeeze()])
    im.show()
def py_sigmoid_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25, reduction='mean', avg_factor=None):
    """Pure-PyTorch sigmoid focal loss.

    Computes element-wise binary cross-entropy (with logits) scaled by the
    focal modulating factor ``alpha_t * pt**gamma``, then reduces via the
    project helper ``weight_reduce_loss``.

    Args:
        pred: raw logits.
        target: binary targets (cast to pred's dtype).
        weight: optional element-wise weights.
        gamma: focusing parameter.
        alpha: positive-class balance factor.
        reduction: 'none' | 'mean' | 'sum'.
        avg_factor: optional normalizer for 'mean' reduction.
    """
    prob = pred.sigmoid()
    target = target.type_as(pred)
    # pt: probability mass the model puts on the *wrong* class per element.
    pt = target * (1 - prob) + (1 - target) * prob
    alpha_factor = alpha * target + (1 - alpha) * (1 - target)
    focal_weight = alpha_factor * pt.pow(gamma)
    ce = F.binary_cross_entropy_with_logits(pred, target, reduction='none')
    loss = weight_reduce_loss(ce * focal_weight, weight, reduction, avg_factor)
    return loss
def Linear(name, input_dim, output_dim, inputs, biases=True, initialization=None, weightnorm=None, gain=1.0):
    """Fully-connected layer (legacy TF1 style): result = inputs @ W [+ b].

    Args:
        name: variable-name prefix for created params ('.W', '.g', '.b').
        input_dim, output_dim: weight matrix shape (input_dim, output_dim).
        inputs: 2-D tensor, or an N-D tensor whose last axis is input_dim.
        biases: whether to add a bias vector.
        initialization: 'lecun' | 'glorot' | None (glorot) | 'he' |
            'glorot_he' | 'orthogonal' | ('uniform', scale).
        weightnorm: apply weight normalization; falls back to the module
            global _default_weightnorm when None.
        gain: scalar multiplier applied to the initial weight values.
    """
    with tf.name_scope(name) as scope:
        def uniform(stdev, size):
            # Module-global override for the init scale, if set.
            if (_weights_stdev is not None):
                stdev = _weights_stdev
            return np.random.uniform(low=((- stdev) * np.sqrt(3)), high=(stdev * np.sqrt(3)), size=size).astype('float32')
        if (initialization == 'lecun'):
            weight_values = uniform(np.sqrt((1.0 / input_dim)), (input_dim, output_dim))
        elif ((initialization == 'glorot') or (initialization == None)):
            weight_values = uniform(np.sqrt((2.0 / (input_dim + output_dim))), (input_dim, output_dim))
        elif (initialization == 'he'):
            weight_values = uniform(np.sqrt((2.0 / input_dim)), (input_dim, output_dim))
        elif (initialization == 'glorot_he'):
            weight_values = uniform(np.sqrt((4.0 / (input_dim + output_dim))), (input_dim, output_dim))
        elif ((initialization == 'orthogonal') or ((initialization == None) and (input_dim == output_dim))):
            # NOTE(review): the `initialization == None` clause here is dead
            # code -- None is already consumed by the 'glorot' branch above,
            # so square layers never get orthogonal init by default.
            def sample(shape):
                if (len(shape) < 2):
                    raise RuntimeError('Only shapes of length 2 or more are supported.')
                # SVD of a Gaussian matrix yields an orthonormal factor.
                flat_shape = (shape[0], np.prod(shape[1:]))
                a = np.random.normal(0.0, 1.0, flat_shape)
                (u, _, v) = np.linalg.svd(a, full_matrices=False)
                q = (u if (u.shape == flat_shape) else v)
                q = q.reshape(shape)
                return q.astype('float32')
            weight_values = sample((input_dim, output_dim))
        elif (initialization[0] == 'uniform'):
            # ('uniform', scale) tuple form.
            weight_values = np.random.uniform(low=(- initialization[1]), high=initialization[1], size=(input_dim, output_dim)).astype('float32')
        else:
            raise Exception('Invalid initialization!')
        weight_values *= gain
        weight = lib.param((name + '.W'), weight_values)
        if (weightnorm == None):
            weightnorm = _default_weightnorm
        if weightnorm:
            # Weight normalization: reparameterize W as g * W / ||W||
            # (per-output-column norms).
            norm_values = np.sqrt(np.sum(np.square(weight_values), axis=0))
            target_norms = lib.param((name + '.g'), norm_values)
            with tf.name_scope('weightnorm') as scope:
                norms = tf.sqrt(tf.reduce_sum(tf.square(weight), reduction_indices=[0]))
                weight = (weight * (target_norms / norms))
        if (inputs.get_shape().ndims == 2):
            result = tf.matmul(inputs, weight)
        else:
            # Collapse leading axes, matmul, then restore them.
            # tf.pack/tf.unpack are pre-1.0 TensorFlow API (now stack/unstack).
            reshaped_inputs = tf.reshape(inputs, [(- 1), input_dim])
            result = tf.matmul(reshaped_inputs, weight)
            result = tf.reshape(result, tf.pack((tf.unpack(tf.shape(inputs))[:(- 1)] + [output_dim])))
        if biases:
            result = tf.nn.bias_add(result, lib.param((name + '.b'), np.zeros((output_dim,), dtype='float32')))
        return result
def data_load(filename, axisname, label):
    """Load one .mat signal channel, window it, and STFT each window.

    Args:
        filename: path of the .mat file to read with ``loadmat``.
        axisname: name like ``'97.mat'``; the leading number selects the
            matlab variable (``X0<n>...`` for n < 100, else ``X<n>...``),
            combined with the module-level ``axis`` suffix.
        label: label value appended once per extracted window.

    Returns:
        (data, lab): parallel lists of STFT images and labels.
    """
    datanumber = axisname.split('.')
    # int() instead of eval(): identical result for plain digit strings,
    # but safe against arbitrary code in a crafted file name.
    if int(datanumber[0]) < 100:
        realaxis = 'X0' + datanumber[0] + axis[0]
    else:
        realaxis = 'X' + datanumber[0] + axis[0]
    fl = loadmat(filename)[realaxis]
    fl = fl.reshape(-1)
    data = []
    lab = []
    # Slide a non-overlapping window of signal_size over the 1-D signal.
    start, end = 0, signal_size
    while end <= fl.shape[0]:
        x = fl[start:end]
        imgs = STFT(x)
        data.append(imgs)
        lab.append(label)
        start += signal_size
        end += signal_size
    return (data, lab)
def setup_ddp():
    """Initialize torch.distributed for SLURM or torchrun-style launches.

    Returns:
        The local GPU index this process should use (0 when neither
        launch environment is detected).
    """
    env = os.environ
    if 'SLURM_PROCID' in env and 'RANK' not in env:
        # SLURM launch: derive global rank and local GPU from the scheduler.
        world_size = int(env['WORLD_SIZE'])
        rank = int(env['SLURM_PROCID'])
        gpus_per_node = int(env['SLURM_GPUS_ON_NODE'])
        gpu = rank % gpus_per_node
        torch.cuda.set_device(gpu)
        dist.init_process_group(backend='nccl', world_size=world_size, rank=rank, timeout=datetime.timedelta(seconds=7200))
    elif 'RANK' in env and 'WORLD_SIZE' in env:
        # torchrun / env:// launch: everything comes from the environment.
        rank = int(env['RANK'])
        world_size = int(env['WORLD_SIZE'])
        gpu = int(env['LOCAL_RANK'])
        torch.cuda.set_device(gpu)
        dist.init_process_group('nccl', init_method='env://', world_size=world_size, rank=rank, timeout=datetime.timedelta(seconds=7200))
        dist.barrier()
    else:
        # Single-process fallback.
        gpu = 0
    return gpu
def apply_Dropout(rng, dropoutRate, inputShape, inputData, task):
    """Theano dropout.

    When task == 0 (training) multiply by a sampled binary mask; otherwise
    rescale activations by the keep probability. A negligible rate
    (<= 0.001) passes the data through unchanged.
    """
    if dropoutRate <= 0.001:
        return inputData
    keep_prob = 1 - dropoutRate
    stream = T.shared_randomstreams.RandomStreams(rng.randint(999999))
    # Sample the mask unconditionally (matches the original RNG usage).
    mask = stream.binomial(n=1, size=inputShape, p=keep_prob, dtype=theano.config.floatX)
    if task == 0:
        return inputData * mask
    return inputData * keep_prob
class MyProcessRunner(ProcessRunner):
    """Aggregates per-task result pickles (with caching) and derives a
    per-configuration success-rate table keyed by (m, n) for plotting."""
    def summarize(self, force=False):
        """Read every task's result file (or the cached pickle), compute the
        success rate of each config against a noise-scaled threshold, and
        dump the plot data to 'plot_data.pkl'.

        Args:
            force: when True, re-read all task results even if a cached
                'outputs/results.pkl' exists.
        """
        THRE0 = 0.6
        results_fname = 'outputs/results.pkl'
        if (os.path.exists(results_fname) and (not force)):
            print('loading results from {}'.format(results_fname))
            with open(results_fname, 'rb') as f:
                results = pickle.load(f)
        else:
            print('start reading results...')
            results = {}
            t0 = time.time()
            eof_error_fnames = []
            for (t_i, task) in enumerate(self.tasks):
                cfg = task.cfg
                # NOTE(review): mutates task.cfg in place; the remaining
                # values become the key into `results` below.
                del cfg['project_dir']
                del cfg['dataset_dir']
                result_fname1 = task.procs[0].result_fname
                with open(result_fname1, 'rb') as f:
                    try:
                        res = pickle.load(f)
                        # Last entry's 'min_dist' is the final loss of the run.
                        last_loss = res[(- 1)]['min_dist']
                        key = tuple(cfg.values())
                        results[key] = last_loss
                    except EOFError as e:
                        # Truncated result file (task likely killed mid-write).
                        eof_error_fnames.append(result_fname1)
                if ((t_i % 100) == 0):
                    print(f'''reading {t_i}/{len(self.tasks)} done
''', end='')
            print('')
            print('removing eof_error files: ..')
            for fname in eof_error_fnames:
                print('remove:', fname)
                os.remove(fname)
            # Deliberately abort after cleanup so corrupted tasks are re-run
            # before summarizing again.
            assert (len(eof_error_fnames) == 0)
            with open(results_fname, 'wb') as f:
                pickle.dump(results, f)
            t1 = time.time()
            print(f'reading and saving results done in {(t1 - t0):.3f}sec')
        cfg2 = copy.deepcopy(self.cfg)
        # Seeds and lr are swept over below, so they are not part of the key.
        del cfg2['data_seed']
        del cfg2['train_seed']
        del cfg2['lr']
        cfgs2 = list(product_dict(**cfg2))
        plot_data = {}
        print(f"n {cfg2['n'][0]}")
        for cfg in cfgs2:
            key1 = cfg.keys()
            key1v = cfg.values()
            # Success threshold scales with the config's noise level.
            THRE = (THRE0 * cfg['noise_scale'])
            success_rate = 0
            min_values = []
            for d_i in self.cfg['data_seed']:
                is_success = False
                # min_value == -1 marks "no valid (non-NaN) run seen yet".
                min_value = (- 1)
                for t_i in self.cfg['train_seed']:
                    for lr in self.cfg['lr']:
                        key2 = tuple((list(key1v) + [d_i, t_i, lr]))
                        last_value = results[key2]
                        if np.isnan(last_value):
                            continue
                        if (min_value == (- 1)):
                            min_value = last_value
                        elif (min_value > last_value):
                            min_value = last_value
                        else:
                            pass
                min_values.append(min_value)
                # A data seed succeeds if its best run beats the threshold.
                is_success = (min_value < THRE)
                if is_success:
                    success_rate += 1
            success_rate /= len(self.cfg['data_seed'])
            print(cfg, f" min {np.min(min_values):.5f} avg {np.mean(min_values):.5f} max {np.max(min_values):.5f} TH {THRE} sr {success_rate:.3f} ds {len(self.cfg['data_seed'])}")
            plot_data[(cfg['m'], cfg['n'])] = success_rate
        data1 = {}
        data1['plot_data'] = plot_data
        data1['cfg'] = self.cfg
        print(plot_data)
        with open('plot_data.pkl', 'wb') as f:
            pickle.dump(data1, f)
class ReinitServer(ABC):
    """Federated-learning server that re-initializes a pruned model and
    averages masked client updates each round.

    Modes: 'r' reloads the saved initial weights and re-applies the final
    model's structure; 'rr' keeps current weights and copies only the final
    model's pruning masks.
    """
    def __init__(self, args, config, model, save_interval=50):
        self.config = config
        self.device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
        self.experiment_name = args.experiment_name
        self.save_path = os.path.join('results', config.EXP_NAME, args.experiment_name)
        self.save_interval = save_interval
        self.mode = args.mode
        assert (self.mode in ['r', 'rr'])
        self.model = model.to(self.device)
        # Folder name encodes the adaptive run's options (targeted /
        # client-selection) so the matching checkpoints are found.
        self.adaptive_folder = 'adaptive{}{}'.format(('_target' if args.targeted else ''), ('_cs' if args.client_selection else ''))
        init_model_path = os.path.join('results', config.EXP_NAME, self.adaptive_folder, 'init_model.pt')
        final_model_path = os.path.join('results', config.EXP_NAME, self.adaptive_folder, 'model.pt')
        final_model = load(final_model_path)
        if (self.mode == 'r'):
            # Reinit: start from the saved initial weights, then adopt the
            # final model's structure.
            self.model = load(init_model_path).to(self.device)
            self.model.reinit_from_model(final_model)
        elif (self.mode == 'rr'):
            # Random-reinit: keep weights, copy only the final masks.
            for (layer, final_layer) in zip(self.model.prunable_layers, final_model.prunable_layers):
                layer.mask = final_layer.mask.clone().to(layer.mask.device)
        else:
            raise ValueError('Mode {} not supported'.format(self.mode))
        # Zero out pruned weights so they are consistent with the masks.
        with torch.no_grad():
            for layer in self.model.prunable_layers:
                layer.weight.mul_(layer.mask)
        disp_num_params(self.model)
        self.model.train()
        mkdir_save(self.model, os.path.join(self.save_path, 'init_model.pt'))
        self.test_loader = None
        self.init_test_loader()
        self.init_clients()
    def init_test_loader(self):
        # Hook for subclasses: populate self.test_loader.
        pass
    def init_clients(self):
        # Hook for subclasses: set up client state.
        pass
    def main(self, idx, list_sd, list_num_proc, lr, start, list_loss, list_acc, list_est_time, list_model_size):
        """Run one federated round: average client deltas into the global
        model, periodically evaluate/log/save, and return the masks plus a
        state dict per client.

        Args:
            idx: round index (drives evaluation and saving cadence).
            list_sd: per-client state dicts after local training.
            list_num_proc: per-client sample counts (averaging weights).
            lr: current learning rate (logging only).
            start: wall-clock start time (logging only).
            list_loss, list_acc, list_est_time, list_model_size: running
                logs, appended to in place.
        """
        total_num_proc = sum(list_num_proc)
        with torch.no_grad():
            for (key, param) in self.model.state_dict().items():
                avg_inc_val = None
                for (num_proc, state_dict) in zip(list_num_proc, list_sd):
                    if (key in state_dict.keys()):
                        mask = self.model.get_mask_by_name(key)
                        if (mask is None):
                            inc_val = (state_dict[key] - param)
                        else:
                            # Delta is taken against the masked server weights.
                            inc_val = (state_dict[key] - (param * self.model.get_mask_by_name(key)))
                        if (avg_inc_val is None):
                            avg_inc_val = ((num_proc / total_num_proc) * inc_val)
                        else:
                            avg_inc_val += ((num_proc / total_num_proc) * inc_val)
                # Skip params no client touched, and BN batch counters.
                if ((avg_inc_val is None) or key.endswith('num_batches_tracked')):
                    continue
                else:
                    param.add_(avg_inc_val)
        if ((idx % self.config.EVAL_DISP_INTERVAL) == 0):
            (loss, acc) = self.model.evaluate(self.test_loader)
            list_loss.append(loss)
            list_acc.append(acc)
            print('Round #{} (Experiment = {}).'.format(idx, self.experiment_name))
            print('Loss/acc (at round {}) = {}/{}'.format(((len(list_loss) - 1) * self.config.EVAL_DISP_INTERVAL), loss, acc))
            print('Estimated time = {}'.format(sum(list_est_time)))
            print('Elapsed time = {}'.format((timer() - start)))
            print('Current lr = {}'.format(lr))
        # Per-round cost model: constant + per-weight compute/comm terms.
        est_time = self.config.TIME_CONSTANT
        for (layer, comp_coeff) in zip(self.model.prunable_layers, self.config.COMP_COEFFICIENTS):
            est_time += (layer.num_weight * (comp_coeff + self.config.COMM_COEFFICIENT))
        model_size = self.model.calc_num_all_active_params(True)
        list_est_time.append(est_time)
        list_model_size.append(model_size)
        if ((idx % self.save_interval) == 0):
            mkdir_save(list_loss, os.path.join(self.save_path, 'loss.pt'))
            mkdir_save(list_acc, os.path.join(self.save_path, 'accuracy.pt'))
            mkdir_save(list_est_time, os.path.join(self.save_path, 'est_time.pt'))
            mkdir_save(list_model_size, os.path.join(self.save_path, 'model_size.pt'))
            mkdir_save(self.model, os.path.join(self.save_path, 'model.pt'))
        return ([layer.mask for layer in self.model.prunable_layers], [self.model.state_dict() for _ in range(self.config.NUM_CLIENTS)])
def test_reconfigure_with_n_smaller_than_subtree_size():
    """Reconfiguring with a subtree size (12) larger than the random
    10-tensor contraction must not error."""
    pytest.importorskip('opt_einsum')
    import opt_einsum as oe
    equation, operand_shapes = oe.helpers.rand_equation(10, 3)
    _, path_info = oe.contract_path(equation, *operand_shapes, shapes=True)
    contraction_tree = ctg.ContractionTree.from_info(path_info)
    contraction_tree.subtree_reconfigure(12)
class FromTensors(MultiResolutionBatch):
    """Multi-resolution batch backed by in-memory tensors, one per level."""
    def __init__(self, xs, y):
        # xs: per-level input tensors; y: targets shared across levels.
        self._xs = xs
        self._y = y
    def targets(self):
        return self._y
    def inputs(self):
        # No placeholder inputs: the data is already materialized.
        return []
    def patches(self, samples, offsets, sample_space, previous_patch_size, patch_size, fromlevel, tolevel):
        """Extract patches at `tolevel` for sample positions expressed in
        the coordinate frame of `fromlevel`.

        Returns:
            (patches, offsets): extracted patches and the top-left corner
            of each patch at the target level.
        """
        sample_space = to_tensor(sample_space)
        previous_patch_size = to_tensor(previous_patch_size)
        patch_size = to_tensor(patch_size)
        shape_from = to_tensor(self._shape(fromlevel))
        shape_to = to_tensor(self._shape(tolevel))
        # NOTE(review): scale_samples is computed but never used below.
        scale_samples = self._scale(sample_space, shape_to)
        scale_offsets = self._scale(shape_from, shape_to)
        # Room (in tolevel pixels) spanned by each previous patch, and the
        # step between consecutive sample cells inside it.
        space_available = (to_float32(previous_patch_size) * scale_offsets)
        steps = (space_available / to_float32(sample_space))
        # offsets*scale + samples*step + step/2 - patch_size/2:
        # rescale base offsets, move to the sampled cell center, then shift
        # back half a patch to get the patch's top-left corner.
        offsets = to_int32(K.round(((((to_float32(offsets) * expand_many(scale_offsets, [0, 0])) + (to_float32(samples) * expand_many(steps, [0, 0]))) + expand_many((steps / 2), [0, 0])) - expand_many((to_float32(patch_size) / 2), [0, 0]))))
        patches = extract_patches(self._xs[tolevel], offsets, patch_size)
        return (patches, offsets)
    def data(self, level):
        return self._xs[level]
    def _scale(self, shape_from, shape_to):
        # Element-wise factor mapping shape_from coordinates to shape_to.
        shape_from = to_float32(to_tensor(shape_from))
        shape_to = to_float32(to_tensor(shape_to))
        return (shape_to / shape_from)
    def _shape(self, level):
        # Spatial dims (drops first and last axes); prefer the static shape,
        # fall back to the dynamic one when any dim is unknown.
        x = self._xs[level]
        int_shape = K.int_shape(x)[1:(- 1)]
        if (not any(((s is None) for s in int_shape))):
            return int_shape
        return K.shape(x)[1:(- 1)]
class PhrasecutEvaluator(object):
    """Distributed evaluator for PhraseCut referring-expression predictions.

    Accumulates per-image predictions across processes, then scores boxes
    (and optionally masks) on the main process only.
    """
    def __init__(self, split, ann_folder, output_dir='phrasecut_eval', eval_mask=False):
        subset = PhraseCutSubsets(ann_folder)
        loader = RefVGLoader(ann_folder, subset, split=split)
        # Only the main process creates the output directory.
        if dist.is_main_process():
            if (not os.path.exists(output_dir)):
                os.mkdir(output_dir)
        self.output_dir = output_dir
        self.evaluator = Evaluator(loader, summary_path=output_dir)
        self.eval_mask = eval_mask
        self.predictions = []
    def update(self, predictions):
        # Buffer this batch's predictions locally.
        self.predictions += predictions
    def synchronize_between_processes(self):
        # Gather every process's buffered predictions and flatten them.
        all_predictions = dist.all_gather(self.predictions)
        merged_predictions = []
        for p in all_predictions:
            merged_predictions += p
        self.predictions = merged_predictions
    def summarize(self):
        """Score all gathered predictions.

        Returns the 'pred_box_acc' results dict on the main process,
        None on all other processes.
        """
        if dist.is_main_process():
            # Group predictions by source image, then index by task id.
            imgid2pred: Dict[(str, List)] = defaultdict(list)
            for p in self.predictions:
                imgid2pred[p['original_id']].append(p)
            for (img_id, pred) in imgid2pred.items():
                im_pred_dict = {p['task_id']: p for p in pred}
                self.evaluator.eval_single_img(img_id, im_pred_dict, pred_mask_tag=('masks' if self.eval_mask else None), pred_boxes_tag='boxes', verbose=False)
            mask_box = ['box']
            if self.eval_mask:
                mask_box.append('mask')
            results = self.evaluator.analyze_stats(mask_box, exp_name_in_summary=None, save_result_to_path=None)
            results = results['all']['pred_box_acc']
            return {f'{k}': v for (k, v) in results.items()}
        return None
def run_preprocess_test(data, fakefs, mocker):
    """Exercise AudioDataModule.preprocess_dataset with a fake filesystem
    and mocked audio/CQT/save/json functions.

    Fix: ``filenames`` was never populated, so the torch.save verification
    at the end was vacuous (``assert_has_calls([])`` always passes). It is
    now filled from the metadata alongside the load-call bookkeeping.
    """
    fakefs.create_dir(data.data_dir)
    fakefs.create_file(Path(data.data_dir).joinpath(data.meta_file))
    mocker.patch('json.load', side_effect=processed_modal_metadata)
    mocked_preprocess = mocker.patch(f'{TESTED_MODULE}.AudioDataModule.preprocess_dataset')
    mocked_load = mocker.patch(f'{TESTED_MODULE}.torchaudio.load', side_effect=partial(mock_modal_audio_load, sample_rate=data.sample_rate, num_samples=data.num_samples))
    num_hops = ((data.num_samples // data.hop_length) + 1)
    mocker.patch.object(CQTModalAnalysis, '__init__', return_value=None)
    mocker.patch.object(CQTModalAnalysis, '__call__', side_effect=partial(mock_cqt_call, num_samples=data.num_samples, num_frames=num_hops, num_bins=data.n_bins))
    mocked_save = mocker.patch(f'{TESTED_MODULE}.torch.save')
    mocked_jsondump = mocker.patch(f'{TESTED_MODULE}.json.dump', side_effect=partial(mock_json_dump_update, expected_outfile=Path(data.data_dir).joinpath(data.meta_file)))
    data.preprocess_dataset()
    mocked_preprocess.assert_called_once()
    filenames = []
    load_calls = []
    with open(Path(data.data_dir).joinpath(data.meta_file), 'r') as f:
        metadata = processed_modal_metadata(f)
        for idx in metadata:
            filename = metadata[idx]['filename']
            # Track filenames so the save-call check below is not vacuous.
            filenames.append(filename)
            load_calls.append(mocker.call(Path(data.data_dir).joinpath(filename)))
    mocked_load.assert_has_calls(load_calls)
    feature_dir = Path(data.data_dir).joinpath('features')
    mocked_save.assert_has_calls([mocker.call(mocker.ANY, feature_dir.joinpath(Path(f).with_suffix('.pt'))) for f in filenames])
    mocked_jsondump.assert_called_once()
class TestTransformations(ChannelTestCase):
    """Round-trip tests between quantum-channel representations (Operator,
    Choi, SuperOp, Kraus, Stinespring, Chi, PTM).

    Each helper builds a random channel in one representation, converts it
    to another, and checks that evolving a random density matrix gives the
    same result through both. Test methods below instantiate every
    source/target pair.
    """
    # Channel dimensions to exercise (1- and 2-qubit) and trials per case.
    qubits_test_cases = (1, 2)
    repetitions = 2
    def _unitary_to_other(self, rep, qubits_test_cases, repetitions):
        """Operator(random matrix) -> rep must evolve states identically."""
        for nq in qubits_test_cases:
            dim = (2 ** nq)
            for _ in range(repetitions):
                rho = self.rand_rho(dim)
                mat = self.rand_matrix(dim, dim)
                chan1 = Operator(mat)
                rho1 = chan1._evolve(rho)
                chan2 = rep(chan1)
                rho2 = chan2._evolve(rho)
                self.assertAllClose(rho1, rho2)
    def _other_to_operator(self, rep, qubits_test_cases, repetitions):
        """rep(Operator) -> Operator round-trip preserves evolution."""
        for nq in qubits_test_cases:
            dim = (2 ** nq)
            for _ in range(repetitions):
                rho = self.rand_rho(dim)
                mat = self.rand_matrix(dim, dim)
                chan1 = rep(Operator(mat))
                rho1 = chan1._evolve(rho)
                chan2 = Operator(chan1)
                rho2 = chan2._evolve(rho)
                self.assertAllClose(rho1, rho2)
    def _choi_to_other_cp(self, rep, qubits_test_cases, repetitions):
        """Completely-positive Choi (built from a density matrix) -> rep."""
        for nq in qubits_test_cases:
            dim = (2 ** nq)
            for _ in range(repetitions):
                rho = self.rand_rho(dim)
                mat = (dim * self.rand_rho((dim ** 2)))
                chan1 = Choi(mat)
                rho1 = chan1._evolve(rho)
                chan2 = rep(chan1)
                rho2 = chan2._evolve(rho)
                self.assertAllClose(rho1, rho2)
    def _choi_to_other_noncp(self, rep, qubits_test_cases, repetitions):
        """Choi from an arbitrary (possibly non-CP) matrix -> rep."""
        for nq in qubits_test_cases:
            dim = (2 ** nq)
            for _ in range(repetitions):
                rho = self.rand_rho(dim)
                mat = self.rand_matrix((dim ** 2), (dim ** 2))
                chan1 = Choi(mat)
                rho1 = chan1._evolve(rho)
                chan2 = rep(chan1)
                rho2 = chan2._evolve(rho)
                self.assertAllClose(rho1, rho2)
    def _superop_to_other(self, rep, qubits_test_cases, repetitions):
        """SuperOp(random dim^2 matrix) -> rep."""
        for nq in qubits_test_cases:
            dim = (2 ** nq)
            for _ in range(repetitions):
                rho = self.rand_rho(dim)
                mat = self.rand_matrix((dim ** 2), (dim ** 2))
                chan1 = SuperOp(mat)
                rho1 = chan1._evolve(rho)
                chan2 = rep(chan1)
                rho2 = chan2._evolve(rho)
                self.assertAllClose(rho1, rho2)
    def _kraus_to_other_single(self, rep, qubits_test_cases, repetitions):
        """Kraus channel with a single operator set -> rep."""
        for nq in qubits_test_cases:
            dim = (2 ** nq)
            for _ in range(repetitions):
                rho = self.rand_rho(dim)
                kraus = self.rand_kraus(dim, dim, (dim ** 2))
                chan1 = Kraus(kraus)
                rho1 = chan1._evolve(rho)
                chan2 = rep(chan1)
                rho2 = chan2._evolve(rho)
                self.assertAllClose(rho1, rho2)
    def _kraus_to_other_double(self, rep, qubits_test_cases, repetitions):
        """Generalized Kraus channel with distinct (left, right) sets -> rep."""
        for nq in qubits_test_cases:
            dim = (2 ** nq)
            for _ in range(repetitions):
                rho = self.rand_rho(dim)
                kraus_l = self.rand_kraus(dim, dim, (dim ** 2))
                kraus_r = self.rand_kraus(dim, dim, (dim ** 2))
                chan1 = Kraus((kraus_l, kraus_r))
                rho1 = chan1._evolve(rho)
                chan2 = rep(chan1)
                rho2 = chan2._evolve(rho)
                self.assertAllClose(rho1, rho2)
    def _stinespring_to_other_single(self, rep, qubits_test_cases, repetitions):
        """Stinespring channel with a single isometry -> rep."""
        for nq in qubits_test_cases:
            dim = (2 ** nq)
            for _ in range(repetitions):
                rho = self.rand_rho(dim)
                mat = self.rand_matrix((dim ** 2), dim)
                chan1 = Stinespring(mat)
                rho1 = chan1._evolve(rho)
                chan2 = rep(chan1)
                rho2 = chan2._evolve(rho)
                self.assertAllClose(rho1, rho2)
    def _stinespring_to_other_double(self, rep, qubits_test_cases, repetitions):
        """Generalized Stinespring channel with a (left, right) pair -> rep."""
        for nq in qubits_test_cases:
            dim = (2 ** nq)
            for _ in range(repetitions):
                rho = self.rand_rho(dim)
                mat_l = self.rand_matrix((dim ** 2), dim)
                mat_r = self.rand_matrix((dim ** 2), dim)
                chan1 = Stinespring((mat_l, mat_r))
                rho1 = chan1._evolve(rho)
                chan2 = rep(chan1)
                rho2 = chan2._evolve(rho)
                self.assertAllClose(rho1, rho2)
    def _chi_to_other(self, rep, qubits_test_cases, repetitions):
        """Chi channel from a random real matrix -> rep."""
        for nq in qubits_test_cases:
            dim = (2 ** nq)
            for _ in range(repetitions):
                rho = self.rand_rho(dim)
                mat = self.rand_matrix((dim ** 2), (dim ** 2), real=True)
                chan1 = Chi(mat)
                rho1 = chan1._evolve(rho)
                chan2 = rep(chan1)
                rho2 = chan2._evolve(rho)
                self.assertAllClose(rho1, rho2)
    def _ptm_to_other(self, rep, qubits_test_cases, repetitions):
        """PTM channel from a random real matrix -> rep."""
        for nq in qubits_test_cases:
            dim = (2 ** nq)
            for _ in range(repetitions):
                rho = self.rand_rho(dim)
                mat = self.rand_matrix((dim ** 2), (dim ** 2), real=True)
                chan1 = PTM(mat)
                rho1 = chan1._evolve(rho)
                chan2 = rep(chan1)
                rho2 = chan2._evolve(rho)
                self.assertAllClose(rho1, rho2)
    def test_unitary_to_choi(self):
        self._unitary_to_other(Choi, self.qubits_test_cases, self.repetitions)
    def test_unitary_to_superop(self):
        self._unitary_to_other(SuperOp, self.qubits_test_cases, self.repetitions)
    def test_unitary_to_kraus(self):
        self._unitary_to_other(Kraus, self.qubits_test_cases, self.repetitions)
    def test_unitary_to_stinespring(self):
        self._unitary_to_other(Stinespring, self.qubits_test_cases, self.repetitions)
    def test_unitary_to_chi(self):
        self._unitary_to_other(Chi, self.qubits_test_cases, self.repetitions)
    def test_unitary_to_ptm(self):
        self._unitary_to_other(PTM, self.qubits_test_cases, self.repetitions)
    def test_choi_to_operator(self):
        self._other_to_operator(Choi, self.qubits_test_cases, self.repetitions)
    def test_choi_to_superop_cp(self):
        self._choi_to_other_cp(SuperOp, self.qubits_test_cases, self.repetitions)
    def test_choi_to_kraus_cp(self):
        self._choi_to_other_cp(Kraus, self.qubits_test_cases, self.repetitions)
    def test_choi_to_stinespring_cp(self):
        self._choi_to_other_cp(Stinespring, self.qubits_test_cases, self.repetitions)
    def test_choi_to_chi_cp(self):
        self._choi_to_other_cp(Chi, self.qubits_test_cases, self.repetitions)
    def test_choi_to_ptm_cp(self):
        self._choi_to_other_cp(PTM, self.qubits_test_cases, self.repetitions)
    def test_choi_to_superop_noncp(self):
        self._choi_to_other_noncp(SuperOp, self.qubits_test_cases, self.repetitions)
    def test_choi_to_kraus_noncp(self):
        self._choi_to_other_noncp(Kraus, self.qubits_test_cases, self.repetitions)
    def test_choi_to_stinespring_noncp(self):
        self._choi_to_other_noncp(Stinespring, self.qubits_test_cases, self.repetitions)
    def test_choi_to_chi_noncp(self):
        self._choi_to_other_noncp(Chi, self.qubits_test_cases, self.repetitions)
    def test_choi_to_ptm_noncp(self):
        self._choi_to_other_noncp(PTM, self.qubits_test_cases, self.repetitions)
    def test_superop_to_operator(self):
        self._other_to_operator(SuperOp, self.qubits_test_cases, self.repetitions)
    def test_superop_to_choi(self):
        self._superop_to_other(Choi, self.qubits_test_cases, self.repetitions)
    def test_superop_to_kraus(self):
        self._superop_to_other(Kraus, self.qubits_test_cases, self.repetitions)
    def test_superop_to_stinespring(self):
        self._superop_to_other(Stinespring, self.qubits_test_cases, self.repetitions)
    def test_superop_to_chi(self):
        self._superop_to_other(Chi, self.qubits_test_cases, self.repetitions)
    def test_superop_to_ptm(self):
        self._superop_to_other(PTM, self.qubits_test_cases, self.repetitions)
    def test_kraus_to_operator(self):
        self._other_to_operator(Kraus, self.qubits_test_cases, self.repetitions)
    def test_kraus_to_choi_single(self):
        self._kraus_to_other_single(Choi, self.qubits_test_cases, self.repetitions)
    def test_kraus_to_superop_single(self):
        self._kraus_to_other_single(SuperOp, self.qubits_test_cases, self.repetitions)
    def test_kraus_to_stinespring_single(self):
        self._kraus_to_other_single(Stinespring, self.qubits_test_cases, self.repetitions)
    def test_kraus_to_chi_single(self):
        self._kraus_to_other_single(Chi, self.qubits_test_cases, self.repetitions)
    def test_kraus_to_ptm_single(self):
        self._kraus_to_other_single(PTM, self.qubits_test_cases, self.repetitions)
    def test_kraus_to_choi_double(self):
        self._kraus_to_other_double(Choi, self.qubits_test_cases, self.repetitions)
    def test_kraus_to_superop_double(self):
        self._kraus_to_other_double(SuperOp, self.qubits_test_cases, self.repetitions)
    def test_kraus_to_stinespring_double(self):
        self._kraus_to_other_double(Stinespring, self.qubits_test_cases, self.repetitions)
    def test_kraus_to_chi_double(self):
        self._kraus_to_other_double(Chi, self.qubits_test_cases, self.repetitions)
    def test_kraus_to_ptm_double(self):
        self._kraus_to_other_double(PTM, self.qubits_test_cases, self.repetitions)
    def test_stinespring_to_operator(self):
        self._other_to_operator(Stinespring, self.qubits_test_cases, self.repetitions)
    def test_stinespring_to_choi_single(self):
        self._stinespring_to_other_single(Choi, self.qubits_test_cases, self.repetitions)
    def test_stinespring_to_superop_single(self):
        self._stinespring_to_other_single(SuperOp, self.qubits_test_cases, self.repetitions)
    def test_stinespring_to_kraus_single(self):
        self._stinespring_to_other_single(Kraus, self.qubits_test_cases, self.repetitions)
    def test_stinespring_to_chi_single(self):
        self._stinespring_to_other_single(Chi, self.qubits_test_cases, self.repetitions)
    def test_stinespring_to_ptm_single(self):
        self._stinespring_to_other_single(PTM, self.qubits_test_cases, self.repetitions)
    def test_stinespring_to_choi_double(self):
        self._stinespring_to_other_double(Choi, self.qubits_test_cases, self.repetitions)
    def test_stinespring_to_superop_double(self):
        self._stinespring_to_other_double(SuperOp, self.qubits_test_cases, self.repetitions)
    def test_stinespring_to_kraus_double(self):
        self._stinespring_to_other_double(Kraus, self.qubits_test_cases, self.repetitions)
    def test_stinespring_to_chi_double(self):
        self._stinespring_to_other_double(Chi, self.qubits_test_cases, self.repetitions)
    def test_stinespring_to_ptm_double(self):
        self._stinespring_to_other_double(PTM, self.qubits_test_cases, self.repetitions)
    def test_ptm_to_operator(self):
        self._other_to_operator(PTM, self.qubits_test_cases, self.repetitions)
    def test_ptm_to_choi(self):
        self._ptm_to_other(Choi, self.qubits_test_cases, self.repetitions)
    def test_ptm_to_superop(self):
        self._ptm_to_other(SuperOp, self.qubits_test_cases, self.repetitions)
    def test_ptm_to_kraus(self):
        self._ptm_to_other(Kraus, self.qubits_test_cases, self.repetitions)
    def test_ptm_to_stinespring(self):
        self._ptm_to_other(Stinespring, self.qubits_test_cases, self.repetitions)
    def test_ptm_to_chi(self):
        self._ptm_to_other(Chi, self.qubits_test_cases, self.repetitions)
    def test_chi_to_operator(self):
        self._other_to_operator(Chi, self.qubits_test_cases, self.repetitions)
    def test_chi_to_choi(self):
        self._chi_to_other(Choi, self.qubits_test_cases, self.repetitions)
    def test_chi_to_superop(self):
        self._chi_to_other(SuperOp, self.qubits_test_cases, self.repetitions)
    def test_chi_to_kraus(self):
        self._chi_to_other(Kraus, self.qubits_test_cases, self.repetitions)
    def test_chi_to_stinespring(self):
        self._chi_to_other(Stinespring, self.qubits_test_cases, self.repetitions)
    def test_chi_to_ptm(self):
        self._chi_to_other(PTM, self.qubits_test_cases, self.repetitions)
def pack_kwargs(*args, **kwargs) -> Tuple[(List[str], List[Any])]:
    """Flatten positional and keyword arguments into one list.

    Returns:
        (kwarg_keys, flat_args): ``flat_args`` is the positional arguments
        followed by the keyword values; ``kwarg_keys`` holds the keyword
        names in the same order as their values.
    """
    kwarg_keys = list(kwargs.keys())
    flat_args = [*args, *kwargs.values()]
    return (kwarg_keys, flat_args)
def get_version():
    """Read __version__ from the package's __init__.py.

    Returns:
        The version string with quotes stripped.

    Raises:
        IndexError: if no line in __init__.py starts with '__version__'.
    """
    init_py_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), SOURCE_FOLDER, '__init__.py')
    # Context manager so the file handle is closed promptly (the original
    # leaked it via open(...).readlines()).
    with open(init_py_path, 'r') as f:
        init_py = f.readlines()
    version_line = [l.strip() for l in init_py if l.startswith('__version__')][0]
    version = version_line.split('=')[(- 1)].strip().strip('\'"')
    return version
def tune_odin_hyperparams():
    """Grid-search the ODIN input-perturbation magnitude.

    Scores m=1000 in-distribution validation images against 1000
    out-of-distribution images (TinyImages) for each magnitude in
    [0, 0.004] and returns the magnitude with the lowest FPR. Relies on
    the module-level `args` plus project helpers (get_odin_score, metric,
    print_results, svhn, TinyImages, dn, wn).
    """
    print('Tuning hyper-parameters...')
    stypes = ['ODIN']
    save_dir = os.path.join('output/odin_hyperparams/', args.in_dataset, args.name, 'tmp')
    if (not os.path.exists(save_dir)):
        os.makedirs(save_dir)
    transform = transforms.Compose([transforms.ToTensor()])
    if (args.in_dataset == 'CIFAR-10'):
        # Normalization is applied inside the model via `normalizer`.
        normalizer = transforms.Normalize(((125.3 / 255), (123.0 / 255), (113.9 / 255)), ((63.0 / 255), (62.1 / 255.0), (66.7 / 255.0)))
        trainset = torchvision.datasets.CIFAR10('./datasets/cifar10', train=True, download=True, transform=transform)
        trainloaderIn = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True)
        testset = torchvision.datasets.CIFAR10(root='./datasets/cifar10', train=False, download=True, transform=transform)
        testloaderIn = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=True)
        num_classes = 10
    elif (args.in_dataset == 'CIFAR-100'):
        normalizer = transforms.Normalize(((125.3 / 255), (123.0 / 255), (113.9 / 255)), ((63.0 / 255), (62.1 / 255.0), (66.7 / 255.0)))
        trainset = torchvision.datasets.CIFAR100('./datasets/cifar100', train=True, download=True, transform=transform)
        trainloaderIn = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True)
        testset = torchvision.datasets.CIFAR100(root='./datasets/cifar100', train=False, download=True, transform=transform)
        testloaderIn = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=True)
        num_classes = 100
    elif (args.in_dataset == 'SVHN'):
        normalizer = None
        trainloaderIn = torch.utils.data.DataLoader(svhn.SVHN('datasets/svhn/', split='train', transform=transforms.ToTensor(), download=False), batch_size=args.batch_size, shuffle=True)
        testloaderIn = torch.utils.data.DataLoader(svhn.SVHN('datasets/svhn/', split='test', transform=transforms.ToTensor(), download=False), batch_size=args.batch_size, shuffle=True)
        # NOTE(review): mutates the global args; the checkpoint path below
        # is built from args.epochs.
        args.epochs = 20
        num_classes = 10
    valloaderOut = torch.utils.data.DataLoader(TinyImages(transform=transforms.Compose([transforms.ToTensor(), transforms.ToPILImage(), transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor()])), batch_size=args.batch_size, shuffle=False)
    # Random offset so a different TinyImages slice is used each run.
    valloaderOut.dataset.offset = np.random.randint(len(valloaderOut.dataset))
    if (args.model_arch == 'densenet'):
        model = dn.DenseNet3(args.layers, num_classes, normalizer=normalizer)
    elif (args.model_arch == 'wideresnet'):
        model = wn.WideResNet(args.depth, num_classes, widen_factor=args.width, normalizer=normalizer)
    else:
        assert False, 'Not supported model arch: {}'.format(args.model_arch)
    checkpoint = torch.load('./checkpoints/{in_dataset}/{name}/checkpoint_{epochs}.pth.tar'.format(in_dataset=args.in_dataset, name=args.name, epochs=args.epochs))
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    model.cuda()
    # Collect m validation images from each distribution.
    m = 1000
    val_in = []
    val_out = []
    cnt = 0
    for (data, target) in testloaderIn:
        for x in data:
            val_in.append(x.numpy())
            cnt += 1
            if (cnt == m):
                break
        if (cnt == m):
            break
    cnt = 0
    for (data, target) in valloaderOut:
        for x in data:
            val_out.append(x.numpy())
            cnt += 1
            if (cnt == m):
                break
        if (cnt == m):
            break
    print('Len of val in: ', len(val_in))
    print('Len of val out: ', len(val_out))
    best_fpr = 1.1
    best_magnitude = 0.0
    # Sweep the perturbation magnitude; each step writes per-image ODIN
    # scores to text files that `metric` reads back.
    for magnitude in np.arange(0, 0.0041, (0.004 / 20)):
        t0 = time.time()
        f1 = open(os.path.join(save_dir, 'confidence_ODIN_In.txt'), 'w')
        f2 = open(os.path.join(save_dir, 'confidence_ODIN_Out.txt'), 'w')
        print('Processing in-distribution images')
        count = 0
        for i in range((int((m / args.batch_size)) + 1)):
            if ((i * args.batch_size) >= m):
                break
            images = torch.tensor(val_in[(i * args.batch_size):min(((i + 1) * args.batch_size), m)])
            images = images.cuda()
            batch_size = images.shape[0]
            scores = get_odin_score(images, model, temper=1000, noiseMagnitude1=magnitude)
            for k in range(batch_size):
                f1.write('{}\n'.format(scores[k]))
            count += batch_size
            t0 = time.time()
        t0 = time.time()
        print('Processing out-of-distribution images')
        count = 0
        for i in range((int((m / args.batch_size)) + 1)):
            if ((i * args.batch_size) >= m):
                break
            images = torch.tensor(val_out[(i * args.batch_size):min(((i + 1) * args.batch_size), m)])
            images = images.cuda()
            batch_size = images.shape[0]
            scores = get_odin_score(images, model, temper=1000, noiseMagnitude1=magnitude)
            for k in range(batch_size):
                f2.write('{}\n'.format(scores[k]))
            count += batch_size
            t0 = time.time()
        f1.close()
        f2.close()
        results = metric(save_dir, stypes)
        print_results(results, stypes)
        # Keep the magnitude with the lowest false-positive rate.
        fpr = results['ODIN']['FPR']
        if (fpr < best_fpr):
            best_fpr = fpr
            best_magnitude = magnitude
    return best_magnitude
def prepare_model(input_model, output_model):
    """Export a pretrained torchvision VGG16 to ONNX at *output_model*.

    Exports with opset 14, constant folding, and a dynamic batch axis on
    both input and output.

    NOTE(review): `input_model` is never used -- the function always loads
    the pretrained VGG16; confirm whether it should load from that path.
    """
    batch_size = 1
    model = torchvision.models.vgg16(pretrained=True)
    # Dummy input fixing the non-batch dims (3x224x224 RGB).
    x = torch.randn(batch_size, 3, 224, 224, requires_grad=True)
    torch.onnx.export(model, x, output_model, export_params=True, opset_version=14, do_constant_folding=True, input_names=['input'], output_names=['output'], dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}})
    assert os.path.exists(output_model), f"Export failed! {output_model} doesn't exist!"
class Criterion(nn.Module):
    def __init__(self, threshold: int=3, validation_max_disp: int=(- 1), loss_weight: list=None):
        """Stereo-matching criterion bundling smooth-L1 disparity loss, EPE
        metric, and occlusion losses.

        Args:
            threshold: pixel-error threshold for the px-error metric.
            validation_max_disp: max disparity considered at validation
                (-1 means unbounded).
            loss_weight: per-loss-name weights used by aggregate_loss.
                NOTE(review): annotated `list` but used as a dict
                (defaults to {}).
        """
        super(Criterion, self).__init__()
        if (loss_weight is None):
            loss_weight = {}
        self.px_threshold = threshold
        self.validation_max_disp = validation_max_disp
        self.weights = loss_weight
        self.l1_criterion = nn.SmoothL1Loss()
        self.epe_criterion = nn.L1Loss()
    _grad()
    # NOTE(review): the bare `_grad()` calls preceding the metric methods
    # look like mangled `@torch.no_grad()` decorators -- as written they
    # execute at class-definition time; confirm against the original source.
    def calc_px_error(self, pred: Tensor, disp: Tensor, loss_dict: dict, invalid_mask: Tensor):
        """Count predictions whose absolute disparity error exceeds the
        pixel threshold (and the total valid-pixel count), over valid
        pixels only; stored in loss_dict['error_px'] / ['total_px']."""
        loss_dict['error_px'] = torch.sum((torch.abs((pred[(~ invalid_mask)] - disp[(~ invalid_mask)])) > self.px_threshold)).item()
        loss_dict['total_px'] = torch.sum((~ invalid_mask)).item()
        return
    _grad()
    def compute_epe(self, pred: Tensor, disp: Tensor, loss_dict: dict, invalid_mask: Tensor):
        """End-point error (mean L1) over valid pixels; stored in
        loss_dict['epe']."""
        loss_dict['epe'] = self.epe_criterion(pred[(~ invalid_mask)], disp[(~ invalid_mask)])
        return
    _grad()
    def compute_iou(self, pred: Tensor, occ_mask: Tensor, loss_dict: dict, invalid_mask: Tensor):
        """Occlusion-prediction IoU (occluded and non-occluded regions
        combined) stored in loss_dict['iou'].

        `pred` is thresholded at 0.5 to a binary occlusion prediction.
        """
        pred_mask = (pred > 0.5)
        inter_occ = torch.logical_and(pred_mask, occ_mask).sum()
        union_occ = torch.logical_or(torch.logical_and(pred_mask, (~ invalid_mask)), occ_mask).sum()
        inter_noc = torch.logical_and((~ pred_mask), (~ invalid_mask)).sum()
        union_noc = torch.logical_or(torch.logical_and((~ pred_mask), occ_mask), (~ invalid_mask)).sum()
        loss_dict['iou'] = ((inter_occ + inter_noc).float() / (union_occ + union_noc))
        return
def compute_rr_loss(self, outputs: dict, inputs: NestedTensor, invalid_mask: Tensor):
if (invalid_mask is not None):
if (inputs.sampled_cols is not None):
invalid_mask = batched_index_select(invalid_mask, 2, inputs.sampled_cols)
if (inputs.sampled_rows is not None):
invalid_mask = batched_index_select(invalid_mask, 1, inputs.sampled_rows)
gt_response = outputs['gt_response']
eps = 1e-06
rr_loss = (- torch.log((gt_response + eps)))
if (invalid_mask is not None):
rr_loss = rr_loss[(~ invalid_mask)]
try:
rr_loss_occ_left = (- torch.log((outputs['gt_response_occ_left'] + eps)))
rr_loss = torch.cat([rr_loss, rr_loss_occ_left])
except KeyError:
pass
try:
rr_loss_occ_right = (- torch.log((outputs['gt_response_occ_right'] + eps)))
rr_loss = torch.cat([rr_loss, rr_loss_occ_right])
except KeyError:
pass
return rr_loss.mean()
def compute_l1_loss(self, pred: Tensor, inputs: NestedTensor, invalid_mask: Tensor, fullres: bool=True):
disp = inputs.disp
if (not fullres):
if (inputs.sampled_cols is not None):
if (invalid_mask is not None):
invalid_mask = batched_index_select(invalid_mask, 2, inputs.sampled_cols)
disp = batched_index_select(disp, 2, inputs.sampled_cols)
if (inputs.sampled_rows is not None):
if (invalid_mask is not None):
invalid_mask = batched_index_select(invalid_mask, 1, inputs.sampled_rows)
disp = batched_index_select(disp, 1, inputs.sampled_rows)
return self.l1_criterion(pred[(~ invalid_mask)], disp[(~ invalid_mask)])
def compute_entropy_loss(self, occ_pred: Tensor, inputs: NestedTensor, invalid_mask: Tensor):
eps = 1e-06
occ_mask = inputs.occ_mask
entropy_loss_occ = (- torch.log((occ_pred[occ_mask] + eps)))
entropy_loss_noc = (- torch.log(((1.0 - occ_pred[(~ invalid_mask)]) + eps)))
entropy_loss = torch.cat([entropy_loss_occ, entropy_loss_noc])
return entropy_loss.mean()
def aggregate_loss(self, loss_dict: dict):
loss = 0.0
for key in loss_dict:
loss += (loss_dict[key] * self.weights[key])
loss_dict['aggregated'] = loss
return
def forward(self, inputs: NestedTensor, outputs: dict):
loss = {}
if (self.validation_max_disp == (- 1)):
invalid_mask = (inputs.disp <= 0.0)
else:
invalid_mask = torch.logical_or((inputs.disp <= 0.0), (inputs.disp >= self.validation_max_disp))
loss['rr'] = self.compute_rr_loss(outputs, inputs, invalid_mask)
loss['l1_raw'] = self.compute_l1_loss(outputs['disp_pred_low_res'], inputs, invalid_mask, fullres=False)
loss['l1'] = self.compute_l1_loss(outputs['disp_pred'], inputs, invalid_mask)
loss['occ_be'] = self.compute_entropy_loss(outputs['occ_pred'], inputs, invalid_mask)
self.aggregate_loss(loss)
self.calc_px_error(outputs['disp_pred'], inputs.disp, loss, invalid_mask)
self.compute_epe(outputs['disp_pred'], inputs.disp, loss, invalid_mask)
self.compute_iou(outputs['occ_pred'], inputs.occ_mask, loss, invalid_mask)
return OrderedDict(loss) |
class Node():
    """A grid-search node: position, accumulated cost, and parent index."""

    def __init__(self, x, y, cost, parent_index):
        # Grid coordinates of this node.
        self.x = x
        self.y = y
        # Path cost accumulated to reach this node.
        self.cost = cost
        # Index of the predecessor node (-1 conventionally means "no parent").
        self.parent_index = parent_index

    def __str__(self):
        # Comma-separated rendering: "x,y,cost,parent_index".
        fields = (self.x, self.y, self.cost, self.parent_index)
        return ','.join(str(field) for field in fields)
class DBSNLoss(nn.Module):
    """Negative-log-likelihood-style loss combining a variance-normalized
    data term, a log noise-variance penalty, and a variance ratio term."""
    def __init__(self):
        super(DBSNLoss, self).__init__()
    def forward(self, target, mu, sigma_mu, sigma_n, sigma_y):
        loss = 0
        # Numerical floor keeping log() and division well-defined.
        eps = 1e-06
        # The target is a constant for this loss; block gradients into it.
        target = target.detach()
        t1 = (((target - mu) ** 2) / sigma_y)
        t2 = sigma_n.clamp(eps).log()
        t3 = (sigma_mu / sigma_n.clamp(eps))
        loss = ((t1 + t2) + t3)
        loss = loss.mean()
        # NOTE(review): t1 and t3 are non-negative by construction, so this
        # condition fires for virtually any input and zeroes the loss
        # in place.  It looks like a mangled guard (perhaps a NaN/inf check
        # or a large threshold) -- confirm intent against the original repo.
        if ((t1.max() > .0) or (t3.max() > .0)):
            loss.data.zero_()
        return loss
def prepare_src_path(video_names):
    """Build formatted 'path?=...,name?=...' source strings for iPER videos.

    Each video name is resolved under the module-level ``iPER_images_dir``,
    asserted to exist, formatted into the template, echoed to stdout, and
    collected into the returned list (in input order).
    """
    global iPER_images_dir
    template_path = 'path?={path},name?={name}'
    src_paths = []
    for name in video_names:
        image_dir = os.path.join(iPER_images_dir, name)
        # Fail fast if the frames for this video were never extracted.
        assert os.path.exists(image_dir)
        entry = template_path.format(path=image_dir, name=name)
        src_paths.append(entry)
        print(entry)
    return src_paths
class DCNPooling(DCNv2Pooling):
    """Deformable ROI pooling (DCNv2) with learned per-bin offsets and mask.

    When ``no_trans`` is False, an initial untranslated pooling pass feeds
    two small MLP heads that predict (x, y) offsets and a sigmoid modulation
    mask per pooled bin; the final pooling uses those offsets, modulated by
    the mask.
    """
    def __init__(self, spatial_scale, pooled_size, output_dim, no_trans, group_size=1, part_size=None, sample_per_part=4, trans_std=0.0, deform_fc_dim=1024):
        super(DCNPooling, self).__init__(spatial_scale, pooled_size, output_dim, no_trans, group_size, part_size, sample_per_part, trans_std)
        # Hidden width of the offset/mask prediction MLPs.
        self.deform_fc_dim = deform_fc_dim
        if (not no_trans):
            # First-pass pooling op with no_trans=True: gathers the features
            # the offset/mask heads condition on.
            self.func_offset = DCNv2PoolingFunction(self.spatial_scale, self.pooled_size, self.output_dim, True, self.group_size, self.part_size, self.sample_per_part, self.trans_std)
            # Predicts 2 values (x, y offset) for each pooled bin.
            self.offset_fc = nn.Sequential(nn.Linear(((self.pooled_size * self.pooled_size) * self.output_dim), self.deform_fc_dim), nn.ReLU(inplace=True), nn.Linear(self.deform_fc_dim, self.deform_fc_dim), nn.ReLU(inplace=True), nn.Linear(self.deform_fc_dim, ((self.pooled_size * self.pooled_size) * 2)))
            # Zero-init the last layer so training starts from zero offsets
            # (i.e. plain pooling).
            self.offset_fc[4].weight.data.zero_()
            self.offset_fc[4].bias.data.zero_()
            # Predicts one modulation scalar per pooled bin, squashed to (0, 1).
            self.mask_fc = nn.Sequential(nn.Linear(((self.pooled_size * self.pooled_size) * self.output_dim), self.deform_fc_dim), nn.ReLU(inplace=True), nn.Linear(self.deform_fc_dim, ((self.pooled_size * self.pooled_size) * 1)), nn.Sigmoid())
            # Zero-init => sigmoid(0) = 0.5 initial modulation everywhere.
            self.mask_fc[2].weight.data.zero_()
            self.mask_fc[2].bias.data.zero_()
    def forward(self, data, rois):
        """Pool ``data`` over ``rois``.

        NOTE(review): ``self.func`` is expected to be the pooling callable
        provided by the DCNv2Pooling base class -- confirm upstream.
        """
        if self.no_trans:
            # Plain (non-deformable) path: empty offset tensor on data's device.
            offset = data.new()
        else:
            n = rois.shape[0]
            offset = data.new()
            # Initial pass without offsets feeds the prediction heads.
            x = self.func_offset(data, rois, offset)
            offset = self.offset_fc(x.view(n, (- 1)))
            offset = offset.view(n, 2, self.pooled_size, self.pooled_size)
            mask = self.mask_fc(x.view(n, (- 1)))
            mask = mask.view(n, 1, self.pooled_size, self.pooled_size)
            # Deformable pooling with predicted offsets, modulated by the mask.
            feat = (self.func(data, rois, offset) * mask)
            return feat
        return self.func(data, rois, offset)
def parse_args():
    """Build and parse the command-line arguments for Noise2Noise testing.

    Returns the parsed ``argparse.Namespace`` with dataset path, checkpoint,
    noise configuration, seed, and crop-size options.
    """
    cli = ArgumentParser(description='PyTorch implementation of Noise2Noise from Lehtinen et al. (2018)')
    cli.add_argument('-d', '--data', default='../data', help='dataset root path')
    cli.add_argument('--load-ckpt', help='load model checkpoint')
    cli.add_argument('--show-output', type=int, default=0, help='pop up window to display outputs')
    cli.add_argument('--cuda', action='store_true', help='use cuda')
    cli.add_argument('-n', '--noise-type', type=str, default='gaussian', choices=['gaussian', 'poisson', 'text', 'mc'], help='noise type')
    cli.add_argument('-v', '--noise-param', type=float, default=50, help='noise parameter (e.g. sigma for gaussian)')
    cli.add_argument('-s', '--seed', type=int, help='fix random seed')
    cli.add_argument('-c', '--crop-size', type=int, default=256, help='image crop size')
    return cli.parse_args()
def make_task_cmds():
    """Build distributed-launcher command lines for one tiny model per task.

    For each task (translation, summarization, clm, mlm, qa, classification)
    and each associated tiny model, compose: launcher + task script args +
    model args + common training args.  Returns a dict keyed by
    '<task>_<model>'.

    NOTE(review): relies on module globals `tests_dir`, `root_dir`,
    `get_launcher`, and `*_TINY` model-name constants defined elsewhere.
    """
    data_dir_fixtures = f'{tests_dir}/fixtures'
    data_dir_samples = f'{data_dir_fixtures}/tests_samples'
    data_dir_wmt = f'{data_dir_samples}/wmt_en_ro'
    data_dir_xsum = f'{data_dir_samples}/xsum'
    # Training args shared by every task: tiny run (4 samples, 1 epoch, fp16).
    args_main = '\n    --do_train\n    --max_train_samples 4\n    --per_device_train_batch_size 2\n    --num_train_epochs 1\n    --fp16\n    --report_to none\n    --overwrite_output_dir\n    '.split()
    # Which tiny models to exercise for each task.
    tasks2models = dict(trans=['bart', 'fsmt', 'marian', 'mbart', 't5'], sum=['pegasus'], clm=['gpt2', 'xlm-roberta'], mlm=['electra', 'distilbert'], qa=['roberta'], clas=['bert', 'xlnet'])
    scripts_dir = f'{root_dir}/examples/pytorch'
    # Per-task script path plus task-specific arguments.
    tasks = dict(trans=f'''
    {scripts_dir}/translation/run_translation.py
    --train_file {data_dir_wmt}/train.json
    --source_lang en
    --target_lang ro
    ''', sum=f'''
    {scripts_dir}/summarization/run_summarization.py
    --train_file {data_dir_xsum}/sample.json
    --max_source_length 12
    --max_target_length 12
    --lang en
    ''', clm=f'''
    {scripts_dir}/language-modeling/run_clm.py
    --train_file {data_dir_fixtures}/sample_text.txt
    --block_size 8
    ''', mlm=f'''
    {scripts_dir}/language-modeling/run_mlm.py
    --train_file {data_dir_fixtures}/sample_text.txt
    ''', qa=f'''
    {scripts_dir}/question-answering/run_qa.py
    --train_file {data_dir_samples}/SQUAD/sample.json
    ''', clas=f'''
    {scripts_dir}/text-classification/run_glue.py
    --train_file {data_dir_samples}/MRPC/train.csv
    --max_seq_length 12
    --task_name MRPC
    ''')
    launcher = get_launcher(distributed=True)
    cmds = {}
    for (task, args) in tasks.items():
        args = args.split()
        for model in tasks2models[task]:
            # Resolve the tiny checkpoint name, e.g. 'xlm-roberta' -> XLM_ROBERTA_TINY.
            model_name = globals()[f"{model.upper().replace('-', '_')}_TINY"]
            args_model = f'--model_name_or_path {model_name}'.split()
            cmds[f'{task}_{model}'] = (((launcher + args) + args_model) + args_main)
    return cmds
def load_graph(model_path):
    """Load a computation graph from *model_path*.

    A directory is treated as serialized IR and loaded via
    ``load_graph_from_ir``; a regular file is compiled into a graph.

    Raises:
        ValueError: if *model_path* does not exist (with the offending
            path included in the message).
    """
    if not os.path.exists(model_path):
        log.error("Model path doesn't exist.")
        # Include the offending path so callers can diagnose the failure
        # (the original raised a bare ValueError with no message).
        raise ValueError(f"Model path doesn't exist: {model_path}")
    if os.path.isdir(model_path):
        # Directories hold the graph's intermediate representation (IR).
        return load_graph_from_ir(model_path)
    # NOTE(review): `compile` here is presumably a project-level helper
    # shadowing the builtin -- it turns a model file into a graph.
    return compile(model_path)
def print_tensor_statistics(tensor, name='', formatting='standard'):
    """Print summary statistics for *tensor*, rendered by
    ``get_tensor_statistics_str`` with the given label and format style."""
    stats_line = get_tensor_statistics_str(tensor, name, formatting)
    print(stats_line)
class DynamicConvolution2D(nn.Module):
    """Dynamic 2-D (time + frequency) lightweight convolution attention layer.

    Input is projected and gated (GLU), then convolved along the frequency
    axis with a per-position predicted kernel and along the time axis with a
    per-position, weight-shared dynamic kernel; the two results are
    concatenated and projected back to ``n_feat``.

    Args:
        wshare: number of weight-sharing groups; must divide ``n_feat``.
        n_feat: feature dimension of the input/output.
        dropout_rate: dropout applied to the predicted time-kernel weights.
        kernel_size: dynamic convolution kernel length.
        use_kernel_mask: apply a causal (lower-triangular) kernel mask.
        use_bias: add a learned per-feature bias after the time convolution.
    """

    def __init__(self, wshare, n_feat, dropout_rate, kernel_size, use_kernel_mask=False, use_bias=False):
        super(DynamicConvolution2D, self).__init__()
        assert ((n_feat % wshare) == 0)
        self.wshare = wshare
        self.use_kernel_mask = use_kernel_mask
        self.dropout_rate = dropout_rate
        self.kernel_size = kernel_size
        self.padding_size = int((kernel_size / 2))
        # Attention maps cached on each forward pass for inspection.
        self.attn_t = None
        self.attn_f = None
        # Projection to 2*n_feat feeds the GLU gate; linear2 projects back.
        self.linear1 = nn.Linear(n_feat, (n_feat * 2))
        self.linear2 = nn.Linear((n_feat * 2), n_feat)
        # Predicts the time-axis dynamic kernel (wshare groups x kernel_size).
        self.linear_weight = nn.Linear(n_feat, ((self.wshare * 1) * kernel_size))
        # FIX: nn.init.xavier_uniform is deprecated; use the in-place
        # xavier_uniform_ (identical initialization).
        nn.init.xavier_uniform_(self.linear_weight.weight)
        # Predicts the frequency-axis kernel (one kernel per position).
        self.linear_weight_f = nn.Linear(n_feat, kernel_size)
        nn.init.xavier_uniform_(self.linear_weight_f.weight)
        self.act = nn.GLU()
        self.use_bias = use_bias
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(n_feat))

    def forward(self, query, key, value, mask):
        """Apply dynamic convolution attention.

        Args:
            query: input of shape (batch, time, n_feat); key/value are unused
                (kept for attention-module interface compatibility).
            mask: optional padding mask; positions where it is 0 are zeroed
                (ignored when ``use_kernel_mask`` is set).

        Returns:
            Tensor of shape (batch, time, n_feat).
        """
        x = query
        (B, T, C) = x.size()
        H = self.wshare
        k = self.kernel_size
        # Project and gate: (B, T, C) -> (B, T, 2C) -> GLU -> (B, T, C).
        x = self.linear1(x)
        x = self.act(x)
        # Frequency-axis dynamic convolution: one k-tap kernel per (b, t)
        # position, realized as a grouped conv1d over the feature axis.
        weight_f = self.linear_weight_f(x).view((B * T), 1, k)
        self.attn_f = weight_f.view(B, T, k).unsqueeze(1)
        xf = F.conv1d(x.view(1, (B * T), C), weight_f, padding=self.padding_size, groups=(B * T))
        xf = xf.view(B, T, C)
        # Time-axis dynamic kernel: (B, T, H*k) -> (B, H, T, k).
        weight = self.linear_weight(x)
        weight = F.dropout(weight, self.dropout_rate, training=self.training)
        weight = weight.view(B, T, H, k).transpose(1, 2).contiguous()
        # Scatter the k-tap kernels onto a banded (T, T+k-1) matrix via
        # as_strided (one diagonal band per step), -inf elsewhere so that
        # softmax ignores out-of-band entries.
        weight_new = torch.zeros((((B * H) * T) * ((T + k) - 1)), dtype=weight.dtype)
        weight_new = weight_new.view(B, H, T, ((T + k) - 1)).fill_(float('-inf'))
        weight_new = weight_new.to(x.device)
        weight_new.as_strided((B, H, T, k), (((((T + k) - 1) * T) * H), (((T + k) - 1) * T), (T + k), 1)).copy_(weight)
        # Crop the band back to a (T, T) attention-like matrix.
        weight_new = weight_new.narrow((- 1), int(((k - 1) / 2)), T)
        if self.use_kernel_mask:
            # Causal masking: disallow attending to future time steps.
            kernel_mask = torch.tril(torch.ones(T, T, device=x.device)).unsqueeze(0)
            weight_new = weight_new.masked_fill((kernel_mask == 0.0), float('-inf'))
        weight_new = F.softmax(weight_new, dim=(- 1))
        self.attn_t = weight_new
        weight_new = weight_new.view((B * H), T, T)
        # Apply the normalized kernels as a batched matmul over time.
        x = x.transpose(1, 2).contiguous()
        x = x.view((B * H), int((C / H)), T).transpose(1, 2)
        x = torch.bmm(weight_new, x)
        x = x.transpose(1, 2).contiguous().view(B, C, T)
        if self.use_bias:
            x = (x + self.bias.view(1, (- 1), 1))
        x = x.transpose(1, 2)
        # Concatenate time- and frequency-convolved streams, then project back.
        x = torch.cat((x, xf), (- 1))
        if ((mask is not None) and (not self.use_kernel_mask)):
            mask = mask.transpose((- 1), (- 2))
            x = x.masked_fill((mask == 0), 0.0)
        x = self.linear2(x)
        return x
def grid_parameters(grid: Dict):
    """Yield every combination of the parameter grid as a dict.

    Scalar (non-iterable) values are treated as single-element choices.
    Note: strings are iterable and will be expanded character-by-character,
    matching the original behavior.
    """
    normalized = {
        key: (value if isinstance(value, Iterable) else [value])
        for key, value in grid.items()
    }
    for combo in itertools.product(*normalized.values()):
        yield dict(zip(grid.keys(), combo))
class RODEncode_SC1(nn.Module):
def __init__(self):
super(RODEncode_SC1, self).__init__()
self.conv1a = nn.Conv3d(in_channels=1, out_channels=64, kernel_size=(9, 5, 5), stride=(1, 1, 1), padding=(4, 2, 2))
self.conv1b = nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(9, 5, 5), stride=(2, 2, 2), padding=(4, 2, 2))
self.conv2a = nn.Conv3d(in_channels=64, out_channels=128, kernel_size=(9, 5, 5), stride=(1, 1, 1), padding=(4, 2, 2))
self.conv2b = nn.Conv3d(in_channels=128, out_channels=128, kernel_size=(9, 5, 5), stride=(2, 2, 2), padding=(4, 2, 2))
self.conv3a = nn.Conv3d(in_channels=128, out_channels=256, kernel_size=(9, 5, 5), stride=(1, 1, 1), padding=(4, 2, 2))
self.conv3b = nn.Conv3d(in_channels=256, out_channels=256, kernel_size=(9, 5, 5), stride=(1, 2, 2), padding=(4, 2, 2))
self.bn1a = nn.BatchNorm3d(num_features=64)
self.bn1b = nn.BatchNorm3d(num_features=64)
self.bn2a = nn.BatchNorm3d(num_features=128)
self.bn2b = nn.BatchNorm3d(num_features=128)
self.bn3a = nn.BatchNorm3d(num_features=256)
self.bn3b = nn.BatchNorm3d(num_features=256)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.bn1a(self.conv1a(x)))
x = self.relu(self.bn1b(self.conv1b(x)))
x = self.relu(self.bn2a(self.conv2a(x)))
x = self.relu(self.bn2b(self.conv2b(x)))
x = self.relu(self.bn3a(self.conv3a(x)))
x = self.relu(self.bn3b(self.conv3b(x)))
return x |
def tia_stretch(src, segment=4):
    """Randomly stretch an image horizontally (TIA text augmentation).

    The image is divided into ``segment`` vertical strips; each interior
    strip boundary is shifted left/right by a random amount bounded by
    roughly 80% of the strip width, and the image is re-warped with MLS.
    """
    (img_h, img_w) = src.shape[:2]
    cut = (img_w // segment)
    # Maximum displacement of an interior boundary (~80% of strip width).
    thresh = ((cut * 4) // 5)
    half_thresh = (thresh * 0.5)
    # The four image corners stay fixed in both point sets.
    src_pts = [[0, 0], [img_w, 0], [img_w, img_h], [0, img_h]]
    dst_pts = [[0, 0], [img_w, 0], [img_w, img_h], [0, img_h]]
    for cut_idx in np.arange(1, segment, 1):
        # Shift drawn uniformly from [-half_thresh, thresh - half_thresh).
        move = (np.random.randint(thresh) - half_thresh)
        boundary_x = (cut * cut_idx)
        src_pts.append([boundary_x, 0])
        src_pts.append([boundary_x, img_h])
        dst_pts.append([(boundary_x + move), 0])
        dst_pts.append([(boundary_x + move), img_h])
    warper = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
    return warper.generate()
def test_3(**init_kwargs):
    """Smoke test: generate three materialized datapoints from dumpster_v2."""
    zpy.init(**init_kwargs)
    zpy.generate('dumpster_v2.21', zpy.DatasetConfig('dumpster_v2'), num_datapoints=3, materialize=True)
def main():
    """Streamlit demo for the Retrospective Reader QA model.

    Lets the user pick a model, tune decoding thresholds in the sidebar,
    submit a query + context, and view the answer with verification scores.
    Relies on module globals `RETRO_READER_HOST` (model registry) and `C`
    (per-language example/help constants).
    """
    st.title('Retrospective Reader Demo')
    st.markdown('## Model name')
    option = st.selectbox(label='Choose the model used in retro reader', options=('[ko_KR] klue/roberta-large', '[ko_KR] monologg/koelectra-small-v3-discriminator', '[en_XX] google/electra-large-discriminator'), index=1)
    # Options are formatted '[lang_code] model_name'.
    (lang_code, model_name) = option.split(' ')
    retro_reader = RETRO_READER_HOST[model_name]
    # Language prefix selects the matching example/help constants on C.
    lang_prefix = ('KO' if (lang_code == '[ko_KR]') else 'EN')
    height = (300 if (lang_code == '[ko_KR]') else 200)
    # Decoding hyperparameters, tunable live from the sidebar.
    retro_reader.null_score_diff_threshold = st.sidebar.slider(label='null_score_diff_threshold', min_value=(- 10.0), max_value=10.0, value=0.0, step=1.0, help='ma!')
    retro_reader.rear_threshold = st.sidebar.slider(label='rear_threshold', min_value=(- 10.0), max_value=10.0, value=0.0, step=1.0, help='ma!')
    retro_reader.n_best_size = st.sidebar.slider(label='n_best_size', min_value=1, max_value=50, value=20, step=1, help='ma!')
    retro_reader.beta1 = st.sidebar.slider(label='beta1', min_value=(- 10.0), max_value=10.0, value=1.0, step=1.0, help='ma!')
    retro_reader.beta2 = st.sidebar.slider(label='beta2', min_value=(- 10.0), max_value=10.0, value=1.0, step=1.0, help='ma!')
    retro_reader.best_cof = st.sidebar.slider(label='best_cof', min_value=(- 10.0), max_value=10.0, value=1.0, step=1.0, help='ma!')
    return_submodule_outputs = st.sidebar.checkbox('return_submodule_outputs', value=False)
    st.markdown('## Demonstration')
    with st.form(key='my_form'):
        query = st.text_input(label='Type your query', value=getattr(C, f'{lang_prefix}_EXAMPLE_QUERY'), max_chars=None, help=getattr(C, f'{lang_prefix}_QUERY_HELP_TEXT'))
        context = st.text_area(label='Type your context', value=getattr(C, f'{lang_prefix}_EXAMPLE_CONTEXTS'), height=height, max_chars=None, help=getattr(C, f'{lang_prefix}_CONTEXT_HELP_TEXT'))
        submit_button = st.form_submit_button(label='Submit')
    if submit_button:
        with st.spinner('Please wait..'):
            outputs = retro_reader(query=query, context=context, return_submodule_outputs=return_submodule_outputs)
        # outputs[0] maps example ids to answers; outputs[1] is the rear
        # verification score.
        (answer, score) = (outputs[0]['id-01'], outputs[1])
        if (not answer):
            answer = 'No answer'
        st.markdown('## Results')
        st.write(answer)
        st.markdown('### Rear Verification Score')
        st.json(score)
        if return_submodule_outputs:
            # Extra diagnostics from the sketch/intensive sub-readers.
            (score_ext, nbest_preds, score_diff) = outputs[2:]
            st.markdown('### Sketch Reader Score (score_ext)')
            st.json(score_ext)
            st.markdown('### Intensive Reader Score (score_diff)')
            st.json(score_diff)
            st.markdown('### N Best Predictions (from intensive reader)')
            st.json(nbest_preds)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.