code stringlengths 101 5.91M |
|---|
def as_dict_handler(obj: Any) -> (dict[(str, Any)] | None):
try:
return obj.as_dict()
except AttributeError:
return None |
def test_diskdf_sample():
    """Check that galpy disk-DF sampling agrees between a physical-units DF
    (ro, vo set) and a unitless DF.

    Strategy: re-seed numpy's RNG identically before each paired draw, sample
    once with astropy Quantities and once in internal units, then require the
    de-unitized results to match to ~1e-8.
    """
    from galpy.df import dehnendf, shudf
    # Physical normalizations: distance scale [kpc] and velocity scale [km/s].
    (ro, vo) = (7.0, 230.0)
    df = dehnendf(ro=ro, vo=vo)   # Dehnen DF, physical units
    dfnou = dehnendf()            # Dehnen DF, internal (unitless) units
    dfs = shudf(ro=ro, vo=vo)     # Shu DF, physical units
    dfsnou = shudf()              # Shu DF, internal units
    # sampledSurfacemassLOS: same seed -> same draw; compare kpc value / ro.
    numpy.random.seed(1)
    du = (df.sampledSurfacemassLOS((11.0 * units.deg), n=1, maxd=(10.0 * units.kpc)).to(units.kpc).value / ro)
    numpy.random.seed(1)
    dnou = dfnou.sampledSurfacemassLOS(((11.0 * numpy.pi) / 180.0), n=1, maxd=(10.0 / ro))
    assert (numpy.fabs((du - dnou)) < (10.0 ** (- 8.0))), 'diskdf sampling method sampledSurfacemassLOS does not return expected Quantity'
    # sampleVRVT: compare (vR, vT) draws, de-unitized by vo.
    numpy.random.seed(1)
    du = (df.sampleVRVT(1.1, n=1).to((units.km / units.s)).value / vo)
    numpy.random.seed(1)
    dnou = dfnou.sampleVRVT(1.1, n=1)
    assert numpy.all((numpy.fabs((du - dnou)) < (10.0 ** (- 8.0)))), 'diskdf sampling method sampleVRVT does not return expected Quantity'
    # sampleLOS: compare the full orbit phase-space coordinates (vxvv).
    numpy.random.seed(1)
    du = df.sampleLOS((11.0 * units.deg), n=1)
    numpy.random.seed(1)
    dnou = dfnou.sampleLOS(11.0, n=1, deg=True)
    assert numpy.all((numpy.fabs((numpy.array(du[0].vxvv) - numpy.array(dnou[0].vxvv))) < (10.0 ** (- 8.0)))), 'diskdf sampling method sampleLOS does not work as expected with Quantity input'
    # sample over a radial range, Dehnen DF.
    numpy.random.seed(1)
    du = df.sample(rrange=[(4.0 * units.kpc), (12.0 * units.kpc)], n=1)
    numpy.random.seed(1)
    dnou = dfnou.sample(rrange=[(4.0 / ro), (12.0 / ro)], n=1)
    assert numpy.all((numpy.fabs((numpy.array(du[0].vxvv) - numpy.array(dnou[0].vxvv))) < (10.0 ** (- 8.0)))), 'diskdf sampling method sample does not work as expected with Quantity input'
    # sample over a radial range, Shu DF.
    numpy.random.seed(1)
    du = dfs.sample(rrange=[(4.0 * units.kpc), (12.0 * units.kpc)], n=1)
    numpy.random.seed(1)
    dnou = dfsnou.sample(rrange=[(4.0 / ro), (12.0 / ro)], n=1)
    assert numpy.all((numpy.fabs((numpy.array(du[0].vxvv) - numpy.array(dnou[0].vxvv))) < (10.0 ** (- 8.0)))), 'diskdf sampling method sample does not work as expected with Quantity input'
    return None
def l2_norm(input, axis=1):
    """Normalize *input* to unit L2 norm along *axis* (reduced dim is kept)."""
    denom = torch.norm(input, 2, axis, True)
    normalized = torch.div(input, denom)
    return normalized
def face_area(V, F):
    """Compute per-face (doubled) areas of a triangle mesh via libigl.

    V: vertex positions, F: triangle indices.  NumPy inputs are converted to
    the Eigen types the igl bindings expect.  Returns a flat numpy array.
    NOTE(review): ``igl.doublearea`` yields twice the triangle area by its
    API contract — confirm whether callers expect the doubled value.
    """
    if (type(V).__module__ == np.__name__):
        # Convert numpy arrays to Eigen matrices for the igl bindings.
        (V, F) = (p2e(V), p2e(F))
    A = Xd()  # Eigen dense vector receiving the per-face results
    igl.doublearea(V, F, A)
    A = e2p(A).flatten()  # back to a flat numpy array
    return A
class BasicBlock(nn.Module):
    """Standard two-convolution residual block (ResNet v1 style).

    Supports only groups=1 and base_width=64; an optional ``downsample``
    module projects the identity branch when shapes change.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        # conv -> bn -> relu -> conv -> bn, then the residual add.
        self.conv1 = conv3x3(inplanes, planes, stride, dilation=dilation)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, dilation=dilation)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Residual forward pass: F(x) + identity, followed by ReLU."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
def main():
    """Single-GPU meta-learning evaluation entry point.

    Parses args, pins computation to GPU 0 (distributed evaluation is
    explicitly disallowed), builds the test loader, restores the model
    checkpoint, and runs the mode-specific test function once.
    """
    P = parse_args()
    P.rank = 0
    if torch.cuda.is_available():
        torch.cuda.set_device(P.rank)
    device = torch.device((f'cuda' if torch.cuda.is_available() else 'cpu'))
    P.world_size = torch.cuda.device_count()
    P.distributed = (P.world_size > 1)
    # This evaluation script only supports a single process.
    assert (not P.distributed)
    set_random_seed(P.seed)
    # Deterministic cuDNN for reproducible evaluation numbers.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    kwargs = {'batch_size': P.test_batch_size, 'shuffle': True, 'pin_memory': True, 'num_workers': 4}
    test_set = get_meta_dataset(P, dataset=P.dataset, only_test=True)
    if P.regression:
        # Regression datasets already yield batches; no meta-dataloader needed.
        test_loader = test_set
    else:
        test_loader = BatchMetaDataLoader(test_set, **kwargs)
    ' Initialize model '
    model = get_model(P, P.model).to(device)
    load_model(P, model)
    from evals import setup as test_setup
    test_func = test_setup(P.mode, P)
    # Dataset-specific loss: MSE for pose regression, azimuth loss for
    # ShapeNet1D, cross-entropy otherwise.
    if (P.dataset == 'pose'):
        criterion = nn.MSELoss()
    elif (P.dataset == 'shapenet'):
        from data.shapenet1d import AzimuthLoss
        criterion = AzimuthLoss()
    else:
        criterion = nn.CrossEntropyLoss()
    ' test '
    test_func(P, model, test_loader, criterion, 0.0, logger=None)
def create_cmfd_similarity_branch(img_shape=(256, 256, 3), nb_pools=100, name='simiDet'):
    """Build the copy-move-forgery similarity-detection branch (Keras).

    VGG16-style convolutional feature extractor -> channel-standardization ->
    self-correlation with percentile pooling -> multi-scale BnInception
    decoder with bilinear upsampling, ending in a 1-channel sigmoid mask at
    input resolution.
    NOTE(review): ``nb_pools`` is unused in this body — confirm upstream.
    """
    img_input = Input(shape=img_shape, name=(name + '_in'))
    # --- VGG16-like feature extractor (4 conv blocks, each ending in 2x2 pool) ---
    bname = (name + '_cnn')
    x1 = Conv2D(64, (3, 3), activation='relu', padding='same', name=(bname + '_b1c1'))(img_input)
    x1 = Conv2D(64, (3, 3), activation='relu', padding='same', name=(bname + '_b1c2'))(x1)
    x1 = MaxPooling2D((2, 2), strides=(2, 2), name=(bname + '_b1p'))(x1)
    x2 = Conv2D(128, (3, 3), activation='relu', padding='same', name=(bname + '_b2c1'))(x1)
    x2 = Conv2D(128, (3, 3), activation='relu', padding='same', name=(bname + '_b2c2'))(x2)
    x2 = MaxPooling2D((2, 2), strides=(2, 2), name=(bname + '_b2p'))(x2)
    x3 = Conv2D(256, (3, 3), activation='relu', padding='same', name=(bname + '_b3c1'))(x2)
    x3 = Conv2D(256, (3, 3), activation='relu', padding='same', name=(bname + '_b3c2'))(x3)
    x3 = Conv2D(256, (3, 3), activation='relu', padding='same', name=(bname + '_b3c3'))(x3)
    x3 = MaxPooling2D((2, 2), strides=(2, 2), name=(bname + '_b3p'))(x3)
    x4 = Conv2D(512, (3, 3), activation='relu', padding='same', name=(bname + '_b4c1'))(x3)
    x4 = Conv2D(512, (3, 3), activation='relu', padding='same', name=(bname + '_b4c2'))(x4)
    x4 = Conv2D(512, (3, 3), activation='relu', padding='same', name=(bname + '_b4c3'))(x4)
    x4 = MaxPooling2D((2, 2), strides=(2, 2), name=(bname + '_b4p'))(x4)
    # Standardize features along channels before self-correlation.
    xx = Activation(std_norm_along_chs, name=(bname + '_sn'))(x4)
    # --- self-correlation + percentile pooling ---
    bname = (name + '_corr')
    xcorr = SelfCorrelationPercPooling(name=(bname + '_corr'))(xx)
    xn = BatchNormalization(name=(bname + '_bn'))(xcorr)
    # --- multi-scale decoder: BnInception blocks + bilinear upsampling,
    # concatenating a raw-upsample path with a refined path at each scale ---
    patch_list = [(1, 1), (3, 3), (5, 5)]
    bname = (name + '_dconv')
    f16 = BnInception(xn, 8, patch_list, name=(bname + '_mpf'))
    f32 = BilinearUpSampling2D(name=(bname + '_bx2'))(f16)
    dx32 = BnInception(f32, 6, patch_list, name=(bname + '_dx2'))
    f64a = BilinearUpSampling2D(name=(bname + '_bx4a'))(f32)
    f64b = BilinearUpSampling2D(name=(bname + '_bx4b'))(dx32)
    f64 = Concatenate(axis=(- 1), name=(name + '_dx4_m'))([f64a, f64b])
    dx64 = BnInception(f64, 4, patch_list, name=(bname + '_dx4'))
    f128a = BilinearUpSampling2D(name=(bname + '_bx8a'))(f64a)
    f128b = BilinearUpSampling2D(name=(bname + '_bx8b'))(dx64)
    f128 = Concatenate(axis=(- 1), name=(name + '_dx8_m'))([f128a, f128b])
    dx128 = BnInception(f128, 2, patch_list, name=(bname + '_dx8'))
    f256a = BilinearUpSampling2D(name=(bname + '_bx16a'))(f128a)
    f256b = BilinearUpSampling2D(name=(bname + '_bx16b'))(dx128)
    f256 = Concatenate(axis=(- 1), name=(name + '_dx16_m'))([f256a, f256b])
    dx256 = BnInception(f256, 2, patch_list, name=(bname + '_dx16'))
    fm256 = Concatenate(axis=(- 1), name=(name + '_mfeat'))([f256a, dx256])
    # Larger patches for the final mask-feature block.
    masks = BnInception(fm256, 2, [(5, 5), (7, 7), (11, 11)], name=(bname + '_dxF'))
    # Final 1-channel sigmoid prediction mask.
    pred_mask = Conv2D(1, (3, 3), activation='sigmoid', name=(name + '_pred_mask'), padding='same')(masks)
    model = Model(inputs=img_input, outputs=pred_mask, name=name)
    return model
class Logger():
    """Thin adapter that forwards scalar metrics to expviz's Logger."""

    def __init__(self, *args, **kwargs):
        # Imported lazily so the dependency is only required when used.
        from expviz.logger import Logger as ExpvizLogger
        self.expviz = ExpvizLogger(*args, **kwargs)

    def write(self, scalar_dict, epoch):
        """Log every (key, value) pair at *epoch*; torch tensors are unwrapped."""
        for key, value in scalar_dict.items():
            scalar = value.item() if isinstance(value, torch.Tensor) else value
            self.expviz.add_scalar(key, scalar, epoch)
def linkcode_resolve(domain, info):
    """Sphinx linkcode hook: map a Python module to its source URL.

    Returns None for non-Python objects or when no module is given;
    otherwise joins the module path onto the repository URL.
    """
    if domain != 'py' or not info['module']:
        return None
    module_path = info['module'].replace('.', '/')
    return '{}/{}.py'.format(repo_url, module_path)
# _model  (extraction residue of a lost decorator line — almost certainly
# ``@register_model`` in the timm convention; confirm against upstream)
def res2net50_14w_8s(pretrained=False, **kwargs):
    """Construct a Res2Net-50 14w-8s: Bottle2neck blocks [3, 4, 6, 3],
    base width 14, scale 8.  Extra kwargs are forwarded to the builder.
    """
    model_args = dict(block=Bottle2neck, layers=[3, 4, 6, 3], base_width=14, block_args=dict(scale=8), **kwargs)
    return _create_res2net('res2net50_14w_8s', pretrained, **model_args)
class LRFinder(LearnerCallback):
    """fastai learning-rate finder callback.

    Sweeps the learning rate exponentially from ``start_lr`` to ``end_lr``
    over ``num_it`` iterations, tracking the best (lowest) smoothed loss,
    and stops early on divergence (loss > 4x best, or NaN) when ``stop_div``.
    """

    def __init__(self, learn: Learner, start_lr: float=1e-07, end_lr: float=10, num_it: int=100, stop_div: bool=True):
        super().__init__(learn)
        (self.data, self.stop_div) = (learn.data, stop_div)
        # Exponential annealing schedule across the LR range.
        self.sched = Scheduler((start_lr, end_lr), num_it, annealing_exp)

    def on_train_begin(self, pbar, **kwargs: Any) -> None:
        """Snapshot weights and start the sweep at the schedule's first LR.

        NOTE(review): annotated ``-> None`` but actually returns a callback
        flag dict ({'skip_validate': True}) that fastai consumes.
        """
        setattr(pbar, 'clean_on_interrupt', True)
        self.learn.save('tmp')  # snapshot; restored in on_train_end
        self.opt = self.learn.opt
        self.opt.lr = self.sched.start
        (self.stop, self.best_loss) = (False, 0.0)
        return {'skip_validate': True}

    def on_batch_end(self, iteration: int, smooth_loss: TensorOrNumber, **kwargs: Any) -> None:
        """Advance the LR schedule and stop on completion or divergence."""
        # First iteration seeds best_loss; afterwards track the minimum.
        if ((iteration == 0) or (smooth_loss < self.best_loss)):
            self.best_loss = smooth_loss
        self.opt.lr = self.sched.step()
        if (self.sched.is_done or (self.stop_div and ((smooth_loss > (4 * self.best_loss)) or torch.isnan(smooth_loss)))):
            return {'stop_epoch': True, 'stop_training': True}

    def on_train_end(self, **kwargs: Any) -> None:
        """Restore pre-sweep weights and reset any stateful model/callbacks."""
        self.learn.load('tmp', purge=False)
        if hasattr(self.learn.model, 'reset'):
            self.learn.model.reset()
        for cb in self.callbacks:
            if hasattr(cb, 'reset'):
                cb.reset()
        print('LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.')
class positive_int_or_none(_ParseType):
    """argparse-style parser apparently meant to accept a positive int or a
    'none' marker.

    NOTE(review): the ``_none`` line below is extraction residue of a lost
    line (likely the attribute/decorator that handled the None case); as
    written, __call__ simply delegates to positive_int() and never returns
    None — confirm against the original source.
    """
    # _none  (mangled/unrecoverable line from the original source)

    def __call__(self, string: str) -> (int | None):
        return positive_int()(string)
def import_cifar(dataset=10):
    """Load and standardize CIFAR-10 or CIFAR-100.

    dataset: 10 for CIFAR-10, 100 for CIFAR-100 (fine labels).
    Returns (x_train, y_train, x_test, y_test) with images cast to float32
    and standardized by the *training* mean/std (test uses the same stats),
    and labels one-hot encoded.

    Fixed: any other ``dataset`` value previously fell through and crashed
    later with UnboundLocalError; it now raises a clear ValueError up front.
    """
    if (dataset == 10):
        ((x_train, y_train), (x_test, y_test)) = cifar10.load_data()
    elif (dataset == 100):
        ((x_train, y_train), (x_test, y_test)) = cifar100.load_data(label_mode='fine')
    else:
        raise ValueError('dataset must be 10 or 100, got {!r}'.format(dataset))
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    # Standardize with training-set statistics only (no test leakage).
    (m, st) = (x_train.mean(), x_train.std())
    x_train = (x_train - m)
    x_test = (x_test - m)
    x_train = (x_train / st)
    x_test = (x_test / st)
    y_train = one_hot_encoding(y_train)
    y_test = one_hot_encoding(y_test)
    return (x_train, y_train, x_test, y_test)
class GUITopoSim(Sim):
    """Simulation driven by topology/net-settings files from the imalse GUI tool."""

    def __init__(self, argv, parser=None):
        super(GUITopoSim, self).__init__(argv)
        # Resolve the GUI-generated files relative to the project ROOT.
        self.topo_file = ((self.ROOT + '/') + self.args.topology_file)
        self.ns_file = ((self.ROOT + '/') + self.args.net_settings_file)
        self.net_desc = self.get_net_desc(self.topo_file, self.ns_file)

    def init_parser(self, parser):
        """Add the GUI file options on top of the base Sim arguments."""
        super(GUITopoSim, self).init_parser(parser)
        parser.add_argument('-t', '--topology_file', help='topology file generated by imalse GUI tool')
        parser.add_argument('-n', '--net_settings_file', help='net_settings file generated by imalse GUI tool')

    def get_net_desc(self, topo_file, ns_file):
        """Build the network-description dict from the GUI files.

        Applies the fs address-prefix workaround to the settings file, loads
        it, and attaches the Inet adjacency matrix plus default node
        type/parameters.
        """
        new_net_settings_file = fix_fs_addr_prefix_bug(ns_file)
        net_settings = load_para(f_name=new_net_settings_file, encap=None)
        net_settings['topo'] = get_inet_adj_mat(topo_file)
        net_settings['node_type'] = 'NNode'
        net_settings['node_para'] = {}
        return net_settings
class Data2VecTextForTokenClassification(metaclass=DummyObject):
    """Import-time placeholder used when PyTorch is unavailable.

    Auto-generated dummy (transformers-style): any attempt to instantiate
    it calls ``requires_backends``, which raises a helpful error unless the
    'torch' backend is installed.
    """
    _backends = ['torch']  # backends required by the real implementation

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def run(model, logdir, batch_size=50, vanilla=False, custom_steps=None, eta=None, n_samples=50000, nplog=None):
    """Sample images from an unconditional diffusion model and log them.

    Draws ``n_samples`` images in batches, saving PNGs under ``logdir`` and
    a stacked .npz under ``nplog``.  ``vanilla`` selects full DDPM sampling;
    otherwise DDIM with ``custom_steps`` steps and stochasticity ``eta``.
    Conditional models raise NotImplementedError.
    """
    if vanilla:
        print(f'Using Vanilla DDPM sampling with {model.num_timesteps} sampling steps.')
    else:
        print(f'Using DDIM sampling with {custom_steps} sampling steps and eta={eta}')
    tstart = time.time()
    # Resume numbering after any PNGs already present in logdir.
    n_saved = (len(glob.glob(os.path.join(logdir, '*.png'))) - 1)
    if (model.cond_stage_model is None):
        all_images = []
        print(f'Running unconditional sampling for {n_samples} samples')
        for _ in trange((n_samples // batch_size), desc='Sampling Batches (unconditional)'):
            logs = make_convolutional_sample(model, batch_size=batch_size, vanilla=vanilla, custom_steps=custom_steps, eta=eta)
            n_saved = save_logs(logs, logdir, n_saved=n_saved, key='sample')
            all_images.extend([custom_to_np(logs['sample'])])
            if (n_saved >= n_samples):
                print(f'Finish after generating {n_saved} samples')
                break
        # Stack all batches and truncate to exactly n_samples.
        all_img = np.concatenate(all_images, axis=0)
        all_img = all_img[:n_samples]
        shape_str = 'x'.join([str(x) for x in all_img.shape])
        nppath = os.path.join(nplog, f'{shape_str}-samples.npz')
        np.savez(nppath, all_img)
    else:
        raise NotImplementedError('Currently only sampling for unconditional models supported.')
    print(f'sampling of {n_saved} images finished in {((time.time() - tstart) / 60.0):.2f} minutes.')
def deconv3d_bn_relu(batchNorm, in_planes, out_planes, kernel_size=4, stride=2, padding=1, output_padding=0, bias=True):
    """3D transposed convolution, optional BatchNorm3d, then ReLU.

    Returns an nn.Sequential of [ConvTranspose3d, (BatchNorm3d,) ReLU];
    the BatchNorm layer is inserted only when ``batchNorm`` is truthy.
    """
    layers = [nn.ConvTranspose3d(in_planes, out_planes, kernel_size=kernel_size,
                                 stride=stride, padding=padding,
                                 output_padding=output_padding, bias=bias)]
    if batchNorm:
        layers.append(nn.BatchNorm3d(out_planes))
    layers.append(nn.ReLU(inplace=True))
    return nn.Sequential(*layers)
def getPosFromFileName(fileName):
    """True iff the third-from-last '_'-separated field of the basename
    parses to an integer greater than 200."""
    basename = fileName.split('/')[-1]
    fields = basename.split('_')
    return int(fields[-3]) > 200
class CalibCollector(CollectorBase):
    """Calibration-statistics collector for MXNet/Gluon quantization.

    Tracks a running per-tensor (min, max) for tensors listed in
    ``include_tensors_minmax``, and additionally accumulates symmetric
    histograms (for KL-divergence threshold search) for tensors listed in
    ``include_tensors_kl``.

    Fixed: ``_combine_histogram`` was defined without ``self`` but invoked
    as a bound method (``self._combine_histogram(...)``), which passed one
    positional argument too many and raised TypeError on the first
    histogram merge; it is now a ``@staticmethod``, matching its call site.
    """

    def __init__(self, include_tensors_kl, include_tensors_minmax, num_bins=8001):
        self.min_max_dict = {}   # name -> (running min, running max)
        self.hist_dict = {}      # name -> (hist, hist_edges, min, max, th)
        self.num_bins = num_bins
        self.include_tensors_minmax = include_tensors_minmax
        self.include_tensors_kl = include_tensors_kl

    def collect_gluon(self, name, _, arr):
        """Record statistics for tensor ``arr`` registered under ``name``.

        Tensors not listed for either algorithm are ignored.
        """
        if (name in self.include_tensors_kl):
            alg = 'kl'
        elif (name in self.include_tensors_minmax):
            alg = 'minmax'
        else:
            return
        min_range = arr.min().asscalar()
        max_range = arr.max().asscalar()
        # Symmetric threshold around zero for the histogram range.
        th = max(abs(min_range), abs(max_range))
        if (name in self.min_max_dict):
            cur_min_max = self.min_max_dict[name]
            self.min_max_dict[name] = (min(cur_min_max[0], min_range), max(cur_min_max[1], max_range))
        else:
            self.min_max_dict[name] = (min_range, max_range)
        if (alg == 'kl'):
            arr = arr.asnumpy()
            if (name in self.hist_dict):
                self.hist_dict[name] = self._combine_histogram(self.hist_dict[name], arr, min_range, max_range, th)
            else:
                (hist, hist_edges) = np.histogram(arr, bins=self.num_bins, range=((- th), th))
                self.hist_dict[name] = (hist, hist_edges, min_range, max_range, th)

    @staticmethod
    def _combine_histogram(old_hist, arr, new_min, new_max, new_th):
        """Merge ``arr`` into an existing histogram tuple via MXNet helpers."""
        if check_mx_version('2.0.0'):
            return mx.contrib.quantization._LayerHistogramCollector.combine_histogram(old_hist, arr, new_min, new_max, new_th)
        else:
            return mx.contrib.quantization.combine_histogram(old_hist, arr, new_min, new_max, new_th)

    def calc_kl_th_dict(self, quantized_dtype):
        """Compute optimal KL thresholds from accumulated histograms.

        Returns an empty dict when no histograms were collected.
        """
        if (len(self.hist_dict) > 0):
            if check_mx_version('2.0.0'):
                return mx.contrib.quantization._LayerHistogramCollector.get_optimal_thresholds(self.hist_dict, quantized_dtype)
            else:
                return mx.contrib.quantization._get_optimal_thresholds(self.hist_dict, quantized_dtype)
        return {}
class MLDG(ERM):
    """Meta-Learning for Domain Generalization (first-order MLDG).

    For each (meta-train, meta-test) pair of domain minibatches: take an
    inner SGD step on the meta-train loss with a cloned network, then
    accumulate both the inner-step gradients and the meta-test gradients
    (scaled by ``mldg_beta``) onto the real network before one outer step.
    """

    def __init__(self, args):
        super(MLDG, self).__init__(args)
        self.args = args

    def update(self, minibatches, opt, sch):
        """One meta-update over all domain pairs; returns {'total': loss}."""
        num_mb = len(minibatches)
        objective = 0
        opt.zero_grad()
        # Ensure every parameter has a .grad buffer to accumulate into.
        for p in self.network.parameters():
            if (p.grad is None):
                p.grad = torch.zeros_like(p)
        for ((xi, yi), (xj, yj)) in random_pairs_of_minibatches_by_domainperm(minibatches):
            (xi, yi, xj, yj) = (xi.cuda().float(), yi.cuda().long(), xj.cuda().float(), yj.cuda().long())
            # Clone the network for the inner (meta-train) adaptation step.
            inner_net = copy.deepcopy(self.network)
            inner_opt = get_optimizer(inner_net, self.args, True)
            inner_sch = get_scheduler(inner_opt, self.args)
            inner_obj = F.cross_entropy(inner_net(xi), yi)
            inner_opt.zero_grad()
            inner_obj.backward()
            inner_opt.step()
            if inner_sch:
                inner_sch.step()
            # Accumulate the inner-step gradients onto the real network,
            # averaged over the number of minibatches.
            for (p_tgt, p_src) in zip(self.network.parameters(), inner_net.parameters()):
                if (p_src.grad is not None):
                    p_tgt.grad.data.add_((p_src.grad.data / num_mb))
            objective += inner_obj.item()
            # Meta-test loss on the held-out domain, differentiated w.r.t.
            # the adapted parameters (first-order approximation).
            loss_inner_j = F.cross_entropy(inner_net(xj), yj)
            grad_inner_j = autograd.grad(loss_inner_j, inner_net.parameters(), allow_unused=True)
            objective += (self.args.mldg_beta * loss_inner_j).item()
            for (p, g_j) in zip(self.network.parameters(), grad_inner_j):
                if (g_j is not None):
                    p.grad.data.add_(((self.args.mldg_beta * g_j.data) / num_mb))
        objective /= len(minibatches)
        opt.step()
        if sch:
            sch.step()
        return {'total': objective}
def get_analytics_zoo_classpath():
    """Resolve the BigDL/Analytics-Zoo jar classpath.

    Honors the BIGDL_CLASSPATH environment variable when set (validating
    that each colon-separated entry, or its pre-glob prefix, exists);
    otherwise falls back to the single bundled jar under share/orca/lib.
    Returns '' when nothing is found.
    """
    env_cp = os.getenv('BIGDL_CLASSPATH')
    if env_cp:
        for path in env_cp.split(':'):
            # Accept either the literal path or the directory before a '*' glob.
            if not os.path.exists(path) and not os.path.exists(path.split('*')[0]):
                invalidInputError(False, 'Path {} specified BIGDL_CLASSPATH does not exist.'.format(path))
        return os.environ['BIGDL_CLASSPATH']
    jar_dir = os.path.abspath(__file__ + '/../../../')
    jar_paths = glob.glob(os.path.join(jar_dir, 'share/orca/lib/*.jar'))
    if not jar_paths:
        return ''
    invalidInputError(len(jar_paths) == 1, 'Expecting one jar: %s' % len(jar_paths))
    return jar_paths[0]
def is_prime(n):
    """Return True iff *n* is a prime number, by trial division up to sqrt(n).

    Fixes the original's mishandling of small inputs: 2 is prime (the old
    code rejected every even number, including 2), and n < 2 is never prime
    (the old code reported 1 — and every odd negative — as prime).
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if (n % 2) == 0:
        return False
    # Only odd divisors up to floor(sqrt(n)) need checking.
    sqrt_n = int(math.floor(math.sqrt(n)))
    for i in range(3, (sqrt_n + 1), 2):
        if (n % i) == 0:
            return False
    return True
def run(cfg):
    """Train and evaluate a GNN over one or more seeds, printing aggregates.

    Uses cfg.seed alone when set, otherwise seeds 0..cfg.runs-1.  RevGAT
    gets the DGL trainer; every other model the default GNNTrainer.
    """
    seeds = ([cfg.seed] if (cfg.seed is not None) else range(cfg.runs))
    if (cfg.gnn.model.name == 'RevGAT'):
        TRAINER = DGLGNNTrainer
    else:
        TRAINER = GNNTrainer
    all_acc = []
    start = time.time()
    for seed in seeds:
        cfg.seed = seed
        trainer = TRAINER(cfg, cfg.gnn.train.feature_type)
        trainer.train()
        (_, acc) = trainer.eval_and_save()
        all_acc.append(acc)
    end = time.time()
    if (len(all_acc) > 1):
        # Mean/std over seeds; acc entries carry 'val_acc' / 'test_acc'.
        df = pd.DataFrame(all_acc)
        print(f"[{cfg.gnn.model.name} + {cfg.gnn.train.feature_type}] ValACC: {df['val_acc'].mean():.4f} {df['val_acc'].std():.4f}, TestAcc: {df['test_acc'].mean():.4f} {df['test_acc'].std():.4f}")
    print(f'Running time: {((end - start) / len(seeds)):.2f}s')
def get_checkpoint_fn():
    """Return the activation-checkpointing callable to use.

    Prefers DeepSpeed's checkpoint when its checkpointing subsystem has
    been configured; otherwise falls back to torch.utils.checkpoint.
    """
    if deepspeed.checkpointing.is_configured():
        return deepspeed.checkpointing.checkpoint
    return torch.utils.checkpoint.checkpoint
def frozen_bn(model):
    """Freeze every BatchNorm2d/3d in *model* except the first one found.

    Frozen layers are switched to eval mode and their affine weight/bias
    stop receiving gradients; the first BN encountered (in named_modules
    order) is skipped and left trainable.
    """
    skipped_first = False
    for name, module in model.named_modules():
        if not isinstance(module, (torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)):
            continue
        if not skipped_first:
            skipped_first = True
            print('Skip frozen first bn layer: ' + name)
            continue
        module.eval()
        module.weight.requires_grad = False
        module.bias.requires_grad = False
def cli_register(name: str, description: str=''):
    """Decorator factory registering a command under dotted *name*.

    Walks the nested ``neuralchat_commands`` dict along the dot-separated
    path, stores the decorated callable under '_command', and records the
    optional description.  Returns the callable unchanged.
    """
    def _wrapper(command):
        node = neuralchat_commands
        for part in name.split('.'):
            node = node[part]
        node['_command'] = command
        if description:
            node['description'] = description
        return command
    return _wrapper
def copy_bn_params(module, bn_module, remove_bn=True, verbose=False):
    """Copy *bn_module*'s affine parameters onto *module* as 'gamma'/'beta'.

    Clones the BatchNorm weight/bias (when present) into fresh Parameters
    under no_grad.  ``remove_bn`` and ``verbose`` are accepted for API
    compatibility but unused in this implementation.
    """
    with torch.no_grad():
        if hasattr(bn_module, 'weight'):
            gamma = nn.Parameter(bn_module.weight.data.clone())
            module.register_parameter('gamma', gamma)
        if hasattr(bn_module, 'bias'):
            beta = nn.Parameter(bn_module.bias.data.clone())
            module.register_parameter('beta', beta)
# Reconstructed decorator: the source line was garbled to ".parametrize(...)" —
# it is almost certainly ``pytest.mark.parametrize`` over gym spaces; confirm upstream.
@pytest.mark.parametrize('space', [Discrete(3), Tuple([Discrete(5), Discrete(10)]), Tuple([Discrete(5), Box(low=np.array([0, 0]), high=np.array([1, 5]))]), Tuple((Discrete(5), Discrete(2), Discrete(2))), MultiDiscrete([2, 2, 100]), Dict({'position': Discrete(5), 'velocity': Box(low=np.array([0, 0]), high=np.array([1, 5]))})])
def test_roundtripping(space):
    """Samples must survive a to_jsonable -> JSON text -> from_jsonable round trip."""
    sample_1 = space.sample()
    sample_2 = space.sample()
    assert space.contains(sample_1)
    assert space.contains(sample_2)
    json_rep = space.to_jsonable([sample_1, sample_2])
    # Serialize through real JSON text to catch non-serializable leftovers.
    json_roundtripped = json.loads(json.dumps(json_rep))
    samples_after_roundtrip = space.from_jsonable(json_roundtripped)
    (sample_1_prime, sample_2_prime) = samples_after_roundtrip
    # Compare via the jsonable form, since raw samples may be numpy arrays.
    s1 = space.to_jsonable([sample_1])
    s1p = space.to_jsonable([sample_1_prime])
    s2 = space.to_jsonable([sample_2])
    s2p = space.to_jsonable([sample_2_prime])
    assert (s1 == s1p), 'Expected {} to equal {}'.format(s1, s1p)
    assert (s2 == s2p), 'Expected {} to equal {}'.format(s2, s2p)
def trainfxn(trainer, model, dataloader, criterion, optimizer, lr_scheduler, epoch, args, num_classes, logger, **kwargs):
    """Run one training epoch; supports clean and adversarial training.

    trainer: one of 'baseline', 'fgsm', 'pgd', 'trades'.  For adversarial
    trainers, both clean and adversarial accuracies are tracked.  Returns a
    dict of epoch-average metrics; top-k is 5 when num_classes >= 5, else 2.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4f')
    top1 = AverageMeter('', ':6.2f')
    # NOTE(review): both branches construct identical meters — the label
    # (first argument) was probably meant to differ (e.g. Acc@5 vs Acc@2);
    # confirm upstream.
    if (num_classes >= 5):
        top5 = AverageMeter('', ':6.2f')
    else:
        top5 = AverageMeter('', ':6.2f')
    if (trainer in ['pgd', 'fgsm', 'trades']):
        # Extra meters for accuracy on adversarial examples.
        top1_adv = AverageMeter('', ':6.2f')
        if (num_classes >= 5):
            top5_adv = AverageMeter('', ':6.2f')
        else:
            top5_adv = AverageMeter('', ':6.2f')
        progress = ProgressMeter(len(dataloader), [batch_time, data_time, losses, top1, top5, top1_adv, top5_adv], prefix='Epoch: [{}]'.format(epoch))
    elif (trainer == 'baseline'):
        progress = ProgressMeter(len(dataloader), [batch_time, data_time, losses, top1, top5], prefix='Epoch: [{}]'.format(epoch))
    else:
        raise ValueError(f'trainer {trainer} not supported')
    model.train()
    end = time.time()
    for (i, (images, targets)) in enumerate(dataloader):
        data_time.update((time.time() - end))
        # Log batch/setup details once per epoch on the main rank.
        if ((i == 0) and (args.rank == 0)):
            logger.info((((f'Batch images shape: {images.shape}, targets shape: {targets.shape}, ' + f'World-size: {args.world_size}, Effective batch size: {(args.world_size * len(images))}, ') + f"Learning rate (epoch {epoch}/{args.epochs}): {optimizer.param_groups[0]['lr']:.5f}, ") + f'pixel range: {[images.min().item(), images.max().item()]}'))
        (images, targets) = (images.cuda(args.gpu, non_blocking=True), targets.cuda(args.gpu, non_blocking=True))
        logits = model(images)
        (acc1, acc5) = accuracy(logits, targets, topk=(1, (5 if (num_classes >= 5) else 2)))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        if (trainer in ['fgsm', 'pgd', 'trades']):
            # Adversarial loss; also returns adversarial logits for metrics.
            (logits_adv, loss) = get_adversarial_loss(trainer, model, images, targets, logits, criterion, optimizer, args)
            (acc1_adv, acc5_adv) = accuracy(logits_adv, targets, topk=(1, (5 if (num_classes >= 5) else 2)))
            top1_adv.update(acc1_adv[0], images.size(0))
            top5_adv.update(acc5_adv[0], images.size(0))
        elif (trainer in ['baseline']):
            loss = criterion(logits, targets)
        losses.update(loss.item(), images.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        lr_scheduler.step()  # per-batch LR schedule
        batch_time.update((time.time() - end))
        end = time.time()
        if (((i % args.print_freq) == 0) and (args.rank == 0)):
            progress.display(i)
    if (trainer in ['fgsm', 'pgd', 'trades']):
        result = {'top1': top1.avg, f'top{(5 if (num_classes >= 5) else 2)}': top5.avg, 'top1_adv': top1_adv.avg, f'top{(5 if (num_classes >= 5) else 2)}_adv': top5_adv.avg}
    elif (trainer in ['baseline']):
        result = {'top1': top1.avg, f'top{(5 if (num_classes >= 5) else 2)}': top5.avg}
    return result
class JTMPN(nn.Module):
    """Junction-Tree Message Passing Network over candidate molecular graphs.

    Atom/bond message passing whose bond messages are augmented with
    incoming junction-tree messages (``tree_mess``), producing one pooled
    vector per candidate molecule.

    Fixed: Python 2 iteration idioms (``dict.iteritems`` and ``xrange``)
    that raise AttributeError/NameError on Python 3 were replaced with
    ``items`` and ``range``; behavior is otherwise unchanged.
    """

    def __init__(self, hidden_size, depth):
        super(JTMPN, self).__init__()
        self.hidden_size = hidden_size
        self.depth = depth  # number of message-passing iterations
        # Bond-input, message-update, and atom-output projections.
        self.W_i = nn.Linear((ATOM_FDIM + BOND_FDIM), hidden_size, bias=False)
        self.W_h = nn.Linear(hidden_size, hidden_size, bias=False)
        self.W_o = nn.Linear((ATOM_FDIM + hidden_size), hidden_size)

    def forward(self, cand_batch, tree_mess):
        """Encode (mol, all_nodes, ctr_node) candidates into (B, hidden) vectors."""
        (fatoms, fbonds) = ([], [])
        (in_bonds, all_bonds) = ([], [])
        # all_mess[0] is a zero vector used as padding for index 0.
        (mess_dict, all_mess) = ({}, [create_var(torch.zeros(self.hidden_size))])
        total_atoms = 0
        scope = []
        # Register junction-tree messages so bonds can receive them.
        for (e, vec) in tree_mess.items():
            mess_dict[e] = len(all_mess)
            all_mess.append(vec)
        for (mol, all_nodes, ctr_node) in cand_batch:
            n_atoms = mol.GetNumAtoms()
            ctr_bid = ctr_node.idx
            for atom in mol.GetAtoms():
                fatoms.append(atom_features(atom))
                in_bonds.append([])
            for bond in mol.GetBonds():
                a1 = bond.GetBeginAtom()
                a2 = bond.GetEndAtom()
                # Global atom indices within the batch.
                x = (a1.GetIdx() + total_atoms)
                y = (a2.GetIdx() + total_atoms)
                # Atom-map numbers link atoms back to junction-tree nodes.
                (x_nid, y_nid) = (a1.GetAtomMapNum(), a2.GetAtomMapNum())
                x_bid = (all_nodes[(x_nid - 1)].idx if (x_nid > 0) else (- 1))
                y_bid = (all_nodes[(y_nid - 1)].idx if (y_nid > 0) else (- 1))
                bfeature = bond_features(bond)
                # Directed bond x->y; bond ids are offset by len(all_mess).
                b = (len(all_mess) + len(all_bonds))
                all_bonds.append((x, y))
                fbonds.append(torch.cat([fatoms[x], bfeature], 0))
                in_bonds[y].append(b)
                # Reverse directed bond y->x.
                b = (len(all_mess) + len(all_bonds))
                all_bonds.append((y, x))
                fbonds.append(torch.cat([fatoms[y], bfeature], 0))
                in_bonds[x].append(b)
                # Inject tree messages between distinct junction-tree nodes.
                if ((x_bid >= 0) and (y_bid >= 0) and (x_bid != y_bid)):
                    if ((x_bid, y_bid) in mess_dict):
                        mess_idx = mess_dict[(x_bid, y_bid)]
                        in_bonds[y].append(mess_idx)
                    if ((y_bid, x_bid) in mess_dict):
                        mess_idx = mess_dict[(y_bid, x_bid)]
                        in_bonds[x].append(mess_idx)
            scope.append((total_atoms, n_atoms))
            total_atoms += n_atoms
        total_bonds = len(all_bonds)
        total_mess = len(all_mess)
        fatoms = torch.stack(fatoms, 0)
        fbonds = torch.stack(fbonds, 0)
        # agraph[a]/bgraph[b]: zero-padded incoming-message index tables.
        agraph = torch.zeros(total_atoms, MAX_NB).long()
        bgraph = torch.zeros(total_bonds, MAX_NB).long()
        tree_message = torch.stack(all_mess, dim=0)
        for a in range(total_atoms):
            for (i, b) in enumerate(in_bonds[a]):
                agraph[(a, i)] = b
        for b1 in range(total_bonds):
            (x, y) = all_bonds[b1]
            for (i, b2) in enumerate(in_bonds[x]):
                # Keep tree messages (b2 < total_mess) always; for bond
                # messages, exclude the reverse of b1 (no backtracking).
                if ((b2 < total_mess) or (all_bonds[(b2 - total_mess)][0] != y)):
                    bgraph[(b1, i)] = b2
        fatoms = create_var(fatoms)
        fbonds = create_var(fbonds)
        agraph = create_var(agraph)
        bgraph = create_var(bgraph)
        binput = self.W_i(fbonds)
        graph_message = nn.ReLU()(binput)
        # Loopy-BP-style bond-message updates for depth-1 iterations.
        for i in range((self.depth - 1)):
            message = torch.cat([tree_message, graph_message], dim=0)
            nei_message = index_select_ND(message, 0, bgraph)
            nei_message = nei_message.sum(dim=1)
            nei_message = self.W_h(nei_message)
            graph_message = nn.ReLU()((binput + nei_message))
        # Final readout: aggregate bond messages into atom hidden states.
        message = torch.cat([tree_message, graph_message], dim=0)
        nei_message = index_select_ND(message, 0, agraph)
        nei_message = nei_message.sum(dim=1)
        ainput = torch.cat([fatoms, nei_message], dim=1)
        atom_hiddens = nn.ReLU()(self.W_o(ainput))
        # Mean-pool atom vectors per molecule (scope = (start, length)).
        mol_vecs = []
        for (st, le) in scope:
            mol_vec = (atom_hiddens.narrow(0, st, le).sum(dim=0) / le)
            mol_vecs.append(mol_vec)
        mol_vecs = torch.stack(mol_vecs, dim=0)
        return mol_vecs
class ResBlock(nn.Module):
    """Residual block wrapping a pre-activation bottleneck.

    When the shape changes (different channel count or stride != 1) the
    identity branch is projected with a 1x1 conv applied to the block's
    pre-activation features; otherwise the raw input is used.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        self.body = PreResBottleneck(in_channels=in_channels, out_channels=out_channels, stride=stride)
        if self.resize_identity:
            self.identity_conv = conv1x1(in_channels=in_channels, out_channels=out_channels, stride=stride)

    def forward(self, x):
        """Return body(x) plus the (possibly projected) identity branch."""
        raw_input = x
        x, x_pre_activ = self.body(x)
        shortcut = self.identity_conv(x_pre_activ) if self.resize_identity else raw_input
        return x + shortcut
def PrintModelFree(mfIndi, mbIndi):
    """Print '[index]\\tvalue' for each non-NaN entry of *mfIndi*, then a blank line.

    ``mbIndi`` is accepted for interface compatibility but unused here.
    Fixed: the Python 2 ``xrange`` call (a NameError on Python 3) was
    replaced with ``enumerate``; output is unchanged.
    """
    for i, value in enumerate(mfIndi):
        if (not np.isnan(value)):
            print(('[%d]\t%f' % (i, value)))
    print('\n')
def init(novel_type, description, request: gr.Request):
    """Gradio handler: bootstrap a new novel-writing session.

    Generates the initial three paragraphs, a summary, and three follow-up
    instructions from the LLM, caches them per browser cookie, and returns
    the initial widget values.
    NOTE(review): the non-English branches of the lang_opt conditionals
    appear to have lost their (non-ASCII) label text during extraction —
    confirm against the original source.
    """
    if (novel_type == ''):
        novel_type = ('Science Fiction' if ('en' == lang_opt) else '')
    global _CACHE
    # Use the (truncated) cookie as a per-user session key.
    cookie = request.headers['cookie']
    cookie = cookie.split('; _gat_gtag')[0]
    init_paragraphs = get_init(text=init_prompt(novel_type, description), model=llm_model, tokenizer=llm_tokenizer)
    start_input_to_human = {'output_paragraph': init_paragraphs['Paragraph 3'], 'input_paragraph': '\n\n'.join([init_paragraphs['Paragraph 1'], init_paragraphs['Paragraph 2'], init_paragraphs['Paragraph 3']]), 'output_memory': init_paragraphs['Summary'], 'output_instruction': [init_paragraphs['Instruction 1'], init_paragraphs['Instruction 2'], init_paragraphs['Instruction 3']]}
    _CACHE[cookie] = {'start_input_to_human': start_input_to_human, 'init_paragraphs': init_paragraphs}
    written_paras = (f'''Title: {init_paragraphs['name']}
Outline: {init_paragraphs['Outline']}
Paragraphs:
{start_input_to_human['input_paragraph']}''' if ('en' == lang_opt) else f''': {init_paragraphs['name']}
: {init_paragraphs['Outline']}
:
{start_input_to_human['input_paragraph']}''')
    long_memory = parse_instructions([init_paragraphs['Paragraph 1'], init_paragraphs['Paragraph 2'], init_paragraphs['Paragraph 3']])
    return (start_input_to_human['output_memory'], long_memory, written_paras, init_paragraphs['Instruction 1'], init_paragraphs['Instruction 2'], init_paragraphs['Instruction 3'])
# _module  (extraction residue of a lost decorator line — likely mmdetection's
# ``@DATASETS.register_module``; confirm against upstream)
class CityscapesDataset(CocoDataset):
    """COCO-style dataset for Cityscapes instance segmentation (mmdetection)."""

    CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle')

    def _filter_imgs(self, min_size=32):
        """Return indices of images that have usable annotations and whose
        shorter side is at least ``min_size``."""
        valid_inds = []
        ids_with_ann = set((_['image_id'] for _ in self.coco.anns.values()))
        for (i, img_info) in enumerate(self.img_infos):
            img_id = img_info['id']
            ann_ids = self.coco.getAnnIds(imgIds=[img_id])
            ann_info = self.coco.loadAnns(ann_ids)
            all_iscrowd = all([_['iscrowd'] for _ in ann_info])
            # Drop images with no annotations (or only crowd regions)
            # when filter_empty_gt is enabled.
            if (self.filter_empty_gt and ((self.img_ids[i] not in ids_with_ann) or all_iscrowd)):
                continue
            if (min(img_info['width'], img_info['height']) >= min_size):
                valid_inds.append(i)
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        """Convert raw COCO annotations into bbox/label/mask arrays.

        Crowd boxes go to bboxes_ignore; degenerate boxes (area <= 0 or a
        side < 1px) are dropped.  Boxes use inclusive [x1, y1, x2, y2].
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for (i, ann) in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            (x1, y1, w, h) = ann['bbox']
            if ((ann['area'] <= 0) or (w < 1) or (h < 1)):
                continue
            bbox = [x1, y1, ((x1 + w) - 1), ((y1 + h) - 1)]
            if ann.get('iscrowd', False):
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann['segmentation'])
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        ann = dict(bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=img_info['segm_file'])
        return ann

    def results2txt(self, results, outfile_prefix):
        """Write predictions in the official Cityscapes txt format.

        For each image, writes one txt listing "<mask png> <class id> <score>"
        per instance plus the instance-mask PNGs.  Returns the txt paths.
        """
        try:
            import cityscapesscripts.helpers.labels as CSLabels
        except ImportError:
            # NOTE(review): 'citscapesscripts' in the message is a typo for
            # 'cityscapesscripts' (runtime text deliberately left as-is here).
            raise ImportError('Please run "pip install citscapesscripts" to install cityscapesscripts first.')
        result_files = []
        os.makedirs(outfile_prefix, exist_ok=True)
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            filename = self.img_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]
            pred_txt = osp.join(outfile_prefix, (basename + '_pred.txt'))
            (bbox_result, segm_result) = result
            # Flatten per-class results into parallel bbox/segm/label arrays.
            bboxes = np.vstack(bbox_result)
            segms = mmcv.concat_list(segm_result)
            labels = [np.full(bbox.shape[0], i, dtype=np.int32) for (i, bbox) in enumerate(bbox_result)]
            labels = np.concatenate(labels)
            assert (len(bboxes) == len(segms) == len(labels))
            num_instances = len(bboxes)
            prog_bar.update()
            with open(pred_txt, 'w') as fout:
                for i in range(num_instances):
                    pred_class = labels[i]
                    classes = self.CLASSES[pred_class]
                    class_id = CSLabels.name2label[classes].id
                    score = bboxes[(i, (- 1))]  # last bbox column is the score
                    mask = maskUtils.decode(segms[i]).astype(np.uint8)
                    png_filename = osp.join(outfile_prefix, (basename + '_{}_{}.png'.format(i, classes)))
                    mmcv.imwrite(mask, png_filename)
                    fout.write('{} {} {}\n'.format(osp.basename(png_filename), class_id, score))
            result_files.append(pred_txt)
        return result_files

    def format_results(self, results, txtfile_prefix=None):
        """Dump results to txt files, using a temp dir when no prefix is given.

        Returns (result_files, tmp_dir); tmp_dir is None when the caller
        provided txtfile_prefix, and must otherwise be cleaned up by the caller.
        NOTE(review): the two asserts are duplicated verbatim in the original.
        """
        assert isinstance(results, list), 'results must be a list'
        assert (len(results) == len(self)), 'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self))
        assert isinstance(results, list), 'results must be a list'
        assert (len(results) == len(self)), 'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self))
        if (txtfile_prefix is None):
            tmp_dir = tempfile.TemporaryDirectory()
            txtfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2txt(results, txtfile_prefix)
        return (result_files, tmp_dir)

    def evaluate(self, results, metric='bbox', logger=None, outfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluate with Cityscapes metrics and/or standard COCO metrics."""
        eval_results = dict()
        metrics = (metric.copy() if isinstance(metric, list) else [metric])
        if ('cityscapes' in metrics):
            eval_results.update(self._evaluate_cityscapes(results, outfile_prefix, logger))
            metrics.remove('cityscapes')
        if (len(metrics) > 0):
            # Delegate any remaining metrics to a plain CocoDataset evaluation.
            self_coco = CocoDataset(self.ann_file, self.pipeline.transforms, self.data_root, self.img_prefix, self.seg_prefix, self.proposal_file, self.test_mode, self.filter_empty_gt)
            eval_results.update(self_coco.evaluate(results, metrics, logger, outfile_prefix, classwise, proposal_nums, iou_thrs))
        return eval_results

    def _evaluate_cityscapes(self, results, txtfile_prefix, logger):
        """Run the official cityscapesscripts instance-level evaluation."""
        try:
            import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval
        except ImportError:
            raise ImportError('Please run "pip install citscapesscripts" to install cityscapesscripts first.')
        msg = 'Evaluating in Cityscapes style'
        if (logger is None):
            msg = ('\n' + msg)
        print_log(msg, logger=logger)
        (result_files, tmp_dir) = self.format_results(results, txtfile_prefix)
        if (tmp_dir is None):
            result_dir = osp.join(txtfile_prefix, 'results')
        else:
            result_dir = osp.join(tmp_dir.name, 'results')
        eval_results = {}
        print_log('Evaluating results under {} ...'.format(result_dir), logger=logger)
        # Configure the global CSEval args namespace for this run.
        CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')
        CSEval.args.predictionPath = os.path.abspath(result_dir)
        CSEval.args.predictionWalk = None
        CSEval.args.JSONOutput = False
        CSEval.args.colorized = False
        CSEval.args.gtInstancesFile = os.path.join(result_dir, 'gtInstances.json')
        CSEval.args.groundTruthSearch = os.path.join(self.img_prefix.replace('leftImg8bit', 'gtFine'), '*/*_gtFine_instanceIds.png')
        groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)
        assert len(groundTruthImgList), 'Cannot find ground truth images in {}.'.format(CSEval.args.groundTruthSearch)
        predictionImgList = []
        for gt in groundTruthImgList:
            predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))
        CSEval_results = CSEval.evaluateImgLists(predictionImgList, groundTruthImgList, CSEval.args)['averages']
        eval_results['mAP'] = CSEval_results['allAp']
        # NOTE(review): the empty-string key below likely lost its name
        # (e.g. 'AP@50%') during extraction — confirm upstream.
        eval_results[''] = CSEval_results['allAp50%']
        if (tmp_dir is not None):
            tmp_dir.cleanup()
        return eval_results
def main():
    """Entry point for ReID baseline inference: parse CLI args, assemble the
    config, build the model, load weights, and run inference on the val set."""
    parser = argparse.ArgumentParser(description='ReID Baseline Inference')
    parser.add_argument('--config_file', default='./configs/debug.yml', help='path to config file', type=str)
    parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # WORLD_SIZE is set by distributed launchers; default to a single GPU otherwise.
    num_gpus = (int(os.environ['WORLD_SIZE']) if ('WORLD_SIZE' in os.environ) else 1)
    if (args.config_file != ''):
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # Config is immutable from this point on.
    cfg.freeze()
    output_dir = cfg.OUTPUT_DIR
    if (output_dir and (not os.path.exists(output_dir))):
        mkdir(output_dir)
    logger = setup_logger('reid_baseline', output_dir, 0)
    logger.info('Using {} GPUS'.format(num_gpus))
    logger.info(args)
    if (args.config_file != ''):
        logger.info('Loaded configuration file {}'.format(args.config_file))
    logger.info('Running with config:\n{}'.format(cfg))
    if (cfg.MODEL.DEVICE == 'cuda'):
        # Must be set before any CUDA context is created by the loaders/model below.
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True
    (train_loader, val_loader, num_query, num_classes, dataset) = make_data_loader(cfg)
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)
    inference(cfg, model, val_loader, num_query, dataset)
class ParticleNetWrapper(torch.nn.Module):
    """Thin adapter around ParticleNet with a four-argument forward signature.

    The lorentz_vectors argument is accepted for interface compatibility but is
    not consumed — the wrapped ParticleNet only takes points/features/mask.
    """

    def __init__(self, **kwargs) -> None:
        super().__init__()
        self.mod = ParticleNet(**kwargs)

    def forward(self, points, features, lorentz_vectors, mask):
        # lorentz_vectors is intentionally unused here (see class docstring).
        output = self.mod(points, features, mask)
        return output
class MetadataCache(Cache):
    """Cache specialization that fetches page metadata through a browser session."""

    # NOTE(review): `source` names its first parameter `cls` but carries no
    # @classmethod decorator — presumably the Cache base class supplies the
    # class object when invoking it; confirm before adding the decorator.
    def source(cls, url):
        """Open `url` in a fresh Browser, extract metadata, and archive the page.

        An MHTML snapshot is stored via MHTMLCache before the browser closes.
        Returns the extracted metadata.
        """
        print('Getting metadata from source')
        browser = Browser(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'random_downloads'))
        browser.get(url)
        print('URL gotten')
        metadata = cls.metadata_from_result_page(browser)
        # Snapshot the page before tearing down the browser session.
        MHTMLCache.add_from_browser(url, browser)
        browser.close()
        return metadata
class AddSubNode(ExprNode):
    """IR node for the combined '+-' operator; can be expanded into the two
    concrete AddNode/SubNode alternatives."""

    def __init__(self, left=None, right=None, parse_info=None, raw_text=None, op='+-'):
        super().__init__(IRNodeType.AddSub, parse_info=parse_info, raw_text=raw_text)
        self.left = left
        self.right = right
        self.op = op

    def split_node(self):
        """Return [AddNode, SubNode] built from this node's operands, both
        attached to this node's parent."""
        variants = [AddNode(self.left, self.right), SubNode(self.left, self.right)]
        for variant in variants:
            variant.set_parent(self.parent())
        return variants

    def get_child(self, node_type):
        """Return the first operand (or descendant, left subtree first) matching
        node_type, or None when no match exists."""
        for operand in (self.left, self.right):
            if operand.is_node(node_type):
                return operand
        found = self.left.get_child(node_type)
        if found is None:
            found = self.right.get_child(node_type)
        return found
def certificate(domain=LOCALHOST, country=None, state=None, city=None, company=None, contact=None, signed=True, **kwargs):
    """Generate an RSA key and a certificate (or CSR) by driving the openssl CLI.

    Args:
        domain: certificate common name (falls back to LOCALHOST).
        country/state/city/company/contact: subject fields ('.' when omitted).
        signed: when True, produce a self-signed x509 cert valid 365 days;
            otherwise a plain certificate request.
        **kwargs: 'encryption' (key size, default 2048) and optionally 'key'
            (pre-existing key bytes to reuse).

    Returns:
        tuple: (key_bytes, certificate_bytes).
    """
    pipe = subprocess.PIPE
    cmd = ('openssl', 'genrsa', '%s' % kwargs.get('encryption', 2048))
    proc = subprocess.Popen(cmd, stdin=pipe, stdout=pipe, stderr=pipe)
    key = kwargs.get('key') or proc.communicate()[0]
    keyfile = tempfile.NamedTemporaryFile(delete=False)
    try:
        keyfile.write(key)
        keyfile.close()
        cmd = ('openssl', 'req', '-new', '-key', keyfile.name)
        if signed:
            cmd += ('-x509', '-days', '365')
        proc = subprocess.Popen(cmd, stdin=pipe, stdout=pipe, stderr=pipe)
        answers = '%s\n%s\n%s\n%s\n.\n%s\n%s\n\n\n' % (
            country or '.', state or '.', city or '.', company or '.',
            domain or LOCALHOST, contact or '.')
        # Bug fix: Popen pipes are binary here, so communicate() requires
        # bytes; the original passed a str, which raises TypeError on Python 3.
        cert = proc.communicate(answers.encode())[0]
    finally:
        # Robustness: always remove the temporary key file, even on failure.
        os.unlink(keyfile.name)
    return (key, cert)
def dice(input, target, ignore_index=None, smooth=1.0):
    """Soft Dice coefficient between two tensors, flattened to 1-D.

    Args:
        input: prediction tensor.
        target: ground-truth tensor (same number of elements as input).
        ignore_index: optional target value whose positions are zeroed out in
            both tensors before the computation.
        smooth: Laplace smoothing constant (generalized from the previously
            hard-coded 1.0; default preserves the original behavior).

    Returns:
        scalar tensor: (2*|A∩B| + smooth) / (|A| + |B| + smooth).
    """
    iflat = input.clone().view(-1)
    tflat = target.clone().view(-1)
    if ignore_index is not None:
        # Zero out ignored positions in both tensors (clones, so inputs are safe).
        mask = tflat == ignore_index
        tflat[mask] = 0
        iflat[mask] = 0
    intersection = (iflat * tflat).sum()
    return (2.0 * intersection + smooth) / (iflat.sum() + tflat.sum() + smooth)
class SubsetSum(BinaryProblem):
    """Bi-objective subset-sum problem: maximize the selected weight sum
    (stored negated, since objective 0 is maximized) while minimizing the
    number of selected items; sums above capacity C are penalized."""

    def __init__(self, C: int, W: list):
        super(SubsetSum, self).__init__()
        self.C = C  # capacity bound on the selected sum
        self.W = W  # item weights
        self.number_of_bits = len(self.W)
        self.number_of_objectives = 2
        self.number_of_variables = 1
        self.number_of_constraints = 0
        self.obj_directions = [self.MAXIMIZE, self.MINIMIZE]
        self.obj_labels = ['Sum', 'No. of Objects']

    def evaluate(self, solution: BinarySolution) -> BinarySolution:
        """Score a bitstring: objective 0 = -(penalized sum), objective 1 = #items."""
        chosen = [self.W[i] for i, picked in enumerate(solution.variables[0]) if picked]
        total_sum = 0.0 + sum(chosen)
        number_of_objects = len(chosen)
        if total_sum > self.C:
            # Penalize capacity overshoot proportionally, clamped at zero.
            total_sum = max(self.C - total_sum * 0.1, 0.0)
        solution.objectives[0] = -1.0 * total_sum
        solution.objectives[1] = number_of_objects
        return solution

    def create_solution(self) -> BinarySolution:
        """Draw a uniformly random bitstring solution."""
        candidate = BinarySolution(number_of_variables=self.number_of_variables, number_of_objectives=self.number_of_objectives)
        candidate.variables[0] = [random.randint(0, 1) == 0 for _ in range(self.number_of_bits)]
        return candidate

    def name(self) -> str:
        return 'Subset Sum'
def make_image(tensor):
    """Convert a batch of float images in [-1, 1] to uint8 numpy arrays.

    Expects a 4-D tensor (N, C, H, W) — the permute(0, 2, 3, 1) fixes that rank —
    and returns a (N, H, W, C) uint8 numpy array on the CPU.

    Bug fix: the original used clamp_ (in-place) on the detached view, which
    silently mutated the caller's tensor; clamp (out-of-place) keeps it intact.
    """
    scaled = tensor.detach().clamp(min=-1, max=1).add(1).div_(2).mul_(255)
    return scaled.type(torch.uint8).permute(0, 2, 3, 1).to('cpu').numpy()
def main(argv):
    """Train and evaluate a 2-layer bidirectional-GRU + MLP regressor on
    pickled cascade-embedding sequences.

    Loads train/val/test pickles from FLAGS.input, pads each cascade to
    max_seq with zero vectors, trains with MSLE loss, applies early stopping
    keyed on validation MSLE (FLAGS.patience), and prints per-epoch metrics.
    """
    epochs = FLAGS.epochs
    batch_size = FLAGS.batch_size
    gru_units = (FLAGS.model_size * FLAGS.model_size_scale)
    emb_dim = FLAGS.emb_dim
    max_seq = FLAGS.max_seq
    patience = FLAGS.patience
    l2 = FLAGS.l2
    lr = FLAGS.lr
    with open((FLAGS.input + 'train.pkl'), 'rb') as f:
        (train, train_y) = pickle.load(f)
    with open((FLAGS.input + 'val.pkl'), 'rb') as f:
        (val, val_y) = pickle.load(f)
    with open((FLAGS.input + 'test.pkl'), 'rb') as f:
        (test, test_y) = pickle.load(f)
    # Optionally subsample the labeled training data (semi-supervised setting).
    train = divide_dataset(train, label_fractions=FLAGS.label_fraction)
    train_y = divide_dataset(train_y, label_fractions=FLAGS.label_fraction)
    dataset_info = (('# training samples {}\n' + '# validation samples {}\n') + '# test samples {}')
    print(dataset_info.format(len(train), len(val), len(test)))
    # Model: two stacked BiGRUs followed by a two-layer ReLU MLP regression head.
    inputs = tf.keras.layers.Input(shape=(max_seq, emb_dim))
    gru_1 = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(gru_units, return_sequences=True, kernel_regularizer=tf.keras.regularizers.l2(l2)))(inputs)
    gru_2 = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(gru_units, kernel_regularizer=tf.keras.regularizers.l2(l2)))(gru_1)
    mlp_1 = tf.keras.layers.Dense(gru_units, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(l2))(gru_2)
    mlp_2 = tf.keras.layers.Dense((gru_units // 2), activation='relu', kernel_regularizer=tf.keras.regularizers.l2(l2))(mlp_1)
    outputs = tf.keras.layers.Dense(1)(mlp_2)
    base_model = tf.keras.models.Model(inputs, outputs)
    base_model.summary()
    optimizer = tf.keras.optimizers.Adam(lr)
    loss_object = tf.keras.losses.MeanSquaredLogarithmicError()
    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_msle = tf.keras.metrics.MeanSquaredLogarithmicError(name='train_msle')
    val_loss = tf.keras.metrics.Mean(name='val_loss')
    # NOTE(review): this metric is named 'train_msle' — likely a copy-paste
    # slip, but renaming it would change any logged metric keys; confirm.
    val_msle = tf.keras.metrics.MeanSquaredLogarithmicError(name='train_msle')
    test_loss = tf.keras.metrics.Mean(name='test_loss')
    test_msle = tf.keras.metrics.MeanSquaredLogarithmicError(name='test_msle')
    def train_step(data, labels):
        # One optimization step on a padded batch.
        with tf.GradientTape() as tape:
            predictions = base_model(data, training=True)
            loss = loss_object(labels, predictions)
        gradients = tape.gradient(loss, base_model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, base_model.trainable_variables))
        train_loss(loss)
        train_msle(labels, predictions)
    def val_step(data, labels):
        predictions = base_model(data, training=False)
        v_loss = loss_object(labels, predictions)
        val_loss(v_loss)
        val_msle(labels, predictions)
    def test_step(data, labels):
        predictions = base_model(data, training=False)
        t_loss = loss_object(labels, predictions)
        test_loss(t_loss)
        test_msle(labels, predictions)
        return predictions
    best_val_msle = 1000
    save_predictions = list()
    for epoch in range(epochs):
        (train, train_y) = shuffle_two(train, train_y)
        time_start = time.time()
        train_loss.reset_states()
        train_msle.reset_states()
        val_loss.reset_states()
        val_msle.reset_states()
        test_loss.reset_states()
        test_msle.reset_states()
        for i in range(((len(train) // batch_size) + 1)):
            # deepcopy so the zero-padding below does not grow the stored cascades.
            batch_train = copy.deepcopy(train[(batch_size * i):((batch_size * i) + batch_size)])
            batch_train_labels = train_y[(batch_size * i):((batch_size * i) + batch_size)]
            for batch_cascade in batch_train:
                while (len(batch_cascade) < max_seq):
                    batch_cascade.append(np.zeros(emb_dim))
            train_step(np.array(batch_train), np.array(batch_train_labels))
        for i in range(((len(val) // batch_size) + 1)):
            batch_val = copy.deepcopy(val[(batch_size * i):((batch_size * i) + batch_size)])
            batch_val_labels = val_y[(batch_size * i):((batch_size * i) + batch_size)]
            for batch_cascade in batch_val:
                while (len(batch_cascade) < max_seq):
                    batch_cascade.append(np.zeros(emb_dim))
            val_step(np.array(batch_val), np.array(batch_val_labels))
        pred = list()
        for i in range(((len(test) // batch_size) + 1)):
            batch_test = copy.deepcopy(test[(batch_size * i):((batch_size * i) + batch_size)])
            batch_test_labels = test_y[(batch_size * i):((batch_size * i) + batch_size)]
            for batch_cascade in batch_test:
                while (len(batch_cascade) < max_seq):
                    batch_cascade.append(np.zeros(emb_dim))
            batch_predictions = test_step(np.array(batch_test), np.array(batch_test_labels))
            pred.extend(batch_predictions)
        pred = [float(pre) for pre in pred]
        # MSLE in log2 space with values clamped to >= 1 before the log.
        report_loss = np.mean(np.square((np.log2(np.array([(pre if (pre >= 1) else 1) for pre in pred])) - np.log2(np.array([(tru if (tru >= 1) else 1) for tru in list(test_y)])))))
        if (val_msle.result() < best_val_msle):
            # New best on validation: keep these test predictions and reset patience.
            best_val_msle = val_msle.result()
            save_predictions = pred
            patience = FLAGS.patience
        template = 'Epoch {:2}, Time: {:.3f}s, Train Loss: {:.3f}, Train MSLE: {:.3f}, Val Loss: {:.3f}, Val MSLE: {:.3f}, Test Loss: {:.3f}, Test MSLE: {:.3f}, LOG2 MSLE: {:.3f}'
        print(template.format((epoch + 1), (time.time() - time_start), train_loss.result(), train_msle.result(), val_loss.result(), val_msle.result(), test_loss.result(), test_msle.result(), report_loss))
        if (patience == 0):
            # Early stop: report the metric for the best-validation predictions.
            report_loss = np.mean(np.square((np.log2(np.array([(pre if (pre >= 1) else 1) for pre in save_predictions])) - np.log2(np.array([(tru if (tru >= 1) else 1) for tru in list(test_y)])))))
            print('Predictions saved! Best Test MSLE: {}'.format(report_loss))
            break
        else:
            patience -= 1
    # NOTE(review): `start_time` is never defined in this function (per-epoch
    # timing uses `time_start`); presumably it is a module-level global set by
    # the caller — confirm, otherwise this line raises NameError.
    print('Finished! Time used: {:.3f}min'.format(((time.time() - start_time) / 60)))
def get_datamodule(datamodule):
    """Resolve a datamodule name (case-insensitive) to its DataModule class.

    Args:
        datamodule (str): name such as 'cifar10', 'mnist', 'imagenet', ...

    Returns:
        The matching DataModule class.

    Raises:
        ValueError: if the name is not recognized.
    """
    datamodule = datamodule.lower()
    # Fix: the original mixed a stand-alone `if` into the `elif` chain;
    # normalized to a single consistent chain (behavior unchanged).
    if datamodule == 'cifar10':
        return Cifar10DataModule
    elif datamodule == 'cifar100':
        return Cifar100DataModule
    elif datamodule == 'mnist':
        return MnistDataModule
    elif datamodule == 'imagenet':
        return ImagenetDataModule
    elif datamodule == 'stl10':
        return STL10DataModule
    elif datamodule == 'stl10_unlabeled':
        return STL10UnlabeledDataModule
    elif datamodule == 'coco':
        return CocoClipDataModule
    elif datamodule == 'food101':
        return Food101DataModule
    elif datamodule == 'cars196':
        return Cars196DataModule
    elif datamodule == 'pets37':
        return Pets37DataModule
    elif datamodule == 'caltech101':
        return Caltech101DataModule
    elif datamodule == 'pcam':
        return PCamDataModule
    elif datamodule == 'galaxy':
        return GalaxyDataModule
    elif datamodule == 'banana':
        return BananaDataModule
    else:
        # Typo fix: 'Unkown' -> 'Unknown'.
        raise ValueError(f'Unknown datamodule: {datamodule}')
class ReadWork():
    """Handle for one pending cached read shared between a producer thread
    (which fills the cache and flips the done flag) and a consumer."""

    def __init__(self, read_done_condition, key, read_done_flag, cache_lock, cache):
        # Condition variable signalled when the read for `key` completes.
        self.read_done_condition = read_done_condition
        self.key = key
        # Shared mapping key -> bool, set True by the producer when data is ready.
        self.read_done_flag = read_done_flag
        # Lock guarding `cache`, a mapping key -> (tensor, ref_count).
        self.cache_lock = cache_lock
        self.cache = cache

    def wait(self):
        """Block until the read for self.key is flagged done, then release one
        cache reference; the entry is evicted when its ref_count reaches zero.

        NOTE(review): the cached tensor is unpacked but not returned —
        presumably callers fetch it from the cache separately; confirm.
        """
        with self.read_done_condition:
            # Re-check the flag in a loop to guard against spurious wake-ups.
            while (not self.read_done_flag[self.key]):
                self.read_done_condition.wait()
        with self.cache_lock:
            (tensor, ref_count) = self.cache[self.key]
            ref_count -= 1
            if (ref_count == 0):
                del self.cache[self.key]
            else:
                self.cache[self.key] = (tensor, ref_count)
def main():
    """Start the Qt application hosting the Cityscapes viewer and block until exit."""
    app = QtGui.QApplication(sys.argv)
    # NOTE(review): `tool` is never show()n here — presumably CityscapesViewer
    # displays itself in its constructor; confirm.
    tool = CityscapesViewer()
    sys.exit(app.exec_())
class MLPCategoricalActor(Actor):
    """Actor with a discrete (categorical) policy: an MLP maps observations to
    per-action logits."""

    def __init__(self, obs_dim, act_dim, hidden_sizes, activation):
        super().__init__()
        layer_sizes = [obs_dim, *hidden_sizes, act_dim]
        self.logits_net = mlp(layer_sizes, activation)

    def _distribution(self, obs):
        """Build the Categorical action distribution for a batch of observations."""
        return Categorical(logits=self.logits_net(obs))

    def _log_prob_from_distribution(self, pi, act):
        """Log-probability of actions `act` under distribution `pi`."""
        return pi.log_prob(act)

    def _d_kl(self, obs, old_mu, old_log_std, device):
        # KL in (mu, log_std) form only applies to the Gaussian actor variant.
        raise NotImplementedError
# NOTE(review): the bare `_operation` line in the original appears to be a
# decorator that lost its '@' during extraction; restored below — confirm
# against the upstream module.
@_operation
def div(a: torch.Tensor, b: torch.Tensor):
    """Divide complex tensor `a` by `b`, where `b` may be real or complex.

    Real divisor: delegates to div_cplx_real (b must have fewer dims than a).
    Complex divisor: uses a/b = a * conj(b) / |b|^2.
    """
    if is_real(b):
        if b.dim() >= a.dim():
            raise ValueError('Incorrect dimensions.')
        return div_cplx_real(a, b)
    return div_cplx_real(mult_conj(a, b), abs_sqr(b))
def test_bytes(doc):
    """Check byte-returning bindings and their generated docstring signature."""
    assert m.bytes_from_string().decode() == 'foo'
    assert m.bytes_from_str().decode() == 'bar'
    # The advertised return type depends on the Python major version.
    expected_type = 'bytes' if sys.version_info[0] == 3 else 'str'
    assert doc(m.bytes_from_str) == 'bytes_from_str() -> {}'.format(expected_type)
class AlbertPreTrainedModel():
    """Placeholder ("dummy") class that raises a helpful error when PyTorch is
    not installed.

    NOTE(review): unlike the DummyObject-based dummies elsewhere in this file,
    `from_pretrained` here is a plain instance method — in the upstream pattern
    it is usually a classmethod; confirm before changing.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def get_coco_api_from_dataset(dataset):
    """Unwrap nested torch Subsets (bounded depth) and return the underlying
    COCO or LVIS API object.

    Falls through (returning None implicitly) when nothing matches within the
    unwrapping budget.
    """
    attempts = 10  # bound on how many Subset layers we will peel off
    while attempts > 0:
        attempts -= 1
        if isinstance(dataset, torch.utils.data.Subset):
            dataset = dataset.dataset
        if isinstance(dataset, LvisDetectionBase):
            return dataset.lvis
        if isinstance(dataset, (torchvision.datasets.CocoDetection, CustomCocoDetection)):
            return dataset.coco
def features_dataset_resizer(features_dataset_class: Type[BaseFeaturesDataset], resize_factor: float):
    """Monkey-patch `get_features` on the given dataset class so that spatial
    features (skeletons and the three width channels) are scaled by
    resize_factor; the original result is deep-copied, never mutated.

    Returns the (patched) class for chaining.
    """
    original_get = features_dataset_class.get_features

    def scaled_get(*args):
        scaled = copy.deepcopy(original_get(*args))
        for key in ('skeletons', 'head_width', 'midbody_width', 'tail_width'):
            scaled[key] *= resize_factor
        return scaled

    features_dataset_class.get_features = scaled_get
    return features_dataset_class
class MobileNetV2(nn.Module):
    """MobileNetV2 classifier built from LinearBottleneck stages.

    Args:
        channels: per-stage lists of output channel counts.
        init_block_channels: channels of the stem conv block.
        final_block_channels: channels of the last 1x1 conv block.
        in_channels: input image channels (default 3).
        in_size: expected spatial input size (default 224x224).
        num_classes: classifier output dimension.
    """

    def __init__(self, channels, init_block_channels, final_block_channels, in_channels=3, in_size=(224, 224), num_classes=1000):
        super(MobileNetV2, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        # Stem: stride-2 3x3 conv with ReLU6.
        self.features.add_module('init_block', conv3x3_block(in_channels=in_channels, out_channels=init_block_channels, stride=2, activation='relu6'))
        in_channels = init_block_channels
        for (i, channels_per_stage) in enumerate(channels):
            stage = nn.Sequential()
            for (j, out_channels) in enumerate(channels_per_stage):
                # First unit of every stage except the first downsamples.
                stride = (2 if ((j == 0) and (i != 0)) else 1)
                # Only the very first unit of the network skips expansion.
                expansion = ((i != 0) or (j != 0))
                stage.add_module('unit{}'.format((j + 1)), LinearBottleneck(in_channels=in_channels, out_channels=out_channels, stride=stride, expansion=expansion))
                in_channels = out_channels
            self.features.add_module('stage{}'.format((i + 1)), stage)
        self.features.add_module('final_block', conv1x1_block(in_channels=in_channels, out_channels=final_block_channels, activation='relu6'))
        in_channels = final_block_channels
        self.features.add_module('final_pool', nn.AvgPool2d(kernel_size=7, stride=1))
        # Classifier implemented as a bias-free 1x1 conv; flattened in forward().
        self.output = conv1x1(in_channels=in_channels, out_channels=num_classes, bias=False)
        self._init_params()

    def _init_params(self):
        """Kaiming-uniform init for all conv weights; zero-init biases."""
        for (name, module) in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if (module.bias is not None):
                    init.constant_(module.bias, 0)

    def forward(self, x):
        """Return class logits of shape (N, num_classes)."""
        x = self.features(x)
        x = self.output(x)
        x = x.view(x.size(0), (- 1))
        return x
def penalties(module, reduction='sum'):
    """Yield every penalty value registered on `module`, discarding the names
    that named_penalties pairs with them."""
    for _name, value in named_penalties(module, reduction=reduction):
        yield value
def load_model_base(self, model_path: str, from_pretrained_kwargs: dict):
    """Load the tokenizer and a low-memory bigdl-llm causal LM from model_path.

    Returns (model, tokenizer).
    """
    print('Customized bigdl-llm loader')
    revision = from_pretrained_kwargs.get('revision', 'main')
    tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=self.use_fast_tokenizer, revision=revision)
    # Imported lazily: bigdl substitutes its own AutoModelForCausalLM.
    from bigdl.llm.transformers import AutoModelForCausalLM
    model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs)
    return (model, tokenizer)
class p_z(nn.Module):
    """Conditional prior network p(z | y, u): two dense branches encode y and u,
    a joint dense core emits the (mu, logvar) parameters, with logvar clamped
    to [-7, 7] via hardtanh."""

    def __init__(self, output_shape, input_shape):
        super().__init__()
        nc_y_in = input_shape[0][0]
        nc_u_in = input_shape[1][0]
        nc_out = 2 * output_shape[0]  # mu and logvar are concatenated channel-wise
        half = nc_out // 2
        self.y_nn = nn.Sequential(
            DenselyEncoder(in_channels=nc_y_in, out_channels=half, growth_rate=32, steps=5, scale_factor=1),
            nn.ELU(inplace=True),
        )
        self.u_nn = nn.Sequential(
            DenselyNetwork(in_channels=nc_u_in, out_channels=half, growth_rate=64, steps=3, blocks=3, act=True),
        )
        self.core_nn = nn.Sequential(
            DenselyNetwork(in_channels=nc_out, out_channels=nc_out, growth_rate=64, steps=3, blocks=3, act=None),
        )

    def forward(self, input):
        """input: pair (y, u). Returns (mu, clamped logvar)."""
        y, u = input[0], input[1]
        joint = torch.cat((self.y_nn(y), self.u_nn(u)), 1)
        mu, logvar = self.core_nn(joint).chunk(2, 1)
        return (mu, F.hardtanh(logvar, min_val=(- 7), max_val=7.0))
def run_daemon(bpf):
    """Install teardown signal handlers, seed the BPF link-local -> global
    address map, then poll kprobe events forever.

    Args:
        bpf: loaded BPF object exposing the 'link_local_table' map and
            kprobe_poll().
    """
    signal.signal(signal.SIGTERM, remove_rt)
    signal.signal(signal.SIGINT, remove_rt)
    for laddr, gaddr in LOCAL_GLOBAL_MAP.items():
        # Fix: removed a per-iteration lambda that was assigned to `_` and
        # never used (dead code in the original).
        logger.info('{}:{}'.format(laddr, gaddr))
        bpf['link_local_table'][ip_str_to_ct(laddr)] = ip_str_to_ct(gaddr)
    # Busy-poll with a short sleep to keep CPU usage bounded.
    while 1:
        bpf.kprobe_poll()
        sleep(0.01)
class GumbelBatchedGenerator():
    """Draw standard Gumbel(0, 1) samples via inverse transform: -log(-log(U)).

    The seed may be an int/None (a new random.Random is built around it) or an
    existing random.Random instance to share.
    """

    def __init__(self, seed=None):
        if isinstance(seed, random.Random):
            self.rng = seed
        else:
            self.rng = random.Random(seed)

    def __call__(self):
        """Return one Gumbel(0, 1) sample as a float."""
        # Bug fix: random() can return exactly 0.0, which made the inner
        # log raise ValueError; redraw until the uniform lies in (0, 1).
        u = self.rng.random()
        while u <= 0.0:
            u = self.rng.random()
        return -math.log(-math.log(u))
def import_models(models_dir, namespace):
    """Import every model module found under models_dir and, for registered
    models, publish a per-model argparse parser as `<model_name>_parser` in
    this module's globals."""
    for entry in os.listdir(models_dir):
        full_path = os.path.join(models_dir, entry)
        # Skip private/hidden entries and anything that is neither a .py file
        # nor a package directory.
        if entry.startswith('_') or entry.startswith('.'):
            continue
        if not (entry.endswith('.py') or os.path.isdir(full_path)):
            continue
        model_name = entry[:entry.find('.py')] if entry.endswith('.py') else entry
        importlib.import_module(namespace + '.' + model_name)
        if model_name in MODEL_REGISTRY:
            parser = argparse.ArgumentParser(add_help=False)
            arch_group = parser.add_argument_group('Named architectures')
            arch_group.add_argument('--arch', choices=ARCH_MODEL_INV_REGISTRY[model_name])
            extra_group = parser.add_argument_group('Additional command-line arguments')
            MODEL_REGISTRY[model_name].add_args(extra_group)
            globals()[model_name + '_parser'] = parser
def get_valid_mask(mask: np.ndarray):
    """Normalize a mask to a single-channel array with values scaled to [0, 1].

    3-D masks are collapsed to one luminance channel via PIL; masks whose max
    is exactly 255 are divided by 255, anything else is returned unchanged.
    """
    if mask.ndim == 3:
        # Collapse RGB(A) channel masks to a single grayscale channel.
        mask = np.array(Image.fromarray(mask).convert('L'))
    if mask.max() == 255:
        mask = mask / 255
    return mask
def test_squeeze_and_excitation_block_2d():
    """Shape-preservation smoke test for SqueezeAndExcitationBlock2D."""
    batch, channels = 10, 128
    inputs = torch.randn(batch, channels, 7, 7)
    block = SqueezeAndExcitationBlock2D(in_channels=channels, reduction=16)
    print(block)
    result = block(inputs)
    print(result.shape)
    # SE blocks re-weight channels, so the output shape must match the input.
    assert result.shape == (batch, channels, 7, 7)
class FilterResponseNorm2d(FilterResponseNormNd):
    """Filter Response Normalization specialized for 4-D (N, C, H, W) inputs."""

    def __init__(self, num_features, eps=1e-06, learnable_eps=False):
        # ndim=4 selects the image (2-D) variant of the generic base class.
        super().__init__(4, num_features, eps=eps, learnable_eps=learnable_eps)
def Ranger(sync_period=6, slow_step_size=0.5, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, weight_decay=0.0, amsgrad=False, sma_threshold=5.0, total_steps=0, warmup_proportion=0.1, min_lr=0.0, name='Ranger'):
    """Build the Ranger optimizer: Lookahead wrapped around RectifiedAdam (RAdam)."""
    radam = RectifiedAdam(learning_rate, beta_1, beta_2, epsilon, weight_decay, amsgrad, sma_threshold, total_steps, warmup_proportion, min_lr, name)
    return Lookahead(radam, sync_period, slow_step_size, name)
class DoubleNode(ExprNode):
    """IR leaf node for a double-precision literal."""

    def __init__(self, parse_info=None, raw_text=None):
        super().__init__(IRNodeType.Double, parse_info=parse_info, raw_text=raw_text)
        # Literal value; left unset here and filled in after construction.
        self.value = None
def test_fcm_normalization_nont1w_cli(image: pathlib.Path, mask: pathlib.Path) -> None:
    """The fcm CLI should exit 0 when normalizing a non-T1w (t2) image with a tissue mask."""
    cli_args = f'{image} -tm {mask} -mo t2'.split()
    assert fcm_main(cli_args) == 0
def dobldobl_ismember(wsys, gpts, dim, point, evatol=1e-06, memtol=1e-06, verbose=True, tasks=0):
    """Run the double-double membership test of `point` with respect to the
    witness set (wsys, gpts) of dimension `dim`.

    Returns:
        tuple: (onpolsys, inwitset) booleans from the PHCpack membership test.
    """
    from phcpy.interface import store_dobldobl_witness_set
    from phcpy.phcpy2c3 import py2c_witset_dobldobl_ismember as membtest
    store_dobldobl_witness_set(len(wsys), dim, wsys, gpts)
    if verbose:
        print('calling dobldobl_ismember with test point :')
        print(point)
    nbc = len(point)  # number of characters in the symbolic point
    nvr = len(wsys) - dim  # number of variables
    (_fail, onpolsys, inwitset) = membtest(int(verbose), tasks, nvr, dim, nbc, evatol, memtol, point)
    return (onpolsys, inwitset)
class FlattenMlp(Mlp):
    """Mlp variant that concatenates all positional inputs along dim=1 before
    running the base forward pass."""

    def forward(self, *inputs, **kwargs):
        merged = torch.cat(inputs, dim=1)
        return super().forward(merged, **kwargs)
class ContextNet(AcousticModel):
    """ContextNet acoustic model: a stack of ContextNetBlocks followed by a
    linear output layer emitting per-frame log-softmax class scores.

    Args:
        num_features: input feature dimension.
        num_classes: output vocabulary size.
        kernel_size: conv kernel size inside each block.
        num_blocks: total number of blocks (first and last are single-layer,
            non-residual).
        num_layers: conv layers per middle block.
        conv_out_channels: per-block base channel counts (default
            [256, 256, 512, 512, 512, 640], scaled by alpha).
        subsampling_layers: block indices that use stride 2 (default [1, 3]).
        alpha: channel width multiplier.
        dropout: dropout rate inside the blocks.
    """

    def __init__(self, num_features: int, num_classes: int, kernel_size: int = 3, num_blocks: int = 6, num_layers: int = 5, conv_out_channels: List[int] = None, subsampling_layers: List[int] = None, alpha: float = 1.5, dropout: int = 0.1):
        super().__init__()
        # Fix: avoid mutable default arguments (shared-state bug); the defaults
        # below reproduce the original literals exactly.
        if conv_out_channels is None:
            conv_out_channels = [*([256] * 2), *([512] * 3), 640]
        if subsampling_layers is None:
            subsampling_layers = [1, 3]
        self.num_features = num_features
        self.num_classes = num_classes
        self.subsampling_factor = 2 * len(subsampling_layers)
        conv_channels = [num_features] + [int(channels * alpha) for channels in conv_out_channels]
        strides = [1] * num_blocks
        for layer in subsampling_layers:
            # Fix: the original assigned this twice in a row (duplicate removed).
            strides[layer] = 2
        # Only the interior blocks carry residual connections and full depth.
        residuals = [False, *([True] * (num_blocks - 2)), False]
        blocks_num_layers = [1, *([num_layers] * (num_blocks - 2)), 1]
        self.block_list = [ContextNetBlock(conv_channels[i], conv_channels[i + 1], kernel_size=kernel_size, stride=strides[i], num_layers=blocks_num_layers[i], dropout=dropout, residual=residuals[i]) for i in range(num_blocks)]
        self.blocks = nn.Sequential(*self.block_list)
        self.output_layer = nn.Linear(conv_channels[-1], num_classes)

    def forward(self, x, supervision=None):
        """x: (N, T, F) features; returns (log_probs, None, None) with time last
        transposed back to the input layout."""
        x = x.transpose(1, -1)
        x = self.blocks(x)
        x = self.output_layer(x)
        x = nn.functional.log_softmax(x, dim=-1).transpose(1, -1)
        return (x, None, None)
class ObservationModel(nn.Module):
    """MLP observation model mapping ensemble states (bs, k, dim_x) to
    predicted observations (bs, k, dim_z); the ensemble axis is folded into
    the batch so all members share one network."""

    def __init__(self, num_ensemble, dim_x, dim_z):
        super(ObservationModel, self).__init__()
        self.num_ensemble = num_ensemble
        self.dim_x = dim_x
        self.dim_z = dim_z
        self.linear1 = torch.nn.Linear(self.dim_x, 64)
        self.linear2 = torch.nn.Linear(64, 128)
        self.linear3 = torch.nn.Linear(128, 128)
        self.linear4 = torch.nn.Linear(128, 64)
        self.linear5 = torch.nn.Linear(64, self.dim_z)

    def forward(self, state):
        batch_size = state.shape[0]
        # Merge the ensemble dimension into the batch for the shared MLP.
        hidden = rearrange(state, 'bs k dim -> (bs k) dim', bs=batch_size, k=self.num_ensemble)
        for layer in (self.linear1, self.linear2, self.linear3, self.linear4):
            hidden = F.relu(layer(hidden))
        z_pred = self.linear5(hidden)
        # Restore the (batch, ensemble, obs) layout.
        return rearrange(z_pred, '(bs k) dim -> bs k dim', bs=batch_size, k=self.num_ensemble)
def _get_module_macs(module):
s = module.__macs__
for child in module.children():
s += _get_module_macs(child)
return s |
class Grayscale(object):
    """Convert a (3, H, W) RGB tensor to grayscale, replicating the luma value
    across all three channels; the input tensor is left untouched (a clone is
    returned). Uses ITU-R BT.601 luma weights 0.299/0.587/0.114.
    """

    def __call__(self, img):
        gs = img.clone()
        # Fix: Tensor.add_(scalar, tensor) is the removed legacy signature; the
        # supported spelling is add_(tensor, alpha=scalar) — same arithmetic.
        gs[0].mul_(0.299).add_(gs[1], alpha=0.587).add_(gs[2], alpha=0.114)
        gs[1].copy_(gs[0])
        gs[2].copy_(gs[0])
        return gs
class sampler(Sampler):
    """Batch-aligned random sampler: whole batches are visited in random order
    while indices inside each batch stay sequential; any leftover indices
    (train_size % batch_size) are appended at the end of every epoch."""

    def __init__(self, train_size, batch_size):
        self.num_data = train_size
        self.num_per_batch = int(train_size / batch_size)
        self.batch_size = batch_size
        # Row of per-batch offsets [0, 1, ..., batch_size-1].
        self.range = torch.arange(0, batch_size).view(1, batch_size).long()
        self.leftover_flag = False
        if train_size % batch_size:
            self.leftover = torch.arange(self.num_per_batch * batch_size, train_size).long()
            self.leftover_flag = True

    def __iter__(self):
        # Shuffle batch start positions, then add the in-batch offsets.
        starts = torch.randperm(self.num_per_batch).view(-1, 1) * self.batch_size
        self.rand_num = starts.expand(self.num_per_batch, self.batch_size) + self.range
        self.rand_num_view = self.rand_num.view(-1)
        if self.leftover_flag:
            self.rand_num_view = torch.cat((self.rand_num_view, self.leftover), 0)
        return iter(self.rand_num_view)

    def __len__(self):
        return self.num_data
# NOTE(review): this bare name looks like a decorator that lost its '@' during
# extraction (likely '@require_sentencepiece' guarding the test class below) —
# confirm against the upstream transformers test module.
_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test-suite specialization for the sentencepiece-based M2M100Tokenizer."""

    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False  # M2M100 has no fast (Rust) tokenizer
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        """Write a tiny vocab + spm model to tmpdirname and round-trip a tokenizer through it."""
        super().setUp()
        vocab = ['</s>', '<unk>', 'This', 'is', 'a', 't', 'est', 'G', '<pad>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, (save_dir / VOCAB_FILES_NAMES['vocab_file']))
        if (not (save_dir / VOCAB_FILES_NAMES['spm_file']).exists()):
            copyfile(SAMPLE_SP, (save_dir / VOCAB_FILES_NAMES['spm_file']))
        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        """Reload the tokenizer saved by setUp."""
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ('This is a test', 'This is a test')

    def test_convert_token_and_id(self):
        # '</s>' should map to id 0 and back.
        token = '</s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())
        self.assertEqual(vocab_keys[0], '</s>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[(- 1)], '<s>')
        self.assertEqual(len(vocab_keys), (tokenizer.vocab_size + len(tokenizer.get_added_vocab())))

    # NOTE(review): the bare string below appears to be the argument of a
    # stripped decorator (likely `@unittest.skip(...)`) — confirm upstream.
    ('Skip this test while all models are still to be uploaded.')
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        """Tokenize/encode/decode round-trip against the tiny fixture vocab."""
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['This', 'is', 'a', 't', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6])
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ['This', 'is', 'a', 't', 'est'])
        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, 'This is a test')

    def test_tokenizer_integration(self):
        # Golden encoding produced by facebook/m2m100_418M at the pinned revision.
        expected_encoding = {'input_ids': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        self.tokenizer_integration_test_util(expected_encoding=expected_encoding, model_name='facebook/m2m100_418M', revision='c168bae485c864188cf9aa0e4108b0b6934dc91e')
class chamfer_3DFunction(Function):
    """Autograd Function wrapping the chamfer_3D CUDA extension: bidirectional
    nearest-neighbour distances between two point clouds.

    NOTE(review): `forward`/`backward` take `ctx` first but are not decorated
    with @staticmethod as modern torch.autograd.Function requires — presumably
    this targets an older PyTorch calling convention; confirm before changing.
    """

    def forward(ctx, xyz1, xyz2):
        """Compute per-point distances and NN indices in both directions.

        xyz1 is (B, n, 3)-like and xyz2 (B, m, 3)-like per the .size()
        unpacking below. Returns (dist1, dist2, idx1, idx2).
        """
        (batchsize, n, _) = xyz1.size()
        (_, m, _) = xyz2.size()
        device = xyz1.device
        # Output buffers filled in place by the CUDA kernel.
        dist1 = torch.zeros(batchsize, n)
        dist2 = torch.zeros(batchsize, m)
        idx1 = torch.zeros(batchsize, n).type(torch.IntTensor)
        idx2 = torch.zeros(batchsize, m).type(torch.IntTensor)
        dist1 = dist1.to(device)
        dist2 = dist2.to(device)
        idx1 = idx1.to(device)
        idx2 = idx2.to(device)
        torch.cuda.set_device(device)
        chamfer_3D.forward(xyz1, xyz2, dist1, dist2, idx1, idx2)
        ctx.save_for_backward(xyz1, xyz2, idx1, idx2)
        return (dist1, dist2, idx1, idx2)

    def backward(ctx, graddist1, graddist2, gradidx1, gradidx2):
        """Backpropagate through the distance outputs; index gradients are ignored."""
        (xyz1, xyz2, idx1, idx2) = ctx.saved_tensors
        graddist1 = graddist1.contiguous()
        graddist2 = graddist2.contiguous()
        device = graddist1.device
        gradxyz1 = torch.zeros(xyz1.size())
        gradxyz2 = torch.zeros(xyz2.size())
        gradxyz1 = gradxyz1.to(device)
        gradxyz2 = gradxyz2.to(device)
        chamfer_3D.backward(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2)
        return (gradxyz1, gradxyz2)
def fdmobilenet_wd2_cub(num_classes=200, **kwargs):
    """FD-MobileNet at 0.5x width, configured for the CUB-200-2011 dataset."""
    return get_mobilenet(version='fd', width_scale=0.5, model_name='fdmobilenet_wd2_cub', num_classes=num_classes, **kwargs)
def _get_log_dir(exec_func_name):
cwd = pathlib.Path.cwd()
return str(cwd.joinpath('data', 'local', 'benchmarks', exec_func_name)) |
def load_fountain_dataset():
    """Load the Open3D sample fountain dataset.

    Returns:
        tuple: (mesh, rgbd_images, camera_trajectory).
    """
    dataset = o3d.data.SampleFountainRGBDImages()
    rgbd_images = []
    for i, depth_path in enumerate(dataset.depth_paths):
        depth = o3d.io.read_image(depth_path)
        color = o3d.io.read_image(dataset.color_paths[i])
        # Keep RGB colors (no intensity conversion) when fusing with depth.
        rgbd_images.append(o3d.geometry.RGBDImage.create_from_color_and_depth(color, depth, convert_rgb_to_intensity=False))
    camera_trajectory = o3d.io.read_pinhole_camera_trajectory(dataset.keyframe_poses_log_path)
    mesh = o3d.io.read_triangle_mesh(dataset.reconstruction_path)
    return (mesh, rgbd_images, camera_trajectory)
class PostProcessor(abc.ABC):
    """Decodes model prediction/label id arrays into stripped text pairs."""

    def __init__(self, tokenizer, ignore_pad_token_for_loss):
        self.tokenizer = tokenizer
        # When True, -100 loss-mask sentinels are swapped for the pad token id
        # before decoding.
        self.ignore_pad_token_for_loss = ignore_pad_token_for_loss

    def process(self, preds, labels, data_info=None):
        """Return (decoded_preds, decoded_labels) with surrounding whitespace stripped."""
        if isinstance(preds, tuple):
            preds = preds[0]
        if self.ignore_pad_token_for_loss:
            pad_id = self.tokenizer.pad_token_id
            labels = np.where(labels != -100, labels, pad_id)
            preds = np.where(preds != -100, preds, pad_id)
        decode = self.tokenizer.batch_decode
        decoded_preds = [text.strip() for text in decode(preds, skip_special_tokens=True)]
        decoded_labels = [text.strip() for text in decode(labels, skip_special_tokens=True)]
        return (decoded_preds, decoded_labels)
class WavLMForAudioFrameClassification(metaclass=DummyObject):
    """Placeholder stub used when the 'torch' backend is unavailable.

    Instantiating it calls ``requires_backends``, which raises a helpful
    error telling the user to install torch.
    """
    # Backends this object requires; checked by requires_backends.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class PSMDispProcessor(nn.Module):
    """Turns PSMNet's three cost volumes into disparity maps via a
    soft-argmin, and packages training/inference outputs plus visual
    summaries."""

    def __init__(self, max_disp=192):
        super().__init__()
        self.disp_processor = FasterSoftArgmin(max_disp=max_disp, start_disp=0, dilation=1, alpha=1.0, normalize=True)

    def forward(self, inputs):
        """Expects 'cost1'..'cost3', 'ref_img', 'tgt_img' (plus 'disp_gt'
        and 'mask' during training) in ``inputs``."""
        ref_img = inputs['ref_img']
        tgt_img = inputs['tgt_img']
        disp1, disp2, disp3 = (self.disp_processor(inputs[k]) for k in ('cost1', 'cost2', 'cost3'))
        image_pair = torch.cat([ref_img[0], tgt_img[0]], dim=1)
        if self.training:
            disp_gt = inputs['disp_gt']
            return {
                'training_disp': {'disp': {'disp_ests': [disp1, disp2, disp3], 'disp_gt': inputs['disp_gt'], 'mask': inputs['mask']}},
                'visual_summary': {'image/train/image_c': image_pair, 'image/train/disp_c': torch.cat([disp_gt[0], disp3[0]], dim=0)},
            }
        output = {
            'inference_disp': {'disp_est': disp3},
            'visual_summary': {'image/test/image_c': image_pair, 'image/test/disp_c': disp3[0]},
        }
        # If ground truth is available at eval time, swap in validation summaries.
        if 'disp_gt' in inputs:
            disp_gt = inputs['disp_gt']
            output['visual_summary'] = {'image/val/image_c': image_pair, 'image/val/disp_c': torch.cat([disp_gt[0], disp3[0]], dim=0)}
        return output
def stat_coef_diff(X, X_tilde, y, method='lasso_cv', n_splits=5, n_jobs=1, n_lambdas=10, n_iter=1000, group_reg=0.001, l1_reg=0.001, joblib_verbose=0, return_coef=False, solver='liblinear', seed=0):
    """Knockoff test statistic: difference of absolute coefficient magnitudes
    between each original feature and its knockoff copy.

    Fits a cross-validated estimator on the concatenated design
    ``[X, X_tilde]`` and returns ``W_j = |beta_j| - |beta_{j+p}|``.

    Parameters
    ----------
    X, X_tilde : arrays of shape (n_samples, n_features)
        Original design matrix and its knockoff copy.
    y : array of shape (n_samples,)
        Response vector.
    method : {'lasso_cv', 'logistic_l1', 'logistic_l2'}
        Which cross-validated estimator to fit.
    n_splits : int
        Number of CV folds.
    return_coef : bool
        If True, also return the full raveled coefficient vector.

    Returns
    -------
    test_score : array of shape (n_features,), and optionally ``coef``.

    Raises
    ------
    ValueError
        If ``method`` is not one of the supported estimators.

    NOTE(review): ``n_iter``, ``group_reg`` and ``l1_reg`` are accepted for
    interface compatibility but unused by the current estimators.
    """
    n_features = X.shape[1]
    X_ko = np.column_stack([X, X_tilde])
    # Data-driven lambda grid from ~0 (lambda_max * e^-n_lambdas) up to lambda_max.
    lambda_max = (np.max(np.dot(X_ko.T, y)) / (2 * n_features))
    lambdas = np.linspace((lambda_max * np.exp((- n_lambdas))), lambda_max, n_lambdas)
    # FIX: honor the caller-supplied n_splits (was hard-coded to 5).
    cv = KFold(n_splits=n_splits, shuffle=True, random_state=seed)
    estimator = {'lasso_cv': LassoCV(alphas=lambdas, n_jobs=n_jobs, verbose=joblib_verbose, max_iter=10000.0, cv=cv), 'logistic_l1': LogisticRegressionCV(penalty='l1', max_iter=10000.0, solver=solver, cv=cv, n_jobs=n_jobs, tol=1e-08), 'logistic_l2': LogisticRegressionCV(penalty='l2', max_iter=10000.0, n_jobs=n_jobs, verbose=joblib_verbose, cv=cv, tol=1e-08)}
    try:
        clf = estimator[method]
    except KeyError:
        # FIX: previously printed the message and then crashed with an
        # UnboundLocalError on `clf`; fail fast with a clear exception.
        raise ValueError('{} is not a valid estimator'.format(method)) from None
    clf.fit(X_ko, y)
    try:
        coef = np.ravel(clf.coef_)
    except AttributeError:
        coef = np.ravel(clf.best_estimator_.coef_)
    # W statistic: original-feature magnitude minus knockoff magnitude.
    test_score = (np.abs(coef[:n_features]) - np.abs(coef[n_features:]))
    if return_coef:
        return (test_score, coef)
    return test_score
class ResNetV2(nn.Module):
    """Pre-activation ResNet-V2 built from weight-standardized convs and
    GroupNorm (the TF/'Big Transfer'-style variant, given the .npz
    checkpoint loader below)."""

    def __init__(self, layers, channels=(256, 512, 1024, 2048), num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, width_factor=1, stem_chs=64, stem_type='', avg_down=False, preact=True, act_layer=nn.ReLU, conv_layer=StdConv2d, norm_layer=partial(GroupNormAct, num_groups=32), drop_rate=0.0, drop_path_rate=0.0):
        super().__init__()
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        wf = width_factor
        self.feature_info = []
        stem_chs = make_div((stem_chs * wf))
        self.stem = create_stem(in_chans, stem_chs, stem_type, preact, conv_layer=conv_layer, norm_layer=norm_layer)
        self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=('' if preact else 'stem.norm')))
        prev_chs = stem_chs
        curr_stride = 4
        dilation = 1
        # Per-block stochastic-depth (drop path) rates, split per stage.
        block_dprs = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)]
        block_fn = (PreActBottleneck if preact else Bottleneck)
        self.stages = nn.Sequential()
        for (stage_idx, (d, c, bdpr)) in enumerate(zip(layers, channels, block_dprs)):
            out_chs = make_div((c * wf))
            stride = (1 if (stage_idx == 0) else 2)
            # Once the requested output stride is reached, trade stride for dilation.
            if (curr_stride >= output_stride):
                dilation *= stride
                stride = 1
            stage = ResNetStage(prev_chs, out_chs, stride=stride, dilation=dilation, depth=d, avg_down=avg_down, act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer, block_dpr=bdpr, block_fn=block_fn)
            prev_chs = out_chs
            curr_stride *= stride
            feat_name = f'stages.{stage_idx}'
            # Pre-act nets tap features at the next stage's first norm
            # (or the final 'norm' for the last stage).
            if preact:
                feat_name = (f'stages.{(stage_idx + 1)}.blocks.0.norm1' if ((stage_idx + 1) != len(channels)) else 'norm')
            self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=feat_name)]
            self.stages.add_module(str(stage_idx), stage)
        self.num_features = prev_chs
        self.norm = (norm_layer(self.num_features) if preact else nn.Identity())
        self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, use_conv=True)
        # Weight init: small normal for the (conv-as-)fc classifier, kaiming for convs.
        for (n, m) in self.named_modules():
            if (isinstance(m, nn.Linear) or (('.fc' in n) and isinstance(m, nn.Conv2d))):
                nn.init.normal_(m.weight, mean=0.0, std=0.01)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')

    def get_classifier(self):
        """Return the classifier module."""
        return self.head.fc

    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace the classification head for a new number of classes."""
        self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, use_conv=True)

    def forward_features(self, x):
        """Backbone only: stem -> stages -> final norm."""
        x = self.stem(x)
        x = self.stages(x)
        x = self.norm(x)
        return x

    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        # Head uses a conv classifier; flatten unless pooling was disabled.
        if (not self.head.global_pool.is_identity()):
            x = x.flatten(1)
        return x

    def load_pretrained(self, checkpoint_path, prefix='resnet/'):
        """Load weights from a TF-style .npz checkpoint into this model."""
        import numpy as np
        weights = np.load(checkpoint_path)
        with torch.no_grad():
            stem_conv_w = tf2th(weights[f'{prefix}root_block/standardized_conv2d/kernel'])
            # Grayscale models: collapse the RGB stem kernel to one input channel.
            if (self.stem.conv.weight.shape[1] == 1):
                self.stem.conv.weight.copy_(stem_conv_w.sum(dim=1, keepdim=True))
            else:
                self.stem.conv.weight.copy_(stem_conv_w)
            self.norm.weight.copy_(tf2th(weights[f'{prefix}group_norm/gamma']))
            self.norm.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta']))
            self.head.fc.weight.copy_(tf2th(weights[f'{prefix}head/conv2d/kernel']))
            self.head.fc.bias.copy_(tf2th(weights[f'{prefix}head/conv2d/bias']))
            for (i, (sname, stage)) in enumerate(self.stages.named_children()):
                for (j, (bname, block)) in enumerate(stage.blocks.named_children()):
                    convname = 'standardized_conv2d'
                    block_prefix = f'{prefix}block{(i + 1)}/unit{(j + 1):02d}/'
                    block.conv1.weight.copy_(tf2th(weights[f'{block_prefix}a/{convname}/kernel']))
                    block.conv2.weight.copy_(tf2th(weights[f'{block_prefix}b/{convname}/kernel']))
                    block.conv3.weight.copy_(tf2th(weights[f'{block_prefix}c/{convname}/kernel']))
                    block.norm1.weight.copy_(tf2th(weights[f'{block_prefix}a/group_norm/gamma']))
                    block.norm2.weight.copy_(tf2th(weights[f'{block_prefix}b/group_norm/gamma']))
                    block.norm3.weight.copy_(tf2th(weights[f'{block_prefix}c/group_norm/gamma']))
                    block.norm1.bias.copy_(tf2th(weights[f'{block_prefix}a/group_norm/beta']))
                    block.norm2.bias.copy_(tf2th(weights[f'{block_prefix}b/group_norm/beta']))
                    block.norm3.bias.copy_(tf2th(weights[f'{block_prefix}c/group_norm/beta']))
                    if (block.downsample is not None):
                        w = weights[f'{block_prefix}a/proj/{convname}/kernel']
                        block.downsample.conv.weight.copy_(tf2th(w))
class CLIPVisionCfg():
    """Configuration for the CLIP vision tower.

    NOTE(review): this looks like it is meant to be a ``@dataclass`` (as in
    upstream open_clip); without the decorator the non-defaulted annotations
    below are plain annotations, not instance attributes, and the class
    cannot be constructed with keyword arguments — confirm against callers.
    """
    # ResNet stage depths (4-tuple) or a single transformer depth (int).
    layers: Union[(Tuple[(int, int, int, int)], int)]
    width: int
    head_width: int
    image_size: int
    mlp_ratio: float
    # Optional settings; None means "unset".
    patch_size: Union[(int, None)] = None
    # timm backbone overrides (used only when a timm model is requested).
    timm_model_name: Union[(str, None)] = None
    timm_model_pretrained: Union[(bool, None)] = None
    timm_pool: Union[(str, None)] = None
    timm_proj: Union[(str, None)] = None
class Host():
    """Simulated physical host with IPS/RAM/Disk/bandwidth capacities.

    Usage figures are aggregated over the containers the environment has
    currently placed on this host.
    """

    def __init__(self, ID, IPS, RAM, Disk, Bw, Latency, Powermodel, Environment):
        self.id = ID
        self.ipsCap = IPS
        self.ramCap = RAM
        self.diskCap = Disk
        self.bwCap = Bw
        self.latency = Latency
        self.powermodel = Powermodel
        # Register this host with its power model (two-way back-reference).
        self.powermodel.allocHost(self)
        self.powermodel.host = self
        self.env = Environment

    def _container_objs(self):
        # Container objects currently scheduled on this host.
        return [self.env.getContainerByID(cid) for cid in self.env.getContainersOfHost(self.id)]

    def getPower(self):
        """Current power draw as reported by the power model."""
        return self.powermodel.power()

    def getPowerFromIPS(self, ips):
        """Power draw for a hypothetical IPS load (CPU% capped at 100)."""
        utilization = min(100, 100 * (ips / self.ipsCap))
        return self.powermodel.powerFromCPU(utilization)

    def getCPU(self):
        """Apparent CPU utilization in percent."""
        return 100 * (self.getApparentIPS() / self.ipsCap)

    def getBaseIPS(self):
        return sum(c.getBaseIPS() for c in self._container_objs())

    def getApparentIPS(self):
        return int(sum(c.getApparentIPS() for c in self._container_objs()))

    def getIPSAvailable(self):
        return self.ipsCap - self.getBaseIPS()

    def getCurrentRAM(self):
        """Aggregate (size, read, write) RAM usage over hosted containers."""
        usage = [c.getRAM() for c in self._container_objs()]
        size = sum(u[0] for u in usage)
        read = sum(u[1] for u in usage)
        write = sum(u[2] for u in usage)
        return (size, read, write)

    def getRAMAvailable(self):
        (size, read, write) = self.getCurrentRAM()
        return (self.ramCap.size - size, self.ramCap.read - read, self.ramCap.write - write)

    def getCurrentDisk(self):
        """Aggregate (size, read, write) disk usage over hosted containers."""
        usage = [c.getDisk() for c in self._container_objs()]
        size = sum(u[0] for u in usage)
        read = sum(u[1] for u in usage)
        write = sum(u[2] for u in usage)
        assert size <= self.diskCap.size
        assert read <= self.diskCap.read
        assert write <= self.diskCap.write
        return (size, read, write)

    def getDiskAvailable(self):
        (size, read, write) = self.getCurrentDisk()
        return (self.diskCap.size - size, self.diskCap.read - read, self.diskCap.write - write)
class ClassAssocationRule():
    """Class association rule (antecedent => class consequent) with the
    CBA-style total ordering: higher confidence, then higher support, then
    shorter rule, then earlier (smaller) rule id.

    NOTE: class name spelling ("Assocation") kept for backward compatibility.
    """
    # Monotonically increasing id shared across all instances.
    id = 0

    def __init__(self, antecedent, consequent, support, confidence):
        self.antecedent = antecedent
        self.consequent = consequent
        self.support = support
        self.confidence = confidence
        # Rule length counts the consequent as one extra item.
        self.rulelen = (len(antecedent) + 1)
        self.rid = ClassAssocationRule.id
        ClassAssocationRule.id += 1
        self.support_count = 0
        self.marked = False
        # Per-class counts of training cases this rule covers.
        self.class_cases_covered = collections.Counter()
        self.replace = set()

    def __gt__(self, other):
        """True when self has precedence over other (see class docstring)."""
        if (self.confidence > other.confidence):
            return True
        elif ((self.confidence == other.confidence) and (self.support > other.support)):
            return True
        elif ((self.confidence == other.confidence) and (self.support == other.support) and (self.rulelen < other.rulelen)):
            return True
        elif ((self.confidence == other.confidence) and (self.support == other.support) and (self.rulelen == other.rulelen) and (self.rid < other.rid)):
            return True
        else:
            return False

    def __lt__(self, other):
        # FIX: was `not (self > other)`, which is <= (a rule compared
        # less-than itself). Delegate to __gt__ with swapped operands to
        # obtain a proper strict ordering.
        return (other > self)

    def __len__(self):
        return (len(self.antecedent) + len(self.consequent))

    def __repr__(self):
        args = [self.antecedent.string(), (('{' + self.consequent.string()) + '}'), self.support, self.confidence, self.rulelen, self.rid]
        text = 'CAR {} => {} sup: {:.2f} conf: {:.2f} len: {}, id: {}'.format(*args)
        return text
def add_prefix(inputs, prefix):
    """Return a new dict whose keys are ``'{prefix}.{key}'`` for each item."""
    return {f'{prefix}.{key}': value for (key, value) in inputs.items()}
def get_symbol_edges(node: Union[(str, ast.AST)]) -> List[Tuple[(Union[(str, ast.AST)], ast.AST)]]:
    """Extract symbol edges from an AST node, parsing source text first if needed."""
    target = ast.parse(node).body[0] if isinstance(node, str) else node
    return GetSymbolEdges()(target)
def retrieve_data_cfg(config_path, skip_type):
    """Load a config file and strip pipeline steps whose type is in *skip_type*.

    Handles both flat (`train.pipeline`) and wrapped (`train.dataset.pipeline`)
    dataset configurations.
    """
    cfg = Config.fromfile(config_path)
    train_data_cfg = cfg.data.train

    def _keep(step):
        return step['type'] not in skip_type

    if hasattr(train_data_cfg, 'pipeline'):
        train_data_cfg['pipeline'] = list(filter(_keep, train_data_cfg.pipeline))
    else:
        train_data_cfg['dataset']['pipeline'] = list(filter(_keep, train_data_cfg.dataset.pipeline))
    return cfg
def get_dataframes_model(all_results, datasets, model_name, divide=False):
    """Yield per-dataset result DataFrames for *model_name*, followed by
    aggregate ("avg") DataFrames.

    With ``divide=True`` each dataset yields two frames (a self-translate
    view and an MT view) and two aggregate frames at the end; otherwise one
    frame per dataset is yielded, then one aggregate frame per language plus
    the overall average.

    NOTE(review): relies on module-level globals `languages`, `models`,
    `models_reverse`, `model_sizes` and pandas imported as `pd` — confirm
    at the top of the file.
    """
    dataset_keys = list(all_results.keys())
    if divide:
        df_avg_self = pd.DataFrame()
        df_avg_mt = pd.DataFrame()
    else:
        df_avg = {}
        for average in (['avg'] + list(languages.keys())):
            df_avg[average] = pd.DataFrame()
    for dataset in datasets:
        dfs = []
        for dataset_key in dataset_keys:
            if dataset_key.startswith(dataset):
                # Keep only result entries that actually contain this model.
                if (model_name == 'open_llama_v2'):
                    if (not any([(model.lower() in models['open_llama_v2']) for model in all_results[dataset_key]])):
                        continue
                elif (not any([(model_name in model.lower()) for model in all_results[dataset_key]])):
                    continue
                # Skip the small model-size variants entirely.
                if (('600M' in dataset_key) or ('1.3B' in dataset_key)):
                    continue
                results = pd.DataFrame(all_results[dataset_key]).T
                results['dataset'] = dataset_key
                results['model'] = [models_reverse[model] for model in results.index]
                results = results[(results['model'] == model_name)]
                results.drop(columns=['model'], inplace=True)
                results['model'] = model_name
                results['size'] = model_sizes[model_name][:len(results)]
                dfs.append(results)
        df_concat = pd.concat(dfs)
        df_concat = df_concat.sort_values(['size', 'dataset'])
        # Put the identifying columns first.
        df_concat = df_concat.reindex(columns=(['model', 'size', 'dataset'] + [col for col in df_concat.columns if (col not in ['model', 'size', 'dataset'])]))
        # Human-readable labels for the evaluation setting.
        df_concat['dataset'] = df_concat['dataset'].str.replace(dataset, 'Direct')
        df_concat['dataset'] = df_concat['dataset'].str.replace('Direct-mt_few-shot', 'Self-translate')
        df_concat['dataset'] = df_concat['dataset'].str.replace('Direct-mt_nllb-200-3.3B', 'MT (NLLB)')
        if divide:
            # NOTE(review): these filters select 'Self-MT'/'MT' labels that the
            # replacements above never produce ('Self-translate'/'MT (NLLB)')
            # — possibly stale; confirm the intended labels.
            df_concat_self = df_concat[df_concat['dataset'].isin(['Direct', 'Self-MT'])]
            df_concat_mt = df_concat[df_concat['dataset'].isin(['Self-MT', 'MT'])]
            df_avg_self['model'] = df_concat_self['model']
            df_avg_self['size'] = df_concat_self['size']
            df_avg_self['dataset'] = df_concat_self['dataset']
            df_avg_self[dataset] = df_concat_self['avg']
            df_avg_mt['model'] = df_concat_mt['model']
            df_avg_mt['size'] = df_concat_mt['size']
            df_avg_mt['dataset'] = df_concat_mt['dataset']
            df_avg_mt[dataset] = df_concat_mt['avg']
            (yield df_concat_self)
            (yield df_concat_mt)
        else:
            for average in (['avg'] + list(languages.keys())):
                df_avg[average]['model'] = df_concat['model']
                df_avg[average]['size'] = df_concat['size']
                df_avg[average]['dataset'] = df_concat['dataset']
                df_avg[average][dataset] = df_concat[average]
            (yield df_concat)
    if divide:
        # Temporarily drop 'size' so the row-wise mean covers only score columns.
        size = df_avg_self['size']
        df_avg_self = df_avg_self.drop(columns=['size'])
        size = df_avg_mt['size']
        df_avg_mt = df_avg_mt.drop(columns=['size'])
        df_avg_self['avg'] = df_avg_self.mean(axis=1).round(1)
        df_avg_mt['avg'] = df_avg_mt.mean(axis=1).round(1)
        df_avg_self['size'] = size
        df_avg_mt['size'] = size
        (yield df_avg_self)
        (yield df_avg_mt)
    else:
        for average in (['avg'] + list(languages.keys())):
            size = df_avg[average]['size']
            df_avg[average] = df_avg[average].drop(columns=['size'])
            df_avg[average][average] = df_avg[average].mean(axis=1).round(1)
            df_avg[average]['size'] = size
            (yield df_avg[average])
class VGGLoss_ESRGAN(nn.Module):
    """Perceptual loss: L1 distance between VGG19 features (first 35 layers
    of ``features``, i.e. up to conv5_4) of a real and a fake image."""

    def __init__(self):
        super().__init__()
        backbone = models.vgg19(pretrained=True)
        feature_layers = list(backbone.features.children())[:35]
        self.vgg19_54 = nn.Sequential(*feature_layers)
        self.criterion = nn.L1Loss()

    def forward(self, real, fake):
        feat_real = self.vgg19_54(real)
        feat_fake = self.vgg19_54(fake)
        return self.criterion(feat_real, feat_fake)
def submit_training(**kws):
    """Submit a VAE training task to the dask client, unless the same
    (scenario, model) combination has already been gathered.

    Returns ``(scenario_key, model_key, history)`` where ``history`` is the
    submitted future, or None when the combination was skipped.
    """
    dims = (N_CLUSTERS, kws['dim_l1'], kws['dim__adj'], kws['dim__v'])
    scenario_key = (kws['aid'], kws['cid'])
    model_key = (kws['model_type'], kws['spill_v2adj'], kws['ov'], kws['sampling_id'])
    # Skip resubmission for combinations that were already gathered.
    if (scenario_key, model_key) in kws['gathered']:
        return (scenario_key, model_key, None)
    task_key = ('make_train_vae', id(make_train_vae), scenario_key, model_key)
    history = client.submit(make_train_vae, dims, kws['q_overlap'], kws['p__slices'], scenario_key, with_progress=False, key=task_key)
    return (scenario_key, model_key, history)
class ActionRepeat(object):
    """Environment wrapper that repeats an action up to ``amount`` steps,
    summing rewards and stopping early when the episode ends."""

    def __init__(self, env, amount):
        self._env = env
        self._amount = amount

    def __getattr__(self, name):
        # Delegate unknown attribute lookups to the wrapped environment.
        return getattr(self._env, name)

    def step(self, action):
        """Apply ``action`` repeatedly; returns the last observation, the
        summed reward, the final done flag and the last info dict."""
        total_reward = 0
        for _ in range(self._amount):
            (observ, reward, done, info) = self._env.step(action)
            total_reward += reward
            if done:
                break
        return (observ, total_reward, done, info)
def load_embedding(VOCAB, path, embedding_dim=300):
    """Load pretrained word vectors from *path* into a weight matrix.

    Rows for words missing from the embedding file keep their random
    initialization. Returns a (vocab_size, embedding_dim) numpy array and
    prints the fraction of vocabulary rows that were filled.
    """
    with open(path) as f:
        # Random init so out-of-vocabulary rows still have nonzero vectors.
        weights = np.random.rand(VOCAB.get_vocab_size(), embedding_dim)
        counter = 0
        for line in f.readlines():
            try:
                # Each line: <word> <v1> <v2> ... (whitespace separated).
                line = line.strip().split()
                v = list(map(float, line[1:]))
                word = line[0]
                wid = VOCAB.get_index(word)
                # Only overwrite/count rows for in-vocabulary words
                # (get_index presumably returns the <unk> id for OOV — confirm).
                if (wid != VOCAB.get_index('<unk>')):
                    counter += 1
                    weights[wid] = np.array(v)
            except Exception as e:
                # NOTE(review): drops into an interactive debugger on any
                # parse error — a debugging aid; remove for production use.
                print(e)
                ipdb.set_trace()
        print(f'[!] Loading the weights {round((counter / VOCAB.get_vocab_size()), 4)}')
    return weights
def lanczos_generalized(operator, metric_operator=None, metric_inv_operator=None, num_eigenthings=10, which='LM', max_steps=20, tol=1e-06, num_lanczos_vectors=None, init_vec=None, use_gpu=False):
    """Compute eigenvalues/eigenvectors of a (generalized) eigenproblem
    ``A v = w M v`` via scipy's Lanczos solver (`eigsh`) on implicitly
    defined operators.

    Parameters
    ----------
    operator : object with ``.apply(tensor)`` and ``.size``
        The main linear operator A (torch-based).
    metric_operator, metric_inv_operator : optional
        Metric M and its inverse. Each may be a numpy array, a scipy
        ``LinearOperator``, an object with ``.apply``, or None (plain,
        non-generalized eigenproblem).
    num_eigenthings : int
        Number of eigenpairs to compute.
    which : str
        Which eigenvalues to target (scipy convention, e.g. 'LM').
    max_steps, tol, num_lanczos_vectors : solver controls (eigsh
        ``maxiter``, ``tol``, ``ncv``).
    init_vec : numpy array or torch tensor, optional
        Starting vector for the iteration; random if omitted.
    use_gpu : bool
        Move tensors to CUDA before applying the torch operators.

    Returns
    -------
    (eigenvals, eigenvecs) with ``eigenvecs`` of shape
    (num_eigenthings, size).
    """
    if isinstance(operator.size, int):
        size = operator.size
    else:
        size = operator.size[0]
    shape = (size, size)
    if (num_lanczos_vectors is None):
        num_lanczos_vectors = min((2 * num_eigenthings), (size - 1))
    if (num_lanczos_vectors < (2 * num_eigenthings)):
        warn('[lanczos] number of lanczos vectors should usually be > 2*num_eigenthings')

    def _as_scipy_operator(op):
        # Pass None / ndarray / scipy LinearOperator through unchanged;
        # wrap anything exposing ``.apply`` (torch-based operators).
        # FIX: previously a None metric was wrapped too, producing an
        # operator that crashed on first use instead of a plain problem.
        if (op is None) or isinstance(op, (np.ndarray, ScipyLinearOperator)):
            return op

        def _apply(x):
            x = torch.from_numpy(x)
            if use_gpu:
                x = x.cuda()
            return op.apply(x.float()).cpu().numpy()

        return ScipyLinearOperator(shape, _apply)

    scipy_op = _as_scipy_operator(operator)
    metric_op = _as_scipy_operator(metric_operator)
    metric_inv_op = _as_scipy_operator(metric_inv_operator)
    if (init_vec is None):
        init_vec = np.random.rand(size)
    elif isinstance(init_vec, torch.Tensor):
        init_vec = init_vec.cpu().numpy()
    # FIX: init_vec was computed but never handed to eigsh; pass it as v0.
    (eigenvals, eigenvecs) = eigsh(A=scipy_op, k=num_eigenthings, M=metric_op, Minv=metric_inv_op, which=which, maxiter=max_steps, tol=tol, ncv=num_lanczos_vectors, v0=init_vec, return_eigenvectors=True)
    return (eigenvals, eigenvecs.T)
def train_data_loader(config, batch_size):
    """Build a DataLoader for the dataset named in ``config``.

    Defaults: 'LinearDataset', 1000 samples, non-nested input.
    """
    dataset_cls = DataSetMap[config.get('dataset', 'LinearDataset')]
    train_dataset = dataset_cls(size=config.get('data_size', 1000), nested_input=config.get('nested_input', False))
    return torch.utils.data.DataLoader(train_dataset, batch_size=batch_size)
class BasicBlock(nn.Module):
    """Standard two-conv residual block (conv-bn-relu, conv-bn, skip, relu)."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, last=False):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        # Stored but unused here — presumably consumed by callers; confirm.
        self.last = last

    def forward(self, x):
        # Project the skip connection when a downsample module is provided.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = self.relu(out + identity)
        return out
class FINNExampleOverlay(Overlay):
    """PYNQ Overlay wrapper for a FINN-generated dataflow accelerator.

    Handles input/output DMA discovery, external and runtime weight loading,
    folding/packing of numpy data into the hardware layout, execution and
    throughput measurement on the 'zynq-iodma' and 'alveo' platforms.

    FIX: the ``@property`` decorators on ``num_inputs``/``num_outputs``/
    ``batch_size`` and the ``@batch_size.setter`` decorator were garbled in
    the source (a bare ``_size.setter`` expression, a NameError at class
    creation). They are restored here; the rest of the class already uses
    these names as attributes (e.g. ``range(self.num_inputs)`` and
    ``self.batch_size = batch_size`` in ``__init__``).
    """

    def __init__(self, bitfile_name, platform, io_shape_dict, batch_size=1, fclk_mhz=100.0, device=None, download=True, runtime_weight_dir='runtime_weights/'):
        """Load the bitfile, discover DMAs, set the clock and load weights.

        ``io_shape_dict`` describes I/O datatypes and normal/folded/packed
        shapes as generated by FINN.
        """
        super().__init__(bitfile_name, download=download, device=device)
        self.runtime_weight_dir = runtime_weight_dir
        self._io_shape_dict = io_shape_dict
        # Buffers are (re)allocated by the batch_size setter below.
        self.ibuf_packed_device = None
        self.obuf_packed_device = None
        self.platform = platform
        self.batch_size = batch_size
        self.fclk_mhz = fclk_mhz
        self.idma = []
        self.odma = []
        self.odma_handle = []
        # Multi-DMA designs name their engines in io_shape_dict; otherwise
        # fall back to the default idma0/odma0 attributes.
        if ('input_dma_name' in io_shape_dict.keys()):
            for idma_name in io_shape_dict['input_dma_name']:
                self.idma.append(getattr(self, idma_name))
        else:
            self.idma = [self.idma0]
        if ('output_dma_name' in io_shape_dict.keys()):
            for odma_name in io_shape_dict['output_dma_name']:
                self.odma.append(getattr(self, odma_name))
                if (self.platform == 'alveo'):
                    self.odma_handle.append(None)
        else:
            self.odma = [self.odma0]
            if (self.platform == 'alveo'):
                self.odma_handle.append(None)
        if (self.platform == 'zynq-iodma'):
            if (self.fclk_mhz > 0):
                Clocks.fclk0_mhz = self.fclk_mhz
        self.load_external_weights()
        self.load_runtime_weights()

    def load_external_weights(self):
        """Load .npy weight tensors for external-weight DMAs from
        runtime_weight_dir into device buffers."""
        self.external_weights = []
        w_filenames = []
        if (not os.path.isdir(self.runtime_weight_dir)):
            return
        for (dirpath, dirnames, filenames) in os.walk(self.runtime_weight_dir):
            w_filenames.extend(filenames)
        tmp_weight_dict = {}
        for w_filename in w_filenames:
            if w_filename.endswith('.npy'):
                weight_tensor = np.load(((self.runtime_weight_dir + '/') + w_filename))
            else:
                continue
            # Filename stem names the weight DMA engine it feeds.
            idma_name = w_filename.split('.')[0]
            tmp_weight_dict[idma_name] = weight_tensor
        for idma_name in tmp_weight_dict.keys():
            if (idma_name in self.ip_dict.keys()):
                iwdma = getattr(self, idma_name)
                weight_tensor = tmp_weight_dict[idma_name]
                weight_buf = allocate(weight_tensor.shape, dtype=np.uint8)
                weight_buf[:] = weight_tensor
                weight_buf.flush()
                self.external_weights += [(iwdma, weight_buf, idma_name)]
        if ('number_of_external_weights' in self._io_shape_dict):
            hw_ext_weights = self._io_shape_dict['number_of_external_weights']
            assert (len(self.external_weights) == hw_ext_weights), (('Number of hardware external weights and number of external ' + 'weight tensors available do not match. \n') + 'Is runtime_weight_dir pointing to the correct folder?')

    def load_runtime_weights(self, flush_accel=True, verify=True):
        """Write .dat runtime weight files into the corresponding AXI-lite
        weight memories; optionally read back to verify and flush once."""
        w_filenames = []
        if (not os.path.isdir(self.runtime_weight_dir)):
            return
        for (dirpath, dirnames, filenames) in os.walk(self.runtime_weight_dir):
            w_filenames.extend(filenames)
        rt_weight_dict = {}
        for w_filename in w_filenames:
            if w_filename.endswith('.dat'):
                with open(((self.runtime_weight_dir + '/') + w_filename), 'r') as f:
                    dat = f.read()
            else:
                continue
            # .dat files hold whitespace-separated hex words.
            layer_w = np.fromiter([int(x, 16) for x in dat.strip().split()], dtype=np.uint32)
            # Filename format: <sdp_ind>_<layer_ind>*.dat
            sdp_ind = int(w_filename.split('_')[0])
            layer_ind = int(w_filename.split('_')[1])
            rt_weight_dict[(sdp_ind, layer_ind)] = layer_w
        for (sdp_ind, layer_ind) in rt_weight_dict.keys():
            cand_if_name = ('StreamingDataflowPartition_%d/s_axilite_%d' % (sdp_ind, layer_ind))
            if (cand_if_name in self.ip_dict.keys()):
                layer_mmio = getattr(getattr(self, ('StreamingDataflowPartition_%d' % sdp_ind)), ('s_axilite_%d' % layer_ind)).mmio
                layer_w = rt_weight_dict[(sdp_ind, layer_ind)]
                layer_mmio.write_mm(0, layer_w.tobytes())
                if verify:
                    new_w = np.copy(layer_mmio.array[:layer_w.shape[0]])
                    assert (layer_w == new_w).all()
        if flush_accel:
            # Run the accelerator once to flush any stale state.
            self.execute_on_buffers()

    def idt(self, ind=0):
        """FINN datatype of input `ind`."""
        return self._io_shape_dict['idt'][ind]

    def odt(self, ind=0):
        """FINN datatype of output `ind`."""
        return self._io_shape_dict['odt'][ind]

    def ishape_normal(self, ind=0):
        """Normal (user-facing) input shape, first dim set to batch size."""
        ret = list(self._io_shape_dict['ishape_normal'][ind])
        ret[0] = self.batch_size
        return tuple(ret)

    def oshape_normal(self, ind=0):
        """Normal (user-facing) output shape, first dim set to batch size."""
        ret = list(self._io_shape_dict['oshape_normal'][ind])
        ret[0] = self.batch_size
        return tuple(ret)

    def ishape_folded(self, ind=0):
        """Folded input shape, first dim set to batch size."""
        ret = list(self._io_shape_dict['ishape_folded'][ind])
        ret[0] = self.batch_size
        return tuple(ret)

    def oshape_folded(self, ind=0):
        """Folded output shape, first dim set to batch size."""
        ret = list(self._io_shape_dict['oshape_folded'][ind])
        ret[0] = self.batch_size
        return tuple(ret)

    def ishape_packed(self, ind=0):
        """Packed (byte-level) input shape, first dim set to batch size."""
        ret = list(self._io_shape_dict['ishape_packed'][ind])
        ret[0] = self.batch_size
        return tuple(ret)

    def oshape_packed(self, ind=0):
        """Packed (byte-level) output shape, first dim set to batch size."""
        ret = list(self._io_shape_dict['oshape_packed'][ind])
        ret[0] = self.batch_size
        return tuple(ret)

    @property
    def num_inputs(self):
        """Number of accelerator inputs."""
        return self._io_shape_dict['num_inputs']

    @property
    def num_outputs(self):
        """Number of accelerator outputs."""
        return self._io_shape_dict['num_outputs']

    @property
    def batch_size(self):
        """Current batch size; assigning reallocates the device buffers."""
        return self._batch_size

    @batch_size.setter
    def batch_size(self, value):
        self._batch_size = value
        # Free existing buffers before allocating for the new batch size.
        if (self.ibuf_packed_device is not None):
            self.ibuf_packed_device = None
        if (self.obuf_packed_device is not None):
            self.obuf_packed_device = None
        cacheable = {'alveo': False, 'zynq-iodma': True}[self.platform]
        self.ibuf_packed_device = []
        self.obuf_packed_device = []
        self.obuf_packed = []
        for i in range(self.num_inputs):
            new_packed_ibuf = allocate(shape=self.ishape_packed(i), dtype=np.uint8, cacheable=cacheable)
            self.ibuf_packed_device.append(new_packed_ibuf)
        for o in range(self.num_outputs):
            new_packed_obuf = allocate(shape=self.oshape_packed(o), dtype=np.uint8, cacheable=cacheable)
            self.obuf_packed_device.append(new_packed_obuf)
            self.obuf_packed.append(np.empty_like(new_packed_obuf))

    def fold_input(self, ibuf_normal, ind=0):
        """Reshape a normal-layout input array into its folded layout."""
        assert (ibuf_normal.shape == self.ishape_normal(ind))
        ibuf_folded = ibuf_normal.reshape(self.ishape_folded(ind))
        return ibuf_folded

    def pack_input(self, ibuf_folded, ind=0):
        """Pack a folded input array into the hardware byte layout."""
        ibuf_packed = finnpy_to_packed_bytearray(ibuf_folded, self.idt(ind), reverse_endian=True, reverse_inner=True, fast_mode=True)
        return ibuf_packed

    def unpack_output(self, obuf_packed, ind=0):
        """Unpack a hardware byte-layout output into the folded layout."""
        obuf_folded = packed_bytearray_to_finnpy(obuf_packed, self.odt(ind), self.oshape_folded(ind), reverse_endian=True, reverse_inner=True, fast_mode=True)
        return obuf_folded

    def unfold_output(self, obuf_folded, ind=0):
        """Reshape a folded output array into its normal layout."""
        obuf_normal = obuf_folded.reshape(self.oshape_normal(ind))
        return obuf_normal

    def copy_input_data_to_device(self, data, ind=0):
        """Copy packed input data into the device buffer and flush caches."""
        np.copyto(self.ibuf_packed_device[ind], data)
        self.ibuf_packed_device[ind].flush()

    def copy_output_data_from_device(self, data, ind=0):
        """Invalidate caches and copy packed output data out of the device buffer."""
        self.obuf_packed_device[ind].invalidate()
        np.copyto(data, self.obuf_packed_device[ind])

    def execute_on_buffers(self, asynch=False, batch_size=None):
        """Kick off DMA transfers on the already-filled device buffers.

        With ``asynch=True`` returns immediately; call wait_until_finished()
        before reading outputs. ``batch_size`` may be reduced below the
        allocated batch size for a partial run.
        """
        if (batch_size is None):
            batch_size = self.batch_size
        assert (batch_size <= self.batch_size), 'Specified batch_size is too large.'
        if (self.platform == 'zynq-iodma'):
            for o in range(self.num_outputs):
                # Bit 2 of the control register = idle.
                assert ((self.odma[o].read(0) & 4) != 0), ('Output DMA %d is not idle' % o)
            # Register map: 0x10 buffer address, 0x1C transfer count, 0x0 start.
            for (iwdma, iwbuf, iwdma_name) in self.external_weights:
                iwdma.write(16, iwbuf.device_address)
                iwdma.write(28, batch_size)
                iwdma.write(0, 1)
            for o in range(self.num_outputs):
                self.odma[o].write(16, self.obuf_packed_device[o].device_address)
                self.odma[o].write(28, batch_size)
                self.odma[o].write(0, 1)
            for i in range(self.num_inputs):
                self.idma[i].write(16, self.ibuf_packed_device[i].device_address)
                self.idma[i].write(28, batch_size)
                self.idma[i].write(0, 1)
        elif (self.platform == 'alveo'):
            for o in range(self.num_outputs):
                assert (self.odma_handle[o] is None), ('Output DMA %d is already running' % o)
            for i in range(self.num_inputs):
                self.idma[i].start(self.ibuf_packed_device[i], batch_size)
            for (iwdma, iwbuf, iwdma_name) in self.external_weights:
                iwdma.start(iwbuf, batch_size)
            for o in range(self.num_outputs):
                self.odma_handle[o] = self.odma[o].start(self.obuf_packed_device[o], batch_size)
        else:
            raise Exception(('Unrecognized platform: %s' % self.platform))
        if (asynch is False):
            self.wait_until_finished()

    def wait_until_finished(self):
        """Block until all output DMAs report completion."""
        if (self.platform == 'zynq-iodma'):
            for o in range(self.num_outputs):
                # Poll bit 1 (done) of the control register.
                status = self.odma[o].read(0)
                while ((status & 2) == 0):
                    status = self.odma[o].read(0)
        elif (self.platform == 'alveo'):
            assert all([(x is not None) for x in self.odma_handle]), 'No odma_handle to wait on'
            for o in range(self.num_outputs):
                self.odma_handle[o].wait()
                self.odma_handle[o] = None
        else:
            raise Exception(('Unrecognized platform: %s' % self.platform))

    def execute(self, input_npy):
        """End-to-end run: fold/pack inputs, execute, unpack/unfold outputs.

        Accepts a single array or a list of arrays; returns a single array
        when the accelerator has one output, else a list.
        """
        if (not (type(input_npy) is list)):
            input_npy = [input_npy]
        assert (self.num_inputs == len(input_npy)), 'Not all accelerator inputs are specified.'
        for i in range(self.num_inputs):
            ibuf_folded = self.fold_input(input_npy[i], ind=i)
            ibuf_packed = self.pack_input(ibuf_folded, ind=i)
            self.copy_input_data_to_device(ibuf_packed, ind=i)
        self.execute_on_buffers()
        outputs = []
        for o in range(self.num_outputs):
            self.copy_output_data_from_device(self.obuf_packed[o], ind=o)
            obuf_folded = self.unpack_output(self.obuf_packed[o], ind=o)
            obuf_normal = self.unfold_output(obuf_folded, ind=o)
            outputs.append(obuf_normal)
        if (self.num_outputs == 1):
            return outputs[0]
        else:
            return outputs

    def throughput_test(self):
        """Time a raw accelerator run plus each host-side data-movement step.

        Returns a dict of runtimes, bandwidths and throughput figures.
        """
        res = {}
        start = time.time()
        self.execute_on_buffers()
        end = time.time()
        runtime = (end - start)
        res['runtime[ms]'] = (runtime * 1000)
        res['throughput[images/s]'] = (self.batch_size / runtime)
        total_in = 0
        for i in range(self.num_inputs):
            total_in += np.prod(self.ishape_packed(i))
        res['DRAM_in_bandwidth[MB/s]'] = ((total_in * 1e-06) / runtime)
        total_out = 0
        for o in range(self.num_outputs):
            total_out += np.prod(self.oshape_packed(o))
        res['DRAM_out_bandwidth[MB/s]'] = ((total_out * 1e-06) / runtime)
        for (iwdma, iwbuf, iwdma_name) in self.external_weights:
            res[('DRAM_extw_%s_bandwidth[MB/s]' % iwdma_name)] = (((self.batch_size * np.prod(iwbuf.shape)) * 1e-06) / runtime)
        if (self.platform == 'zynq-iodma'):
            res['fclk[mhz]'] = Clocks.fclk0_mhz
        elif (self.platform == 'alveo'):
            res['fclk[mhz]'] = self.clock_dict['clock0']['frequency']
        res['batch_size'] = self.batch_size
        # Time the host-side steps on a random input of the right dtype.
        input_npy = gen_finn_dt_tensor(self.idt(), self.ishape_normal())
        if (self.idt() == DataType['UINT8']):
            input_npy = input_npy.astype(np.uint8)
        elif (self.idt() == DataType['INT8']):
            input_npy = input_npy.astype(np.int8)
        start = time.time()
        ibuf_folded = self.fold_input(input_npy)
        end = time.time()
        runtime = (end - start)
        res['fold_input[ms]'] = (runtime * 1000)
        start = time.time()
        ibuf_packed = self.pack_input(ibuf_folded)
        end = time.time()
        runtime = (end - start)
        res['pack_input[ms]'] = (runtime * 1000)
        start = time.time()
        self.copy_input_data_to_device(ibuf_packed)
        end = time.time()
        runtime = (end - start)
        res['copy_input_data_to_device[ms]'] = (runtime * 1000)
        start = time.time()
        self.copy_output_data_from_device(self.obuf_packed[0])
        end = time.time()
        runtime = (end - start)
        res['copy_output_data_from_device[ms]'] = (runtime * 1000)
        start = time.time()
        obuf_folded = self.unpack_output(self.obuf_packed[0])
        end = time.time()
        runtime = (end - start)
        res['unpack_output[ms]'] = (runtime * 1000)
        start = time.time()
        self.unfold_output(obuf_folded)
        end = time.time()
        runtime = (end - start)
        res['unfold_output[ms]'] = (runtime * 1000)
        return res
def mean_absolute_error(y_true, y_pred):
    """Row-wise mean absolute error (averaged over axis 1).

    NOTE(review): relies on a module-level `mean` with an `axis` kwarg
    (numpy-style) imported elsewhere in the file — confirm at file top.
    """
    return mean(abs(y_true - y_pred), axis=1)
@MODELS.register_module()  # NOTE(review): decorator was garbled to a bare `_module()` in the source; restored per mmdet convention — confirm the registry name.
class CascadeRCNN(TwoStageDetector):
    """Implementation of `Cascade R-CNN: Delving into High Quality Object
    Detection <https://arxiv.org/abs/1712.00726>`_"""
    # FIX: the class docstring was unterminated in the source (syntax error);
    # the URL portion is restored from the paper reference.

    def __init__(self, backbone: ConfigType, neck: OptConfigType=None, rpn_head: OptConfigType=None, roi_head: OptConfigType=None, train_cfg: OptConfigType=None, test_cfg: OptConfigType=None, data_preprocessor: OptConfigType=None, init_cfg: OptMultiConfig=None) -> None:
        super().__init__(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, data_preprocessor=data_preprocessor, init_cfg=init_cfg)
class BackendPytorchNative(backend.Backend):
    """Inference backend that runs a serialized PyTorch model natively.

    NOTE(review): `load()` derives input/output names from
    ``self.model.graph`` (initializer/input/output), which is the ONNX
    graph API — a plain ``torch.load``-ed nn.Module normally has no such
    attribute. Confirm the expected serialized-model format.
    """
    def __init__(self):
        super(BackendPytorchNative, self).__init__()
        self.sess = None
        self.model = None
        # Prefer the first CUDA device when available.
        self.device = ('cuda:0' if torch.cuda.is_available() else 'cpu')
    def version(self):
        """Backend version string (the torch version)."""
        return torch.__version__
    def name(self):
        """Unique backend name."""
        return 'pytorch-native'
    def image_format(self):
        """Expected image layout."""
        return 'NCHW'
    def load(self, model_path, inputs=None, outputs=None):
        """Load the model from disk, resolve I/O names, move to device."""
        # map_location keeps tensors on CPU regardless of how they were saved.
        self.model = torch.load(model_path, map_location=(lambda storage, loc: storage))
        self.model.eval()
        if inputs:
            self.inputs = inputs
        else:
            self.inputs = []
            # Graph inputs that are initializers are weights, not real inputs.
            initializers = set()
            for i in self.model.graph.initializer:
                initializers.add(i.name)
            for i in self.model.graph.input:
                if (i.name not in initializers):
                    self.inputs.append(i.name)
        if outputs:
            self.outputs = outputs
        else:
            self.outputs = []
            for i in self.model.graph.output:
                self.outputs.append(i.name)
        self.model = self.model.to(self.device)
        return self
    def predict(self, feed):
        """Run inference on the first (assumed only) entry in `feed`."""
        key = [key for key in feed.keys()][0]
        feed[key] = torch.tensor(feed[key]).float().to(self.device)
        with torch.no_grad():
            output = self.model(feed[key])
        return output
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.