code stringlengths 101 5.91M |
|---|
(signature, parallel=True, cache=True, nogil=False)
def weighted_average_product_PxP_C(config1, config2, weights, q1, q2):
    """Weighted pairwise state-frequency tensor between two configurations.

    For each site pair (m, n), the weight of every sample b is accumulated
    into the bin indexed by the categorical states config1[b, m] and
    config2[b, n]; the result is normalized by the total weight, so each
    (m, n) slice sums to 1.

    NOTE(review): the `prange` loops and the stray tuple on the line above
    this function suggest a stripped numba @njit(parallel=True, ...)
    decorator — confirm against the original source.  `curr_float` is a
    module-level dtype not visible here.

    Args:
        config1: (B, M) integer array with states in [0, q1).
        config2: (B, N) integer array with states in [0, q2).
        weights: (B,) per-sample weights.
        q1, q2: number of categorical states for config1 / config2 sites.

    Returns:
        (M, N, q1, q2) array of normalized weighted co-occurrence counts.
    """
    B = config1.shape[0]
    M = config1.shape[1]
    N = config2.shape[1]
    out = np.zeros((M, N, q1, q2), dtype=curr_float)
    for m in prange(M):
        for n in prange(N):
            # Innermost loop accumulates per-sample weights into state bins.
            for b in prange(B):
                out[(m, n, config1[(b, m)], config2[(b, n)])] += weights[b]
    out /= weights.sum()
    return out
def _graphsLoad(gs: List[Graph], add: bool) -> List[Graph]:
    """Unwrap *gs* and load each contained graph, preserving order."""
    return [_graphLoad(item, name=None, add=add) for item in _unwrap(gs)]
def unflatten(arr, shape):
    """Reassemble a uint64 array of *shape* from split 32-bit halves.

    The first ``size`` entries of *arr* carry the high 32 bits and the next
    ``size`` entries the low 32 bits of each value.  The unconsumed tail of
    *arr* is returned alongside an empty tuple, matching the original
    (value, (rest, ())) convention.
    """
    size = np.prod(shape)
    high_bits = np.uint64(arr[:size])
    low_bits = np.uint64(arr[size:(2 * size)])
    combined = (high_bits << 32) | low_bits
    remainder = arr[(2 * size):]
    return (np.reshape(combined, shape), (remainder, ()))
def generate_pretrained_model():
    """Train a ResNet-v2 baseline on CIFAR-10 and save it to 'baseline_model'.

    Relies on module-level names not visible here: num_classes, depth,
    batch_size, epochs, lr_schedule, resnet_v2, cifar10,
    LearningRateScheduler, ReduceLROnPlateau, tensorflow, np.
    """
    ((x_train, y_train), (x_test, y_test)) = cifar10.load_data()
    input_shape = x_train.shape[1:]
    # Scale pixels to [0, 1], then center using the *training* mean only
    # (the test set must not influence normalization statistics).
    x_train = (x_train.astype('float32') / 255)
    x_test = (x_test.astype('float32') / 255)
    x_train_mean = np.mean(x_train, axis=0)
    x_train -= x_train_mean
    x_test -= x_train_mean
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    print('y_train shape:', y_train.shape)
    # One-hot encode integer labels for categorical cross-entropy.
    y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes)
    y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)
    model = resnet_v2(input_shape=input_shape, depth=depth)
    model.compile(loss='categorical_crossentropy', optimizer=tensorflow.keras.optimizers.Adam(learning_rate=0.01), metrics=['accuracy'])
    model.summary()
    # Combine a fixed LR schedule with plateau-based reduction.
    lr_scheduler = LearningRateScheduler(lr_schedule)
    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=5e-07)
    callbacks = [lr_reducer, lr_scheduler]
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test), shuffle=True, callbacks=callbacks)
    scores = model.evaluate(x_test, y_test, verbose=1)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])
    model.save('baseline_model')
def _find_quantized_op_num(module, op_qcfgs, prefix='', op_count=0):
for (name_tmp, child_tmp) in module.named_children():
op_name = (((prefix + '.') + name_tmp) if (prefix != '') else name_tmp)
if ((op_name in op_qcfgs.keys()) and (type(child_tmp) != torch.quantization.QuantStub)):
op_count += 1
else:
op_count = _find_quantized_op_num(child_tmp, op_qcfgs, op_name, op_count)
return op_count |
class AdditiveKernels(KernelBase):
    """Kernel formed as the pointwise sum of two component kernels."""

    def __init__(self, k1, k2):
        super().__init__()
        # Keep both components accessible so they can be inspected/reused.
        self.k1 = k1
        self.k2 = k2

    def __call__(self, x, y):
        first = self.k1(x, y)
        second = self.k2(x, y)
        return first + second
def num_prompts(data):
    """Count distinct prompts, where a prompt is the combination of
    columns 2, 3 and 4 of a row (concatenated/summed with +)."""
    return len({row[2] + row[3] + row[4] for row in data})
class Object3d(BEVBox3D):
    """BEV box that carries extra per-box annotations from a raw *box* dict."""

    def __init__(self, center, size, yaw, name, box):
        # The last positional argument is fixed at -1.0 (presumably a
        # placeholder confidence — confirm against BEVBox3D's signature).
        super().__init__(center, size, yaw, name, (- 1.0))
        self.occlusion = box['occlusion']
        self.quaternion = box['quaternion']
        self.coords_3d = box['3d_coord']
        self.coords_2d = box['2d_coord']

    def generate_corners3d(self):
        # Corners come precomputed in the annotation; just hand them back.
        return self.coords_3d
def points_to_bev(points, voxel_size, coors_range, with_reflectivity=False, density_norm_num=16, max_voxels=40000):
    """Convert a point cloud into a bird's-eye-view voxel feature map.

    Args:
        points: point array; dtype drives all derived array dtypes.
        voxel_size: 3-element voxel edge lengths (x, y, z).
        coors_range: 6-element [x_min, y_min, z_min, x_max, y_max, z_max].
        with_reflectivity: adds one extra channel to the map, filled by the
            kernel (semantics live in _points_to_bevmap_reverse_kernel —
            confirm there).
        density_norm_num: accepted but not used in this function body.
        max_voxels: cap forwarded to the voxelization kernel.

    Returns:
        bev_map of shape (n_z_slices + 1 [+ 1 if with_reflectivity], n_y, n_x).
    """
    if (not isinstance(voxel_size, np.ndarray)):
        voxel_size = np.array(voxel_size, dtype=points.dtype)
    if (not isinstance(coors_range, np.ndarray)):
        coors_range = np.array(coors_range, dtype=points.dtype)
    voxelmap_shape = ((coors_range[3:] - coors_range[:3]) / voxel_size)
    voxelmap_shape = tuple(np.round(voxelmap_shape).astype(np.int32).tolist())
    # Reverse (x, y, z) -> (z, y, x) so axis 0 indexes height slices.
    voxelmap_shape = voxelmap_shape[::(- 1)]
    # -1 marks "no voxel allocated yet" for the kernel.
    coor_to_voxelidx = (- np.ones(shape=voxelmap_shape, dtype=np.int32))
    bev_map_shape = list(voxelmap_shape)
    # One extra leading channel beyond the height slices (filled by the
    # kernel; presumably a density channel — TODO confirm).
    bev_map_shape[0] += 1
    # Lower bound of each height slice; endpoint=False keeps lower edges only.
    height_lowers = np.linspace(coors_range[2], coors_range[5], voxelmap_shape[0], endpoint=False)
    if with_reflectivity:
        bev_map_shape[0] += 1
    bev_map = np.zeros(shape=bev_map_shape, dtype=points.dtype)
    # The kernel mutates bev_map and coor_to_voxelidx in place.
    _points_to_bevmap_reverse_kernel(points, voxel_size, coors_range, coor_to_voxelidx, bev_map, height_lowers, with_reflectivity, max_voxels)
    return bev_map
def sigmoid_2(x, mu, sd):
    """Sigmoid of the standardized input, with its first and second
    derivatives with respect to x.

    Returns the tuple (sigmoid, d/dx sigmoid, d^2/dx^2 sigmoid) where the
    sigmoid is evaluated at (x - mu) / sd.
    """
    z = ((x - mu) / sd)
    sig = torch.sigmoid(z)
    one_minus = 1 - sig
    first_deriv = ((1 / sd) * sig) * one_minus
    second_deriv = (((1 / (sd ** 2)) * sig) * one_minus) * (1 - (2 * sig))
    return (sig, first_deriv, second_deriv)
def generate_random_targets(labels: Tensor, num_classes: int) -> Tensor:
    """For each label, pick a uniformly random class index different from it.

    Per-class scores are drawn from U[0, 1); the true class's score is then
    forced to zero so it (almost surely) cannot win the argmax.
    """
    scores = torch.rand(len(labels), num_classes, device=labels.device, dtype=torch.float)
    scores.scatter_(1, labels.unsqueeze(-1), 0)
    return scores.argmax(1)
def get_local_size() -> int:
    """Number of processes in the per-node group LOCAL_PROCESS_GROUP,
    falling back to 1 when torch.distributed is unavailable or not
    initialized."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size(group=LOCAL_PROCESS_GROUP)
    return 1
class Prior(nn.Module):
    """Abstract base class for prior distributions.

    Subclasses must implement sample(), forward() (the log-density
    computation) and __str__().
    """

    def __init__(self):
        super().__init__()

    def sample(self, **kwargs):
        """Draw samples from the prior; must be overridden."""
        raise NotImplementedError

    def log_p(self, input, **kwargs):
        """Log-density of *input* under the prior, delegated to forward().

        Bug fix: the original body was ``return self.forward(z)``, which
        referenced an undefined name ``z`` (NameError) instead of the
        ``input`` argument, and silently dropped **kwargs.
        """
        return self.forward(input, **kwargs)

    def forward(self, input, **kwargs):
        """Compute the log-density; must be overridden."""
        raise NotImplementedError

    def __str__(self):
        raise NotImplementedError
class Server(ABC):
    """Federated-learning server base class.

    Owns the global model, a ServerSocket to all clients, and the run's
    bookkeeping (loss/accuracy/time/model-size histories).  Supports an
    "adaptive" mode driven by a control object and scheduler, resuming a run
    from saved state, and sparse parameter exchange once the model's density
    drops below a configured threshold.
    """

    def __init__(self, config, network_config, model, test_loader, seed, optimizer_class: Type, optimizer_params: dict, use_adaptive, use_evaluate=True, lr_scheduler_class=None, lr_scheduler_params=None, control=None, control_scheduler=None, resume=False, init_time_offset=0.0):
        self.config = config
        self.use_adaptive = use_adaptive
        self.use_evaluate = use_evaluate
        # Adaptive and conventional modes run for different numbers of rounds.
        self.max_round = (self.config.MAX_ROUND_ADAPTIVE if use_adaptive else self.config.MAX_ROUND_CONVENTIONAL_FL)
        if use_adaptive:
            # Adaptive mode cannot run without its control machinery.
            assert (control is not None)
            assert (control_scheduler is not None)
            self.control = control
            self.control_scheduler = control_scheduler
        self.model = model
        self.test_loader = test_loader
        self.socket = ServerSocket(network_config.SERVER_ADDR, network_config.SERVER_PORT, config.NUM_CLIENTS)
        save_dir_name = self.get_save_dir_name()
        self.save_path = os.path.join('results', 'exp_{}'.format(config.EXP_NAME), save_dir_name, 'server')
        exp_config = ExpConfig(self.config.EXP_NAME, save_dir_name, seed, self.config.CLIENT_BATCH_SIZE, self.config.NUM_LOCAL_UPDATES, optimizer_class, optimizer_params, lr_scheduler_class, lr_scheduler_params, use_adaptive)
        # Histories are populated by initialize(): loaded from disk when
        # resuming, or fresh lists otherwise.
        self.list_loss = None
        self.list_acc = None
        self.list_time_stamp = None
        self.list_model_size = None
        self.start_time = None
        self.init_time_offset = init_time_offset
        self.round = None
        self.eval_thread = None
        self.client_is_sparse = False
        self.terminate = False
        self.initialize(exp_config, resume)

    def get_init_extra_params(self) -> List[tuple]:
        # Subclass hook: per-client extra initialization parameters.
        # NOTE(review): the base implementation returns None despite the
        # annotation; initialize() indexes into the result, so subclasses
        # are expected to override this.
        pass

    def initialize(self, exp_config, resume):
        """Handshake with all clients; either resume from disk or start fresh."""
        list_extra_params = self.get_init_extra_params()
        self.socket.wait_for_connections()
        if resume:
            print('Resuming server...')
            self.list_loss = load(os.path.join(self.save_path, 'loss.pt'))
            self.list_acc = load(os.path.join(self.save_path, 'accuracy.pt'))
            self.list_time_stamp = load(os.path.join(self.save_path, 'time.pt'))
            self.list_model_size = load(os.path.join(self.save_path, 'model_size.pt'))
            self.model = load(os.path.join(self.save_path, 'model.pt'))
            num_loss_acc = len(self.list_loss)
            assert (len(self.list_acc) == num_loss_acc)
            num_evals = len(self.list_time_stamp)
            assert (len(self.list_model_size) == num_evals)
            # If the async evaluation thread was interrupted before appending
            # loss/acc for the last recorded timestamp, redo that evaluation.
            if ((num_evals - num_loss_acc) == 1):
                (loss, acc) = self.model.evaluate(self.test_loader)
                self.list_loss.append(loss)
                self.list_acc.append(acc)
            elif (num_evals != num_loss_acc):
                raise RuntimeError('Cannot resume')
            self.round = ((num_evals - 1) * self.config.EVAL_DISP_INTERVAL)
            assert (self.round >= 0)
            # Shift start_time so elapsed time continues from the last stamp.
            self.start_time = (timer() - self.list_time_stamp[(- 1)])
            self.check_client_to_sparse()
            resume_param = (True, (self.round + 1), self.client_is_sparse)
            list_params = [(idx, exp_config, self.model, list_extra_params[idx], resume_param) for idx in range(self.config.NUM_CLIENTS)]
            resume_msgs_to_client = [ServerToClientInitMessage(init_params) for init_params in list_params]
            self.socket.init_connections(resume_msgs_to_client)
            self.round += 1
            print('Server resumed')
            print(self)
        else:
            self.list_loss = []
            self.list_acc = []
            self.list_time_stamp = []
            self.list_model_size = []
            self.start_time = (timer() + self.init_time_offset)
            self.round = 0
            mkdir_save(self.model, os.path.join(self.save_path, 'init_model.pt'))
            self.model.eval()
            # (False, 0, False) = (not resuming, start round 0, dense clients).
            list_init_params = [(idx, exp_config, self.model, list_extra_params[idx], (False, 0, False)) for idx in range(self.config.NUM_CLIENTS)]
            init_msgs_to_client = [ServerToClientInitMessage(init_params) for init_params in list_init_params]
            self.socket.init_connections(init_msgs_to_client)
            print('Server initialized')
            print(self)

    def get_save_dir_name(self):
        # Encode the run's key hyper-parameters into the results directory name.
        if (not self.use_adaptive):
            return 'conventional'
        else:
            (mdd_100, chl) = ((100 * self.config.MAX_DEC_DIFF), self.config.ADJ_HALF_LIFE)
            lrhl = (self.config.LR_HALF_LIFE if hasattr(self.config, 'LR_HALF_LIFE') else None)
            # MAX_DEC_DIFF must be a multiple of 0.01 so the name is exact.
            assert ((mdd_100 - int(mdd_100)) == 0)
            return 'mdd{}_chl{}_lrhl{}'.format(int(mdd_100), lrhl, chl)

    def calc_model_params(self, display=False):
        """Return the count of unmasked parameters; optionally print per-layer density."""
        sum_param_in_use = 0
        sum_all_param = 0
        for (layer, layer_prefix) in zip(self.model.param_layers, self.model.param_layer_prefixes):
            num_bias = (0 if (layer.bias is None) else layer.bias.nelement())
            # Biases are never masked; only the weight mask shrinks the count.
            layer_param_in_use = (layer.mask.sum().int().item() + num_bias)
            layer_all_param = (layer.mask.nelement() + num_bias)
            sum_param_in_use += layer_param_in_use
            sum_all_param += layer_all_param
            if display:
                print('\t{} remaining: {}/{} = {}'.format(layer_prefix, layer_param_in_use, layer_all_param, (layer_param_in_use / layer_all_param)))
        if display:
            print('\tTotal: {}/{} = {}'.format(sum_param_in_use, sum_all_param, (sum_param_in_use / sum_all_param)))
        return sum_param_in_use

    def adjust_model(self, display=True):
        """Run the adaptive control algorithm to adjust the model's masks."""
        print('Running control algorithm')
        alg_start = timer()
        max_dec_diff = self.control_scheduler.max_dec_diff(self.round)
        self.control.adjust(max_dec_diff, None)
        print('Algorithm completed in {}s'.format((timer() - alg_start)))
        if display:
            print('New params:')
            self.calc_model_params(display=True)
        # Adjustment may have reduced density past the sparse threshold.
        self.check_client_to_sparse()

    # NOTE(review): '_grad()' looks like the residue of a stripped decorator
    # on the following method (e.g. @torch.no_grad()); '_grad' is not defined
    # in this file — confirm against the original source.
    _grad()
    def merge_accumulate_client_update(self, list_num_proc, list_state_dict, lr):
        """Average client updates into the server model, weighted by the
        number of samples each client processed.

        In adaptive adjustment rounds, 'extra.'-prefixed state-dict entries
        carry values for masked-out positions and are routed to the control
        object instead of the model.
        """
        total_num_proc = sum(list_num_proc)
        dict_keys = list_state_dict[0].keys()
        # All clients must report the same set of parameters.
        for state_dict in list_state_dict[1:]:
            assert (state_dict.keys() == dict_keys)
        if (self.use_adaptive and self.is_adj_round()):
            prefix = 'extra.'
            for state_dict in list_state_dict:
                del_list = []
                for (key, param) in state_dict.items():
                    if (key[:len(prefix)] == prefix):
                        sgrad_key = key[len(prefix):]
                        # Scatter the compact values back into a dense tensor
                        # at the positions where the mask is zero.
                        mask_0 = (self.model.get_mask_by_name(sgrad_key) == 0.0)
                        dense_sgrad = torch.zeros_like(mask_0, dtype=torch.float)
                        dense_sgrad.masked_scatter_(mask_0, param)
                        self.control.accumulate(sgrad_key, dense_sgrad)
                        del_list.append(key)
                # Remove the extra entries so only model keys are merged below.
                for del_key in del_list:
                    del state_dict[del_key]
        server_state_dict = self.model.state_dict()
        for key in dict_keys:
            server_param = server_state_dict[key]
            avg_inc_val = None
            for (num_proc, state_dict) in zip(list_num_proc, list_state_dict):
                if (state_dict[key].size() != server_state_dict[key].size()):
                    # Client sent a masked_select-compressed tensor; expand it
                    # back to dense before computing the increment.
                    mask = self.model.get_mask_by_name(key)
                    inc_val = (server_param.masked_scatter(mask, state_dict[key]) - server_param)
                else:
                    inc_val = (state_dict[key] - server_param)
                if (avg_inc_val is None):
                    avg_inc_val = ((num_proc / total_num_proc) * inc_val)
                else:
                    avg_inc_val += ((num_proc / total_num_proc) * inc_val)
                if (self.use_adaptive and (key in dict(self.model.named_parameters()).keys())):
                    # Feed squared per-client gradient estimates to the control
                    # algorithm (inc_val / lr approximates the gradient sum).
                    self.control.accumulate(key, (((inc_val / lr) ** 2) * num_proc))
            # In-place update with the weighted-average increment.
            server_param.add_(avg_inc_val)

    def check_termination(self) -> bool:
        # Subclasses may override with a custom stopping criterion.
        return self.terminate

    def evaluate(self):
        """Launch asynchronous evaluation of a model snapshot and record the
        timestamp and current model size."""
        if (self.eval_thread is not None):
            # Only one evaluation thread may run at a time.
            self.eval_thread.join()
        # Evaluate a deep copy so training can mutate the live model meanwhile.
        t = Thread(target=eval_model_async, args=(deepcopy(self.model).evaluate, self.test_loader, self.list_loss, self.list_acc))
        t.start()
        self.eval_thread = t
        elapsed_time = (timer() - self.start_time)
        self.list_time_stamp.append(elapsed_time)
        model_size = self.calc_model_params(display=False)
        self.list_model_size.append(model_size)
        len_loss = len(self.list_loss)
        len_acc = len(self.list_acc)
        assert (len_loss == len_acc)
        # The loss/acc printed here lag one evaluation behind, since the
        # thread started above has not appended its results yet.
        print('Evaluation at round #{}. Loss/acc (at round {}) = {}/{}. Elapsed time = {}'.format(self.round, (((len_acc - 1) * self.config.EVAL_DISP_INTERVAL) if (len_acc > 0) else 'NaN'), (self.list_loss[(- 1)] if (len_acc > 0) else 'NaN'), (self.list_acc[(- 1)] if (len_acc > 0) else 'NaN'), elapsed_time))
        self.save_exp()

    def is_adj_round(self, rd=None) -> bool:
        # In adaptive mode, adjustment happens every ADJ_INTERVAL rounds
        # (never at round 0).
        if (rd is None):
            rd = self.round
        return (self.use_adaptive and (rd > 0) and ((rd % self.config.ADJ_INTERVAL) == 0))

    def is_one_before_adj_round(self) -> bool:
        return self.is_adj_round((self.round + 1))

    def check_client_to_sparse(self):
        # Once density drops below the threshold, clients switch to sparse
        # exchange permanently (the flag is never reset).
        if ((not self.client_is_sparse) and (self.model.density() <= self.config.TO_SPARSE_THR)):
            self.client_is_sparse = True

    def clean_dict_to_client(self) -> dict:
        """Copy of the model state dict, with DenseLinear biases reshaped to
        column vectors when clients run sparse."""
        clean_state_dict = copy_dict(self.model.state_dict())
        if self.client_is_sparse:
            for (layer, prefix) in zip(self.model.param_layers, self.model.param_layer_prefixes):
                key = (prefix + '.bias')
                if (isinstance(layer, DenseLinear) and (key in clean_state_dict.keys())):
                    clean_state_dict[key] = clean_state_dict[key].view(((- 1), 1))
        return clean_state_dict

    # NOTE(review): another apparent stripped-decorator residue (see the
    # earlier '_grad()' above) — confirm against the original source.
    _grad()
    def process_state_dict_to_client(self) -> dict:
        """Build the state dict broadcast to clients, compressed when sparse.

        Adjustment rounds send masked weights as torch sparse tensors (the
        clients need the indices too); other rounds send only the surviving
        values via masked_select.
        """
        clean_state_dict = self.clean_dict_to_client()
        if (not self.client_is_sparse):
            return clean_state_dict
        if self.is_adj_round():
            for (layer, prefix) in zip(self.model.prunable_layers, self.model.prunable_layer_prefixes):
                key_w = (prefix + '.weight')
                if (key_w in clean_state_dict.keys()):
                    weight = clean_state_dict[key_w]
                    w_mask = self.model.get_mask_by_name(key_w)
                    sparse_weight = (weight * w_mask).view(weight.size(0), (- 1)).to_sparse()
                    clean_state_dict[key_w] = sparse_weight
        else:
            for prefix in self.model.prunable_layer_prefixes:
                key_w = (prefix + '.weight')
                if (key_w in clean_state_dict.keys()):
                    clean_state_dict[key_w] = clean_state_dict[key_w].masked_select(self.model.get_mask_by_name(key_w))
        return clean_state_dict

    def save_exp(self):
        # Persist histories and the current model so the run can be resumed.
        mkdir_save(self.list_loss, os.path.join(self.save_path, 'loss.pt'))
        mkdir_save(self.list_acc, os.path.join(self.save_path, 'accuracy.pt'))
        mkdir_save(self.list_time_stamp, os.path.join(self.save_path, 'time.pt'))
        mkdir_save(self.list_model_size, os.path.join(self.save_path, 'model_size.pt'))
        mkdir_save(self.model, os.path.join(self.save_path, 'model.pt'))

    def main(self):
        """One communication round: gather client updates, merge, optionally
        evaluate/adjust, then broadcast the new state. Returns True when the
        run has terminated."""
        assert (not self.terminate)
        msgs = self.socket.recv_update_msg_from_all()
        list_lr = [msg.lr for msg in msgs]
        list_num_proc = [msg.num_processed for msg in msgs]
        list_state_dict = [msg.state_dict for msg in msgs]
        lr = list_lr[0]
        # All clients must have used the same learning rate this round.
        for client_lr in list_lr[1:]:
            assert (client_lr == lr)
        self.merge_accumulate_client_update(list_num_proc, list_state_dict, lr)
        if (self.use_evaluate and ((self.round % self.config.EVAL_DISP_INTERVAL) == 0)):
            self.evaluate()
        if self.is_adj_round():
            self.adjust_model()
        terminate = self.check_termination()
        if (self.round >= (self.max_round - 1)):
            terminate = True
        state_dict_to_client = self.process_state_dict_to_client()
        # Warn clients one round ahead of an adjustment round.
        client_adj = self.is_one_before_adj_round()
        to_sparse = self.client_is_sparse
        msg_to_clients = ServerToClientUpdateMessage((state_dict_to_client, client_adj, to_sparse, terminate))
        self.socket.send_msg_to_all(msg_to_clients)
        self.round += 1
        if terminate:
            # Drain client ACKs, finish any pending evaluation, persist state
            # and shut the socket down.
            self.socket.recv_ack_msg_from_all()
            self.socket.close()
            self.eval_thread.join()
            self.save_exp()
            self.terminate = True
            print('Task completed')
        return terminate

    def __repr__(self):
        return 'Experiment = {}'.format(self.config.EXP_NAME)
def equishwidth(elem, spec, specerr, refspec=None):
    """Equivalent-width-style statistic for element *elem* over its windows.

    Within each spectral window of the element, sums (1 - spec/refspec) — or
    (1 - spec) when no reference spectrum is supplied — weighted by the
    element-window values over the squared errors, scaled by the mean
    wavelength-per-pixel of the window, then normalized by the total weight.

    Args:
        elem: element name passed to the window helpers (read, waveregions).
        spec: spectrum sampled on the apStar wavelength grid.
        specerr: per-pixel uncertainties of *spec*.
        refspec: optional reference spectrum; defaults to zeros, which
            selects the (1 - spec) branch below.
    """
    if (refspec is None):
        refspec = numpy.zeros_like(spec)
    win = read(elem, apStarWavegrid=True)
    (startindxs, endindxs) = waveregions(elem, asIndex=True, pad=0)
    lams = apStarWavegrid()
    startlams = lams[startindxs]
    endlams = lams[endindxs]
    outval = 0.0
    norm = 0.0
    for (startindx, endindx, startlam, endlam) in zip(startindxs, endindxs, startlams, endlams):
        # Inverse-variance weights scaled by the element-window strength.
        norm += numpy.sum((win[startindx:endindx] / (specerr[startindx:endindx] ** 2.0)))
        if (not numpy.all((refspec == 0.0))):
            outval += (((endlam - startlam) / (endindx - startindx)) * numpy.sum(((win[startindx:endindx] / (specerr[startindx:endindx] ** 2.0)) * (1.0 - (spec[startindx:endindx] / refspec[startindx:endindx])))))
        else:
            # No reference: compare the spectrum against a unity continuum.
            outval += (((endlam - startlam) / (endindx - startindx)) * numpy.sum(((win[startindx:endindx] / (specerr[startindx:endindx] ** 2.0)) * (1.0 - spec[startindx:endindx]))))
    outval /= norm
    return outval
class PyrBlock(nn.Module):
    """Two stacked pre-activation 3x3 convolution blocks; the first carries
    the stride and has its activation disabled."""

    def __init__(self, in_channels, out_channels, stride):
        super(PyrBlock, self).__init__()
        self.conv1 = pre_conv3x3_block(in_channels=in_channels, out_channels=out_channels, stride=stride, activate=False)
        self.conv2 = pre_conv3x3_block(in_channels=out_channels, out_channels=out_channels)

    def forward(self, x):
        # Simple sequential composition of the two conv blocks.
        return self.conv2(self.conv1(x))
class BasicBlock(nn.Module):
    """Residual basic block: two kxk conv-norm-act stages plus optional
    attention and drop-path, with a (possibly downsampling) shortcut."""

    def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), group_size=None, bottle_ratio=1.0, downsample='avg', linear_out=False, layers: LayerFn=None, drop_block=None, drop_path_rate=0.0):
        super(BasicBlock, self).__init__()
        layers = (layers or LayerFn())
        mid_chs = make_divisible((out_chs * bottle_ratio))
        groups = num_groups(group_size, mid_chs)
        # A real downsample path is needed whenever channels, stride or
        # dilation change between input and output.
        if ((in_chs != out_chs) or (stride != 1) or (dilation[0] != dilation[1])):
            self.shortcut = create_downsample(downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0], apply_act=False, layers=layers)
        else:
            self.shortcut = nn.Identity()
        self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0])
        # Second conv has no activation; the block activation runs after the
        # residual add in forward().
        self.conv2_kxk = layers.conv_norm_act(mid_chs, out_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block, apply_act=False)
        self.attn = (nn.Identity() if (layers.attn is None) else layers.attn(out_chs))
        self.drop_path = (DropPath(drop_path_rate) if (drop_path_rate > 0.0) else nn.Identity())
        self.act = (nn.Identity() if linear_out else layers.act(inplace=True))

    def init_weights(self, zero_init_last_bn=False):
        # Zero-initializing the last BN scale makes the block start as an
        # identity mapping (residual branch contributes nothing).
        if zero_init_last_bn:
            nn.init.zeros_(self.conv2_kxk.bn.weight)

    def forward(self, x):
        shortcut = self.shortcut(x)
        x = self.conv1_kxk(x)
        x = self.conv2_kxk(x)
        x = self.attn(x)
        x = self.drop_path(x)
        x = self.act((x + shortcut))
        return x
(scope='module')
def simple_dtype():
    """Structured NumPy dtype with explicit field offsets.

    The long-double field starts at offset 16 when the platform long double
    requires more than 4-byte alignment, otherwise at 12 (packed right after
    the f4 field).  (The stray '(scope=...)' line above this function looks
    like a stripped @pytest.fixture decorator.)
    """
    ld = np.dtype('longdouble')
    ldbl_offset = 16 if ld.alignment > 4 else 12
    return np.dtype({
        'names': ['bool_', 'uint_', 'float_', 'ldbl_'],
        'formats': ['?', 'u4', 'f4', f'f{ld.itemsize}'],
        'offsets': [0, 4, 8, ldbl_offset],
    })
class EsmModel(metaclass=DummyObject):
    """Placeholder for EsmModel used when the torch backend is absent.

    Instantiation delegates to requires_backends, which presumably raises an
    informative error about the missing 'torch' dependency — confirm against
    requires_backends.
    """
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def matthews_corrcoef(predictions, targets) -> dict:
    """Matthews correlation coefficient between targets and predictions,
    scaled to a percentage and wrapped in a metric dict."""
    score = sklearn.metrics.matthews_corrcoef(targets, predictions)
    return {'matthews_correlation': 100 * score}
class CosineSchedule(FairseqLRScheduler):
    """Cosine-annealing LR schedule with linear warmup and warm restarts.

    After warmup, the LR follows a cosine curve between max_lr and min_lr
    over a period of updates; on each restart the period may grow by t_mult
    and both bounds shrink by lr_shrink.
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if (len(args.lr) > 1):
            raise ValueError('Cannot use a fixed learning rate schedule with cosine. Consider --lr-scheduler=fixed instead.')
        warmup_end_lr = args.max_lr
        if (args.warmup_init_lr < 0):
            # Warmup starts from the base LR when no explicit init is given.
            args.warmup_init_lr = args.lr[0]
        self.min_lr = args.lr[0]
        self.max_lr = args.max_lr
        assert (self.max_lr > self.min_lr), 'max_lr must be more than lr'
        self.t_mult = args.t_mult
        self.period = args.lr_period_updates
        if (self.period <= 0):
            # Derive the period from the total update budget minus warmup.
            assert (args.max_update >= 0), 'Either --max_update or --lr-period-updates must be set'
            self.period = (args.max_update - args.warmup_updates)
        if (args.warmup_updates > 0):
            # Per-update LR increment during linear warmup.
            self.lr_step = ((warmup_end_lr - args.warmup_init_lr) / args.warmup_updates)
        else:
            self.lr_step = 1
        self.warmup_updates = args.warmup_updates
        self.lr_shrink = args.lr_shrink
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    # NOTE(review): this method takes no 'self' — it likely lost a
    # @staticmethod decorator in transcription; confirm against fairseq.
    def add_args(parser):
        """Register scheduler-specific command-line arguments."""
        parser.add_argument('--warmup-updates', default=0, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=(- 1), type=float, metavar='LR', help='initial learning rate during warmup phase; default is args.lr')
        parser.add_argument('--max-lr', required=True, type=float, metavar='LR', help='max learning rate, must be more than args.lr')
        parser.add_argument('--t-mult', default=1, type=float, metavar='LR', help='factor to grow the length of each period')
        parser.add_argument('--lr-period-updates', default=(- 1), type=float, metavar='LR', help='initial number of updates per period')

    def step(self, epoch, val_loss=None):
        """End-of-epoch hook; the LR is actually driven by step_update()."""
        super().step(epoch, val_loss)
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Set and return the LR after each optimizer update."""
        if (num_updates < self.args.warmup_updates):
            # Linear warmup phase.
            self.lr = (self.args.warmup_init_lr + (num_updates * self.lr_step))
        else:
            curr_updates = (num_updates - self.args.warmup_updates)
            if (self.t_mult != 1):
                # Closed-form index of the current (geometrically growing)
                # restart period, its length t_i, and position t_curr in it.
                i = math.floor(math.log((1 - ((curr_updates / self.period) * (1 - self.t_mult))), self.t_mult))
                t_i = ((self.t_mult ** i) * self.period)
                t_curr = (curr_updates - (((1 - (self.t_mult ** i)) / (1 - self.t_mult)) * self.period))
            else:
                # Constant period length.
                i = math.floor((curr_updates / self.period))
                t_i = self.period
                t_curr = (curr_updates - (self.period * i))
            # Shrink the LR bounds once per completed period.
            lr_shrink = (self.lr_shrink ** i)
            min_lr = (self.min_lr * lr_shrink)
            max_lr = (self.max_lr * lr_shrink)
            self.lr = (min_lr + ((0.5 * (max_lr - min_lr)) * (1 + math.cos(((math.pi * t_curr) / t_i)))))
        self.optimizer.set_lr(self.lr)
        return self.lr
class FocalLoss(nn.Module):
    """Multi-class focal loss on raw logits.

    With gamma == 0 this reduces to the standard negative log-likelihood;
    larger gamma progressively down-weights well-classified examples.
    """

    def __init__(self, gamma=0, size_average=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.size_average = size_average

    def forward(self, input, target):
        target = target.view((- 1), 1)
        log_probs = F.log_softmax(input, dim=1)
        # Pick the log-probability of each sample's true class.
        logpt = log_probs.gather(1, target).view((- 1))
        pt = logpt.exp()
        focal_weight = (1 - pt) ** self.gamma
        loss = -focal_weight * logpt
        return loss.mean() if self.size_average else loss.sum()
class CNNEncoder(BaseEncoder):
    """Convolutional encoder mapping images to a latent vector.

    Stacks Conv2d+activation layers, flattens, and projects to
    latent_dimensions — either deterministically through an fc head or via
    the variational layers supplied by BaseEncoder.
    """

    def __init__(self, latent_dimensions: int, feature_size: Iterable, variational: bool=False, channels: tuple=None, kernel_sizes: tuple=None, strides: tuple=None, paddings: tuple=None, activation=nn.LeakyReLU(), dropout=0):
        super(CNNEncoder, self).__init__(latent_dimensions, variational=variational)
        default_len = (2 if (channels is None) else len(channels))
        if (channels is None):
            channels = (1, 1)
        # Defaults (kernel 5, stride 1, padding 2) preserve spatial size.
        kernel_sizes = (kernel_sizes or ((5,) * default_len))
        strides = (strides or ((1,) * default_len))
        paddings = (paddings or ((2,) * default_len))
        self.conv_layers = self._build_conv_layers(channels, kernel_sizes, strides, paddings, activation)
        final_channels = channels[(- 1)]
        # Assumes a square feature map whose side survives the convs
        # unchanged (true for the defaults) — TODO confirm for custom strides.
        final_size = feature_size[0]
        linear_input_size = ((final_channels * final_size) * final_size)
        if self.variational:
            self.create_variational_layers(linear_input_size)
        else:
            self.fc = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(linear_input_size, latent_dimensions))

    def _build_conv_layers(self, channels, kernel_sizes, strides, paddings, activation):
        # Input is assumed single-channel (current_channels starts at 1).
        layers = []
        current_channels = 1
        for idx in range(len(channels)):
            layers.append(nn.Sequential(nn.Conv2d(in_channels=current_channels, out_channels=channels[idx], kernel_size=kernel_sizes[idx], stride=strides[idx], padding=paddings[idx]), activation))
            current_channels = channels[idx]
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv_layers(x)
        # Flatten channel and spatial dims per sample.
        x = x.view(x.size(0), (- 1))
        if self.variational:
            return self.forward_variational(x)
        return self.fc(x)
class ShuffleNetV2(nn.Module):
    """ShuffleNetV2 classifier: stem conv + maxpool, three InvertedResidual
    stages, a 1x1 conv, global average pooling and a linear head."""

    def __init__(self, stages_repeats, stages_out_channels, num_classes=1000):
        super(ShuffleNetV2, self).__init__()
        if (len(stages_repeats) != 3):
            raise ValueError('expected stages_repeats as list of 3 positive ints')
        if (len(stages_out_channels) != 5):
            raise ValueError('expected stages_out_channels as list of 5 positive ints')
        self._stage_out_channels = stages_out_channels
        input_channels = 3
        output_channels = self._stage_out_channels[0]
        # Stem: 3x3 stride-2 conv + BN + ReLU.
        self.conv1 = nn.Sequential(nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False), nn.BatchNorm2d(output_channels), nn.ReLU(inplace=True))
        input_channels = output_channels
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        stage_names = ['stage{}'.format(i) for i in [2, 3, 4]]
        for (name, repeats, output_channels) in zip(stage_names, stages_repeats, self._stage_out_channels[1:]):
            # Each stage: one stride-2 (downsampling) unit followed by
            # (repeats - 1) stride-1 units.
            seq = [InvertedResidual(input_channels, output_channels, 2)]
            for i in range((repeats - 1)):
                seq.append(InvertedResidual(output_channels, output_channels, 1))
            setattr(self, name, nn.Sequential(*seq))
            input_channels = output_channels
        output_channels = self._stage_out_channels[(- 1)]
        self.conv5 = nn.Sequential(nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False), nn.BatchNorm2d(output_channels), nn.ReLU(inplace=True))
        self.fc = nn.Linear(output_channels, num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.conv5(x)
        # Global average pool implemented as two successive means over the
        # trailing spatial dimensions.
        x = x.mean(2)
        x = x.mean(2)
        x = self.fc(x)
        return x
class Sentiment(_Sentiment):
    def load(self, path=None):
        """Load the sentiment lexicon; when using the bundled lexicon (no
        *path*), also derive adverb (RB) entries from adjectives (JJ) with
        the same polarity/subjectivity/intensity scores."""
        _Sentiment.load(self, path)
        if (not path):
            # Iterate over a snapshot since annotate() mutates the dict.
            for (w, pos) in list(dict.items(self)):
                if ('JJ' in pos):
                    # Heuristic -ly adverb formation; note the two rewrites
                    # chain: the 'le' check runs on the possibly y->i
                    # rewritten word (e.g. happy -> happi + ly).
                    if w.endswith('y'):
                        w = (w[:(- 1)] + 'i')
                    if w.endswith('le'):
                        w = w[:(- 2)]
                    (p, s, i) = pos['JJ']
                    self.annotate((w + 'ly'), 'RB', p, s, i)
# NOTE(review): the bare tuples and '_SKIP' names in this class look like
# stripped decorators (e.g. @unittest.skipIf(...)) lost in transcription —
# as written they would not skip anything (and '_SKIP' would raise
# NameError at class creation unless defined elsewhere); confirm against
# the original torchvision test file.
((_HAS_VIDEO_OPT is False), "Didn't compile with ffmpeg")
class TestVideo(unittest.TestCase):
    """Compares the VideoReader API against torchvision.io / PyAV decoding."""
    _SKIP
    ((av is None), 'PyAV unavailable')
    def test_read_video_tensor(self):
        # The new reader should produce the same tensor shape as read_video.
        torchvision.set_video_backend('pyav')
        for (test_video, config) in test_videos.items():
            full_path = os.path.join(VIDEO_DIR, test_video)
            (tv_result, _, _) = torchvision.io.read_video(full_path, pts_unit='sec')
            # Reorder frame dims to match VideoReader's per-frame layout
            # (permute(0, 3, 1, 2): presumably THWC -> TCHW — confirm).
            tv_result = tv_result.permute(0, 3, 1, 2)
            reader = VideoReader(full_path, 'video')
            frames = []
            for frame in reader:
                frames.append(frame['data'])
            new_api = torch.stack(frames, 0)
            self.assertEqual(tv_result.size(), new_api.size())

    ((av is None), 'PyAV unavailable')
    def test_metadata(self):
        # Reader metadata must match the known fps/duration of each fixture.
        torchvision.set_video_backend('pyav')
        for (test_video, config) in test_videos.items():
            full_path = os.path.join(VIDEO_DIR, test_video)
            reader = VideoReader(full_path, 'video')
            reader_md = reader.get_metadata()
            self.assertAlmostEqual(config.video_fps, reader_md['video']['fps'][0], delta=0.0001)
            self.assertAlmostEqual(config.duration, reader_md['video']['duration'][0], delta=0.5)

    _SKIP
    ((av is None), 'PyAV unavailable')
    def test_video_reading_fn(self):
        # Frame-level comparison between the new API and direct PyAV decode.
        for (test_video, config) in test_videos.items():
            full_path = os.path.join(VIDEO_DIR, test_video)
            ref_result = _decode_frames_by_av_module(full_path)
            reader = VideoReader(full_path, 'video')
            newapi_result = _template_read_video(reader)
            if ((newapi_result.vframes.numel() > 0) and (ref_result.vframes.numel() > 0)):
                mean_delta = torch.mean(torch.abs((newapi_result.vframes.float() - ref_result.vframes.float())))
                self.assertAlmostEqual(mean_delta, 0, delta=8.0)
            self.assertEqual(newapi_result.vframes.size(), ref_result.vframes.size())
            if (config.check_aframes and (newapi_result.aframes.numel() > 0) and (ref_result.aframes.numel() > 0)):
                is_same = torch.all(torch.eq(newapi_result.aframes, ref_result.aframes)).item()
                self.assertEqual(is_same, True)
def TestFlag(flag, test_val, default_val):
    """Verify that the environment variable GTEST_<FLAG> controls *flag*.

    Sets the variable and checks the flag reads back *test_val*, then unsets
    it and checks the flag falls back to *default_val*.  Relies on the
    module-level helpers SetEnvVar, GetFlag and AssertEq.
    """
    env_var = ('GTEST_' + flag.upper())
    SetEnvVar(env_var, test_val)
    AssertEq(test_val, GetFlag(flag))
    # Passing None unsets the variable, restoring the built-in default.
    SetEnvVar(env_var, None)
    AssertEq(default_val, GetFlag(flag))
def _run_tensor_parallel_optimization(num_nodes=1, num_devices_per_node=2):
    """Smoke-test TensorParallelOptimization.tune on a fresh model context.

    Seeds torch for determinism, tunes with tensor-parallel ranks [0, 1],
    and asserts that tuning reports success and the best config contains a
    'replacement_map' entry.
    """
    torch.manual_seed(42)
    model_context = create_model_context()
    parallel_optimization = TensorParallelOptimization(shard_planner='base', num_nodes=num_nodes, num_devices_per_node=num_devices_per_node, tracer_backend='meta_fx', prop_mode='meta_tracer')
    (status, best_config, model_context) = parallel_optimization.tune(model_context, {'tp_ranks': [0, 1]}, [])
    assert (('replacement_map' in best_config) and status)
class TestReproducibility(unittest.TestCase):
    """Checks that resuming fairseq training from a checkpoint reproduces
    the metrics of an uninterrupted run."""

    def _test_reproducibility(self, name, extra_flags=None, delta=0.0001, resume_checkpoint='checkpoint1.pt', max_epoch=3):
        if (extra_flags is None):
            extra_flags = []
        with tempfile.TemporaryDirectory(name) as data_dir:
            with self.assertLogs() as logs:
                test_binaries.create_dummy_data(data_dir)
                test_binaries.preprocess_translation_data(data_dir)
            # Full run: capture the final train/valid JSON log records
            # (fourth- and third-from-last records).
            with self.assertLogs() as logs:
                test_binaries.train_translation_model(data_dir, 'fconv_iwslt_de_en', (['--dropout', '0.0', '--log-format', 'json', '--log-interval', '1', '--max-epoch', str(max_epoch)] + extra_flags))
            (train_log, valid_log) = map((lambda rec: json.loads(rec.msg)), logs.records[(- 4):(- 2)])
            # Restart from the chosen checkpoint and train to the same epoch.
            os.rename(os.path.join(data_dir, resume_checkpoint), os.path.join(data_dir, 'checkpoint_last.pt'))
            with self.assertLogs() as logs:
                test_binaries.train_translation_model(data_dir, 'fconv_iwslt_de_en', (['--dropout', '0.0', '--log-format', 'json', '--log-interval', '1', '--max-epoch', str(max_epoch)] + extra_flags))
            (train_res_log, valid_res_log) = map((lambda rec: json.loads(rec.msg)), logs.records[(- 4):(- 2)])
            # Resumed metrics must match the uninterrupted run within delta.
            for k in ['train_loss', 'train_ppl', 'train_num_updates', 'train_gnorm']:
                self.assertAlmostEqual(float(train_log[k]), float(train_res_log[k]), delta=delta)
            for k in ['valid_loss', 'valid_ppl', 'valid_num_updates', 'valid_best_loss']:
                self.assertAlmostEqual(float(valid_log[k]), float(valid_res_log[k]), delta=delta)

    def test_reproducibility(self):
        self._test_reproducibility('test_reproducibility')

    # NOTE(review): the bare tuple below looks like a stripped
    # @unittest.skipIf(...) decorator lost in transcription — confirm.
    ((not torch.cuda.is_available()), 'test requires a GPU')
    def test_reproducibility_fp16(self):
        self._test_reproducibility('test_reproducibility_fp16', ['--fp16', '--fp16-init-scale', '4096'], delta=0.011)

    # NOTE(review): same apparent stripped-decorator residue as above.
    ((not torch.cuda.is_available()), 'test requires a GPU')
    def test_reproducibility_memory_efficient_fp16(self):
        self._test_reproducibility('test_reproducibility_memory_efficient_fp16', ['--memory-efficient-fp16', '--fp16-init-scale', '4096'])

    def test_mid_epoch_reproducibility(self):
        # Resume from a mid-epoch checkpoint saved every 3 updates.
        self._test_reproducibility('test_mid_epoch_reproducibility', ['--save-interval-updates', '3'], resume_checkpoint='checkpoint_1_3.pt', max_epoch=1)
def post_processing_function(examples, features, predictions, stage='eval'):
    """Convert raw QA model outputs into metric-ready predictions/references.

    Relies on enclosing-scope names: args (CLI options),
    answer_column_name, postprocess_qa_predictions and EvalPrediction.
    """
    predictions = postprocess_qa_predictions(examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=stage)
    if args.version_2_with_negative:
        # The SQuAD v2 metric expects a no-answer probability per prediction.
        formatted_predictions = [{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for (k, v) in predictions.items()]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for (k, v) in predictions.items()]
    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
def replace_tags_board(board_san):
    """Expand the board field of a FEN string into a flat board string.

    Takes the first space-separated field of *board_san* (the piece
    placement), expands each digit d — FEN shorthand for d consecutive
    empty squares — into d '1' characters, and strips the '/' rank
    separators, yielding one character per square (64 for a full board).

    Bug fix: the original replaced '8' with the empty string instead of
    eight '1's, so fully empty ranks vanished and every following square
    shifted position.
    """
    board = board_san.split(' ')[0]
    for digit in '2345678':
        board = board.replace(digit, '1' * int(digit))
    return board.replace('/', '')
class Joint(xmlr.Object):
    """URDF joint description: name, parent/child links, kinematics and
    optional limit/dynamics/safety/calibration/mimic metadata."""
    # Valid URDF joint types accepted by check_valid().
    TYPES = ['unknown', 'revolute', 'continuous', 'prismatic', 'floating', 'planar', 'fixed']

    def __init__(self, name=None, parent=None, child=None, joint_type=None, axis=None, origin=None, limit=None, dynamics=None, safety_controller=None, calibration=None, mimic=None, hardwareInterface=None):
        self.name = name
        self.parent = parent
        self.child = child
        # Stored as 'type'; 'joint_type' below aliases it.
        self.type = joint_type
        self.axis = axis
        self.origin = origin
        self.limit = limit
        self.dynamics = dynamics
        self.safety_controller = safety_controller
        self.calibration = calibration
        self.mimic = mimic
        self.hardwareInterface = hardwareInterface

    def check_valid(self):
        assert (self.type in self.TYPES), 'Invalid joint type: {}'.format(self.type)

    # NOTE(review): 'joint_type' reads like a property aliasing 'type', and
    # the bare '_type.setter' below looks like a mangled '@joint_type.setter'
    # (with the matching @property decorator missing above) — as written,
    # '_type.setter' would raise NameError at class creation unless '_type'
    # exists; confirm against the original urdf_parser_py source.
    def joint_type(self):
        return self.type

    _type.setter
    def joint_type(self, value):
        self.type = value
def evaluate(model):
    """Evaluate (and optionally benchmark) a TensorFlow model with
    neural_compressor.

    Runs the dataloader through the model session, accumulating top-k
    accuracy in `metric` and per-batch latency; in benchmark/performance
    mode prints latency and throughput.  Relies on module-level FLAGS and
    Dataset.

    Returns:
        The top-k accuracy computed over the processed batches.
    """
    from neural_compressor.model import Model
    from neural_compressor import Metric
    model = Model(model)
    input_tensor = model.input_tensor
    output_tensor = (model.output_tensor if (len(model.output_tensor) > 1) else model.output_tensor[0])
    # -1 means "run through the entire dataloader".
    iteration = (- 1)
    if (FLAGS.benchmark and (FLAGS.mode == 'performance')):
        iteration = FLAGS.iters
    from neural_compressor import METRICS
    metrics = METRICS('tensorflow')
    metric = metrics['topk']()

    def eval_func(dataloader):
        # Returns per-image latency; accuracy is accumulated into `metric`
        # as a side effect of metric.update().
        latency_list = []
        for (idx, (inputs, labels)) in enumerate(dataloader):
            inputs = np.array([inputs])
            assert (len(input_tensor) == len(inputs)), 'inputs len must equal with input_tensor'
            feed_dict = dict(zip(input_tensor, inputs))
            start = time.time()
            predictions = model.sess.run(output_tensor, feed_dict)
            end = time.time()
            metric.update(predictions, labels)
            latency_list.append((end - start))
            if ((idx + 1) == iteration):
                break
        latency = (np.array(latency_list).mean() / FLAGS.batch_size)
        return latency
    from neural_compressor.data import DataLoader
    dataloader = DataLoader(framework='tensorflow', dataset=Dataset(), batch_size=FLAGS.batch_size)
    latency = eval_func(dataloader)
    if (FLAGS.benchmark and (FLAGS.mode == 'performance')):
        print('Batch size = {}'.format(FLAGS.batch_size))
        print('Latency: {:.3f} ms'.format((latency * 1000)))
        print('Throughput: {:.3f} images/sec'.format((1.0 / latency)))
    acc = metric.result()
    return acc
def average_gradients(tower_grads):
    """Average gradients across GPU towers for synchronous multi-GPU training.

    Args:
        tower_grads: list (one entry per tower) of lists of (gradient,
            variable) tuples, as produced by Optimizer.compute_gradients.

    Returns:
        One list of (averaged_gradient, variable) tuples.

    NOTE(review): tf.concat(0, grads) uses the pre-TF-1.0 argument order
    (axis first); on TF >= 1.0 this must be tf.concat(grads, 0) -- confirm
    which TensorFlow version this file targets.
    """
    average_grads = []
    # zip(*tower_grads) regroups per-variable: one tuple holding that
    # variable's (grad, var) pair from every tower.
    for grad_and_vars in zip(*tower_grads):
        grads = []
        for (g, _) in grad_and_vars:
            # Add a leading 'tower' axis so the gradients can be stacked.
            expanded_g = tf.expand_dims(g, 0)
            grads.append(expanded_g)
        grad = tf.concat(0, grads)
        grad = tf.reduce_mean(grad, 0)
        # Variables are shared across towers; the first tower's handle suffices.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
# NOTE(review): these two decorators had been mangled to bare
# '_sentencepiece' / '_tokenizers' expressions (NameError at import time).
# Restored to the standard transformers test-skipping decorators -- confirm
# they are imported at the top of this file.
@require_sentencepiece
@require_tokenizers
class TestMarian_EN_DE_More(MarianIntegrationTest):
    """Additional en->de Marian checks: forward pass, unk handling, <pad>
    tokenisation, batch generation, and AutoConfig resolution."""

    def test_forward(self):
        """Tokenise a src/tgt pair, run the model, and greedily decode."""
        (src, tgt) = (['I am a small frog'], ['Ich bin ein kleiner Frosch.'])
        expected_ids = [38, 121, 14, 697, 38848, 0]
        model_inputs: dict = self.tokenizer.prepare_seq2seq_batch(src, tgt_texts=tgt, return_tensors='pt').to(torch_device)
        self.assertListEqual(expected_ids, model_inputs.input_ids[0].tolist())
        desired_keys = {'input_ids', 'attention_mask', 'labels'}
        self.assertSetEqual(desired_keys, set(model_inputs.keys()))
        model_inputs['decoder_input_ids'] = shift_tokens_right(model_inputs.labels, self.tokenizer.pad_token_id)
        model_inputs['return_dict'] = True
        model_inputs['use_cache'] = False
        with torch.no_grad():
            outputs = self.model(**model_inputs)
        max_indices = outputs.logits.argmax((- 1))
        self.tokenizer.batch_decode(max_indices)

    def test_unk_support(self):
        """Unknown input maps to unk_token_id (with trailing EOS)."""
        t = self.tokenizer
        ids = t.prepare_seq2seq_batch(['||'], return_tensors='pt').to(torch_device).input_ids[0].tolist()
        expected = [t.unk_token_id, t.unk_token_id, t.eos_token_id]
        self.assertEqual(expected, ids)

    def test_pad_not_split(self):
        """A literal '<pad>' in the text must survive as a single token."""
        input_ids_w_pad = self.tokenizer.prepare_seq2seq_batch(['I am a small frog <pad>'], return_tensors='pt').input_ids[0].tolist()
        expected_w_pad = [38, 121, 14, 697, 38848, self.tokenizer.pad_token_id, 0]
        self.assertListEqual(expected_w_pad, input_ids_w_pad)

    def test_batch_generation_en_de(self):
        """Delegate to the shared batch-generation comparison helper."""
        self._assert_generated_batch_equal_expected()

    def test_auto_config(self):
        """AutoConfig resolves this checkpoint to a MarianConfig."""
        config = AutoConfig.from_pretrained(self.model_name)
        self.assertIsInstance(config, MarianConfig)
# NOTE(review): the decorator line had been mangled to a bare
# '_REGISTRY.register()' expression; restored to the detectron2-style
# backbone registry decorator -- confirm the registry's imported name.
@BACKBONE_REGISTRY.register()
def build_mnv2_backbone(cfg, input_shape):
    """Build a MobileNetV2 backbone and attach the feature metadata
    (channels/strides for res2..res5) that FPN-style heads expect.

    Args:
        cfg: config node; MODEL.RESNETS.OUT_FEATURES selects exposed stages.
        input_shape: unused here; kept for the registry's build signature.
    """
    out_features = cfg.MODEL.RESNETS.OUT_FEATURES
    # Channel counts and strides fixed by the MobileNetV2 architecture.
    out_feature_channels = {'res2': 24, 'res3': 32, 'res4': 96, 'res5': 320}
    out_feature_strides = {'res2': 4, 'res3': 8, 'res4': 16, 'res5': 32}
    model = MobileNetV2(cfg)
    model._out_features = out_features
    model._out_feature_channels = out_feature_channels
    model._out_feature_strides = out_feature_strides
    return model
def test_double_double_syspool(vrblvl=0):
    """Exercise the double-double systems pool: store three univariate
    systems in slots 1..3, read each slot back, and return 0 on success
    (nonzero when the pool size is not 3)."""
    initialize_double_double_syspool(3, vrblvl)
    dim = size_double_double_syspool(vrblvl)
    print('The size of the systems pool :', dim)
    # Fill pool slots 1..3, one polynomial system per slot.
    systems = (['t - 1/3;'], ['t - 2/3;'], ['t - 1;'])
    for (slot, pols) in enumerate(systems, start=1):
        set_double_double_system(1, pols, vrblvl)
        copy_to_double_double_syspool(slot)
    # Retrieve and display every stored system.
    for slot in range(1, dim + 1):
        clear_double_double_system(vrblvl)
        copy_from_double_double_syspool(slot)
        pols = get_double_double_system(vrblvl)
        print('system at', slot, 'in the pool :', pols)
    clear_double_double_syspool(vrblvl)
    return int(dim != 3)
def main(args):
    """Prepare a fresh output directory mirroring the experiment layout,
    set up logging, and run the processing step."""
    src_dir = os.path.join(args.in_dir, args.exp, args.custom_dir)
    dst_dir = os.path.join(args.out_dir, args.exp, args.custom_dir)
    # Start from an empty output directory every run.
    os.makedirs(dst_dir, exist_ok=True)
    exp_util.clear_dir(dst_dir)
    logger = exp_util.get_logger(os.path.join(dst_dir, 'log.txt'))
    logger.info(args)
    logger.info(datetime.now())
    process(args, src_dir, dst_dir, logger)
def vgg16_bn_128(pretrained=False, **kwargs):
    """Build a batch-norm VGG16 from the 'D_128' layer configuration.

    Args:
        pretrained: if True, load the 'vgg16_bn' weights from model_zoo.
            NOTE(review): those weights belong to the standard config --
            verify they are shape-compatible with 'D_128' before relying
            on pretrained=True.
        **kwargs: forwarded to the VGG constructor.
    """
    model = VGG(make_layers(cfg['D_128'], batch_norm=True), **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))
    return model
def grid_search(model_constructor, train_data, dev_data, config, acc_priors, balance_priors, epochs, label_to_ix):
    """Grid-search over (acc_prior, balance_prior) pairs, selecting by dev F1.

    Each candidate model is built with model_constructor(acc_prior,
    balance_prior) and trained via train_generative_model; the weights of
    the best-F1 candidate are loaded into a freshly built model.

    Returns:
        (best_model, best_p, best_r, best_f1).

    Raises:
        ValueError: when the grid is empty or no candidate produced a
            comparable F1 score.
    """
    best_p = float('-inf')
    best_r = float('-inf')
    best_f1 = float('-inf')
    best_params = None
    best_acc_prior = None
    best_balance_prior = None
    for acc_prior in acc_priors:
        for balance_prior in balance_priors:
            model = model_constructor(acc_prior, balance_prior)
            (p, r, f1) = train_generative_model(model, train_data, dev_data, epochs, label_to_ix, config)
            if f1 > best_f1:
                best_p, best_r, best_f1 = p, r, f1
                best_params = model.state_dict()
                best_acc_prior = acc_prior
                best_balance_prior = balance_prior
    # Robustness fix: an empty grid (or all-NaN F1 scores) previously fell
    # through to load_state_dict(None) with a confusing downstream error.
    if best_params is None:
        raise ValueError('grid_search found no valid (acc_prior, balance_prior) candidate')
    best_model = model_constructor(best_acc_prior, best_balance_prior)
    best_model.load_state_dict(best_params)
    return (best_model, best_p, best_r, best_f1)
def get_wavelength_from_header(hdr):
    """Build a linear wavelength array from FITS WCS header keywords.

    Requires CRVAL1, CRPIX1, NAXIS1 and one of CDELT1/CD1_1; CD1_1 takes
    precedence when both dispersion keywords are present.

    Returns:
        numpy array of length NAXIS1 with the wavelength of each pixel.

    Raises:
        WavelengthError: when the required keywords are missing.
    """
    # BUG FIX: the original test was ('CRVAL1' and ('CRPIX1' in ...)), where
    # the bare string 'CRVAL1' is always truthy, so CRVAL1's presence was
    # never actually checked.  Test membership for every required keyword.
    if (('CRVAL1' in hdr) and ('CRPIX1' in hdr) and (('CDELT1' in hdr) or ('CD1_1' in hdr))):
        # Prefer CD1_1 over CDELT1 when both exist (same as before).
        if ('CD1_1' in hdr):
            cdelt = hdr['CD1_1']
        else:
            cdelt = hdr['CDELT1']
        crval = hdr['CRVAL1']
        crpix = hdr['CRPIX1']
        # FITS pixel indices are 1-based, hence the (crpix - 1) shift.
        wavelength = (((np.arange(hdr['NAXIS1']) - (crpix - 1)) * cdelt) + crval)
        return wavelength
    else:
        raise WavelengthError('Not enough information in header to create wavelength array')
def change_label(tree, new_label, span=None, cur_label=None, in_place=True):
    """Dispatch a label change either by node or by (span, cur_label).

    Exactly one addressing mode must be used: both span and cur_label
    omitted (by-node) or both supplied (by-span); mixing them is an error.
    """
    by_node = (span is None) and (cur_label is None)
    by_span = (span is not None) and (cur_label is not None)
    if by_node:
        return change_label_by_node(tree, new_label, in_place)
    if by_span:
        return change_label_by_span(tree, new_label, span, cur_label, in_place)
    raise Exception('Invalid combination of arguments for change label request')
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Seed numpy, random, and torch deterministically for one dataloader worker.

    The per-worker seed combines the base seed with the worker's global
    index (num_workers * rank + worker_id), so every worker in every
    process draws a distinct, reproducible stream.
    """
    worker_seed = seed + (num_workers * rank) + worker_id
    for seeder in (np.random.seed, random.seed, torch.manual_seed):
        seeder(worker_seed)
class SolutionInstance():
    """Plain record of one placement solution: objective value, coordinate
    lists X/Y, widths W, heights H, and the solution number."""

    def __init__(self, objVal, X, Y, W, H, solNo):
        self.objVal = objVal
        self.solNo = solNo
        self.X = X
        self.Y = Y
        self.W = W
        self.H = H
from dataclasses import dataclass, field


# BUG FIX: the fields below rely on field(default_factory=...), which only
# takes effect under the @dataclass decorator (it had been stripped);
# without it every instance shared the same raw Field objects instead of
# getting fresh dict/list containers.
@dataclass
class GraphIR():
    """Graph-level IR.

    `vars` maps SSA-style value names to their abstract tensors and
    `insts` is the (topologically ordered) instruction list.  The methods
    maintain use-def chains between instructions.
    """

    vars: Dict[(str, AbsTensor)] = field(default_factory=dict)
    insts: List[InstIR] = field(default_factory=list)

    def __str__(self) -> str:
        """One instruction per line."""
        ret = ''
        for inst in self.insts:
            ret += f'{inst}\n'
        return ret

    def pretty(self) -> str:
        """Like __str__ but with instruction ids renumbered 0..n-1."""
        inst_remap = {inst.identifier: f'{idx}' for (idx, inst) in enumerate(self.insts)}
        ret = ''
        for inst in self.insts:
            pretty_args = []
            for arg in inst.iexpr.args:
                (inst_id, ret_idx) = InstIR.var_inst_idx(arg)
                pretty_args.append(InstIR.retval_string(inst_remap[inst_id], ret_idx))
            pretty_retvals = [InstIR.retval_string(inst_remap[inst.identifier], ret_idx) for ret_idx in range(inst.n_output())]
            ret += f"{', '.join(pretty_retvals)} = {inst.iexpr.op}({', '.join(pretty_args)})"
            ret += f' # inst id: {inst_remap[inst.identifier]}\n'
        return ret

    def n_inst(self) -> int:
        """Total number of instructions."""
        return len(self.insts)

    def n_compute_inst(self) -> int:
        """Number of instructions excluding Input/Constant/Placeholder."""
        return sum((1 for inst in self.insts if (not isinstance(inst.iexpr.op, (Input, Constant, Placeholder)))))

    def n_var(self) -> int:
        """Number of defined variables."""
        return len(self.vars)

    def leaf_inst(self) -> List[InstIR]:
        """Instructions whose outputs have no users."""
        return [inst for inst in self.insts if inst.no_users()]

    def leaf_var(self) -> List[str]:
        """Variables that are never consumed, in instruction order."""
        lvs = []
        for inst in self.insts:
            for lv in inst.leaf_var():
                if (lv not in lvs):
                    lvs.append(lv)
        return lvs

    def input_var(self) -> List[str]:
        """Return values of all Input instructions."""
        return [inst.retval() for inst in self.insts if isinstance(inst.iexpr.op, Input)]

    def add_inst(self, iexpr: InstExpr) -> InstIR:
        """Create a new instruction, register its outputs in `vars`, wire
        use-def edges, and insert it right after its last producer."""
        new_inst = InstIR(iexpr, irctx=self)
        otensors = iexpr.op.output_like
        if any([(t is None) for t in otensors]):
            # Output types unknown: infer them from the argument types.
            otensors = iexpr.op.checked_type_transfer([self.vars[arg] for arg in iexpr.args])
        for (ridx, abstensor) in enumerate(otensors):
            vname = new_inst.retval(ridx)
            assert (vname not in self.vars), ('Variable name is not unique: ' + vname)
            self.vars[vname] = abstensor
        min_user_idx = 0
        for arg in set(iexpr.args):
            assert (arg in self.vars), ('Variable not defined: ' + arg)
            (inst_id, ret_idx) = InstIR.var_inst_idx(arg)
            for (idx, may_prod) in enumerate(self.insts):
                if (inst_id == may_prod.identifier):
                    may_prod.users[ret_idx].add(new_inst)
                    # The new inst must sit after its furthest producer.
                    min_user_idx = max(min_user_idx, (idx + 1))
                    break
        self.insts.insert(min_user_idx, new_inst)
        return new_inst

    def find_inst_by_id(self, obj_id: int) -> Optional[InstIR]:
        """Linear search for an instruction by its identifier."""
        for inst in self.insts:
            if (inst.identifier == obj_id):
                return inst
        return None

    def replace_alluse(self, oldvar: str, newvar: str, type_check=True) -> None:
        """Redirect every use of `oldvar` to `newvar` and move the user set."""
        assert (oldvar in self.vars), ('oldvar not defined: ' + oldvar)
        assert (newvar in self.vars), ('newvar not defined: ' + newvar)
        if (type_check and (self.vars[oldvar] is not None) and (self.vars[newvar] is not None)):
            assert self.vars[oldvar].weak_compare(self.vars[newvar]), f'Type mismatch: {self.vars[oldvar]} != {self.vars[newvar]}'
        (old_inst_id, old_ret_idx) = InstIR.var_inst_idx(oldvar)
        old_inst = self.find_inst_by_id(old_inst_id)
        assert (old_inst is not None), ('oldvar not defined: ' + oldvar)
        for ouser in old_inst.users[old_ret_idx]:
            ouser.iexpr.args = tuple(((newvar if (a == oldvar) else a) for a in ouser.iexpr.args))
        (new_inst_id, new_ret_idx) = InstIR.var_inst_idx(newvar)
        new_inst = self.find_inst_by_id(new_inst_id)
        new_inst.users[new_ret_idx] = old_inst.users[old_ret_idx]
        old_inst.users[old_ret_idx] = set()

    def replace_arg(self, inst: InstIR, arg_idx: int, newvar: str, type_check=True):
        """Replace one argument of `inst`, updating both users sets."""
        assert (newvar in self.vars), ('newvar not defined: ' + newvar)
        assert (0 <= arg_idx < len(inst.iexpr.args)), f'Invalid argument index {arg_idx} for {inst}'
        oldvar = inst.iexpr.args[arg_idx]
        if (type_check and (self.vars[oldvar] is not None) and (self.vars[newvar] is not None)):
            assert (self.vars[oldvar] == self.vars[newvar]), f'Type mismatch: {self.vars[oldvar]} != {self.vars[newvar]}'
        inst.iexpr.args[arg_idx] = newvar
        if (oldvar not in inst.iexpr.args):
            # That was the last use of oldvar here: drop the use-def edge.
            (old_inst_id, old_ret_idx) = InstIR.var_inst_idx(oldvar)
            old_inst = self.find_inst_by_id(old_inst_id)
            old_inst.users[old_ret_idx].remove(inst)
        (new_inst_id, new_ret_idx) = InstIR.var_inst_idx(newvar)
        new_inst = self.find_inst_by_id(new_inst_id)
        new_inst.users[new_ret_idx].add(inst)

    def remove_unused(self, inst: InstIR) -> None:
        """Delete a user-less instruction, cleaning `vars` and stale edges."""
        assert (inst in self.insts), f'Instruction not in graph: {inst}'
        assert inst.no_users(), f'{inst} has users {inst.users}.'
        for other in self.insts:
            if (other != inst):
                for users in other.users:
                    if (inst in users):
                        users.remove(inst)
        for val in inst.retvals():
            del self.vars[val]
        self.insts.remove(inst)

    def assert_wellform(self):
        """Check SSA ordering, var registration, and use-def consistency."""
        defined = set()
        for inst in self.insts:
            for arg in inst.iexpr.args:
                assert (arg in self.vars), f'Variable not defined: {arg}'
                assert (arg in defined), f'Variable not defined yet: {arg}'
                (usee_id, ret_idx) = InstIR.var_inst_idx(arg)
                usee = self.find_inst_by_id(usee_id)
                assert (inst in usee.users[ret_idx]), f'Use-Def chain broken: {usee} should be used by {inst}'
            for rv in inst.retvals():
                assert (rv in self.vars), f'Return var not in self.vars: {rv}'
                assert (rv not in defined), f'Variable defined twice: {rv}'
            for (ret_idx, users) in enumerate(inst.users):
                val = inst.retval(ret_idx)
                for user in users:
                    assert (val in user.iexpr.args), f'Use-Def chain broken: {inst} should be used by {user}'
            defined.update(inst.retvals())

    def _topological_sort(self):
        """Reorder `insts` so producers precede consumers; raise on cycles."""
        defined = set()
        def swap(i, j):
            (self.insts[i], self.insts[j]) = (self.insts[j], self.insts[i])
        ptr = 0
        while (ptr < len(self.insts)):
            # All not-yet-placed instructions whose args are fully defined.
            frontier = []
            for idx in range(ptr, len(self.insts)):
                inst = self.insts[idx]
                if all(((arg in defined) for arg in inst.iexpr.args)):
                    frontier.append(idx)
            if (len(frontier) == 0):
                CORE_LOG.error(f'Bad IR:\n{self.pretty()}')
                raise RuntimeError('Cyclic dependency detected.')
            for idx in frontier:
                swap(ptr, idx)
                defined.update(self.insts[ptr].retvals())
                ptr += 1

    def _udchain_repair(self):
        """Rebuild the user sets from the instructions' argument lists."""
        for inst in self.insts:
            for arg in inst.iexpr.args:
                (usee_id, ret_idx) = InstIR.var_inst_idx(arg)
                usee = self.find_inst_by_id(usee_id)
                usee.users[ret_idx].add(inst)
            for (ret_idx, users) in enumerate(inst.users):
                val = inst.retval(ret_idx)
                for user in list(users):
                    if (val not in user.iexpr.args):
                        users.remove(user)

    def wellform_repair(self):
        """Repair use-def chains, then restore topological order."""
        self._udchain_repair()
        self._topological_sort()

    def concretize(self, model: ModelRef) -> None:
        """Substitute symbolic op attributes using `model` and refresh types."""
        for inst in self.insts:
            op = concretize_op(inst.iexpr.op, model)
            itensors = [self.vars[vname] for vname in inst.iexpr.args]
            otensors = op.checked_type_transfer(itensors)
            op.bind_input_like(itensors)
            op.bind_output_like(otensors)
            inst.iexpr.op = op
            for (vname, tensor) in zip(inst.retvals(), otensors):
                self.vars[vname] = tensor

    def debug(self):
        """Print vars and instructions; add an attribute-masked view when
        any AutoInf op has a successful inference record."""
        print('=== IR VARIABLES ===')
        print(self.vars)
        print('=== IR INSTS ===')
        has_inferred = False
        for inst in self.insts:
            if (inst.iexpr.op.__class__.__name__ == 'AutoInfOpBase'):
                ainst = inst.iexpr.op.inst
                has_inferred |= (not ainst.infer_failed())
                print(inst.retvals(), '<-', ainst.invoke_str(inst.iexpr.op.attrs), '<-', inst.iexpr.args)
            else:
                print(inst.retvals(), '<-', inst.iexpr.op, '<-', inst.iexpr.args)
        if has_inferred:
            print('=== IR INSTS by MASKING ATTR w/ `` ===')
            for inst in self.insts:
                if (inst.iexpr.op.__class__.__name__ == 'AutoInfOpBase'):
                    ainst = inst.iexpr.op.inst
                    if (not ainst.infer_failed()):
                        print(inst.retvals(), '<-', ainst.invoke_str({k: f'{k}' for k in ainst.A}), '<-', inst.iexpr.args)
                        print('SHAPE PROP:', [t for t in ainst.type_transfer_dbg_info.split('\n') if t])
                        print('REQUIRES:', ([t for t in ainst.requires_dbg_info.split('\n') if t] if (len(ainst.nnsmith_rules()) == 0) else ainst.nnsmith_rules()[0].__name__))

    def to_dot(self) -> str:
        """Render the graph as Graphviz dot (Mrecord nodes: args|op|retvals)."""
        text = 'digraph D {\n'
        text += ' node [shape=Mrecord];\n'
        def render_node(inst: InstIR):
            label = '{'
            extra = ''
            if (not isinstance(inst.iexpr.op, (Input, Constant, Placeholder))):
                label += '{'
                label += '|'.join([f'<i{idx}> {arg}' for (idx, arg) in enumerate(inst.iexpr.args)])
                label += '}|'
            elif isinstance(inst.iexpr.op, Input):
                extra += 'fillcolor=cadetblue1,style=filled,'
            elif isinstance(inst.iexpr.op, Constant):
                extra += 'fillcolor=lightpink,style=filled,'
            elif isinstance(inst.iexpr.op, Placeholder):
                extra += 'fillcolor=lightgray,style=filled,'
            # Braces are structural in Mrecord labels; escape the op's own.
            label += f'{inst.iexpr.op}|'.replace('{', '\\{').replace('}', '\\}')
            label += '{'
            label += '|'.join([f'<o{idx}> {rv}' for (idx, rv) in enumerate(inst.retvals())])
            label += '}}'
            return f' {inst.identifier} [label="{label}",{extra}];\n'
        for inst in self.insts:
            text += render_node(inst)
        for inst in self.insts:
            for (idx, arg) in enumerate(inst.iexpr.args):
                (usee_id, ret_idx) = InstIR.var_inst_idx(arg)
                text += f' {usee_id}:o{ret_idx} -> {inst.identifier}:i{idx} [label="{self.vars[arg].pretty()}"];\n'
        text += '}\n'
        return text
def get_model(outputs, width, height, scale, n_patches, patch_size, reg):
    """Build an attention-sampling classifier plus a companion viewer model.

    A low-resolution copy of the (augmented) input drives an attention map
    from which n_patches high-resolution patches are sampled and encoded.

    Returns:
        (classifier, viewer): classifier maps the image to softmax class
        scores; viewer exposes (attention map, sampled patches, low-res
        image) for inspection.

    NOTE(review): `attention` and `resnet` are module-level builders defined
    elsewhere in this file -- confirm their signatures before modifying.
    """
    x_in = Input(shape=(height, width, 3))
    # Photometric + pan augmentation applied at full resolution.
    x_high = ImageLinearTransform()(x_in)
    x_high = ImagePan(horizontally=True, vertically=True)(x_high)
    # Downscaled copy used only to compute the attention distribution.
    x_low = ResizeImages((int((height * scale)), int((width * scale))))(x_high)
    (features, att, patches) = attention_sampling(attention, resnet, patch_size, n_patches, replace=False, attention_regularizer=multinomial_entropy(reg), receptive_field=9)([x_low, x_high])
    y = Dense(outputs, activation='softmax')(features)
    return (Model(inputs=x_in, outputs=[y]), Model(inputs=x_in, outputs=[att, patches, x_low]))
def transferFileToHdfsPath(sourcepath, targetpath):
    """Copy a local file into HDFS at `targetpath`, creating parent dirs.

    Raises:
        OSError: when the copyFromLocal command exits nonzero.
    """
    import subprocess  # local import keeps the file's import block untouched
    hdfs = '/opt/hadoop/latest/bin/hdfs'
    hdfspath = targetpath
    targetdir = os.path.dirname(targetpath)
    # Security fix: argument-list form avoids shell injection via crafted
    # paths (the previous os.system() calls interpolated the paths straight
    # into a shell command string).
    subprocess.run([hdfs, 'dfs', '-mkdir', '-p', targetdir])
    result = subprocess.run([hdfs, 'dfs', '-copyFromLocal', '-f', sourcepath, hdfspath]).returncode
    if (result != 0):
        raise OSError('Cannot copyFromLocal {} {} returned {}'.format(sourcepath, hdfspath, result))
def get_model_fwk_name(model):
    """Detect which framework a model object or model path belongs to.

    Tries, in order: ONNX Runtime, Neural Engine, TensorFlow, PyTorch.
    `model` may be an in-memory model object or a filesystem path.

    Returns:
        One of 'onnxruntime', 'neural engine', 'tensorflow', 'torch'.

    Raises:
        AssertionError: when a given path does not exist or no framework
            matches the model format.
    """
    onnx = LazyImport('onnx')
    tf = LazyImport('tensorflow')
    torch = LazyImport('torch')
    def _is_onnxruntime(model):
        # Path: must parse as an ONNX graph with at least one node.
        # Object: fall back to the type's module name containing 'onnx'.
        if isinstance(model, str):
            try:
                graph = onnx.load(model)
                assert (len(graph.graph.node) != 0)
            except:
                # NOTE: bare except is deliberate best-effort probing here.
                return 'NA'
            else:
                return 'onnxruntime'
        elif ('onnx' in str(type(model))):
            return 'onnxruntime'
        return 'NA'
    def _is_tensorflow(model):
        # Path: must deserialise as a GraphDef; object: must expose graph_def.
        try:
            if isinstance(model, str):
                graph_def = tf.compat.v1.GraphDef()
                with open(model, 'rb') as f:
                    graph_def.ParseFromString(f.read())
            else:
                graph = model.graph_def
        except:
            pass
        else:
            return 'tensorflow'
        return 'NA'
    def _is_torch(model):
        # Path: must load as TorchScript; object: type mentions 'torch'.
        if isinstance(model, str):
            try:
                torch.jit.load(model)
            except:
                return 'NA'
            else:
                return 'torch'
        elif ('torch' in str(type(model))):
            return 'torch'
        return 'NA'
    def _is_neural_engine(model):
        # A Neural Engine model is a directory holding conf.yaml + model.bin.
        if (model and isinstance(model, str) and os.path.isdir(model)):
            if (os.path.exists(os.path.join(model, 'conf.yaml')) and os.path.exists(os.path.join(model, 'model.bin'))):
                return 'neural engine'
            else:
                logger.error('Please Input yaml and bin for neural engine.')
                return 'NA'
        else:
            return 'NA'
    if isinstance(model, str):
        absmodel = os.path.abspath(os.path.expanduser(model))
        assert (os.path.exists(absmodel) or os.path.exists((absmodel + '.pb'))), 'invalid input path, the file does not exist!'
    # First matching detector wins; order matters (onnx before tf/torch).
    checker = [_is_onnxruntime, _is_neural_engine, _is_tensorflow, _is_torch]
    for handler in checker:
        fwk_name = handler(model)
        if (fwk_name != 'NA'):
            break
    assert (fwk_name != 'NA'), 'Framework is not detected correctly from model format.'
    return fwk_name
def dynamic_import_st(module, backend):
    """Resolve a speech-translation model class by name and verify that it
    implements the STInterface contract."""
    aliases = predefined_st.get(backend, dict())
    model_class = dynamic_import(module, aliases)
    assert issubclass(model_class, STInterface), f'{module} does not implement STInterface'
    return model_class
def resnet50(pretrained=False, **kwargs):
    """ResNet-50 (Bottleneck blocks, layout [3, 4, 6, 3]); optionally load
    locally stored pretrained weights."""
    net = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return net
    weights = torch.load(os.path.join(models_dir, model_name['resnet50']))
    net.load_state_dict(weights)
    return net
def im_detect_bbox(model, images, target_scale, target_max_size, device, rois=None):
    """Detect boxes on a batch of images at a single scale.

    Args:
        model: detection model; called with an image list and optional rois.
        images: list of input images.
        target_scale / target_max_size: resize parameters for the transform.
        device: torch device the model lives on.
        rois: optional list of per-image proposal boxes; None means no
            proposals for any image.
    """
    transform = T.Compose([T.Resize(target_scale, target_max_size), T.ToTensor(), T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=cfg.INPUT.TO_BGR255)])
    # BUG FIX: with the default rois=None, zip(images, None) raised
    # TypeError; expand None into one "no rois" entry per image.
    if (rois is None):
        rois = ([None] * len(images))
    t_images = []
    t_rois = []
    for (image, roi) in zip(images, rois):
        (t_img, _, t_roi) = transform(image, rois=roi)
        t_images.append(t_img)
        t_rois.append(t_roi)
    t_images = to_image_list(t_images, cfg.DATALOADER.SIZE_DIVISIBILITY)
    t_rois = [(r.to(device) if (r is not None) else None) for r in t_rois]
    return model(t_images.to(device), rois=t_rois)
def prepare_img(image, label, num_classes):
    """Resize the image for the configured dataset and one-hot the label."""
    # Per-dataset target resolution; unknown datasets keep the raw size.
    target_size = {'Imagenet': [224, 224], 'Cifar10': [32, 32]}.get(args.data_set)
    if target_size is not None:
        image = tf.image.resize(image, target_size)
    label = tf.one_hot(tf.squeeze(label), num_classes)
    return (image, label)
class BaseARD(torch.nn.Module):
    """Abstract base for ARD (automatic relevance determination) modules.

    Subclasses must supply a sparsity penalty term and a relevance mask.
    """

    def penalty(self):
        """Return this module's sparsity penalty term."""
        raise NotImplementedError('Derived classes must compute their own penalty.')

    def relevance(self, **kwargs):
        """Return a float mask marking the relevant coefficients."""
        raise NotImplementedError('Derived classes must implement a float mask of relevant coefficients.')
def require_set_backend():
    """Assert that the module-level `backend` has been configured.

    NOTE: implemented with `assert`, so the check disappears under
    `python -O`; callers relying on it should not run optimized.
    """
    assert (backend is not None), 'distributed backend is not set. Please call `distributed_utils.set_backend_from_args` at the start of your script'
def presnet152(pretrained=False, **kwargs):
    """Construct a PreAct ResNet-152 (Bottleneck blocks, layout [3, 8, 36, 3])."""
    layout = [3, 8, 36, 3]
    return presnet(Bottleneck, layout, 'presnet152', pre=pretrained, **kwargs)
def inference_multi_modality_detector(model, pcd, image, ann_file):
    """Run a multi-modality (point cloud + image) detector on one sample.

    Args:
        model: an initialised detector carrying its config as `model.cfg`.
        pcd: path to the point-cloud file.
        image: path to the image; the trailing number in its name selects
            the matching annotation entry.
        ann_file: annotation file with per-image calibration info.

    Returns:
        (result, data): detector output and the preprocessed input dict.

    Raises:
        ValueError: when no annotation entry matches the image index.
    """
    cfg = model.cfg
    device = next(model.parameters()).device
    test_pipeline = deepcopy(cfg.data.test.pipeline)
    test_pipeline = Compose(test_pipeline)
    (box_type_3d, box_mode_3d) = get_box_type(cfg.data.test.box_type_3d)
    data_infos = mmcv.load(ann_file)
    # The last number in the image filename is its annotation index.
    image_idx = int(re.findall('\\d+', image)[(- 1)])
    # Robustness fix: `info` previously stayed unbound (NameError) when the
    # index was absent from the annotations; fail with a clear message.
    info = None
    for x in data_infos:
        if (int(x['image']['image_idx']) != image_idx):
            continue
        info = x
        break
    if (info is None):
        raise ValueError('image index {} not found in {}'.format(image_idx, ann_file))
    data = dict(pts_filename=pcd, img_prefix=osp.dirname(image), img_info=dict(filename=osp.basename(image)), box_type_3d=box_type_3d, box_mode_3d=box_mode_3d, img_fields=[], bbox3d_fields=[], pts_mask_fields=[], pts_seg_fields=[], bbox_fields=[], mask_fields=[], seg_fields=[])
    if (box_mode_3d == Box3DMode.DEPTH):
        data.update(dict(calib=info['calib']))
    data = test_pipeline(data)
    if (box_mode_3d == Box3DMode.LIDAR):
        rect = info['calib']['R0_rect'].astype(np.float32)
        Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)
        P2 = info['calib']['P2'].astype(np.float32)
        # BUG FIX: the matrix-multiply operators had been stripped
        # ('(P2 rect) Trv2c' was a syntax error); the lidar-to-image
        # projection is P2 @ R0_rect @ Tr_velo_to_cam.
        lidar2img = ((P2 @ rect) @ Trv2c)
        data['img_metas'][0].data['lidar2img'] = lidar2img
    elif (box_mode_3d == Box3DMode.DEPTH):
        data['calib'][0]['Rt'] = data['calib'][0]['Rt'].astype(np.float32)
        data['calib'][0]['K'] = data['calib'][0]['K'].astype(np.float32)
    # Collate to a batch of one and move to GPU when the model lives there.
    data = collate([data], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        data = scatter(data, [device.index])[0]
    else:
        # CPU path: unwrap the DataContainer objects manually.
        data['img_metas'] = data['img_metas'][0].data
        data['points'] = data['points'][0].data
        data['img'] = data['img'][0].data
        if (box_mode_3d == Box3DMode.DEPTH):
            data['calib'][0]['Rt'] = data['calib'][0]['Rt'][0].data
            data['calib'][0]['K'] = data['calib'][0]['K'][0].data
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data)
    return (result, data)
def main():
    """Convert the downloaded BraTS2019 dataset into nnU-Net's raw layout.

    Copies the four MRI modalities of every HGG/LGG case into imagesTr with
    nnU-Net's _0000.._0003 channel suffixes, converts the segmentations
    into labelsTr, and writes the accompanying dataset.json.
    """
    args = get_args()
    task_name = 'Task043_BraTS2019'
    downloaded_data_dir = args.downloaded_data_dir
    target_base = join(nnUNet_raw_data, task_name)
    target_imagesTr = join(target_base, 'imagesTr')
    target_imagesVal = join(target_base, 'imagesVal')
    target_imagesTs = join(target_base, 'imagesTs')
    target_labelsTr = join(target_base, 'labelsTr')
    maybe_mkdir_p(target_imagesTr)
    maybe_mkdir_p(target_imagesVal)
    maybe_mkdir_p(target_imagesTs)
    maybe_mkdir_p(target_labelsTr)
    patient_names = []
    # High-grade and low-grade glioma cases live in separate subfolders.
    for tpe in ['HGG', 'LGG']:
        cur = join(downloaded_data_dir, tpe)
        for p in subdirs(cur, join=False):
            patdir = join(cur, p)
            # Prefix with the grade so names stay unique across HGG/LGG.
            patient_name = ((tpe + '__') + p)
            patient_names.append(patient_name)
            t1 = join(patdir, (p + '_t1.nii.gz'))
            t1c = join(patdir, (p + '_t1ce.nii.gz'))
            t2 = join(patdir, (p + '_t2.nii.gz'))
            flair = join(patdir, (p + '_flair.nii.gz'))
            seg = join(patdir, (p + '_seg.nii.gz'))
            assert all([isfile(t1), isfile(t1c), isfile(t2), isfile(flair), isfile(seg)]), ('%s' % patient_name)
            # nnU-Net channel convention: _0000=T1, _0001=T1ce, _0002=T2, _0003=FLAIR.
            shutil.copy(t1, join(target_imagesTr, (patient_name + '_0000.nii.gz')))
            shutil.copy(t1c, join(target_imagesTr, (patient_name + '_0001.nii.gz')))
            shutil.copy(t2, join(target_imagesTr, (patient_name + '_0002.nii.gz')))
            shutil.copy(flair, join(target_imagesTr, (patient_name + '_0003.nii.gz')))
            copy_BraTS_segmentation_and_convert_labels(seg, join(target_labelsTr, (patient_name + '.nii.gz')))
    json_dict = OrderedDict()
    json_dict['name'] = 'BraTS2019'
    json_dict['description'] = 'nothing'
    json_dict['tensorImageSize'] = '4D'
    json_dict['reference'] = 'see BraTS2019'
    json_dict['licence'] = 'see BraTS2019 license'
    json_dict['release'] = '0.0'
    json_dict['modality'] = {'0': 'T1', '1': 'T1ce', '2': 'T2', '3': 'FLAIR'}
    json_dict['labels'] = {'0': 'background', '1': 'edema', '2': 'non-enhancing', '3': 'enhancing'}
    json_dict['numTraining'] = len(patient_names)
    json_dict['numTest'] = 0
    json_dict['training'] = [{'image': ('./imagesTr/%s.nii.gz' % i), 'label': ('./labelsTr/%s.nii.gz' % i)} for i in patient_names]
    json_dict['test'] = []
    save_json(json_dict, join(target_base, 'dataset.json'))
def statcast_date_range(start: date, stop: date, step: int, verbose: bool=True) -> Iterator[Tuple[(date, date)]]:
    """Yield (low, high) windows of at most `step` days covering the
    in-season portion of [start, stop], skipping offseason gaps."""
    low = start
    while low <= stop:
        # Fallback season bounds (Mar 15 - Nov 15) for years without an
        # explicit entry in STATCAST_VALID_DATES.
        fallback = (low.replace(month=3, day=15), low.replace(month=11, day=15))
        (season_start, season_end) = STATCAST_VALID_DATES.get(low.year, fallback)
        if low < season_start:
            # Before opening day: jump forward to the season start.
            low = season_start
            if verbose:
                print('Skipping offseason dates')
        elif low > season_end:
            # Past the season's end: jump to next year's opening day.
            next_default = (date(month=3, day=15, year=(low.year + 1)), None)
            (low, _) = STATCAST_VALID_DATES.get((low.year + 1), next_default)
            if verbose:
                print('Skipping offseason dates')
        if low > stop:
            return
        high = min(low + timedelta(step - 1), stop)
        yield (low, high)
        low += timedelta(days=step)
class RunningSampleData():
    """Bookkeeping record for one in-flight sample: its position in the
    running batch, the owning session, and the asyncio task driving it."""
    # Position of this sample within the running batch.
    index: int
    # Identifier of the session the sample belongs to.
    session_id: int
    # The live Session object itself.
    session: Session
    # The asyncio.Task currently executing this sample.
    asyncio_task: asyncio.Task
    def __init__(self, index, session_id, session, task):
        self.index = index
        self.session_id = session_id
        self.session = session
        self.asyncio_task = task
class EnvironmentCommand(BaseTransformersCLICommand):
    """CLI command that collects and prints environment info for bug reports."""

    # BUG FIX: register_subcommand/format_dict take no `self`; their
    # @staticmethod decorators had been stripped, so run()'s
    # self.format_dict(info) passed the instance as `d` and crashed.
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the `env` subcommand to the CLI argument parser."""
        download_parser = parser.add_parser('env')
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        """Gather versions/platform info, print it, and return the dict."""
        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = 'not installed'
        tf_cuda_available = 'NA'
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # Legacy API, removed in newer TF releases.
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                tf_cuda_available = bool(tf.config.list_physical_devices('GPU'))
        info = {'`transformers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'PyTorch version (GPU?)': '{} ({})'.format(pt_version, pt_cuda_available), 'Tensorflow version (GPU?)': '{} ({})'.format(tf_version, tf_cuda_available), 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>'}
        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
        print(self.format_dict(info))
        return info

    @staticmethod
    def format_dict(d):
        """Render the info dict as a markdown bullet list."""
        return ('\n'.join(['- {}: {}'.format(prop, val) for (prop, val) in d.items()]) + '\n')
class Nlvr2PairedDataset(DetectFeatTxtTokDataset):
    """NLVR2 dataset where each text is paired with TWO images; __getitem__
    yields one (input_ids, img_feat, ...) tuple per image plus the label."""
    def __init__(self, txt_db, img_db, use_img_type=True):
        assert isinstance(txt_db, TxtTokLmdb)
        assert isinstance(img_db, DetectFeatLmdb)
        self.txt_db = txt_db
        self.img_db = img_db
        (txt_lens, self.ids) = get_ids_and_lens(txt_db)
        txt2img = txt_db.txt2img
        # Example length estimate: the text is consumed twice (once per
        # image) plus the bounding-box count of both images.
        self.lens = [((2 * tl) + sum((self.img_db.name2nbb[img] for img in txt2img[id_]))) for (tl, id_) in zip(txt_lens, self.ids)]
        # When True, tag each image's regions with a distinct type id.
        self.use_img_type = use_img_type
    def __getitem__(self, i):
        example = super().__getitem__(i)
        target = example['target']
        outs = []
        # NOTE: the loop variable deliberately shadows the argument `i`;
        # the argument is not used past this point.
        for (i, img) in enumerate(example['img_fname']):
            (img_feat, img_pos_feat, num_bb) = self._get_img_feat(img)
            # Fresh copy per image: the ids get wrapped with [CLS]/[SEP].
            input_ids = copy.deepcopy(example['input_ids'])
            input_ids = (([self.txt_db.cls_] + input_ids) + [self.txt_db.sep])
            # Attend over every text token and every image region.
            attn_masks = ([1] * (len(input_ids) + num_bb))
            input_ids = torch.tensor(input_ids)
            attn_masks = torch.tensor(attn_masks)
            if self.use_img_type:
                # Image 0 -> type 1, image 1 -> type 2 (0 reserved for text).
                img_type_ids = torch.tensor(([(i + 1)] * num_bb))
            else:
                img_type_ids = None
            outs.append((input_ids, img_feat, img_pos_feat, attn_masks, img_type_ids))
        return (tuple(outs), target)
class Collator():
    """DataLoader collate_fn turning raw (text, image/url) records into
    (image_tensor, t5_text_embedding) pairs, optionally downloading images."""
    def __init__(self, image_size, url_label, text_label, image_label, name, channels):
        self.url_label = url_label
        self.text_label = text_label
        self.image_label = image_label
        # Download mode is implied by the presence of a URL column.
        self.download = (url_label is not None)
        self.name = name
        self.channels = channels
        self.transform = T.Compose([T.Resize(image_size), T.CenterCrop(image_size), T.ToTensor()])
    def __call__(self, batch):
        texts = []
        images = []
        for item in batch:
            try:
                if self.download:
                    image = self.fetch_single_image(item[self.url_label])
                else:
                    image = item[self.image_label]
                image = self.transform(image.convert(self.channels))
            except:
                # NOTE(review): bare except silently drops any failing item
                # (download error, corrupt image, ...) -- best-effort
                # filtering, but it also hides programming errors.
                continue
            text = t5.t5_encode_text([item[self.text_label]], name=self.name)
            texts.append(torch.squeeze(text))
            images.append(image)
        # Every item failed: signal "skip this batch" to the caller.
        if (len(texts) == 0):
            return None
        # Pad text embeddings to a common length (batch_first=True).
        texts = pad_sequence(texts, True)
        newbatch = []
        for i in range(len(texts)):
            newbatch.append((images[i], texts[i]))
        return torch.utils.data.dataloader.default_collate(newbatch)
    def fetch_single_image(self, image_url, timeout=1):
        """Fetch one image over HTTP; returns a PIL RGB image or None on failure."""
        try:
            request = urllib.request.Request(image_url, data=None, headers={'user-agent': USER_AGENT})
            with urllib.request.urlopen(request, timeout=timeout) as req:
                image = Image.open(io.BytesIO(req.read())).convert('RGB')
        except Exception:
            image = None
        return image
def collect_annotations(files, dataset, nproc=1):
    """Load image infos for every file, optionally in parallel.

    Args:
        files: list of file entries to process.
        dataset: non-empty dataset name forwarded to load_img_info.
        nproc: worker count; values > 1 use parallel progress tracking.
    """
    assert isinstance(files, list)
    assert isinstance(dataset, str)
    assert dataset
    assert isinstance(nproc, int)
    loader = partial(load_img_info, dataset=dataset)
    if nproc <= 1:
        return mmcv.track_progress(loader, files)
    return mmcv.track_parallel_progress(loader, files, nproc=nproc)
def atari_learn(env, args, num_timesteps):
    """Configure logging/optimization and run DQN training on an Atari env."""
    logdir = os.path.join('data', args.exp_name)
    num_iterations = (float(num_timesteps) / 4.0)

    def stopping_criterion(env):
        # Stop once the Monitor wrapper has recorded enough total steps.
        return get_wrapper_by_name(env, 'Monitor').get_total_steps() >= num_timesteps

    optimizer_spec = OptimizerSpec(constructor=optim.Adam, kwargs=dict(lr=LEARNING_RATE))
    exploration_schedule = LinearSchedule(30000, 0.01)
    logz.configure_output_dir(logdir)
    # The two original call sites differed only in q_func; select it once.
    q_func = Dueling_DQN if args.dueling else DQN
    dqn_learning(env=env, method=args.method, game=args.env, q_func=q_func, optimizer_spec=optimizer_spec, exploration=exploration_schedule, stopping_criterion=stopping_criterion, replay_buffer_size=REPLAY_BUFFER_SIZE, batch_size=args.batch_size, gamma=args.gamma, learning_starts=LEARNING_STARTS, learning_freq=LEARNING_FREQ, frame_history_len=FRAME_HISTORY_LEN, target_update_freq=TARGET_UPDATE_FREQ, double=args.double, dueling=args.dueling, logdir=logdir, svrl=args.svrl, me_type=args.me_type, maskp=args.maskp, maskstep=args.maskstep, maskscheduler=args.maskscheduler)
    env.close()
from dataclasses import dataclass, field


# BUG FIX: the fields use field(default_factory=...), which requires the
# @dataclass decorator (it had been stripped); without it every instance
# shared one raw Field object instead of its own sites list.
@dataclass
class TileExtraData(BBAStruct):
    """Per-tile extra data (name prefix, grid position, site instances)
    serialised into the chip database via a BBAWriter."""

    name_prefix: IdString  # interned prefix of the tile's name
    tile_x: int  # tile column in the device grid
    tile_y: int  # tile row in the device grid
    sites: list[SiteInst] = field(default_factory=list)

    def serialise_lists(self, context: str, bba: BBAWriter):
        """Serialise each site's sub-lists, then the site array itself."""
        for (i, site) in enumerate(self.sites):
            site.serialise_lists(f'{context}_si{i}', bba)
        bba.label(f'{context}_sites')
        for (i, site) in enumerate(self.sites):
            site.serialise(f'{context}_si{i}', bba)

    def serialise(self, context: str, bba: BBAWriter):
        """Write the fixed-size record: prefix index, x/y, sites slice."""
        bba.u32(self.name_prefix.index)
        bba.u16(self.tile_x)
        bba.u16(self.tile_y)
        bba.slice(f'{context}_sites', len(self.sites))
def get_averaged_groupby(df, groupby, plot_column):
    """Group `df` by `groupby` and average `plot_column` within each group."""
    grouped = df.groupby(groupby)[plot_column]
    return grouped.apply(np.mean)
def export_model(input_model, output_model):
    """Load an ONNX model, convert it to opset version 14, and save it."""
    print('\nexport model...')
    loaded = onnx.load(input_model)
    upgraded = version_converter.convert_version(loaded, 14)
    onnx.save_model(upgraded, output_model)
class VAE(Model):
    """Two-level hierarchical VAE with gated conv/dense blocks.

    Latents: z2 (top level) and z1 (bottom level).
    Inference model: q(z2|x), q(z1|x, z2).
    Generative model: p(z1|z2), p(x|z1, z2).
    The prior over z2 is either a standard normal or a VampPrior
    (mixture of variational posteriors at learned pseudo-inputs),
    selected by ``args.prior``.
    """

    def __init__(self, args):
        super(VAE, self).__init__(args)
        # Flattened feature size produced by the q(z2|x) conv stack; it
        # depends on the dataset's input resolution.
        if (self.args.dataset_name == 'freyfaces'):
            h_size = 210
        elif (self.args.dataset_name == 'cifar10'):
            h_size = 384
        else:
            h_size = 294
        # q(z2 | x): gated conv encoder + linear mean/logvar heads.
        self.q_z2_layers = nn.Sequential(GatedConv2d(self.args.input_size[0], 32, 7, 1, 3), GatedConv2d(32, 32, 3, 2, 1), GatedConv2d(32, 64, 5, 1, 2), GatedConv2d(64, 64, 3, 2, 1), GatedConv2d(64, 6, 3, 1, 1))
        self.q_z2_mean = NonLinear(h_size, self.args.z2_size, activation=None)
        # Hardtanh clamps log-variances to a numerically safe range.
        self.q_z2_logvar = NonLinear(h_size, self.args.z2_size, activation=nn.Hardtanh(min_val=(- 6.0), max_val=2.0))
        # q(z1 | x, z2): separate x and z2 branches joined by a dense layer.
        self.q_z1_layers_x = nn.Sequential(GatedConv2d(self.args.input_size[0], 32, 3, 1, 1), GatedConv2d(32, 32, 3, 2, 1), GatedConv2d(32, 64, 3, 1, 1), GatedConv2d(64, 64, 3, 2, 1), GatedConv2d(64, 6, 3, 1, 1))
        self.q_z1_layers_z2 = nn.Sequential(GatedDense(self.args.z2_size, h_size))
        self.q_z1_layers_joint = nn.Sequential(GatedDense((2 * h_size), 300))
        self.q_z1_mean = NonLinear(300, self.args.z1_size, activation=None)
        self.q_z1_logvar = NonLinear(300, self.args.z1_size, activation=nn.Hardtanh(min_val=(- 6.0), max_val=2.0))
        # p(z1 | z2)
        self.p_z1_layers = nn.Sequential(GatedDense(self.args.z2_size, 300), GatedDense(300, 300))
        self.p_z1_mean = NonLinear(300, self.args.z1_size, activation=None)
        self.p_z1_logvar = NonLinear(300, self.args.z1_size, activation=nn.Hardtanh(min_val=(- 6.0), max_val=2.0))
        # p(x | z1, z2): dense pre-projection back to image shape, then convs.
        self.p_x_layers_z1 = nn.Sequential(GatedDense(self.args.z1_size, 300))
        self.p_x_layers_z2 = nn.Sequential(GatedDense(self.args.z2_size, 300))
        self.p_x_layers_joint_pre = nn.Sequential(GatedDense((2 * 300), np.prod(self.args.input_size)))
        self.p_x_layers_joint = nn.Sequential(GatedConv2d(self.args.input_size[0], 64, 3, 1, 1), GatedConv2d(64, 64, 3, 1, 1), GatedConv2d(64, 64, 3, 1, 1), GatedConv2d(64, 64, 3, 1, 1))
        # Output heads: Bernoulli mean for binary data, discretized-logistic
        # mean/logvar for gray/continuous data.
        if (self.args.input_type == 'binary'):
            self.p_x_mean = Conv2d(64, 1, 1, 1, 0, activation=nn.Sigmoid())
        elif ((self.args.input_type == 'gray') or (self.args.input_type == 'continuous')):
            self.p_x_mean = Conv2d(64, self.args.input_size[0], 1, 1, 0, activation=nn.Sigmoid())
            self.p_x_logvar = Conv2d(64, self.args.input_size[0], 1, 1, 0, activation=nn.Hardtanh(min_val=(- 4.5), max_val=0.0))
        # He initialization for all linear layers.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                he_init(m)
        if (self.args.prior == 'vampprior'):
            self.add_pseudoinputs()

    def calculate_loss(self, x, beta=1.0, average=False):
        """Return (loss, RE, KL) for a batch, where loss = -RE + beta * KL.

        With ``average=False`` the terms are per-sample vectors; with
        ``average=True`` they are scalar batch means.
        """
        (x_mean, x_logvar, z1_q, z1_q_mean, z1_q_logvar, z2_q, z2_q_mean, z2_q_logvar, z1_p_mean, z1_p_logvar) = self.forward(x)
        if (self.args.input_type == 'binary'):
            RE = log_Bernoulli(x, x_mean, dim=1)
        elif ((self.args.input_type == 'gray') or (self.args.input_type == 'continuous')):
            RE = (- log_Logistic_256(x, x_mean, x_logvar, dim=1))
        else:
            raise Exception('Wrong input type!')
        log_p_z1 = log_Normal_diag(z1_q, z1_p_mean, z1_p_logvar, dim=1)
        log_q_z1 = log_Normal_diag(z1_q, z1_q_mean, z1_q_logvar, dim=1)
        log_p_z2 = self.log_p_z2(z2_q)
        log_q_z2 = log_Normal_diag(z2_q, z2_q_mean, z2_q_logvar, dim=1)
        # KL = E_q[log q(z1) + log q(z2) - log p(z1|z2) - log p(z2)]
        KL = (- (((log_p_z1 + log_p_z2) - log_q_z1) - log_q_z2))
        loss = ((- RE) + (beta * KL))
        if average:
            loss = torch.mean(loss)
            RE = torch.mean(RE)
            KL = torch.mean(KL)
        return (loss, RE, KL)

    def calculate_likelihood(self, X, dir, mode='test', S=5000, MB=500):
        """Importance-sampling estimate of the marginal log-likelihood.

        Uses S samples per test point, processed in minibatches of MB;
        writes a histogram of per-point likelihoods to `dir`.
        """
        N_test = X.size(0)
        likelihood_test = []
        # Split the S importance samples into R rounds of MB each.
        if (S <= MB):
            R = 1
        else:
            R = (S / MB)
            S = MB
        for j in range(N_test):
            if ((j % 100) == 0):
                print('{:.2f}%'.format(((j / (1.0 * N_test)) * 100)))
            x_single = X[j].unsqueeze(0)
            a = []
            for r in range(0, int(R)):
                x = x_single.expand(S, x_single.size(1)).contiguous()
                (a_tmp, _, _) = self.calculate_loss(x)
                a.append((- a_tmp.cpu().data.numpy()))
            a = np.asarray(a)
            a = np.reshape(a, ((a.shape[0] * a.shape[1]), 1))
            # log(1/S * sum exp(log w)) = logsumexp(log w) - log S
            likelihood_x = logsumexp(a)
            likelihood_test.append((likelihood_x - np.log(len(a))))
        likelihood_test = np.array(likelihood_test)
        plot_histogram((- likelihood_test), dir, mode)
        return (- np.mean(likelihood_test))

    def calculate_lower_bound(self, X_full, MB=500):
        """Return the ELBO averaged over `X_full`, evaluated in chunks of MB."""
        lower_bound = 0.0
        RE_all = 0.0
        KL_all = 0.0
        I = int(math.ceil((X_full.size(0) / MB)))
        for i in range(I):
            x = X_full[(i * MB):((i + 1) * MB)].view((- 1), np.prod(self.args.input_size))
            (loss, RE, KL) = self.calculate_loss(x, average=True)
            # BUGFIX: `.data[0]` indexing of a 0-dim tensor raises on
            # PyTorch >= 0.4; `.item()` is the supported scalar accessor.
            RE_all += RE.cpu().item()
            KL_all += KL.cpu().item()
            lower_bound += loss.cpu().item()
        lower_bound /= I
        return lower_bound

    def generate_x(self, N=25):
        """Sample N images from the generative model."""
        if (self.args.prior == 'standard'):
            # BUGFIX: sample z2 with z2_size dimensions (was z1_size); every
            # consumer of z2 (p_z1, p_x_layers_z2) expects args.z2_size inputs.
            z2_sample_rand = Variable(torch.FloatTensor(N, self.args.z2_size).normal_())
            if self.args.cuda:
                z2_sample_rand = z2_sample_rand.cuda()
        elif (self.args.prior == 'vampprior'):
            # Sample z2 from the posterior at the learned pseudo-inputs.
            means = self.means(self.idle_input)[0:N].view((- 1), self.args.input_size[0], self.args.input_size[1], self.args.input_size[2])
            (z2_sample_gen_mean, z2_sample_gen_logvar) = self.q_z2(means)
            z2_sample_rand = self.reparameterize(z2_sample_gen_mean, z2_sample_gen_logvar)
        (z1_sample_mean, z1_sample_logvar) = self.p_z1(z2_sample_rand)
        z1_sample_rand = self.reparameterize(z1_sample_mean, z1_sample_logvar)
        (samples_gen, _) = self.p_x(z1_sample_rand, z2_sample_rand)
        return samples_gen

    def reconstruct_x(self, x):
        """Return the model's reconstruction (mean of p(x|z1,z2)) of `x`."""
        (x_reconstructed, _, _, _, _, _, _, _, _, _) = self.forward(x)
        return x_reconstructed

    def q_z2(self, x):
        """Encode x to (mean, logvar) of q(z2|x)."""
        h = self.q_z2_layers(x)
        h = h.view(x.size(0), (- 1))
        z2_q_mean = self.q_z2_mean(h)
        z2_q_logvar = self.q_z2_logvar(h)
        return (z2_q_mean, z2_q_logvar)

    def q_z1(self, x, z2):
        """Encode (x, z2) to (mean, logvar) of q(z1|x,z2)."""
        x = self.q_z1_layers_x(x)
        x = x.view(x.size(0), (- 1))
        z2 = self.q_z1_layers_z2(z2)
        h = torch.cat((x, z2), 1)
        h = self.q_z1_layers_joint(h)
        z1_q_mean = self.q_z1_mean(h)
        z1_q_logvar = self.q_z1_logvar(h)
        return (z1_q_mean, z1_q_logvar)

    def p_z1(self, z2):
        """Map z2 to (mean, logvar) of the conditional prior p(z1|z2)."""
        z2 = self.p_z1_layers(z2)
        z1_p_mean = self.p_z1_mean(z2)
        z1_p_logvar = self.p_z1_logvar(z2)
        return (z1_p_mean, z1_p_logvar)

    def p_x(self, z1, z2):
        """Decode (z1, z2) to the parameters (mean, logvar) of p(x|z1,z2)."""
        z2 = self.p_x_layers_z2(z2)
        z1 = self.p_x_layers_z1(z1)
        h = torch.cat((z1, z2), 1)
        h = self.p_x_layers_joint_pre(h)
        h = h.view((- 1), self.args.input_size[0], self.args.input_size[1], self.args.input_size[2])
        h_decoder = self.p_x_layers_joint(h)
        x_mean = self.p_x_mean(h_decoder).view((- 1), np.prod(self.args.input_size))
        if (self.args.input_type == 'binary'):
            x_logvar = 0.0
        else:
            # Clamp to the open interval used by the discretized logistic.
            x_mean = torch.clamp(x_mean, min=(0.0 + (1.0 / 512.0)), max=(1.0 - (1.0 / 512.0)))
            x_logvar = self.p_x_logvar(h_decoder).view((- 1), np.prod(self.args.input_size))
        return (x_mean, x_logvar)

    def log_p_z2(self, z2):
        """Log-density of the prior p(z2): standard normal or VampPrior."""
        if (self.args.prior == 'standard'):
            log_prior = log_Normal_standard(z2, dim=1)
        elif (self.args.prior == 'vampprior'):
            C = self.args.number_components
            X = self.means(self.idle_input).view((- 1), self.args.input_size[0], self.args.input_size[1], self.args.input_size[2])
            (z2_p_mean, z2_p_logvar) = self.q_z2(X)
            # Broadcast z2 against the C mixture components.
            z_expand = z2.unsqueeze(1)
            means = z2_p_mean.unsqueeze(0)
            logvars = z2_p_logvar.unsqueeze(0)
            a = (log_Normal_diag(z_expand, means, logvars, dim=2) - math.log(C))
            # Numerically stable log-sum-exp over components.
            (a_max, _) = torch.max(a, 1)
            log_prior = (a_max + torch.log(torch.sum(torch.exp((a - a_max.unsqueeze(1))), 1)))
        else:
            raise Exception('Wrong name of the prior!')
        return log_prior

    def forward(self, x):
        """Full inference + generation pass; returns all distribution params."""
        x = x.view((- 1), self.args.input_size[0], self.args.input_size[1], self.args.input_size[2])
        (z2_q_mean, z2_q_logvar) = self.q_z2(x)
        z2_q = self.reparameterize(z2_q_mean, z2_q_logvar)
        (z1_q_mean, z1_q_logvar) = self.q_z1(x, z2_q)
        z1_q = self.reparameterize(z1_q_mean, z1_q_logvar)
        (z1_p_mean, z1_p_logvar) = self.p_z1(z2_q)
        (x_mean, x_logvar) = self.p_x(z1_q, z2_q)
        return (x_mean, x_logvar, z1_q, z1_q_mean, z1_q_logvar, z2_q, z2_q_mean, z2_q_logvar, z1_p_mean, z1_p_logvar)
def add_dataset_config(cfg):
    """Register model- and dataset-related config nodes on `cfg` (in place).

    Adds ROI-head sizing/embedding options, layer-freezing lists, and the
    per-dataset path/filtering nodes for Visual Genome, arXivDocs
    (target/weak), E-Periodica (v1-v3) and MS-COCO.
    """
    _C = cfg
    # --- Model / ROI heads ---
    _C.MODEL.ROI_HEADS.NUM_OUTPUT_CLASSES = 80
    _C.MODEL.ROI_HEADS.EMBEDDINGS_PATH = ''
    _C.MODEL.ROI_HEADS.EMBEDDINGS_PATH_COCO = ''
    _C.MODEL.ROI_HEADS.LINGUAL_MATRIX_THRESHOLD = 0.05
    _C.MODEL.ROI_HEADS.MASK_NUM_CLASSES = 80
    _C.MODEL.FREEZE_LAYERS = CN()
    _C.MODEL.FREEZE_LAYERS.META_ARCH = []
    _C.MODEL.FREEZE_LAYERS.ROI_HEADS = []
    # --- Visual Genome ---
    _C.DATASETS.TYPE = ''
    _C.DATASETS.VISUAL_GENOME = CN()
    _C.DATASETS.VISUAL_GENOME.IMAGES = './datasets/vg/VG_100K/'
    _C.DATASETS.VISUAL_GENOME.MAPPING_DICTIONARY = './datasets/vg/orig_and_generated_files/VG-SGG-dicts-with-attri.json'
    _C.DATASETS.VISUAL_GENOME.IMAGE_DATA = './datasets/vg/orig_and_generated_files/image_data.json'
    _C.DATASETS.VISUAL_GENOME.VG_ATTRIBUTE_H5 = './datasets/vg/orig_and_generated_files/VG-SGG-with-attri.h5'
    _C.DATASETS.VISUAL_GENOME.TRAIN_MASKS = ''
    _C.DATASETS.VISUAL_GENOME.TEST_MASKS = ''
    _C.DATASETS.VISUAL_GENOME.VAL_MASKS = ''
    _C.DATASETS.VISUAL_GENOME.CLIPPED = False
    # --- arXivDocs (target split) ---
    _C.DATASETS.ARXIVDOCS_TARGET = CN()
    _C.DATASETS.ARXIVDOCS_TARGET.CLIPPED = False
    _C.DATASETS.ARXIVDOCS_TARGET.FILTER_EMPTY_RELATIONS = True
    _C.DATASETS.ARXIVDOCS_TARGET.FILTER_DUPLICATE_RELATIONS = True
    _C.DATASETS.ARXIVDOCS_TARGET.FILTER_NON_OVERLAP = False
    _C.DATASETS.ARXIVDOCS_TARGET.BOX_SCALE = 1024
    _C.DATASETS.ARXIVDOCS_TARGET.TRAIN_MASKS = './datasets/ADtgt_VGv2/additional_processed_anns/train/attribute_files/arxivdocs_target_layout_v2_train_scene_graph_segmentations.json'
    _C.DATASETS.ARXIVDOCS_TARGET.TEST_MASKS = './datasets/ADtgt_VGv2/additional_processed_anns/test/attribute_files/arxivdocs_target_layout_v2_test_scene_graph_segmentations.json'
    _C.DATASETS.ARXIVDOCS_TARGET.VAL_MASKS = './datasets/ADtgt_VGv2/additional_processed_anns/dev/attribute_files/arxivdocs_target_layout_v2_dev_scene_graph_segmentations.json'
    _C.DATASETS.ARXIVDOCS_TARGET.TRAIN_IMAGES = './datasets/ADtgt_VGv2/imgs/train'
    _C.DATASETS.ARXIVDOCS_TARGET.TRAIN_MAPPING_DICTIONARY = './datasets/ADtgt_VGv2/additional_processed_anns/train/attribute_files/arxivdocs_target_layout_v2_train_scene_graph_dicts_with_attri.json'
    _C.DATASETS.ARXIVDOCS_TARGET.TRAIN_IMAGE_DATA = './datasets/ADtgt_VGv2/anns/train/arxivdocs_target_layout_v2_train_scene_graph_image_data.json'
    _C.DATASETS.ARXIVDOCS_TARGET.TRAIN_ARXIVDOCS_TARGET_ATTRIBUTE_H5 = './datasets/ADtgt_VGv2/additional_processed_anns/train/attribute_files/arxivdocs_target_layout_v2_train_scene_graph_with_attri.h5'
    _C.DATASETS.ARXIVDOCS_TARGET.VAL_IMAGES = './datasets/ADtgt_VGv2/imgs/dev'
    _C.DATASETS.ARXIVDOCS_TARGET.VAL_MAPPING_DICTIONARY = './datasets/ADtgt_VGv2/additional_processed_anns/dev/attribute_files/arxivdocs_target_layout_v2_dev_scene_graph_dicts_with_attri.json'
    _C.DATASETS.ARXIVDOCS_TARGET.VAL_IMAGE_DATA = './datasets/ADtgt_VGv2/anns/dev/arxivdocs_target_layout_v2_dev_scene_graph_image_data.json'
    _C.DATASETS.ARXIVDOCS_TARGET.VAL_ARXIVDOCS_TARGET_ATTRIBUTE_H5 = './datasets/ADtgt_VGv2/additional_processed_anns/dev/attribute_files/arxivdocs_target_layout_v2_dev_scene_graph_with_attri.h5'
    _C.DATASETS.ARXIVDOCS_TARGET.TEST_IMAGES = './datasets/ADtgt_VGv2/imgs/test'
    _C.DATASETS.ARXIVDOCS_TARGET.TEST_MAPPING_DICTIONARY = './datasets/ADtgt_VGv2/additional_processed_anns/test/attribute_files/arxivdocs_target_layout_v2_test_scene_graph_dicts_with_attri.json'
    _C.DATASETS.ARXIVDOCS_TARGET.TEST_IMAGE_DATA = './datasets/ADtgt_VGv2/anns/test/arxivdocs_target_layout_v2_test_scene_graph_image_data.json'
    _C.DATASETS.ARXIVDOCS_TARGET.TEST_ARXIVDOCS_TARGET_ATTRIBUTE_H5 = './datasets/ADtgt_VGv2/additional_processed_anns/test/attribute_files/arxivdocs_target_layout_v2_test_scene_graph_with_attri.h5'
    # --- arXivDocs (weak split; train only) ---
    _C.DATASETS.ARXIVDOCS_WEAK = CN()
    _C.DATASETS.ARXIVDOCS_WEAK.CLIPPED = False
    _C.DATASETS.ARXIVDOCS_WEAK.FILTER_EMPTY_RELATIONS = True
    _C.DATASETS.ARXIVDOCS_WEAK.FILTER_DUPLICATE_RELATIONS = True
    _C.DATASETS.ARXIVDOCS_WEAK.FILTER_NON_OVERLAP = False
    _C.DATASETS.ARXIVDOCS_WEAK.BOX_SCALE = 1024
    _C.DATASETS.ARXIVDOCS_WEAK.TRAIN_MASKS = './datasets/ADwk_VGv2/additional_processed_anns/train/attribute_files/arxivdocs_weak_layout_v2_train_scene_graph_segmentations.json'
    _C.DATASETS.ARXIVDOCS_WEAK.TEST_MASKS = './datasets/ADwk_VGv2/additional_processed_anns/test/attribute_files/arxivdocs_weak_layout_v2_test_scene_graph_segmentations.json'
    _C.DATASETS.ARXIVDOCS_WEAK.VAL_MASKS = './datasets/ADwk_VGv2/additional_processed_anns/dev/attribute_files/arxivdocs_weak_layout_v2_dev_scene_graph_segmentations.json'
    _C.DATASETS.ARXIVDOCS_WEAK.TRAIN_IMAGES = './datasets/ADwk_VGv2/imgs/train'
    _C.DATASETS.ARXIVDOCS_WEAK.TRAIN_MAPPING_DICTIONARY = './datasets/ADwk_VGv2/additional_processed_anns/train/attribute_files/arxivdocs_weak_layout_v2_train_scene_graph_dicts_with_attri.json'
    _C.DATASETS.ARXIVDOCS_WEAK.TRAIN_IMAGE_DATA = './datasets/ADwk_VGv2/anns/train/arxivdocs_weak_layout_v2_train_scene_graph_image_data.json'
    _C.DATASETS.ARXIVDOCS_WEAK.TRAIN_ARXIVDOCS_WEAK_ATTRIBUTE_H5 = './datasets/ADwk_VGv2/additional_processed_anns/train/attribute_files/arxivdocs_weak_layout_v2_train_scene_graph_with_attri.h5'
    # --- E-Periodica v3 layout (dataset dir eperiodica_v3) ---
    _C.DATASETS.EPERIODICA = CN()
    _C.DATASETS.EPERIODICA.CLIPPED = False
    _C.DATASETS.EPERIODICA.FILTER_EMPTY_RELATIONS = True
    _C.DATASETS.EPERIODICA.FILTER_DUPLICATE_RELATIONS = True
    _C.DATASETS.EPERIODICA.FILTER_NON_OVERLAP = False
    _C.DATASETS.EPERIODICA.BOX_SCALE = 1024
    # NOTE: removed dead assignments that set TRAIN/VAL/TEST_MASKS to ''
    # immediately before these real values (they were always overwritten).
    _C.DATASETS.EPERIODICA.TRAIN_MASKS = './datasets/eperiodica_v3/additional_processed_anns/train/eperiodica_minitrain/attribute_files/eperiodica_minitrain_VG_scene_graph_segmentations.json'
    _C.DATASETS.EPERIODICA.VAL_MASKS = './datasets/eperiodica_v3/additional_processed_anns/val/eperiodica_minival/attribute_files/eperiodica_minival_VG_scene_graph_segmentations.json'
    _C.DATASETS.EPERIODICA.TEST_MASKS = './datasets/eperiodica_v3/additional_processed_anns/test/eperiodica_minitest/attribute_files/eperiodica_minitest_VG_scene_graph_segmentations.json'
    _C.DATASETS.EPERIODICA.TRAIN_IMAGES = './datasets/eperiodica_v3/imgs/train'
    _C.DATASETS.EPERIODICA.TRAIN_MAPPING_DICTIONARY = './datasets/eperiodica_v3/additional_processed_anns/train/eperiodica_minitrain/attribute_files/eperiodica_minitrain_VG_scene_graph_dicts_with_attri.json'
    _C.DATASETS.EPERIODICA.TRAIN_IMAGE_DATA = './datasets/eperiodica_v3/anns/train/eperiodica_minitrain/eperiodica_minitrain_VG_scene_graph_image_data.json'
    _C.DATASETS.EPERIODICA.TRAIN_EPERIODICA_TARGET_ATTRIBUTE_H5 = './datasets/eperiodica_v3/additional_processed_anns/train/eperiodica_minitrain/attribute_files/eperiodica_minitrain_VG_scene_graph_with_attri.h5'
    _C.DATASETS.EPERIODICA.VAL_IMAGES = './datasets/eperiodica_v3/imgs/val'
    _C.DATASETS.EPERIODICA.VAL_MAPPING_DICTIONARY = './datasets/eperiodica_v3/additional_processed_anns/val/eperiodica_minival/attribute_files/eperiodica_minival_VG_scene_graph_dicts_with_attri.json'
    _C.DATASETS.EPERIODICA.VAL_IMAGE_DATA = './datasets/eperiodica_v3/anns/val/eperiodica_minival/eperiodica_minival_VG_scene_graph_image_data.json'
    _C.DATASETS.EPERIODICA.VAL_EPERIODICA_TARGET_ATTRIBUTE_H5 = './datasets/eperiodica_v3/additional_processed_anns/val/eperiodica_minival/attribute_files/eperiodica_minival_VG_scene_graph_with_attri.h5'
    _C.DATASETS.EPERIODICA.TEST_IMAGES = './datasets/eperiodica_v3/imgs/test'
    _C.DATASETS.EPERIODICA.TEST_MAPPING_DICTIONARY = './datasets/eperiodica_v3/additional_processed_anns/test/eperiodica_minitest/attribute_files/eperiodica_minitest_VG_scene_graph_dicts_with_attri.json'
    _C.DATASETS.EPERIODICA.TEST_IMAGE_DATA = './datasets/eperiodica_v3/anns/test/eperiodica_minitest/eperiodica_minitest_VG_scene_graph_image_data.json'
    _C.DATASETS.EPERIODICA.TEST_EPERIODICA_TARGET_ATTRIBUTE_H5 = './datasets/eperiodica_v3/additional_processed_anns/test/eperiodica_minitest/attribute_files/eperiodica_minitest_VG_scene_graph_with_attri.h5'
    # --- E-Periodica v2 (dataset dir eperiodica2; no segmentation masks) ---
    _C.DATASETS.EPERIODICA2 = CN()
    _C.DATASETS.EPERIODICA2.CLIPPED = False
    _C.DATASETS.EPERIODICA2.FILTER_EMPTY_RELATIONS = True
    _C.DATASETS.EPERIODICA2.FILTER_DUPLICATE_RELATIONS = True
    _C.DATASETS.EPERIODICA2.FILTER_NON_OVERLAP = False
    _C.DATASETS.EPERIODICA2.BOX_SCALE = 1024
    _C.DATASETS.EPERIODICA2.TRAIN_MASKS = ''
    _C.DATASETS.EPERIODICA2.TEST_MASKS = ''
    _C.DATASETS.EPERIODICA2.VAL_MASKS = ''
    _C.DATASETS.EPERIODICA2.TRAIN_IMAGES = './datasets/eperiodica2/imgs/train'
    _C.DATASETS.EPERIODICA2.TRAIN_MAPPING_DICTIONARY = './datasets/eperiodica2/additional_processed_anns/train/eperiodica_minitrain/attribute_files/eperiodica_minitrain_VG_scene_graph_dicts_with_attri.json'
    _C.DATASETS.EPERIODICA2.TRAIN_IMAGE_DATA = './datasets/eperiodica2/anns/train/eperiodica_minitrain/eperiodica_minitrain_VG_scene_graph_image_data.json'
    _C.DATASETS.EPERIODICA2.TRAIN_EPERIODICA_TARGET_ATTRIBUTE_H5 = './datasets/eperiodica2/additional_processed_anns/train/eperiodica_minitrain/attribute_files/eperiodica_minitrain_VG_scene_graph_with_attri.h5'
    _C.DATASETS.EPERIODICA2.VAL_IMAGES = './datasets/eperiodica2/imgs/val'
    _C.DATASETS.EPERIODICA2.VAL_MAPPING_DICTIONARY = './datasets/eperiodica2/additional_processed_anns/val/eperiodica_minival/attribute_files/eperiodica_minival_VG_scene_graph_dicts_with_attri.json'
    _C.DATASETS.EPERIODICA2.VAL_IMAGE_DATA = './datasets/eperiodica2/anns/val/eperiodica_minival/eperiodica_minival_VG_scene_graph_image_data.json'
    _C.DATASETS.EPERIODICA2.VAL_EPERIODICA_TARGET_ATTRIBUTE_H5 = './datasets/eperiodica2/additional_processed_anns/val/eperiodica_minival/attribute_files/eperiodica_minival_VG_scene_graph_with_attri.h5'
    _C.DATASETS.EPERIODICA2.TEST_IMAGES = './datasets/eperiodica2/imgs/test'
    _C.DATASETS.EPERIODICA2.TEST_MAPPING_DICTIONARY = './datasets/eperiodica2/additional_processed_anns/test/eperiodica_minitest/attribute_files/eperiodica_minitest_VG_scene_graph_dicts_with_attri.json'
    _C.DATASETS.EPERIODICA2.TEST_IMAGE_DATA = './datasets/eperiodica2/anns/test/eperiodica_minitest/eperiodica_minitest_VG_scene_graph_image_data.json'
    _C.DATASETS.EPERIODICA2.TEST_EPERIODICA_TARGET_ATTRIBUTE_H5 = './datasets/eperiodica2/additional_processed_anns/test/eperiodica_minitest/attribute_files/eperiodica_minitest_VG_scene_graph_with_attri.h5'
    # --- E-Periodica v3 (dataset dir eperiodica3, with masks) ---
    _C.DATASETS.EPERIODICA3 = CN()
    _C.DATASETS.EPERIODICA3.CLIPPED = False
    _C.DATASETS.EPERIODICA3.FILTER_EMPTY_RELATIONS = True
    _C.DATASETS.EPERIODICA3.FILTER_DUPLICATE_RELATIONS = True
    _C.DATASETS.EPERIODICA3.FILTER_NON_OVERLAP = False
    _C.DATASETS.EPERIODICA3.BOX_SCALE = 1024
    _C.DATASETS.EPERIODICA3.TRAIN_MASKS = './datasets/eperiodica3/additional_processed_anns/train/eperiodica_minitrain/attribute_files/eperiodica_minitrain_VG_scene_graph_segmentations.json'
    _C.DATASETS.EPERIODICA3.VAL_MASKS = './datasets/eperiodica3/additional_processed_anns/val/eperiodica_minival/attribute_files/eperiodica_minival_VG_scene_graph_segmentations.json'
    _C.DATASETS.EPERIODICA3.TEST_MASKS = './datasets/eperiodica3/additional_processed_anns/test/eperiodica_minitest/attribute_files/eperiodica_minitest_VG_scene_graph_segmentations.json'
    _C.DATASETS.EPERIODICA3.TRAIN_IMAGES = './datasets/eperiodica3/imgs/train'
    _C.DATASETS.EPERIODICA3.TRAIN_MAPPING_DICTIONARY = './datasets/eperiodica3/additional_processed_anns/train/eperiodica_minitrain/attribute_files/eperiodica_minitrain_VG_scene_graph_dicts_with_attri.json'
    _C.DATASETS.EPERIODICA3.TRAIN_IMAGE_DATA = './datasets/eperiodica3/anns/train/eperiodica_minitrain/eperiodica_minitrain_VG_scene_graph_image_data.json'
    _C.DATASETS.EPERIODICA3.TRAIN_EPERIODICA_TARGET_ATTRIBUTE_H5 = './datasets/eperiodica3/additional_processed_anns/train/eperiodica_minitrain/attribute_files/eperiodica_minitrain_VG_scene_graph_with_attri.h5'
    _C.DATASETS.EPERIODICA3.VAL_IMAGES = './datasets/eperiodica3/imgs/val'
    _C.DATASETS.EPERIODICA3.VAL_MAPPING_DICTIONARY = './datasets/eperiodica3/additional_processed_anns/val/eperiodica_minival/attribute_files/eperiodica_minival_VG_scene_graph_dicts_with_attri.json'
    _C.DATASETS.EPERIODICA3.VAL_IMAGE_DATA = './datasets/eperiodica3/anns/val/eperiodica_minival/eperiodica_minival_VG_scene_graph_image_data.json'
    _C.DATASETS.EPERIODICA3.VAL_EPERIODICA_TARGET_ATTRIBUTE_H5 = './datasets/eperiodica3/additional_processed_anns/val/eperiodica_minival/attribute_files/eperiodica_minival_VG_scene_graph_with_attri.h5'
    _C.DATASETS.EPERIODICA3.TEST_IMAGES = './datasets/eperiodica3/imgs/test'
    _C.DATASETS.EPERIODICA3.TEST_MAPPING_DICTIONARY = './datasets/eperiodica3/additional_processed_anns/test/eperiodica_minitest/attribute_files/eperiodica_minitest_VG_scene_graph_dicts_with_attri.json'
    _C.DATASETS.EPERIODICA3.TEST_IMAGE_DATA = './datasets/eperiodica3/anns/test/eperiodica_minitest/eperiodica_minitest_VG_scene_graph_image_data.json'
    _C.DATASETS.EPERIODICA3.TEST_EPERIODICA_TARGET_ATTRIBUTE_H5 = './datasets/eperiodica3/additional_processed_anns/test/eperiodica_minitest/attribute_files/eperiodica_minitest_VG_scene_graph_with_attri.h5'
    # --- MS-COCO + remaining Visual Genome filter options ---
    _C.DATASETS.MSCOCO = CN()
    _C.DATASETS.MSCOCO.ANNOTATIONS = ''
    _C.DATASETS.MSCOCO.DATAROOT = ''
    _C.DATASETS.VISUAL_GENOME.FILTER_EMPTY_RELATIONS = True
    _C.DATASETS.VISUAL_GENOME.FILTER_DUPLICATE_RELATIONS = True
    _C.DATASETS.VISUAL_GENOME.FILTER_NON_OVERLAP = True
    _C.DATASETS.VISUAL_GENOME.NUMBER_OF_VALIDATION_IMAGES = 5000
    _C.DATASETS.VISUAL_GENOME.BOX_SCALE = 1024
    _C.DATASETS.SEG_DATA_DIVISOR = 1
    _C.DATASETS.TRANSFER = ('coco_train_2014',)
    _C.DATASETS.MASK_TRAIN = ('coco_train_2017',)
    _C.DATASETS.MASK_TEST = ('coco_val_2017',)
class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    """Unit tests for the AttnSkipUpBlock2D UNet up-block (via the shared
    block-tester mixin)."""
    # Block under test and its position in the UNet.
    block_class = AttnSkipUpBlock2D
    block_type = 'up'

    def dummy_input(self):
        # Up-blocks additionally need the tuple of residual hidden states
        # coming from the corresponding down-block.
        # NOTE(review): in the upstream tester mixin `dummy_input` is usually
        # declared as a @property — confirm the mixin calls this as a method
        # and not as an attribute.
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        # Reference slice of the block's output under the mixin's fixed setup.
        expected_slice = [0.0361, 0.0617, 0.2787, (- 0.035), 0.0342, 0.3421, (- 0.0843), 0.0913, 0.3015]
        super().test_output(expected_slice)
class PreActResNet(nn.Module):
    """Pre-activation ResNet (depth 18 or 34) with built-in input
    normalization: `forward` subtracts `mean` and divides by `std` before
    the first convolution.
    """

    def __init__(self, num_classes: int=10, depth: int=18, width: int=0, activation_fn: nn.Module=nn.ReLU, mean: Union[(Tuple[(float, ...)], float)]=CIFAR10_MEAN, std: Union[(Tuple[(float, ...)], float)]=CIFAR10_STD, padding: int=0, num_input_channels: int=3):
        """Build the network.

        Raises:
            ValueError: if `width` != 0 or `depth` not in (18, 34).
        """
        super().__init__()
        if (width != 0):
            raise ValueError('Unsupported `width`.')
        # Normalization constants shaped to broadcast over (C, H, W).
        self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
        self.std = torch.tensor(std).view(num_input_channels, 1, 1)
        # CUDA copies are created lazily on the first forward with CUDA input.
        self.mean_cuda = None
        self.std_cuda = None
        self.padding = padding
        self.conv_2d = nn.Conv2d(num_input_channels, 64, kernel_size=3, stride=1, padding=1, bias=False)
        if (depth == 18):
            num_blocks = (2, 2, 2, 2)
        elif (depth == 34):
            num_blocks = (3, 4, 6, 3)
        else:
            raise ValueError('Unsupported `depth`.')
        self.layer_0 = self._make_layer(64, 64, num_blocks[0], 1, activation_fn)
        self.layer_1 = self._make_layer(64, 128, num_blocks[1], 2, activation_fn)
        self.layer_2 = self._make_layer(128, 256, num_blocks[2], 2, activation_fn)
        self.layer_3 = self._make_layer(256, 512, num_blocks[3], 2, activation_fn)
        self.batchnorm = nn.BatchNorm2d(512, momentum=0.01)
        self.relu = activation_fn()
        self.logits = nn.Linear(512, num_classes)

    def _make_layer(self, in_planes, out_planes, num_blocks, stride, activation_fn):
        """Stack `num_blocks` pre-activation blocks; only the first block
        applies `stride` and the in->out channel change."""
        layers = []
        # Renamed the loop variable so it no longer shadows the `stride`
        # parameter it is derived from.
        for (i, block_stride) in enumerate(([stride] + ([1] * (num_blocks - 1)))):
            # Explicit conditional instead of the fragile `(cond and a) or b`
            # idiom, which silently picks `b` whenever `a` is falsy (e.g. 0).
            layers.append(_PreActBlock((in_planes if (i == 0) else out_planes), out_planes, block_stride, activation_fn))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Normalize, run the conv trunk, pool and return class logits."""
        if (self.padding > 0):
            x = F.pad(x, ((self.padding,) * 4))
        if x.is_cuda:
            if (self.mean_cuda is None):
                # Cache device copies so they are not re-transferred per call.
                self.mean_cuda = self.mean.cuda()
                self.std_cuda = self.std.cuda()
            out = ((x - self.mean_cuda) / self.std_cuda)
        else:
            out = ((x - self.mean) / self.std)
        out = self.conv_2d(out)
        out = self.layer_0(out)
        out = self.layer_1(out)
        out = self.layer_2(out)
        out = self.layer_3(out)
        out = self.relu(self.batchnorm(out))
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), (- 1))
        return self.logits(out)
def to_bigdl_optim_method(optimizer):
    """Map an optimizer name (case-insensitive) to a BigDL optim method.

    Reports an invalid-input error for unsupported names.
    """
    # Factories keep construction lazy, mirroring the original if/elif chain.
    factories = {
        'adagrad': lambda: Adagrad(learningrate=0.01),
        'sgd': lambda: SGD(learningrate=0.01),
        'adam': lambda: Adam(),
        'rmsprop': lambda: RMSprop(learningrate=0.001, decayrate=0.9),
        'adadelta': lambda: Adadelta(decayrate=0.95, epsilon=1e-08),
        'adamax': lambda: Adamax(epsilon=1e-08),
    }
    optimizer = optimizer.lower()
    factory = factories.get(optimizer)
    if factory is None:
        invalidInputError(False, ('Unsupported optimizer: %s' % optimizer))
    else:
        return factory()
def build_and_train(slot_affinity_code, log_dir, run_ID, config_key):
    """Assemble an async TD3 experiment from `config_key` (overridden by the
    variant found in `log_dir`) and launch training under a logger context."""
    affinity = affinity_from_code(slot_affinity_code)
    cfg = update_config(configs[config_key], load_variant(log_dir))
    sampler = AsyncCpuSampler(EnvCls=gym_make, env_kwargs=cfg['env'], CollectorCls=DbCpuResetCollector, eval_env_kwargs=cfg['env'], **cfg['sampler'])
    runner = AsyncRlEval(algo=TD3(optim_kwargs=cfg['optim'], **cfg['algo']), agent=Td3Agent(**cfg['agent']), sampler=sampler, affinity=affinity, **cfg['runner'])
    with logger_context(log_dir, run_ID, ('async_td3_' + cfg['env']['id']), cfg):
        runner.train()
def mkdir_p(path):
    """Create `path` (and any missing parents), like ``mkdir -p``.

    No error is raised if the directory already exists; any other failure
    (e.g. the path exists as a regular file, or permission is denied)
    propagates as an OSError, matching the original errno-based handling.
    """
    os.makedirs(os.path.abspath(path), exist_ok=True)
def check_all_inits():
    """Walk the transformers tree and verify every __init__.py defines the
    same objects in both of its halves; raise ValueError listing failures."""
    failures = []
    for (root, _, files) in os.walk(PATH_TO_TRANSFORMERS):
        if ('__init__.py' not in files):
            continue
        init_path = os.path.join(root, '__init__.py')
        parsed = parse_init(init_path)
        if (parsed is None):
            continue
        errors = analyze_results(*parsed)
        if (not errors):
            continue
        errors[0] = f'''Problem in {init_path}, both halves do not define the same objects.
{errors[0]}'''
        failures.append('\n'.join(errors))
    if failures:
        raise ValueError('\n\n'.join(failures))
# NOTE(review): bare `_module()` call at module level — this looks like the
# truncated remnant of a registry decorator for the class below
# (e.g. `@DATASETS.register_module()`); confirm against the original source.
_module()
class WIDERFaceDataset(XMLDataset):
    """WIDER-Face dataset reader in Pascal-VOC/XML layout.

    The annotation list file contains one image id per line; each id maps
    to `<img_prefix>/Annotations/<id>.xml` and an image `<folder>/<id>.jpg`
    (the folder name is read from the XML itself).

    Note: the redundant ``__init__`` that only forwarded identical kwargs
    to the superclass was removed; inheritance covers it.
    """
    CLASSES = ('face',)
    PALETTE = [(0, 255, 0)]

    def load_annotations(self, ann_file):
        """Return a list of per-image info dicts (id, filename, width,
        height) parsed from the XML annotation files listed in `ann_file`."""
        data_infos = []
        img_ids = mmcv.list_from_file(ann_file)
        for img_id in img_ids:
            filename = f'{img_id}.jpg'
            xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
            tree = ET.parse(xml_path)
            root = tree.getroot()
            size = root.find('size')
            width = int(size.find('width').text)
            height = int(size.find('height').text)
            # The image lives under the folder recorded in its own XML.
            folder = root.find('folder').text
            data_infos.append(dict(id=img_id, filename=osp.join(folder, filename), width=width, height=height))
        return data_infos
class CategoricalMLPRegressor(StochasticRegressor):
    """MLP regressor over categorical (one-hot) outputs.

    Builds a model plus a frozen "old" clone used as the reference
    distribution for a KL (trust-region) constraint during fitting.

    NOTE(review): `recurrent`, `vectorized` and `distribution` read like
    they are meant to be @property accessors in the surrounding API —
    confirm how callers access them before changing anything.
    """

    def __init__(self, input_shape, output_dim, name='CategoricalMLPRegressor', hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.tanh, hidden_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer(), output_nonlinearity=tf.nn.softmax, output_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), output_b_init=tf.zeros_initializer(), optimizer=None, optimizer_args=None, tr_optimizer=None, tr_optimizer_args=None, use_trust_region=True, max_kl_step=0.01, normalize_inputs=True, layer_normalization=False):
        """Create the model, its clone, and the two optimizers.

        `optimizer` defaults to LBFGS; `tr_optimizer` (used when
        `use_trust_region` is True) defaults to conjugate gradient.
        """
        super().__init__(input_shape, output_dim, name)
        self._use_trust_region = use_trust_region
        self._max_kl_step = max_kl_step
        self._normalize_inputs = normalize_inputs
        with tf.compat.v1.variable_scope(self._name, reuse=False) as vs:
            self._variable_scope = vs
            if (optimizer_args is None):
                optimizer_args = dict()
            if (tr_optimizer_args is None):
                tr_optimizer_args = dict()
            if (optimizer is None):
                self._optimizer = make_optimizer(LbfgsOptimizer, **optimizer_args)
            else:
                self._optimizer = make_optimizer(optimizer, **optimizer_args)
            if (tr_optimizer is None):
                self._tr_optimizer = make_optimizer(ConjugateGradientOptimizer, **tr_optimizer_args)
            else:
                self._tr_optimizer = make_optimizer(tr_optimizer, **tr_optimizer_args)
            self._first_optimized = False
        self.model = CategoricalMLPRegressorModel(input_shape, output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, hidden_w_init=hidden_w_init, hidden_b_init=hidden_b_init, output_nonlinearity=output_nonlinearity, output_w_init=output_w_init, output_b_init=output_b_init, layer_normalization=layer_normalization)
        # Clone used as the "old" distribution for the KL constraint.
        self._old_model = self.model.clone(name='model_for_old_dist')
        self._network = None
        self._old_network = None
        self._initialize()

    def _initialize(self):
        """Build the TF graph: both networks, KL/loss terms, the compiled
        prediction function, and both optimizer update ops."""
        input_var = tf.compat.v1.placeholder(tf.float32, shape=((None,) + self._input_shape))
        self._old_network = self._old_model.build(input_var)
        with tf.compat.v1.variable_scope(self._variable_scope):
            self._network = self.model.build(input_var)
        # Sync the clone with the live model before any fitting.
        self._old_model.parameters = self.model.parameters
        ys_var = tf.compat.v1.placeholder(dtype=tf.float32, name='ys', shape=(None, self._output_dim))
        y_hat = self._network.y_hat
        dist = self._network.dist
        old_dist = self._old_network.dist
        # Mean KL(old || new) is the trust-region constraint quantity.
        mean_kl = tf.reduce_mean(old_dist.kl_divergence(dist))
        # Negative log-likelihood of the one-hot targets.
        loss = (- tf.reduce_mean(dist.log_prob(ys_var)))
        # Predictions are the argmax class re-encoded as one-hot.
        predicted = tf.one_hot(tf.argmax(y_hat, axis=1), depth=self._output_dim)
        self._f_predict = tensor_utils.compile_function([input_var], predicted)
        self._optimizer.update_opt(loss=loss, target=self, inputs=[input_var, ys_var])
        self._tr_optimizer.update_opt(loss=loss, target=self, inputs=[input_var, ys_var], leq_constraint=(mean_kl, self._max_kl_step))

    def fit(self, xs, ys):
        """Fit the regressor to observations `xs` and one-hot labels `ys`,
        logging loss before/after; refresh normalization stats if enabled."""
        if self._normalize_inputs:
            # Load running input statistics into both networks so the old
            # and new distributions see identically normalized inputs.
            self._network.x_mean.load(np.mean(xs, axis=0, keepdims=True))
            self._network.x_std.load(np.std(xs, axis=0, keepdims=True))
            self._old_network.x_mean.load(np.mean(xs, axis=0, keepdims=True))
            self._old_network.x_std.load(np.std(xs, axis=0, keepdims=True))
        inputs = [xs, ys]
        if self._use_trust_region:
            optimizer = self._tr_optimizer
        else:
            optimizer = self._optimizer
        loss_before = optimizer.loss(inputs)
        tabular.record('{}/LossBefore'.format(self._name), loss_before)
        optimizer.optimize(inputs)
        loss_after = optimizer.loss(inputs)
        tabular.record('{}/LossAfter'.format(self._name), loss_after)
        tabular.record('{}/dLoss'.format(self._name), (loss_before - loss_after))
        self._first_optimized = True
        # The freshly fitted parameters become the next "old" distribution.
        self._old_model.parameters = self.model.parameters

    def predict(self, xs):
        """Return one-hot predictions for observations `xs`."""
        return self._f_predict(xs)

    def recurrent(self):
        # This regressor holds no recurrent state.
        return False

    def vectorized(self):
        # Supports batched (vectorized) prediction.
        return True

    def distribution(self):
        # The current categorical output distribution of the live network.
        return self._network.dist

    def __getstate__(self):
        """Drop unpicklable TF artifacts (compiled fn, built networks)."""
        new_dict = super().__getstate__()
        del new_dict['_f_predict']
        del new_dict['_network']
        del new_dict['_old_network']
        return new_dict

    def __setstate__(self, state):
        """Restore state and rebuild the TF graph from scratch."""
        super().__setstate__(state)
        self._initialize()
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    """Find the best exact-match and F1 scores over no-answer thresholds and
    record them (with the thresholds) in `main_eval` (mutated in place)."""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval.update({
        'best_exact': best_exact,
        'best_exact_thresh': exact_thresh,
        'best_f1': best_f1,
        'best_f1_thresh': f1_thresh,
    })
class EMAML():
    def __init__(self, dim_input, dim_output, dim_hidden=32, num_layers=4, num_particles=2, max_test_step=5):
        """Set up the TF1 graph inputs and the shared network used by E-MAML.

        Args describe the network shape (input/output/hidden dims, layer
        count), the number of weight particles, and the maximum number of
        inner-loop adaptation steps used at test time.
        """
        self.dim_input = dim_input
        self.dim_output = dim_output
        self.dim_hidden = dim_hidden
        self.num_layers = num_layers
        self.num_particles = num_particles
        # Inner/outer learning rates, overridable at session run time;
        # defaults come from command-line FLAGS.
        self.in_lr = tf.placeholder_with_default(input=FLAGS.in_lr, name='in_lr', shape=[])
        self.out_lr = tf.placeholder_with_default(input=FLAGS.out_lr, name='out_lr', shape=[])
        self.max_test_step = max_test_step
        # Point-estimate network (is_bnn=False) shared by all particles.
        self.bnn = BNN(dim_input=self.dim_input, dim_output=self.dim_output, dim_hidden=self.dim_hidden, num_layers=self.num_layers, is_bnn=False)
        self.construct_network_weights = self.bnn.construct_network_weights
        self.forward_network = self.bnn.forward_network
        # Per-task support (train) and query (valid) placeholders.
        self.train_x = tf.placeholder(dtype=tf.float32, name='train_x')
        self.train_y = tf.placeholder(dtype=tf.float32, name='train_y')
        self.valid_x = tf.placeholder(dtype=tf.float32, name='valid_x')
        self.valid_y = tf.placeholder(dtype=tf.float32, name='valid_y')
        # Particle weights are built lazily in construct_model().
        self.W_network_particles = None
def construct_model(self, is_training=True):
print('start model construction')
with tf.variable_scope('model', reuse=None) as training_scope:
if (is_training or (self.W_network_particles is None)):
self.W_network_particles = [self.construct_network_weights(scope='network{}'.format(p_idx)) for p_idx in range(self.num_particles)]
else:
training_scope.reuse_variables()
if is_training:
max_update_step = FLAGS.in_step
else:
max_update_step = max(FLAGS.in_step, self.max_test_step)
def fast_learn_one_task(inputs):
[train_x, valid_x, train_y, valid_y] = inputs
meta_loss = []
WW_update = [OrderedDict(zip(W_dic.keys(), W_dic.values())) for W_dic in self.W_network_particles]
step_train_loss = ([None] * (max_update_step + 1))
step_valid_loss = ([None] * (max_update_step + 1))
step_train_pred = ([None] * (max_update_step + 1))
step_valid_pred = ([None] * (max_update_step + 1))
for s_idx in range((max_update_step + 1)):
train_z_list = []
valid_z_list = []
train_mse_list = []
valid_mse_list = []
for p_idx in range(FLAGS.num_particles):
train_z_list.append(self.forward_network(x=train_x, W_dict=WW_update[p_idx]))
valid_z_list.append(self.forward_network(x=valid_x, W_dict=WW_update[p_idx]))
train_mse_list.append(self.bnn.mse_data(predict_y=train_z_list[(- 1)], target_y=train_y))
valid_mse_list.append(self.bnn.mse_data(predict_y=valid_z_list[(- 1)], target_y=valid_y))
if (s_idx < max_update_step):
particle_loss = tf.reduce_mean(train_mse_list[(- 1)])
dWp = tf.gradients(ys=particle_loss, xs=list(WW_update[p_idx].values()))
if FLAGS.stop_grad:
dWp = [tf.stop_gradient(grad) for grad in dWp]
dWp = OrderedDict(zip(WW_update[p_idx].keys(), dWp))
param_names = []
param_vals = []
for key in list(WW_update[p_idx].keys()):
if (FLAGS.in_grad_clip > 0):
grad = tf.clip_by_value(dWp[key], (- FLAGS.in_grad_clip), FLAGS.in_grad_clip)
else:
grad = dWp[key]
param_names.append(key)
param_vals.append((WW_update[p_idx][key] - (self.in_lr * grad)))
WW_update[p_idx] = OrderedDict(zip(param_names, param_vals))
else:
meta_loss.append(tf.reduce_mean(valid_mse_list[(- 1)]))
step_train_loss[s_idx] = tf.reduce_mean([tf.reduce_mean(train_mse) for train_mse in train_mse_list])
step_valid_loss[s_idx] = tf.reduce_mean([tf.reduce_mean(valid_mse) for valid_mse in valid_mse_list])
step_train_pred[s_idx] = tf.concat([tf.expand_dims(train_z, 0) for train_z in train_z_list], axis=0)
step_valid_pred[s_idx] = tf.concat([tf.expand_dims(valid_z, 0) for valid_z in valid_z_list], axis=0)
meta_loss = tf.reduce_sum(meta_loss)
return [step_train_loss, step_valid_loss, step_train_pred, step_valid_pred, meta_loss]
out_dtype = [([tf.float32] * (max_update_step + 1)), ([tf.float32] * (max_update_step + 1)), ([tf.float32] * (max_update_step + 1)), ([tf.float32] * (max_update_step + 1)), tf.float32]
result = tf.map_fn(fast_learn_one_task, elems=[self.train_x, self.valid_x, self.train_y, self.valid_y], dtype=out_dtype, parallel_iterations=FLAGS.num_tasks)
full_step_train_loss = result[0]
full_step_valid_loss = result[1]
full_step_train_pred = result[2]
full_step_valid_pred = result[3]
full_meta_loss = result[4]
if is_training:
self.total_train_loss = [tf.reduce_mean(full_step_train_loss[j]) for j in range((FLAGS.in_step + 1))]
self.total_valid_loss = [tf.reduce_mean(full_step_valid_loss[j]) for j in range((FLAGS.in_step + 1))]
self.total_meta_loss = tf.reduce_mean(full_meta_loss)
self.total_train_z_list = full_step_train_pred
self.total_valid_z_list = full_step_valid_pred
update_params_list = []
update_params_name = []
for p in range(FLAGS.num_particles):
for name in self.W_network_particles[0].keys():
update_params_name.append([p, name])
update_params_list.append(self.W_network_particles[p][name])
optimizer = tf.train.AdamOptimizer(learning_rate=self.out_lr)
gv_list = optimizer.compute_gradients(loss=self.total_meta_loss, var_list=update_params_list)
if (FLAGS.out_grad_clip > 0):
gv_list = [(tf.clip_by_value(grad, (- FLAGS.out_grad_clip), FLAGS.out_grad_clip), var) for (grad, var) in gv_list]
self.metatrain_op = optimizer.apply_gradients(gv_list)
else:
self.eval_train_loss = [tf.reduce_mean(full_step_train_loss[j]) for j in range((max_update_step + 1))]
self.eval_valid_loss = [tf.reduce_mean(full_step_valid_loss[j]) for j in range((max_update_step + 1))]
self.eval_train_z_list = full_step_train_pred
self.eval_valid_z_list = full_step_valid_pred
print('end of model construction') |
def is_trained(cfg: NamespaceMap, stage: str, is_force_retrain: bool=False) -> bool:
    """Return whether the best checkpoint for `stage` already exists.

    When `is_force_retrain` is set and the checkpoint is present, every
    associated directory (pretrained, results, checkpoints, logs) is wiped
    and recreated empty, and False is returned so training restarts.
    """
    pretrained_path = Path(cfg.paths.pretrained.save)
    filename = BEST_CHECKPOINT.format(stage=stage)
    checkpoint_file = pretrained_path / filename
    if not (is_force_retrain and checkpoint_file.is_file()):
        return checkpoint_file.is_file()
    results_path = Path(cfg.paths.results)
    ckpt_path = Path(cfg.checkpoint.kwargs.dirpath)
    log_path = Path(cfg.paths.logs)
    logger.info(f'Forcing the retraining of {stage}, even though {checkpoint_file} exists. Deleting {pretrained_path} and {results_path} and {ckpt_path} and {log_path}.')
    for directory in (pretrained_path, results_path, ckpt_path, log_path):
        remove_rf(directory, not_exist_ok=True)
    for directory in (pretrained_path, results_path, ckpt_path, log_path):
        directory.mkdir(parents=True)
    return False
class ORTModel():
    """Thin evaluation wrapper around an ONNX Runtime inference session.

    Feeds a dataset example-by-example (batch size 1), accumulates
    predictions/labels, and optionally computes metrics at the end.
    """
    def __init__(self, model: Union[(str, os.PathLike)], compute_metrics: Optional[Callable[([EvalPrediction], Dict)]]=None, label_names: Optional[List[str]]=None):
        # Optional callable mapping an EvalPrediction to a metrics dict.
        self.compute_metrics = compute_metrics
        # Dataset keys that hold labels; defaults to the single key 'labels'.
        self.label_names = (['labels'] if (label_names is None) else label_names)
        # NOTE(review): `model` is serialized via SerializeToString(), so despite
        # the str/PathLike annotation it is expected to be an in-memory ONNX
        # ModelProto — confirm against callers.
        self.session = onnxruntime.InferenceSession(model.SerializeToString(), providers=onnxruntime.get_available_providers())
        # Map ONNX graph input names to their positional index.
        self.onnx_input_names = {input_key.name: idx for (idx, input_key) in enumerate(self.session.get_inputs())}
    def evaluation_loop(self, dataset: Dataset):
        """Run inference over `dataset` one example at a time and return an
        EvalLoopOutput of concatenated predictions, labels, and metrics."""
        logger.info('***** Running evaluation *****')
        all_preds = None
        all_labels = None
        # NOTE(review): this dict is shared across iterations, so a key set on
        # one example persists for later examples that lack it — harmless only
        # if every example provides the same input keys; verify upstream.
        onnx_inputs = {}
        for (step, inputs) in enumerate(dataset):
            # An example "has labels" only if every label key is present.
            has_labels = all(((inputs.get(k) is not None) for k in self.label_names))
            if has_labels:
                labels = tuple((np.array([inputs.get(name)]) for name in self.label_names))
                if (len(labels) == 1):
                    labels = labels[0]
            else:
                labels = None
            "\n        LayoutLMV2 inputs (with order):\n            {\n                'input_ids': {0: 'batch_size', 1: 'sequence_length'}, \n                'bbox': {0: 'batch_size', 1: 'sequence_length'}, \n                'image': {0: 'batch_size', 1: 'num_channels'}, # dtype is np.int64 not float\n                'attention_mask': {0: 'batch_size', 1: 'sequence_length'}, \n            }\n        "
            for key in self.onnx_input_names:
                if (key in inputs):
                    # Wrap in a list to create the batch dimension (batch size 1).
                    onnx_inputs[key] = np.array([inputs[key]])
                elif (key == 'image'):
                    # The graph's 'image' input is stored under 'images' in the dataset.
                    onnx_inputs[key] = np.array([inputs['images']], dtype=np.int64)
            preds = self.session.run(None, onnx_inputs)
            if (len(preds) == 1):
                preds = preds[0]
            # Concatenate across examples, padding ragged sequences with -100.
            all_preds = (preds if (all_preds is None) else nested_concat(all_preds, preds, padding_index=(- 100)))
            all_labels = (labels if (all_labels is None) else nested_concat(all_labels, labels, padding_index=(- 100)))
        if ((self.compute_metrics is not None) and (all_preds is not None) and (all_labels is not None)):
            metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
        else:
            metrics = {}
        return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=len(dataset))
class ResLayer(Sequential):
    """A stage of `num_blocks` residual blocks.

    Builds an optional downsample branch for the first block whenever the
    stride or channel count changes (optionally avg-pool-first when
    `avg_down`), and supports per-block dilation overrides via `multi_grid`.
    """

    def __init__(self, block, inplanes, planes, num_blocks, stride=1, dilation=1, avg_down=False, conv_cfg=None, norm_cfg=dict(type='BN'), multi_grid=None, contract_dilation=False, **kwargs):
        self.block = block
        out_channels = planes * block.expansion
        downsample = None
        if stride != 1 or inplanes != out_channels:
            shortcut_mods = []
            conv_stride = stride
            if avg_down:
                # Average-pool handles the spatial reduction so the 1x1 conv stays stride-1.
                conv_stride = 1
                shortcut_mods.append(nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False))
            shortcut_mods.append(build_conv_layer(conv_cfg, inplanes, out_channels, kernel_size=1, stride=conv_stride, bias=False))
            shortcut_mods.append(build_norm_layer(norm_cfg, out_channels)[1])
            downsample = nn.Sequential(*shortcut_mods)
        # Dilation for the first block: explicit multi_grid wins, then optional contraction.
        if multi_grid is not None:
            first_dilation = multi_grid[0]
        elif dilation > 1 and contract_dilation:
            first_dilation = dilation // 2
        else:
            first_dilation = dilation
        layers = [block(inplanes=inplanes, planes=planes, stride=stride, dilation=first_dilation, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)]
        for i in range(1, num_blocks):
            block_dilation = dilation if multi_grid is None else multi_grid[i]
            layers.append(block(inplanes=out_channels, planes=planes, stride=1, dilation=block_dilation, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs))
        super(ResLayer, self).__init__(*layers)
def save_blip_diffusion_model(model, args):
    """Assemble a BlipDiffusionPipeline from Stable Diffusion v1.5 parts plus
    the trained Q-Former extracted from `model`, and save it to
    `args.checkpoint_path`."""
    sd_repo = 'runwayml/stable-diffusion-v1-5'
    qformer = get_qformer(model)
    qformer.eval()
    # Frozen SD components are loaded from the hub and put in eval mode.
    text_encoder = ContextCLIPTextModel.from_pretrained(sd_repo, subfolder='text_encoder')
    text_encoder.eval()
    vae = AutoencoderKL.from_pretrained(sd_repo, subfolder='vae')
    vae.eval()
    unet = UNet2DConditionModel.from_pretrained(sd_repo, subfolder='unet')
    scheduler = PNDMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=False, skip_prk_steps=True)
    tokenizer = CLIPTokenizer.from_pretrained(sd_repo, subfolder='tokenizer')
    pipeline = BlipDiffusionPipeline(tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, unet=unet, scheduler=scheduler, qformer=qformer, image_processor=BlipImageProcessor())
    pipeline.save_pretrained(args.checkpoint_path)
class Net(nn.Module):
    """Classic small CIFAR-style CNN: two conv+pool stages followed by three
    fully connected layers, producing 10 class logits for 3x32x32 inputs."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Two conv -> ReLU -> max-pool stages.
        for conv in (self.conv1, self.conv2):
            x = self.pool(F.relu(conv(x)))
        x = torch.flatten(x, 1)
        # Two hidden FC layers with ReLU; final layer emits raw logits.
        for fc in (self.fc1, self.fc2):
            x = F.relu(fc(x))
        return self.fc3(x)
def zscore_from_pval(pval, one_minus_pval=None, distrib='norm'):
    """Convert one-sided p-values to z-scores under the given null distribution.

    Parameters
    ----------
    pval : ndarray
        One-sided p-values.
    one_minus_pval : ndarray, optional
        Complementary p-values; used for numerical precision where pval > 0.5.
    distrib : str
        Name of the null distribution; only 'norm' is supported.

    Returns
    -------
    ndarray
        z-scores, with infinities replaced (clipped to 40 via _replace_infinity).

    Raises
    ------
    ValueError
        If `distrib` is not 'norm'. (Previously an unsupported value left
        `zscore` unbound and raised a confusing UnboundLocalError instead.)
    """
    if distrib != 'norm':
        raise ValueError(f"Unsupported distrib: {distrib!r}; only 'norm' is supported.")
    zscore = norm.isf(pval)
    if one_minus_pval is not None:
        # For large p-values, 1 - p carries more floating-point precision than p.
        ind = (pval > 0.5)
        zscore[ind] = norm.ppf(one_minus_pval[ind])
    zscore = _replace_infinity(zscore, replace_val=40, method='plus-one')
    return zscore
class Cell(nn.Module):
    """A DARTS-style cell assembled from a discovered genotype.

    The two previous cells' outputs are preprocessed to C channels, then
    `_steps` intermediate states are built, each as the sum of two operations
    applied to earlier states. The states listed in the genotype's concat
    field are concatenated along channels as the cell output, optionally
    passed through a squeeze-and-excitation layer.
    """
    def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev, SE=False):
        super(Cell, self).__init__()
        print(C_prev_prev, C_prev, C)
        self.se_layer = None
        if reduction_prev:
            # The previous cell halved spatial size, so align s0 with a factorized reduce.
            self.preprocess0 = FactorizedReduce(C_prev_prev, C)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)
        # Select the reduction or normal sub-genotype for this cell.
        if reduction:
            (op_names, indices) = zip(*genotype.reduce)
            concat = genotype.reduce_concat
        else:
            (op_names, indices) = zip(*genotype.normal)
            concat = genotype.normal_concat
        self._compile(C, op_names, indices, concat, reduction)
        if SE:
            # Squeeze-and-excitation over the concatenated output channels.
            self.se_layer = SELayer(channel=(self.multiplier * C))
    def _compile(self, C, op_names, indices, concat, reduction):
        """Instantiate the genotype's operations; two ops feed each intermediate state."""
        assert (len(op_names) == len(indices))
        self._steps = (len(op_names) // 2)
        self._concat = concat
        self.multiplier = len(concat)
        self._ops = nn.ModuleList()
        for (name, index) in zip(op_names, indices):
            # In a reduction cell, ops that read the two input states use stride 2.
            stride = (2 if (reduction and (index < 2)) else 1)
            op = OPS[name](C, stride, True)
            self._ops += [op]
        self._indices = indices
    def forward(self, s0, s1, drop_prob):
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)
        states = [s0, s1]
        for i in range(self._steps):
            # Each new state sums two ops applied to previously computed states.
            h1 = states[self._indices[(2 * i)]]
            h2 = states[self._indices[((2 * i) + 1)]]
            op1 = self._ops[(2 * i)]
            op2 = self._ops[((2 * i) + 1)]
            h1 = op1(h1)
            h2 = op2(h2)
            if (self.training and (drop_prob > 0.0)):
                # Drop-path regularization during training; Identity ops are exempt.
                if (not isinstance(op1, Identity)):
                    h1 = drop_path(h1, drop_prob)
                if (not isinstance(op2, Identity)):
                    h2 = drop_path(h2, drop_prob)
            s = (h1 + h2)
            states += [s]
        if (self.se_layer is None):
            return torch.cat([states[i] for i in self._concat], dim=1)
        else:
            return self.se_layer(torch.cat([states[i] for i in self._concat], dim=1))
def dump_topset(topset: Dict[(str, OpConfig)], path: PathLike):
    """Serialize the op-name -> config mapping to `path` under a 'topset' key."""
    payload = {'topset': topset}
    OmegaConf.save(payload, path)
# NOTE(review): this bare call looks like a dataset-registry decorator that
# lost its '@' during extraction (e.g. '@registry.register_builder("msvd_caption")')
# — confirm against the original repository.
_builder('msvd_caption')
class MSVDCapBuilder(BaseDatasetBuilder):
    """Dataset builder for MSVD video-captioning train/eval splits."""
    # Dataset classes used for the train and eval splits respectively.
    train_dataset_cls = VideoCaptionDataset
    eval_dataset_cls = VideoCaptionEvalDataset
    # Named default config file for this dataset.
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/msvd/defaults_cap.yaml'}
class Triple():
    """A subject-predicate-object triple, as in an RDF-style knowledge graph."""

    def __init__(self, s, p, o):
        # Store the three components verbatim.
        self.s = s
        self.p = p
        self.o = o
def last_zero_init(m):
    """Zero-initialize `m` — or its final submodule when `m` is an
    nn.Sequential — and mark the target as initialized."""
    target = m[-1] if isinstance(m, nn.Sequential) else m
    constant_init(target, val=0)
    target.inited = True
def rlagru_resnet101_eca(rla_channel=32, k_size=(5, 5, 5, 7)):
    """Construct an RLA-GRU ResNet-101 with ECA attention.

    Parameters
    ----------
    rla_channel : int
        Number of recurrent-layer-aggregation channels.
    k_size : sequence of int
        ECA kernel size for each of the four stages. The default was a
        mutable list (a shared-mutable-default pitfall); it is now a tuple,
        converted to a list below so downstream behavior is unchanged.

    Returns
    -------
    The constructed RLAgru_ResNet model.
    """
    print('Constructing rlagru_resnet101_eca......')
    model = RLAgru_ResNet(RLA_Bottleneck, [3, 4, 23, 3], rla_channel=rla_channel, ECA=list(k_size))
    return model
def read_image_RGB(img_path):
    """Open `img_path` as an RGB PIL image, retrying on IOError.

    Raises IOError immediately if the path does not exist; otherwise keeps
    retrying until the read succeeds (deliberately loops on transient
    filesystem errors, matching the original behavior).
    """
    if not osp.exists(img_path):
        raise IOError('{} does not exist'.format(img_path))
    while True:
        try:
            return Image.open(img_path).convert('RGB')
        except IOError:
            print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path))
def main(args):
    """Run one or more test-time-adaptation experiments described by `args`.

    Optionally converts KITTI labels to YOLO format and exits; otherwise runs
    `args.num_runs` full experiments, each iterating over corruption
    severities and the selected adaptation methods, saving and summarizing
    results per run.
    """
    if args.kitti_to_yolo_labels:
        # One-shot label-conversion mode: convert and exit without training.
        from utils.utils import kitti_labels_to_yolo
        kitti_labels_to_yolo(args.kitti_to_yolo_labels)
        exit()
    cudnn.benchmark = True
    start_time = datetime.now()
    log.info(' NEW RUN ')
    log.info(f"Running: {' '.join(sys.argv)}")
    log.info('Full args list:')
    for arg in vars(args):
        log.info(f'{arg}: {getattr(args, arg)}')
    log.info('')
    # Results are labeled 'Error' for non-KITTI datasets, unlabeled otherwise.
    results = ResultsManager(('' if (args.dataset == 'kitti') else 'Error'))
    init_settings(args)
    if args.usr:
        set_paths(args)
    for run in range(args.num_runs):
        # Fresh network per run so runs are independent.
        net = init_net(args)
        for args.severity_idx in range(args.num_severities):
            if ('dua' in args.methods):
                methods.dua(args, net)
        if results.has_results():
            # Persist raw results with a timestamped filename and print a LaTeX summary.
            timestamp_str = time.strftime('%b-%d-%Y_%H%M', time.localtime())
            results.save_to_file(file_name=f'{timestamp_str}_raw_results.pkl')
            results.print_summary_latex()
            if (args.num_runs > 1):
                # Clear accumulated results so the next run starts clean.
                results.reset_results()
        log.info(f"{('>' * 50)} FINISHED RUN #{(run + 1)} {('<' * 50)}")
        runtime = (datetime.now() - start_time)
        log.info(f'Runtime so far: {timedelta_to_str(runtime)}')
        # Release GPU memory before the next run's network is created.
        torch.cuda.empty_cache()
        del net
    if (args.num_runs > 1):
        results.print_multiple_runs_results()
    runtime = (datetime.now() - start_time)
    log.info(f'Execution finished in {timedelta_to_str(runtime)}')
def make_gsm_loss_evaluator(cfg):
    """Build a CombinedLossEvaluators from the losses enabled in `cfg`.

    Each known loss name maps to its evaluator factory; a loss contributes
    only when present in cfg.model.losses, using its configured weight.
    """
    factories = (
        ('l1_loss', make_sll_loss_evaluator),
        ('gerf_loss', make_gerf_loss_evaluator),
        ('focal_loss', make_focal_loss_evaluator),
        ('relative_loss', make_relative_loss_evaluator),
    )
    loss_evaluators = {}
    loss_weights = {}
    for loss_name, factory in factories:
        if loss_name in cfg.model.losses:
            loss_evaluators[loss_name] = factory(cfg)
            loss_weights[loss_name] = getattr(cfg.model.losses, loss_name).weight
    return CombinedLossEvaluators(cfg, loss_evaluators, loss_weights)
class TrainingInterface():
    """Generic train/eval loop driver around a model, data loaders, schedulers
    and summary writers.

    `run` drives `n_epoch` epochs of train + eval, writing per-batch losses to
    the summary writers, checkpointing after every epoch, and tracking the
    best validation loss.

    Fix: `name`, `log_path`, `model_path`, `writer_path` and `writer_names`
    were plain methods, but the class consumes them as attributes
    (`for key in self.writer_names`, `self.path_mng.epoch_model_path(self.name)`),
    which would iterate/pass a bound method instead of the value — their
    `@property` decorators are restored here.
    """
    def __init__(self, device, model, parallel, log_path_mng, data_loaders, summary_writers, opt_scheduler, param_scheduler, n_epoch, **kwargs):
        self.model = model
        self.model.device = device
        if parallel:
            # Multi-GPU wrap; the raw model then lives at self.model.module.
            self.model = nn.DataParallel(self.model)
        self.model.to(device)
        self.path_mng = log_path_mng
        self.summary_writers = summary_writers
        self.data_loaders = data_loaders
        self.opt_scheduler = opt_scheduler
        self.param_scheduler = param_scheduler
        self.device = device
        self.n_epoch = n_epoch
        self.epoch = 0
        self.train_step = 0
        self.val_step = 0
        self.parallel = parallel
        # Attach any extra experiment-specific attributes verbatim.
        for (key, val) in kwargs.items():
            setattr(self, key, val)
    @property
    def name(self):
        """Name of the underlying model (unwrapping DataParallel if needed)."""
        if self.parallel:
            return self.model.module.name
        else:
            return self.model.name
    @property
    def log_path(self):
        return self.path_mng.log_path
    @property
    def model_path(self):
        return self.path_mng.model_path
    @property
    def writer_path(self):
        return self.path_mng.writer_path
    @property
    def writer_names(self):
        """Names of the loss components tracked by the summary writers."""
        return self.summary_writers.writer_names
    def _init_loss_dic(self):
        """Return a zeroed accumulator dict keyed by writer name."""
        loss_dic = {}
        for key in self.writer_names:
            loss_dic[key] = 0.0
        return loss_dic
    def _accumulate_loss_dic(self, loss_dic, loss_items):
        """Add each loss tensor's scalar value into the running totals."""
        assert (len(self.writer_names) == len(loss_items))
        for (key, val) in zip(self.writer_names, loss_items):
            loss_dic[key] += val.item()
        return loss_dic
    def _write_loss_to_dic(self, loss_items):
        """Convert a tuple of loss tensors into a {name: scalar} dict."""
        loss_dic = {}
        assert (len(self.writer_names) == len(loss_items))
        for (key, val) in zip(self.writer_names, loss_items):
            loss_dic[key] = val.item()
        return loss_dic
    def _batch_to_inputs(self, batch):
        """Subclasses convert a raw data-loader batch into model inputs."""
        raise NotImplementedError
    def train(self, **kwargs):
        """Run one training epoch; returns the accumulated loss dict."""
        self.model.train()
        self.param_scheduler.train()
        epoch_loss_dic = self._init_loss_dic()
        for (i, batch) in enumerate(self.data_loaders.train_loader):
            inputs = self._batch_to_inputs(batch)
            self.opt_scheduler.optimizer_zero_grad()
            input_params = self.param_scheduler.step()
            outputs = self.model('train', *inputs, **input_params)
            outputs = self._sum_parallel_loss(outputs)
            # The first element is the total loss used for backprop.
            loss = outputs[0]
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.opt_scheduler.clip)
            self.opt_scheduler.step()
            self._accumulate_loss_dic(epoch_loss_dic, outputs)
            batch_loss_dic = self._write_loss_to_dic(outputs)
            self.summary_writers.write_task('train', batch_loss_dic, self.train_step)
            self.train_step += 1
        return epoch_loss_dic
    def _sum_parallel_loss(self, loss):
        """Mean-reduce per-device losses produced by DataParallel."""
        if self.parallel:
            if isinstance(loss, tuple):
                return tuple([x.mean() for x in loss])
            else:
                return loss.mean()
        else:
            return loss
    def eval(self):
        """Run one validation epoch (no gradients); returns the loss dict."""
        self.model.eval()
        self.param_scheduler.eval()
        epoch_loss_dic = self._init_loss_dic()
        for (i, batch) in enumerate(self.data_loaders.val_loader):
            inputs = self._batch_to_inputs(batch)
            input_params = self.param_scheduler.step()
            with torch.no_grad():
                outputs = self.model('train', *inputs, **input_params)
            outputs = self._sum_parallel_loss(outputs)
            self._accumulate_loss_dic(epoch_loss_dic, outputs)
            batch_loss_dic = self._write_loss_to_dic(outputs)
            self.summary_writers.write_task('val', batch_loss_dic, self.val_step)
            self.val_step += 1
        return epoch_loss_dic
    def save_model(self, fn):
        """Save the unwrapped model's state dict to `fn`."""
        if self.parallel:
            torch.save(self.model.module.state_dict(), fn)
        else:
            torch.save(self.model.state_dict(), fn)
    def epoch_report(self, start_time, end_time, train_loss, valid_loss):
        """Print a one-epoch timing and loss summary."""
        (epoch_mins, epoch_secs) = epoch_time(start_time, end_time)
        print(f'Epoch: {(self.epoch + 1):02} | Time: {epoch_mins}m {epoch_secs}s', flush=True)
        print(f'\tTrain Loss: {train_loss:.3f}', flush=True)
        print(f'\tValid. Loss: {valid_loss:.3f}', flush=True)
    def run(self, start_epoch=0, start_train_step=0, start_val_step=0):
        """Train for n_epoch epochs, checkpointing every epoch and keeping the
        best-validation-loss model separately."""
        self.epoch = start_epoch
        self.train_step = start_train_step
        self.val_step = start_val_step
        best_valid_loss = float('inf')
        for i in range(self.n_epoch):
            start_time = time.time()
            train_loss = self.train()['loss']
            val_loss = self.eval()['loss']
            end_time = time.time()
            self.save_model(self.path_mng.epoch_model_path(self.name))
            if (val_loss < best_valid_loss):
                best_valid_loss = val_loss
                self.save_model(self.path_mng.valid_model_path(self.name))
            self.epoch_report(start_time, end_time, train_loss, val_loss)
            self.epoch += 1
        self.save_model(self.path_mng.final_model_path(self.name))
        print('Model saved.')
class DenseProjection(nn.Module):
    """DBPN-style (up- or down-) projection unit with optional 1x1 bottleneck.

    Forward pass implements back-projection: project the input, re-project
    it back, take the residual against the input, project that residual and
    add it to the first projection.
    """

    def __init__(self, in_channels, nr, scale, up=True, bottleneck=True):
        super(DenseProjection, self).__init__()
        if bottleneck:
            # 1x1 conv shrinks channels before the (de)convolutions.
            self.bottleneck = nn.Sequential(nn.Conv2d(in_channels, nr, 1), nn.PReLU(nr))
            inter_channels = nr
        else:
            self.bottleneck = None
            inter_channels = in_channels
        self.conv_1 = nn.Sequential(projection_conv(inter_channels, nr, scale, up), nn.PReLU(nr))
        self.conv_2 = nn.Sequential(projection_conv(nr, inter_channels, scale, not up), nn.PReLU(inter_channels))
        self.conv_3 = nn.Sequential(projection_conv(inter_channels, nr, scale, up), nn.PReLU(nr))

    def forward(self, x):
        if self.bottleneck is not None:
            x = self.bottleneck(x)
        projected = self.conv_1(x)
        back_projected = self.conv_2(projected)
        residual = back_projected.sub(x)
        correction = self.conv_3(residual)
        return projected.add(correction)
def main():
    """Augment a basic-block dataset and emit/execute SQL to store the results.

    Two subcommands: 'permutations' generates instruction-order permutations
    of blocks; 'ports' duplicates instructions. The augmentations are turned
    into SQL INSERTs, optionally wrapped in bulk-load optimizations, then
    written to a file and/or executed.

    Fixes: closing paren restored in the --data help text; typos
    'numnber' -> 'number' and 'permuations' -> 'permutations' corrected.
    """
    parser = argparse.ArgumentParser(description='Supplement dataset')
    parser.add_argument('--data', type=str, required=True, help='Block data file to use (e.g. inputs/data/time_skylake.data)')
    parser.add_argument('--embedding', type=str, required=True, help='Token embedding file to use (e.g. inputs/embeddings/code_delim.emb)')
    parser.add_argument('--table-name', type=str, required=True, help='Table to write augmentations to (will be freshly created)')
    parser.add_argument('--execute-sql', action='store_true', default=False)
    parser.add_argument('--store-sql', action='store_true', default=False)
    parser.add_argument('--optimize-sql', action='store_true', default=False)
    subparsers = parser.add_subparsers(dest='command')
    perms_parser = subparsers.add_parser('permutations')
    perms_parser.add_argument('--desired-n-perms', default='all')
    perms_parser.add_argument('--max-block-size', type=int, default=None, help='Maximum block size to attempt to generate permutations for. Default none')
    perms_parser.add_argument('--min-perms-per-block', type=int, default=None, help='Minimum number of permutations to include when generating permutations (otherwise throw out block)')
    perms_parser.add_argument('--max-perms-per-block', type=int, default=None, help='Maximum number of permutations to include when generating permutations.')
    ports_parser = subparsers.add_parser('ports')
    ports_parser.add_argument('--dup-template', type=str, default=_DEFAULT_DUP_TEMPLATE)
    ports_parser.add_argument('--max-dups', type=int, default=10, help='Max number of times to duplicate a given instruction')
    args = parser.parse_args()
    data = read_dataset(args.data, args.embedding)
    if (args.command == 'permutations'):
        # 'all' -> unlimited; 'equal' -> one permutation per original block.
        if (args.desired_n_perms == 'all'):
            desired_n_perms = None
        elif (args.desired_n_perms == 'equal'):
            desired_n_perms = len(data.data)
        else:
            desired_n_perms = int(args.desired_n_perms)
        augs = gen_permutations(data, desired_n_perms=desired_n_perms, max_block_size=args.max_block_size, min_perms_per_block=args.min_perms_per_block, max_perms_per_block=args.max_perms_per_block)
    else:
        augs = gen_duplicated_instructions(data, args.max_dups)
    sql_commands = gen_sql_commands_of_augs(augs, args.table_name)
    if args.optimize_sql:
        # Disable per-row checks for bulk insert; re-enable and commit at the end.
        sql_commands.insert(0, 'SET autocommit=0;')
        sql_commands.insert(1, 'SET unique_checks=0;')
        sql_commands.insert(2, 'SET foreign_key_checks=0;')
        sql_commands.append('COMMIT;')
        sql_commands.append('SET unique_checks=1;')
        sql_commands.append('SET foreign_key_checks=1;')
        sql_commands.append('SET autocommit=1;')
    if args.store_sql:
        with open(os.path.join(_DATA_DIR, 'table_{}.sql'.format(time_str())), 'w') as f:
            print('\n'.join(sql_commands), file=f)
    if args.execute_sql:
        execute_sql(sql_commands)
class BaseNetwork(nn.Module):
    """Common base class for networks: CLI-option hook, parameter counting,
    and recursive weight initialization."""

    def __init__(self):
        super(BaseNetwork, self).__init__()

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Hook for subclasses to register network-specific CLI options.

        Declared @staticmethod (it takes no self/cls); without the decorator,
        calling it on an instance would misbind `parser` to self.
        """
        return parser

    def print_network(self):
        """Print the network class name and its total parameter count in millions."""
        if isinstance(self, list):
            self = self[0]
        num_params = 0
        for param in self.parameters():
            num_params += param.numel()
        print(('Network [%s] was created. Total number of parameters: %.1f million. To see the architecture, do print(network).' % (type(self).__name__, (num_params / 1000000))))

    def init_weights(self, init_type='normal', gain=0.02):
        """Initialize Conv/Linear weights with the chosen scheme and zero
        their biases; BatchNorm2d weights are drawn from N(1, gain).

        Also recurses into children that define their own `init_weights`.
        Raises NotImplementedError for unknown `init_type`.
        """
        def init_func(m):
            classname = m.__class__.__name__
            if (classname.find('BatchNorm2d') != (- 1)):
                if (hasattr(m, 'weight') and (m.weight is not None)):
                    init.normal_(m.weight.data, 1.0, gain)
                if (hasattr(m, 'bias') and (m.bias is not None)):
                    init.constant_(m.bias.data, 0.0)
            elif (hasattr(m, 'weight') and ((classname.find('Conv') != (- 1)) or (classname.find('Linear') != (- 1)))):
                if (init_type == 'normal'):
                    init.normal_(m.weight.data, 0.0, gain)
                elif (init_type == 'xavier'):
                    init.xavier_normal_(m.weight.data, gain=gain)
                elif (init_type == 'xavier_uniform'):
                    init.xavier_uniform_(m.weight.data, gain=1.0)
                elif (init_type == 'kaiming'):
                    init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
                elif (init_type == 'orthogonal'):
                    init.orthogonal_(m.weight.data, gain=gain)
                elif (init_type == 'none'):
                    # Fall back to the module's own default initialization.
                    m.reset_parameters()
                else:
                    raise NotImplementedError(('initialization method [%s] is not implemented' % init_type))
                if (hasattr(m, 'bias') and (m.bias is not None)):
                    init.constant_(m.bias.data, 0.0)
        self.apply(init_func)
        # Let children with a custom init_weights override the generic scheme.
        for m in self.children():
            if hasattr(m, 'init_weights'):
                m.init_weights(init_type, gain)
def get_final_path(closed, goal_node):
    """Walk parent links from `goal_node` back through the `closed` set and
    return the full Path from start to goal, with coordinates ordered
    start-first and the goal node's cost as the path cost."""
    xs = list(reversed(goal_node.x_list))
    ys = list(reversed(goal_node.y_list))
    yaws = list(reversed(goal_node.yaw_list))
    dirs = list(reversed(goal_node.directions))
    final_cost = goal_node.cost
    nid = goal_node.parent_index
    while nid:
        node = closed[nid]
        xs.extend(reversed(node.x_list))
        ys.extend(reversed(node.y_list))
        yaws.extend(reversed(node.yaw_list))
        dirs.extend(reversed(node.directions))
        nid = node.parent_index
    # Segments were collected goal-to-start; flip everything to start-to-goal.
    xs.reverse()
    ys.reverse()
    yaws.reverse()
    dirs.reverse()
    # The first sample inherits the direction of the second.
    dirs[0] = dirs[1]
    return Path(xs, ys, yaws, dirs, final_cost)
class FasterRCNNResnetV1FeatureExtractor(faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
    """Faster R-CNN feature extractor backed by a slim ResNet-v1 model (TF1).

    First-stage (RPN) features are taken from the backbone's block3
    activations; second-stage box-classifier features re-run a freshly
    constructed block4 on the cropped proposal feature maps.
    """
    def __init__(self, architecture, resnet_model, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
        """Store the backbone identity and delegate the rest to the base class.

        Raises ValueError unless the feature stride is 8 or 16 (the only
        output strides the slim ResNet supports here).
        """
        if ((first_stage_features_stride != 8) and (first_stage_features_stride != 16)):
            raise ValueError('`first_stage_features_stride` must be 8 or 16.')
        self._architecture = architecture
        self._resnet_model = resnet_model
        super(FasterRCNNResnetV1FeatureExtractor, self).__init__(is_training, first_stage_features_stride, reuse_weights, weight_decay)
    def preprocess(self, resized_inputs):
        """Subtract the per-channel ImageNet RGB means (no scaling)."""
        channel_means = [123.68, 116.779, 103.939]
        # Nested list broadcasts the means over batch, height and width.
        return (resized_inputs - [[channel_means]])
    def _extract_proposal_features(self, preprocessed_inputs, scope):
        """Run the backbone and return its block3 activations for the RPN.

        Requires a 4-D input of at least 33x33 spatial size (enforced via a
        runtime tf.Assert).
        """
        if (len(preprocessed_inputs.get_shape().as_list()) != 4):
            raise ValueError(('`preprocessed_inputs` must be 4 dimensional, got a tensor of shape %s' % preprocessed_inputs.get_shape()))
        shape_assert = tf.Assert(tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33), tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)), ['image size must at least be 33 in both height and width.'])
        with tf.control_dependencies([shape_assert]):
            with slim.arg_scope(resnet_utils.resnet_arg_scope(batch_norm_epsilon=1e-05, batch_norm_scale=True, weight_decay=self._weight_decay)):
                with tf.variable_scope(self._architecture, reuse=self._reuse_weights) as var_scope:
                    # is_training=False freezes batch-norm statistics in the backbone.
                    (_, activations) = self._resnet_model(preprocessed_inputs, num_classes=None, is_training=False, global_pool=False, output_stride=self._first_stage_features_stride, spatial_squeeze=False, scope=var_scope)
        # Look up the block3 endpoint by its fully scoped tensor name.
        handle = (scope + ('/%s/block3' % self._architecture))
        return activations[handle]
    def _extract_box_classifier_features(self, proposal_feature_maps, scope):
        """Apply a stride-1 block4 (3 bottleneck units, depth 2048) to the
        cropped proposal features and return the result."""
        with tf.variable_scope(self._architecture, reuse=self._reuse_weights):
            with slim.arg_scope(resnet_utils.resnet_arg_scope(batch_norm_epsilon=1e-05, batch_norm_scale=True, weight_decay=self._weight_decay)):
                with slim.arg_scope([slim.batch_norm], is_training=False):
                    blocks = [resnet_utils.Block('block4', resnet_v1.bottleneck, ([{'depth': 2048, 'depth_bottleneck': 512, 'stride': 1}] * 3))]
                    proposal_classifier_features = resnet_utils.stack_blocks_dense(proposal_feature_maps, blocks)
        return proposal_classifier_features
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.