code stringlengths 101 5.91M |
|---|
class InvertedResidual(nn.Module):
    """ShuffleNetV2 inverted-residual building block.

    With stride 1 the input is split channel-wise: one half is passed
    through unchanged, the other goes through ``branch2``. With stride > 1
    both ``branch1`` and ``branch2`` process the full input. The two halves
    are concatenated and channel-shuffled.

    Args:
        inp: number of input channels.
        oup: number of output channels (split evenly across the two branches).
        stride: 1, 2 or 3.

    Raises:
        ValueError: if stride is outside [1, 3].
    """

    def __init__(self, inp, oup, stride):
        super().__init__()
        if not (1 <= stride <= 3):
            raise ValueError('illegal stride value')
        self.stride = stride
        branch_features = oup // 2
        # Stride-1 blocks split the input in half, so inp must be 2 * branch_features.
        assert (self.stride != 1) or (inp == (branch_features << 1))
        if self.stride > 1:
            self.branch1 = nn.Sequential(
                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(branch_features),
                nn.ReLU(inplace=True),
            )
        self.branch2 = nn.Sequential(
            nn.Conv2d(inp if (self.stride > 1) else branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
            self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
        )

    @staticmethod
    def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
        # BUG FIX: @staticmethod was missing, so self.depthwise_conv(...) passed
        # the module instance as `i` and construction crashed (matches torchvision).
        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)

    def forward(self, x):
        """Apply the block; output has ``oup`` channels, shuffled across 2 groups."""
        if self.stride == 1:
            x1, x2 = x.chunk(2, dim=1)
            out = torch.cat((x1, self.branch2(x2)), dim=1)
        else:
            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
        out = channel_shuffle(out, 2)
        return out
def make_hot():
    """Build a one-hot lookup: amino-acid symbol -> one-hot vector of length num_aa."""
    encoding = {}
    for index, residue in enumerate(amino_acids):
        vec = [0] * num_aa
        vec[index] = 1
        encoding[residue] = vec
    return encoding
class Fp16OptimizerHook(OptimizerHook):
    """Optimizer hook for FP16 training with a static loss scale.

    Keeps an FP32 master copy of the parameters: the loss is scaled before
    backward, FP16 gradients are copied onto the FP32 copy, unscaled,
    optionally all-reduced and clipped, the optimizer steps on the FP32
    weights, and the result is copied back into the FP16 model.
    """

    def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1, loss_scale=512.0, distributed=True):
        self.grad_clip = grad_clip
        self.coalesce = coalesce
        self.bucket_size_mb = bucket_size_mb
        self.loss_scale = loss_scale
        self.distributed = distributed

    def before_run(self, runner):
        # Deep-copy the param groups so the optimizer holds an FP32 master copy;
        # wrap_fp16_model presumably converts the model itself — confirm upstream.
        runner.optimizer.param_groups = copy.deepcopy(runner.optimizer.param_groups)
        wrap_fp16_model(runner.model)

    def copy_grads_to_fp32(self, fp16_net, fp32_weights):
        """Copy gradients from the FP16 model onto the FP32 master weights."""
        for fp32_param, fp16_param in zip(fp32_weights, fp16_net.parameters()):
            if fp16_param.grad is None:
                continue
            if fp32_param.grad is None:
                # Lazily allocate the FP32 grad buffer on first use.
                fp32_param.grad = fp32_param.data.new(fp32_param.size())
            fp32_param.grad.copy_(fp16_param.grad)

    def copy_params_to_fp16(self, fp16_net, fp32_weights):
        """Copy updated FP32 master weights back into the FP16 model."""
        for fp16_param, fp32_param in zip(fp16_net.parameters(), fp32_weights):
            fp16_param.data.copy_(fp32_param.data)

    def after_train_iter(self, runner):
        """Run one scaled backward / unscale / step cycle."""
        runner.model.zero_grad()
        runner.optimizer.zero_grad()
        # Scale the loss so small FP16 gradients do not underflow.
        (runner.outputs['loss'] * self.loss_scale).backward()
        fp32_weights = []
        for group in runner.optimizer.param_groups:
            fp32_weights.extend(group['params'])
        self.copy_grads_to_fp32(runner.model, fp32_weights)
        if self.distributed:
            all_reduce_grads(fp32_weights, self.coalesce, self.bucket_size_mb)
        # Undo the loss scaling before clipping / stepping.
        for param in fp32_weights:
            if param.grad is not None:
                param.grad.div_(self.loss_scale)
        if self.grad_clip is not None:
            self.clip_grads(fp32_weights)
        runner.optimizer.step()
        self.copy_params_to_fp16(runner.model, fp32_weights)
class NN_MBE():
    """Many-body-expansion (MBE) energy evaluator backed by per-order networks.

    Args:
        tfm_: optional mapping of MBE order -> trained model name; each entry
            is wrapped in a TFMolManage evaluator.
    """

    def __init__(self, tfm_=None):
        # Map MBE order -> TFMolManage instance.
        self.nn_mbe = dict()
        if tfm_ is not None:  # idiom fix: identity comparison instead of != None
            for order in tfm_:
                print(tfm_[order])
                self.nn_mbe[order] = TFMolManage(tfm_[order], None, False)

    def NN_Energy(self, mol):
        """Evaluate the total MBE energy of ``mol`` and store it on the molecule.

        Side effects: generates MBE terms, sets forces, and writes
        ``mol.nn_energy``; diagnostic values are printed.
        """
        mol.Generate_All_MBE_term(atom_group=3, cutoff=6, center_atom=0)
        # Sum network predictions over all expansion orders 1..mbe_order.
        nn_energy = 0.0
        for i in range(1, mol.mbe_order + 1):
            nn_energy += self.nn_mbe[i].Eval_Mol(mol)
        mol.Set_MBE_Force()
        mol.nn_energy = nn_energy
        print('coords of mol:', mol.coords)
        print('force of mol:', mol.properties['mbe_deri'])
        print('energy of mol:', nn_energy)
def train_single_epoch(epoch, model, train_loader, optimizer, eval_loader, plotfilename=None):
    # NOTE(review): this function body appears truncated — `x`, `y` and `clas`
    # are used before being defined; the loop over `train_loader` that should
    # bind them (and use `errs`/`losses`) is missing. Calling this raises
    # NameError. Kept byte-identical pending recovery of the original loop.
    model.train()
    (errs, losses) = ([], [])
    # Presumably adds a channel dimension for a conv model — TODO confirm.
    x = torch.unsqueeze(x, dim=1)
    optimizer.zero_grad()
    # `device` is a module-level global here — TODO confirm.
    (x, y, clas) = (x.to(device), y.to(device), clas.to(device))
class hico():
    """HICO-DET evaluator: accumulates per-class TP/FP flags and reports mAP.

    A class is one unique triplet [human category, object category, verb
    category] observed in the test ground truth; the index of the triplet in
    ``verb_name_dict`` is the class id.
    """

    def __init__(self, annotation_file):
        # Test GT plus the train split (train counts drive the rare/common split).
        self.annotations = json.load(open(annotation_file, 'r'))
        self.train_annotations = json.load(open(annotation_file.replace('test_hico.json', 'trainval_hico.json'), 'r'))
        self.overlap_iou = 0.5
        self.verb_name_dict = []  # class id -> triplet (list index is the class id)
        self.verb_name_dict_name = []  # class id -> "<verb> <object>" display name
        self.fp = {}  # class id -> list of 0/1 false-positive flags
        self.tp = {}  # class id -> list of 0/1 true-positive flags
        self.score = {}  # class id -> prediction confidences (parallel to tp/fp)
        self.sum_gt = {}  # class id -> number of GT instances
        self.file_name = []
        self.train_sum = {}  # class id -> train-split instance count
        self.no_inds = []  # class ids whose verb is 'no_interaction'
        self.in_inds = []  # class ids with an actual interaction verb
        for gt_i in self.annotations:
            self.file_name.append(gt_i['file_name'])
            gt_hoi = gt_i['hoi_annotation']
            gt_bbox = gt_i['annotations']
            for gt_hoi_i in gt_hoi:
                # Some annotations carry category ids as strings with stray newlines.
                if isinstance(gt_hoi_i['category_id'], str):
                    gt_hoi_i['category_id'] = int(gt_hoi_i['category_id'].replace('\n', ''))
                triplet = [gt_bbox[gt_hoi_i['subject_id']]['category_id'], gt_bbox[gt_hoi_i['object_id']]['category_id'], gt_hoi_i['category_id']]
                if (triplet not in self.verb_name_dict):
                    self.verb_name_dict.append(triplet)
                    _verb_name = hico_action_name[hico_action_inverse_ids[triplet[2]]]
                    if (_verb_name == 'no_interaction'):
                        self.no_inds.append(self.verb_name_dict.index(triplet))
                    else:
                        self.in_inds.append(self.verb_name_dict.index(triplet))
                    _object_name = coco_object_name[coco_object_inverse_ids[triplet[1]]]
                    self.verb_name_dict_name.append(f'{_verb_name} {_object_name}')
                if (self.verb_name_dict.index(triplet) not in self.sum_gt.keys()):
                    self.sum_gt[self.verb_name_dict.index(triplet)] = 0
                self.sum_gt[self.verb_name_dict.index(triplet)] += 1
        # HICO-DET has 600 HOI classes: 80 no-interaction + 520 interaction.
        assert (len(self.no_inds) == 80), 'number of no_interaction labels should be 80'
        assert (len(self.in_inds) == 520), 'number of interaction labels should be 520'
        for train_i in self.train_annotations:
            train_hoi = train_i['hoi_annotation']
            train_bbox = train_i['annotations']
            for train_hoi_i in train_hoi:
                if isinstance(train_hoi_i['category_id'], str):
                    train_hoi_i['category_id'] = int(train_hoi_i['category_id'].replace('\n', ''))
                triplet = [train_bbox[train_hoi_i['subject_id']]['category_id'], train_bbox[train_hoi_i['object_id']]['category_id'], train_hoi_i['category_id']]
                # Only count triplets that also occur in the test GT.
                if (triplet not in self.verb_name_dict):
                    continue
                if (self.verb_name_dict.index(triplet) not in self.train_sum.keys()):
                    self.train_sum[self.verb_name_dict.index(triplet)] = 0
                self.train_sum[self.verb_name_dict.index(triplet)] += 1
        for i in range(len(self.verb_name_dict)):
            self.fp[i] = []
            self.tp[i] = []
            self.score[i] = []
        # Rare = fewer than 10 training instances (standard HICO-DET protocol).
        self.r_inds = []
        self.c_inds = []
        for id in self.train_sum.keys():
            if (self.train_sum[id] < 10):
                self.r_inds.append(id)
            else:
                self.c_inds.append(id)
        self.num_class = len(self.verb_name_dict)

    def evalution(self, predict_annot, save_mAP=None):
        """Score predictions against the GT and return the overall mAP.

        NOTE(review): method name keeps the upstream typo ('evaluation') so
        existing callers keep working.
        """
        for pred_i in predict_annot:
            if (pred_i['file_name'] not in self.file_name):
                continue
            gt_i = self.annotations[self.file_name.index(pred_i['file_name'])]
            gt_bbox = gt_i['annotations']
            if (len(gt_bbox) != 0):
                pred_bbox = self.add_One(pred_i['predictions'])
                if (len(pred_bbox) == 0):
                    logging.warning(f"Image {pred_i['file_name']} pred NULL")
                    continue
                (bbox_pairs, bbox_ov) = self.compute_iou_mat(gt_bbox, pred_bbox)
                pred_hoi = pred_i['hoi_prediction']
                gt_hoi = gt_i['hoi_annotation']
                self.compute_fptp(pred_hoi, gt_hoi, bbox_pairs, pred_bbox, bbox_ov)
            else:
                # No GT boxes in this image: every prediction of a known class is a FP.
                pred_bbox = self.add_One(pred_i['predictions'])
                for (i, pred_hoi_i) in enumerate(pred_i['hoi_prediction']):
                    triplet = [pred_bbox[pred_hoi_i['subject_id']]['category_id'], pred_bbox[pred_hoi_i['object_id']]['category_id'], pred_hoi_i['category_id']]
                    verb_id = self.verb_name_dict.index(triplet)
                    self.tp[verb_id].append(0)
                    self.fp[verb_id].append(1)
                    self.score[verb_id].append(pred_hoi_i['score'])
        # NOTE(review): `map` shadows the builtin; kept byte-identical here.
        map = self.compute_map(save_mAP)
        return map

    def compute_map(self, save_mAP=None):
        """Compute per-class VOC AP and print Full / rare / non-rare / interaction splits.

        Optionally dumps {class name -> AP} to ``save_mAP`` as JSON.
        """
        logging.debug(f'total category = {self.num_class}')
        ap = np.zeros(self.num_class)
        max_recall = np.zeros(self.num_class)
        name2ap = {}
        for i in range(len(self.verb_name_dict)):
            name = self.verb_name_dict_name[i]
            sum_gt = self.sum_gt[i]
            if (sum_gt == 0):
                continue
            tp = np.asarray(self.tp[i].copy())
            fp = np.asarray(self.fp[i].copy())
            res_num = len(tp)
            if (res_num == 0):
                continue
            # Rank detections by descending confidence before accumulating TP/FP.
            score = np.asarray(self.score[i].copy())
            sort_inds = np.argsort((- score))
            fp = fp[sort_inds]
            tp = tp[sort_inds]
            fp = np.cumsum(fp)
            tp = np.cumsum(tp)
            rec = (tp / sum_gt)
            prec = (tp / (fp + tp))
            ap[i] = self.voc_ap(rec, prec)
            max_recall[i] = np.max(rec)
            logging.debug(f'class {self.verb_name_dict_name[i]} -- ap: {ap[i]} max recall:{max_recall[i]}')
            name2ap[name] = ap[i]
        mAP = np.mean(ap[:])
        mAP_rare = np.mean(ap[self.r_inds])
        mAP_nonrare = np.mean(ap[self.c_inds])
        mAP_inter = np.mean(ap[self.in_inds])
        mAP_noninter = np.mean(ap[self.no_inds])
        m_rec = np.mean(max_recall[:])
        print('')
        print(f'''mAP Full: {mAP}
mAP rare: {mAP_rare} mAP nonrare: {mAP_nonrare}
mAP inter: {mAP_inter} mAP noninter: {mAP_noninter}
max recall: {m_rec}''')
        print('')
        if (save_mAP is not None):
            json.dump(name2ap, open(save_mAP, 'w'))
        return mAP

    def voc_ap(self, rec, prec):
        """11-point interpolated VOC average precision."""
        ap = 0.0
        for t in np.arange(0.0, 1.1, 0.1):
            if (np.sum((rec >= t)) == 0):
                p = 0
            else:
                p = np.max(prec[(rec >= t)])
            ap = (ap + (p / 11.0))
        return ap

    def compute_fptp(self, pred_hoi, gt_hoi, match_pairs, pred_bbox, bbox_ov):
        """Label each prediction of one image as TP or FP.

        ``match_pairs`` maps a predicted box index to the GT box indices it
        overlaps (IoU >= 0.5); ``bbox_ov`` holds the corresponding IoU values.
        Each GT HOI can be matched at most once (``vis_tag``). Sorts
        ``pred_hoi`` in place by descending score.
        """
        pos_pred_ids = match_pairs.keys()
        vis_tag = np.zeros(len(gt_hoi))
        pred_hoi.sort(key=(lambda k: k.get('score', 0)), reverse=True)
        if (len(pred_hoi) != 0):
            for (i, pred_hoi_i) in enumerate(pred_hoi):
                is_match = 0
                if isinstance(pred_hoi_i['category_id'], str):
                    pred_hoi_i['category_id'] = int(pred_hoi_i['category_id'].replace('\n', ''))
                if ((len(match_pairs) != 0) and (pred_hoi_i['subject_id'] in pos_pred_ids) and (pred_hoi_i['object_id'] in pos_pred_ids)):
                    pred_sub_ids = match_pairs[pred_hoi_i['subject_id']]
                    pred_obj_ids = match_pairs[pred_hoi_i['object_id']]
                    pred_obj_ov = bbox_ov[pred_hoi_i['object_id']]
                    pred_sub_ov = bbox_ov[pred_hoi_i['subject_id']]
                    pred_category_id = pred_hoi_i['category_id']
                    max_ov = 0
                    max_gt_id = 0
                    # Choose the GT HOI with the largest min(subject IoU, object IoU).
                    for gt_id in range(len(gt_hoi)):
                        gt_hoi_i = gt_hoi[gt_id]
                        if ((gt_hoi_i['subject_id'] in pred_sub_ids) and (gt_hoi_i['object_id'] in pred_obj_ids) and (pred_category_id == gt_hoi_i['category_id'])):
                            is_match = 1
                            min_ov_gt = min(pred_sub_ov[pred_sub_ids.index(gt_hoi_i['subject_id'])], pred_obj_ov[pred_obj_ids.index(gt_hoi_i['object_id'])])
                            if (min_ov_gt > max_ov):
                                max_ov = min_ov_gt
                                max_gt_id = gt_id
                # Skip predictions whose verb id has no evaluation slot.
                if (pred_hoi_i['category_id'] not in list(self.fp.keys())):
                    continue
                triplet = [pred_bbox[pred_hoi_i['subject_id']]['category_id'], pred_bbox[pred_hoi_i['object_id']]['category_id'], pred_hoi_i['category_id']]
                if (triplet not in self.verb_name_dict):
                    continue
                verb_id = self.verb_name_dict.index(triplet)
                if ((is_match == 1) and (vis_tag[max_gt_id] == 0)):
                    self.fp[verb_id].append(0)
                    self.tp[verb_id].append(1)
                    vis_tag[max_gt_id] = 1
                else:
                    # Unmatched, or the best GT was already claimed: false positive.
                    self.fp[verb_id].append(1)
                    self.tp[verb_id].append(0)
                self.score[verb_id].append(pred_hoi_i['score'])

    def compute_iou_mat(self, bbox_list1, bbox_list2):
        """Match GT boxes (list1) against predicted boxes (list2) by IoU >= 0.5.

        Returns ({pred idx -> [gt idx, ...]}, {pred idx -> [IoU, ...]}).
        NOTE(review): the empty-input early return yields a single dict while
        callers unpack two values; it is unreachable from evalution() (both
        lists are checked non-empty there) but would raise if ever hit.
        """
        iou_mat = np.zeros((len(bbox_list1), len(bbox_list2)))
        if ((len(bbox_list1) == 0) or (len(bbox_list2) == 0)):
            return {}
        for (i, bbox1) in enumerate(bbox_list1):
            for (j, bbox2) in enumerate(bbox_list2):
                iou_i = self.compute_IOU(bbox1, bbox2)
                iou_mat[(i, j)] = iou_i
        # Keep the raw IoU values; binarize iou_mat at the 0.5 threshold.
        iou_mat_ov = iou_mat.copy()
        iou_mat[(iou_mat >= 0.5)] = 1
        iou_mat[(iou_mat < 0.5)] = 0
        match_pairs = np.nonzero(iou_mat)
        match_pairs_dict = {}
        match_pairs_ov = {}
        if (iou_mat.max() > 0):
            for (i, pred_id) in enumerate(match_pairs[1]):
                if (pred_id not in match_pairs_dict.keys()):
                    match_pairs_dict[pred_id] = []
                    match_pairs_ov[pred_id] = []
                match_pairs_dict[pred_id].append(match_pairs[0][i])
                match_pairs_ov[pred_id].append(iou_mat_ov[(match_pairs[0][i], pred_id)])
        return (match_pairs_dict, match_pairs_ov)

    def compute_IOU(self, bbox1, bbox2):
        """IoU of two boxes; 0 unless their category ids match.

        Boxes are [x0, y0, x1, y1] with inclusive pixel coordinates (hence the
        +1 terms in the area computation).
        """
        if isinstance(bbox1['category_id'], str):
            bbox1['category_id'] = int(bbox1['category_id'].replace('\n', ''))
        if isinstance(bbox2['category_id'], str):
            bbox2['category_id'] = int(bbox2['category_id'].replace('\n', ''))
        if (bbox1['category_id'] == bbox2['category_id']):
            rec1 = bbox1['bbox']
            rec2 = bbox2['bbox']
            S_rec1 = (((rec1[2] - rec1[0]) + 1) * ((rec1[3] - rec1[1]) + 1))
            S_rec2 = (((rec2[2] - rec2[0]) + 1) * ((rec2[3] - rec2[1]) + 1))
            sum_area = (S_rec1 + S_rec2)
            left_line = max(rec1[1], rec2[1])
            right_line = min(rec1[3], rec2[3])
            top_line = max(rec1[0], rec2[0])
            bottom_line = min(rec1[2], rec2[2])
            if ((left_line >= right_line) or (top_line >= bottom_line)):
                return 0
            else:
                intersect = (((right_line - left_line) + 1) * ((bottom_line - top_line) + 1))
                return (intersect / (sum_area - intersect))
        else:
            return 0

    def add_One(self, prediction):
        """Shift predicted box coordinates by +1 to match the GT convention.

        NOTE(review): mutates the incoming prediction dicts in place.
        """
        for (i, pred_bbox) in enumerate(prediction):
            rec = pred_bbox['bbox']
            rec[0] += 1
            rec[1] += 1
            rec[2] += 1
            rec[3] += 1
        return prediction
class CUDACallback(Callback):
    """Lightning callback that reports per-epoch wall time and peak GPU memory."""

    def on_train_epoch_start(self, trainer, pl_module):
        # Reset CUDA memory stats and take a synchronized timestamp.
        torch.cuda.reset_peak_memory_stats(trainer.root_gpu)
        torch.cuda.synchronize(trainer.root_gpu)
        self.start_time = time.time()

    def on_train_epoch_end(self, trainer, pl_module, outputs):
        torch.cuda.synchronize(trainer.root_gpu)
        peak_mib = torch.cuda.max_memory_allocated(trainer.root_gpu) / (2 ** 20)
        elapsed = time.time() - self.start_time
        try:
            # Average across processes when the training plugin supports reduce.
            peak_mib = trainer.training_type_plugin.reduce(peak_mib)
            elapsed = trainer.training_type_plugin.reduce(elapsed)
            rank_zero_info(f'Average Epoch time: {elapsed:.2f} seconds')
            rank_zero_info(f'Average Peak memory: {peak_mib:.2f} MiB')
        except AttributeError:
            pass
def get_real_dataloaders(dataset, data_dir, batch_size, num_workers, metadata, distributed=True):
    """Build train/val DataLoaders (and their samplers) for a real dataset.

    Shuffling is enabled only when no distributed sampler is supplied.
    """
    transform_train, transform_val = get_transforms(TRANFORMS_MAPPING[dataset], metadata.image_size)
    train_set, val_set, train_sampler, val_sampler = get_dataset(dataset, data_dir, transform_train, transform_val, distributed)
    loaders = [
        DataLoader(split, batch_size=batch_size, shuffle=(sampler is None), sampler=sampler, num_workers=num_workers, pin_memory=True)
        for split, sampler in ((train_set, train_sampler), (val_set, val_sampler))
    ]
    return (loaders[0], loaders[1], train_sampler, val_sampler)
def batch_counter_hook(module, input, output):
    """Forward hook that accumulates the batch size into module.__batch_counter__.

    `input` is the tuple of positional forward args; the first entry carries
    the batch dimension.
    """
    first_arg = input[0]
    module.__batch_counter__ += first_arg.shape[0]
class OrRule(MappingRule):
    """Composite rule that matches when any of its sub-rules matches."""

    def __init__(self, *rules):
        self.rules = rules

    def matches(self, key):
        """True if at least one sub-rule matches `key`."""
        for rule in self.rules:
            if rule.matches(key):
                return True
        return False

    def apply(self, key, value):
        # NOTE(review): every sub-rule is applied in sequence, regardless of
        # whether it matches — confirm this is the intended semantics.
        items = [(key, value)]
        for rule in self.rules:
            items = [rule.apply(k, v) for (k, v) in items]
        return items
def reshape_patch(img_tensor, patch_size):
    """Fold non-overlapping patch_size x patch_size patches into the channel axis.

    Args:
        img_tensor: array of shape (batch, seq, height, width, channels);
            height and width must be divisible by patch_size.
        patch_size: edge length of the square patches.

    Returns:
        Array of shape (batch, seq, height // patch_size, width // patch_size,
        patch_size * patch_size * channels).
    """
    assert (5 == img_tensor.ndim)
    batch_size, seq_length, img_height, img_width, num_channels = np.shape(img_tensor)
    # BUG FIX: use integer division — float dimensions from `/` make
    # np.reshape raise on modern numpy.
    patch_h = img_height // patch_size
    patch_w = img_width // patch_size
    a = np.reshape(img_tensor, [batch_size, seq_length, patch_h, patch_size, patch_w, patch_size, num_channels])
    # Bring the two intra-patch axes next to the channel axis before flattening.
    b = np.transpose(a, [0, 1, 2, 4, 3, 5, 6])
    patch_tensor = np.reshape(b, [batch_size, seq_length, patch_h, patch_w, (patch_size * patch_size) * num_channels])
    return patch_tensor
class VAE_ID(nn.Module):
    """MLP variational autoencoder whose parameters are frozen at construction.

    Encoder: `hidden_nums` Linear+LeakyReLU layers mapping `in_channels` to
    `hidden_dim`; two heads produce mu / log-variance of size `latent_dim`.
    Decoder mirrors the encoder and ends with a projection back to
    `in_channels`.
    """

    def __init__(self, in_channels, latent_dim, hidden_dim=512, hidden_nums=5, **kwargs) -> None:
        super(VAE_ID, self).__init__()
        self.epoch = 0
        self.step = 0
        self.latent_dim = latent_dim
        self.in_channels_ori = in_channels
        # Encoder stack: first layer maps in_channels, the rest are hidden_dim wide.
        encoder_layers = []
        width = in_channels
        for _ in range(hidden_nums):
            encoder_layers.append(nn.Sequential(nn.Linear(width, hidden_dim), nn.LeakyReLU()))
            width = hidden_dim
        self.encoder = nn.Sequential(*encoder_layers)
        self.fc_mu = nn.Linear(hidden_dim, latent_dim)
        self.fc_var = nn.Linear(hidden_dim, latent_dim)
        self.decoder_input = nn.Linear(latent_dim, hidden_dim)
        decoder_layers = [nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.LeakyReLU()) for _ in range(hidden_nums)]
        self.decoder = nn.Sequential(*decoder_layers)
        self.final_layer = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.LeakyReLU(), nn.Linear(hidden_dim, self.in_channels_ori))
        # The model is used as a fixed (non-trainable) module.
        for param in self.parameters():
            param.requires_grad = False

    def set_device(self, device):
        """Record the target device on this module and its submodules."""
        self.device = device
        for sub in (self.encoder, self.fc_mu, self.fc_var, self.decoder, self.final_layer):
            sub.device = device

    def encode(self, input):
        """Return [mu, log_var] of the approximate posterior for `input`."""
        hidden = self.encoder(input)
        return [self.fc_mu(hidden), self.fc_var(hidden)]

    def decode(self, z):
        """Map a latent code back to input space."""
        hidden = self.decoder(self.decoder_input(z))
        return self.final_layer(hidden)

    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, sigma^2) via the reparameterization trick."""
        std = torch.exp(0.5 * logvar)
        return mu + torch.randn_like(std) * std

    def forward(self, input, **kwargs):
        """Return [reconstruction, input, mu, log_var]."""
        mu, log_var = self.encode(input)
        recons = self.decode(self.reparameterize(mu, log_var))
        return [recons, input, mu, log_var]

    def sample(self, num_samples: int, current_device: int, **kwargs):
        """Decode `num_samples` random latent draws on `current_device`."""
        z = torch.randn(num_samples, self.latent_dim).to(current_device)
        return self.decode(z)

    def generate(self, x, **kwargs):
        """Return only the reconstruction of x."""
        return self.forward(x)[0]
@pytest.mark.skipif(not hasattr(m, 'has_string_view'), reason='no <string_view>')
def test_string_view(capture):
    """Verify C++17 std::string_view conversion for UTF-8/16/32.

    RESTORED(review): the decorator had lost its `@pytest.mark` prefix and the
    🎂 emoji had been stripped from the string literals, which the expected
    byte / code-unit lists ([240,159,142,130], [55356,57218], [127874]) and
    the printed lengths require.
    """
    assert m.string_view_chars('Hi') == [72, 105]
    assert m.string_view_chars('Hi 🎂') == [72, 105, 32, 240, 159, 142, 130]
    assert m.string_view16_chars('Hi 🎂') == [72, 105, 32, 55356, 57218]
    assert m.string_view32_chars('Hi 🎂') == [72, 105, 32, 127874]
    assert m.string_view_return() == 'utf8 secret 🎂'
    assert m.string_view16_return() == 'utf16 secret 🎂'
    assert m.string_view32_return() == 'utf32 secret 🎂'
    with capture:
        m.string_view_print('Hi')
        m.string_view_print('utf8 🎂')
        m.string_view16_print('utf16 🎂')
        m.string_view32_print('utf32 🎂')
    assert capture == """
        Hi 2
        utf8 🎂 9
        utf16 🎂 8
        utf32 🎂 7
    """
    with capture:
        m.string_view_print('Hi, ascii')
        m.string_view_print('Hi, utf8 🎂')
        m.string_view16_print('Hi, utf16 🎂')
        m.string_view32_print('Hi, utf32 🎂')
    assert capture == """
        Hi, ascii 9
        Hi, utf8 🎂 13
        Hi, utf16 🎂 12
        Hi, utf32 🎂 11
    """
def get_diml_indoor_loader(data_dir_root, batch_size=1, **kwargs):
    """Wrap the DIML indoor depth dataset in a DataLoader."""
    return DataLoader(DIML_Indoor(data_dir_root), batch_size, **kwargs)
def bottomPvis():
    """Toggle the bottom panel to match the button's visibility state."""
    bPbu.switch()
    # Query the (post-toggle) status once instead of once per branch.
    status = bPbu.status()
    if status == 'Bhide':
        bottomP.off()
    elif status == 'Bshow':
        bottomP.on()
def test_interpsnapshotKeplerPotential_logR_eval():
    """Interpolated snapshot RZ potential (logR grid) should match a point-mass Kepler potential."""
    s = pynbody.new(star=1)
    s['mass'] = 1.0
    s['eps'] = 0.0
    sp = potential.InterpSnapshotRZPotential(s, rgrid=(numpy.log(0.01), numpy.log(20.0), 251), logR=True, zgrid=(0.0, 0.2, 201), interpPot=True, zsym=True)
    kp = potential.KeplerPotential(amp=1.0)
    rs = numpy.linspace(0.02, 16.0, 20)
    zs = numpy.linspace(-0.15, 0.15, 40)
    mr, mz = numpy.meshgrid(rs, zs)
    mr = mr.flatten()
    mz = mz.flatten()
    # Evaluate both potentials once; the original recomputed sp/kp on the full
    # grid several times inside the assert message.
    relerr = numpy.fabs((sp(mr, mz) - kp(mr, mz)) / kp(mr, mz))
    imax = numpy.argmax(relerr)
    assert numpy.all(relerr < 10.0 ** -5.0), (
        f'RZPot interpolation w/ interpRZPotential fails for vector input, '
        f'w/ logR at (R,z) = ({mr[imax]:f},{mz[imax]:f}) by {numpy.amax(relerr):g}')
    return None
class Pool5FnVGG(nn.Module):
    """VGG16-style convolutional trunk (conv layers only, through pool5)."""

    def __init__(self, opt):
        super(Pool5FnVGG, self).__init__()
        # VGG16 layer configuration; 'M' marks a 2x2 max-pool.
        self.cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']
        self.x_conv_layers = self.make_conv_layers()
        self._initialize_weights()

    def make_conv_layers(self, batch_norm=True):
        """Translate self.cfg into a Sequential of conv / BN / ReLU / pool layers."""
        layers = []
        channels = 3
        for spec in self.cfg:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
                continue
            conv = nn.Conv2d(channels, spec, kernel_size=3, padding=1)
            if batch_norm:
                layers.extend([conv, nn.BatchNorm2d(spec), nn.ReLU(inplace=True)])
            else:
                layers.extend([conv, nn.ReLU(inplace=True)])
            channels = spec
        return nn.Sequential(*layers)

    def _initialize_weights(self):
        """He-style init for convs, constant init for BN, small Gaussian for linears."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                module.weight.data.normal_(0, 0.01)
                module.bias.data.zero_()

    def forward(self, x):
        """Return the pool5 feature map for input images x."""
        return self.x_conv_layers(x)
class MInstrDataset(QuestionTemplateMixin, Dataset):
    """Multimodal instruction dataset backed by a jsonl annotation file.

    Each line of `filename` is one JSON-encoded sample; image paths are
    resolved relative to `image_folder` when it is given. Subclasses must
    implement `__getitem__`.
    """
    _repr_indent = 4

    def __init__(self, filename, image_folder=None, filename_positive=None, filename_negative=None, image_folder_positive=None, image_folder_negative=None, label=None, label_negative=None, seed=None, **kwargs):
        super().__init__(**kwargs)
        self.filename = filename
        self.image_folder = image_folder
        self.filename_positive = filename_positive
        self.filename_negative = filename_negative
        self.image_folder_positive = image_folder_positive
        self.image_folder_negative = image_folder_negative
        self.label_name = label
        self.label_negative = label_negative
        # Dedicated RNG so template sampling is reproducible for a given seed.
        self.rng = np.random.default_rng(seed)
        with open(filename, 'r', encoding='utf8') as f:
            self.data = [line for line in f]

    def get_raw_item(self, index):
        """Decode and return the JSON record at `index`."""
        return json.loads(self.data[index])

    def get_image(self, image_path):
        """Open `image_path` (optionally under image_folder) as an RGB PIL image."""
        if self.image_folder is not None:
            image_path = os.path.join(self.image_folder, image_path)
        return Image.open(image_path).convert('RGB')

    def get_template(self):
        """Sample one question template at random."""
        return self.rng.choice(self.templates)

    def __getitem__(self, index):
        raise NotImplementedError

    def __len__(self):
        return len(self.data)

    def __repr__(self) -> str:
        head = 'Dataset ' + self.__class__.__name__
        body = [f'Number of datapoints: {self.__len__()}', f'ann file: {self.filename}']
        if self.image_folder is not None:
            body.append(f'image folder: {self.image_folder}')
        body += self.extra_repr().splitlines()
        indent = ' ' * self._repr_indent
        return '\n'.join([head] + [indent + line for line in body])

    def extra_repr(self) -> str:
        return ''
def get_args():
    """Parse the config path, merge its 'includes' files, apply CLI dotlist overrides.

    Returns:
        (config, override): the fully merged OmegaConf config and the
        override config built from unknown CLI args.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('config', type=str)
    args, others = parser.parse_known_args()
    config = OmegaConf.load(args.config)
    includes = config.get('includes', [])
    if not isinstance(includes, collections.abc.Sequence):
        raise AttributeError('Includes must be a list, {} provided'.format(type(includes)))
    include_mapping = OmegaConf.create()
    for include in includes:
        # Relative include paths are resolved against the main config's directory.
        if not os.path.exists(include):
            include = os.path.join(os.path.dirname(args.config), include)
        include_mapping = OmegaConf.merge(include_mapping, OmegaConf.load(include))
    # The main config takes precedence over its includes; CLI overrides win last.
    config = OmegaConf.merge(include_mapping, config)
    override = merge_with_dotlist(OmegaConf.create(), others)
    return (OmegaConf.merge(config, override), override)
@_registry(operator_type='FusedBatchNormV3')
class FusedBatchNormV3(Operator):
    """Operator wrapper for TensorFlow's FusedBatchNormV3 node.

    BUG FIX: the registration call above had lost its '@' and ran as a bare
    statement, so the class was never registered with the operator registry.
    """

    def __init__(self):
        super().__init__()

    def set_attr(self, framework, node):
        """Extract epsilon / exponential_avg_factor / is_training from a TF node."""
        if framework == 'tensorflow':
            self._attr['epsilon'] = node.attr['epsilon'].f
            # NOTE(review): TF declares exponential_avg_factor as a float attr;
            # reading `.i` here would yield 0 — confirm against upstream intent.
            self._attr['exponential_avg_factor'] = node.attr['exponential_avg_factor'].i
            self._attr['is_training'] = node.attr['is_training'].b
class XLMRobertaModel(metaclass=DummyObject):
    """Placeholder for the real XLMRobertaModel when torch is not installed.

    NOTE(review): follows the transformers dummy-object pattern — instantiation
    presumably raises a helpful backend error via requires_backends; confirm.
    """
    # Backends the real implementation needs.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
@_model_architecture('transformer_lm', 'transformer_lm_gpt3_xl')
def transformer_lm_gpt3_xl(args):
    """GPT-3 XL (~1.3B) decoder config: 24 layers, d_model 2048, 32 heads.

    BUG FIX: the registration call above had lost its '@' and ran as a bare
    statement, so the architecture was never registered.
    NOTE(review): upstream fairseq names this decorator
    `register_model_architecture` — confirm the local identifier.
    """
    args.decoder_layers = getattr(args, 'decoder_layers', 24)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 2048)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 32)
    base_gpt3_architecture(args)
def IPOT_torch_uniform(C, n, m, beta=0.5):
    """Inexact proximal point OT (IPOT) with uniform marginals on CUDA tensors.

    Args:
        C: (n, m) cost matrix on GPU.
        n, m: marginal sizes.
        beta: proximal step size.

    Returns:
        Detached (n, m) transport plan after 50 proximal iterations.
    """
    sigma = torch.ones(int(m), 1).cuda() / m
    T = torch.ones(n, m).cuda()
    A = torch.exp(-C / beta)
    for _ in range(50):
        Q = A * T
        # One inner Sinkhorn-style update per proximal step.
        for _ in range(1):
            delta = 1 / (n * torch.mm(Q, sigma))
            a = torch.mm(torch.transpose(Q, 0, 1), delta)
            sigma = 1 / (float(m) * a)
        tmp = torch.mm(torch.diag(torch.squeeze(delta)), Q)
        dim_ = torch.diag(torch.squeeze(sigma)).dim()
        assert dim_ in (1, 2)
        T = torch.mm(tmp, torch.diag(torch.squeeze(sigma)))
    return T.detach()
class ImageCenterCrop(ImagePreprocessing):
    """Center-crop preprocessing step of size (crop_width, crop_height).

    NOTE(review): the parent's signature is not visible here; arguments are
    forwarded positionally with bigdl_type first, matching the BigDL
    JVM-binding pattern — confirm against ImagePreprocessing.__init__.
    """

    def __init__(self, crop_width, crop_height, is_clip=True, bigdl_type='float'):
        super(ImageCenterCrop, self).__init__(bigdl_type, crop_width, crop_height, is_clip)
class PytorchConverter(base_converter.ConverterInterface):
activation_type = {'ReLU': ActivationType.RELU, 'ReLU6': ActivationType.RELUX}
pooling_type_mode = {NodeKind.AvgPool2D: PoolingType.AVG, NodeKind.AdaptiveAvgPool2D: PoolingType.AVG, NodeKind.MaxPool2D: PoolingType.MAX}
eltwise_type = {NodeKind.Add: EltwiseType.SUM, NodeKind.Add_: EltwiseType.SUM}
def model_to_graph(self):
dummy_input = ()
for in_node in self._option.input_nodes.values():
if (len(in_node.shape) == 4):
in_data_format = in_node.data_format
if (in_data_format == DataFormat.NHWC):
(N, H, W, C) = in_node.shape
elif (in_data_format == DataFormat.NCHW):
(N, C, H, W) = in_node.shape
dummy_input = (dummy_input + (torch.randn([N, C, H, W]),))
else:
dummy_input = (dummy_input + (torch.randn(in_node.shape),))
(graph, params_dict) = _model_to_graph(self._loaded_model, dummy_input)
return (graph, params_dict)
def init_output_shape_cache(self):
self._output_shape_cache = {}
for input_node in self._option.input_nodes.values():
input_shape = input_node.shape[:]
if ((len(input_shape) == 4) and (input_node.data_format == DataFormat.NHWC)):
Transformer.transpose_shape(input_shape, [0, 3, 1, 2])
self._output_shape_cache[input_node.name] = input_shape
    def __init__(self, option, src_model_file):
        """Load a TorchScript model and prepare MACE conversion state.

        Args:
            option: converter options (input nodes, data type, ...).
            src_model_file: path to the serialized TorchScript model.
        """
        # Patch TorchScript Node indexing so node['attr'] works uniformly.
        torch._C.Node.__getitem__ = _node_getitem
        # Node kinds that only feed parameters/constants — never become MACE ops.
        self._param_converts = (NodeKind.Constant, NodeKind.List, NodeKind.Size, NodeKind.NumToTensor, NodeKind.Int)
        self._option = option
        self._converter_info = dict()
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.OIHW)
        ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NCHW)
        ConverterUtil.set_framework_type(self._mace_net_def, FrameworkType.PYTORCH.value)
        # Dispatch table: TorchScript node kind -> conversion method.
        self._op_converters = {NodeKind.AdaptiveAvgPool2D: self.convert_pool, NodeKind.Add: self.convert_add, NodeKind.Add_: self.convert_add, NodeKind.Addmm: self.convert_addmm, NodeKind.AvgPool2D: self.convert_pool, NodeKind.BatchNorm: self.convert_batch_norm, NodeKind.Cat: self.convert_cat, NodeKind.Convolution: self.convert_conv2d, NodeKind.Dropout: self.convert_dropout, NodeKind.Flatten: self.convert_flatten, NodeKind.HardTanh_: self.convert_hardtanh, NodeKind.HardTanh: self.convert_hardtanh, NodeKind.Matmul: self.convert_matmul, NodeKind.MaxPool2D: self.convert_pool, NodeKind.Relu: self.convert_relu, NodeKind.Relu_: self.convert_relu, NodeKind.Reshape: self.convert_reshape, NodeKind.T: self.convert_t}
        self._loaded_model = torch.jit.load(src_model_file)
        self._loaded_model.eval()
        (self._graph, self._params_dict) = self.model_to_graph()
        # Only tensor/list/tuple single-output graphs are supported.
        self._output_node_name = list(self._graph.outputs())[0].debugName()
        self._output_value_type = list(self._graph.outputs())[0].type()
        mace_check(isinstance(self._output_value_type, (ValueType.TensorType, ValueType.ListType, ValueType.TupleType)), 'return type {} not supported'.format(self._output_value_type))
        self._node_map = {}
        self.init_output_shape_cache()
    def run(self):
        """Convert all graph ops; return the MACE net def and converter info."""
        self.convert_ops()
        return (self._mace_net_def, self._converter_info)
def init_ignore_t(self, all_nodes):
self.ignore_t = set()
for node in all_nodes:
node_kind = node.kind()
if ((node_kind == NodeKind.T) and self.is_trans_fc_w(node)):
self.ignore_t.add(node.output().debugName())
    def convert_ops(self):
        """Walk all graph nodes and convert each supported one into a MACE op."""
        all_nodes = list(self._graph.nodes())
        self.init_ignore_t(all_nodes)
        for node in all_nodes:
            # A tuple/list output node is graph packaging, not a computation.
            if (isinstance(self._output_value_type, (ValueType.TupleType, ValueType.ListType)) and (node.output().debugName() == self._output_node_name)):
                print('pytorch return type is {}, skipping adding it into MACE graph'.format(self._output_value_type))
                continue
            inputs_vals = list(node.inputs())
            outputs_vals = list(node.outputs())
            mace_check((len(outputs_vals) == 1), 'pytorch converter supports nodes with single output, {} outputs found'.format(len(outputs_vals)))
            node_kind = node.kind()
            # Constants/params are consumed by the op converters themselves.
            if (node_kind in self._param_converts):
                continue
            self._node_map[outputs_vals[0].debugName()] = node
            mace_check((node_kind in self._op_converters), 'MACE does not support pytorch node {} yet'.format(node_kind))
            self._op_converters[node_kind](node, inputs_vals, outputs_vals)
def convert_general_op(self, outputs_vals):
op = self._mace_net_def.op.add()
op.name = outputs_vals[0].debugName()
data_type_arg = op.arg.add()
data_type_arg.name = 'T'
data_type_arg.i = self._option.data_type
framework_type_arg = op.arg.add()
framework_type_arg.name = MaceKeyword.mace_framework_type_str
framework_type_arg.i = FrameworkType.PYTORCH.value
ConverterUtil.add_data_format_arg(op, DataFormat.NCHW)
return op
def add_output_shape(self, op, shapes):
mace_check((len(op.output) == len(shapes)), 'Op {} ({}) output count is different from output shape count'.format(op.name, op.type))
for i in range(len(shapes)):
output_name = op.output[i]
output_shape = op.output_shape.add()
output_shape.dims.extend(shapes[i])
self._output_shape_cache[output_name] = shapes[i]
def infer_shape_general(self, op):
if (len(op.input) > 0):
mace_check((op.input[0] in self._output_shape_cache), 'Op {} input {} does not exist'.format(op.name, op.input[0]))
input_shape = self._output_shape_cache[op.input[0]]
if (op.type == MaceOp.BatchNorm.name):
mace_check((len(input_shape) == 4), 'only 2D BatchNorm is supported, but {}D input found'.format(len(input_shape)))
self.add_output_shape(op, [input_shape])
def infer_shape_conv2d_pool(self, op):
input_shape = self._output_shape_cache[op.input[0]]
output_shape = np.zeros_like(input_shape)
if (not (op.type == MaceOp.Pooling)):
filter_shape = self._output_shape_cache[op.input[1]]
paddings = ConverterUtil.get_arg(op, MaceKeyword.mace_padding_values_str).ints
strides = ConverterUtil.get_arg(op, MaceKeyword.mace_strides_str).ints
dilations_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_dilations_str)
if (dilations_arg is not None):
dilations = dilations_arg.ints
else:
dilations = [1, 1]
if (op.type == MaceOp.Pooling.name):
kernels = ConverterUtil.get_arg(op, MaceKeyword.mace_kernel_str).ints
if (ConverterUtil.get_arg(op, MaceKeyword.mace_global_pooling_str) is not None):
kernels[0] = input_shape[2]
kernels[1] = input_shape[3]
round_func = math.floor
round_mode_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_round_mode_str)
if ((round_mode_arg is not None) and (round_mode_arg.i == RoundMode.CEIL.value)):
round_func = math.ceil
output_shape[0] = input_shape[0]
mace_check(((ConverterUtil.data_format(op) == DataFormat.NCHW) and (ConverterUtil.filter_format(self._mace_net_def) == DataFormat.OIHW)), 'MACE can only infer shape for NCHW input and OIHW filter')
if (op.type == MaceOp.DepthwiseConv2d.name):
output_shape[1] = filter_shape[1]
elif (op.type == MaceOp.Conv2D.name):
output_shape[1] = filter_shape[0]
else:
output_shape[1] = input_shape[1]
(p, d, s) = (paddings[0], dilations[0], strides[0])
k = (kernels[0] if (op.type == MaceOp.Pooling.name) else filter_shape[2])
output_shape[2] = int(round_func(((float((((input_shape[2] + p) - (d * (k - 1))) - 1)) / float(s)) + 1)))
(p, d, s) = (paddings[1], dilations[1], strides[1])
k = (kernels[1] if (op.type == MaceOp.Pooling.name) else filter_shape[3])
output_shape[3] = int(round_func(((float((((input_shape[3] + p) - (d * (k - 1))) - 1)) / float(s)) + 1)))
self.add_output_shape(op, [output_shape])
def convert_conv2d(self, node, inputs_vals, outputs_vals):
    """Convert a torch conv2d JIT node into a MACE Conv2D/DepthwiseConv2d op."""
    op = self.convert_general_op(outputs_vals)
    # Inputs 0/1 are the activation and the weight tensor.
    op.input.extend([inputs_vals[i].debugName() for i in range(2)])
    op.output.extend([outputs_vals[0].debugName()])
    key = inputs_vals[1].debugName()
    filter_shape = self._params_dict[key].shape
    filter_shape = [int(elem) for elem in filter_shape]
    mace_check((len(filter_shape) == 4), 'MACE only supports 2D Conv, current Conv is {}D'.format((len(filter_shape) - 2)))
    filter_cin = filter_shape[1]
    filter_cout = filter_shape[0]
    group_node = inputs_vals[ConvParamIdx.groups_idx].node()
    ngroups = group_node['value']
    # Only plain conv (groups == 1) or depthwise conv is supported.
    mace_check(((ngroups == 1) or (ngroups == filter_cout)), 'MACE only support conv without group or depthwise conv, but group number of {} found'.format(ngroups))
    is_depthwise = ((ngroups != 1) and (ngroups == filter_cout) and (filter_cin == 1))
    if is_depthwise:
        op.type = MaceOp.DepthwiseConv2d.name
    else:
        op.type = MaceOp.Conv2D.name
    strides_node = inputs_vals[ConvParamIdx.stride_idx].node()
    strides_vals = list(strides_node.inputs())
    mace_strides = [strides_vals[i].node()['value'] for i in range(2)]
    strides_arg = op.arg.add()
    strides_arg.name = MaceKeyword.mace_strides_str
    strides_arg.ints.extend(mace_strides)
    pads_node = inputs_vals[ConvParamIdx.pad_idx].node()
    pads_vals = list(pads_node.inputs())
    # torch stores per-side padding; MACE expects the total (x2) per axis.
    mace_pads = [(2 * pads_vals[i].node()['value']) for i in range(2)]
    pads_arg = op.arg.add()
    pads_arg.name = MaceKeyword.mace_padding_values_str
    pads_arg.ints.extend(mace_pads)
    dilations_node = inputs_vals[ConvParamIdx.dilation_idx].node()
    dilations_vals = list(dilations_node.inputs())
    mace_dilations = [dilations_vals[i].node()['value'] for i in range(2)]
    dilation_arg = op.arg.add()
    dilation_arg.name = MaceKeyword.mace_dilations_str
    dilation_arg.ints.extend(mace_dilations)
    filter_tensor_name = inputs_vals[ConvParamIdx.weight_idx].debugName()
    filter_data = self._params_dict[filter_tensor_name]
    # Depthwise filters swap the O/I axes relative to torch's layout.
    if is_depthwise:
        filter_data = filter_data.permute((1, 0, 2, 3))
    filter_data = filter_data.numpy()
    self.add_tensor_and_shape(filter_tensor_name, filter_data.shape, mace_pb2.DT_FLOAT, filter_data)
    # Bias is optional: a NoneType-typed value means "no bias".
    bias_val = inputs_vals[ConvParamIdx.bias_idx]
    has_bias = (not isinstance(bias_val.type(), ValueType.NoneType))
    if has_bias:
        bias_tensor_name = inputs_vals[ConvParamIdx.bias_idx].debugName()
        bias_data = self._params_dict[bias_tensor_name]
        bias_data = bias_data.numpy()
        self.add_tensor_and_shape(bias_tensor_name, bias_data.shape, mace_pb2.DT_FLOAT, bias_data)
        op.input.extend([bias_tensor_name])
    self.infer_shape_conv2d_pool(op)
def convert_batch_norm(self, node, inputs_vals, outputs_vals):
    """Convert an inference-mode torch batch_norm into a MACE BatchNorm op.

    Folds gamma/beta/running-mean/running-var/eps into per-channel scale
    and offset constant tensors so that y = scale * x + offset.
    """
    op = self.convert_general_op(outputs_vals)
    op.input.extend([inputs_vals[0].debugName()])
    op.output.extend([outputs_vals[0].debugName()])
    op.type = MaceOp.BatchNorm.name
    is_training = int(inputs_vals[BNParamIdx.training_idx].node()['value'])
    mace_check((is_training == 0), 'Only support batch normalization with is_training = 0, but got {}'.format(is_training))
    state_dict = self._params_dict
    gamma_key = inputs_vals[BNParamIdx.weight_idx].debugName()
    gamma_value = state_dict[gamma_key].numpy().astype(np.float32)
    beta_key = inputs_vals[BNParamIdx.bias_idx].debugName()
    beta_value = state_dict[beta_key].numpy().astype(np.float32)
    mean_name = inputs_vals[BNParamIdx.running_mean_idx].debugName()
    mean_value = state_dict[mean_name].numpy().astype(np.float32)
    var_name = inputs_vals[BNParamIdx.running_var_idx].debugName()
    var_value = state_dict[var_name].numpy().astype(np.float32)
    epsilon_value = inputs_vals[BNParamIdx.eps_idx].node()['value']
    scale_name = (gamma_key + '_scale')
    offset_name = (beta_key + '_offset')
    # np.sqrt is the vectorized elementwise sqrt (and keeps float32);
    # np.vectorize(math.sqrt) looped in Python and upcast to float64.
    scale_value = ((1.0 / np.sqrt((var_value + epsilon_value))) * gamma_value)
    offset_value = (((- mean_value) * scale_value) + beta_value)
    self.add_tensor_and_shape(scale_name, scale_value.shape, mace_pb2.DT_FLOAT, scale_value)
    self.add_tensor_and_shape(offset_name, offset_value.shape, mace_pb2.DT_FLOAT, offset_value)
    op.input.extend([scale_name, offset_name])
    self.infer_shape_general(op)
def convert_hardtanh(self, node, inputs_vals, outputs_vals):
    """Convert hardtanh(min=0, max=6) into a MACE ReLU6 activation.

    Any other clamp range is rejected: MACE only models this node as ReLU6.
    """
    op = self.convert_general_op(outputs_vals)
    op.type = MaceOp.Activation.name
    min_val = inputs_vals[1].node()['value']
    max_val = inputs_vals[2].node()['value']
    # Tolerant float compares: min must be 0 and max must be 6.
    mace_check((abs(min_val) < 1e-08), 'MACE only supports min == 0 Clip op')
    op.input.extend([inputs_vals[0].debugName()])
    op.output.extend([outputs_vals[0].debugName()])
    type_arg = op.arg.add()
    type_arg.name = MaceKeyword.mace_activation_type_str
    mace_check((abs((max_val - 6.0)) < 1e-08), 'only support converting hardtanh_ to ReLU6 yet')
    type_arg.s = six.b(self.activation_type['ReLU6'].name)
    limit_arg = op.arg.add()
    limit_arg.name = MaceKeyword.mace_activation_max_limit_str
    limit_arg.f = 6.0
    self.infer_shape_general(op)
def convert_add(self, node, inputs_vals, outputs_vals):
    """Convert a torch add node into a MACE Eltwise op.

    When exactly one operand is a constant scalar, it is folded into
    scalar-input args and removed from the op's inputs. The original
    implementation duplicated that handling in two mirrored branches;
    they are unified here via `scalar_idx`.
    """
    op = self.convert_general_op(outputs_vals)
    op.type = MaceOp.Eltwise.name
    type_arg = op.arg.add()
    type_arg.name = MaceKeyword.mace_element_type_str
    node_kind = node.kind()
    type_arg.i = self.eltwise_type[node_kind].value
    # torch add(a, b, alpha) computes a + alpha*b; only alpha == 1 maps
    # onto a plain MACE Eltwise add.
    alpha = inputs_vals[2].node()['value']
    mace_check((alpha == 1), 'MACE only support alpha value of 1 for Add Op, {} found'.format(alpha))
    op.input.extend([inputs_vals[i].debugName() for i in range(2)])
    op.output.extend([outputs_vals[0].debugName()])
    lhs_kind = inputs_vals[0].node().kind()
    rhs_kind = inputs_vals[1].node().kind()
    # Index of the single constant-scalar operand, if any.
    scalar_idx = None
    if ((lhs_kind != NodeKind.Constant) and (rhs_kind == NodeKind.Constant)):
        scalar_idx = 1
    elif ((lhs_kind == NodeKind.Constant) and (rhs_kind != NodeKind.Constant)):
        scalar_idx = 0
    if (scalar_idx is not None):
        const_value = inputs_vals[scalar_idx].node()['value']
        value_arg = op.arg.add()
        value_arg.name = MaceKeyword.mace_scalar_input_str
        value_arg.f = float(const_value)
        value_index_arg = op.arg.add()
        value_index_arg.name = MaceKeyword.mace_scalar_input_index_str
        value_index_arg.i = scalar_idx
        # Drop the constant from the tensor inputs; it now lives in args.
        del op.input[scalar_idx]
    self.infer_shape_general(op)
def convert_relu(self, node, inputs_vals, outputs_vals):
    """Map a torch relu node onto a MACE Activation op of type ReLU."""
    op = self.convert_general_op(outputs_vals)
    op.type = MaceOp.Activation.name
    op.input.append(inputs_vals[0].debugName())
    op.output.append(outputs_vals[0].debugName())
    act_arg = op.arg.add()
    act_arg.name = MaceKeyword.mace_activation_type_str
    act_arg.s = six.b(self.activation_type['ReLU'].name)
    self.infer_shape_general(op)
def infer_shape_cat(self, op):
    """Concat output shape: input[0]'s shape with the concat axis replaced
    by the sum of every input's size along that axis."""
    merged_shape = list(self._output_shape_cache[op.input[0]])
    axis = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str).i
    if (axis < 0):
        axis += len(merged_shape)
    merged_shape[axis] = sum(
        list(self._output_shape_cache[name])[axis] for name in op.input)
    self.add_output_shape(op, [merged_shape])
def convert_cat(self, node, inputs_vals, outputs_vals):
    """Convert torch cat into MACE Concat, unpacking the tensor-list input."""
    op = self.convert_general_op(outputs_vals)
    op.type = MaceOp.Concat.name
    # inputs_vals[0] is the list-construct node; its inputs are the tensors.
    tensor_vals = list(inputs_vals[0].node().inputs())
    op.input.extend(v.debugName() for v in tensor_vals)
    op.output.append(outputs_vals[0].debugName())
    axis_arg = op.arg.add()
    axis_arg.name = MaceKeyword.mace_axis_str
    axis_arg.i = inputs_vals[1].node()['value']
    self.infer_shape_cat(op)
def convert_flatten(self, node, inputs_vals, outputs_vals):
    """Convert torch flatten(start_dim, end_dim) into a MACE Reshape."""
    op = self.convert_general_op(outputs_vals)
    op.type = MaceOp.Reshape.name
    op.input.append(inputs_vals[0].debugName())
    op.output.append(outputs_vals[0].debugName())
    in_shape = list(self._output_shape_cache[op.input[0]])
    rank = len(in_shape)
    # Normalize negative dim indices to [0, rank).
    start_dim = inputs_vals[1].node()['value']
    if (start_dim < 0):
        start_dim += rank
    end_dim = inputs_vals[2].node()['value']
    if (end_dim < 0):
        end_dim += rank
    # Collapse dims [start_dim, end_dim] into one product dimension.
    flattened = 1
    for size in in_shape[start_dim:(end_dim + 1)]:
        flattened *= size
    new_dims = (in_shape[:start_dim] + [flattened] + in_shape[(end_dim + 1):])
    dim_arg = op.arg.add()
    dim_arg.name = MaceKeyword.mace_dim_str
    dim_arg.ints.extend(new_dims)
    self.infer_shape_reshape(op)
def get_weight_from_node(self, node):
    """Return the stored parameter tensor feeding `node` (its first input)."""
    first_input = next(iter(node.inputs()))
    return self._params_dict[first_input.debugName()]
def is_trans_fc_w(self, node):
    """True when this t() node transposes a stored 2-D weight matrix."""
    in_vals = list(node.inputs())
    mace_check((len(in_vals) == 1), 't() must have 1 input')
    producer_name = in_vals[0].debugName()
    return ((producer_name in self._params_dict)
            and (len(self._params_dict[producer_name].shape) == 2))
def infer_shape_fully_connected(self, op):
    """FullyConnected output shape is [batch, out_features, 1, 1] in NCHW."""
    in_shape = self._output_shape_cache[op.input[0]]
    w_shape = self._output_shape_cache[op.input[1]]
    fmt = ConverterUtil.data_format(op)
    mace_check((fmt == DataFormat.NCHW), 'format {} is not supported'.format(fmt))
    self.add_output_shape(op, [[in_shape[0], w_shape[0], 1, 1]])
def convert_addmm(self, node, inputs_vals, outputs_vals):
    """Convert torch addmm (out = beta*bias + alpha*(input @ mat2)).

    Only the fully-connected pattern is supported: mat2 is t() of a stored
    2-D weight, bias is a stored parameter, and alpha == beta == 1. The op
    is lowered to a MatMul followed by a BiasAdd.
    """
    op = self.convert_general_op(outputs_vals)
    weight_in_node = inputs_vals[AddmmParamIdx.weight_idx].node()
    # mat2 must be a transposed stored 2-D weight for the FC lowering.
    is_mat2_w = ((weight_in_node.kind() == NodeKind.T) and self.is_trans_fc_w(weight_in_node))
    alpha = inputs_vals[AddmmParamIdx.alpha_idx].node()['value']
    alpha_type = inputs_vals[AddmmParamIdx.alpha_idx].type()
    is_alpha_fc = (isinstance(alpha_type, ValueType.IntType) and (alpha == 1))
    is_bias_w = (inputs_vals[AddmmParamIdx.bias_idx].debugName() in self._params_dict)
    beta = inputs_vals[AddmmParamIdx.beta_idx].node()['value']
    beta_type = inputs_vals[AddmmParamIdx.beta_idx].type()
    is_beta_fc = (isinstance(beta_type, ValueType.IntType) and (beta == 1))
    is_fc = (is_mat2_w and is_alpha_fc and is_bias_w and is_beta_fc)
    mace_check(is_fc, 'addmm can only be converted into FC yet')
    # Emit the MatMul under a derived name; the original op name is kept
    # for the trailing BiasAdd so downstream consumers stay wired up.
    name_back = op.name
    matmul_op_name = (op.name + '_matmul')
    op.name = matmul_op_name
    op.type = MaceOp.MatMul.name
    fc_upstream_name = inputs_vals[AddmmParamIdx.input_idx].debugName()
    op.input.extend([fc_upstream_name])
    op.output.extend([matmul_op_name])
    weight_tensor_name = (op.name + '_weight')
    weight_tensor = self.get_weight_from_node(weight_in_node)
    weight_data = weight_tensor.numpy()
    self.add_tensor_and_shape(weight_tensor_name, weight_data.shape, mace_pb2.DT_FLOAT, weight_data)
    op.input.extend([weight_tensor_name])
    transpose_a_arg = op.arg.add()
    transpose_a_arg.name = MaceKeyword.mace_transpose_a_str
    transpose_a_arg.i = 0
    # Stored weights are (out, in), so the rhs is consumed transposed.
    transpose_b_arg = op.arg.add()
    transpose_b_arg.name = MaceKeyword.mace_transpose_b_str
    transpose_b_arg.i = 1
    self.infer_shape_matmul(op)
    # Second op: BiasAdd consuming the MatMul output under the original name.
    opb = self.convert_general_op(outputs_vals)
    opb.type = MaceOp.BiasAdd.name
    bias_tensor_name = (opb.name + '_bias')
    key = inputs_vals[AddmmParamIdx.bias_idx].debugName()
    bias_data = self._params_dict[key]
    bias_data = bias_data.numpy()
    # Bias is registered with a flattened (1-D) shape.
    self.add_tensor_and_shape(bias_tensor_name, bias_data.reshape((- 1)).shape, mace_pb2.DT_FLOAT, bias_data)
    opb.input.extend([matmul_op_name, bias_tensor_name])
    opb.output.extend([name_back])
    self.infer_shape_general(opb)
def infer_shape_matmul(self, op):
    """Infer MatMul output shape, honoring transpose_a / transpose_b.

    Batch dims are taken from whichever operand has the higher rank; a
    higher-rank lhs requires a plain 2-D rhs.
    """
    lhs_shape = self._output_shape_cache[op.input[0]]
    lhs_rank = len(lhs_shape)
    lhs_rows = lhs_shape[(- 2)]
    lhs_cols = lhs_shape[(- 1)]
    rhs_shape = self._output_shape_cache[op.input[1]]
    rhs_rank = len(rhs_shape)
    rhs_rows = rhs_shape[(- 2)]
    rhs_cols = rhs_shape[(- 1)]
    transpose_a_ = ConverterUtil.get_arg(op, MaceKeyword.mace_transpose_a_str).i
    transpose_b_ = ConverterUtil.get_arg(op, MaceKeyword.mace_transpose_b_str).i
    # Effective (rows, cols) of the product after optional transposes.
    rows = (lhs_cols if transpose_a_ else lhs_rows)
    cols = (rhs_rows if transpose_b_ else rhs_cols)
    if (lhs_rank >= rhs_rank):
        if (lhs_rank > rhs_rank):
            mace_check((rhs_rank == 2), 'The rhs rank of non-batched MatMul must be 2')
        output_shape = lhs_shape.copy()
        output_shape[(lhs_rank - 2)] = rows
        output_shape[(lhs_rank - 1)] = cols
    else:
        output_shape = rhs_shape.copy()
        output_shape[(rhs_rank - 2)] = rows
        output_shape[(rhs_rank - 1)] = cols
    self.add_output_shape(op, [output_shape])
def convert_matmul(self, node, inputs_vals, outputs_vals):
    """Convert torch matmul into a MACE MatMul op.

    When the rhs is t() of a stored 2-D weight, the weight is registered
    as a constant tensor and consumed transposed (transpose_b = 1).
    An unused local (`weight_val`) was removed.
    """
    op = self.convert_general_op(outputs_vals)
    weight_in_node = inputs_vals[1].node()
    is_weight = ((weight_in_node.kind() == NodeKind.T) and self.is_trans_fc_w(weight_in_node))
    op.type = MaceOp.MatMul.name
    op.input.extend([inputs_vals[i].debugName() for i in range(2)])
    op.output.extend([outputs_vals[0].debugName()])
    if is_weight:
        # Register the stored (out, in) weight under the rhs value's name.
        weight_tensor_name = op.input[1]
        weight_tensor = self.get_weight_from_node(weight_in_node)
        weight_data = weight_tensor.numpy()
        self.add_tensor_and_shape(weight_tensor_name, weight_data.shape, mace_pb2.DT_FLOAT, weight_data)
    lhs_shape = self._output_shape_cache[op.input[0]]
    rhs_shape = self._output_shape_cache[op.input[1]]
    lhs_rank = len(lhs_shape)
    rhs_rank = len(rhs_shape)
    mace_check(((lhs_rank >= 2) and (rhs_rank >= 2)), 'The rank of MatMul must be >= 2, but lhs_rank = {} and rhs_rank = {} found'.format(lhs_rank, rhs_rank))
    transpose_a_arg = op.arg.add()
    transpose_a_arg.name = MaceKeyword.mace_transpose_a_str
    transpose_a_arg.i = 0
    # A stored weight is (out, in) and must be used transposed.
    transpose_b_arg = op.arg.add()
    transpose_b_arg.name = MaceKeyword.mace_transpose_b_str
    if is_weight:
        transpose_b_arg.i = 1
    else:
        transpose_b_arg.i = 0
    self.infer_shape_matmul(op)
def convert_pool(self, node, inputs_vals, outputs_vals):
    """Convert torch pooling (AvgPool2D / MaxPool2D / AdaptiveAvgPool2D)
    into a MACE Pooling op."""
    op = self.convert_general_op(outputs_vals)
    op.type = MaceOp.Pooling.name
    op.input.extend([inputs_vals[0].debugName()])
    op.output.extend([outputs_vals[0].debugName()])
    node_kind = node.kind()
    # Parameter positions differ between avg and max pooling nodes.
    idx_map = {NodeKind.AvgPool2D: AvgPool2DParamIdx, NodeKind.MaxPool2D: MaxPool2DParamIdx}
    if (node_kind == NodeKind.AdaptiveAvgPool2D):
        # Only output size (1, 1) is supported: it maps onto global pooling.
        output_shape_node = inputs_vals[1].node()
        output_shape_vals = list(output_shape_node.inputs())
        target_output_shape = [output_shape_vals[i].node()['value'] for i in range(2)]
        mace_check(((target_output_shape[0] == 1) and (target_output_shape[1] == 1)), 'only support output shape of [1, 1] for AdaptiveAvgPool2D')
        strides_arg = op.arg.add()
        strides_arg.name = MaceKeyword.mace_strides_str
        strides_arg.ints.extend([1, 1])
        pads_arg = op.arg.add()
        pads_arg.name = MaceKeyword.mace_padding_values_str
        pads_arg.ints.extend([0, 0])
        # Kernel [0, 0] is a placeholder: shape inference replaces it with
        # the full spatial extent when the global-pooling arg is present.
        kernels_arg = op.arg.add()
        kernels_arg.name = MaceKeyword.mace_kernel_str
        kernels_arg.ints.extend([0, 0])
        global_pooling_arg = op.arg.add()
        global_pooling_arg.name = MaceKeyword.mace_global_pooling_str
        global_pooling_arg.i = 1
    else:
        pad_node = inputs_vals[idx_map[node_kind].pad_idx].node()
        pad_vals = list(pad_node.inputs())
        mace_check((len(pad_vals) == 2), 'only support 2D pooling, but {}D padding value found'.format(len(pad_vals)))
        # torch gives per-side padding; MACE expects the total per axis.
        pads = [(2 * pad_vals[i].node()['value']) for i in range(2)]
        pads_arg = op.arg.add()
        pads_arg.name = MaceKeyword.mace_padding_values_str
        pads_arg.ints.extend(pads)
        # MaxPool2D carries a dilation parameter that MACE cannot express.
        if (node_kind == NodeKind.MaxPool2D):
            dilation_node = inputs_vals[idx_map[node_kind].dilation_idx].node()
            dilation_vals = list(dilation_node.inputs())
            dilations = [dilation_vals[i].node()['value'] for i in range(2)]
            mace_check(((dilations[0] == 1) and (dilations[1] == 1)), 'MACE pooling does not support dilation')
        kernel_node = inputs_vals[idx_map[node_kind].kernel_size_idx].node()
        kernel_vals = list(kernel_node.inputs())
        kernels = [kernel_vals[i].node()['value'] for i in range(2)]
        kernels_arg = op.arg.add()
        kernels_arg.name = MaceKeyword.mace_kernel_str
        kernels_arg.ints.extend(kernels)
        stride_node = inputs_vals[idx_map[node_kind].stride_idx].node()
        stride_vals = list(stride_node.inputs())
        strides = [stride_vals[i].node()['value'] for i in range(2)]
        strides_arg = op.arg.add()
        strides_arg.name = MaceKeyword.mace_strides_str
        strides_arg.ints.extend(strides)
        ceil_node = inputs_vals[idx_map[node_kind].ceil_mode_idx].node()
        ceil_mode = bool(ceil_node['value'])
        round_mode_arg = op.arg.add()
        round_mode_arg.name = MaceKeyword.mace_round_mode_str
        round_mode_arg.i = RoundMode.FLOOR.value
        if ceil_mode:
            round_mode_arg.i = RoundMode.CEIL.value
        if (node_kind == NodeKind.AvgPool2D):
            count_include_pad_node = inputs_vals[AvgPool2DParamIdx.count_include_pad_idx].node()
            count_include_pad = bool(count_include_pad_node['value'])
            # count_include_pad is only representable when padding is zero.
            if count_include_pad:
                mace_check(((pads[0] == 0) and (pads[1] == 0)), 'if count_include_pad is set, pad must be zero. pad values ({},{}) found.'.format(pads[0], pads[1]))
            divisor_override_node = inputs_vals[AvgPool2DParamIdx.divisor_override_idx].node()
            mace_check(isinstance(divisor_override_node.output().type(), ValueType.NoneType), 'MACE does not support divisor_override parameter for AvgPool2D')
    pooling_type_arg = op.arg.add()
    pooling_type_arg.name = MaceKeyword.mace_pooling_type_str
    pooling_type_arg.i = self.pooling_type_mode[node_kind].value
    self.infer_shape_conv2d_pool(op)
def node_to_int(self, shape_node):
    """Resolve a scalar shape-expression node to a concrete int.

    Handles Constant nodes, size() lookups (via the cached producer
    shape), and otherwise recurses through single-input wrapper nodes.
    """
    kind = shape_node.kind()
    if (kind == NodeKind.Constant):
        return shape_node['value']
    node_inputs = list(shape_node.inputs())
    if (kind == NodeKind.Size):
        src_node = node_inputs[0].node()
        axis = self.node_to_int(node_inputs[1].node())
        src_shape = self._output_shape_cache[src_node.output().debugName()]
        return src_shape[axis]
    # Unknown wrapper: follow its first input.
    return self.node_to_int(node_inputs[0].node())
def infer_shape_reshape(self, op):
    """Resolve the target dims of a Reshape, expanding at most one -1."""
    in_shape = self._output_shape_cache[op.input[0]]
    dims = list(ConverterUtil.get_arg(op, MaceKeyword.mace_dim_str).ints)
    total_in = 1
    for size in in_shape:
        total_in *= size
    out_shape = []
    known_product = 1
    wildcard_idx = (- 1)
    wildcard_count = 0
    for (i, dim) in enumerate(dims):
        if (dim == (- 1)):
            wildcard_idx = i
            wildcard_count += 1
            out_shape.append(1)  # placeholder, patched below
        else:
            out_shape.append(dim)
            known_product *= dim
    mace_check((wildcard_count <= 1), 'only 0 or 1 negative shape supported')
    if (wildcard_idx != (- 1)):
        # The wildcard absorbs whatever the explicit dims don't cover.
        out_shape[wildcard_idx] = int((total_in / known_product))
    self.add_output_shape(op, [out_shape])
def convert_reshape(self, node, inputs_vals, outputs_vals):
    """Convert torch reshape/view into a MACE Reshape op.

    Target dims may be literal constants or Int() expressions derived
    from tensor sizes, resolved via node_to_int.
    """
    op = self.convert_general_op(outputs_vals)
    op.type = MaceOp.Reshape.name
    op.input.extend([inputs_vals[0].debugName()])
    op.output.extend([outputs_vals[0].debugName()])
    dim_arg = op.arg.add()
    dim_arg.name = MaceKeyword.mace_dim_str
    # The second input is a list-construct node holding the shape entries.
    shape_list_node = list(node.inputs())[1].node()
    reshape_dims = []
    for shape_val in shape_list_node.inputs():
        shape_node = shape_val.node()
        _kind = shape_node.kind()
        if (_kind == NodeKind.Constant):
            reshape_dims.append(shape_node['value'])
        elif (_kind == NodeKind.Int):
            _dim = int(self.node_to_int(shape_node))
            reshape_dims.append(_dim)
        else:
            # NOTE(review): unsupported entries are only warned about, so the
            # emitted dim list may come out shorter than intended — confirm.
            print('unsupported shape node kind {}'.format(_kind))
    dim_arg.ints.extend(reshape_dims)
    self.infer_shape_reshape(op)
def infer_shape_identity(self, op):
    """Identity ops keep their input shape unchanged."""
    self.add_output_shape(op, [self._output_shape_cache[op.input[0]]])
def convert_dropout(self, node, inputs_vals, outputs_vals):
    """Convert inference-mode dropout into a MACE Identity (a no-op)."""
    op = self.convert_general_op(outputs_vals)
    training_flag = int(inputs_vals[2].node()['value'])
    mace_check((training_flag == 0), 'for inference, dropout must be disabled')
    op.type = MaceOp.Identity.name
    op.input.append(inputs_vals[0].debugName())
    op.output.append(outputs_vals[0].debugName())
    self.infer_shape_identity(op)
def infer_shape_transpose(self, op):
    """Permute the cached input shape by the op's dims argument."""
    in_shape = self._output_shape_cache[op.input[0]]
    perm = ConverterUtil.get_arg(op, MaceKeyword.mace_dims_str).ints
    out_shape = np.zeros(len(in_shape), dtype=np.int32)
    for (dst, src) in enumerate(perm):
        out_shape[dst] = in_shape[src]
    self.add_output_shape(op, [out_shape])
def convert_t(self, node, inputs_vals, outputs_vals):
    """Convert torch t(): Transpose for 2-D inputs, Identity for rank <= 1.

    t() nodes listed in self.ignore_t (e.g. folded FC weights) are skipped.
    """
    if (node.output().debugName() in self.ignore_t):
        return
    op = self.convert_general_op(outputs_vals)
    op.input.append(inputs_vals[0].debugName())
    op.output.append(outputs_vals[0].debugName())
    rank = len(self._output_shape_cache[op.input[0]])
    if (rank <= 1):
        # 0-D / 1-D tensors are unchanged by t().
        op.type = MaceOp.Identity.name
        self.infer_shape_general(op)
        return
    op.type = MaceOp.Transpose.name
    perm_arg = op.arg.add()
    perm_arg.name = MaceKeyword.mace_dims_str
    perm_arg.ints.extend([1, 0])
    self.infer_shape_transpose(op)
def add_tensor_and_shape(self, name, shape, data_type, value):
    """Register a constant tensor in the net def and cache its shape."""
    tensor = self._mace_net_def.tensors.add()
    tensor.name = name
    tensor.data_type = data_type
    tensor.dims.extend(list(shape))
    tensor.float_data.extend(value.flat)
    self._output_shape_cache[name] = np.array(shape)
def stitch_boxes_into_lines(boxes, max_x_dist=10, min_y_overlap_ratio=0.8):
    """Stitch horizontally adjacent text boxes on the same line together.

    Args:
        boxes: list of dicts with 'box' (8 coords; x at even indices, y at
            odd indices) and 'text'.
        max_x_dist: max horizontal gap allowed between stitched neighbors.
        min_y_overlap_ratio: min vertical overlap for two boxes to count as
            the same line (delegated to is_on_same_line).

    Returns:
        List of merged boxes; each merged 'text' joins members with spaces
        and 'box' is their axis-aligned bounding rectangle.

    Note: a dead `i = 0` initializer before the main loop was removed.
    """
    if (len(boxes) <= 1):
        return boxes
    merged_boxes = []
    # Sort by left edge so each line can be scanned left to right.
    x_sorted_boxes = sorted(boxes, key=(lambda x: np.min(x['box'][::2])))
    skip_idxs = set()
    for i in range(len(x_sorted_boxes)):
        if (i in skip_idxs):
            continue
        # Greedily collect every box on the same line as box i.
        rightmost_box_idx = i
        line = [rightmost_box_idx]
        for j in range((i + 1), len(x_sorted_boxes)):
            if (j in skip_idxs):
                continue
            if is_on_same_line(x_sorted_boxes[rightmost_box_idx]['box'], x_sorted_boxes[j]['box'], min_y_overlap_ratio):
                line.append(j)
                skip_idxs.add(j)
                rightmost_box_idx = j
        # Split the line wherever the horizontal gap exceeds max_x_dist.
        lines = []
        line_idx = 0
        lines.append([line[0]])
        for k in range(1, len(line)):
            curr_box = x_sorted_boxes[line[k]]
            prev_box = x_sorted_boxes[line[(k - 1)]]
            dist = (np.min(curr_box['box'][::2]) - np.max(prev_box['box'][::2]))
            if (dist > max_x_dist):
                line_idx += 1
                lines.append([])
            lines[line_idx].append(line[k])
        # Merge each group into one axis-aligned box with joined text.
        for box_group in lines:
            merged_box = {}
            merged_box['text'] = ' '.join([x_sorted_boxes[idx]['text'] for idx in box_group])
            (x_min, y_min) = (float('inf'), float('inf'))
            (x_max, y_max) = (float('-inf'), float('-inf'))
            for idx in box_group:
                x_max = max(np.max(x_sorted_boxes[idx]['box'][::2]), x_max)
                x_min = min(np.min(x_sorted_boxes[idx]['box'][::2]), x_min)
                y_max = max(np.max(x_sorted_boxes[idx]['box'][1::2]), y_max)
                y_min = min(np.min(x_sorted_boxes[idx]['box'][1::2]), y_min)
            merged_box['box'] = [x_min, y_min, x_max, y_min, x_max, y_max, x_min, y_max]
            merged_boxes.append(merged_box)
    return merged_boxes
class HierarchicalSoftmax(Layer):
    """Two-level hierarchical softmax layer (Keras 1.x / Theano backend).

    The output vocabulary of size `output_dim` is factored into roughly
    sqrt(output_dim) classes x sqrt(output_dim) outputs per class and
    scored with Theano's h_softmax.
    """

    def __init__(self, output_dim, init='glorot_uniform', **kwargs):
        self.init = initializations.get(init)
        self.output_dim = output_dim

        def hshape(n):
            # Factor n into (l1, l2) with l1 * l2 >= n, both near sqrt(n).
            from math import sqrt, ceil
            l1 = ceil(sqrt(n))
            l2 = ceil((n / l1))
            return (int(l1), int(l2))
        (self.n_classes, self.n_outputs_per_class) = hshape(output_dim)
        super(HierarchicalSoftmax, self).__init__(**kwargs)

    def build(self, input_shape):
        # input_shape arrives as a list of shapes (features + targets).
        self.input_spec = [InputSpec(shape=shape) for shape in input_shape]
        input_dim = self.input_spec[0].shape[(- 1)]
        # W1/b1 score the class level; W2/b2 score outputs within a class.
        self.W1 = self.init((input_dim, self.n_classes), name='{}_W1'.format(self.name))
        self.b1 = K.zeros((self.n_classes,), name='{}_b1'.format(self.name))
        self.W2 = self.init((self.n_classes, input_dim, self.n_outputs_per_class), name='{}_W2'.format(self.name))
        self.b2 = K.zeros((self.n_classes, self.n_outputs_per_class), name='{}_b2'.format(self.name))
        self.trainable_weights = [self.W1, self.b1, self.W2, self.b2]

    def get_output_shape_for(self, input_shape):
        # Last dim depends on train/test mode, so it is left undefined.
        return (input_shape[0][0], input_shape[0][1], None)

    def call(self, X, mask=None):
        input_shape = self.input_spec[0].shape
        # Flatten (batch, time, dim) to (batch*time, dim) for h_softmax.
        x = K.reshape(X[0], ((- 1), input_shape[2]))
        # With a target, h_softmax returns only the target probabilities.
        target = (X[1].flatten() if self.trainable else None)
        Y = h_softmax(x, K.shape(x)[0], self.output_dim, self.n_classes, self.n_outputs_per_class, self.W1, self.b1, self.W2, self.b2, target)
        output_dim = (1 if self.trainable else self.output_dim)
        input_length = K.shape(X[0])[1]
        y = K.reshape(Y, ((- 1), input_length, output_dim))
        return y

    def get_config(self):
        config = {'output_dim': self.output_dim, 'init': self.init.__name__}
        base_config = super(HierarchicalSoftmax, self).get_config()
        return dict((list(base_config.items()) + list(config.items())))
def graph_hyperparamdist_file(filename, ymin=0, ymax=500, hpname='', gname=''):
    """Read a 3-row CSV (categories, means, stds) and plot the distribution.

    Each row's first column is a label; the remaining columns are values.
    """
    rows = [[], [], []]
    with open(filename, 'r') as hp_dist:
        for (r, raw_line) in enumerate(hp_dist):
            fields = raw_line.rstrip().split(',')
            rows[r] = np.array(fields[1:]).astype(float)
    graph_mean_and_std(categories=rows[0], means=rows[1], stds=rows[2], ymin=ymin, ymax=ymax, xaxis=hpname, filename=gname)
def setup_camera(camera_parameters, camera_scale):
    """Configure Blender's default camera from pinhole intrinsics.

    camera_parameters holds width/height, focal lengths f_x/f_y and the
    principal point p_x/p_y; camera_scale rescales all of them.
    """
    # Place the camera at the origin; the (0, pi, pi) Euler rotation flips
    # it to match the computer-vision camera convention — TODO confirm axes.
    bpy.data.objects['Camera'].location = (0, 0, 0)
    bpy.data.objects['Camera'].rotation_euler = (0, pi, pi)
    width = (camera_scale * camera_parameters['width'])
    height = (camera_scale * camera_parameters['height'])
    # Mean of f_x and f_y — assumes near-square pixels; verify upstream.
    f = ((camera_scale * (camera_parameters['f_x'] + camera_parameters['f_y'])) / 2.0)
    p_x = (camera_scale * camera_parameters['p_x'])
    p_y = (camera_scale * camera_parameters['p_y'])
    camera = bpy.data.cameras['Camera']
    # With lens = 1, sensor_width = width / f yields the desired FOV, and
    # shift_x / shift_y encode the principal-point offset in Blender's
    # sensor-relative units (both normalized by width).
    camera.lens = 1
    camera.sensor_width = (width / f)
    camera.shift_x = (0.5 - (p_x / width))
    camera.shift_y = ((p_y - (0.5 * height)) / width)
class CoNLL03Reader(object):
    """Sequential reader for CoNLL-2003 style files.

    Each sentence is a blank-line-separated group of space-separated token
    rows (index, word, POS, chunk, NER); tokens are mapped to ids through
    the supplied alphabets.
    """

    def __init__(self, file_path, word_alphabet, char_alphabet, pos_alphabet, chunk_alphabet, ner_alphabet):
        # File handle is kept open; callers must call close() when done.
        self.__source_file = open(file_path, 'r')
        self.__word_alphabet = word_alphabet
        self.__char_alphabet = char_alphabet
        self.__pos_alphabet = pos_alphabet
        self.__chunk_alphabet = chunk_alphabet
        self.__ner_alphabet = ner_alphabet

    def close(self):
        """Close the underlying source file."""
        self.__source_file.close()

    def getNext(self, normalize_digits=True):
        """Read and return the next sentence as a NERInstance, or None at EOF.

        When normalize_digits is True, digits in words are replaced by '0'.
        """
        line = self.__source_file.readline()
        # Skip blank separator lines between sentences.
        while ((len(line) > 0) and (len(line.strip()) == 0)):
            line = self.__source_file.readline()
        if (len(line) == 0):
            return None  # end of file
        # Accumulate token rows until the next blank line.
        lines = []
        while (len(line.strip()) > 0):
            line = line.strip()
            lines.append(line.split(' '))
            line = self.__source_file.readline()
        length = len(lines)
        if (length == 0):
            return None
        words = []
        word_ids = []
        char_seqs = []
        char_id_seqs = []
        postags = []
        pos_ids = []
        chunk_tags = []
        chunk_ids = []
        ner_tags = []
        ner_ids = []
        for tokens in lines:
            # Column 1 is the word; build its character sequence first.
            chars = []
            char_ids = []
            for char in tokens[1]:
                chars.append(char)
                char_ids.append(self.__char_alphabet.get_index(char))
            # Truncate overly long words to MAX_CHAR_LENGTH characters.
            if (len(chars) > MAX_CHAR_LENGTH):
                chars = chars[:MAX_CHAR_LENGTH]
                char_ids = char_ids[:MAX_CHAR_LENGTH]
            char_seqs.append(chars)
            char_id_seqs.append(char_ids)
            word = (DIGIT_RE.sub('0', tokens[1]) if normalize_digits else tokens[1])
            pos = tokens[2]
            chunk = tokens[3]
            ner = tokens[4]
            words.append(word)
            word_ids.append(self.__word_alphabet.get_index(word))
            postags.append(pos)
            pos_ids.append(self.__pos_alphabet.get_index(pos))
            chunk_tags.append(chunk)
            chunk_ids.append(self.__chunk_alphabet.get_index(chunk))
            ner_tags.append(ner)
            ner_ids.append(self.__ner_alphabet.get_index(ner))
        return NERInstance(Sentence(words, word_ids, char_seqs, char_id_seqs), postags, pos_ids, chunk_tags, chunk_ids, ner_tags, ner_ids)
class UBase(Gate):
    """Single-qubit base gate U(theta, phi, lam)."""

    def __init__(self, theta, phi, lam):
        super().__init__('U', 1, [theta, phi, lam])

    def inverse(self):
        """Return the inverse gate: U(theta, phi, lam)^-1 == U(-theta, -lam, -phi)."""
        (theta, phi, lam) = self.params
        return UBase((- theta), (- lam), (- phi))

    def to_matrix(self):
        """Return the 2x2 complex unitary matrix for this gate."""
        (theta, phi, lam) = self.params
        half = (theta / 2)
        cos_h = numpy.cos(half)
        sin_h = numpy.sin(half)
        return numpy.array(
            [[cos_h, ((- numpy.exp((1j * lam))) * sin_h)],
             [(numpy.exp((1j * phi)) * sin_h), (numpy.exp((1j * (phi + lam))) * cos_h)]],
            dtype=complex)
def is_valid_action(state: State, action: chex.Array) -> chex.Array:
    """Return whether `action` targets a still-unexplored cell on the board."""
    return (state.board[tuple(action)] == UNEXPLORED_ID)
def custom_scorer(net, ds, y=None):
    """Accuracy scorer handling both multi-class and single-logit outputs."""
    raw = net.predict_proba(ds)
    logits = torch.Tensor(raw)
    if (raw.shape[1] > 1):
        # Multi-class head: argmax over softmax probabilities.
        preds = torch.softmax(logits, dim=1).argmax(dim=1)
    else:
        # Single-logit binary head: threshold the sigmoid at 0.5.
        preds = torch.round(torch.sigmoid(logits))
    return accuracy_score(preds, y)
class DepthwiseConv1d(nn.Module):
    """Depthwise 1-D convolution: groups equals the input channel count."""

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, padding: int = 0, bias: bool = False) -> None:
        super(DepthwiseConv1d, self).__init__()
        # Depthwise conv needs a whole-number per-channel multiplier.
        assert ((out_channels % in_channels) == 0), 'out_channels should be constant multiple of in_channels'
        self.conv = nn.Conv1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            groups=in_channels,
            stride=stride,
            padding=padding,
            bias=bias,
        )

    def forward(self, inputs: Tensor) -> Tensor:
        """Apply the depthwise convolution to `inputs` of shape (N, C, L)."""
        return self.conv(inputs)
class VGG(nn.Module):
    """Spiking VGG-style classifier head over a conv backbone.

    Relies on project layers LIFSpike (spiking activation) and tdLayer
    (time-distributed wrapper); `features` is the conv feature extractor.
    """

    def __init__(self, features):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(LIFSpike(), tdLayer(nn.Linear(512, 512)), LIFSpike(), tdLayer(nn.Linear(512, 512)), LIFSpike(), tdLayer(nn.Linear(512, 10)), LIFSpike())
        # Number of simulation time steps, from the global SNN config.
        (self.steps, _, _) = get_snn_param()
        # He-style initialization for all conv layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
                m.bias.data.zero_()

    def forward(self, x):
        x = self.features(x)
        # Flatten spatial/channel dims, keeping the last axis separate;
        # dim 4 is presumably the time axis of a 5-D tensor — TODO confirm.
        x = x.view(x.shape[0], (- 1), x.shape[4])
        x = self.classifier(x)
        # Rate decoding: average spikes over the time steps.
        out = (torch.sum(x, dim=2) / self.steps)
        return out
def polar_position(r, theta, start_point):
    """Convert a polar offset (r, theta) to Cartesian, relative to start_point."""
    offset = np.array([(r * math.cos(theta)), (r * math.sin(theta))])
    return (offset + start_point)
def sizeof_fmt(size, suffix='B'):
    """Format a byte count as a human-readable 1024-based string."""
    for prefix in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
        if (abs(size) < 1024.0):
            return f'{size:3.1f} {prefix}{suffix}'
        size /= 1024.0
    # Anything beyond zetta falls through to the yotta prefix.
    return f'{size:3.1f} Y{suffix}'
class VAE(nn.Module):
    """Discretized-mixture-of-logistics VAE with a pluggable prior.

    Fixes applied:
    - `reparameterize` was missing `self`, so self.reparameterize(mu, logvar)
      raised a TypeError (args shifted into the wrong slots).
    - Two bare `_grad()` statements before generate/reconstruct were mangled
      `@torch.no_grad()` decorators and have been restored as such.
    """

    def __init__(self, x_shape, prior=args.prior):
        super().__init__()
        self.x_shape = x_shape
        self.z_dim = args.z_dim
        self.z_shape = get_shape(self.z_dim)
        # The prior class is looked up by name in the module globals.
        self.p_z = globals()[prior](self.z_shape)
        self.q_z = q_z(self.z_shape, self.x_shape)
        self.p_x = p_x(self.x_shape, self.z_shape)
        self.recon_loss = partial(dmol_loss, nc=self.x_shape[0])
        self.sample_distribution = partial(sample_from_dmol, nc=self.x_shape[0])

    def initialize(self, dataloader):
        """Run one forward/ELBO pass to trigger data-dependent init."""
        with torch.no_grad():
            (x, _) = next(iter(dataloader))
            x = x.to(args.device)
            output = self.forward(x)
            self.calculate_elbo(x, output)
        return

    def reparameterize(self, z_mu, z_logvar):
        """Sample z ~ N(z_mu, exp(z_logvar)) via the reparameterization trick."""
        epsilon = torch.randn_like(z_mu)
        return (z_mu + (torch.exp((0.5 * z_logvar)) * epsilon))

    @torch.no_grad()
    def generate(self, n_samples=args.n_samples):
        """Draw n_samples from the prior and decode them to images."""
        z = self.p_z.sample(z_shape=self.z_shape, n_samples=n_samples, device=args.device).to(args.device)
        x_logits = self.p_x(z)
        x_hat = self.sample_distribution(x_logits, random_sample=False)
        return x_hat

    @torch.no_grad()
    def reconstruct(self, x, **kwargs):
        """Encode-decode `x` and return the reconstruction."""
        x_logits = self.forward(x).get('x_logits')
        x_hat = self.sample_distribution(x_logits, random_sample=False)
        return x_hat

    def calculate_elbo(self, input, outputs):
        """Return (nelbo, diagnostics) for one batch given forward() outputs."""
        (x, x_logits) = (input, outputs.get('x_logits'))
        (z_q, z_q_mean, z_q_logvar) = (outputs.get('z_q'), outputs.get('z_q_mean'), outputs.get('z_q_logvar'))
        RE = (- self.recon_loss(x, x_logits).mean())
        log_p_z = self.p_z.log_p(z_q)
        log_q_z = log_normal_diag(z_q, z_q_mean, z_q_logvar)
        KL = (log_q_z - log_p_z).mean()
        nelbo = (RE + KL)
        diagnostics = {'bpd': (nelbo.item() / (np.prod(x.shape[1:]) * np.log(2.0))), 'nelbo': nelbo.item(), 'RE': RE.mean(dim=0).item(), 'KL': KL.mean(dim=0).item()}
        return (nelbo, diagnostics)

    def forward(self, x, **kwargs):
        """Encode x, sample z, decode; return all intermediates in a dict."""
        (z_q_mean, z_q_logvar) = self.q_z(x)
        z_q = self.reparameterize(z_q_mean, z_q_logvar)
        x_logits = self.p_x(z_q)
        return {'z_q': z_q, 'z_q_mean': z_q_mean, 'z_q_logvar': z_q_logvar, 'x_logits': x_logits}
# NOTE(review): `_registry(...)` reads like a mangled registration decorator
# (likely `@pattern_registry(pattern_type='QuantizeFusion')`) — confirm
# against the original source before relying on this class being registered.
_registry(pattern_type='QuantizeFusion')
class QuantizeFusion(Pattern):
    """Fuse standalone Quantize nodes into their producing ops.

    For each Quantize node whose producer can absorb it, the producer gets
    the Quantize's min/max input tensors and the requested output dtype,
    consumers are rewired to bypass the Quantize, and the node is removed.
    """

    def __call__(self, model):
        def search_quant_fusion(node):
            # Walk to the producer of `node`'s first input; return
            # (producer, True) when the Quantize can be folded into it.
            if (node.input_tensors[0].source_op == []):
                return (None, False)
            pre_node = model.get_node_by_name(node.input_tensors[0].source_op[0])
            if (pre_node.input_tensors == []):
                return (None, False)
            # Check whether the producer is itself fed by a Quantize node.
            is_from_quant = False
            if pre_node.input_tensors[0].source_op:
                try:
                    is_from_quant = (True if (model.get_node_by_name(pre_node.input_tensors[0].source_op[0]).op_type == 'Quantize') else False)
                except:
                    is_from_quant = False
            # Fusable producers: already-quantized ops with enough inputs,
            # Softmax, or InnerProduct/Matmul (when unquantized model or
            # fed from a Quantize).
            if (((pre_node.input_tensors[0].name in quant_info) and (len(pre_node.input_tensors) >= 6)) or (pre_node.op_type == 'Softmax') or ((EXECUTOR_TYPE.get(pre_node.op_type, pre_node.op_type) in ['InnerProduct', 'Matmul']) and ((not quant_info) or is_from_quant))):
                return (pre_node, True)
            elif (pre_node.op_type == 'Reshape'):
                # Look through Reshape nodes to the real producer.
                return search_quant_fusion(pre_node)
            else:
                return (None, False)

        quant_info = util.get_quant_info()
        # Torch models without quantization info have nothing to fuse.
        if (model.inquire_config_item('framework') == 'torch'):
            if (not quant_info):
                return model
        remove_node_name = []
        for node in model.nodes:
            if (node.op_type == 'Quantize'):
                dtype = node.attr['output_dtype']
                (quant_node, can_fuse) = search_quant_fusion(node)
                if can_fuse:
                    if ((dtype == 'u8') or (dtype == 's8')):
                        if (quant_node.op_type == 'Softmax'):
                            def is_lat_model(model, p=None):
                                # LAT models are detected by a
                                # TopK -> GatherElements pattern.
                                if (p == None):
                                    p = [[(0, 'TopK'), (1, 'GatherElements')]]
                                match_result = util.search_pattern(p, model)
                                return (len(match_result) != 0)
                            if is_lat_model(model):
                                # LAT: keep the Quantize, just force u8 output.
                                node.attr = OrderedDict({'output_dtype': 'u8'})
                                continue
                            else:
                                # Move the Quantize's min/max tensors onto Softmax.
                                model.change_node_input_tensors(quant_node.name, 1, node.input_tensors[1], 'insert')
                                model.change_node_input_tensors(quant_node.name, 2, node.input_tensors[2], 'insert')
                                quant_node.attr['output_dtype'] = 'u8'
                        else:
                            # Torch appends min/max; other frameworks replace
                            # the last two inputs in place.
                            if (model.inquire_config_item('framework') == 'torch'):
                                t_len = len(quant_node.input_tensors)
                                model.change_node_input_tensors(quant_node.name, t_len, node.input_tensors[1], 'insert')
                                model.change_node_input_tensors(quant_node.name, (t_len + 1), node.input_tensors[2], 'insert')
                            else:
                                model.change_node_input_tensors(quant_node.name, (- 2), node.input_tensors[1], 'modify')
                                model.change_node_input_tensors(quant_node.name, (- 1), node.input_tensors[2], 'modify')
                            quant_node.attr['output_dtype'] = node.attr['output_dtype']
                    elif (dtype == 'bf16'):
                        quant_node.attr['output_dtype'] = dtype
                    # Rewire every consumer of the Quantize output to read the
                    # Quantize input directly, then schedule the node removal.
                    for dst_node_name in node.output_tensors[0].dest_op:
                        dst_node = model.get_node_by_name(dst_node_name)
                        for (idx, input_tensor) in enumerate(dst_node.input_tensors):
                            if (node.output_tensors[0].name == input_tensor.name):
                                model.change_node_input_tensors(dst_node_name, idx, node.input_tensors[0], 'modify')
                    remove_node_name.append(node.name)
        model.remove_nodes(remove_node_name)
        return model
class FactorGNNSBMs(nn.Module):
    """FactorGNN graph classifier for SBM-style datasets.

    Stacks three DisentangleLayers (the first with `num_latent` factors,
    the deeper two with `num_latent // 2`), each followed by batch-norm
    and LeakyReLU, then a two-layer MLP classifier head.
    """

    def __init__(self, g, num_layers, in_dim, num_hidden, num_latent, feat_drop, residual, n_cls=2):
        super(FactorGNNSBMs, self).__init__()
        self.g = g
        self.layers = nn.ModuleList()
        self.BNs = nn.ModuleList()
        self.feat_drop = feat_drop
        self.activate = torch.nn.LeakyReLU(negative_slope=0.2)
        # Node ids are embedded (vocabulary of 200) into `in_dim` features.
        self.embed = nn.Embedding(200, in_dim)
        self.layers.append(DisentangleLayer(num_latent, in_dim, num_hidden, cat=True))
        self.BNs.append(nn.BatchNorm1d(num_hidden))
        self.layers.append(DisentangleLayer(max((num_latent // 2), 1), num_hidden, num_hidden, cat=True))
        self.BNs.append(nn.BatchNorm1d(num_hidden))
        self.layers.append(DisentangleLayer(max((num_latent // 2), 1), num_hidden, num_hidden, cat=True))
        self.BNs.append(nn.BatchNorm1d(num_hidden))
        # Sentinel entries; forward() iterates over [:-1] and skips them.
        self.layers.append(None)
        self.BNs.append(None)
        self.BN1 = nn.BatchNorm1d(num_hidden)
        self.classifier1 = nn.Linear(num_hidden, (num_hidden // 2))
        self.classifier2 = nn.Linear((num_hidden // 2), n_cls)

    def forward(self, x, e, snorm_n, snorm_e):
        """Return class logits for node-id batch `x`.

        `e` and `snorm_e` are unused but kept for a uniform model
        interface across sibling models.
        """
        feat = self.embed(x)
        for (layer, bn) in zip(self.layers[:(- 1)], self.BNs[:(- 1)]):
            # (removed dead `feat_prim = feat`; it was never read)
            feat = torch_fn.dropout(feat, self.feat_drop)
            feat = layer(self.g, feat)
            feat = (feat * snorm_n)  # graph-size normalization
            feat = bn(feat)
            feat = self.activate(feat)
        feat = torch_fn.dropout(feat, self.feat_drop)
        h = torch.relu(feat)
        h = self.classifier1(h)
        h = torch.relu(h)
        h = self.classifier2(h)
        return h

    def get_factor(self):
        """Return the attention factors of every DisentangleLayer."""
        return [layer.get_factor() for layer in self.layers if isinstance(layer, DisentangleLayer)]

    def compute_disentangle_loss(self):
        """Return the per-layer disentanglement loss tuples."""
        return [layer.compute_disentangle_loss() for layer in self.layers if isinstance(layer, DisentangleLayer)]

    @staticmethod
    def merge_loss(list_loss):
        """Sum the discrimination terms of `list_loss`.

        Each entry is indexable as (discrimination_loss, distribution_loss);
        the distribution term is intentionally ignored.

        BUG FIX: this was declared without `self` and without @staticmethod,
        so `self.merge_loss(...)` raised a TypeError (the instance was bound
        to `list_loss`).  @staticmethod keeps class-level calls working and
        makes instance calls valid.
        """
        total_loss = 0
        for loss in list_loss:
            total_loss += loss[0]
        return total_loss
class CPUinfo():
    """CPU topology probe (Linux only, via `lscpu`).

    Attributes:
        cores: physical cores per socket (0 if not detected).
        sockets: number of CPU sockets (0 if not detected).
        cpuinfo: reserved; currently always empty.

    Raises:
        RuntimeError: when constructed on Windows.
    """

    def __init__(self):
        self.cores = 0
        self.sockets = 0
        self.cpuinfo = []
        if (platform.system() == 'Windows'):
            raise RuntimeError('Windows platform is not supported!!!')
        elif (platform.system() == 'Linux'):
            args = ['lscpu']
            lscpu_info = subprocess.check_output(args, universal_newlines=True).split('\n')
            # Hoisted out of the loop: compile the pattern once instead of
            # once per output line.
            int_re = re.compile(r'\d+')
            for line in lscpu_info:
                if (line.find('Core(s) per socket:') >= 0):
                    core_per_socket_int = [int(i) for i in int_re.findall(line)]
                    self.cores = core_per_socket_int[0]
                elif (line.find('Socket(s):') >= 0):
                    socket_int = [int(i) for i in int_re.findall(line)]
                    self.sockets = socket_int[0]

    def get_cores_per_socket(self):
        """Return the number of cores per socket."""
        return self.cores

    def get_sockets(self):
        """Return the number of sockets."""
        return self.sockets
def partition_data(datadir, partition, n_nets, alpha, logger):
    """Split CIFAR-10 training samples across `n_nets` federated clients.

    Args:
        datadir: dataset root passed to load_cifar10_data.
        partition: strategy name --
            'homo': IID random equal split;
            'hetero': per-class Dirichlet split, resampled until every
                client has at least 10 samples;
            'dir': per-client Dirichlet class priors;
            'n_cls': each client draws from exactly int(alpha) classes;
            'my_part': clients grouped into `alpha` shards sharing a
                Dirichlet(0.3) class prior.
        n_nets: number of clients.
        alpha: strategy-dependent parameter (Dirichlet concentration,
            class count, or shard count -- see `partition` above).
        logger: logger for progress output.

    Returns:
        (X_train, y_train, X_test, y_test, net_dataidx_map,
        traindata_cls_counts) where net_dataidx_map maps client id to its
        training-sample indices.
    """
    logger.info('partition data')
    (X_train, y_train, X_test, y_test) = load_cifar10_data(datadir)
    n_train = X_train.shape[0]
    if (partition == 'homo'):
        # IID: shuffle all indices, split into n_nets near-equal chunks.
        total_num = n_train
        idxs = np.random.permutation(total_num)
        batch_idxs = np.array_split(idxs, n_nets)
        net_dataidx_map = {i: batch_idxs[i] for i in range(n_nets)}
    elif (partition == 'hetero'):
        min_size = 0
        K = 10  # number of CIFAR-10 classes
        N = y_train.shape[0]
        logger.info(('N = ' + str(N)))
        net_dataidx_map = {}
        # Resample the whole partition until every client has >= 10 samples.
        while (min_size < 10):
            idx_batch = [[] for _ in range(n_nets)]
            for k in range(K):
                idx_k = np.where((y_train == k))[0]
                np.random.shuffle(idx_k)
                proportions = np.random.dirichlet(np.repeat(alpha, n_nets))
                # Zero out clients that already hold >= N/n_nets samples to
                # keep sizes balanced, then renormalize.
                proportions = np.array([(p * (len(idx_j) < (N / n_nets))) for (p, idx_j) in zip(proportions, idx_batch)])
                proportions = (proportions / proportions.sum())
                # Cumulative proportions become split points into idx_k.
                proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:(- 1)]
                idx_batch = [(idx_j + idx.tolist()) for (idx_j, idx) in zip(idx_batch, np.split(idx_k, proportions))]
                min_size = min([len(idx_j) for idx_j in idx_batch])
        for j in range(n_nets):
            np.random.shuffle(idx_batch[j])
            net_dataidx_map[j] = idx_batch[j]
    elif (partition == 'dir'):
        n_client = n_nets
        n_cls = 10
        # sigma=0 makes the lognormal degenerate: equal sizes per client.
        n_data_per_clnt = (len(y_train) / n_client)
        clnt_data_list = np.random.lognormal(mean=np.log(n_data_per_clnt), sigma=0, size=n_client)
        clnt_data_list = ((clnt_data_list / np.sum(clnt_data_list)) * len(y_train)).astype(int)
        # One Dirichlet class prior per client.
        cls_priors = np.random.dirichlet(alpha=([alpha] * n_cls), size=n_client)
        prior_cumsum = np.cumsum(cls_priors, axis=1)
        idx_list = [np.where((y_train == i))[0] for i in range(n_cls)]
        cls_amount = [len(idx_list[i]) for i in range(n_cls)]
        net_dataidx_map = {}
        for j in range(n_client):
            net_dataidx_map[j] = []
        # Assign one sample at a time to a random client according to its
        # class prior until all client budgets are exhausted.
        while (np.sum(clnt_data_list) != 0):
            curr_clnt = np.random.randint(n_client)
            if (clnt_data_list[curr_clnt] <= 0):
                continue
            clnt_data_list[curr_clnt] -= 1
            curr_prior = prior_cumsum[curr_clnt]
            while True:
                # Inverse-CDF draw of a class from the client's prior.
                cls_label = np.argmax((np.random.uniform() <= curr_prior))
                if (cls_amount[cls_label] <= 0):
                    # Class exhausted: redraw.
                    continue
                cls_amount[cls_label] -= 1
                net_dataidx_map[curr_clnt].append(idx_list[cls_label][cls_amount[cls_label]])
                break
    elif (partition == 'n_cls'):
        n_client = n_nets
        n_cls = 10
        n_data_per_clnt = (len(y_train) / n_client)
        clnt_data_list = np.random.lognormal(mean=np.log(n_data_per_clnt), sigma=0, size=n_client)
        clnt_data_list = ((clnt_data_list / np.sum(clnt_data_list)) * len(y_train)).astype(int)
        # Each client gets a uniform prior over exactly int(alpha) classes.
        cls_priors = np.zeros(shape=(n_client, n_cls))
        for i in range(n_client):
            cls_priors[i][random.sample(range(n_cls), int(alpha))] = (1.0 / alpha)
        prior_cumsum = np.cumsum(cls_priors, axis=1)
        idx_list = [np.where((y_train == i))[0] for i in range(n_cls)]
        cls_amount = [len(idx_list[i]) for i in range(n_cls)]
        net_dataidx_map = {}
        for j in range(n_client):
            net_dataidx_map[j] = []
        while (np.sum(clnt_data_list) != 0):
            curr_clnt = np.random.randint(n_client)
            if (clnt_data_list[curr_clnt] <= 0):
                continue
            clnt_data_list[curr_clnt] -= 1
            curr_prior = prior_cumsum[curr_clnt]
            while True:
                cls_label = np.argmax((np.random.uniform() <= curr_prior))
                if (cls_amount[cls_label] <= 0):
                    # Class exhausted: reset its counter to a random position
                    # (samples may be reused across clients) and redraw.
                    cls_amount[cls_label] = np.random.randint(0, len(idx_list[cls_label]))
                    continue
                cls_amount[cls_label] -= 1
                net_dataidx_map[curr_clnt].append(idx_list[cls_label][cls_amount[cls_label]])
                break
    elif (partition == 'my_part'):
        n_shards = alpha  # here alpha is the number of shards
        n_client = n_nets
        n_cls = 10
        n_data_per_clnt = (len(y_train) / n_client)
        clnt_data_list = np.random.lognormal(mean=np.log(n_data_per_clnt), sigma=0, size=n_client)
        clnt_data_list = ((clnt_data_list / np.sum(clnt_data_list)) * len(y_train)).astype(int)
        # Clients within the same shard share one Dirichlet(0.3) prior.
        cls_priors = np.zeros(shape=(n_client, n_cls))
        cls_priors_tmp = np.random.dirichlet(alpha=([0.3] * n_cls), size=int(n_shards))
        for i in range(n_client):
            cls_priors[i] = cls_priors_tmp[int((i / n_shards))]
        prior_cumsum = np.cumsum(cls_priors, axis=1)
        idx_list = [np.where((y_train == i))[0] for i in range(n_cls)]
        cls_amount = [len(idx_list[i]) for i in range(n_cls)]
        net_dataidx_map = {}
        for j in range(n_client):
            net_dataidx_map[j] = []
        while (np.sum(clnt_data_list) != 0):
            curr_clnt = np.random.randint(n_client)
            if (clnt_data_list[curr_clnt] <= 0):
                continue
            clnt_data_list[curr_clnt] -= 1
            curr_prior = prior_cumsum[curr_clnt]
            while True:
                cls_label = np.argmax((np.random.uniform() <= curr_prior))
                if (cls_amount[cls_label] <= 0):
                    cls_amount[cls_label] = np.random.randint(0, len(idx_list[cls_label]))
                    continue
                cls_amount[cls_label] -= 1
                net_dataidx_map[curr_clnt].append(idx_list[cls_label][cls_amount[cls_label]])
                break
    traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map, logger)
    return (X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts)
def reduce_loss(loss, reduction):
    """Reduce a loss tensor per `reduction` ('none' | 'mean' | 'sum').

    Delegates string-to-enum validation to torch's internal
    F._Reduction.get_enum (0 = none, 1 = mean, 2 = sum).
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 2:
        return loss.sum()
    if reduction_enum == 1:
        return loss.mean()
    if reduction_enum == 0:
        return loss
def process_triples(mtriples):
    """Flatten DOM triple elements into a '<H> head <R> relation <T> tail' token list.

    Each element's text is expected to look like "head | relation | tail";
    the relation is camel-case-split into separate words.
    """
    tokens = []
    for triple in mtriples:
        parts = triple.firstChild.nodeValue.strip().split(' | ')
        head_tokens = get_nodes(parts[0])
        tail_tokens = get_nodes(parts[2])
        relation_words = ' '.join(camel_case_split(get_relation(parts[1])))
        tokens.append('<H>')
        tokens.extend(head_tokens.split())
        tokens.append('<R>')
        tokens.extend(relation_words.split())
        tokens.append('<T>')
        tokens.extend(tail_tokens.split())
    return tokens
class Pandaset(BaseDataset):
    """PandaSet LiDAR semantic-segmentation dataset wrapper.

    Builds train/val/test/all file lists from per-sequence 'lidar'
    directories and exposes the 39-class PandaSet label mapping.
    """

    def __init__(self, dataset_path, name='Pandaset', cache_dir='./logs/cache', use_cache=False, ignored_label_inds=None, test_result_folder='./logs/test_log', test_split=['115', '116', '117', '119', '120', '124', '139', '149', '158'], training_split=['001', '002', '003', '005', '011', '013', '015', '016', '017', '019', '021', '023', '024', '027', '028', '029', '030', '032', '033', '034', '035', '037', '038', '039', '040', '041', '042', '043', '044', '046', '052', '053', '054', '056', '057', '058', '064', '065', '066', '067', '070', '071', '072', '073', '077', '078', '080', '084', '088', '089', '090', '094', '095', '097', '098', '101', '102', '103', '105', '106', '109', '110', '112', '113'], validation_split=['122', '123'], all_split=['001', '002', '003', '005', '011', '013', '015', '016', '017', '019', '021', '023', '024', '027', '028', '029', '030', '032', '033', '034', '035', '037', '038', '039', '040', '041', '042', '043', '044', '046', '052', '053', '054', '056', '057', '058', '064', '065', '066', '067', '069', '070', '071', '072', '073', '077', '078', '080', '084', '088', '089', '090', '094', '095', '097', '098', '101', '102', '103', '105', '106', '109', '110', '112', '113', '115', '116', '117', '119', '120', '122', '123', '124', '139', '149', '158'], **kwargs):
        # Mutable-default fix: `ignored_label_inds` previously defaulted to a
        # shared [] that could be mutated across instances.
        if ignored_label_inds is None:
            ignored_label_inds = []
        super().__init__(dataset_path=dataset_path, name=name, cache_dir=cache_dir, use_cache=use_cache, ignored_label_inds=ignored_label_inds, test_result_folder=test_result_folder, test_split=test_split, training_split=training_split, validation_split=validation_split, all_split=all_split, **kwargs)
        self.label_to_names = self.get_label_to_names()
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort([k for (k, v) in self.label_to_names.items()])

    @staticmethod
    def get_label_to_names():
        """Return the PandaSet label-id -> class-name mapping.

        BUG FIX: this was declared without `self` and without @staticmethod,
        so the `self.get_label_to_names()` call in __init__ raised a
        TypeError.
        """
        label_to_names = {1: 'Reflection', 2: 'Vegetation', 3: 'Ground', 4: 'Road', 5: 'Lane Line Marking', 6: 'Stop Line Marking', 7: 'Other Road Marking', 8: 'Sidewalk', 9: 'Driveway', 10: 'Car', 11: 'Pickup Truck', 12: 'Medium-sized Truck', 13: 'Semi-truck', 14: 'Towed Object', 15: 'Motorcycle', 16: 'Other Vehicle - Construction Vehicle', 17: 'Other Vehicle - Uncommon', 18: 'Other Vehicle - Pedicab', 19: 'Emergency Vehicle', 20: 'Bus', 21: 'Personal Mobility Device', 22: 'Motorized Scooter', 23: 'Bicycle', 24: 'Train', 25: 'Trolley', 26: 'Tram / Subway', 27: 'Pedestrian', 28: 'Pedestrian with Object', 29: 'Animals - Bird', 30: 'Animals - Other', 31: 'Pylons', 32: 'Road Barriers', 33: 'Signs', 34: 'Cones', 35: 'Construction Signs', 36: 'Temporary Construction Barriers', 37: 'Rolling Containers', 38: 'Building', 39: 'Other Static Object'}
        return label_to_names

    def get_split(self, split):
        """Return the PandasetSplit object for `split`."""
        return PandasetSplit(self, split=split)

    def get_split_list(self, split):
        """Return the list of gzipped lidar files belonging to `split`.

        Raises:
            ValueError: for an unrecognized split name.
        """
        cfg = self.cfg
        dataset_path = cfg.dataset_path
        file_list = []
        if (split in ['train', 'training']):
            seq_list = cfg.training_split
        elif (split in ['test', 'testing']):
            seq_list = cfg.test_split
        elif (split in ['val', 'validation']):
            seq_list = cfg.validation_split
        elif (split in ['all']):
            seq_list = cfg.all_split
        else:
            raise ValueError('Invalid split {}'.format(split))
        for seq_id in seq_list:
            pc_path = join(dataset_path, seq_id, 'lidar')
            for f in np.sort(os.listdir(pc_path)):
                if (f.split('.')[(- 1)] == 'gz'):
                    file_list.append(join(pc_path, f))
        return file_list

    def is_tested(self, attr):
        """Not implemented for PandaSet."""
        pass

    def save_test_result(self, results, attr):
        """Write predicted labels for one sample under the test folder."""
        cfg = self.cfg
        name = attr['name']
        test_path = join(cfg.test_result_folder, 'sequences')
        make_dir(test_path)
        save_path = join(test_path, name, 'predictions')
        make_dir(save_path)
        pred = results['predict_labels']
        # Shift labels back up past each ignored index so saved ids match
        # the original label space (assumes ascending ignored indices --
        # TODO confirm against BaseDataset's compaction convention).
        for ign in cfg.ignored_label_inds:
            pred[(pred >= ign)] += 1
        store_path = join(save_path, (name + '.label'))
        pred = pred.astype(np.uint32)
        pred.tofile(store_path)
class ProjectWidget():
    """Sidebar widget that loads a slideflow Project and lists its
    slides and models in an imgui UI."""

    def __init__(self, viz):
        self.viz = viz                  # owning visualizer/application object
        self.search_dirs = []
        self.project_path = ''
        self.browse_cache = dict()      # tuple(parents) -> cached dir listing
        self.browse_refocus = False
        self.P = None                   # loaded sf.Project, or None
        self.slide_paths = []
        self.model_paths = []
        self.slide_idx = 0
        self.model_idx = 0
        self.content_height = 0
        self._show_welcome = False

    def load(self, project, ignore_errors=False):
        """Load the project at path `project`; toast on success/failure.

        Raises the underlying exception unless `ignore_errors` is True.
        """
        viz = self.viz
        viz.clear_result()
        viz.skip_frame()
        if (project == ''):
            viz.result = EasyDict(message='No project loaded')
            return
        try:
            self.project_path = project
            viz.defer_rendering()
            sf.log.debug('Loading project at {}...'.format(project))
            self.P = sf.Project(project)
            self.slide_paths = sorted(self.P.dataset().slide_paths())
            self.viz.create_toast(f'Loaded project at {project}', icon='success')
        except Exception:
            # Record the failure in the UI; re-raise unless suppressed.
            self.project_path = project
            self.viz.create_toast(f'Unable to load project at {project}', icon='error')
            viz.result = EasyDict(error=CapturedException())
            if (not ignore_errors):
                raise

    def recursive_model_scan(self):
        """Render a nested menu of training runs/models under the project's
        models dir; returns True if any model was found."""
        viz = self.viz

        def recurse(parents, dryrun=False):
            # With dryrun=True only reports whether models exist below
            # `parents`; with dryrun=False also draws the imgui tree.
            key = tuple(parents)
            items = self.browse_cache.get(key, None)
            if (items is None):
                items = self._list_runs_and_models(parents)
                self.browse_cache[key] = items
            has_model = False
            recurse_checks = []
            for item in items:
                if (item.type == 'run'):
                    # Probe the subtree first so empty runs are not drawn.
                    _recurse_has_models = recurse([item.path], dryrun=True)
                    recurse_checks.append(_recurse_has_models)
                    if (_recurse_has_models and (not dryrun) and imgui.tree_node(item.name)):
                        recurse([item.path])
                        imgui.tree_pop()
                if (item.type == 'model'):
                    has_model = True
                    if (not dryrun):
                        (clicked, _state) = imgui.menu_item(item.name)
                        if clicked:
                            self.viz.load_model(item.path)
            return (any(recurse_checks) or has_model)
        result = recurse([self.P.models_dir])
        if self.browse_refocus:
            imgui.set_scroll_here()
            viz.skip_frame()
            self.browse_refocus = False
        return result

    def _list_runs_and_models(self, parents):
        """Scan `parents` dirs for training runs (NN-name dirs), model dirs
        (containing params.json), and zipped models; return sorted items."""
        items = []
        run_regex = re.compile('\\d+-.*')
        params_regex = re.compile('params\\.json')
        zip_regex = re.compile('.*\\.zip')
        for parent in set(parents):
            if os.path.isdir(parent):
                for entry in os.scandir(parent):
                    if (entry.is_dir() and run_regex.fullmatch(entry.name)):
                        items.append(EasyDict(type='run', name=entry.name, path=os.path.join(parent, entry.name)))
                    elif entry.is_dir():
                        # A directory containing params.json is a model dir.
                        for model_file in os.scandir(os.path.join(parent, entry.name)):
                            if (model_file.is_file() and params_regex.fullmatch(model_file.name)):
                                items.append(EasyDict(type='model', name=entry.name, path=os.path.join(parent, entry.name)))
                    elif (entry.is_file() and zip_regex.fullmatch(entry.name)):
                        items.append(EasyDict(type='model', name=entry.name, path=os.path.join(parent, entry.name)))
        items = sorted(items, key=(lambda item: (item.name.replace('_', ' '), item.path)))
        return items

    def draw_slide_list(self):
        """Render the clickable list of project slides."""
        for path in self.slide_paths:
            if imgui.menu_item(imgui_utils.ellipsis_clip(sf.util.path_to_name(path), 33))[0]:
                self.viz.load_slide(path)
            if imgui.is_item_hovered():
                imgui.set_tooltip(path)

    def draw_info(self):
        """Render the project-info panel (name, paths, sources, counts)."""
        viz = self.viz
        config = viz._model_config
        imgui.text_colored('Name', *viz.theme.dim)
        imgui.same_line((viz.font_size * 6))
        with imgui_utils.clipped_with_tooltip(self.P.name, 22):
            imgui.text(imgui_utils.ellipsis_clip(self.P.name, 22))
        imgui.text_colored('Path', *viz.theme.dim)
        imgui.same_line((viz.font_size * 6))
        with imgui_utils.clipped_with_tooltip(self.P.root, 22):
            imgui.text(imgui_utils.ellipsis_clip(self.P.root, 22))
        imgui.text_colored('Annotations', *viz.theme.dim)
        imgui.same_line((viz.font_size * 6))
        imgui.text(imgui_utils.ellipsis_clip(basename(self.P.annotations), 22))
        if imgui.is_item_hovered():
            imgui.set_tooltip(self.P.annotations)
        imgui.text_colored('Dataset config', *viz.theme.dim)
        imgui.same_line((viz.font_size * 6))
        imgui.text(imgui_utils.ellipsis_clip(basename(self.P.dataset_config), 22))
        if imgui.is_item_hovered():
            imgui.set_tooltip(self.P.dataset_config)
        imgui.text_colored('Sources', *viz.theme.dim)
        imgui.same_line((viz.font_size * 6))
        source_str = str(self.P.sources)
        with imgui_utils.clipped_with_tooltip(source_str, 22):
            imgui.text(imgui_utils.ellipsis_clip(source_str, 22))
        imgui.text_colored('Slides', *viz.theme.dim)
        imgui.same_line((viz.font_size * 6))
        imgui.text(str(len(self.slide_paths)))
        imgui_utils.vertical_break()

    # NOTE(review): the bare expression below looks like a decorator for
    # __call__ that lost its leading '@' and module prefix during
    # extraction (likely '@imgui_utils.scoped_by_object_id') -- confirm
    # against the original file.
    _utils.scoped_by_object_id
    def __call__(self, show=True):
        """Render the widget for one frame when `show` is True."""
        viz = self.viz
        if show:
            if (self.P is None):
                viz.header('Project')
            if (self.P is not None):
                with viz.header_with_buttons('Project'):
                    imgui.same_line((imgui.get_content_region_max()[0] - (viz.font_size * 1.5)))
                    (cx, cy) = imgui.get_cursor_pos()
                    imgui.set_cursor_position((cx, (cy - int((viz.font_size * 0.25)))))
                    if viz.sidebar.small_button('refresh'):
                        self.load(self.project_path)
        if (show and (self.P is None)):
            imgui_utils.padded_text('No project has been loaded.', vpad=[int((viz.font_size / 2)), int(viz.font_size)])
            if viz.sidebar.full_button('Load a Project'):
                viz.ask_load_project()
        elif show:
            if viz.collapsing_header('Info', default=True):
                self.draw_info()
            if viz.collapsing_header('Slides', default=False):
                if (not len(self.slide_paths)):
                    imgui_utils.padded_text('No slides found.', vpad=[int((viz.font_size / 2)), int(viz.font_size)])
                else:
                    self.draw_slide_list()
            if viz.collapsing_header('Models', default=False):
                if (not self.recursive_model_scan()):
                    imgui_utils.padded_text('No models found.', vpad=[int((viz.font_size / 2)), int(viz.font_size)])
def test_simple_creation() -> None:
    """TFSigmoidBoxTensor must wrap the raw tensor unchanged and subclass TFBoxTensor."""
    for shape in ((3, 2, 3), (2, 10)):
        raw = tf.constant(np.random.rand(*shape))
        boxed = TFSigmoidBoxTensor(raw)
        assert (raw.numpy() == boxed.data.numpy()).all()
        assert isinstance(boxed, TFBoxTensor)
def read_results(filename, data_type: str, is_gt=False, is_ignore=False):
    """Read tracking results from `filename`.

    Only 'mot' and 'lab' data types are supported (both use the MOT
    reader).

    Raises:
        ValueError: for any other `data_type`.
    """
    if data_type not in ('mot', 'lab'):
        raise ValueError('Unknown data type: {}'.format(data_type))
    return read_mot_results(filename, is_gt, is_ignore)
def interpolation_str2int(interpolation):
    """Map an interpolation name (or list/tuple of names) to OpenCV flag(s).

    Lists and tuples are converted element-wise (a list is returned).

    Raises:
        RuntimeError: for an unrecognized interpolation name.
    """
    if isinstance(interpolation, (list, tuple)):
        return [interpolation_str2int(item) for item in interpolation]
    if interpolation == 'nearest':
        return cv2.INTER_NEAREST
    if interpolation == 'linear':
        return cv2.INTER_LINEAR
    if interpolation == 'cubic':
        return cv2.INTER_CUBIC
    raise RuntimeError(f'Unknown interpolation type: "{interpolation}"')
class TFCTRLModelTester(object):
    """Helper that builds small CTRL configs/inputs and runs shape checks
    for the TF CTRL model family; assertions go through `self.parent`."""

    def __init__(self, parent):
        self.parent = parent            # the unittest.TestCase doing asserts
        # Deliberately tiny model hyperparameters for fast tests.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_token_type_ids = True
        self.use_input_mask = True
        self.use_labels = True
        self.use_mc_token_ids = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 5
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
        self.pad_token_id = (self.vocab_size - 1)

    def prepare_config_and_inputs(self):
        """Build a CTRLConfig plus random ids/masks/labels for one batch.

        Optional tensors (mask, token types, labels, ...) are None when the
        corresponding use_* flag is False.
        """
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = CTRLConfig(vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id)
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels)

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Run TFCTRLModel via dict, list, and bare-tensor call styles and
        check the last hidden state's shape."""
        model = TFCTRLModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, None, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_ctrl_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Check LM-head logits shape: (batch, seq, vocab)."""
        model = TFCTRLLMHeadModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Check sequence-classification logits shape: (batch, num_labels)."""
        config.num_labels = self.num_labels
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        inputs = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'labels': sequence_labels}
        model = TFCTRLForSequenceClassification(config)
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the shared model-test
        mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return (config, inputs_dict)
class cassieRLEnvStepInPlace(cassieRLEnvDelay):
    """Cassie stepping-in-place environment with optional observation delay.

    Tracks a 28-phase reference trajectory loaded from
    'step_in_place_trajectory' and applies per-motor PD control around
    `action + reference motor positions`.
    """

    def __init__(self):
        self.sim = CassieSim()
        self.vis = CassieVis()
        self.observation_space = np.zeros(80)
        self.action_space = np.zeros(10)
        # `with` ensures the trajectory file handle is closed promptly.
        with open('step_in_place_trajectory', 'rb') as fp:
            self.trajectory = pickle.load(fp)
        # PD gains: left leg (5 motors) then right leg (5 motors).
        self.P = np.array([100, 100, 88, 96, 50, 100, 100, 88, 96, 50])
        self.D = np.array([10.0, 10.0, 8.0, 9.6, 5.0, 10.0, 10.0, 8.0, 9.6, 5.0])
        self.u = pd_in_t()
        self.time = 0
        self.phase = 0
        self.counter = 0
        self.time_limit = 400
        # Buffer of past (qpos, qvel) pairs used to emulate sensor delay.
        self.state_buffer = []
        self.buffer_size = 150
        self.delay = True

    def step_simulation(self, action):
        """Advance the simulation one PD step toward action + reference pose."""
        qpos = np.copy(self.sim.qpos())
        qvel = np.copy(self.sim.qvel())
        self.state_buffer.append((qpos, qvel))
        if (len(self.state_buffer) > self.buffer_size):
            self.state_buffer.pop(0)
        # qpos/qvel indices of the 10 actuated motors.
        pos_index = [7, 8, 9, 14, 20, 21, 22, 23, 28, 34]
        vel_index = [6, 7, 8, 12, 18, 19, 20, 21, 25, 31]
        (ref_pos, ref_vel) = self.get_kin_next_state()
        target = (action + ref_pos[pos_index])
        self.u = pd_in_t()
        for i in range(5):
            self.u.leftLeg.motorPd.torque[i] = 0
            self.u.leftLeg.motorPd.pTarget[i] = target[i]
            self.u.leftLeg.motorPd.pGain[i] = self.P[i]
            self.u.leftLeg.motorPd.dTarget[i] = 0
            self.u.leftLeg.motorPd.dGain[i] = self.D[i]
            self.u.rightLeg.motorPd.torque[i] = 0
            self.u.rightLeg.motorPd.pTarget[i] = target[(i + 5)]
            self.u.rightLeg.motorPd.pGain[i] = self.P[(i + 5)]
            self.u.rightLeg.motorPd.dTarget[i] = 0
            self.u.rightLeg.motorPd.dGain[i] = self.D[(i + 5)]
        self.sim.step_pd(self.u)

    def get_state(self):
        """Observation: (possibly delayed) joint state concatenated with the
        next reference state."""
        if ((len(self.state_buffer) >= 80) and self.delay):
            # Sample a stale state near the front of the buffer to model delay.
            random_index = random.randint(0, 20)
            state = self.state_buffer[random_index]
            qpos = np.copy(state[0])
            qvel = np.copy(state[1])
        else:
            qpos = np.copy(self.sim.qpos())
            qvel = np.copy(self.sim.qvel())
        (ref_pos, ref_vel) = self.get_kin_next_state()
        pos_index = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 14, 15, 16, 20, 21, 22, 23, 28, 29, 30, 34])
        vel_index = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 18, 19, 20, 21, 25, 26, 27, 31])
        return np.concatenate([qpos[pos_index], qvel[vel_index], ref_pos[pos_index], ref_vel[vel_index]])

    def get_kin_state(self):
        """Reference (pose, velocity) for the current phase."""
        pose = np.copy(self.trajectory[self.phase][0])
        vel = np.copy(self.trajectory[self.phase][1])
        return (pose, vel)

    def get_kin_next_state(self):
        """Reference (pose, velocity) for the next phase, wrapping at 28."""
        phase = (self.phase + 1)
        if (phase >= 28):
            phase = 0
        # BUG FIX: this previously indexed self.trajectory with self.phase,
        # returning the *current* frame despite computing the wrapped next
        # `phase` above.
        pose = np.copy(self.trajectory[phase][0])
        vel = np.copy(self.trajectory[phase][1])
        return (pose, vel)
# NOTE(review): the two lines below look like pytest decorators that lost
# their leading '@pytest.mark' prefix during extraction; as written this is
# not valid Python -- confirm against the original file.
.parametrize('with_attention', [True, False])
.parametrize('quantization_setup', [{'numeric2': [0.0, 50.0, 100.0]}, None])
def test_chunk_tab_preprocessor_with_params(with_attention, quantization_setup):
    """Chunked preprocessing must be equivalent to one-shot preprocessing.

    Fits a TabPreprocessor on the whole dataframe and a
    ChunkTabPreprocessor incrementally over csv chunks, then asserts that
    inverse_transform of both pipelines reconstructs identical dataframes.
    """
    df = pd.read_csv(os.path.join(data_folder, fname))
    # One-shot reference pipeline.
    tab_processor = TabPreprocessor(cat_embed_cols=cat_cols, continuous_cols=num_cols, cols_to_scale=['numeric1'], with_attention=with_attention, with_cls_token=with_attention, quantization_setup=quantization_setup)
    X_tab = tab_processor.fit_transform(df)
    # Chunked pipeline fitted incrementally with the same settings.
    chunk_tab_processor = ChunkTabPreprocessor(n_chunks=n_chunks, cat_embed_cols=cat_cols, continuous_cols=num_cols, cols_to_scale=['numeric1'], with_attention=with_attention, with_cls_token=with_attention, cols_and_bins=quantization_setup)
    for chunk in pd.read_csv(os.path.join(data_folder, fname), chunksize=chunksize):
        chunk_tab_processor.partial_fit(chunk)
    X_tab_chunk = chunk_tab_processor.transform(df)
    reconstruced_df_chunk = chunk_tab_processor.inverse_transform(X_tab_chunk)
    reconstruced_df = tab_processor.inverse_transform(X_tab)
    assert reconstruced_df.equals(reconstruced_df_chunk)
# NOTE(review): '_grad()' looks like the tail of a '@torch.no_grad()'
# decorator mangled during extraction -- confirm against the original file.
_grad()
def validation_one_epoch(data_loader, model, device):
    """Run one evaluation epoch and return global-average metrics.

    Computes cross-entropy loss and top-1/top-5 accuracy per batch under
    CUDA autocast, aggregates them in a MetricLogger, and returns a dict
    mapping metric name -> global average across processes.
    """
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = utils.MetricLogger(delimiter=' ')
    header = 'Val:'
    model.eval()
    for batch in metric_logger.log_every(data_loader, 10, header):
        images = batch[0]
        target = batch[1]
        # non_blocking overlaps H2D copies with compute when pinned memory is used.
        images = images.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        with torch.cuda.amp.autocast():
            output = model(images)
            loss = criterion(output, target)
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        batch_size = images.shape[0]
        metric_logger.update(loss=loss.item())
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    # Reduce meters across distributed processes before reporting.
    metric_logger.synchronize_between_processes()
    print('* {top1.global_avg:.3f} {top5.global_avg:.3f} loss {losses.global_avg:.3f}'.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
class Renderer(object):
def ax_zoomable(ax):
return bool((ax and ax.get_navigate()))
def ax_has_xgrid(ax):
return bool((ax and ax.xaxis._gridOnMajor and ax.yaxis.get_gridlines()))
def ax_has_ygrid(ax):
return bool((ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines()))
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
def draw_figure(self, fig, props):
if (hasattr(self, '_current_fig') and (self._current_fig is not None)):
warnings.warn('figure embedded in figure: something is wrong')
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
(yield)
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
def draw_axes(self, ax, props):
if (hasattr(self, '_current_ax') and (self._current_ax is not None)):
warnings.warn('axes embedded in axes: something is wrong')
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
(yield)
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
(yield)
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
def open_figure(self, fig, props):
pass
def close_figure(self, fig):
pass
def open_axes(self, ax, props):
pass
def close_axes(self, ax):
pass
def open_legend(self, legend, props):
pass
def close_legend(self, legend):
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle, label, mplobj=None):
if (linestyle is not None):
self.draw_line(data, coordinates, linestyle, label, mplobj)
if (markerstyle is not None):
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
pathcodes = (['M'] + ((data.shape[0] - 1) * ['L']))
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates, pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
def _iter_path_collection(paths, path_transforms, offsets, styles):
N = max(len(paths), len(offsets))
if (not path_transforms):
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if (np.size(edgecolor) == 0):
edgecolor = ['none']
facecolor = styles['facecolor']
if (np.size(facecolor) == 0):
facecolor = ['none']
elements = [paths, path_transforms, offsets, edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms, offsets, offset_coordinates, offset_order, styles, mplobj=None):
if (offset_order == 'before'):
raise NotImplementedError('offset before transform')
for tup in self._iter_path_collection(paths, path_transforms, offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
(vertices, pathcodes) = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
if (path_coordinates == 'figure'):
path_coordinates = 'points'
style = {'edgecolor': utils.color_to_hex(ec), 'facecolor': utils.color_to_hex(fc), 'edgewidth': lw, 'dasharray': '10,0', 'alpha': styles['alpha'], 'zorder': styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates, pathcodes=pathcodes, style=style, offset=offset, offset_coordinates=offset_coordinates, mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
(vertices, pathcodes) = style['markerpath']
pathstyle = dict(((key, style[key]) for key in ['alpha', 'edgecolor', 'facecolor', 'zorder', 'edgewidth']))
pathstyle['dasharray'] = '10,0'
for vertex in data:
self.draw_path(data=vertices, coordinates='points', pathcodes=pathcodes, style=pathstyle, offset=vertex, offset_coordinates=coordinates, mplobj=mplobj)
def draw_text(self, text, position, coordinates, style, text_type=None, mplobj=None):
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style, offset=None, offset_coordinates='data', mplobj=None):
    """Abstract hook: concrete renderers must implement path drawing."""
    raise NotImplementedError()
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
    """Abstract hook: concrete renderers must implement image drawing."""
    raise NotImplementedError()
def postprocess_results(dataset: TextToSpeechDataset, sample, hypos, resample_fn, dump_target):
    """Convert TTS hypotheses into per-utterance numpy tuples.

    Returns an iterator of (sample_id, text, attn, eos_prob, feat_pred,
    wave_pred, feat_targ, wave_targ); the target slots are None unless
    dump_target is set.
    """
    def as_numpy(t):
        # Detach tensors and move them to CPU; None passes through.
        return None if t is None else t.detach().cpu().numpy()
    ids = [dataset.ids[i] for i in sample['id'].tolist()]
    if 'src_texts' in sample:
        texts = sample['src_texts']
    else:
        texts = [''] * len(hypos)
    attns = [as_numpy(h['attn']) for h in hypos]
    eos_probs = [as_numpy(h.get('eos_prob', None)) for h in hypos]
    feat_preds = [as_numpy(h['feature']) for h in hypos]
    wave_preds = [as_numpy(resample_fn(h['waveform'])) for h in hypos]
    if dump_target:
        feat_targs = [as_numpy(h['targ_feature']) for h in hypos]
        wave_targs = [as_numpy(resample_fn(h['targ_waveform'])) for h in hypos]
    else:
        feat_targs = [None] * len(hypos)
        wave_targs = [None] * len(hypos)
    return zip(ids, texts, attns, eos_probs, feat_preds, wave_preds, feat_targs, wave_targs)
class AstronomicalObject():
    """Astronomical object defined by sky position and redshift.

    Instances are ordered first by healpix pixel, then by right ascension,
    declination and redshift, so catalogues sort deterministically.
    """

    def __init__(self, **kwargs):
        """Initialize from keyword arguments.

        Required keywords: 'dec', 'los_id', 'ra', 'z' (angles in radians).

        Raises:
            AstronomicalObjectError: if any required keyword is missing.
        """
        self.dec = kwargs.get('dec')
        if self.dec is None:
            raise AstronomicalObjectError("Error constructing AstronomicalObject. Missing variable 'dec'")
        self.los_id = kwargs.get('los_id')
        if self.los_id is None:
            raise AstronomicalObjectError("Error constructing AstronomicalObject. Missing variable 'los_id'")
        self.ra = kwargs.get('ra')
        if self.ra is None:
            raise AstronomicalObjectError("Error constructing AstronomicalObject. Missing variable 'ra'")
        self.z = kwargs.get('z')
        if self.z is None:
            raise AstronomicalObjectError("Error constructing AstronomicalObject. Missing variable 'z'")
        # nside=16 healpix pixel of the position (colatitude = pi/2 - dec).
        self.healpix = healpy.ang2pix(16, ((np.pi / 2) - self.dec), self.ra)

    def __gt__(self, other):
        """Lexicographic order on (healpix, ra, dec, z)."""
        return ((self.healpix, self.ra, self.dec, self.z) >
                (other.healpix, other.ra, other.dec, other.z))

    def __eq__(self, other):
        """Equal when healpix, ra, dec and z all match.

        Note: defining __eq__ without __hash__ makes instances unhashable.
        """
        return ((self.healpix == other.healpix) and (self.ra == other.ra)
                and (self.dec == other.dec) and (self.z == other.z))

    def get_header(self):
        """Return FITS-style header cards describing this object."""
        return [
            {'name': 'LOS_ID', 'value': self.los_id, 'comment': 'Picca line-of-sight id'},
            {'name': 'RA', 'value': self.ra, 'comment': 'Right Ascension [rad]'},
            {'name': 'DEC', 'value': self.dec, 'comment': 'Declination [rad]'},
            {'name': 'Z', 'value': self.z, 'comment': 'Redshift'},
        ]

    def get_metadata(self):
        """Return metadata values in the order of get_metadata_dtype."""
        return [self.los_id, self.ra, self.dec, self.z]

    # Bug fix: these two methods take `cls` but were missing @classmethod,
    # so calling them on the class itself (without an instance) failed.
    @classmethod
    def get_metadata_dtype(cls):
        """Return the numpy dtype spec for the metadata columns."""
        return [('LOS_ID', int), ('RA', float), ('DEC', float), ('Z', float)]

    @classmethod
    def get_metadata_units(cls):
        """Return the units for each metadata column ('' = dimensionless)."""
        return ['', 'rad', 'rad', '']
def encode_image(model, processor, image_url: str, device='cpu'):
    """Download an image and return its L2-normalized image embedding.

    The image is fetched over HTTP, preprocessed with `processor`, embedded
    with `model.get_image_features`, normalized, and returned as numpy.
    """
    import requests
    import torch
    from io import BytesIO
    from PIL import Image
    raw = requests.get(image_url)
    img = Image.open(BytesIO(raw.content))
    with torch.no_grad():
        pixels = processor(text=None, images=img, return_tensors='pt', padding=True)['pixel_values']
        features = model.get_image_features(pixels.to(device))
        # Normalize to unit length so dot products act as cosine similarity.
        features /= features.norm(dim=-1, keepdim=True)
    return features.cpu().numpy()
def camel_case_split(identifier):
    """Split an identifier on camelCase boundaries and underscores.

    '(' characters are stripped from each camel-case token before the
    underscore split; empty pieces from trailing underscores are kept.
    """
    # Non-greedy runs that end right before a lower->Upper boundary, an
    # ACRONYMWord boundary, or the end of the string.
    boundary = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')
    return [piece
            for match in boundary.finditer(identifier)
            for piece in match.group(0).replace('(', '').split('_')]
def drop_blocks(drop_block_rate=0.0):
    """Per-stage DropBlock modules for a 4-stage network.

    The first two stages never get DropBlock; the last two get one only
    when drop_block_rate is non-zero (block sizes 5 and 3, gamma scales
    0.25 and 1.0 respectively).
    """
    if not drop_block_rate:
        return [None, None, None, None]
    return [
        None,
        None,
        DropBlock2d(drop_block_rate, 5, 0.25),
        DropBlock2d(drop_block_rate, 3, 1.0),
    ]
class Av2DataModule(LightningDataModule):
    """Lightning data module wrapping cached Argoverse 2 dataset splits."""

    def __init__(self, data_root: str, data_folder: str, train_batch_size: int=32, val_batch_size: int=32, test_batch_size: int=32, shuffle: bool=True, num_workers: int=8, pin_memory: bool=True, test: bool=False):
        super().__init__()
        self.data_root = Path(data_root)
        self.data_folder = data_folder
        # Kept as `batch_size` (not `train_batch_size`) for compatibility.
        self.batch_size = train_batch_size
        self.val_batch_size = val_batch_size
        self.test_batch_size = test_batch_size
        self.shuffle = shuffle
        self.num_workers = num_workers
        self.pin_memory = pin_memory
        self.test = test

    def setup(self, stage: Optional[str]=None) -> None:
        """Instantiate datasets: test split in test mode, else train + val."""
        split_root = self.data_root / self.data_folder
        if self.test:
            self.test_dataset = Av2Dataset(data_root=split_root, cached_split='test')
        else:
            self.train_dataset = Av2Dataset(data_root=split_root, cached_split='train')
            self.val_dataset = Av2Dataset(data_root=split_root, cached_split='val')

    def train_dataloader(self):
        """Loader over the train split (shuffled unless disabled)."""
        return TorchDataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            num_workers=self.num_workers,
            pin_memory=self.pin_memory,
            collate_fn=collate_fn,
        )

    def val_dataloader(self):
        """Deterministic loader over the validation split."""
        return TorchDataLoader(
            self.val_dataset,
            batch_size=self.val_batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            pin_memory=self.pin_memory,
            collate_fn=collate_fn,
        )

    def test_dataloader(self):
        """Deterministic loader over the test split."""
        return TorchDataLoader(
            self.test_dataset,
            batch_size=self.test_batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            pin_memory=self.pin_memory,
            collate_fn=collate_fn,
        )
def check_Xs(Xs, multiview=False, enforce_views=None, copy=False, return_dimensions=False):
    """Validate multiview input and normalize it to a list of 2D arrays.

    Accepts a list of arrays or a single ndarray (2D becomes a one-view
    list; higher-rank is split along axis 0). Raises ValueError on empty
    input, on fewer than two views when multiview is set, on a view-count
    mismatch with enforce_views, or on inconsistent sample counts.
    Optionally returns (Xs, n_views, n_samples, n_features).
    """
    if not isinstance(Xs, list):
        if not isinstance(Xs, np.ndarray):
            raise ValueError(f'If not list, input must be of type np.ndarray, not {type(Xs)}')
        Xs = [Xs] if Xs.ndim == 2 else list(Xs)
    n_views = len(Xs)
    if n_views == 0:
        raise ValueError('Length of input list must be greater than 0')
    if multiview:
        if n_views == 1:
            raise ValueError('Must provide at least two data matrices')
        if enforce_views is not None and n_views != enforce_views:
            raise ValueError('Wrong number of views. Expected {} but found {}'.format(enforce_views, n_views))
    Xs = [check_array(X, allow_nd=False, copy=copy) for X in Xs]
    # Every view must describe the same set of samples.
    if len({X.shape[0] for X in Xs}) != 1:
        raise ValueError('All views must have the same number of samples')
    if not return_dimensions:
        return Xs
    n_samples = Xs[0].shape[0]
    n_features = [X.shape[1] for X in Xs]
    return (Xs, n_views, n_samples, n_features)
class SudokuStateManager(StateManagerBase):
    """Keeps a chronological history of sudoku board states.

    States are matrix-like objects exposing .tolist(); the newest state
    sits at the end of the history list.
    """

    def __init__(self) -> None:
        super().__init__()
        # Oldest state first; index -1 is the current state.
        self.sudoku_matrix_history = []

    def update_state(self, solution) -> bool:
        """Append `solution` unless an identical board already exists.

        Returns True when the state was recorded, False for a duplicate.
        """
        solution_key = json.dumps(solution.tolist())
        for state in self.sudoku_matrix_history:
            state_key = json.dumps(state.tolist())
            if (solution_key == state_key):
                return False
        self.sudoku_matrix_history.append(solution)
        return True

    def get_current_state(self) -> object:
        """Return the newest state, or None when history is empty."""
        return self.get_state(0)

    def is_at_initial_state(self) -> bool:
        """True when only the initial board is in the history."""
        return (len(self.sudoku_matrix_history) == 1)

    def get_initial_state(self) -> object:
        """Return the oldest recorded state, or None when empty."""
        history_len = len(self.sudoku_matrix_history)
        if (history_len == 0):
            return None
        return self.get_state((history_len - 1))

    def get_state(self, rollback_steps) -> object:
        """Return the state `rollback_steps` back from the newest, or None."""
        if (len(self.sudoku_matrix_history) <= rollback_steps):
            return None
        return self.sudoku_matrix_history[(- (rollback_steps + 1))]

    def rollback(self, rollback_steps) -> bool:
        """Discard the newest `rollback_steps` states.

        Returns True on success, False when there is no history at all.
        """
        if (len(self.sudoku_matrix_history) == 0):
            return False
        print('START STATE ROLLBACK, current depth: {}'.format(len(self.sudoku_matrix_history)))
        for state in self.sudoku_matrix_history:
            print('State:', json.dumps(state.tolist()))
        for i in range(rollback_steps):
            self.sudoku_matrix_history.pop()
        print('STATE ROLLBACK DONE, current depth: {}\n'.format(len(self.sudoku_matrix_history)))
        # Bug fix: this method is annotated -> bool but previously fell off
        # the end, returning None (falsy) even on success.
        return True

    def max_rollback_steps(self) -> int:
        """Number of states droppable while keeping the initial one."""
        return (len(self.sudoku_matrix_history) - 1)
def load_json_config(path):
    """Read a JSON config file from `path` and pass it through config_init."""
    with open(path) as data_file:
        raw_config = json.load(data_file)
    return config_init(raw_config)
def main():
    """Train NoiseNet on cropped clean images with an L1 loss.

    Device, data paths, hyper-parameters and the checkpoint path are
    hard-coded; intended to run as a standalone training script.
    """
    device = 'cuda:3'
    torch.random.manual_seed(42)
    model = NoiseNet(is_train=True).to(device)
    # Kaiming init with a small scale factor (0.001) applied to all modules.
    model.apply(functools.partial(weights_init_kaiming, scale=0.001))
    data_set = GenImageDataset('data_new/train/clean', phase='train', crop_size=128)
    train_loader = torch.utils.data.DataLoader(data_set, batch_size=1, shuffle=True, num_workers=1, pin_memory=True, drop_last=False)
    lr = 0.0001
    criterion = nn.L1Loss()
    # NOTE(review): weight_decay=0.9 is unusually large for Adam — confirm
    # this is intentional (typical values are ~1e-4).
    optimizer = torch.optim.Adam(model.parameters(), lr, weight_decay=0.9)
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[100, 200, 300], gamma=0.1)
    epochs = 1000
    model.train()
    for e in tqdm(range(epochs)):
        e_loss = 0
        # acc / acc_2: counts of outputs within 1 (resp. 2) of the target.
        acc = 0
        acc_2 = 0
        print('lr: {:.2e}'.format(optimizer.param_groups[0]['lr']))
        for (n_img, trg) in tqdm(train_loader):
            trg = trg.unsqueeze(1).to(device).float()
            optimizer.zero_grad()
            out = model(n_img.to(device))
            loss = criterion(out, trg)
            loss.backward()
            optimizer.step()
            e_loss += loss.item()
            acc += (np.abs((out.detach().cpu().numpy() - trg.detach().cpu().numpy())) < 1).sum()
            acc_2 += (np.abs((out.detach().cpu().numpy() - trg.detach().cpu().numpy())) <= 2).sum()
        # Peek at the first few predictions vs. targets of the last batch.
        print([float('{:.1f}'.format(x)) for x in out.detach().cpu().numpy().flatten()][:5])
        print(trg.detach().cpu().numpy().T[0][:5])
        print('Epoch: {}\t Loss: {:.3f}\t acc: {:.3f} \t acc_2: {:.3f} '.format(e, (e_loss / len(data_set)), (acc / len(data_set)), (acc_2 / len(data_set))), end='')
        scheduler.step()
    torch.save(model.state_dict(), 'checkpoints/NoiseNet/latest.pth')
def create_tfkeras_pruning_callback(*args, **kwargs):
    """Build an Optuna pruning callback for tf.keras training.

    The import lives inside the function so the optional bigdl/optuna
    dependency is only required when the callback is actually requested.
    All arguments are forwarded unchanged.
    """
    from bigdl.nano.deps.automl.hpo_api import create_optuna_tfkeras_pruning_callback as _factory
    return _factory(*args, **kwargs)
def dlib_and_3DDFA(dlib_landmark_model='shape_predictor_68_face_landmarks.dat', checkpoint_fp='phase1_wpdc_vdc.pth.tar', arch='mobilenet_1'):
    """Load the dlib landmark predictor/face detector and a 3DDFA model.

    Returns (face_regressor, face_detector, model) with the model moved to
    GPU and set to eval mode.
    """
    landmark_regressor = dlib.shape_predictor(dlib_landmark_model)
    detector = dlib.get_frontal_face_detector()
    # Map the checkpoint to CPU regardless of where it was saved.
    state = torch.load(checkpoint_fp, map_location=(lambda storage, loc: storage))['state_dict']
    net = getattr(mobilenet_v1, arch)(num_classes=62)
    weights = net.state_dict()
    # DataParallel checkpoints prefix keys with 'module.'; strip it.
    for key, value in state.items():
        weights[key.replace('module.', '')] = value
    net.load_state_dict(weights)
    net = net.cuda()
    net.eval()
    return (landmark_regressor, detector, net)
class YolosOnnxConfig(OnnxConfig):
    """ONNX export configuration for YOLOS models."""

    # Exporting YOLOS requires at least torch 1.11.
    torch_onnx_minimum_version = version.parse('1.11')

    # Bug fix: these three accessors are properties in the OnnxConfig API;
    # the @property decorators were missing, so attribute access returned
    # bound methods instead of values.
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the single pixel_values input."""
        return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating exported outputs."""
        return 0.0001

    @property
    def default_onnx_opset(self) -> int:
        """Default ONNX opset for this export."""
        return 12
class TestFrozenPbModel(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
get_model_type_patcher = patch('neural_insights.components.model.model_type_getter.nc_get_model_type')
self.addCleanup(get_model_type_patcher.stop)
get_model_type_mock = get_model_type_patcher.start()
get_model_type_mock.side_effect = self._get_model_type
nc_tensorflow_model_patcher = patch('neural_insights.components.model.tensorflow.model.NCModel')
self.addCleanup(nc_tensorflow_model_patcher.stop)
nc_model_instance_mock = nc_tensorflow_model_patcher.start()
nc_model_instance_mock.return_value.input_node_names = ['first input node', 'second input node']
nc_model_instance_mock.return_value.output_node_names = ['first output node', 'second output node']
def _get_model_type(self, path: str) -> str:
if ('/path/to/frozen_pb.pb' == path):
return 'frozen_pb'
raise ValueError()
def test_get_framework_name(self) -> None:
self.assertEqual(Frameworks.TF.value, FrozenPbModel.get_framework_name())
def test_supports_correct_path(self) -> None:
self.assertTrue(FrozenPbModel.supports_path('/path/to/frozen_pb.pb'))
def test_supports_incorrect_path(self) -> None:
self.assertFalse(FrozenPbModel.supports_path('/path/to/model.txt'))
('neural_insights.components.model.tensorflow.model.check_module')
def test_guard_requirements_installed(self, mocked_check_module: MagicMock) -> None:
model = FrozenPbModel('/path/to/frozen_pb.pb')
model.guard_requirements_installed()
mocked_check_module.assert_called_once_with('tensorflow')
def test_get_input_nodes(self) -> None:
model = FrozenPbModel('/path/to/frozen_pb.pb')
self.assertEqual(['first input node', 'second input node'], model.get_input_nodes())
def test_get_output_nodes(self) -> None:
model = FrozenPbModel('/path/to/frozen_pb.pb')
self.assertEqual(['first output node', 'second output node', 'custom'], model.get_output_nodes())
def test_get_input_and_output_nodes(self) -> None:
model = FrozenPbModel('/path/to/frozen_pb.pb')
self.assertEqual(['first input node', 'second input node'], model.get_input_nodes())
self.assertEqual(['first output node', 'second output node', 'custom'], model.get_output_nodes())
('neural_insights.components.model.tensorflow.model.TensorflowReader', autospec=True)
def test_get_model_graph(self, mocked_tensorflow_graph_reader: MagicMock) -> None:
expected = Graph()
mocked_tensorflow_graph_reader.return_value.read.return_value = expected
model = FrozenPbModel('/path/to/frozen_pb.pb')
self.assertEqual(expected, model.get_model_graph())
mocked_tensorflow_graph_reader.assert_called_once_with(model)
def test_domain_object_detection_domain(self) -> None:
self.assert_model_domain_matches_expected(node_names=['boxes', 'scores', 'classes'], expected_domain=Domains.OBJECT_DETECTION.value, expected_domain_flavour=DomainFlavours.NONE.value)
def test_domain_object_detection_domain_ssd(self) -> None:
self.assert_model_domain_matches_expected(node_names=['bboxes', 'scores', 'classes', 'ssd'], expected_domain=Domains.OBJECT_DETECTION.value, expected_domain_flavour=DomainFlavours.SSD.value)
def test_domain_object_detection_domain_yolo(self) -> None:
self.assert_model_domain_matches_expected(node_names=['boxes', 'yolo'], expected_domain=Domains.OBJECT_DETECTION.value, expected_domain_flavour=DomainFlavours.YOLO.value)
def test_domain_image_recognition_resnet(self) -> None:
self.assert_model_domain_matches_expected(node_names=['resnet_model/Pad'], expected_domain=Domains.IMAGE_RECOGNITION.value, expected_domain_flavour=DomainFlavours.NONE.value)
def test_domain_unknown(self) -> None:
self.assert_model_domain_matches_expected(node_names=['foo', 'bar', 'baz', 'ssd'], expected_domain='', expected_domain_flavour='')
('neural_insights.components.model.tensorflow.model.TensorflowReader', autospec=True)
def test_domain_graph_reader_exception(self, mocked_tensorflow_graph_reader: MagicMock) -> None:
mocked_tensorflow_graph_reader.return_value.read.side_effect = Exception()
model = FrozenPbModel('/path/to/frozen_pb.pb')
expected = Domain(domain='', domain_flavour='')
self.assertEqual(expected, model.domain)
mocked_tensorflow_graph_reader.assert_called_once_with(model)
def test_shape_elements_order(self) -> None:
model = FrozenPbModel('/path/to/frozen_pb.pb')
self.assertListEqual(model.shape_elements_order, ['height', 'width', 'channels'])
('neural_insights.components.model.tensorflow.model.TensorflowReader', autospec=True)
def assert_model_domain_matches_expected(self, mocked_tensorflow_graph_reader: MagicMock, node_names: List[str], expected_domain: str, expected_domain_flavour: str) -> None:
def graph_with_nodes() -> Graph:
graph = Graph()
for name in node_names:
graph.add_node(Node(id=name, label=name))
return graph
mocked_tensorflow_graph_reader.return_value.read.return_value = graph_with_nodes()
model = FrozenPbModel('/path/to/frozen_pb.pb')
expected = Domain(domain=expected_domain, domain_flavour=expected_domain_flavour)
self.assertEqual(expected, model.domain)
mocked_tensorflow_graph_reader.assert_called_once_with(model) |
class EvalHook(Hook):
    """Evaluation hook running single-GPU inference at a fixed interval.

    Attributes:
        dataloader: validation DataLoader whose dataset implements evaluate().
        interval: evaluation period, in iterations or epochs.
        by_epoch: when True evaluate every `interval` epochs, else iterations.
        eval_kwargs: extra keyword arguments forwarded to dataset.evaluate().
    """

    def __init__(self, dataloader, interval=1, by_epoch=False, **eval_kwargs):
        if not isinstance(dataloader, DataLoader):
            raise TypeError(f'dataloader must be a pytorch DataLoader, but got {type(dataloader)}')
        self.dataloader = dataloader
        self.interval = interval
        self.by_epoch = by_epoch
        self.eval_kwargs = eval_kwargs

    def _run_eval(self, runner):
        """Run single-GPU inference and push metrics into the log buffer.

        Extracted helper: after_train_iter/after_train_epoch previously
        duplicated this body verbatim.
        """
        # Imported lazily to avoid a hard mmseg dependency at import time.
        from mmseg.apis import single_gpu_test
        runner.log_buffer.clear()
        results = single_gpu_test(runner.model, self.dataloader, show=False)
        self.evaluate(runner, results)

    def after_train_iter(self, runner):
        """Evaluate every `interval` iterations (iteration-based mode only)."""
        if self.by_epoch or (not self.every_n_iters(runner, self.interval)):
            return
        self._run_eval(runner)

    def after_train_epoch(self, runner):
        """Evaluate every `interval` epochs (epoch-based mode only)."""
        if (not self.by_epoch) or (not self.every_n_epochs(runner, self.interval)):
            return
        self._run_eval(runner)

    def evaluate(self, runner, results):
        """Call the dataset's evaluate() and expose metrics to the logger."""
        eval_res = self.dataloader.dataset.evaluate(results, logger=runner.logger, **self.eval_kwargs)
        for (name, val) in eval_res.items():
            runner.log_buffer.output[name] = val
        runner.log_buffer.ready = True
class TensorBoardOutput(LogOutput):
    """LogOutput backend writing tabular data (and TF graphs) to TensorBoard.

    Scalar keys can additionally be plotted against a primary x-axis and
    any number of secondary x-axes taken from the same tabular row.
    """

    def __init__(self, log_dir, x_axis=None, additional_x_axes=None, flush_secs=120, histogram_samples=1000.0):
        if (x_axis is None):
            assert (not additional_x_axes), 'You have to specify an x_axis if you want additional axes.'
        additional_x_axes = (additional_x_axes or [])
        self._writer = tbX.SummaryWriter(log_dir, flush_secs=flush_secs)
        self._x_axis = x_axis
        self._additional_x_axes = additional_x_axes
        # Fallback step counter used when dump() is called without a step.
        self._default_step = 0
        # Number of samples drawn when logging scipy frozen distributions.
        self._histogram_samples = int(histogram_samples)
        self._added_graph = False
        # Deferred record callables; flushed together in dump().
        self._waiting_for_dump = []
        self._tf = tf
        self._warned_once = set()
        self._disable_warnings = False

    def types_accepted(self):
        """Return the record types this output accepts (TF graphs only when
        tensorflow is importable)."""
        if (self._tf is None):
            return (TabularInput,)
        else:
            return (TabularInput, self._tf.Graph)

    def record(self, data, prefix=''):
        """Queue tabular data for the next dump, or log a TF graph now."""
        if isinstance(data, TabularInput):
            self._waiting_for_dump.append(functools.partial(self._record_tabular, data))
        elif ((self._tf is not None) and isinstance(data, self._tf.Graph)):
            self._record_graph(data)
        else:
            raise ValueError('Unacceptable type.')

    def _record_tabular(self, data, step):
        """Write one tabular row, plotting scalars against configured axes."""
        if self._x_axis:
            nonexist_axes = []
            for axis in ([self._x_axis] + self._additional_x_axes):
                if (axis not in data.as_dict):
                    nonexist_axes.append(axis)
            if nonexist_axes:
                self._warn('{} {} exist in the tabular data.'.format(', '.join(nonexist_axes), ('do not' if (len(nonexist_axes) > 1) else 'does not')))
        for (key, value) in data.as_dict.items():
            if (isinstance(value, np.ScalarType) and (self._x_axis in data.as_dict)):
                # NOTE(review): the `is not` / `is` comparisons on keys below
                # rely on string identity (interning); `!=`/`==` would be the
                # safer spelling — confirm keys are always identical objects.
                if (self._x_axis is not key):
                    x = data.as_dict[self._x_axis]
                    self._record_kv(key, value, x)
                for axis in self._additional_x_axes:
                    if ((key is not axis) and (key in data.as_dict)):
                        x = data.as_dict[axis]
                        self._record_kv('{}/{}'.format(key, axis), value, x)
            else:
                self._record_kv(key, value, step)
            data.mark(key)

    def _record_kv(self, key, value, step):
        """Dispatch a single key/value pair to the matching writer method."""
        if isinstance(value, str):
            self._writer.add_text(key, value, step)
        elif isinstance(value, np.ScalarType):
            self._writer.add_scalar(key, value, step)
        elif isinstance(value, plt.Figure):
            self._writer.add_figure(key, value, step)
        elif (isinstance(value, np.ndarray) and (value.ndim == 5)):
            # 5D arrays are treated as video batches.
            self._writer.add_video(key, value, step, fps=15)
        elif isinstance(value, scipy.stats._distn_infrastructure.rv_frozen):
            # Sample the frozen distribution to build a histogram.
            shape = ((self._histogram_samples,) + value.mean().shape)
            self._writer.add_histogram(key, value.rvs(shape), step)
        elif isinstance(value, scipy.stats._multivariate.multi_rv_frozen):
            self._writer.add_histogram(key, value.rvs(self._histogram_samples), step)
        elif isinstance(value, Histogram):
            self._writer.add_histogram(key, value, step)

    def _record_graph(self, graph):
        """Serialize a TF graph def and append it as a raw TB event."""
        graph_def = graph.as_graph_def(add_shapes=True)
        event = tbX.proto.event_pb2.Event(graph_def=graph_def.SerializeToString())
        self._writer.file_writer.add_event(event)

    def dump(self, step=None):
        """Flush all queued tabular records at `step` (or the default step)."""
        for p in self._waiting_for_dump:
            p((step or self._default_step))
        self._waiting_for_dump.clear()
        for w in self._writer.all_writers.values():
            w.flush()
        self._default_step += 1

    def close(self):
        """Close the underlying SummaryWriter."""
        self._writer.close()

    def _warn(self, msg):
        """Emit each distinct warning once unless warnings are disabled."""
        if ((not self._disable_warnings) and (msg not in self._warned_once)):
            warnings.warn(colorize(msg, 'yellow'), NonexistentAxesWarning, stacklevel=3)
        self._warned_once.add(msg)
        return msg
# Bug fix: the line above this function was a bare `_model` expression — a
# NameError-at-import remnant of the stripped @register_model decorator
# (timm model-registry convention); restored below.
@register_model
def tf_efficientnetv2_b0(pretrained=False, **kwargs):
    """EfficientNet-V2-B0 (Tensorflow-compatible variant).

    TF ports need the TF BatchNorm epsilon and 'same'-style padding to
    reproduce the original weights' behavior.
    """
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    model = _gen_efficientnetv2_base('tf_efficientnetv2_b0', pretrained=pretrained, **kwargs)
    return model
class ActNormScale(nn.Module):
    """Scale-only activation normalization (ActNorm, as in Glow).

    log_scale is data-initialized on the first forward pass so that the
    per-channel mean square of the output is approximately one.
    """

    def __init__(self, num_channels):
        super().__init__()
        # One log-scale per channel, broadcast over batch and spatial dims.
        self.register_parameter('log_scale', torch.nn.Parameter(torch.zeros([1, num_channels, 1, 1])))
        self.initialized = False

    # Bug fix: without @property, `x * self.scale` and `view_as(self.scale)`
    # below would operate on a bound method instead of a tensor.
    @property
    def scale(self):
        """Per-channel multiplicative scale, exp(log_scale)."""
        return torch.exp(self.log_scale)

    def forward(self, x):
        if (not self.initialized):
            # Data-dependent init: scale = 1/sqrt(mean(x^2)) per channel.
            x_var = (x ** 2).mean(dim=(0, 2, 3)).view_as(self.scale)
            self.log_scale.data = ((- torch.log((x_var + 1e-06))) / 2)
            self.initialized = True
        # log|det J| = H*W * sum(log_scale), replicated per batch element.
        self._logdet = ((x.shape[2] * x.shape[3]) * self.log_scale.sum().expand(x.shape[0]))
        return (x * self.scale)

    def inverse(self, x):
        """Undo the forward scaling."""
        return (x / self.scale)

    def logdet(self):
        """Log-determinant computed during the most recent forward pass."""
        return self._logdet
class TokenAttention(nn.Module):
    """Computes (optionally query-conditioned) attention weights over tokens."""

    def __init__(self, encoding_size, query_dims=0, condition_attention=False, tokenwise_attention=False):
        super(TokenAttention, self).__init__()
        self.condition_attention = condition_attention
        if condition_attention:
            # Small MLP that scores [token encoding ; query] pairs.
            self.attn_MLP_hidden_dims = 32
            self.attn_input_dims = encoding_size + query_dims
            self.token_attention_F = nn.Sequential(
                nn.Linear(self.attn_input_dims, self.attn_MLP_hidden_dims),
                nn.Tanh(),
                nn.Linear(self.attn_MLP_hidden_dims, 1))
        else:
            self.token_attention_F = nn.Linear(encoding_size, 1)
        # Sigmoid gives independent per-token gates; softmax a distribution.
        self.attn_sm = nn.Sigmoid() if tokenwise_attention else nn.Softmax(dim=1)

    def forward(self, hidden_input_states: PaddedSequence, query_v_for_attention, normalize=True):
        """Return attention weights over the padded token dimension."""
        if not isinstance(hidden_input_states, PaddedSequence):
            raise TypeError('Expected an input of type PaddedSequence but got {}'.format(type(hidden_input_states)))
        if self.condition_attention:
            # Tile the query along the token axis and concatenate per token.
            tiled_query = query_v_for_attention.unsqueeze(dim=1)
            tiled_query = torch.cat(hidden_input_states.data.size()[1] * [tiled_query], dim=1)
            scorer_input = torch.cat([hidden_input_states.data, tiled_query], dim=2)
        else:
            scorer_input = hidden_input_states.data
        scores = self.token_attention_F(scorer_input)
        # Zero out padding positions both before and after the (soft)max.
        scores = scores * hidden_input_states.mask(on=1.0, off=0.0, size=scores.size(), device=scores.device)
        attn = self.attn_sm(scores)
        attn = attn * hidden_input_states.mask(on=1.0, off=0.0, size=attn.size(), device=attn.device)
        if normalize:
            totals = torch.sum(attn, dim=1).unsqueeze(1)
            attn = attn / totals
        return attn
def score_classifications(instances: List[dict], annotations: List[Annotation], docs: Dict[(str, List[str])], aopc_thresholds: List[float]) -> Dict[(str, float)]:
    """Aggregate classification and faithfulness metrics over instances.

    Computes accuracy/PRF against the annotated labels plus, when the
    corresponding per-instance scores are present: comprehensiveness and
    sufficiency (drops in predicted-class probability), their entropy/KL
    variants, and AOPC curves.
    """
    def compute_kl(cls_scores_, faith_scores_):
        # KL divergence of the faithfulness scores from the classifier
        # scores over the shared label keys.
        keys = list(cls_scores_.keys())
        cls_scores_ = [cls_scores_[k] for k in keys]
        faith_scores_ = [faith_scores_[k] for k in keys]
        return entropy(faith_scores_, cls_scores_)
    labels = list(set((x.classification for x in annotations)))
    label_to_int = {l: i for (i, l) in enumerate(labels)}
    key_to_instances = {inst['annotation_id']: inst for inst in instances}
    truth = []
    predicted = []
    for ann in annotations:
        truth.append(label_to_int[ann.classification])
        inst = key_to_instances[ann.annotation_id]
        predicted.append(label_to_int[inst['classification']])
    classification_scores = classification_report(truth, predicted, output_dict=True, target_names=labels, digits=3)
    accuracy = accuracy_score(truth, predicted)
    # Comprehensiveness: predicted-class probability drop when rationale
    # tokens are removed.
    if ('comprehensiveness_classification_scores' in instances[0]):
        comprehensiveness_scores = [(x['classification_scores'][x['classification']] - x['comprehensiveness_classification_scores'][x['classification']]) for x in instances]
        comprehensiveness_score = np.average(comprehensiveness_scores)
    else:
        comprehensiveness_score = None
        comprehensiveness_scores = None
    # Sufficiency: probability drop when keeping only rationale tokens.
    if ('sufficiency_classification_scores' in instances[0]):
        sufficiency_scores = [(x['classification_scores'][x['classification']] - x['sufficiency_classification_scores'][x['classification']]) for x in instances]
        sufficiency_score = np.average(sufficiency_scores)
    else:
        sufficiency_score = None
        sufficiency_scores = None
    if ('comprehensiveness_classification_scores' in instances[0]):
        comprehensiveness_entropies = [(entropy(list(x['classification_scores'].values())) - entropy(list(x['comprehensiveness_classification_scores'].values()))) for x in instances]
        comprehensiveness_entropy = np.average(comprehensiveness_entropies)
        comprehensiveness_kl = np.average(list((compute_kl(x['classification_scores'], x['comprehensiveness_classification_scores']) for x in instances)))
    else:
        comprehensiveness_entropies = None
        comprehensiveness_kl = None
        comprehensiveness_entropy = None
    if ('sufficiency_classification_scores' in instances[0]):
        sufficiency_entropies = [(entropy(list(x['classification_scores'].values())) - entropy(list(x['sufficiency_classification_scores'].values()))) for x in instances]
        sufficiency_entropy = np.average(sufficiency_entropies)
        sufficiency_kl = np.average(list((compute_kl(x['classification_scores'], x['sufficiency_classification_scores']) for x in instances)))
    else:
        sufficiency_entropies = None
        sufficiency_kl = None
        sufficiency_entropy = None
    if ('thresholded_scores' in instances[0]):
        (aopc_thresholds, aopc_comprehensiveness_score, aopc_comprehensiveness_points, aopc_sufficiency_score, aopc_sufficiency_points) = compute_aopc_scores(instances, aopc_thresholds)
    else:
        (aopc_thresholds, aopc_comprehensiveness_score, aopc_comprehensiveness_points, aopc_sufficiency_score, aopc_sufficiency_points) = (None, None, None, None, None)
    # NOTE(review): token_percentages is computed below but never included
    # in the returned dict — confirm whether it should be reported.
    if ('tokens_to_flip' in instances[0]):
        token_percentages = []
        for ann in annotations:
            # All documents referenced by any evidence group for this item.
            docids = set((ev.docid for ev in chain.from_iterable(ann.evidences)))
            inst = key_to_instances[ann.annotation_id]
            tokens = inst['tokens_to_flip']
            doc_lengths = sum((len(docs[d]) for d in docids))
            token_percentages.append((tokens / doc_lengths))
        token_percentages = np.average(token_percentages)
    else:
        token_percentages = None
    return {'accuracy': accuracy, 'prf': classification_scores, 'comprehensiveness': comprehensiveness_score, 'sufficiency': sufficiency_score, 'comprehensiveness_entropy': comprehensiveness_entropy, 'comprehensiveness_kl': comprehensiveness_kl, 'sufficiency_entropy': sufficiency_entropy, 'sufficiency_kl': sufficiency_kl, 'aopc_thresholds': aopc_thresholds, 'comprehensiveness_aopc': aopc_comprehensiveness_score, 'comprehensiveness_aopc_points': aopc_comprehensiveness_points, 'sufficiency_aopc': aopc_sufficiency_score, 'sufficiency_aopc_points': aopc_sufficiency_points}
def get_vgg(blocks, bias=True, use_bn=False, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Build a VGG network with the requested depth.

    `blocks` must be one of 11/13/16/19; raises ValueError otherwise. When
    `pretrained` is set, `model_name` must be provided so weights can be
    downloaded into `root`.
    """
    # Stage layouts keyed by total depth.
    depth_to_layers = {
        11: [1, 1, 2, 2, 2],
        13: [2, 2, 2, 2, 2],
        16: [2, 2, 3, 3, 3],
        19: [2, 2, 4, 4, 4],
    }
    if blocks not in depth_to_layers:
        raise ValueError('Unsupported VGG with number of blocks: {}'.format(blocks))
    layers = depth_to_layers[blocks]
    channels_per_layers = [64, 128, 256, 512, 512]
    channels = [[width] * count for (width, count) in zip(channels_per_layers, layers)]
    net = VGG(channels=channels, bias=bias, use_bn=use_bn, **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
def parse_args():
    """Parse command-line arguments for segmentor training.

    Also mirrors --local_rank into the LOCAL_RANK environment variable and
    folds the deprecated --options into --cfg-options (the two are mutually
    exclusive).
    """
    parser = argparse.ArgumentParser(description='Train a segmentor')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument('--load-from', help='the checkpoint file to load weights from')
    parser.add_argument('--resume-from', help='the checkpoint file to resume from')
    parser.add_argument('--no-validate', action='store_true', help='whether not to evaluate the checkpoint during training')
    # --gpus and --gpu-ids are alternative ways to select devices.
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument('--gpus', type=int, help='number of gpus to use (only applicable to non-distributed training)')
    group_gpus.add_argument('--gpu-ids', type=int, nargs='+', help='ids of gpus to use (only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument('--options', nargs='+', action=DictAction, help='--options is deprecated in favor of --cfg_options\' and it will not be supported in version v0.22.0. Override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--auto-resume', action='store_true', help='resume from the latest checkpoint automatically.')
    args = parser.parse_args()
    # Propagate the launcher-provided rank to the environment if unset.
    if ('LOCAL_RANK' not in os.environ):
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    if (args.options and args.cfg_options):
        raise ValueError('--options and --cfg-options cannot be both specified, --options is deprecated in favor of --cfg-options. --options will not be supported in version v0.22.0.')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options. --options will not be supported in version v0.22.0.')
        args.cfg_options = args.options
    return args
def register_criterion(name):
    """Decorator factory that registers a criterion in CRITERIA under `name`.

    The decorated object is stored in the module-level CRITERIA registry
    and returned unchanged, so the decorator is transparent to callers.
    """
    def _decorator(criterion):
        CRITERIA[name] = criterion
        return criterion
    return _decorator
def add_token(tokenizer, object, current_token, word_vector, glove):
    """Register a vocabulary item and append its GloVe vector.

    Adds `object` to both tokenizer maps under `current_token`, appends the
    lower-cased GloVe embedding (falling back to '<unk>' on any lookup
    failure, after printing the offending word), and returns the tokenizer
    together with the next free token id.
    """
    tokenizer['vocab2token'][object] = current_token
    tokenizer['token2vocab'][current_token] = object
    current_token += 1
    # The quoted literal '"walk"' is looked up in GloVe as plain 'walk'.
    if object == '"walk"':
        object = 'walk'
    try:
        word_vector.append(glove[object.lower()])
    except Exception:
        print(object)
        word_vector.append(glove['<unk>'])
    return (tokenizer, current_token)
class DentexChallenge():
def __init__(self, categories, prediction_file, gt_file, output_file='/output/metrics.json'):
self.categories = categories
self.prediction_file = prediction_file
self.gt_file = gt_file
self.output_file = output_file
self._case_results = {}
self._aggregate_results = {}
def load_data(self):
with open(self.gt_file) as f:
self.gt_data = json.load(f)
with open(self.prediction_file) as f:
self.prediction_data = json.load(f)
def separate_data_gt(self, dat):
    """Split the combined ground-truth COCO dict into one dict per category.

    For each entry of self.categories (e.g. 'categories_quadrant'), rewrites
    every annotation's 'category_id' from the category-specific id field and
    writes the result to /tmp/separated_data_<name>.json. Returns the
    per-category dicts (each including its 'file_path').
    """
    separated_data = {}
    for cat in self.categories:
        cat_list = cat.split('_')
        # e.g. 'categories_quadrant' -> id field 'category_id_quadrant'.
        cat_id_name = ((cat_list[0][:(- 3)] + 'y_id_') + cat_list[1])
        category_name = cat_list[1]
        # NOTE(review): .copy() is shallow, so the annotation dicts are
        # shared with the input and across categories; each iteration
        # mutates 'category_id' in place. The JSON file is written before
        # the next iteration, so the dumped files are consistent, but the
        # in-memory dicts all end with the last category's ids — confirm
        # this aliasing is intended.
        data = dat.copy()
        separated_data[category_name] = {'images': data['images'], 'annotations': [anno for anno in data['annotations']], 'categories': data[cat]}
        for anno in separated_data[category_name]['annotations']:
            anno['category_id'] = anno[cat_id_name]
        separated_data[category_name]['file_path'] = f'/tmp/separated_data_{category_name}.json'
        with open(separated_data[category_name]['file_path'], 'w') as f:
            f.write(json.dumps(separated_data[category_name]))
    return separated_data
def separate_data_predict(self, data):
separated_data_predict = {}
for i in range(len(self.categories)):
cat = self.categories[i]
cat_list = cat.split('_')
cat_id_name = ((cat_list[0][:(- 3)] + 'y_id_') + cat_list[1])
category_name = cat_list[1]
output_anno = []
data_boxes = data['boxes']
for anno in data_boxes:
bbox = [anno['corners'][0][0], anno['corners'][0][1], anno['corners'][3][0], anno['corners'][3][1]]
bbox[2] = (bbox[2] - bbox[0])
bbox[3] = (bbox[3] - bbox[1])
anno_ind = {'image_id': anno['corners'][0][2], 'category_id': int(anno['name'].split('-')[i]), 'bbox': bbox, 'score': anno['probability']}
output_anno.append(anno_ind)
separated_data_predict[category_name] = {'annotations': output_anno}
separated_data_predict[category_name]['file_path'] = f'/tmp/separated_data_predict_{category_name}.json'
with open(separated_data_predict[category_name]['file_path'], 'w') as f:
f.write(json.dumps(separated_data_predict[category_name]['annotations']))
return separated_data_predict
def score(self):
metrics_names = ['AP50', 'AP75', 'AP', 'AR']
separated_gt = self.separate_data_gt(self.gt_data)
job_pk = self.prediction_data[0]['pk']
r_path = self.prediction_data[0]['outputs'][0]['interface']['relative_path']
print(r_path)
prediction_path = f'/input/{job_pk}/output/{r_path}'
with open(prediction_path) as f:
self.prediction_data = json.load(f)
separated_prediction = self.separate_data_predict(self.prediction_data)
for cat in self.categories:
category_name = cat.split('_')[1]
gt = COCO(separated_gt[category_name]['file_path'])
prediction = gt.loadRes(separated_prediction[category_name]['file_path'])
cocoEval = COCOeval(gt, prediction, 'bbox')
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
dict_stats = {'AP': cocoEval.stats[0], 'AP50': cocoEval.stats[1], 'AP75': cocoEval.stats[2], 'AR': cocoEval.stats[8]}
self._case_results[category_name] = dict_stats
for metric in metrics_names:
self._aggregate_results[metric] = (sum([self._case_results[cat][metric] for cat in self._case_results]) / len(self._case_results))
def save(self):
metrics = {}
for category in self._case_results:
if (category == '1'):
cat_name = 'Quadrant'
if (category == '2'):
cat_name = 'Enumeration'
if (category == '3'):
cat_name = 'Diagnosis'
metrics[f'{cat_name}'] = self._case_results[category]
metrics['Aggregates'] = self._aggregate_results
with open(self.output_file, 'w') as f:
f.write(json.dumps(metrics))
def evaluate(self):
self.load_data()
self.score()
self.save() |
class BlenderbotSmallForConditionalGeneration(metaclass=DummyObject):
    """Auto-generated placeholder used when the 'torch' backend is absent.

    The real implementation lives in the torch-backed module; this stub only
    records the backends required to use it.
    """
    # Backends that must be installed before the real class is importable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Presumably raises an informative error when torch is missing —
        # see the requires_backends helper for the exact behavior.
        requires_backends(self, ['torch'])
class AbstractMazeWalk(physics.AbstractForce, metaclass=abc.ABCMeta):
    """Abstract force that moves sprites through a maze at constant speed.

    Subclasses override ``_step_sprite`` to define the movement policy; this
    base class provides the shared position/velocity bookkeeping.
    """

    def __init__(self, speed, maze_layer='walls'):
        # speed: step length per physics update; maze_layer: state layer that
        # holds the wall geometry.
        self._speed = speed
        self._maze_layer = maze_layer

    def reset(self, state):
        """Rebuild the maze representation from the current environment state."""
        self._maze = maze_lib.Maze.from_state(state, maze_layer=self._maze_layer)

    def step(self, *sprites, updates_per_env_step=1):
        """Advance every sprite by one physics update."""
        for sprite in sprites:
            self._step_sprite(sprite, updates_per_env_step=updates_per_env_step)

    def _step_sprite(self, sprite, updates_per_env_step=1):
        # Movement policy hook; intentionally a no-op in the abstract base.
        pass

    def _get_pos_vel(self, sprite, updates_per_env_step=1):
        """Return (position, velocity, entering_intersection) for *sprite*.

        ``entering_intersection`` is True when the next sub-step would carry
        the sprite past the center of its nearest grid cell.
        """
        position = sprite.position
        # Constant-speed motion along the sprite's current heading.
        velocity = (self._speed * np.sign(sprite.velocity))
        next_position = (position + (velocity / updates_per_env_step))
        # Center of the nearest grid cell (a corridor "intersection").
        intersection = ((self._maze.grid_side * self._get_nearest_point(position)) + self._maze.half_grid_side)
        dist_next_current = sum(np.abs((next_position - position)))
        dist_intersection_next = sum(np.abs((next_position - intersection)))
        # Bug fix: this distance must be measured from the *current* position.
        # The original reused next_position here, making it identical to
        # dist_intersection_next and breaking the crossing test below.
        dist_intersection_current = sum(np.abs((position - intersection)))
        entering_intersection = ((dist_next_current > dist_intersection_current) and (dist_next_current > dist_intersection_next))
        return (position, velocity, entering_intersection)

    def _get_nearest_point(self, position):
        # Integer grid indices of the cell containing *position*.
        nearest_inds = np.round(((position / self._maze.grid_side) - 0.5))
        return nearest_inds.astype(int)
def main():
    """Seed the global RNG and launch the k-fold run configured via FLAGS."""
    np.random.seed(SEED)
    kfold_kwargs = {
        'data_fn': FLAGS.data_fn,
        'method': FLAGS.method,
        'prop_missing': FLAGS.prop_missing,
        'max_num_feature': FLAGS.max_num_feature,
        'feature_selection': FLAGS.feature_selection,
        'which_half': FLAGS.which_half,
        'data_dir': FLAGS.data_dir,
        'cache_dir': FLAGS.cache_dir,
        'out_dir': FLAGS.out_dir,
    }
    run_kfold(**kfold_kwargs)
_on_pypy
def test_to_python():
    """Round-trip a bound C++ Matrix into NumPy and check sharing semantics.

    Verifies that the buffer-protocol view aliases the C++ object's storage
    and that Python-side references keep the underlying instance alive.
    """
    mat = m.Matrix(5, 5)
    assert (memoryview(mat).shape == (5, 5))
    assert (mat[(2, 3)] == 0)
    mat[(2, 3)] = 4
    assert (mat[(2, 3)] == 4)
    # copy=False: mat2 must alias the Matrix buffer rather than copying it.
    mat2 = np.array(mat, copy=False)
    assert (mat2.shape == (5, 5))
    assert (abs(mat2).sum() == 4)
    assert (mat2[(2, 3)] == 4)
    # Writing through the NumPy view mutates the shared buffer.
    mat2[(2, 3)] = 5
    assert (mat2[(2, 3)] == 5)
    cstats = ConstructorStats.get(m.Matrix)
    assert (cstats.alive() == 1)
    del mat
    pytest.gc_collect()
    # mat2 still references the buffer owner, so the instance survives.
    assert (cstats.alive() == 1)
    del mat2
    pytest.gc_collect()
    assert (cstats.alive() == 0)
    assert (cstats.values() == ['5x5 matrix'])
    # The whole round trip must not have copied or moved the matrix.
    assert (cstats.copy_constructions == 0)
    assert (cstats.copy_assignments == 0)
    assert (cstats.move_assignments == 0)
def verify_metadata(metadata, has_bounding_box):
    """AND *has_bounding_box* with whether every input metadata entry and every
    non-None ground-truth metadata entry carries a bounding box.
    """
    input_entries = metadata[MetadataKW.INPUT_METADATA]
    index_has_bounding_box = all(
        (MetadataKW.BOUNDING_BOX in entry) for entry in input_entries
    )
    for gt_entry in metadata[MetadataKW.GT_METADATA]:
        # Missing (None) ground-truth entries are skipped rather than failing.
        if gt_entry is None:
            continue
        index_has_bounding_box &= (MetadataKW.BOUNDING_BOX in gt_entry)
    has_bounding_box &= index_has_bounding_box
    return has_bounding_box
class PopularNegativeSampler(AbstractNegativeSampler):
    """Negative sampler that gives each user the most popular unseen items."""
    def code(cls):
        # Registry key for this sampler.
        # NOTE(review): the `cls` parameter suggests this was a @classmethod
        # whose decorator was lost — confirm against AbstractNegativeSampler.
        return 'popular'
    def generate_negative_samples(self):
        """Return {user: [items]}: up to sample_size popular items the user
        has not interacted with in train/val/test."""
        popular_items = self.items_by_popularity()
        negative_samples = {}
        print('Sampling negative items')
        for user in trange(self.user_count):
            # Items the user touched in any split must never be sampled.
            seen = set(self.train[user])
            seen.update(self.val[user])
            seen.update(self.test[user])
            samples = []
            # popular_items is sorted most-popular-first, so this greedily
            # takes the most popular unseen items.
            for item in popular_items:
                if (len(samples) == self.sample_size):
                    break
                if (item in seen):
                    continue
                samples.append(item)
            negative_samples[user] = samples
        return negative_samples
    def items_by_popularity(self):
        """Return all items sorted by interaction count, most popular first."""
        popularity = Counter()
        for user in range(self.user_count):
            popularity.update(self.train[user])
            popularity.update(self.val[user])
            popularity.update(self.test[user])
        popular_items = sorted(popularity, key=popularity.get, reverse=True)
        return popular_items
class Solver():
    """Training/evaluation harness for a model that jointly reconstructs its
    input and predicts a segmentation mask (two criterions, two outputs).
    """
    def __init__(self, model, modelDir, loadWeights, optimizer, criterions, iouThreshold):
        # criterions: one loss per model output, matched positionally with the
        # batch dict keys in train()/evaluate().
        self.model = model
        self.optimizer = optimizer
        self.device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
        self.modelDir = modelDir
        if loadWeights:
            self.loadCheckpoint(join(self.modelDir, 'model-checkpoint.pt'))
        self.model.to(self.device)
        self.criterions = criterions
        self.iouThreshold = iouThreshold
    def loadCheckpoint(self, checkpointPath):
        """Restore model and optimizer state from *checkpointPath*."""
        checkpoint = torch.load(checkpointPath)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        # Move optimizer state tensors to the GPU after loading.
        # NOTE(review): this calls .cuda() unconditionally even though
        # self.device may be CPU — confirm checkpoints are only loaded with
        # CUDA available.
        for state in self.optimizer.state.values():
            for (k, v) in state.items():
                if torch.is_tensor(v):
                    state[k] = v.cuda()
    def saveCheckpoint(self):
        """Persist model + optimizer state under modelDir."""
        checkpointPath = join(self.modelDir, 'model-checkpoint.pt')
        torch.save({'model_state_dict': self.model.state_dict(), 'optimizer_state_dict': self.optimizer.state_dict()}, checkpointPath)
    def formatScientific(self, number):
        # Small values in scientific notation, otherwise fixed four decimals.
        if (number < 0.001):
            return np.format_float_scientific(number, unique=False, precision=3)
        return '{:.04f}'.format(number)
    def train(self, dataloader, epochs, lod, printAt=10):
        """Optimize the model for *epochs* passes at level-of-detail *lod*,
        printing averaged losses every *printAt* epochs."""
        for epoch in range(epochs):
            batchCounter = 0
            losses = np.array(([0] * len(self.criterions)), dtype=np.float32)
            for batch in dataloader:
                y = self.model.forward(batch['x'].to(self.device), lod)
                self.model.zero_grad()
                # Outputs, criterions and batch keys are zipped positionally
                # (relies on the batch dict's insertion order).
                for (i, value) in enumerate(zip(y, self.criterions, batch)):
                    (output, criterion, key) = value
                    loss = criterion(output, batch[key].to(self.device))
                    loss.backward()
                    # NOTE(review): accumulating a live tensor into a NumPy
                    # array — loss.item() may be intended, and this likely
                    # fails for CUDA tensors; confirm.
                    losses[i] += loss
                self.optimizer.step()
                batchCounter += 1
            if ((epoch % printAt) == 0):
                losses = (losses / batchCounter)
                print('epoch {}, reconstruction/kld loss: {}, segmentation loss: {}'.format(epoch, self.formatScientific(losses[0]), self.formatScientific(losses[1])))
    def intersectionOverUnion(self, label, label_reconstruction):
        """IoU between the thresholded label and reconstruction.

        NOTE(review): the summed IoU is divided by batch size even though
        intersection/union were already pooled over the whole batch — verify
        the intended normalization.
        """
        intersection = (((label >= self.iouThreshold) & (label_reconstruction >= self.iouThreshold)) * 1.0)
        union = (((label >= self.iouThreshold) | (label_reconstruction >= self.iouThreshold)) * 1.0)
        iou = (intersection.sum() / union.sum())
        return (iou / label.shape[0])
    def evaluate(self, dataloader, lod):
        """Run the model over *dataloader* without optimizing; print averaged
        losses and IoU."""
        batchCounter = 0
        losses = np.array(([0] * len(self.criterions)), dtype=np.float32)
        iou = 0
        for batch in dataloader:
            y = self.model.forward(batch['x'].to(self.device), lod)
            for (i, value) in enumerate(zip(y, self.criterions, batch)):
                (output, criterion, key) = value
                loss = criterion(output, batch[key].to(self.device))
                losses[i] += loss
            # y[1] is the segmentation output; 't' holds the target mask.
            iou += self.intersectionOverUnion(label=batch['t'].to(self.device), label_reconstruction=y[1])
            batchCounter += 1
        losses /= batchCounter
        iou /= batchCounter
        print('evaluation, reconstruction loss: {}, segmentation loss: {}, IoU: {}'.format(self.formatScientific(losses[0]), self.formatScientific(losses[1]), self.formatScientific(iou)))
    def reconstruct(self, dataloader, lod, count):
        """Return up to *count* (input, target, reconstruction, segmentation)
        tensors computed from the first batch of *dataloader*."""
        batch = next(iter(dataloader))
        count = min(count, len(batch['x']))
        x = batch['x'][:count].to(self.device)
        t = batch['t'][:count].to(self.device)
        y = self.model.forward(x, lod)
        # y[0][0]: image reconstruction; y[1]: segmentation prediction.
        x_reconstruction = y[0][0]
        x_segmentation = y[1]
        return (x, t, x_reconstruction, x_segmentation)
    def saveReconstructions(self, dataloader, lod, count, output_dir):
        """Save input/output PNG pairs with the segmentation overlaid in green."""
        (x_ins, t_ins, x_outs, t_outs) = self.reconstruct(dataloader, lod, count)
        x_ins = x_ins.detach().cpu().numpy()
        t_ins = t_ins.detach().cpu().numpy()
        x_outs = x_outs.detach().cpu().numpy()
        t_outs = t_outs.detach().cpu().numpy()
        for (i, value) in enumerate(zip(x_ins, t_ins, x_outs, t_outs)):
            (x_in, t_in, x_out, t_out) = value
            # Replicate the single channel to RGB and move channels last (HWC)
            # for plt.imsave.
            x = np.stack(([x_in] * 3), axis=0).squeeze().transpose((1, 2, 0))
            t = np.stack(([t_in] * 3), axis=0).squeeze().transpose((1, 2, 0))
            mask = (t[(..., 0)] > self.iouThreshold)
            # Tint the masked region green-ish on the input image.
            x[mask] = (np.array([0, 1, 0.5]) * t[mask])
            plt.imsave(join(output_dir, 'res_{}_sample_{}_in.png'.format((2 ** lod), i)), x)
            x_r = np.stack(([x_out] * 3), axis=0).squeeze().transpose((1, 2, 0))
            t_r = np.stack(([t_out] * 3), axis=0).squeeze().transpose((1, 2, 0))
            mask = (t_r[(..., 0)] > self.iouThreshold)
            # NOTE(review): the mask comes from t_r but the tint values come
            # from t — t_r[mask] may have been intended; confirm.
            x_r[mask] = (np.array([0, 1, 0.5]) * t[mask])
            plt.imsave(join(output_dir, 'res_{}_sample_{}_out.png'.format((2 ** lod), i)), x_r)
class MoleculeModel(nn.Module):
    """Message-passing encoder followed by a feed-forward head for molecular
    property prediction, with an optional extra substructure encoder.
    """
    def __init__(self, args: TrainArgs, featurizer: bool=False):
        """
        :param args: training arguments (dataset type, layer sizes, dropout, ...).
        :param featurizer: when True, forward() returns the penultimate-layer
                           features instead of predictions.
        """
        super(MoleculeModel, self).__init__()
        self.args = args
        self.classification = (args.dataset_type == 'classification')
        self.multiclass = (args.dataset_type == 'multiclass')
        self.featurizer = featurizer
        self.output_size = args.num_tasks
        if self.multiclass:
            # One output per (task, class) pair; reshaped back in forward().
            self.output_size *= args.multiclass_num_classes
        if self.classification:
            self.sigmoid = nn.Sigmoid()
        if self.multiclass:
            self.multiclass_softmax = nn.Softmax(dim=2)
        self.create_encoder(args)
        self.create_ffn(args)
        initialize_weights(self)
    def create_encoder(self, args: TrainArgs) -> None:
        """Build the message-passing encoder (plus optional substructure encoder)."""
        self.encoder = MPN(args)
        if self.args.additional_encoder:
            self.substructures_encoder = SubstructureLayer(args)
    def create_ffn(self, args: TrainArgs) -> None:
        """Build the feed-forward prediction head."""
        self.multiclass = (args.dataset_type == 'multiclass')
        if self.multiclass:
            self.num_classes = args.multiclass_num_classes
        # The first linear layer's width depends on which encoders and extra
        # input features are active.
        if args.features_only:
            first_linear_dim = args.features_size
        elif self.args.additional_encoder:
            first_linear_dim = (args.substructures_hidden_size + args.hidden_size)
            if args.use_input_features:
                first_linear_dim += args.features_size
        else:
            first_linear_dim = args.hidden_size
            if args.use_input_features:
                first_linear_dim += args.features_size
        dropout = nn.Dropout(args.dropout)
        activation = get_activation_function(args.activation)
        if (args.ffn_num_layers == 1):
            ffn = [dropout, nn.Linear(first_linear_dim, self.output_size)]
        else:
            ffn = [dropout, nn.Linear(first_linear_dim, args.ffn_hidden_size)]
            for _ in range((args.ffn_num_layers - 2)):
                ffn.extend([activation, dropout, nn.Linear(args.ffn_hidden_size, args.ffn_hidden_size)])
            ffn.extend([activation, dropout, nn.Linear(args.ffn_hidden_size, self.output_size)])
        self.ffn = nn.Sequential(*ffn)
    def featurize(self, batch: Union[(List[str], List[Chem.Mol], BatchMolGraph)], features_batch: List[np.ndarray]=None) -> torch.FloatTensor:
        """Encode *batch* and run every FFN layer except the final linear one."""
        return self.ffn[:(- 1)](self.encoder(batch, features_batch))
    def forward(self, batch: Union[(List[str], List[Chem.Mol], BatchMolGraph)], substructures_batch: Union[(List[str], List[Chem.Mol], BatchMolGraphWithSubstructures)]=None, features_batch: List[np.ndarray]=None) -> torch.FloatTensor:
        """Predict properties for *batch* (or return features when
        featurizer=True)."""
        if self.featurizer:
            return self.featurize(batch, features_batch)
        if self.args.additional_encoder:
            # Concatenate the main and substructure encodings before the head.
            substructures_mol_o = self.substructures_encoder(substructures_batch)
            out = torch.cat((self.encoder(batch, features_batch), substructures_mol_o), dim=1)
            output = self.ffn(out)
        else:
            output = self.ffn(self.encoder(batch, features_batch))
        # Output activations are applied only at eval time (self.training False).
        if (self.classification and (not self.training)):
            output = self.sigmoid(output)
        if self.multiclass:
            # (batch, tasks*classes) -> (batch, tasks, classes).
            output = output.reshape((output.size(0), (- 1), self.num_classes))
            if (not self.training):
                output = self.multiclass_softmax(output)
        return output
_optimizer('adadelta')
class Adadelta(FairseqOptimizer):
    """Fairseq wrapper around ``torch.optim.Adadelta``."""
    def __init__(self, args, params):
        super().__init__(args)
        self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config)
    def add_args(parser):
        """Add Adadelta-specific command-line arguments.

        NOTE(review): the missing ``self`` suggests a stripped @staticmethod
        decorator — confirm against the other optimizer classes.
        """
        parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO', help='coefficient used for computing a running average of squared gradients')
        parser.add_argument('--adadelta-eps', type=float, default=1e-06, metavar='EPS', help='term added to the denominator to improve numerical stability')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
        parser.add_argument('--anneal-eps', action='store_true', help='flag to anneal eps')
    def optimizer_config(self):
        """Keyword arguments for torch.optim.Adadelta built from parsed args.

        NOTE(review): __init__ expands this as ``**self.optimizer_config``
        (no call), so this is presumably a @property whose decorator was
        stripped — confirm.
        """
        return {'lr': self.args.lr[0], 'rho': self.args.adadelta_rho, 'eps': self.args.adadelta_eps, 'weight_decay': self.args.weight_decay}
    def supports_flat_params(self):
        # This optimizer can operate on flattened parameter tensors.
        return True
def require_torch_or_tf(test_case):
    """Decorator: skip *test_case* unless PyTorch or TensorFlow is installed."""
    available = is_torch_available() or is_tf_available()
    skip_decorator = unittest.skipUnless(available, 'test requires PyTorch or TensorFlow')
    return skip_decorator(test_case)
def paser_cfgs(cfgs):
    """Parse an IPEX int8 configuration mapping.

    Args:
        cfgs: mapping of module_key -> state -> op configuration. The special
            state 'layer_output_infos' holds a *list* of op infos; every other
            state maps op_cfg_id -> op info carrying 'input_tensor_infos' and
            'output_tensor_infos' lists (entries without an 'id' are skipped).

    Returns:
        Tuple of:
          - ops_name: list of (module_key, state, index_or_id) names,
          - op_infos_from_cfgs: name -> op info,
          - input_tensor_ids_op_name: tensor id -> list of consumer op names,
          - output_tensor_ids_op_name: tensor id -> list of producer op names.
    """
    ops_name = []
    op_infos_from_cfgs = {}
    input_tensor_ids_op_name = {}
    output_tensor_ids_op_name = {}
    for module_key, states in cfgs.items():
        for state, state_cfg in states.items():
            if state == 'layer_output_infos':
                # Layer outputs are a plain list; the list index names each op.
                for index, op_info in enumerate(state_cfg):
                    name = (module_key, state, index)
                    ops_name.append(name)
                    op_infos_from_cfgs[name] = op_info
                continue
            for op_cfg_id, op_info in state_cfg.items():
                name = (module_key, state, op_cfg_id)
                # Names must be unique across the whole config (dict lookup
                # replaces the original O(n) list membership scan).
                assert name not in op_infos_from_cfgs, 'Please check IPEX int8 configure json whether have the same name ops'
                ops_name.append(name)
                op_infos_from_cfgs[name] = op_info
                # Index this op by every tensor id it consumes / produces.
                for tensor in op_info['input_tensor_infos']:
                    if 'id' in tensor:
                        input_tensor_ids_op_name.setdefault(tensor['id'], []).append(name)
                for tensor in op_info['output_tensor_infos']:
                    if 'id' in tensor:
                        output_tensor_ids_op_name.setdefault(tensor['id'], []).append(name)
    return (ops_name, op_infos_from_cfgs, input_tensor_ids_op_name, output_tensor_ids_op_name)
class ByoModelCfg():
    """Configuration for a 'Bring-Your-Own' network backbone.

    NOTE(review): the bare annotations and dataclasses.field defaults strongly
    suggest this was decorated with @dataclass before extraction — without the
    decorator, the field(...) values are plain class attributes. Confirm.
    """
    # Per-stage block configs; an entry may itself be a tuple of block cfgs.
    blocks: Tuple[(Union[(ByoBlockCfg, Tuple[(ByoBlockCfg, ...)])], ...)]
    # Downsampling layer type used between stages.
    downsample: str = 'conv1x1'
    # Stem convolution style and optional pooling layer.
    stem_type: str = '3x3'
    stem_pool: Optional[str] = 'maxpool'
    # Stem output channels.
    stem_chs: int = 32
    # Global channel-width multiplier applied across stages.
    width_factor: float = 1.0
    num_features: int = 0
    zero_init_last: bool = True
    fixed_input_size: bool = False
    # Activation / normalization layer names resolved by the model builder.
    act_layer: str = 'relu'
    norm_layer: str = 'batchnorm'
    # Optional attention / self-attention modules with their kwargs.
    attn_layer: Optional[str] = None
    attn_kwargs: dict = field(default_factory=(lambda : dict()))
    self_attn_layer: Optional[str] = None
    self_attn_kwargs: dict = field(default_factory=(lambda : dict()))
    # Extra kwargs forwarded to every block.
    block_kwargs: Dict[(str, Any)] = field(default_factory=(lambda : dict()))
_config
def il_blind():
    """Config preset: imitation learning with a 'blind' (features-only,
    no sidetuning networks) perception setup.

    NOTE(review): `cfg` is built but never returned — the stripped decorator
    above (`_config`) presumably captures this function's locals, sacred-style.
    Confirm against the experiment framework before relying on it.
    """
    cfg = {}
    # Perception uses Taskonomy features only; all sidetune components disabled.
    cfg['learner'] = {'model_kwargs': {'base_kwargs': {'perception_unit_kwargs': {'extra_kwargs': {'main_perception_network': 'TaskonomyFeaturesOnlyNet', 'sidetune_kwargs': {'base_class': None, 'base_weights_path': None, 'base_kwargs': {}, 'side_class': None, 'side_weights_path': None, 'side_kwargs': {}}}}}}}
    cfg['training'] = {'sources': ['map', 'target']}
class TextDocumentItem(TypedDict):
    """An item to transfer a text document from client to server
    (Language Server Protocol ``TextDocumentItem`` structure).
    """
    # The text document's URI.
    uri: DocumentUri
    # The text document's language identifier (e.g. 'python').
    languageId: string
    # Version number of this document; per the LSP spec it increases after
    # each change, including undo/redo.
    version: integer
    # The full content of the opened text document.
    text: string
def output_combined_files(path, dataset_name, output_files_dict, category_names, write):
    """Concatenate per-directory txt file lists into combined train/val/test
    files per category, then write a summary CSV of example counts.

    Args:
        path: output directory for combined files and the summary CSV.
        dataset_name: prefix for all generated filenames.
        output_files_dict: {dir_name: {category_name: [train, val, test paths]}}.
        category_names: categories to combine; the summary expects
            'success_only', 'task_failure_only' and 'error_failure_only'.
        write: when False, perform a dry run that only prints what would happen.
    """
    # Regroup: category -> [train paths, val paths, test paths].
    categorized_train_val_test_filenames = {category_name: [[], [], []] for category_name in category_names}
    for (dir_name, category_dict) in output_files_dict.items():
        for (category_name, paths) in category_dict.items():
            for i in range(len(paths)):
                categorized_train_val_test_filenames[category_name][i].append(paths[i])
    subset_names = ['train', 'val', 'test']
    summary_dict = {category_name: [] for category_name in category_names}
    for (category_name, train_val_test_file_paths) in categorized_train_val_test_filenames.items():
        for i in range(len(train_val_test_file_paths)):
            subset_name = subset_names[i]
            output_filename = '{0}_combined_{1}_{2}_files.txt'.format(dataset_name, category_name, subset_name)
            output_file_path = os.path.join(path, output_filename)
            print('>Process combined file for {} {} files'.format(category_name, subset_name))
            if write:
                # Concatenate every source txt file into the combined output.
                with open(output_file_path, 'w') as out_file:
                    for txt_file_path in train_val_test_file_paths[i]:
                        print('>>Opening txt file: {}'.format(extract_filename_from_url(txt_file_path)))
                        with open(txt_file_path, 'r') as in_file:
                            out_file.write(in_file.read())
                print(('>>Combined file saved as %s' % output_file_path))
            else:
                print('>>Dry run. Use --write to actually output the combined files')
                for txt_file_path in train_val_test_file_paths[i]:
                    print('>>>Reference txt file: {}'.format(extract_filename_from_url(txt_file_path)))
            # Count lines (examples) across the source files for the summary.
            size = 0
            for txt_file_path in train_val_test_file_paths[i]:
                try:
                    with open(txt_file_path, 'r') as f:
                        size += sum((1 for _ in f))
                except FileNotFoundError:
                    # Typo fix: 'summariing' -> 'summarizing' in the user-facing message.
                    print('\n A file was not found at the expected path when validating and \n summarizing the dataset. This problem is most likely caused by \n not running with --write flag. Re-run the program with --write \n flag. The summary below may be inaccurate.\n\n The problematic file is {}\n '.format(extract_filename_from_url(txt_file_path)))
            summary_dict[category_name].append(size)
    # Build the split-size summary CSV from the three expected categories.
    (success_train_len, success_val_len, success_test_len) = summary_dict['success_only']
    (failure_train_len, failure_val_len, failure_test_len) = summary_dict['task_failure_only']
    (error_train_len, error_val_len, error_test_len) = summary_dict['error_failure_only']
    dataset_splits_csv = 'subset, train_count, val_count, test_count\n'
    dataset_splits_csv += 'success_only, {0}, {1}, {2}\n'.format(success_train_len, success_val_len, success_test_len)
    dataset_splits_csv += 'task_and_error_failure, {0}, {1}, {2}\n'.format((failure_train_len + error_train_len), (failure_val_len + error_val_len), (failure_test_len + error_test_len))
    dataset_splits_csv += 'task_failure_only, {0}, {1}, {2}\n'.format(failure_train_len, failure_val_len, failure_test_len)
    dataset_splits_csv += 'error_failure_only, {0}, {1}, {2}\n'.format(error_train_len, error_val_len, error_test_len)
    dataset_splits_csv_filename = (dataset_name + '_combined_summary.csv')
    print(((('\n' + dataset_splits_csv_filename) + '\n') + dataset_splits_csv))
    csv_path = os.path.join(path, dataset_splits_csv_filename)
    if write:
        with open(csv_path, 'w') as file_object:
            file_object.write(dataset_splits_csv)
        print(('>CSV file saved as %s' % csv_path))
    else:
        print(('>>Dry run. The CSV file will be saved as %s' % csv_path))
        print('>>Use --write to actually output the CSV file.')
def _check_and_coerce_cfg_value_type(replacement, original, key, full_key):
original_type = type(original)
replacement_type = type(replacement)
if (replacement_type == original_type):
return replacement
def conditional_cast(from_type, to_type):
if ((replacement_type == from_type) and (original_type == to_type)):
logger.warning('cast {} to {}', from_type, to_type)
return (True, to_type(replacement))
else:
return (False, None)
casts = [(tuple, list), (list, tuple), (bool, int)]
try:
casts.append((str, unicode))
except Exception:
pass
for (from_type, to_type) in casts:
(converted, converted_value) = conditional_cast(from_type, to_type)
if converted:
return converted_value
raise ValueError('Type mismatch ({} vs. {}) with values ({} vs. {}) for config key: {}'.format(original_type, replacement_type, original, replacement, full_key)) |
_materialize('core')
class CastF32(Cast):
    """Cast operator specialized to produce float32 outputs."""
    # The only supported output dtype combination.
    out_dtypes = [(DType.float32,)]

    def __init__(self):
        # Delegate to the generic Cast with a hard-wired target dtype.
        super(CastF32, self).__init__(DType.float32)
def create_lmsm_solver(outfname, net_name, max_iter=10000, lr=0.1, weight_decay=0.0005, snapshot_dir='snapshots', solver_mode='GPU'):
    """Instantiate the Caffe solver prototxt template and write it to *outfname*.

    Reads 'model/cifar_solver.prototxt', substitutes its placeholder tokens
    with the supplied hyper-parameters, and writes the result via
    write_to_file.
    """
    # Use a context manager so the template handle is closed deterministically
    # (the original leaked the file object returned by open()).
    with open('model/cifar_solver.prototxt', 'r') as template_file:
        txt = template_file.read()
    # Placeholder token -> replacement value.
    substitutions = {
        '_NET_NAME_': net_name,
        '_MAX_ITER_': str(max_iter),
        '_LR_': str(lr),
        '_WEIGHT_DECAY_': str(weight_decay),
        '_SNAPSHOT_DIR_': snapshot_dir,
        '_SOLVER_MODE_': solver_mode,
    }
    for placeholder, value in substitutions.items():
        txt = txt.replace(placeholder, value)
    write_to_file(outfname, txt)
class WeightedTreeLSTMLayer(object):
    """One layer of a weighted tree-LSTM (DyNet).

    A layer may have several candidate (h, c) history sources; they are
    combined either by a softmax over learned mixing weights or, with path
    dropout, by sampling a single source. Layers are chained via next_layer.
    """
    def __init__(self, model, dim, W, Wf, Uf, dropout, dropout_mask_x, dropout_mask_h, path_dropout, device, init_to_zero=False):
        self.model = model
        self.device = device
        self.dim = dim
        # Gate parameter expressions for this computation graph.
        self.W = dynet.parameter(W)
        self.Wf = dynet.parameter(Wf)
        self.Uf = dynet.parameter(Uf)
        self.bias = dynet.inputVector([1], device=self.device)
        # Lazily computed combined states (see calculate_h_t / calculate_c_t).
        self.h_t = None
        self.c_t = None
        self.next_layer = None
        # Candidate previous states and their (pre-softmax) mixing weights.
        self.h_t_sources = []
        self.c_t_sources = []
        self.weights = []
        if init_to_zero:
            self.h_t_sources = [dynet.vecInput(dim, device=self.device)]
            self.c_t_sources = [dynet.vecInput(dim, device=self.device)]
            self.weights = [dynet.scalarInput(0.0, device=self.device)]
        self.dropout = dropout
        self.dropout_mask_x = None
        self.dropout_mask_h = None
        if self.dropout:
            self.dropout_mask_x = dropout_mask_x
            self.dropout_mask_h = dropout_mask_h
        self.path_dropout = path_dropout
        self.path_selected = None
    def add_history(self, c_t_stack, h_t_stack, weight):
        """Consume one state per layer from the stacks and recurse downward."""
        self.c_t_sources.append(c_t_stack.pop(0))
        self.h_t_sources.append(h_t_stack.pop(0))
        self.weights.append(weight)
        if self.next_layer:
            self.next_layer.add_history(c_t_stack, h_t_stack, weight)
    def get_path(self, weights=None):
        """Sample (once, then cache) the index of the history source to follow."""
        if (self.path_selected is None):
            assert (weights is not None)
            if (len(weights) == 1):
                self.path_selected = 0
            else:
                self.path_selected = util.weightedChoice(weights, range(len(weights)), apply_softmax=True)
        return self.path_selected
    def concat_weights(self):
        # Freeze the weight list into a single no-backprop vector expression.
        self.weights = dynet.nobackprop(dynet.concatenate(self.weights))
        if (self.next_layer is not None):
            self.next_layer.concat_weights()
    def apply_gumbel_noise_to_weights(self, temperature=1.0, noise=None):
        """Add shared Gumbel noise to the weights (Gumbel-softmax style sampling)."""
        (shape, batch) = self.weights.dim()
        if (shape == (1,)):
            return
        if (noise is None):
            noise = dynet.random_gumbel(shape, batch_size=batch)
        self.weights += noise
        if (temperature != 1.0):
            self.weights *= (1.0 / temperature)
        if (self.next_layer is not None):
            # Reuse the same noise expression for all layers in the chain.
            self.next_layer.apply_gumbel_noise_to_weights(temperature, noise)
    def weights_to_argmax(self):
        """Replace soft weights with a (near) one-hot vector at their argmax."""
        (shape, batch) = self.weights.dim()
        if (shape == (1,)):
            return
        m_is = numpy.argmax(self.weights.npvalue(), 0)
        if (batch == 1):
            self.weights = dynet.inputTensor([((- 99999) if (i != m_is) else 99999) for i in range(shape[0])], device=self.device)
        else:
            self.weights = dynet.inputTensor([[((- 99999) if (i != m_i) else 99999) for m_i in m_is] for i in range(shape[0])], batched=True, device=self.device)
        if (self.next_layer is not None):
            self.next_layer.weights_to_argmax()
    def calculate_h_t(self):
        """Lazily combine the candidate hidden states into this layer's h_t."""
        if (self.h_t is None):
            if (len(self.h_t_sources) == 1):
                self.h_t = self.h_t_sources[0]
            elif self.path_dropout:
                # Path dropout: follow a single sampled history source.
                self.h_t = self.h_t_sources[self.get_path([w.scalar_value() for w in self.weights])]
            else:
                # Soft mixture over all sources via softmax-ed weights.
                self.h_t = (dynet.concatenate_cols(self.h_t_sources) * dynet.to_device(dynet.softmax(self.weights), self.device))
        return self.h_t
    def calculate_c_t(self):
        """Lazily combine the candidate cell states into this layer's c_t."""
        if (self.c_t is None):
            if (len(self.c_t_sources) == 1):
                self.c_t = self.c_t_sources[0]
            elif self.path_dropout:
                self.c_t = self.c_t_sources[self.get_path([w.scalar_value() for w in self.weights])]
            else:
                self.c_t = (dynet.concatenate_cols(self.c_t_sources) * dynet.to_device(dynet.softmax(self.weights), self.device))
        return self.c_t
    def add_input(self, x_t):
        """Run one LSTM step on x_t; return the (c, h) stacks for this layer
        and every layer below."""
        x_t = dynet.to_device(x_t, self.device)
        h_t = self.calculate_h_t()
        if self.dropout:
            x_t = dynet.cmult(x_t, self.dropout_mask_x)
            h_t = dynet.cmult(h_t, self.dropout_mask_h)
        bias = self.bias
        # Fused output/candidate gates: [o; g] = W [x; h; 1].
        gates = (self.W * dynet.concatenate([x_t, h_t, bias]))
        o = dynet.logistic(dynet.pickrange(gates, 0, self.dim))
        g = dynet.tanh(dynet.pickrange(gates, self.dim, (self.dim * 2)))
        Wfx = (self.Wf * dynet.concatenate([x_t, bias]))
        if ((len(self.h_t_sources) == 1) or self.path_dropout):
            if (len(self.h_t_sources) == 1):
                idx = 0
            else:
                idx = self.get_path()
            c_t = self.c_t_sources[idx]
            # Single forget gate; input gate is tied as i = 1 - f.
            f_k = dynet.logistic((Wfx + (self.Uf * h_t)))
            i = (1.0 - f_k)
            c_t = (dynet.cmult(f_k, c_t) + dynet.cmult(i, g))
        else:
            # One forget gate per history source, each scaled by its softmax
            # weight; the input gate complements their sum.
            weights = dynet.to_device(dynet.softmax(self.weights), self.device)
            if self.dropout:
                f_k = [(dynet.logistic((Wfx + (self.Uf * dynet.cmult(h, self.dropout_mask_h)))) * w) for (h, w) in zip(self.h_t_sources, weights)]
            else:
                f_k = [(dynet.logistic((Wfx + (self.Uf * h))) * w) for (h, w) in zip(self.h_t_sources, weights)]
            i = (1.0 - dynet.esum(f_k))
            c_t = (dynet.esum([dynet.cmult(f, c) for (f, c) in zip(f_k, self.c_t_sources)]) + dynet.cmult(i, g))
        h_t = dynet.cmult(o, dynet.tanh(c_t))
        if (self.next_layer is not None):
            (c_stack, h_stack) = self.next_layer.add_input(h_t)
            return (([c_t] + c_stack), ([h_t] + h_stack))
        else:
            return ([c_t], [h_t])
    def output(self):
        """Hidden state of the deepest layer in the chain."""
        if (self.next_layer is None):
            return self.calculate_h_t()
        else:
            return self.next_layer.output()
    def all_layer_outputs(self):
        """Hidden states of this layer and every layer below, top first."""
        if (self.next_layer is None):
            return [self.calculate_h_t()]
        return ([self.calculate_h_t()] + self.next_layer.all_layer_outputs())
    def all_layer_states(self):
        """Cell states of this layer and every layer below, top first."""
        if (self.next_layer is None):
            return [self.calculate_c_t()]
        # Bug fix: recurse into all_layer_states; the original mistakenly
        # called all_layer_outputs here, mixing hidden states into the
        # cell-state list for all deeper layers.
        return ([self.calculate_c_t()] + self.next_layer.all_layer_states())
_model
def dla46_c(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """Build the compact DLA-46-C variant of Deep Layer Aggregation."""
    cfg = default_cfgs['dla46_c']
    model = DLA(
        levels=[1, 1, 1, 2, 2, 1],
        channels=[16, 32, 64, 64, 128, 256],
        block=DlaBottleneck,
        num_classes=num_classes,
        in_chans=in_chans,
        **kwargs,
    )
    model.default_cfg = cfg
    if pretrained:
        # Fetch and load the pretrained weights described by the default cfg.
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
class HardtanhActivationMixin():
def init_activation(self, upperbound=1.0, eps=1e-08, **kwargs):
self._activation_func = nn.Hardtanh(0, upperbound)
self.upperbound = torch.tensor(upperbound)
self.eps = eps
self.activation_func = (lambda x: (self._activation_func(x) + eps))
self.log_activation_func = (lambda x: torch.log(self.activation_func(x)))
def upperbound_cond_int(self, history=None, dim=None) -> float:
if (dim is None):
dim = self.dim
return ((self.upperbound + (10.0 * self.eps)) * dim) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.