code stringlengths 101 5.91M |
|---|
class EncoderConv(nn.Module):
    """Encoder building block: 3x3 conv (padding 1) -> GroupNorm -> ReLU."""

    def __init__(self, in_ch, out_ch):
        super(EncoderConv, self).__init__()
        self.conv = nn.Conv2d(in_ch, out_ch, 3, padding=1)
        # Group count is out_ch // 4, so out_ch is expected to be a multiple of 4.
        self.gn = nn.GroupNorm(out_ch // 4, out_ch)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Same three stages as the sequential form: conv, normalize, activate.
        return self.relu(self.gn(self.conv(x)))
class XGBoostModelBuilder(ModelBuilder):
    """ModelBuilder that creates XGBoost models with n_jobs pinned to the
    number of CPUs allotted per trial."""

    def __init__(self, model_type='regressor', cpus_per_trial=1, **xgb_configs):
        self.model_type = model_type
        self.model_config = dict(xgb_configs)
        # A user-supplied n_jobs is always overridden by cpus_per_trial; warn
        # when they disagree so the override is not silent.
        if 'n_jobs' in xgb_configs and xgb_configs['n_jobs'] != cpus_per_trial:
            logger.warning(f"Found n_jobs={xgb_configs['n_jobs']} in xgb_configs. It will not take effect since we assign cpus_per_trials(={cpus_per_trial}) to xgboost n_jobs. Please throw an issue if you do need different values for xgboost n_jobs and cpus_per_trials.")
        self.model_config['n_jobs'] = cpus_per_trial

    def build(self, config):
        """Instantiate an XGBoost model and build it with the trial config."""
        model = XGBoost(model_type=self.model_type, config=self.model_config)
        model._build(**config)
        return model
def test_torch_Accuracy():
    """Accuracy metric accumulates correct counts across successive calls."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import Accuracy

    metric = Accuracy()
    # First batch: 3 of 4 predictions match the targets.
    metric(torch.tensor([0, 2, 3, 4]), torch.tensor([1, 2, 3, 4]))
    assert metric.compute() == 0.75
    # Second batch: 1 of 4 matches, so the running accuracy is 4/8.
    metric(torch.tensor([0, 2, 3, 4]), torch.tensor([1, 1, 2, 4]))
    assert metric.compute() == 0.5
def extract_comments(directory):
    """Walk *directory* and render every non-__init__ ``.py`` file's comments
    to a markdown file, mirroring the source tree under ``docs/``.

    Relies on the module-level ``get_comments_str`` helper to do the actual
    extraction; this function only handles traversal and file I/O.
    """
    for parent, _dir_names, file_names in os.walk(directory):
        for file_name in file_names:
            base, ext = os.path.splitext(file_name)
            if ext != '.py' or file_name == '__init__.py':
                continue
            doc = get_comments_str(os.path.join(parent, file_name))
            directory_out = os.path.join('docs', parent.replace(directory, ''))
            # exist_ok avoids the exists()/makedirs() race of the original.
            os.makedirs(directory_out, exist_ok=True)
            # Context manager fixes the original's handle leak: the file was
            # never closed if write() raised.
            with open(os.path.join(directory_out, base + '.md'), 'w') as output_file:
                output_file.write(doc)
def get_class_name():
    """Return the job class matching the current OS, or None if unsupported."""
    import platform

    system = platform.system()
    if system == 'Windows':
        return WindowsJob
    if system == 'Linux':
        return LinuxJob
    return None
def miliseconds_to_frame_index(miliseconds: int, fps: int) -> int:
    """Convert a timestamp in milliseconds to a frame index at *fps* frames/s."""
    seconds = miliseconds / 1000
    # int() truncates toward zero, matching frame indexing from 0.
    return int(fps * seconds)
def mlperf_log_epoch_start(iteration, iters_per_epoch):
    """Emit MLPerf BLOCK_START/EPOCH_START markers at epoch boundaries.

    Iteration 0 starts epoch 1; thereafter, a boundary is any iteration that
    is an exact multiple of iters_per_epoch. Other iterations log nothing.
    """
    if iteration == 0:
        epoch = 1
    elif iteration % iters_per_epoch == 0:
        epoch = iteration // iters_per_epoch
    else:
        return
    log_start(key=constants.BLOCK_START, metadata={'first_epoch_num': epoch, 'epoch_count': 1})
    log_start(key=constants.EPOCH_START, metadata={'epoch_num': epoch})
class TestSparseWeightedAverage(unittest.TestCase):
    """CUDA tests for sparse_weighted_average against a dense gather+sum
    reference implementation."""

    @property
    def device(self):
        # Fix: this was a plain method, so every `.to(self.device)` below
        # received a bound method instead of the device string.
        return 'cuda'

    @classmethod
    def setUpClass(cls):
        # Fix: without @classmethod, unittest never invoked this as the
        # class-level setup hook and the CUDA skip never applied.
        if not torch.cuda.is_available():
            raise unittest.SkipTest('No CUDA capable device detected')

    def _zero_grad(self, Q, K):
        # Reset accumulated gradients in-place between backward passes.
        for x in [Q, K]:
            if x.grad is not None:
                x.grad[...] = 0

    def test_correctness(self):
        """Output and gradients must match the dense reference within 1e-4."""
        N = 2
        H = 4
        L = 3000
        S = 3000
        E = 32
        k = 32
        weights = torch.rand(N, H, L, k).to(self.device).requires_grad_(True)
        values = torch.randn(N, H, S, E).to(self.device).requires_grad_(True)
        attn = torch.randn(N, H, L, S).to(self.device).requires_grad_(False)
        (topk_v, topk) = torch.topk(attn, k, dim=(- 1))
        self._zero_grad(weights, values)
        # Dense reference: gather the top-k values then weighted-sum them.
        values_selected = values[(torch.arange(N).view(N, 1, 1, 1).to(self.device), torch.arange(H).view(1, H, 1, 1).to(self.device), topk)]
        output = (weights.unsqueeze((- 1)) * values_selected).sum((- 2))
        output.sum().backward()
        grad = [torch.clone(weights.grad), torch.clone(values.grad)]
        self._zero_grad(weights, values)
        output_hat = sparse_weighted_average(weights, values, topk)
        output_hat.sum().backward()
        grad_hat = [torch.clone(weights.grad), torch.clone(values.grad)]
        self.assertLess(torch.abs((output - output_hat)).max(), 0.0001)
        for (g1, g2) in zip(grad, grad_hat):
            self.assertLess(torch.abs((g1 - g2)).max(), 0.0001)

    def test_forward(self):
        """Forward pass on structured (arange) inputs matches the reference."""
        N = 5
        H = 2
        L = 100
        S = 100
        E = 32
        k = 5
        weights = torch.arange(0, k).expand(N, H, L, k).to(self.device).float().requires_grad_(True)
        values = torch.arange(0, E).expand(N, H, L, E).to(self.device).float().requires_grad_(True)
        attn = torch.arange(0, S).expand(N, H, L, S).to(self.device).float().requires_grad_(False)
        (topk_v, topk) = torch.topk(attn, k, dim=(- 1))
        values_selected = values[(torch.arange(N).view(N, 1, 1, 1).to(self.device), torch.arange(H).view(1, H, 1, 1).to(self.device), topk)]
        output = (weights.unsqueeze((- 1)) * values_selected).sum((- 2))
        output_hat = sparse_weighted_average(weights, values, topk)
        self.assertLess(torch.abs((output - output_hat)).max(), 0.0001)

    # Fix: the two benchmark guards below were stripped to bare tuple
    # expressions ((os.getenv(...), 'no benchmarks')); restored as skipUnless.
    @unittest.skipUnless(os.getenv('BENCHMARK_TESTS', ''), 'no benchmarks')
    def test_benchmark_forward(self):
        """Time the forward kernel after a warm-up loop (CUDA events)."""
        N = 12
        H = 8
        L = 2000
        S = 2000
        E = 32
        k = 32
        weights = torch.rand(N, H, L, k).to(self.device).requires_grad_(True)
        values = torch.randn(N, H, S, E).to(self.device).requires_grad_(True)
        attn = torch.randn(N, H, L, S).to(self.device).requires_grad_(False)
        (topk_v, topk) = torch.topk(attn, k, dim=(- 1))
        topk = topk.contiguous()
        # Warm-up so the measured run is free of one-time costs.
        for i in range(2000):
            output_hat = sparse_weighted_average(weights, values, topk)
        s = torch.cuda.Event(enable_timing=True)
        e = torch.cuda.Event(enable_timing=True)
        s.record()
        output_hat = sparse_weighted_average(weights, values, topk)
        e.record()
        torch.cuda.synchronize()
        t_sparse = s.elapsed_time(e)
        print('T_sparse Forward:{}'.format(t_sparse))

    @unittest.skipUnless(os.getenv('BENCHMARK_TESTS', ''), 'no benchmarks')
    def test_benchmark_backward(self):
        """Time a single backward pass after warm-up (CUDA events)."""
        N = 12
        H = 8
        L = 2000
        S = 2000
        E = 32
        k = 32
        weights = torch.rand(N, H, L, k).to(self.device).requires_grad_(True)
        values = torch.randn(N, H, S, E).to(self.device).requires_grad_(True)
        attn = torch.randn(N, H, L, S).to(self.device).requires_grad_(False)
        (topk_v, topk) = torch.topk(attn, k, dim=(- 1))
        topk = topk.contiguous()
        for i in range(2000):
            output_hat = sparse_weighted_average(weights, values, topk)
        self._zero_grad(weights, values)
        s = torch.cuda.Event(enable_timing=True)
        e = torch.cuda.Event(enable_timing=True)
        s.record()
        output_hat.sum().backward()
        e.record()
        torch.cuda.synchronize()
        t_sparse = s.elapsed_time(e)
        print('T_sparse Backward:{}'.format(t_sparse))
def build_model(base_model_cfg='vgg'):
    """Construct a TUN network on the requested backbone ('vgg' or 'resnet').

    Returns None implicitly for any other config string, matching callers
    that only ever pass the two supported values.
    """
    backbones = {'vgg': vgg16, 'resnet': resnet50}
    if base_model_cfg in backbones:
        backbone = backbones[base_model_cfg]()
        return TUN_bone(base_model_cfg, *extra_layer(base_model_cfg, backbone))
def make_layers(cfg, batch_norm=False):
    """Build a VGG-style feature extractor from a config list.

    Each int in *cfg* adds a 3x3 conv (no padding) + optional BatchNorm +
    ReLU; the string 'M' adds a 2x2 max-pool.
    """
    layers = []
    in_channels = 3
    for entry in cfg:
        if entry == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        print('Reflection Padding')
        conv2d = nn.Conv2d(in_channels, entry, kernel_size=3)
        if batch_norm:
            layers.extend([conv2d, nn.BatchNorm2d(entry), nn.ReLU(inplace=True)])
        else:
            layers.extend([conv2d, nn.ReLU(inplace=True)])
        in_channels = entry
    return nn.Sequential(*layers)
def graph_mean_and_std(categories, means, stds, ymin=0, ymax=500, xaxis='', filename=''):
    """Plot per-category means with std error bars and save as a .pgf file
    under graph_images/."""
    plt.errorbar(categories, means, stds, linestyle='None', marker='^')
    plt.ylim(ymin, ymax)
    plt.xlabel(xaxis, fontsize=12)
    plt.ylabel('Mean Difference in Stopping Epoch', fontsize=12)
    # .pgf output is intended for direct inclusion in LaTeX documents.
    plt.savefig('graph_images/' + filename + '.pgf')
def get_scheduler(optimizer, policy, nepoch_fix=None, nepoch=None, decay_step=None):
    """Create a learning-rate scheduler for *optimizer*.

    policy:
        'lambda'  - constant LR for the first *nepoch_fix* epochs, then
                    linear decay toward zero at *nepoch*.
        'step'    - multiply LR by 0.1 every *decay_step* epochs.
        'plateau' - ReduceLROnPlateau on a monitored 'min' metric.

    Raises:
        NotImplementedError: for any other policy string.
    """
    if policy == 'lambda':
        def lambda_rule(epoch):
            # 1.0 until nepoch_fix, then linearly down to ~0 at nepoch.
            return 1.0 - max(0, epoch - nepoch_fix) / float(nepoch - nepoch_fix + 1)
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=decay_step, gamma=0.1)
    elif policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        # Bug fix: the original *returned* the exception object instead of
        # raising it, and passed `policy` as a second constructor argument
        # instead of %-formatting the message.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % policy)
    return scheduler
def create_cpp_version_headers(dir_path, license_c):
    """Write version.h into *dir_path*: license text followed by an include
    guard defining the current git SHA."""
    version_lines = [
        '\n',
        '// Automatic generated header with version information.',
        '#ifndef VERSION_H_',
        '#define VERSION_H_',
        '#define L2A_VERSION_GIT_SHA_HEAD_ "{}"'.format(get_git_sha()),
        '#endif',
        '',
    ]
    header_path = os.path.join(dir_path, 'version.h')
    with open(header_path, 'w') as version_header:
        version_header.write(license_c + '\n'.join(version_lines))
def test_mobilenet_v2():
    """Smoke-test MobileNetV2 over input sizes and width multipliers."""
    for size in [224, 192, 160, 128]:
        for width_mult in [1.0, 0.75, 0.5, 0.25]:
            # Reload the base config so every combination starts clean.
            cfg.merge_from_file('configs/cifar/mbv2_cifar100_224_e100.yaml')
            cfg.MODEL.COMPRESSION.WIDTH_MULTIPLIER = width_mult
            round_nearest = cfg.MODEL.COMPRESSION.ROUND_NEAREST
            # Scale the head width with the multiplier and round it to the
            # configured divisor.
            feature_dims = make_divisible(cfg.MODEL.HEAD.FEATURE_DIMS * width_mult, round_nearest)
            cfg.MODEL.HEAD.FEATURE_DIMS = feature_dims
            print(f's: {size}, wn: {width_mult}, feature_dims: {feature_dims}')
            model = MobileNetV2(cfg)
            data = torch.randn(1, 3, size, size)
            outputs = model(data)[KEY_OUTPUT]
            print(outputs.shape)
            # CIFAR-100 head: one sample, 100 class logits.
            assert outputs.shape == (1, 100)
class StableDiffusionDiffEditPipeline(metaclass=DummyObject):
    """Placeholder pipeline used when torch/transformers are not installed;
    every entry point raises through requires_backends."""

    _backends = ['torch', 'transformers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        # Bug fix: the `cls` parameter shows these were meant to be
        # classmethods (matching the real pipeline API); without the
        # decorator, calling on the class put the first positional arg
        # into `cls`.
        requires_backends(cls, ['torch', 'transformers'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])
class PermutationProblem(Problem[PermutationSolution], ABC):
    """Abstract base class for optimization problems whose solutions are
    permutations."""

    def __init__(self):
        super().__init__()
def MakeInterAll(Info, InteractionType):
    """Build the InterAll two-body interaction table for a 1D periodic chain.

    Info must supply 'L' (chain length) and 'model' (a quoted model name).
    InteractionType selects the Hubbard interaction variant and is ignored
    for Spin/Kondo. Returns a list of 8-integer rows, or False when the
    model is unsupported.
    """
    L = Info['L']
    model = Info['model']
    # Nearest-neighbour pairs (origin, forward) on a ring of length L.
    ring = [(x, (x + 1) % L) for x in range(L)]
    if model == '"Fermion Hubbard"':
        if InteractionType == 'Normal':
            table = [[o, 0, f, 0, f, 1, o, 1] for o, f in ring]
        elif InteractionType == 'Diagonal':
            table = [[o, 0, f, 1, f, 1, o, 0] for o, f in ring]
        else:
            # Unknown interaction type for Hubbard: empty table (as before).
            table = []
    elif model in ('"Spin"', '"Kondo"'):
        # Spin and Kondo share the same exchange pattern.
        table = [[o, 0, o, 1, f, 1, f, 0] for o, f in ring]
    else:
        return False
    return table
def quaternion_linear(input, r_weight, i_weight, j_weight, k_weight, bias=True):
    """Apply a quaternion linear transform via the real-matrix representation
    of the Hamilton product.

    The four weight blocks are stacked into one real kernel so a single
    matrix multiply performs the quaternion multiplication. *bias* is either
    a tensor to add or None.
    """
    # Each column block encodes how one output component (r, i, j, k)
    # mixes the four input components.
    col_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=0)
    col_i = torch.cat([i_weight, r_weight, -k_weight, j_weight], dim=0)
    col_j = torch.cat([j_weight, k_weight, r_weight, -i_weight], dim=0)
    col_k = torch.cat([k_weight, -j_weight, i_weight, r_weight], dim=0)
    kernel = torch.cat([col_r, col_i, col_j, col_k], dim=1)
    if input.dim() == 2:
        # 2-D input: use the fused bias-add matmul when a bias is given.
        if bias is not None:
            return torch.addmm(bias, input, kernel)
        return torch.mm(input, kernel)
    # Batched input: matmul broadcasts over leading dimensions.
    out = torch.matmul(input, kernel)
    return out + bias if bias is not None else out
def normalized(a, axis=(- 1), order=2):
    """Return *a* divided by its vector norm along *axis*.

    Zero-norm vectors are left unchanged (their norm is replaced by 1 to
    avoid division by zero).
    """
    norms = np.atleast_1d(np.linalg.norm(a, order, axis))
    norms[norms == 0] = 1
    return a / np.expand_dims(norms, axis)
def predict_cases_fastest(model, list_of_lists, output_filenames, folds, num_threads_preprocessing, num_threads_nifti_save, segs_from_prev_stage=None, do_tta=True, mixed_precision=True, overwrite_existing=False, all_in_gpu=True, step_size=0.5, checkpoint_name='model_final_checkpoint'):
    """Run segmentation inference for a list of cases and export the results.

    model: path to the trained model folder (postprocessing.json is also
        looked up there).
    list_of_lists: one list of input image files per case.
    output_filenames: one output path per case (normalized to end in .nii.gz).
    folds: which folds' checkpoints to load; multiple folds are ensembled by
        averaging softmax outputs.
    segs_from_prev_stage: optional per-case segmentations from a previous
        cascade stage; must align 1:1 with output_filenames.
    do_tta: enable mirror-based test-time augmentation.
    overwrite_existing: when False, cases whose output file already exists
        are skipped.
    """
    assert (len(list_of_lists) == len(output_filenames))
    if (segs_from_prev_stage is not None):
        assert (len(segs_from_prev_stage) == len(output_filenames))
    # Worker pool used for asynchronous nifti export and postprocessing.
    pool = Pool(num_threads_nifti_save)
    results = []
    # Normalize every output name to '<name>.nii.gz' and create its folder.
    cleaned_output_files = []
    for o in output_filenames:
        (dr, f) = os.path.split(o)
        if (len(dr) > 0):
            maybe_mkdir_p(dr)
        if (not f.endswith('.nii.gz')):
            (f, _) = os.path.splitext(f)
            f = (f + '.nii.gz')
        cleaned_output_files.append(join(dr, f))
    if (not overwrite_existing):
        print('number of cases:', len(list_of_lists))
        # Keep only cases whose output file does not exist yet.
        not_done_idx = [i for (i, j) in enumerate(cleaned_output_files) if (not isfile(j))]
        cleaned_output_files = [cleaned_output_files[i] for i in not_done_idx]
        list_of_lists = [list_of_lists[i] for i in not_done_idx]
        if (segs_from_prev_stage is not None):
            segs_from_prev_stage = [segs_from_prev_stage[i] for i in not_done_idx]
        print('number of cases that still need to be predicted:', len(cleaned_output_files))
    print('emptying cuda cache')
    torch.cuda.empty_cache()
    print('loading parameters for folds,', folds)
    (trainer, params) = load_model_and_checkpoint_files(model, folds, mixed_precision=mixed_precision, checkpoint_name=checkpoint_name)
    print('starting preprocessing generator')
    # Background generator that yields preprocessed cases as they are ready.
    preprocessing = preprocess_multithreaded(trainer, list_of_lists, cleaned_output_files, num_threads_preprocessing, segs_from_prev_stage)
    print('starting prediction...')
    for preprocessed in preprocessing:
        print('getting data from preprocessor')
        (output_filename, (d, dct)) = preprocessed
        print('got something')
        if isinstance(d, str):
            # Large cases are spilled to disk by the preprocessor; load the
            # array and remove the temporary file.
            print('what I got is a string, so I need to load a file')
            data = np.load(d)
            os.remove(d)
            d = data
        # Per-fold buffers; softmax is only needed when ensembling >1 fold.
        all_softmax_outputs = np.zeros((len(params), trainer.num_classes, *d.shape[1:]), dtype=np.float16)
        all_seg_outputs = np.zeros((len(params), *d.shape[1:]), dtype=int)
        print('predicting', output_filename)
        for (i, p) in enumerate(params):
            trainer.load_checkpoint_ram(p, False)
            res = trainer.predict_preprocessed_data_return_seg_and_softmax(d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params['mirror_axes'], use_sliding_window=True, step_size=step_size, use_gaussian=True, all_in_gpu=all_in_gpu, mixed_precision=mixed_precision)
            if (len(params) > 1):
                all_softmax_outputs[i] = res[1]
            all_seg_outputs[i] = res[0]
        print('aggregating predictions')
        if (len(params) > 1):
            # Ensemble: average softmax over folds, then argmax over classes.
            softmax_mean = np.mean(all_softmax_outputs, 0)
            seg = softmax_mean.argmax(0)
        else:
            seg = all_seg_outputs[0]
        print('applying transpose_backward')
        transpose_forward = trainer.plans.get('transpose_forward')
        if (transpose_forward is not None):
            # Undo the axis permutation applied during preprocessing.
            transpose_backward = trainer.plans.get('transpose_backward')
            seg = seg.transpose([i for i in transpose_backward])
        print('initializing segmentation export')
        # Export asynchronously so the GPU can start on the next case.
        results.append(pool.starmap_async(save_segmentation_nifti, ((seg, output_filename, dct, 0, None),)))
        print('done')
    print('inference done. Now waiting for the segmentation export to finish...')
    _ = [i.get() for i in results]
    results = []
    pp_file = join(model, 'postprocessing.json')
    if isfile(pp_file):
        print('postprocessing...')
        # Keep a copy of the postprocessing config next to the predictions.
        shutil.copy(pp_file, os.path.dirname(output_filenames[0]))
        (for_which_classes, min_valid_obj_size) = load_postprocessing(pp_file)
        results.append(pool.starmap_async(load_remove_save, zip(output_filenames, output_filenames, ([for_which_classes] * len(output_filenames)), ([min_valid_obj_size] * len(output_filenames)))))
        _ = [i.get() for i in results]
    else:
        print(('WARNING! Cannot run postprocessing because the postprocessing file is missing. Make sure to run consolidate_folds in the output folder of the model first!\nThe folder you need to run this in is %s' % model))
    pool.close()
    pool.join()
class Stage(Enum):
    """Phases of processing a submission, in order."""

    # Values are the lowercase names used for display/serialization.
    COMPILATION = 'compilation'
    EXECUTION = 'execution'
    VERIFICATION = 'verification'
def validation(df, valDir, inPklCoarse, network, trainMode):
    """Evaluate coarse-flow keypoint alignment accuracy on a validation set.

    df: dataframe with 'scene', 'source_image', 'target_image' and keypoint
        columns XA/YA (source) and XB/YB (target).
    valDir: root directory containing the scene image folders.
    inPklCoarse: per-sample 2x3 affine parameters (numpy) used as the coarse
        initialization.
    network: dict of sub-networks with keys 'netFeatCoarse', 'netCorr',
        'netFlowCoarse'.
    trainMode: unused in this body.  # NOTE(review): kept for interface parity
    Returns precAllAlign / totalAlign: fraction of keypoints aligned within
    each of the 8 pixel thresholds.
    """
    strideNet = 16
    minSize = 480
    precAllAlign = np.zeros(8)
    totalAlign = 0
    # 8 logarithmically spaced pixel-error thresholds from 1 to 36.
    pixelGrid = np.around(np.logspace(0, np.log10(36), 8).reshape((- 1), 8))
    # Put every sub-network into eval mode.
    for key in list(network.keys()):
        network[key].eval()
    with torch.no_grad():
        for i in tqdm(range(len(df))):
            scene = df['scene'][i]
            # Source image + keypoints, resized so the min side fits minSize
            # rounded to the network stride.
            Is = Image.open(os.path.join(os.path.join(valDir, scene), df['source_image'][i])).convert('RGB')
            (Is, Xs, Ys) = ResizeMinResolution(minSize, Is, df['XA'][i], df['YA'][i], strideNet)
            (Isw, Ish) = Is.size
            IsTensor = transforms.ToTensor()(Is).unsqueeze(0).cuda()
            # Target image + keypoints.
            It = Image.open(os.path.join(os.path.join(valDir, scene), df['target_image'][i])).convert('RGB')
            (It, Xt, Yt) = ResizeMinResolution(minSize, It, df['XB'][i], df['YB'][i], strideNet)
            (Itw, Ith) = It.size
            ItTensor = transforms.ToTensor()(It).unsqueeze(0).cuda()
            # Identity sampling grid in [-1, 1] at the target resolution.
            gridY = torch.linspace((- 1), 1, steps=ItTensor.size(2)).view(1, (- 1), 1, 1).expand(1, ItTensor.size(2), ItTensor.size(3), 1)
            gridX = torch.linspace((- 1), 1, steps=ItTensor.size(3)).view(1, 1, (- 1), 1).expand(1, ItTensor.size(2), ItTensor.size(3), 1)
            grid = torch.cat((gridX, gridY), dim=3).cuda()
            # Warp the source image with the precomputed coarse affine.
            bestParam = inPklCoarse[i]
            flowGlobalT = F.affine_grid(torch.from_numpy(bestParam).unsqueeze(0).cuda(), ItTensor.size())
            IsSample = F.grid_sample(IsTensor, flowGlobalT)
            # Predict a residual coarse flow from the feature correlation.
            featsSample = F.normalize(network['netFeatCoarse'](IsSample))
            featt = F.normalize(network['netFeatCoarse'](ItTensor))
            corr21 = network['netCorr'](featt, featsSample)
            (_, flowCoarse) = model.predFlowCoarse(corr21, network['netFlowCoarse'], grid)
            # Compose the affine warp with the predicted flow field.
            flowFinal = F.grid_sample(flowGlobalT.permute(0, 3, 1, 2), flowCoarse).permute(0, 2, 3, 1).contiguous()
            (pixelDiffT, nbAlign) = alignmentError(Itw, Ith, Isw, Ish, Xs, Ys, Xt, Yt, flowFinal, pixelGrid)
            precAllAlign += pixelDiffT
            totalAlign += nbAlign
    return (precAllAlign / totalAlign)
def test_pred_files() -> None:
    """Sanity-check the PRED_FILES registry: enough entries, known suffixes,
    and every referenced file present on disk."""
    assert len(PRED_FILES) >= 6
    allowed_suffixes = ('.csv', '.csv.gz', '.json', '.json.gz')
    assert all(path.endswith(allowed_suffixes) for path in PRED_FILES.values())
    for model, path in PRED_FILES.items():
        msg = f'Missing preds file for model={model!r}, expected at path={path!r}'
        assert os.path.isfile(path), msg
class FasterRCNN(object):
    """Faster R-CNN architecture (PaddlePaddle static-graph implementation).

    Composed of a backbone, an RPN head, a RoI extractor and a bbox head,
    with an optional FPN neck. Sub-components are injected by the config
    system through __inject__.
    """
    __category__ = 'architecture'
    __inject__ = ['backbone', 'rpn_head', 'bbox_assigner', 'roi_extractor', 'bbox_head', 'fpn']

    def __init__(self, backbone, rpn_head, roi_extractor, bbox_head='BBoxHead', bbox_assigner='BBoxAssigner', rpn_only=False, fpn=None):
        super(FasterRCNN, self).__init__()
        self.backbone = backbone
        self.rpn_head = rpn_head
        self.bbox_assigner = bbox_assigner
        self.roi_extractor = roi_extractor
        self.bbox_head = bbox_head
        self.fpn = fpn
        # When True, build() stops after the RPN and returns raw proposals.
        self.rpn_only = rpn_only

    def build(self, feed_vars, mode='train'):
        """Build the train or test graph from the feed-variable dict.

        Returns a loss dict in train mode; predictions (or a proposal dict
        when rpn_only is set) in test mode.
        """
        if (mode == 'train'):
            required_fields = ['gt_class', 'gt_bbox', 'is_crowd', 'im_info']
        else:
            required_fields = ['im_shape', 'im_info']
        self._input_check(required_fields, feed_vars)
        im = feed_vars['image']
        im_info = feed_vars['im_info']
        if (mode == 'train'):
            gt_bbox = feed_vars['gt_bbox']
            is_crowd = feed_vars['is_crowd']
        else:
            im_shape = feed_vars['im_shape']
        # Cast the image to fp16 for the backbone when mixed precision is
        # active, then cast the feature maps back to fp32 for the heads.
        mixed_precision_enabled = (mixed_precision_global_state() is not None)
        if mixed_precision_enabled:
            im = fluid.layers.cast(im, 'float16')
        body_feats = self.backbone(im)
        body_feat_names = list(body_feats.keys())
        if mixed_precision_enabled:
            body_feats = OrderedDict(((k, fluid.layers.cast(v, 'float32')) for (k, v) in body_feats.items()))
        if (self.fpn is not None):
            (body_feats, spatial_scale) = self.fpn.get_output(body_feats)
        rois = self.rpn_head.get_proposals(body_feats, im_info, mode=mode)
        if (mode == 'train'):
            rpn_loss = self.rpn_head.get_loss(im_info, gt_bbox, is_crowd)
            for var in ['gt_class', 'is_crowd', 'gt_bbox', 'im_info']:
                assert (var in feed_vars), '{} has no {}'.format(feed_vars, var)
            # Sample proposals and assign labels/targets for the second stage.
            outs = self.bbox_assigner(rpn_rois=rois, gt_classes=feed_vars['gt_class'], is_crowd=feed_vars['is_crowd'], gt_boxes=feed_vars['gt_bbox'], im_info=feed_vars['im_info'])
            rois = outs[0]
            labels_int32 = outs[1]
            bbox_targets = outs[2]
            bbox_inside_weights = outs[3]
            bbox_outside_weights = outs[4]
        elif self.rpn_only:
            # Rescale proposals back to the original image coordinate system.
            im_scale = fluid.layers.slice(im_info, [1], starts=[2], ends=[3])
            im_scale = fluid.layers.sequence_expand(im_scale, rois)
            rois = (rois / im_scale)
            return {'proposal': rois}
        if (self.fpn is None):
            # Without FPN, RoI features come from the last backbone stage.
            body_feat = body_feats[body_feat_names[(- 1)]]
            roi_feat = self.roi_extractor(body_feat, rois)
        else:
            roi_feat = self.roi_extractor(body_feats, rois, spatial_scale)
        if (mode == 'train'):
            loss = self.bbox_head.get_loss(roi_feat, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)
            loss.update(rpn_loss)
            total_loss = fluid.layers.sum(list(loss.values()))
            loss.update({'loss': total_loss})
            return loss
        else:
            pred = self.bbox_head.get_prediction(roi_feat, rois, im_info, im_shape)
            return pred

    def build_multi_scale(self, feed_vars):
        """Build a test graph that runs inference at several image scales
        (and optional flips), collecting per-scale bbox/score outputs."""
        required_fields = ['image', 'im_info', 'im_shape']
        self._input_check(required_fields, feed_vars)
        result = {}
        im_shape = feed_vars['im_shape']
        result['im_shape'] = im_shape
        for i in range((len(self.im_info_names) // 2)):
            # im_info_names holds alternating (image, im_info) names per scale.
            im = feed_vars[self.im_info_names[(2 * i)]]
            im_info = feed_vars[self.im_info_names[((2 * i) + 1)]]
            body_feats = self.backbone(im)
            body_feat_names = list(body_feats.keys())
            if (self.fpn is not None):
                (body_feats, spatial_scale) = self.fpn.get_output(body_feats)
            rois = self.rpn_head.get_proposals(body_feats, im_info, mode='test')
            if (self.fpn is None):
                body_feat = body_feats[body_feat_names[(- 1)]]
                roi_feat = self.roi_extractor(body_feat, rois)
            else:
                roi_feat = self.roi_extractor(body_feats, rois, spatial_scale)
            pred = self.bbox_head.get_prediction(roi_feat, rois, im_info, im_shape, return_box_score=True)
            bbox_name = ('bbox_' + str(i))
            score_name = ('score_' + str(i))
            # Flipped scales are tagged so postprocessing can un-flip them.
            if ('flip' in im.name):
                bbox_name += '_flip'
                score_name += '_flip'
            result[bbox_name] = pred['bbox']
            result[score_name] = pred['score']
        return result

    def _input_check(self, require_fields, feed_vars):
        # Fail fast when a required feed variable is missing.
        for var in require_fields:
            assert (var in feed_vars), '{} has no {} field'.format(feed_vars, var)

    def _inputs_def(self, image_shape):
        """Declare shape/dtype/LoD level for every possible input field."""
        im_shape = ([None] + image_shape)
        inputs_def = {'image': {'shape': im_shape, 'dtype': 'float32', 'lod_level': 0}, 'im_info': {'shape': [None, 3], 'dtype': 'float32', 'lod_level': 0}, 'im_id': {'shape': [None, 1], 'dtype': 'int64', 'lod_level': 0}, 'im_shape': {'shape': [None, 3], 'dtype': 'float32', 'lod_level': 0}, 'gt_bbox': {'shape': [None, 4], 'dtype': 'float32', 'lod_level': 1}, 'gt_class': {'shape': [None, 1], 'dtype': 'int32', 'lod_level': 1}, 'is_crowd': {'shape': [None, 1], 'dtype': 'int32', 'lod_level': 1}, 'is_difficult': {'shape': [None, 1], 'dtype': 'int32', 'lod_level': 1}}
        return inputs_def

    def build_inputs(self, image_shape=[3, None, None], fields=['image', 'im_info', 'im_id', 'gt_bbox', 'gt_class', 'is_crowd'], multi_scale=False, num_scales=(- 1), use_flip=None, use_dataloader=True, iterable=False):
        """Create the feed variables (and optionally a DataLoader).

        NOTE(review): the mutable default args are part of the existing
        interface; `fields` is deep-copied below before being mutated.
        """
        inputs_def = self._inputs_def(image_shape)
        fields = copy.deepcopy(fields)
        if multi_scale:
            (ms_def, ms_fields) = multiscale_def(image_shape, num_scales, use_flip)
            inputs_def.update(ms_def)
            fields += ms_fields
            # Remember the (image, im_info) name pairs for build_multi_scale.
            self.im_info_names = (['image', 'im_info'] + ms_fields)
        feed_vars = OrderedDict([(key, fluid.data(name=key, shape=inputs_def[key]['shape'], dtype=inputs_def[key]['dtype'], lod_level=inputs_def[key]['lod_level'])) for key in fields])
        loader = (fluid.io.DataLoader.from_generator(feed_list=list(feed_vars.values()), capacity=16, use_double_buffer=True, iterable=iterable) if use_dataloader else None)
        return (feed_vars, loader)

    def train(self, feed_vars):
        """Build the training graph."""
        return self.build(feed_vars, 'train')

    def eval(self, feed_vars, multi_scale=None):
        """Build the evaluation graph (multi-scale when requested)."""
        if multi_scale:
            return self.build_multi_scale(feed_vars)
        return self.build(feed_vars, 'test')

    def test(self, feed_vars):
        """Build the inference graph."""
        return self.build(feed_vars, 'test')
class Mask(object):
    """Sequence augmentation: zero out a random fraction *gamma* of items."""

    def __init__(self, gamma=0.7):
        self.gamma = gamma

    def __call__(self, sequence):
        # Work on a deep copy so the caller's sequence is never mutated.
        masked = copy.deepcopy(sequence)
        num_to_mask = int(self.gamma * len(masked))
        # Pick distinct positions uniformly at random and overwrite with 0.
        for pos in random.sample(range(len(masked)), k=num_to_mask):
            masked[pos] = 0
        return masked
_module()
class PANet(TextDetectorMixin, SingleStageTextDetector):
    """Single-stage PANet text detector.

    Both base classes are initialized explicitly (rather than via a
    cooperative super() chain) because they take unrelated argument lists.
    """

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, show_score=False, init_cfg=None):
        # Detector part first, then the visualization mixin.
        SingleStageTextDetector.__init__(self, backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg)
        TextDetectorMixin.__init__(self, show_score)
def test_caffe2xavierinit():
    """Caffe2XavierInit with layer='Conv2d' must touch only Conv2d biases."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    init_fn = Caffe2XavierInit(bias=0.1, layer='Conv2d')
    init_fn(model)
    # The targeted Conv2d bias is filled with the configured constant.
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.1))
    # The Linear layer is not targeted, so its bias must remain untouched.
    assert not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.1))
def decide_function(path, label, sl, xs):
    """Return True when the watermark attack changes the model's prediction.

    path: image file to attack.
    label: ground-truth class index (anything int()-convertible).
    sl, xs: watermark placement parameters forwarded to
        add_watermark_to_image (uses the module-level `watermark`).
    """
    img = Image.open(path)
    attack_image = add_watermark_to_image(img, xs, watermark, sl).convert('RGB')
    predict = label_model(model, attack_image).cpu().detach().numpy()
    # Idiom fix: collapse the redundant if/else that returned True/False.
    # bool() keeps the return type a Python bool (np comparison yields np.bool_).
    return bool(np.argmax(predict) != int(label))
def generate_model(input_shape_tra_cdr3, input_shape_tra_vgene, input_shape_tra_jgene, input_shape_trb_cdr3, input_shape_trb_vgene, input_shape_trb_jgene, num_outputs):
    """Build a paired TCR alpha/beta-chain Keras model.

    Each chain contributes four normalized logit streams (CDR3 k-mer conv,
    CDR3 length, V gene, J gene); the eight streams are summed and
    re-normalized into the final logits. The extra `weights` input feeds
    the NormalizeInitialization layers.
    NOTE(review): Length, MaskCopy, GlobalPoolWithMask and
    NormalizeInitialization are project-defined layers — semantics assumed
    from their names; verify against their definitions.
    """
    kmer_size = 4
    # --- model inputs: per-chain CDR3 sequences plus V/J gene encodings ---
    features_tra_cdr3 = Input(shape=input_shape_tra_cdr3)
    features_tra_vgene = Input(shape=input_shape_tra_vgene)
    features_tra_jgene = Input(shape=input_shape_tra_jgene)
    features_trb_cdr3 = Input(shape=input_shape_trb_cdr3)
    features_trb_vgene = Input(shape=input_shape_trb_vgene)
    features_trb_jgene = Input(shape=input_shape_trb_jgene)
    weights = Input(shape=[])
    # --- alpha (TRA) chain branch ---
    features_tra_mask = Masking(mask_value=0.0)(features_tra_cdr3)
    features_tra_length = Length()(features_tra_mask)
    # Conv over k-mers of the CDR3; the mask is trimmed by kmer_size-1 to
    # match the valid-convolution output length.
    logits_tra_cdr3 = Conv1D(num_outputs, kmer_size)(features_tra_cdr3)
    logits_tra_cdr3_mask = MaskCopy(trim_front=(kmer_size - 1))([logits_tra_cdr3, features_tra_mask])
    logits_tra_cdr3_pool = GlobalPoolWithMask()(logits_tra_cdr3_mask)
    logits_tra_cdr3_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_cdr3_pool, weights])
    logits_tra_length = Dense(num_outputs)(features_tra_length)
    logits_tra_length_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_length, weights])
    logits_tra_vgene = Dense(num_outputs)(features_tra_vgene)
    logits_tra_vgene_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_vgene, weights])
    logits_tra_jgene = Dense(num_outputs)(features_tra_jgene)
    logits_tra_jgene_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_jgene, weights])
    # --- beta (TRB) chain branch (mirror of the alpha branch) ---
    features_trb_mask = Masking(mask_value=0.0)(features_trb_cdr3)
    features_trb_length = Length()(features_trb_mask)
    logits_trb_cdr3 = Conv1D(num_outputs, kmer_size)(features_trb_cdr3)
    logits_trb_cdr3_mask = MaskCopy(trim_front=(kmer_size - 1))([logits_trb_cdr3, features_trb_mask])
    logits_trb_cdr3_pool = GlobalPoolWithMask()(logits_trb_cdr3_mask)
    logits_trb_cdr3_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_cdr3_pool, weights])
    logits_trb_length = Dense(num_outputs)(features_trb_length)
    logits_trb_length_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_length, weights])
    logits_trb_vgene = Dense(num_outputs)(features_trb_vgene)
    logits_trb_vgene_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_vgene, weights])
    logits_trb_jgene = Dense(num_outputs)(features_trb_jgene)
    logits_trb_jgene_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_jgene, weights])
    # --- combine the eight normalized streams and normalize once more ---
    logits = Add()([logits_tra_cdr3_norm, logits_tra_length_norm, logits_tra_vgene_norm, logits_tra_jgene_norm, logits_trb_cdr3_norm, logits_trb_length_norm, logits_trb_vgene_norm, logits_trb_jgene_norm])
    logits_norm = NormalizeInitialization(epsilon=0.0)([logits, weights])
    model = Model(inputs=[features_tra_cdr3, features_tra_vgene, features_tra_jgene, features_trb_cdr3, features_trb_vgene, features_trb_jgene, weights], outputs=logits_norm)
    return model
class CNNModel(Model):
    """Thin Model wrapper around the cnn() primitive.

    Stores the convolutional hyper-parameters at construction time and
    instantiates the network lazily in _build().
    """

    def __init__(self, filters, strides, padding, name=None, hidden_nonlinearity=tf.nn.relu, hidden_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer()):
        super().__init__(name)
        # Layer geometry.
        self._filters = filters
        self._strides = strides
        self._padding = padding
        # Activation and initializers forwarded verbatim to cnn().
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init
        self._hidden_nonlinearity = hidden_nonlinearity

    def _build(self, state_input, name=None):
        """Construct the CNN over *state_input*; *name* is ignored."""
        del name  # unused, but required by the Model interface
        return cnn(input_var=state_input, filters=self._filters, hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self._hidden_w_init, hidden_b_init=self._hidden_b_init, strides=self._strides, padding=self._padding, name='cnn')
_module()
class PascalVOCDataset(CustomDataset):
    """Pascal VOC semantic segmentation dataset (20 classes + background).

    Requires a split file; images are .jpg and segmentation maps .png.
    """
    # Class names in label-index order (index 0 is background).
    CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
    # RGB palette aligned with CLASSES (standard VOC colors).
    PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]

    def __init__(self, split, **kwargs):
        super(PascalVOCDataset, self).__init__(img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs)
        # The base class resolves img_dir/split; both must be valid for VOC.
        assert (osp.exists(self.img_dir) and (self.split is not None)), (self.img_dir, self.split)

    def results2img(self, results, imgfile_prefix, palette=False, indices=None):
        """Write each result mask as <imgfile_prefix>/<basename>.png.

        results: per-image label maps (array-like, cast to uint8).
        palette: attach the VOC color palette to the saved PNGs.
        indices: dataset indices matching *results* (defaults to all).
        Returns the list of written file paths.
        """
        if (indices is None):
            indices = list(range(len(self)))
        mmcv.mkdir_or_exist(imgfile_prefix)
        result_files = []
        for (result, idx) in zip(results, indices):
            filename = self.img_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]
            png_filename = osp.join(imgfile_prefix, f'{basename}.png')
            # Save as palettized ('P' mode) 8-bit PNG.
            output = Image.fromarray(result.astype(np.uint8)).convert('P')
            if palette:
                output.putpalette(np.array(self.PALETTE))
            output.save(png_filename)
            result_files.append(png_filename)
        return result_files

    def format_results(self, results, imgfile_prefix, palette=False, indices=None):
        """Validate arguments, then convert results to image files."""
        if (indices is None):
            indices = list(range(len(self)))
        assert isinstance(results, list), 'results must be a list.'
        assert isinstance(indices, list), 'indices must be a list.'
        result_files = self.results2img(results, imgfile_prefix, palette, indices)
        return result_files
.filterwarnings('ignore::DeprecationWarning')
def test_log() -> None:
    """Smoke test: configure_logging() must leave the module logger usable."""
    configure_logging()
    logger.info('Testing')
def read_png(filename):
    """Load an image file into a float32 HxWx3 tensor scaled to [0, 1]."""
    raw = tf.read_file(filename)
    image = tf.image.decode_image(raw, channels=3)
    # decode_image cannot infer a static shape; pin the channel dimension.
    image.set_shape([None, None, 3])
    # Convert from uint8 [0, 255] to float [0, 1].
    image = tf.cast(image, tf.float32) / 255
    return image
def resnet_v1(inputs, blocks, num_classes=None, is_training=True, global_pool=True, output_stride=None, include_root_block=True, spatial_squeeze=True, reuse=None, scope=None):
    """Build a ResNet v1 network from a list of slim block definitions.

    inputs: NHWC image tensor.
    blocks: resnet_utils block descriptions consumed by stack_blocks_dense.
    num_classes: when set, append a 1x1-conv classifier and softmax.
    output_stride: desired input/output spatial ratio (multiple of 4 when the
        root block is included, which already downsamples by 4).
    spatial_squeeze: squeeze the 1x1 spatial dims of the logits.
    Returns (net, end_points).
    """
    with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
        end_points_collection = (sc.original_name_scope + '_end_points')
        # Collect outputs of all conv/bottleneck/stack ops into one collection.
        with slim.arg_scope([slim.conv2d, bottleneck, resnet_utils.stack_blocks_dense], outputs_collections=end_points_collection):
            with slim.arg_scope([slim.batch_norm], is_training=is_training):
                net = inputs
                if include_root_block:
                    if (output_stride is not None):
                        if ((output_stride % 4) != 0):
                            raise ValueError('The output_stride needs to be a multiple of 4.')
                        # The root (conv stride 2 + pool stride 2) already
                        # downsamples by 4; the rest must provide the remainder.
                        output_stride /= 4
                    net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
                    net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
                net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
                end_points = slim.utils.convert_collection_to_dict(end_points_collection)
                if global_pool:
                    # Global average pooling over the spatial dimensions.
                    net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
                    end_points['global_pool'] = net
                if num_classes:
                    # 1x1 conv acts as the final fully connected layer.
                    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='logits')
                    end_points[(sc.name + '/logits')] = net
                    if spatial_squeeze:
                        net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
                        end_points[(sc.name + '/spatial_squeeze')] = net
                    end_points['predictions'] = slim.softmax(net, scope='predictions')
                return (net, end_points)
def main(model_args, data_args, training_args):
    """Train / evaluate / predict a seq2seq model for data-to-text generation.

    Handles (a) optional resume from the last checkpoint, (b) an optional
    ray-based hyper-parameter search branch, and (c) regular train / eval /
    test with metric computation and JSON dumps into training_args.output_dir.
    """
    # --- resume detection (skipped when doing hyper-parameter search) ---
    last_checkpoint = None
    if (not model_args.hyper_param_search):
        if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
            last_checkpoint = get_last_checkpoint(training_args.output_dir)
            if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
                # Non-empty output dir without a checkpoint: refuse to clobber it.
                raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
            elif (last_checkpoint is not None):
                logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    # --- config and tokenizer ---
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    if (data_args.linearization_strategy != 'concat'):
        # Non-concat linearizations mark structure with extra special tokens.
        tokenizer.add_special_tokens({'cls_token': '[CLS]', 'sep_token': '[SEP]'})
    # --- model: built lazily via model_init during hyper-parameter search ---
    model = None
    if model_args.hyper_param_search:
        def model_init():
            # A fresh model per trial so each search trial starts from scratch.
            model = AutoModelForSeq2SeqLM.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
            return model
    else:
        model = AutoModelForSeq2SeqLM.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
        if (data_args.linearization_strategy != 'concat'):
            # Embedding table must grow to cover the added special tokens.
            model.resize_token_embeddings(len(tokenizer))
        if (model_args.model_name_or_path == 't5-large'):
            print('CUDA device count:', torch.cuda.device_count())
    # --- dataset files; `extension` ends up as the loader name (json/csv/...) ---
    data_files = {}
    if (data_args.train_file is not None):
        data_files['train'] = data_args.train_file
        extension = data_args.train_file.split('.')[(- 1)]
    if (data_args.validation_file is not None):
        data_files['validation'] = data_args.validation_file
        extension = data_args.validation_file.split('.')[(- 1)]
    if (data_args.test_file is not None):
        data_files['test'] = data_args.test_file
        extension = data_args.test_file.split('.')[(- 1)]
    datasets = load_dataset(extension, data_files=data_files, field='data')
    # -100 makes the loss ignore pad positions in the labels.
    label_pad_token_id = ((- 100) if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id)
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    else:
        # Dynamic padding; multiples of 8 help tensor cores under fp16.
        data_collator = DataCollatorForSeq2Seq(tokenizer, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=(8 if training_args.fp16 else None))
    # --- metrics: a slim sacrebleu-only version for the search branch,
    #     the full multi-metric version otherwise ---
    if model_args.hyper_param_search:
        def compute_metrics(eval_preds):
            (preds, labels) = eval_preds
            if isinstance(preds, tuple):
                preds = preds[0]
            decoded_raw_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
            if data_args.ignore_pad_token_for_loss:
                # Undo the -100 masking so the labels can be decoded.
                labels = np.where((labels != (- 100)), labels, tokenizer.pad_token_id)
            decoded_raw_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
            result = {}
            metric = load_metric('sacrebleu')
            (decoded_preds, decoded_labels) = postprocess_text(decoded_raw_preds, decoded_raw_labels, 'sacrebleu')
            res = metric.compute(predictions=decoded_preds, references=decoded_labels)
            result['sacrebleu'] = res['score']
            return result
    else:
        def compute_metrics(eval_preds):
            (preds, labels) = eval_preds
            if isinstance(preds, tuple):
                preds = preds[0]
            decoded_raw_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
            if data_args.ignore_pad_token_for_loss:
                # Undo the -100 masking so the labels can be decoded.
                labels = np.where((labels != (- 100)), labels, tokenizer.pad_token_id)
            decoded_raw_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
            # Dump raw detokenized outputs for offline inspection.
            dic_pred_label = {'predictions': decoded_raw_preds, 'labels': decoded_raw_labels}
            save_json(dic_pred_label, os.path.join(training_args.output_dir, 'detokenized_outputs.json'))
            result = {}
            for metric_name in data_args.metric_names:
                metric = load_metric(metric_name)
                (decoded_preds, decoded_labels) = postprocess_text(decoded_raw_preds, decoded_raw_labels, metric_name)
                if (metric_name == 'bertscore'):
                    res = metric.compute(predictions=decoded_preds, references=decoded_labels, lang='en')
                    for (k, v) in res.items():
                        if (k == 'hashcode'):
                            continue
                        # NOTE(review): only the first two per-example scores are
                        # logged here -- presumably a two-example eval convention.
                        result[f'{metric_name}_{k}_0'] = round(v[0], 2)
                        result[f'{metric_name}_{k}_1'] = round(v[1], 2)
                else:
                    res = metric.compute(predictions=decoded_preds, references=decoded_labels)
                    if (metric_name == 'sacrebleu'):
                        result[metric_name] = res['score']
                    elif (metric_name == 'bleurt'):
                        result[f'{metric_name}_0'] = round(res['scores'][0], 2)
                        result[f'{metric_name}_1'] = round(res['scores'][1], 2)
                    else:
                        result[metric_name] = res[metric_name]
            # Average generated length (non-pad tokens per prediction).
            prediction_lens = [np.count_nonzero((pred != tokenizer.pad_token_id)) for pred in preds]
            result['gen_len'] = np.mean(prediction_lens)
            result = {k: round(v, 4) for (k, v) in result.items()}
            return result
    linearize_method = linearization_dic[data_args.linearization_strategy]
    (train_dataset, eval_dataset, test_dataset) = preprocess(model_args, data_args, training_args, datasets, tokenizer, linearize_method)
    if ((train_dataset is None) and (eval_dataset is None) and (test_dataset is None)):
        # Nothing to do (no split was requested/produced).
        return
    best_run = None
    if (model_args.hyper_param_search and training_args.do_train):
        trainer = Seq2SeqTrainer(model_init=model_init, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=(compute_metrics if training_args.predict_with_generate else None))
        def my_hp_space_ray(trial):
            # Search space for ray-tune trials.
            from ray import tune
            return {'learning_rate': tune.loguniform(1e-06, 0.0001), 'num_train_epochs': tune.choice(list(range(6, 30))), 'seed': tune.choice(list(range(1, 41))), 'per_device_train_batch_size': tune.choice([4, 8, 16])}
        best_run = trainer.hyperparameter_search(n_trials=10, direction='maximize', checkpoint_freq=500, compute_objective=default_compute_objective, hp_space=my_hp_space_ray)
        save_json(best_run.hyperparameters, os.path.join(training_args.output_dir, 'best_run.json'))
        for (n, v) in best_run.hyperparameters.items():
            setattr(trainer.args, n, v)
        # Search-only mode: persist the best hyper-parameters and stop.
        exit(0)
    else:
        if (model is None):
            model = AutoModelForSeq2SeqLM.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
            model.resize_token_embeddings(len(tokenizer))
        trainer = Seq2SeqTrainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=(compute_metrics if training_args.predict_with_generate else None))
    all_metrics = {}
    # --- training ---
    if training_args.do_train:
        if (last_checkpoint is not None):
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            # Resuming from a local model directory counts as a checkpoint.
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = (trainer.train(resume_from_checkpoint=checkpoint) if (best_run is None) else trainer.train())
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        if trainer.is_world_process_zero():
            # Pretty-print the metrics as an aligned table.
            metrics_formatted = trainer.metrics_format(metrics)
            logger.info('***** train metrics *****')
            k_width = max((len(str(x)) for x in metrics_formatted.keys()))
            v_width = max((len(str(x)) for x in metrics_formatted.values()))
            for key in sorted(metrics_formatted.keys()):
                logger.info(f'  {key: <{k_width}} = {metrics_formatted[key]:>{v_width}}')
            save_json(metrics, os.path.join(training_args.output_dir, 'train_results.json'))
        all_metrics.update(metrics)
        trainer.state.save_to_json(os.path.join(training_args.output_dir, 'trainer_state.json'))
    results = {}
    # --- validation ---
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate(max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, metric_key_prefix='eval')
        max_val_samples = (data_args.max_val_samples if (data_args.max_val_samples is not None) else len(eval_dataset))
        metrics['eval_samples'] = min(max_val_samples, len(eval_dataset))
        if trainer.is_world_process_zero():
            metrics_formatted = trainer.metrics_format(metrics)
            logger.info('***** val metrics *****')
            k_width = max((len(str(x)) for x in metrics_formatted.keys()))
            v_width = max((len(str(x)) for x in metrics_formatted.values()))
            for key in sorted(metrics_formatted.keys()):
                logger.info(f'  {key: <{k_width}} = {metrics_formatted[key]:>{v_width}}')
            save_json(metrics, os.path.join(training_args.output_dir, 'eval_results.json'))
        all_metrics.update(metrics)
    # --- test / prediction ---
    if training_args.do_predict:
        logger.info('*** Test ***')
        test_results = trainer.predict(test_dataset, metric_key_prefix='test', max_length=data_args.val_max_target_length, num_beams=data_args.num_beams)
        metrics = test_results.metrics
        max_test_samples = (data_args.max_test_samples if (data_args.max_test_samples is not None) else len(test_dataset))
        metrics['test_samples'] = min(max_test_samples, len(test_dataset))
        if trainer.is_world_process_zero():
            metrics_formatted = trainer.metrics_format(metrics)
            logger.info('***** test metrics *****')
            k_width = max((len(str(x)) for x in metrics_formatted.keys()))
            v_width = max((len(str(x)) for x in metrics_formatted.values()))
            for key in sorted(metrics_formatted.keys()):
                logger.info(f'  {key: <{k_width}} = {metrics_formatted[key]:>{v_width}}')
            save_json(metrics, os.path.join(training_args.output_dir, 'test_results.json'))
        all_metrics.update(metrics)
        if training_args.predict_with_generate:
            # Write the plain-text generations, one per line.
            test_preds = tokenizer.batch_decode(test_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            test_preds = [pred.strip() for pred in test_preds]
            output_test_preds_file = os.path.join(training_args.output_dir, 'test_preds_seq2seq.txt')
            with open(output_test_preds_file, 'w') as writer:
                writer.write('\n'.join(test_preds))
class Adapter(nn.Module):
    """Bottleneck adapter: down-project, activation (+ optional dropout), up-project."""

    def __init__(self, cfg, red_fac=2):
        super(Adapter, self).__init__()
        self.cfg = cfg
        self.embed_dim = cfg.encoder_embed_dim
        # Quantization-noise settings mirror the surrounding transformer layers.
        self.quant_noise = getattr(cfg, 'quant_noise_pq', 0)
        self.quant_noise_block_size = getattr(cfg, 'quant_noise_pq_block_size', 8) or 8
        self.activation_fn = utils.get_activation_fn(
            activation=(getattr(cfg, 'activation_fn', 'relu') or 'relu'))
        hidden_dim = self.embed_dim // red_fac
        self.fc1 = quant_noise(
            nn.Linear(self.embed_dim, hidden_dim),
            p=self.quant_noise, block_size=self.quant_noise_block_size)
        self.fc2 = quant_noise(
            nn.Linear(hidden_dim, self.embed_dim),
            p=self.quant_noise, block_size=self.quant_noise_block_size)
        drop_p = getattr(cfg, 'activation_dropout', 0) or 0
        if drop_p == 0:
            # Fall back to the legacy option name when activation_dropout is unset.
            drop_p = getattr(cfg, 'relu_dropout', 0) or 0
        self.activation_dropout_module = FairseqDropout(
            float(drop_p), module_name=self.__class__.__name__)

    def forward(self, x):
        hidden = self.activation_fn(self.fc1(x))
        # Apply dropout unless the config explicitly disables it via adapter_dropout=False.
        if not hasattr(self.cfg, 'adapter_dropout') or self.cfg.adapter_dropout:
            hidden = self.activation_dropout_module(hidden)
        return self.fc2(hidden)
def resnet101(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNet:
    """Build a ResNet-101 by wrapping torchvision's model in the local ResNet adapter.

    Args:
        pretrained: whether torchvision loads pretrained weights.
        progress: whether torchvision shows a download progress bar.
        **kwargs: forwarded to the torchvision constructor.
    """
    backbone = torchvision.models.resnet101(pretrained, progress, **kwargs)
    return ResNet(backbone)
class FGSM(object):
    """Fast Gradient Sign Method adversarial perturbation."""
    # NOTE(review): `attack` takes no self/cls and is not @staticmethod -- it is
    # presumably invoked as FGSM.attack(model, ...); confirm call sites before
    # adding a decorator.
    def attack(model, epsilon, x, target):
        # Re-wrap the raw data so gradients w.r.t. the input can be computed.
        xn = Point(x.data)
        xn.requires_grad_()
        model.optimizer.zero_grad()
        # Scalar loss so backward() produces the input gradient.
        loss = model.stdLoss(xn, None, target).sum()
        loss.backward()
        # Step of size epsilon along the sign of the input gradient.
        r = (x + Point((epsilon * torch.sign(xn.grad.data))))
        # Clear the gradients this attack accumulated on the model.
        model.optimizer.zero_grad()
        return r
class Data(object):
    """Abstract dataset interface; subclasses implement per-index access."""

    def get_size(self):
        """Number of examples available."""
        raise NotImplementedError()

    def get_by_idxs(self, idxs):
        """Collect get_one() results for `idxs` into a dict of lists per field."""
        batched = defaultdict(list)
        for idx in idxs:
            for key, val in self.get_one(idx).items():
                batched[key].append(val)
        return batched

    def get_one(self, idx):
        """Return one example as a field-name -> value mapping."""
        raise NotImplementedError()

    def get_empty(self):
        """Return an empty dataset of the same kind."""
        raise NotImplementedError()

    def __add__(self, other):
        raise NotImplementedError()
class TFFlaubertForMultipleChoice():
    # Dummy placeholder: raises a helpful error (via requires_tf) when the
    # TensorFlow backend is not installed.
    def __init__(self, *args, **kwargs):
        requires_tf(self)
    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
class Optimizers(object):
    """Container that fans step()/zero_grad() out to several optimizers."""

    def __init__(self):
        self.optimizers = []
        self.lrs = []

    def add(self, optimizer, lr):
        """Register an optimizer together with its learning rate."""
        self.optimizers.append(optimizer)
        self.lrs.append(lr)

    def step(self):
        """Run one update step on every registered optimizer, in order."""
        for opt in self.optimizers:
            opt.step()

    def zero_grad(self):
        """Clear gradients on every registered optimizer, in order."""
        for opt in self.optimizers:
            opt.zero_grad()

    def __getitem__(self, index):
        return self.optimizers[index]

    def __setitem__(self, index, value):
        self.optimizers[index] = value
class BartForConditionalGeneration():
    # Dummy placeholder: raises a helpful error (via requires_pytorch) when the
    # PyTorch backend is not installed.
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
class LL2XYProjector():
    """Project WGS84 lat/lon to local UTM x/y relative to a fixed origin."""

    def __init__(self, lat_origin, lon_origin):
        self.lat_origin = lat_origin
        self.lon_origin = lon_origin
        # UTM zones are 6 degrees wide, numbered starting at lon = -180.
        self.zone = math.floor((lon_origin + 180.0) / 6) + 1
        self.p = pyproj.Proj(proj='utm', ellps='WGS84', zone=self.zone, datum='WGS84')
        self.x_origin, self.y_origin = self.p(lon_origin, lat_origin)

    def latlon2xy(self, lat, lon):
        """Return [x, y] coordinates relative to the configured origin."""
        x, y = self.p(lon, lat)
        return [x - self.x_origin, y - self.y_origin]
class Lipophilicity(MoleculeCSVDataset):
    """Lipophilicity regression dataset built from a DGL-hosted CSV archive.

    Downloads and extracts the archive, parses Lipophilicity.csv and builds
    one graph per SMILES string; the single regression task column is 'exp'.
    """
    def __init__(self, smiles_to_graph=smiles_2_dgl, load=False, log_every=1000, cache_file_path='./lipophilicity_dglgraph.bin', n_jobs=1):
        self._url = 'dataset/lipophilicity.zip'
        data_path = (get_download_dir() + '/lipophilicity.zip')
        dir_path = (get_download_dir() + '/lipophilicity')
        # overwrite=False: reuse a previously downloaded archive.
        download(_get_dgl_url(self._url), path=data_path, overwrite=False)
        extract_archive(data_path, dir_path)
        df = pd.read_csv((dir_path + '/Lipophilicity.csv'))
        super(Lipophilicity, self).__init__(df=df, smiles_to_graph=smiles_to_graph, smiles_column='smiles', cache_file_path=cache_file_path, task_names=['exp'], load=load, log_every=log_every, init_mask=False, n_jobs=n_jobs)
        # When True, __getitem__ additionally returns the ChEMBL id.
        self.load_full = False
        # Keep only ids of molecules that survived graph construction
        # (valid_ids is populated by the parent class).
        self.chembl_ids = df['CMPD_CHEMBLID'].tolist()
        self.chembl_ids = [self.chembl_ids[i] for i in self.valid_ids]
    def __getitem__(self, item):
        """Return (smiles, graph, label[, chembl_id]) for index `item`."""
        if self.load_full:
            return (self.smiles[item], self.graphs[item], self.labels[item], self.chembl_ids[item])
        else:
            return (self.smiles[item], self.graphs[item], self.labels[item])
class BasicModule(Module):
    """Module base class adding name-stamped checkpoint save/load helpers."""

    def __init__(self):
        super(BasicModule, self).__init__()
        # Checkpoints are prefixed with the concrete subclass name.
        self.model_name = self.__class__.__name__

    def save(self, name=None):
        """Save the state dict; default path is checkpoints/<name>_<timestamp>.pth.

        Returns the path the checkpoint was written to.
        """
        if name is None:
            pattern = 'checkpoints/' + self.model_name + '_' + '%Y%m%d_%H%M%S.pth'
            name = time.strftime(pattern, time.localtime())
        torch.save(self.state_dict(), name)
        return name

    def load(self, path):
        """Restore parameters from the checkpoint at `path`."""
        self.load_state_dict(torch.load(path))
class GNActDWConv2d(nn.Module):
    """GroupNorm -> GELU -> depthwise 5x5 conv, on (h*w, bs, c) shaped input.

    Output has the same (h*w, bs, c) shape as the input.
    """

    def __init__(self, indim, gn_groups=32):
        super().__init__()
        self.gn = nn.GroupNorm(gn_groups, indim)
        # Depthwise conv: groups == channels; padding=2 keeps spatial size.
        self.conv = nn.Conv2d(indim, indim, 5, dilation=1,
                              padding=2, groups=indim, bias=False)

    def forward(self, x, size_2d):
        h, w = size_2d
        _, bs, c = x.size()
        # (h*w, bs, c) -> (bs, c, h, w) for the 2D ops.
        spatial = x.view(h, w, bs, c).permute(2, 3, 0, 1)
        spatial = F.gelu(self.gn(spatial))
        spatial = self.conv(spatial)
        # Back to the sequence layout: (bs, c, h, w) -> (h*w, bs, c).
        return spatial.view(bs, c, h * w).permute(2, 0, 1)
def prototype_twitter_GaussOnly_VHRED_NormOp_ClusterExp1():
    """Twitter-BPE VHRED config with a Gaussian-only latent variable.

    Starts from prototype_state() and overrides data paths, model sizes and
    latent-variable settings; piecewise latent variables are disabled.
    """
    state = prototype_state()
    # Data locations and bookkeeping.
    state.update({
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
    })
    # Encoder/decoder architecture.
    state.update({
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 1000,
        'sdim': 500,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
    })
    # Latent variable configuration: Gaussian on, piecewise off.
    state.update({
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': (1.0 / 60000.0),
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'patience': 20,
    })
    return state
def checkpoint(nets, history, cfg, epoch):
    """Persist training history plus encoder/decoder weights for one epoch.

    Files are written into cfg.DIR as {history,encoder,decoder}_epoch_<n>.pth.
    The criterion in `nets` is not saved.
    """
    print('Saving checkpoints...')
    net_encoder, net_decoder, crit = nets
    encoder_state = net_encoder.state_dict()
    decoder_state = net_decoder.state_dict()
    torch.save(history, '{}/history_epoch_{}.pth'.format(cfg.DIR, epoch))
    torch.save(encoder_state, '{}/encoder_epoch_{}.pth'.format(cfg.DIR, epoch))
    torch.save(decoder_state, '{}/decoder_epoch_{}.pth'.format(cfg.DIR, epoch))
class ClusteredSparseDotProduct(torch.autograd.Function):
    """Autograd op computing QK^T dot products restricted, per query, to the
    top-k keys chosen for that query's cluster; dispatches to native
    CPU/CUDA kernels."""
    # Dispatch tables: device.type -> native kernel.
    dot = {'cpu': clustered_sparse_dot_product_cpu, 'cuda': clustered_sparse_dot_product_cuda}
    dot_backward = {'cpu': clustered_sparse_dot_backward_cpu, 'cuda': clustered_sparse_dot_backward_cuda}
    # NOTE(review): forward/backward are not decorated with @staticmethod,
    # which modern torch.autograd.Function requires -- confirm against the
    # torch version this file targets.
    def forward(ctx, Q, K, topk, groups, counts, lengths):
        # Stash inputs needed by backward.
        ctx.save_for_backward(Q, K, topk, groups, counts)
        device = Q.device
        (N, H, L, E) = Q.shape
        (_, _, C, k) = topk.shape
        # One score per (query, selected key).
        product = torch.zeros((N, H, L, k), device=device)
        if (device.type == 'cpu'):
            ClusteredSparseDotProduct.dot[device.type](Q, K, groups, topk, product)
        else:
            with torch.no_grad():
                # Q_pb: queries handled per block -- presumably matches the
                # CUDA kernel's tile size; confirm against the kernel source.
                Q_pb = 16
                # Ceil-divide per-cluster query counts into blocks.
                block_counts = (((counts + Q_pb) - 1) // Q_pb)
                block_counts = block_counts.int()
                block_counts_cumsum = block_counts.view((- 1)).cumsum((- 1)).view(N, H, C).int()
                # Scratch buffer the kernel fills with block->(n,h,cluster,offset)
                # style lookups -- exact layout defined by the kernel.
                indx_maps = torch.ones((block_counts.sum(), 4), device=Q.device, dtype=torch.int32)
                counts_cumsum = counts.cumsum((- 1)).int()
                total_blocks = block_counts.sum().item()
                ClusteredSparseDotProduct.dot[device.type](Q, K, topk.int(), (counts_cumsum - counts), counts_cumsum, block_counts, block_counts_cumsum, total_blocks, indx_maps, product)
        return product
    def backward(ctx, grad_output):
        (Q, K, topk, groups, counts) = ctx.saved_tensors
        device = Q.device
        grad_Q = torch.zeros_like(Q)
        grad_K = torch.zeros_like(K)
        if (device.type == 'cpu'):
            ClusteredSparseDotProduct.dot_backward[Q.device.type](Q, K, groups, topk, grad_output, grad_Q, grad_K)
        else:
            (N, H, L, E) = Q.shape
            (_, _, C, k) = topk.shape
            with torch.no_grad():
                # Same block partitioning as in forward.
                Q_pb = 16
                block_counts = (((counts + Q_pb) - 1) // Q_pb)
                block_counts = block_counts.int()
                block_counts_cumsum = block_counts.view((- 1)).cumsum((- 1)).view(N, H, C).int()
                indx_maps = torch.ones((block_counts.sum(), 4), device=Q.device, dtype=torch.int32)
                counts_cumsum = counts.cumsum((- 1)).int()
                total_blocks = block_counts.sum().item()
                ClusteredSparseDotProduct.dot_backward[Q.device.type](Q, K, groups.int(), topk.int(), grad_output, grad_Q, grad_K, (counts_cumsum - counts), counts_cumsum, block_counts, block_counts_cumsum, total_blocks, indx_maps)
        # Gradients only for Q and K; the index/count inputs are non-differentiable.
        return (grad_Q, grad_K, None, None, None, None, None)
def load_from_splits(paths, original_test_filename, model_predicted_filename):
    """Count, per sentence, in how many splits the model's labels disagree with gold.

    For each split directory in `paths`, loads the gold test file and the
    model-predicted file, checks sentence alignment, and increments a counter
    for every sentence whose predicted label sequence differs from gold.

    Returns:
        defaultdict mapping the sentence text to its disagreement count.
    """
    mistake_counts = defaultdict(int)
    for path in paths:
        gold_path = os.path.join(path, original_test_filename)
        pred_path = os.path.join(path, model_predicted_filename)
        assert os.path.exists(gold_path)
        assert os.path.exists(pred_path)
        gold_data = load_dataset_from_column(gold_path)
        pred_data = load_dataset_from_column(pred_path, schema='none')
        for (gold_sentence, gold_labels), (pred_sentence, pred_labels) in zip(gold_data, pred_data):
            # Both files must contain the same sentences in the same order.
            assert ' '.join(gold_sentence) == ' '.join(pred_sentence)
            if ' '.join(gold_labels) != ' '.join(pred_labels):
                mistake_counts[' '.join(gold_sentence)] += 1
    return mistake_counts
class BridgeTowerForMaskedLM(metaclass=DummyObject):
    # Dummy placeholder: instantiation raises a helpful error unless the
    # 'torch' backend is available.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def test_diaghom(precision='d'):
    """Intersect two hypersurfaces in 2-space with the diagonal homotopy solver.

    Builds a witness set for each hypersurface, prints them, then runs the
    diagonal solver and prints the resulting system and its solutions.
    """
    from phcpy.sets import witness_set_of_hypersurface

    def show_witness_set(hyp):
        # Compute and echo a hypersurface witness set; return it for reuse.
        wsys, wsols = witness_set_of_hypersurface(2, hyp, precision)
        print('the witness sets for', hyp)
        for pol in wsys:
            print(pol)
        for sol in wsols:
            print(sol)
        return wsys, wsols

    hyp1 = 'x1*x2;'
    hyp2 = 'x1 - x2;'
    w1sys, w1sols = show_witness_set(hyp1)
    w2sys, w2sols = show_witness_set(hyp2)
    end_sys, end_sols = diagonal_solver(2, 1, w1sys, w1sols, 1, w2sys, w2sols, 0, precision)
    print('the end system :')
    for pol in end_sys:
        print(pol)
    print('the solutions of the diagonal solver :')
    for sol in end_sols:
        print(sol)
def _create_dummy_line_str_file(ann_file):
ann_info1 = 'sample1.jpg hello'
ann_info2 = 'sample2.jpg world'
with open(ann_file, 'w') as fw:
for ann_info in [ann_info1, ann_info2]:
fw.write((ann_info + '\n')) |
class DukeMTMCreID(BaseImageDataset):
    """DukeMTMC-reID person re-identification dataset loader.

    Downloads and extracts the dataset if needed, then parses the
    bounding_box_train / query / bounding_box_test folders into lists of
    (img_path, pid, camid, 1) tuples (the trailing 1 is a dummy view/track id
    so the tuple layout matches video datasets).
    """
    dataset_dir = 'dukemtmcreid'

    def __init__(self, root='', verbose=True, pid_begin=0, **kwargs):
        super(DukeMTMCreID, self).__init__()
        self.dataset_dir = osp.join(root, self.dataset_dir)
        # BUGFIX: this assignment was an unterminated string literal (the URL
        # had been stripped from the file), which is a syntax error. Restored
        # with the mirror commonly used for this dataset.
        # NOTE(review): confirm the URL against the original source / that it
        # is still live before relying on automatic download.
        self.dataset_url = 'http://vision.cs.duke.edu/DukeMTMC/data/misc/DukeMTMC-reID.zip'
        self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train')
        self.query_dir = osp.join(self.dataset_dir, 'query')
        self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_test')
        self.pid_begin = pid_begin
        self._download_data()
        self._check_before_run()
        # Training pids are relabeled to consecutive integers; query/gallery keep raw pids.
        train = self._process_dir(self.train_dir, relabel=True)
        query = self._process_dir(self.query_dir, relabel=False)
        gallery = self._process_dir(self.gallery_dir, relabel=False)
        if verbose:
            print('=> DukeMTMC-reID loaded')
            self.print_dataset_statistics(train, query, gallery)
        self.train = train
        self.query = query
        self.gallery = gallery
        (self.num_train_pids, self.num_train_imgs, self.num_train_cams, self.num_train_vids) = self.get_imagedata_info(self.train)
        (self.num_query_pids, self.num_query_imgs, self.num_query_cams, self.num_query_vids) = self.get_imagedata_info(self.query)
        (self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams, self.num_gallery_vids) = self.get_imagedata_info(self.gallery)

    def _download_data(self):
        """Download and extract the dataset zip unless the directory already exists."""
        if osp.exists(self.dataset_dir):
            print('This dataset has been downloaded.')
            return
        print('Creating directory {}'.format(self.dataset_dir))
        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))
        print('Downloading DukeMTMC-reID dataset')
        urllib.request.urlretrieve(self.dataset_url, fpath)
        print('Extracting files')
        zip_ref = zipfile.ZipFile(fpath, 'r')
        zip_ref.extractall(self.dataset_dir)
        zip_ref.close()

    def _check_before_run(self):
        """Raise RuntimeError if any expected dataset folder is missing."""
        if (not osp.exists(self.dataset_dir)):
            raise RuntimeError("'{}' is not available".format(self.dataset_dir))
        if (not osp.exists(self.train_dir)):
            raise RuntimeError("'{}' is not available".format(self.train_dir))
        if (not osp.exists(self.query_dir)):
            raise RuntimeError("'{}' is not available".format(self.query_dir))
        if (not osp.exists(self.gallery_dir)):
            raise RuntimeError("'{}' is not available".format(self.gallery_dir))

    def _process_dir(self, dir_path, relabel=False):
        """Parse '<pid>_c<camid>...jpg' files into (path, pid, camid, 1) tuples.

        Args:
            dir_path: folder containing the jpg images.
            relabel: if True, remap raw person ids to consecutive labels
                (used for the training split).
        """
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        pattern = re.compile('([-\\d]+)_c(\\d)')
        pid_container = set()
        for img_path in img_paths:
            (pid, _) = map(int, pattern.search(img_path).groups())
            pid_container.add(pid)
        pid2label = {pid: label for (label, pid) in enumerate(pid_container)}
        dataset = []
        cam_container = set()
        for img_path in img_paths:
            (pid, camid) = map(int, pattern.search(img_path).groups())
            assert (1 <= camid <= 8)
            # Make camera ids 0-based.
            camid -= 1
            if relabel:
                pid = pid2label[pid]
            dataset.append((img_path, (self.pid_begin + pid), camid, 1))
            cam_container.add(camid)
        print(cam_container, 'cam_container')
        return dataset
class LRASPP(nn.Module):
    """Lite R-ASPP segmentation model: backbone features -> LRASPP head -> upsample.

    forward() returns an OrderedDict with key 'out' holding logits at the
    input's spatial resolution.
    """

    def __init__(self, backbone, low_channels, high_channels, num_classes, inter_channels=128):
        super().__init__()
        self.backbone = backbone
        self.classifier = LRASPPHead(low_channels, high_channels, num_classes, inter_channels)

    def forward(self, input):
        features = self.backbone(input)
        logits = self.classifier(features)
        # Upsample predictions back to the input's height/width.
        logits = F.interpolate(logits, size=input.shape[-2:], mode='bilinear', align_corners=False)
        return OrderedDict(out=logits)
def _unbroadcast(x, shape):
extra_dims = (x.ndim - len(shape))
assert (extra_dims >= 0)
dim = [i for i in range(x.ndim) if ((x.shape[i] > 1) and ((i < extra_dims) or (shape[(i - extra_dims)] == 1)))]
if len(dim):
x = x.sum(dim=dim, keepdim=True)
if extra_dims:
x = x.reshape((- 1), *x.shape[(extra_dims + 1):])
assert (x.shape == shape)
return x |
def ssd_print(key, value=None, stack_offset=1, deferred=False, extra_print=True, prefix=''):
    # Convenience wrapper around _mlperf_print preconfigured for the SSD
    # benchmark (its tag set and log root directory).
    return _mlperf_print(key=key, value=value, benchmark=SSD, stack_offset=stack_offset, tag_set=SSD_TAG_SET, deferred=deferred, extra_print=extra_print, root_dir=ROOT_DIR_SSD, prefix=prefix)
def _unescape_token(token):
    """Replace escaped characters in `token` with their unescaped forms.

    BUGFIX: the inner docstring had lost its opening quotes in this copy of
    the file, which made the function a syntax error; restored as a proper
    docstring.

    Args:
        token: escaped string token.

    Returns:
        The unescaped string.
    """
    def match(m):
        """Return the replacement text for one escape-sequence match.

        If m.group(1) exists, then use the integer in m.group(1) to return a
        unicode character.

        Args:
            m: match object

        Returns:
            String to replace matched object with.
        """
        if (m.group(1) is None):
            # '\u' unescapes to '_'; '\\' unescapes to '\'.
            return (u'_' if (m.group(0) == u'\\u') else u'\\')
        try:
            return six.unichr(int(m.group(1)))
        except (ValueError, OverflowError):
            # Malformed or out-of-range codepoint.
            return _UNDEFINED_UNICODE
    return _UNESCAPE_REGEX.sub(match, token)
def combine(video_name_split_path, video_duration_path, save_path):
    """Merge a split->video-names mapping with per-video durations and save.

    Writes {split_name: {video_name: duration}} as JSON to `save_path`.
    """
    name_splits = load_json(video_name_split_path)
    durations = load_json(video_duration_path)
    combined = {
        split_name: {name: durations[name] for name in names}
        for split_name, names in name_splits.items()
    }
    save_json(combined, save_path)
class ContextGuidedBlock(nn.Module):
    """CGNet context-guided block: fuses local and surrounding-context features
    and refines them with a global gating module (_FGlo)."""

    def __init__(self, in_channels, out_channels, dilation=2, reduction=16, down=False, residual=True, norm_layer=nn.BatchNorm2d):
        super(ContextGuidedBlock, self).__init__()
        inter_channels = out_channels if down else out_channels // 2
        if down:
            # Downsampling variant: stride-2 conv, then a 1x1 conv to shrink
            # the concatenated features back to out_channels.
            self.conv = _ConvBNPReLU(in_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer)
            self.reduce = nn.Conv2d(inter_channels * 2, out_channels, 1, bias=False)
        else:
            self.conv = _ConvBNPReLU(in_channels, inter_channels, 1, 1, 0, norm_layer=norm_layer)
        self.f_loc = _ChannelWiseConv(inter_channels, inter_channels)
        self.f_sur = _ChannelWiseConv(inter_channels, inter_channels, dilation)
        self.bn = norm_layer(inter_channels * 2)
        self.prelu = nn.PReLU(inter_channels * 2)
        self.f_glo = _FGlo(out_channels, reduction)
        self.down = down
        self.residual = residual

    def forward(self, x):
        reduced = self.conv(x)
        local_feat = self.f_loc(reduced)
        surround_feat = self.f_sur(reduced)
        fused = torch.cat([local_feat, surround_feat], dim=1)
        fused = self.prelu(self.bn(fused))
        if self.down:
            fused = self.reduce(fused)
        out = self.f_glo(fused)
        return out + x if self.residual else out
def count_conv2d(m, x, y):
    """Count the operations of a Conv2d forward pass and store them on m.total_ops.

    Args:
        m: the nn.Conv2d module being profiled.
        x: tuple of layer inputs; x[0] is the input tensor (N, Cin, H, W).
        y: the output tensor (N, Cout, Hout, Wout).
    """
    x = x[0]
    cin = m.in_channels
    cout = m.out_channels
    (kh, kw) = m.kernel_size
    batch_size = x.size()[0]
    out_h = y.size(2)
    out_w = y.size(3)
    # Multiply-adds per output element: one kernel application over the
    # cin/groups input channels feeding that output channel.
    kernel_ops = ((multiply_adds * kh) * kw) * cin // m.groups
    bias_ops = (1 if (m.bias is not None) else 0)
    output_elements = (((batch_size * out_w) * out_h) * cout)
    # BUGFIX: the previous formula was output_elements * (kernel_ops + bias_ops)
    # * cin // groups, which also multiplied the bias additions by cin/groups.
    # The bias is added once per output element.
    total_ops = output_elements * (kernel_ops + bias_ops)
    m.total_ops = torch.Tensor([int(total_ops)])
class CollectTestLoss(MultipleRunBase):
    # Luigi task that collects the 'Test loss' score across multiple runs.
    MultipleRun_params = luigi.DictParameter()  # forwarded to each per-run task
    score_name = luigi.Parameter(default='Test loss')  # metric collected per run
    def obj_task(self, **kwargs):
        # Factory for the per-run objective task used by MultipleRunBase.
        return PerformanceEvaluation(**kwargs)
def _write_config(config, config_path):
    """Serialize the protobuf `config` in text format to `config_path`."""
    config_text = text_format.MessageToString(config)
    with tf.gfile.Open(config_path, 'wb') as f:
        f.write(config_text)
class DiscreteActionSpace(ActionSpace):
    """Action space of `num` discrete actions, sampled uniformly at random."""

    def __init__(self, num):
        super(DiscreteActionSpace, self).__init__()
        self.num = num

    def sample(self):
        """Draw a uniformly random action index in [0, num)."""
        return self.rng.randint(self.num)

    def num_actions(self):
        return self.num

    def __repr__(self):
        return 'DiscreteActionSpace({})'.format(self.num)

    # str() and repr() render identically.
    __str__ = __repr__
def atom_features(atom):
    """Encode an RDKit atom as a feature tensor: one-hot element symbol,
    degree and formal charge, plus an aromaticity flag."""
    features = onek_encoding_unk(atom.GetSymbol(), ELEM_LIST)
    features = features + onek_encoding_unk(atom.GetDegree(), [0, 1, 2, 3, 4, 5])
    features = features + onek_encoding_unk(atom.GetFormalCharge(), [(- 1), (- 2), 1, 2, 0])
    features = features + [atom.GetIsAromatic()]
    return torch.Tensor(features)
def main():
    """Average multiple TPQ samples and validate against a reference file.

    Reads output/SS_rand<i>.dat for each random sample, averages inverse
    temperature, energy, specific heat and entropy over samples, writes
    ave_tpq.dat, then compares temperature and energy against reference.dat
    and exits with status 0 on success, -1 on mismatch.
    """
    list_mod = ['Nsite', 'Lanczos_max', 'NumAve', 'ExpecInterval']
    dict_mod = func_mod('modpara.def', list_mod)
    max_set = int(dict_mod['NumAve'])  # number of random TPQ samples
    max_eigen = int(dict_mod['Lanczos_max'])  # number of TPQ steps per sample
    # Reference arrays read back from reference.dat.
    Ref_ave_Temp = np.zeros([max_eigen], dtype=np.float64)
    Ref_err_Temp = np.zeros([max_eigen], dtype=np.float64)
    Ref_ave_Ene = np.zeros([max_eigen], dtype=np.float64)
    Ref_err_Ene = np.zeros([max_eigen], dtype=np.float64)
    ave_Temp = np.zeros([max_eigen], dtype=np.float64)
    err_Temp = np.zeros([max_eigen], dtype=np.float64)
    # Per-sample observables (rows = samples, cols = TPQ steps).
    InvTemp = np.zeros([max_set, max_eigen], dtype=np.float64)
    ave_InvTemp = np.zeros([max_eigen], dtype=np.float64)
    err_InvTemp = np.zeros([max_eigen], dtype=np.float64)
    Ene = np.zeros([max_set, max_eigen], dtype=np.float64)
    ave_Ene = np.zeros([max_eigen], dtype=np.float64)
    err_Ene = np.zeros([max_eigen], dtype=np.float64)
    Ene2 = np.zeros([max_set, max_eigen], dtype=np.float64)
    ave_Ene2 = np.zeros([max_eigen], dtype=np.float64)
    err_Ene2 = np.zeros([max_eigen], dtype=np.float64)
    Spc = np.zeros([max_set, max_eigen], dtype=np.float64)
    ave_Spc = np.zeros([max_eigen], dtype=np.float64)
    err_Spc = np.zeros([max_eigen], dtype=np.float64)
    Ent = np.zeros([max_set, (max_eigen - 1)], dtype=np.float64)
    ave_Ent = np.zeros([(max_eigen - 1)], dtype=np.float64)
    err_Ent = np.zeros([(max_eigen - 1)], dtype=np.float64)
    # --- parse each sample's SS_rand file (skipping its header line) ---
    for cnt_set in range(0, max_set):
        with open(('output/SS_rand%d.dat' % cnt_set)) as f:
            data = f.read()
        data = data.split('\n')
        for i in range(1, len(data)):
            tmp_i = (i - 1)
            tmp = data[i].split()
            if (len(tmp) > 1):
                # Columns: inverse temperature, <E>, <E^2>.
                InvTemp[cnt_set][tmp_i] = float(tmp[0])
                Ene[cnt_set][tmp_i] = float(tmp[1])
                Ene2[cnt_set][tmp_i] = float(tmp[2])
                # Specific heat C = (<E^2> - <E>^2) * beta^2.
                Spc[cnt_set][tmp_i] = (((float(tmp[2]) - (float(tmp[1]) ** 2)) * float(tmp[0])) * float(tmp[0]))
    # Sample means and standard deviations (ddof=1: unbiased estimator).
    ave_InvTemp = np.mean(InvTemp, axis=0)
    err_InvTemp = np.std(InvTemp, axis=0, ddof=1)
    ave_Ene = np.mean(Ene, axis=0)
    err_Ene = np.std(Ene, axis=0, ddof=1)
    ave_Spc = np.mean(Spc, axis=0)
    err_Spc = np.std(Spc, axis=0, ddof=1)
    # --- entropy by cumulative sum over steps, per sample ---
    for cnt_set in range(0, max_set):
        tmp_Ent = 0.0
        for i in range(0, (max_eigen - 1)):
            tmp_Ent += (Spc[cnt_set][i] * (1.0 - (InvTemp[cnt_set][i] / InvTemp[cnt_set][(i + 1)])))
            Ent[cnt_set][i] = tmp_Ent
    ave_Ent = np.mean(Ent, axis=0)
    err_Ent = np.std(Ent, axis=0, ddof=1)
    # --- write averaged observables; T and its error from 1/beta ---
    with open('ave_tpq.dat', 'w') as f:
        for cnt in range(1, (max_eigen - 1)):
            temp = (1.0 / ave_InvTemp[cnt])
            # Error propagation: d(1/beta) = d(beta)/beta^2.
            temp_err = (err_InvTemp[cnt] / (ave_InvTemp[cnt] ** 2))
            ave_Temp[cnt] = temp
            err_Temp[cnt] = temp_err
            print((' %.16f ' % temp), end='', file=f)
            print((' %.16f ' % temp_err), end='', file=f)
            print((' %.16f ' % ave_Ene[cnt]), end='', file=f)
            print((' %.16f ' % err_Ene[cnt]), end='', file=f)
            print((' %.16f ' % ave_Spc[cnt]), end='', file=f)
            print((' %.16f ' % err_Spc[cnt]), end='', file=f)
            print((' %.16f ' % ave_Ent[cnt]), end='', file=f)
            print((' %.16f ' % err_Ent[cnt]), end='', file=f)
            print(' ', file=f)
    # --- read the reference values (note the 1-based target index) ---
    with open('reference.dat') as f:
        data = f.read()
    data = data.split('\n')
    for i in range(0, len(data)):
        # NOTE(review): tmp_i = i + 1 means a full-length reference file would
        # overflow the arrays -- presumably the file is shorter; confirm.
        tmp_i = (i + 1)
        tmp = data[i].split()
        if (len(tmp) > 1):
            Ref_ave_Temp[tmp_i] = float(tmp[0])
            Ref_err_Temp[tmp_i] = float(tmp[1])
            Ref_ave_Ene[tmp_i] = float(tmp[2])
            Ref_err_Ene[tmp_i] = float(tmp[3])
    # --- compare against reference within 2-sigma (floor 1e-8) ---
    result = 0
    for cnt in range(1, (max_eigen - 2)):
        diff_temp = abs((Ref_ave_Temp[cnt] - ave_Temp[cnt]))
        diff_ene = abs((Ref_ave_Ene[cnt] - ave_Ene[cnt]))
        if (diff_temp > max((2 * Ref_err_Temp[cnt]), 1e-08)):
            result = (- 1)
            print('fatatl error in temp ')
            print(cnt, Ref_ave_Temp[cnt], ave_Temp[cnt], Ref_err_Temp[cnt], Ref_ave_Ene[cnt], ave_Ene[cnt], Ref_err_Ene[cnt])
        if (diff_ene > max((2 * Ref_err_Ene[cnt]), 1e-08)):
            result = (- 1)
            print('fatatl error in ene ')
            print(cnt, Ref_ave_Temp[cnt], ave_Temp[cnt], Ref_err_Temp[cnt], Ref_ave_Ene[cnt], ave_Ene[cnt], Ref_err_Ene[cnt])
    # Exit status encodes pass/fail for the calling test harness.
    sys.exit(result)
# NOTE(review): this bare `_experiment` line looks like a mangled decorator
# (likely garage's `@wrap_experiment`) -- as written it is a no-op expression
# statement; restore from the original source.
_experiment
def dqn_cartpole(ctxt=None, seed=1):
    """Train DQN with a discrete MLP Q-function on CartPole-v0.

    Args:
        ctxt: experiment context handed to LocalTFRunner.
        seed: random seed applied before building the env/algo.
    """
    set_seed(seed)
    with LocalTFRunner(ctxt) as runner:
        n_epochs = 10
        steps_per_epoch = 10
        sampler_batch_size = 500
        # Total environment steps; used to schedule the epsilon decay.
        num_timesteps = ((n_epochs * steps_per_epoch) * sampler_batch_size)
        env = GarageEnv(gym.make('CartPole-v0'))
        replay_buffer = PathBuffer(capacity_in_transitions=int(10000.0))
        qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))
        policy = DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)
        exploration_policy = EpsilonGreedyPolicy(env_spec=env.spec, policy=policy, total_timesteps=num_timesteps, max_epsilon=1.0, min_epsilon=0.02, decay_ratio=0.1)
        algo = DQN(env_spec=env.spec, policy=policy, qf=qf, exploration_policy=exploration_policy, replay_buffer=replay_buffer, steps_per_epoch=steps_per_epoch, qf_lr=0.0001, discount=1.0, min_buffer_size=int(1000.0), double_q=True, n_train_steps=500, target_network_update_freq=1, buffer_batch_size=32)
        runner.setup(algo, env)
        runner.train(n_epochs=n_epochs, batch_size=sampler_batch_size)
def test_octree_OctreeNodeInfo():
    """OctreeNodeInfo must retain its constructor arguments unchanged."""
    node_origin = [0, 0, 0]
    node_size = 2.0
    node_depth = 5
    node_child = 7
    info = o3d.geometry.OctreeNodeInfo(node_origin, node_size, node_depth, node_child)
    for got, expected in ((info.origin, node_origin),
                          (info.size, node_size),
                          (info.depth, node_depth),
                          (info.child_index, node_child)):
        np.testing.assert_equal(got, expected)
def sparsenet161(**kwargs):
    """Build the 161-layer SparseNet variant; extra kwargs are forwarded."""
    return get_sparsenet(model_name='sparsenet161', num_layers=161, **kwargs)
# NOTE(review): the three leading lines look like pytest marks whose
# '@pytest.mark' prefix was lost during extraction
# (mujoco / no_cover / timeout(60)) — confirm against the original source.
.mujoco
.no_cover
.timeout(60)
def test_mtppo_metaworld_ml1_push():
    """Smoke-run the MT-PPO MetaWorld ML1 push example for one tiny epoch
    and require it to exit with status 0."""
    assert (subprocess.run([str((EXAMPLES_ROOT_DIR / 'torch/mtppo_metaworld_ml1_push.py')), '--epochs', '1', '--batch_size', '1'], check=False).returncode == 0)
def test_sql_observer_failed_event_updates_run(sql_obs, sample_run, session):
    """A failed_event must flip the stored run to FAILED and persist the
    traceback as newline-joined text."""
    sql_obs.started_event(**sample_run)
    trace_lines = ['lots of errors and', 'so', 'on...']
    sql_obs.failed_event(fail_time=T2, fail_trace=trace_lines)
    assert session.query(Run).count() == 1
    stored = session.query(Run).first()
    assert stored.stop_time == T2
    assert stored.status == 'FAILED'
    assert stored.fail_trace == 'lots of errors and\nso\non...'
def readme():
    """Return the full text of README_mmdet.md (decoded as UTF-8)."""
    with open('README_mmdet.md', encoding='utf-8') as handle:
        return handle.read()
class PathwayTypes(object):
    """Integer codes identifying the supported pathway flavours."""

    NORM = 0  # normal pathway
    SUBS = 1  # substitution pathway
    FC = 2    # fully-connected pathway

    def pTypes(self):
        """Return every known pathway code, in declaration order."""
        return [PathwayTypes.NORM, PathwayTypes.SUBS, PathwayTypes.FC]
def mixing_noise(batch, latent_dim, prob, device):
    """With probability `prob`, return two latents for style mixing;
    otherwise return a single latent wrapped in a list."""
    # Short-circuit keeps random.random() unsampled when prob <= 0.
    if prob > 0 and random.random() < prob:
        return make_noise(batch, latent_dim, 2, device)
    return [make_noise(batch, latent_dim, 1, device)]
def create_datasets(data_dir: str, dest_dir: str) -> None:
    """Fan every JSON data point under `data_dir` out to each task-specific
    dataset creator, writing one output file per task under `dest_dir`.

    Raises:
        Exception: if `data_dir` does not exist.
    """
    # Explicit check instead of assert-in-try (asserts vanish under -O).
    if not os.path.exists(data_dir):
        raise Exception(f'[create_datasets] ERROR: DATA_DIR {data_dir} MUST EXIST')
    # Create the destination once (was duplicated in the original).
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)

    def _creator(cls, name):
        # Each creator owns its output file handle under dest_dir.
        return cls(open(os.path.join(dest_dir, name), 'w'))

    dataset_creators = [
        _creator(ProofStepClassificationDatasetCreator, 'proof_step_classification.json'),
        _creator(PremiseClassificationDatasetCreator, 'premise_classification.json'),
        _creator(TheoremNamePredictionDatasetCreator, 'theorem_name_prediction.json'),
        _creator(NextLemmaPredictionDatasetCreator, 'next_lemma_prediction.json'),
        _creator(ProofTermPredictionDatasetCreator, 'proof_term_prediction.json'),
        _creator(SkipProofDatasetCreator, 'skip_proof.json'),
        _creator(TypePredictionDatasetCreator, 'type_prediction.json'),
        _creator(TSElabDatasetCreator, 'ts_elab.json'),
        _creator(ProofTermElabDatasetCreator, 'proof_term_elab.json'),
        _creator(ResultElabDatasetCreator, 'result_elab.json'),
    ]
    json_files = files_with_extension(data_dir, 'json')
    print('JSON FILES: ', json_files)
    for json_file in tqdm(json_files):
        line = 0
        try:
            with open(json_file, 'r') as json_file_handle:
                for json_line in json_file_handle:
                    line += 1
                    try:
                        dp = json.loads(json_line)
                        for dc in dataset_creators:
                            dc.process_dp(dp)
                    except Exception as e:
                        # Best-effort: skip malformed lines, keep the file going.
                        print(f'BAD LINE IN FILE: {json_file} EXCEPTION: {e}')
        except Exception as e:
            print(f'BAD FILE: {json_file} LINE: {line}: EXCEPTION: {e}')
class HuffmanCodeBuilder():
    """Accumulates symbol frequencies and builds a Huffman coder from them."""

    def __init__(self):
        self.symbols = Counter()  # symbol -> occurrence count

    def add_symbols(self, *syms) -> None:
        """Count each given symbol once."""
        self.symbols.update(syms)

    def increment(self, symbol: str, cnt: int) -> None:
        """Add `cnt` occurrences of `symbol`."""
        self.symbols[symbol] += cnt

    # Fix: was defined with a `cls` first argument but missing @classmethod,
    # so HuffmanCodeBuilder.from_file(path) could never work.
    @classmethod
    def from_file(cls, filename):
        """Build a counter from a whitespace-separated `symbol count` file."""
        c = cls()
        with open(filename, 'r', encoding='utf-8') as fin:  # renamed: was shadowing builtin `input`
            for line in fin:
                split = re.split('[\\s]+', line)
                c.increment(split[0], int(split[1]))
        return c

    def to_file(self, filename, sep='\t'):
        """Dump `symbol<sep>count` lines, most frequent first."""
        with open(filename, 'w', encoding='utf-8') as output:
            for (tok, cnt) in self.symbols.most_common():
                output.write(f'{tok}{sep}{cnt}\n')

    def _smallest(self, q1: deque, q2: deque) -> HuffmanNode:
        """Pop the lower-count node from the tails of the two queues."""
        if (len(q1) == 0):
            return q2.pop()
        if (len(q2) == 0):
            return q1.pop()
        if (q1[-1].count < q2[-1].count):
            return q1.pop()
        return q2.pop()

    def __add__(self, c: 'HuffmanCodeBuilder') -> 'HuffmanCodeBuilder':
        """Merge two builders by summing their symbol counts."""
        new_c = (self.symbols + c.symbols)
        new_b = HuffmanCodeBuilder()
        new_b.symbols = new_c
        return new_b

    def build_code(self, bos='<s>', pad='<pad>', eos='</s>', unk='<unk>') -> HuffmanCoder:
        """Build a HuffmanCoder, forcing the four special symbols into the
        vocabulary with at least one occurrence each."""
        assert (len(self.symbols) > 0), 'cannot build code from empty list of symbols'
        if (self.symbols[bos] == 0):
            self.add_symbols(bos)
        if (self.symbols[pad] == 0):
            self.add_symbols(pad)
        if (self.symbols[eos] == 0):
            self.add_symbols(eos)
        if (self.symbols[unk] == 0):
            self.add_symbols(unk)
        node_id = 0
        # Leaves sorted by descending count; tails of the deques hold the
        # rarest candidates for merging.
        leaves_queue = deque([HuffmanNode(symbol=symbol, count=count, id=idx) for (idx, (symbol, count)) in enumerate(self.symbols.most_common())])
        if (len(leaves_queue) == 1):
            root = leaves_queue.pop()
            root.id = 0
            return HuffmanCoder(root)
        nodes_queue = deque()
        # Classic two-queue Huffman construction: repeatedly merge the two
        # smallest nodes until one tree remains.
        while ((len(leaves_queue) > 0) or (len(nodes_queue) != 1)):
            node1 = self._smallest(leaves_queue, nodes_queue)
            node2 = self._smallest(leaves_queue, nodes_queue)
            nodes_queue.appendleft(HuffmanNode(count=(node1.count + node2.count), left=node1, right=node2, id=node_id))
            node_id += 1
        return HuffmanCoder(nodes_queue.pop(), bos=bos, pad=pad, eos=eos, unk=unk)
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """SentencePiece-style Unigram tokenizer built on HuggingFace `tokenizers`.

    Special tokens are pinned to fixed ids: pad=0, eos=1, unk=2.
    """

    def __init__(self, replacement: str='', add_prefix_space: bool=True, unk_token: Union[(str, AddedToken)]='<unk>', eos_token: Union[(str, AddedToken)]='</s>', pad_token: Union[(str, AddedToken)]='<pad>'):
        # NOTE(review): `replacement` defaulting to '' looks like a garbled
        # metaspace character (upstream commonly uses '▁') — confirm against
        # the original source.
        self.special_tokens = {'pad': {'id': 0, 'token': pad_token}, 'eos': {'id': 1, 'token': eos_token}, 'unk': {'id': 2, 'token': unk_token}}
        # Dense list indexed by special-token id, later handed to the trainer.
        self.special_tokens_list = ([None] * len(self.special_tokens))
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['id']] = token_dict['token']
        tokenizer = Tokenizer(Unigram())
        # NMT + NFKC normalization, multi-space squeezing, lowercasing.
        tokenizer.normalizer = normalizers.Sequence([normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(' {2,}'), ' '), normalizers.Lowercase()])
        # Metaspace word marking, per-digit splitting, punctuation splitting.
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence([pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space), pre_tokenizers.Digits(individual_digits=True), pre_tokenizers.Punctuation()])
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        # Always append EOS to a single encoded sequence.
        tokenizer.post_processor = TemplateProcessing(single=f"$A {self.special_tokens['eos']['token']}", special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])])
        parameters = {'model': 'SentencePieceUnigram', 'replacement': replacement, 'add_prefix_space': add_prefix_space}
        super().__init__(tokenizer, parameters)

    def train(self, files: Union[(str, List[str])], vocab_size: int=8000, show_progress: bool=True):
        """Train the Unigram model on one or more text files."""
        trainer = trainers.UnigramTrainer(vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress)
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[(Iterator[str], Iterator[Iterator[str]])], vocab_size: int=8000, show_progress: bool=True):
        """Train the Unigram model from an in-memory iterator of texts."""
        trainer = trainers.UnigramTrainer(vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress)
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        """Patch the serialized model so unknown tokens map to the unk id."""
        # The Rust tokenizer exposes no unk_id setter, so round-trip via JSON.
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json['model']['unk_id'] = self.special_tokens['unk']['id']
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
def make_atari(env_id, max_episode_steps=None):
    """Create a raw (NoFrameskip) Atari env wrapped with random noop resets
    and 4-frame max-and-skip; optionally cap episode length."""
    env = gym.make(env_id)
    assert 'NoFrameskip' in env.spec.id
    env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    if max_episode_steps is None:
        return env
    return TimeLimit(env, max_episode_steps=max_episode_steps)
def _f_lcs(llcs, m, n):
r_lcs = (llcs / m)
p_lcs = (llcs / n)
beta = (p_lcs / (r_lcs + 1e-12))
num = (((1 + (beta ** 2)) * r_lcs) * p_lcs)
denom = (r_lcs + ((beta ** 2) * p_lcs))
f_lcs = (num / (denom + 1e-12))
return f_lcs |
def train_transform():
    """Training pipeline: resize to 512x512, random 256 crop, to tensor."""
    return transforms.Compose([
        transforms.Resize(size=(512, 512)),
        transforms.RandomCrop(256),
        transforms.ToTensor(),
    ])
def plot_augmentations(y, sr, time_shift=3000, pitch_shift=12, time_stretch=1.3):
    """Render a 3x3 grid of spectrograms comparing audio `y` (sample rate
    `sr`) against time-shift, time-stretch and pitch-shift augmentations.

    Returns the matplotlib Figure.
    """
    # NOTE(review): the positional librosa.effects.* arguments match the
    # pre-0.8 librosa API (rate / n_steps positional) — confirm the pinned
    # librosa version.
    augmentations = {'Original': y, 'Timeshift left': y[time_shift:], 'Timeshift right': numpy.concatenate([numpy.zeros(time_shift), y[:(- time_shift)]]), 'Timestretch faster': librosa.effects.time_stretch(y, time_stretch), 'Timestretch slower': librosa.effects.time_stretch(y, (1 / time_stretch)), 'Pitchshift up': librosa.effects.pitch_shift(y, sr, pitch_shift), 'Pitchshift down': librosa.effects.pitch_shift(y, sr, (- pitch_shift))}
    # Grid layout: 'Original' spans the whole top row.
    layout = [['Original', 'Original', 'Original'], ['Timeshift right', 'Timestretch faster', 'Pitchshift up'], ['Timeshift left', 'Timestretch slower', 'Pitchshift down']]
    shape = numpy.array(layout).shape
    (fig, axs) = plt.subplots(shape[0], shape[1], figsize=(16, 6), sharex=True)
    for row in range(shape[0]):
        for col in range(shape[1]):
            description = layout[row][col]
            ax = axs[row][col]
            data = augmentations[description]
            S = numpy.abs(librosa.stft(data))  # magnitude spectrogram
            S = scipy.ndimage.filters.gaussian_filter(S, 0.7)  # light smoothing
            S = librosa.amplitude_to_db(S, ref=numpy.max)
            S -= S.mean()  # center around 0 dB so color scales are comparable
            librosa.display.specshow(S, ax=ax, sr=sr, y_axis='hz')
            ax.set_ylim(0, 5000)
            ax.set_title(description)
    return fig
def save_model_card(repo_id: str, image_logs=None, base_model: str = '', repo_folder=None):
    """Write a Hub model-card README.md for a trained ControlNet.

    Args:
        repo_id: target repository id, embedded in the card title.
        image_logs: optional list of dicts with 'images', 'validation_prompt'
            and 'validation_image'; rendered as sample grids.
        base_model: id of the base model. Fixed: previously defaulted to the
            `str` type itself, which rendered as "<class 'str'>" in the card.
        repo_folder: directory where README.md and sample images are written.
    """
    img_str = ''
    if image_logs is not None:
        for i, log in enumerate(image_logs):
            images = log['images']
            validation_prompt = log['validation_prompt']
            validation_image = log['validation_image']
            validation_image.save(os.path.join(repo_folder, 'image_control.png'))
            img_str += f'prompt: {validation_prompt}\n'
            # Grid shows the conditioning image first, then the samples.
            images = [validation_image] + images
            make_image_grid(images, 1, len(images)).save(os.path.join(repo_folder, f'images_{i}.png'))
            img_str += '\n'
    yaml = f'''
---
license: creativeml-openrail-m
base_model: {base_model}
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- controlnet
- jax-diffusers-event
inference: true
---
'''
    model_card = f'''
# controlnet- {repo_id}
These are controlnet weights trained on {base_model} with new type of conditioning. You can find some example images in the following.
{img_str}
'''
    with open(os.path.join(repo_folder, 'README.md'), 'w') as f:
        f.write(yaml + model_card)
class TestPointAssigner(unittest.TestCase):
    """Behavioural checks for PointAssigner: points vs. gt boxes, including
    empty-gt and fully-empty corner cases."""

    @staticmethod
    def _make_instances(priors, bboxes, labels):
        # Build (predictions, ground-truths) InstanceData pairs from plain lists.
        preds = InstanceData()
        preds.priors = torch.FloatTensor(priors)
        gts = InstanceData()
        gts.bboxes = torch.FloatTensor(bboxes)
        gts.labels = torch.LongTensor(labels)
        return preds, gts

    def test_point_assigner(self):
        preds, gts = self._make_instances(
            [[0, 0, 1], [10, 10, 1], [5, 5, 1], [32, 32, 1]],
            [[0, 0, 10, 9], [0, 10, 10, 19]],
            [0, 1])
        result = PointAssigner().assign(preds, gts)
        assert_allclose(result.gt_inds, torch.LongTensor([1, 2, 1, 0]))

    def test_point_assigner_with_empty_gt(self):
        # No ground truths: every point stays unassigned (index 0).
        preds, gts = self._make_instances(
            [[0, 0, 1], [10, 10, 1], [5, 5, 1], [32, 32, 1]], [], [])
        result = PointAssigner().assign(preds, gts)
        assert_allclose(result.gt_inds, torch.LongTensor([0, 0, 0, 0]))

    def test_point_assigner_with_empty_boxes_and_gt(self):
        # Neither points nor ground truths: empty assignment.
        preds, gts = self._make_instances([], [], [])
        result = PointAssigner().assign(preds, gts)
        self.assertEqual(len(result.gt_inds), 0)
def conv3x3(in_channels, out_channels, module_name, postfix, stride=1, groups=1, kernel_size=3, padding=1):
    """Named (conv, norm, relu) triple, ready to be fed into an OrderedDict
    for a Sequential module."""
    prefix = f'{module_name}_{postfix}'
    convolution = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                            stride=stride, padding=padding, groups=groups, bias=False)
    return [
        (f'{prefix}/conv', convolution),
        (f'{prefix}/norm', get_norm(_NORM, out_channels)),
        (f'{prefix}/relu', nn.ReLU(inplace=True)),
    ]
def create_feedforward_V_function(observation_shape, *args, observation_preprocessor=None, name='feedforward_V', **kwargs):
    """Build a scalar-output feedforward V(s) model over observations only."""
    return feedforward_model(
        (observation_shape,),
        *args,
        output_size=1,
        preprocessors=(observation_preprocessor, None),
        **kwargs)
def get_tasks(task_names):
    """Parse a comma-separated task list; 'all' expands to every known task.

    Unknown task names trigger an AssertionError.
    """
    requested = task_names.split(',')
    if 'all' in requested:
        return TASKS
    for task_name in requested:
        assert task_name in TASKS, ('Task %s not found!' % task_name)
    return requested
class ProbModel(torch.nn.Module):
    """Wraps a logit-producing model and converts its output to
    probabilities via softmax over dim 1."""

    def __init__(self, model):
        super(ProbModel, self).__init__()
        self.model = model  # underlying network producing raw logits

    def forward(self, x):
        logits = self.model(x)
        return torch.softmax(logits, 1)
def remove_small_elements(segmentation_mask: np.ndarray, min_size: int=1000) -> np.ndarray:
    """Zero out connected foreground components smaller than `min_size`
    pixels, preserving the original label values elsewhere."""
    foreground = segmentation_mask > 0
    keep = remove_small_objects(foreground, min_size=min_size)
    return segmentation_mask * keep
def conv_layer(ni, nf, ks=3, stride=1, zero_bn=False, act=True):
    """conv -> batchnorm (optionally zero-initialised, e.g. for residual
    tails) -> optional activation, as a Sequential."""
    bn = nn.BatchNorm2d(nf)
    nn.init.constant_(bn.weight, 0.0 if zero_bn else 1.0)
    layers = [conv(ni, nf, ks, stride=stride), bn]
    return nn.Sequential(*(layers + [act_fn] if act else layers))
class DynamicMTGATPruneModel(nn.Module):
    """Multimodal (vision/text/audio) GAT with optional edge pruning.

    Each modality is projected into a shared node space, assembled into a
    time/type-aware dynamic graph, passed through stacked MTGATConv layers
    interleaved with Top-K or random edge pooling, and finally split back
    into per-sample node feature lists.
    """

    def __init__(self, config, concat=True, num_gat_layers=1, use_pe=False):
        super(DynamicMTGATPruneModel, self).__init__()
        self.config = config
        self.use_pe = use_pe  # add positional encodings per modality
        if concat:
            # Multi-head outputs are concatenated, so split the target dim
            # evenly across heads.
            gat_out_channel = int((self.config['graph_conv_out_dim'] / self.config['gat_conv_num_heads']))
        else:
            gat_out_channel = int(self.config['graph_conv_out_dim'])
        self.gats = nn.ModuleList([])
        for l in range(num_gat_layers):
            # Edge-type vocabulary size depends on the awareness flags.
            if (self.config['time_aware_edges'] and self.config['type_aware_edges']):
                num_edge_types = 27
            if (self.config['time_aware_edges'] and (not self.config['type_aware_edges'])):
                num_edge_types = 3
            if ((not self.config['time_aware_edges']) and self.config['type_aware_edges']):
                num_edge_types = 9
            if ((not self.config['time_aware_edges']) and (not self.config['type_aware_edges'])):
                num_edge_types = 1
            self.gats.append(MTGATConv(in_channels=self.config['graph_conv_in_dim'], num_node_types=3, num_edge_types=num_edge_types, out_channels=gat_out_channel, heads=self.config['gat_conv_num_heads'], dropout=self.config['gnn_dropout'], concat=concat))
            # NOTE(review): this condition is always true for every l in the
            # loop — was a "skip after the last layer" guard intended? Confirm.
            if (l in range(0, num_gat_layers)):
                if (self.config['prune_type'] == 'topk'):
                    self.gats.append(TopKEdgePooling(min_score=None, percentage=self.config['prune_keep_p']))
                elif (self.config['prune_type'] == 'random'):
                    self.gats.append(RandomEdgePooling(percentage=self.config['prune_keep_p']))
                else:
                    raise NotImplementedError
        # Vision branch: optional conv1d front-end, then FFN/linear projection.
        if use_pe:
            self.vision_pe = PositionalEncoding(d_model=self.config['graph_conv_in_dim'])
        if self.config['use_conv1d']:
            self.vision_conv = nn.Sequential(nn.Conv1d(in_channels=self.config['vision_dim'], out_channels=self.config['graph_conv_in_dim'], kernel_size=3), nn.ReLU())
            vision_fc_in_dim = self.config['graph_conv_in_dim']
        else:
            vision_fc_in_dim = self.config['vision_dim']
        if self.config['use_ffn']:
            self.vision_fc = nn.Sequential(nn.Linear(in_features=vision_fc_in_dim, out_features=self.config['graph_conv_in_dim']), nn.ReLU(), nn.Linear(in_features=self.config['graph_conv_in_dim'], out_features=self.config['graph_conv_in_dim']), nn.ReLU())
        else:
            self.vision_fc = nn.Sequential(nn.Linear(in_features=vision_fc_in_dim, out_features=self.config['graph_conv_in_dim']))
        # Text branch.
        if use_pe:
            self.text_pe = PositionalEncoding(d_model=self.config['graph_conv_in_dim'])
        if self.config['use_ffn']:
            self.text_fc = nn.Sequential(nn.Linear(in_features=self.config['text_dim'], out_features=self.config['graph_conv_in_dim']), nn.ReLU(), nn.Linear(in_features=self.config['graph_conv_in_dim'], out_features=self.config['graph_conv_in_dim']), nn.ReLU())
        else:
            self.text_fc = nn.Sequential(nn.Linear(in_features=self.config['text_dim'], out_features=self.config['graph_conv_in_dim']))
        # Audio branch.
        if use_pe:
            self.audio_pe = PositionalEncoding(d_model=self.config['graph_conv_in_dim'])
        if self.config['use_ffn']:
            self.audio_fc = nn.Sequential(nn.Linear(in_features=self.config['audio_dim'], out_features=self.config['graph_conv_in_dim']), nn.ReLU(), nn.Linear(in_features=self.config['graph_conv_in_dim'], out_features=self.config['graph_conv_in_dim']), nn.ReLU())
        else:
            self.audio_fc = nn.Sequential(nn.Linear(in_features=self.config['audio_dim'], out_features=self.config['graph_conv_in_dim']))
        # Optional non-linearity applied after each GAT layer.
        if self.config['graph_activation']:
            if (self.config['graph_activation'] == 'lrelu'):
                self.activation = nn.LeakyReLU(negative_slope=0.1)
            elif (self.config['graph_activation'] == 'gelu'):
                self.activation = nn.GELU()

    def data_to_graph_nodes(self, **kwargs):
        """Project each modality into the shared node space.

        Accepts optional 'vision', 'text', 'audio' kwargs; missing modalities
        pass through as None.
        """
        vision = kwargs.pop('vision', None)
        text = kwargs.pop('text', None)
        audio = kwargs.pop('audio', None)
        if (vision is not None):
            if self.config['use_conv1d']:
                # Pad the time axis by 2 so kernel_size=3 preserves length.
                vision = torch.cat((vision, torch.zeros((vision.shape[0], 2, vision.shape[2])).to(device)), dim=1)
                vision = self.vision_conv(vision.permute(0, 2, 1).contiguous())
                vision = vision.permute(0, 2, 1).contiguous()
            vision = self.vision_fc(vision)
            if self.use_pe:
                # PositionalEncoding expects (seq, batch, dim); permute around it.
                vision = self.vision_pe(vision.permute(1, 0, 2).contiguous())
                vision = vision.permute(1, 0, 2).contiguous()
        if (text is not None):
            text = self.text_fc(text)
            if self.use_pe:
                text = self.text_pe(text.permute(1, 0, 2).contiguous())
                text = text.permute(1, 0, 2).contiguous()
        if (audio is not None):
            audio = self.audio_fc(audio)
            if self.use_pe:
                audio = self.audio_pe(audio.permute(1, 0, 2).contiguous())
                audio = audio.permute(1, 0, 2).contiguous()
        return (vision, text, audio)

    def sequential_process(self, **kwargs):
        """Collect the non-None modalities into a name -> tensor dict."""
        vision = kwargs.pop('vision', None)
        text = kwargs.pop('text', None)
        audio = kwargs.pop('audio', None)
        processed_feat_dict = {}
        if (vision is not None):
            processed_feat_dict['vision'] = vision
        if (text is not None):
            processed_feat_dict['text'] = text
        if (audio is not None):
            processed_feat_dict['audio'] = audio
        return processed_feat_dict

    def forward(self, vision, text, audio, v_mask, t_mask, a_mask):
        """Run graph construction, GAT + pruning layers, and split node
        features back per sample.

        Returns a list of per-sample node feature tensors (plus layer
        records when config['return_layer_outputs'] is set).
        """
        (vision, text, audio) = self.data_to_graph_nodes(vision=vision, text=text, audio=audio)
        (batch_x, batch_x_type, edge_index_list, batch_edge_types) = construct_time_aware_dynamic_graph(vision, text, audio, v_mask, t_mask, a_mask, all_to_all=self.config['use_all_to_all'], time_aware=self.config['time_aware_edges'], type_aware=self.config['type_aware_edges'])
        assert (vision.shape[0] == text.shape[0] == audio.shape[0]), 'Batch sizes must be the same!'
        batch_size = vision.shape[0]
        try:
            l = [gData(x=batch_x[i], edge_index=edge_index_list[i], x_type=batch_x_type[i], edge_type=batch_edge_types[i]) for i in range(batch_size)]
        except:
            # NOTE(review): interactive-debugger leftover; remove before release.
            import ipdb
            ipdb.set_trace()
        batch = Batch.from_data_list(l)
        context_summ = batch.x
        if self.config['return_layer_outputs']:
            # Records of node features / edges per layer for introspection.
            (nodes_rec, edge_indices_rec, edge_weights_rec, edge_types_rec) = ([context_summ], [batch.edge_index], [None], [batch.edge_type])
        for module in self.gats:
            if (type(module) == MTGATConv):
                try:
                    (gat_output, (ei, e_weights)) = module(context_summ, edge_index=batch.edge_index, x_type=batch.x_type, edge_type=batch.edge_type, return_attention_weights=True)
                    if self.config['use_residual']:
                        context_summ = (context_summ + gat_output)
                    else:
                        context_summ = gat_output
                except:
                    # NOTE(review): debugger leftover; remove before release.
                    print(traceback.print_exc())
                    ipdb.set_trace()
                if self.config['graph_activation']:
                    context_summ = self.activation(context_summ)
            elif (type(module) == TopKPooling):
                context_summ = module(context_summ, edge_index=batch.edge_index)
            elif (type(module) == TopKEdgePooling):
                # Prune edges by attention weight; keep the surviving subset.
                (ei, e_weights, kept_index) = module(ei, e_weights, return_kept_index=True)
                batch.edge_index = ei
                batch.edge_type = batch.edge_type[kept_index]
                if self.config['return_layer_outputs']:
                    nodes_rec.append(context_summ)
                    edge_indices_rec.append(ei)
                    edge_weights_rec.append(e_weights)
                    edge_types_rec.append(batch.edge_type)
            elif (type(module) == RandomEdgePooling):
                # Prune a random subset of edges (ablation baseline).
                (ei, e_weights, kept_index) = module(ei, e_weights, return_kept_index=True)
                batch.edge_index = ei
                batch.edge_type = batch.edge_type[kept_index]
                if self.config['return_layer_outputs']:
                    nodes_rec.append(context_summ)
                    edge_indices_rec.append(ei)
                    edge_weights_rec.append(e_weights)
                    edge_types_rec.append(batch.edge_type)
        # Per-sample node counts, used to un-batch the flat node tensor.
        shapes = [bx.shape[0] for bx in batch_x]
        if self.config['remove_isolated']:
            (_, _, mask0) = pyg.utils.isolated.remove_isolated_nodes(batch.edge_index)
            mask = (torch.zeros(batch.x.shape[0]).to(device) == 0)
            mask[:mask0.shape[0]] = mask0
        else:
            mask = (torch.ones(batch.x.shape[0]) == 1)
        node_features = []
        offset = 0
        for (i, s) in enumerate(shapes):
            mask_s = mask[offset:(offset + s)]
            node_features.append(context_summ[offset:(offset + s)][mask_s])
            offset += s
        if self.config['return_layer_outputs']:
            return (node_features, batch, nodes_rec, edge_indices_rec, edge_weights_rec, edge_types_rec)
        return node_features
def create_dummy_func(func, dependency, message=''):
    """Return a stand-in for `func` that raises ImportError when called.

    Args:
        func: name of the unavailable function.
        dependency: missing dependency name, or a list/tuple of names.
        message: optional extra text appended to the error message.

    Fix: a list/tuple dependency is now joined *before* the error message is
    formatted; previously the join ran after formatting and had no effect,
    so the message showed the raw list repr.
    """
    if isinstance(dependency, (list, tuple)):
        dependency = ','.join(dependency)
    err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, func)
    if message:
        err = err + ' ' + message

    def _dummy(*args, **kwargs):
        raise ImportError(err)
    return _dummy
def load_data(folder, domain):
    """Load fc6 features and labels for `domain` from
    `<folder>/<domain>_fc6.mat`. Returns (features, labels)."""
    from scipy import io
    mat = io.loadmat(os.path.join(folder, domain + '_fc6.mat'))
    return (mat['fts'], mat['labels'])
def f2_score_multi(y_true, y_pred, average):
    """F-beta score with beta=2, i.e. recall weighted twice as heavily as
    precision, aggregated per the given `average` mode."""
    return fbeta_score(y_true, y_pred, beta=2, average=average)
class GaussianDropout(Layer):
    """Keras-style Gaussian (multiplicative) dropout layer wrapper.

    `rate` is forwarded to the backend Layer constructor; `bigdl_type`
    selects the backend numeric precision.
    """

    def __init__(self, rate, bigdl_type='float'):
        # First positional argument (jvalue) is None: construction is
        # delegated entirely to the BigDL backend.
        super(GaussianDropout, self).__init__(None, bigdl_type, rate)
class _StemBlock(nn.Module):
    """PeleeNet-style stem: a stride-2 conv, then two parallel branches
    (max-pool vs. 1x1->3x3 stride-2 bottleneck) concatenated and fused by
    a 1x1 conv."""

    def __init__(self, num_input_channels, num_init_features):
        super(_StemBlock, self).__init__()
        bottleneck = int(num_init_features / 2)  # channel squeeze for branch 2
        self.stem1 = BasicConv2d(num_input_channels, num_init_features, kernel_size=3, stride=2, padding=1)
        self.stem2a = BasicConv2d(num_init_features, bottleneck, kernel_size=1, stride=1, padding=0)
        self.stem2b = BasicConv2d(bottleneck, num_init_features, kernel_size=3, stride=2, padding=1)
        self.stem3 = BasicConv2d(2 * num_init_features, num_init_features, kernel_size=1, stride=1, padding=0)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        stem = self.stem1(x)
        conv_branch = self.stem2b(self.stem2a(stem))
        pool_branch = self.pool(stem)
        merged = torch.cat([pool_branch, conv_branch], 1)
        return self.stem3(merged)
def test(cfg_file, ckpt: str, output_path: str=None, datasets: dict=None, save_attention: bool=False, save_scores: bool=False) -> None:
    """Decode (or score) the dev and test sets with a trained model.

    Args:
        cfg_file: path to the YAML config.
        ckpt: checkpoint path; falls back to the config's load_model.
        output_path: prefix for translation / score output files.
        datasets: optional pre-loaded {'dev','test','src_vocab','trg_vocab'}.
        save_attention: store attention plots (greedy decoding only).
        save_scores: store sequence scores (requires output_path and a
            'return_prob' setting in the config).
    """
    cfg = load_config(Path(cfg_file))
    (model_dir, load_model, device, n_gpu, num_workers, normalization, fp16) = parse_train_args(cfg['training'], mode='prediction')
    # Only create a logger once per process.
    if (len(logger.handlers) == 0):
        pkg_version = make_logger(model_dir, mode='test')
        if ('joeynmt_version' in cfg):
            check_version(pkg_version, cfg['joeynmt_version'])
    # Load data + vocabularies unless the caller supplied them.
    if (datasets is None):
        (src_vocab, trg_vocab, _, dev_data, test_data) = load_data(data_cfg=cfg['data'], datasets=['dev', 'test'])
        data_to_predict = {'dev': dev_data, 'test': test_data}
    else:
        data_to_predict = {'dev': datasets['dev'], 'test': datasets['test']}
        src_vocab = datasets['src_vocab']
        trg_vocab = datasets['trg_vocab']
    model = build_model(cfg['model'], src_vocab=src_vocab, trg_vocab=trg_vocab)
    if save_attention:
        # Attention plots require greedy decoding for transformer decoders.
        if (cfg['model']['decoder']['type'] == 'transformer'):
            assert (cfg['testing'].get('beam_size', 1) == 1), 'Attention plots can be saved with greedy decoding only. Please set `beam_size: 1` in the config.'
        cfg['testing']['return_attention'] = True
    return_prob = cfg['testing'].get('return_prob', 'none')
    if save_scores:
        assert output_path, 'Please specify --output_path for saving scores.'
        if (return_prob == 'none'):
            logger.warning('Please specify prob type: {`ref` or `hyp`} in the config. Scores will not be saved.')
            save_scores = False
        elif (return_prob == 'ref'):
            # Scoring references needs the training loss and greedy decoding.
            assert (cfg['testing'].get('beam_size', 1) == 1), 'Scores of given references can be computed with greedy decoding only.Please set `beam_size: 1` in the config.'
            model.loss_function = (cfg['training'].get('loss', 'crossentropy'), cfg['training'].get('label_smoothing', 0.1))
    # CLI checkpoint overrides the config's load_model.
    load_model = (load_model if (ckpt is None) else Path(ckpt))
    ckpt = resolve_ckpt_path(load_model, model_dir)
    model_checkpoint = load_checkpoint(ckpt, device=device)
    model.load_state_dict(model_checkpoint['model_state'])
    if (device.type == 'cuda'):
        model.to(device)
    if ((n_gpu > 1) and (not isinstance(model, torch.nn.DataParallel))):
        model = _DataParallel(model)
    logger.info(model)
    set_seed(seed=cfg['training'].get('random_seed', 42))
    for (data_set_name, data_set) in data_to_predict.items():
        if (data_set is not None):
            data_set.reset_random_subset()
            logger.info('%s on %s set...', ('Scoring' if (return_prob == 'ref') else 'Decoding'), data_set_name)
            (_, _, hypotheses, hypotheses_raw, seq_scores, att_scores) = predict(model=model, data=data_set, compute_loss=(return_prob == 'ref'), device=device, n_gpu=n_gpu, num_workers=num_workers, normalization=normalization, cfg=cfg['testing'], fp16=fp16)
            if save_attention:
                if att_scores:
                    attention_file_name = f'{data_set_name}.{ckpt.stem}.att'
                    attention_file_path = (model_dir / attention_file_name).as_posix()
                    logger.info('Saving attention plots. This might take a while..')
                    store_attention_plots(attentions=att_scores, targets=hypotheses_raw, sources=data_set.get_list(lang=data_set.src_lang, tokenized=True), indices=range(len(hypotheses)), output_prefix=attention_file_path)
                    logger.info('Attention plots saved to: %s', attention_file_path)
                else:
                    logger.warning('Attention scores could not be saved. Note that attention scores are not available when using beam search. Set beam_size to 1 for greedy decoding.')
            if (output_path is not None):
                if (save_scores and (seq_scores is not None)):
                    output_path_scores = Path(f'{output_path}.{data_set_name}.scores')
                    write_list_to_file(output_path_scores, seq_scores)
                    output_path_tokens = Path(f'{output_path}.{data_set_name}.tokens')
                    write_list_to_file(output_path_tokens, hypotheses_raw)
                    logger.info('Scores and corresponding tokens saved to: %s.{scores|tokens}', f'{output_path}.{data_set_name}')
                if (hypotheses is not None):
                    output_path_set = Path(f'{output_path}.{data_set_name}')
                    write_list_to_file(output_path_set, hypotheses)
                    logger.info('Translations saved to: %s.', output_path_set)
class UmapKmeans():
    """UMAP embedding followed by KMeans clustering in the embedded space."""

    def __init__(self, n_clusters, umap_dim=2, umap_neighbors=10, umap_min_distance=float(0), umap_metric='euclidean', random_state=0):
        self.n_clusters = n_clusters
        self.manifold_in_embedding = umap.UMAP(
            random_state=random_state,
            metric=umap_metric,
            n_components=umap_dim,
            n_neighbors=umap_neighbors,
            min_dist=umap_min_distance,
        )
        self.cluster_manifold = KMeans(n_clusters=n_clusters, random_state=random_state, n_jobs=(- 1))
        self.hle = None  # cached low-dimensional embedding from the last fit

    def fit(self, hl):
        """Embed `hl` with UMAP and fit KMeans on the embedding."""
        self.hle = self.manifold_in_embedding.fit_transform(hl)
        self.cluster_manifold.fit(self.hle)

    def predict(self, hl):
        """Project new points into the fitted embedding, then assign clusters."""
        embedded = self.manifold_in_embedding.transform(hl)
        return np.asarray(self.cluster_manifold.predict(embedded))

    def fit_predict(self, hl):
        """Fit on `hl` and label the training points themselves."""
        self.fit(hl)
        return np.asarray(self.cluster_manifold.predict(self.hle))
def _graph_network(graph_tuple):
    """Apply an identity GraphNetwork: edges, nodes and globals all pass
    through unchanged."""
    def edge_fn(e, sn, rn, g):
        return e

    def node_fn(n, se, re, g):
        return n

    def global_fn(gn, ge, g):
        return g

    net = nn.GraphNetwork(edge_fn, node_fn, global_fn)
    return net(graph_tuple)
class Attacker():
    """Base class for adversarial attacks; subclasses implement perturb()."""

    def __init__(self, clip_max=1.0, clip_min=0.0):
        # Valid value range the adversarial example must stay within.
        self.clip_min = clip_min
        self.clip_max = clip_max

    def perturb(self, model, x, y):
        """Produce an adversarial version of `x` for `model`; no-op here."""
        pass
def _master_is_failing_stamp(branch, commit):
return '<!-- commit {}{} -->'.format(commit, branch) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.