code stringlengths 17 6.64M |
|---|
def GetChemSpider12(a):
    """Build an evaluation-only TFMolManage restored from the pretrained
    ChemSpider12 network checkpoint.

    :param a: molecule set (MSet-like) that is wrapped into the tensor-data
        object handed to the manager.
    :return: TFMolManage loaded from the named checkpoint (the trailing
        False flags keep it in evaluation mode — no training is started).
    """
    # The network was trained on H, C, N, O only.
    TreatedAtoms = np.array([1, 6, 7, 8], dtype=np.uint8)
    # Hyper-parameters are written into the global PARAMS dict, which the
    # manager reads; they must match the settings the checkpoint was
    # trained with.
    PARAMS['NetNameSuffix'] = 'act_sigmoid100'
    PARAMS['learning_rate'] = 1e-05
    PARAMS['momentum'] = 0.95
    PARAMS['max_steps'] = 21
    PARAMS['batch_size'] = 50
    PARAMS['test_freq'] = 1
    PARAMS['tf_prec'] = 'tf.float64'
    PARAMS['EnergyScalar'] = 1.0
    # Relative weight of the gradient term in the loss.
    PARAMS['GradScalar'] = (1.0 / 20.0)
    PARAMS['DipoleScaler'] = 1.0
    PARAMS['NeuronType'] = 'sigmoid_with_param'
    PARAMS['sigmoid_alpha'] = 100.0
    PARAMS['HiddenLayers'] = [2000, 2000, 2000]
    # Electrostatics cutoffs (presumably Angstrom — TODO confirm) with DSF
    # damping; Ecc correction enabled.
    PARAMS['EECutoff'] = 15.0
    PARAMS['EECutoffOn'] = 0
    PARAMS['Elu_Width'] = 4.6
    PARAMS['EECutoffOff'] = 15.0
    PARAMS['DSFAlpha'] = 0.18
    PARAMS['AddEcc'] = True
    # Dropout keep-probability per layer (last layer 0.7).
    PARAMS['KeepProb'] = [1.0, 1.0, 1.0, 0.7]
    PARAMS['learning_rate_dipole'] = 0.0001
    PARAMS['learning_rate_energy'] = 1e-05
    PARAMS['SwitchEpoch'] = 2
    d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
    tset = TensorMolData_BP_Direct_EE_WithEle(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
    # Restore the pretrained network by name; Train_=False / RandomTData_=False.
    manager = TFMolManage('Mol_chemspider12_maxatom35_H2O_with_CH4_ANI1_Sym_Direct_fc_sqdiff_BP_Direct_EE_ChargeEncode_Update_vdw_DSF_elu_Normalize_Dropout_act_sigmoid100_rightalpha', tset, False, 'fc_sqdiff_BP_Direct_EE_ChargeEncode_Update_vdw_DSF_elu_Normalize_Dropout', False, False)
    return manager
|
def Xmas():
    """Shoot a molecular "ball" at a christmas-tree molecule and propagate the
    collision with velocity-Verlet MD, writing trajectory 'MerryXmas'.

    Relies on module-level geometries ``treeXYZ``/``ballXYZ`` and the
    pretrained network from :func:`GetChemSpider12`.
    """
    tree = Mol()
    tree.FromXYZString(treeXYZ)
    tree.coords -= tree.Center()  # center the tree at the origin
    ball = Mol()
    ball.FromXYZString(ballXYZ)
    ball.coords -= ball.Center()
    # Start the ball 15 units down the -x axis.
    ball.coords -= np.array([15.0, 0.0, 0.0])
    ntree = tree.NAtoms()
    # Merge tree + ball into a single system (tree atoms first).
    toshoot = Mol(np.concatenate([tree.atoms, ball.atoms], axis=0), np.concatenate([tree.coords, ball.coords], axis=0))
    v0 = np.zeros(toshoot.coords.shape)
    # Give only the ball atoms an initial velocity; subtracting a negative
    # x-component sends the ball in +x, toward the tree.
    v0[ntree:] -= np.array([(- 0.1581), 0.0, 0.0])
    def GetEnergyForceForMol(m):
        # Build a closure that evaluates energy and forces for a geometry x_.
        s = MSet()
        s.mols.append(m)
        manager = GetChemSpider12(s)
        def EnAndForce(x_, DoForce=True):
            tmpm = Mol(m.atoms, x_)
            (Etotal, Ebp, Ebp_atom, Ecc, Evdw, mol_dipole, atom_charge, gradient) = manager.EvalBPDirectEEUpdateSingle(tmpm, PARAMS['AN1_r_Rc'], PARAMS['AN1_a_Rc'], PARAMS['EECutoffOff'], True)
            energy = Etotal[0]
            force = gradient[0]
            if DoForce:
                return (energy, force)
            else:
                return energy
        return EnAndForce
    F = GetEnergyForceForMol(toshoot)
    PARAMS['MDThermostat'] = None  # no thermostat (NVE-style propagation)
    PARAMS['MDV0'] = None          # do not auto-generate initial velocities
    traj = VelocityVerlet(None, toshoot, 'MerryXmas', F)
    traj.v = v0.copy()  # inject the hand-built initial velocity field
    traj.Prop()
|
def Train():
    """Training entry point; the two blocks are toggled with ``if 1:`` / ``if 0:``.

    Block 1 (enabled): train an energy+gradient Behler-Parrinello network on
    the 'water_mini' set.  Block 2 (disabled): train the energy+dipole
    network with DSF electrostatics on the same set.
    """
    if 1:  # energy-and-gradient network
        a = MSet('water_mini')
        a.Load()
        random.shuffle(a.mols)  # shuffle before internal train/test split
        TreatedAtoms = a.AtomTypes()
        # Training hyper-parameters are communicated via the global PARAMS dict.
        PARAMS['NetNameSuffix'] = 'training_sample'
        PARAMS['learning_rate'] = 1e-05
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 15
        PARAMS['batch_size'] = 100
        PARAMS['test_freq'] = 1
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['EnergyScalar'] = 1.0
        PARAMS['GradScalar'] = (1.0 / 20.0)  # weight of gradient loss term
        PARAMS['NeuronType'] = 'sigmoid_with_param'
        PARAMS['sigmoid_alpha'] = 100.0
        PARAMS['KeepProb'] = [1.0, 1.0, 1.0, 1.0]  # no dropout
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='AtomizationEnergy')
        tset = TensorMolData_BP_Direct_EandG_Release(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        # Empty name => create a fresh (untrained) network.
        manager = TFMolManage('', tset, False, 'fc_sqdiff_BP_Direct_EandG_SymFunction')
        PARAMS['Profiling'] = 0
        manager.Train(1)
    if 0:  # energy-and-dipole network with DSF electrostatics (disabled)
        a = MSet('water_mini')
        a.Load()
        random.shuffle(a.mols)
        TreatedAtoms = a.AtomTypes()
        PARAMS['NetNameSuffix'] = 'training_sample'
        PARAMS['learning_rate'] = 1e-05
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 15
        PARAMS['batch_size'] = 100
        PARAMS['test_freq'] = 1
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['EnergyScalar'] = 1.0
        PARAMS['GradScalar'] = (1.0 / 20.0)
        PARAMS['DipoleScaler'] = 1.0
        PARAMS['NeuronType'] = 'sigmoid_with_param'
        PARAMS['sigmoid_alpha'] = 100.0
        PARAMS['HiddenLayers'] = [100, 100, 100]
        # Electrostatics cutoffs with DSF damping.
        PARAMS['EECutoff'] = 15.0
        PARAMS['EECutoffOn'] = 0
        PARAMS['Elu_Width'] = 4.6
        PARAMS['EECutoffOff'] = 15.0
        PARAMS['DSFAlpha'] = 0.18
        PARAMS['AddEcc'] = True
        PARAMS['KeepProb'] = [1.0, 1.0, 1.0, 1.0]
        PARAMS['learning_rate_dipole'] = 0.0001
        PARAMS['learning_rate_energy'] = 1e-05
        # Epoch at which training switches from dipole to energy objective
        # (presumed from the paired learning rates — TODO confirm).
        PARAMS['SwitchEpoch'] = 5
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
        tset = TensorMolData_BP_Direct_EE_WithEle_Release(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        manager = TFMolManage('', tset, False, 'fc_sqdiff_BP_Direct_EE_SymFunction')
        PARAMS['Profiling'] = 0
        manager.Train(1)
|
def Eval():
    """Evaluate the pretrained networks on the 'H2O_trimer_move' geometries.

    Block 1: energy+gradient network -> per-geometry total energies written
    to 'EanGlearning.dat'.  Block 2: energy+dipole network with DSF
    electrostatics -> 'EElearning.dat'.  Both blocks print energies (a.u.),
    forces, and (block 2) dipoles per molecule.
    """
    if 1:  # energy-and-gradient network
        a = MSet('H2O_trimer_move', center_=False)
        a.ReadXYZ('H2O_trimer_move')
        TreatedAtoms = a.AtomTypes()
        # PARAMS must match the settings the checkpoint was trained with.
        PARAMS['NetNameSuffix'] = 'training_sample'
        PARAMS['learning_rate'] = 1e-05
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 15
        PARAMS['batch_size'] = 100
        PARAMS['test_freq'] = 1
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['EnergyScalar'] = 1.0
        PARAMS['GradScalar'] = (1.0 / 20.0)
        PARAMS['NeuronType'] = 'sigmoid_with_param'
        PARAMS['sigmoid_alpha'] = 100.0
        PARAMS['KeepProb'] = [1.0, 1.0, 1.0, 1.0]
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='AtomizationEnergy')
        tset = TensorMolData_BP_Direct_EandG_Release(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        # Load the pretrained manager directly.  (The original code first
        # constructed a throwaway TFMolManage('', ...) here and immediately
        # overwrote it; that dead construction has been removed.)
        manager = TFMolManage('Mol_water_mini_ANI1_Sym_Direct_fc_sqdiff_BP_Direct_EandG_SymFunction_training_sample', tset, False, 'fc_sqdiff_BP_Direct_EandG_SymFunction', False, False)
        total_e = []
        for m in a.mols:
            (Etotal, Ebp, Ebp_atom, force) = manager.EvalBPDirectEandGLinearSingle(m, PARAMS['AN1_r_Rc'], PARAMS['AN1_a_Rc'])
            print('Unit of energy: a.u')
            print(('Etotal: %8.6f' % Etotal))
            print('Unit of diple: Joules/Angstrom')
            print('force:', force)
            total_e.append(Etotal)
        np.savetxt('EanGlearning.dat', np.asarray(total_e))
    if 1:  # energy-and-dipole network with DSF electrostatics
        a = MSet('H2O_trimer_move', center_=False)
        a.ReadXYZ('H2O_trimer_move')
        TreatedAtoms = a.AtomTypes()
        PARAMS['NetNameSuffix'] = 'training_sample'
        PARAMS['learning_rate'] = 1e-05
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 5
        PARAMS['batch_size'] = 100
        PARAMS['test_freq'] = 1
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['EnergyScalar'] = 1.0
        PARAMS['GradScalar'] = (1.0 / 20.0)
        PARAMS['DipoleScaler'] = 1.0
        PARAMS['NeuronType'] = 'sigmoid_with_param'
        PARAMS['sigmoid_alpha'] = 100.0
        PARAMS['HiddenLayers'] = [100, 100, 100]
        # Electrostatics cutoffs with DSF damping.
        PARAMS['EECutoff'] = 15.0
        PARAMS['EECutoffOn'] = 0
        PARAMS['Elu_Width'] = 4.6
        PARAMS['EECutoffOff'] = 15.0
        PARAMS['DSFAlpha'] = 0.18
        PARAMS['AddEcc'] = True
        PARAMS['KeepProb'] = [1.0, 1.0, 1.0, 1.0]
        PARAMS['learning_rate_dipole'] = 0.0001
        PARAMS['learning_rate_energy'] = 1e-05
        PARAMS['SwitchEpoch'] = 2
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
        tset = TensorMolData_BP_Direct_EE_WithEle_Release(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        manager = TFMolManage('Mol_water_mini_ANI1_Sym_Direct_fc_sqdiff_BP_Direct_EE_SymFunction_training_sample', tset, False, 'fc_sqdiff_BP_Direct_EE_SymFunction', False, False)
        total_e = []
        for m in a.mols:
            (Etotal, Ebp, Ebp_atom, Ecc, Evdw, mol_dipole, atom_charge, force) = manager.EvalBPDirectEELinearSingle(m, PARAMS['AN1_r_Rc'], PARAMS['AN1_a_Rc'], PARAMS['EECutoffOff'], True)
            print('Unit of energy: a.u')
            print(('Etotal: %8.6f Ebp: %8.6f Ecc: %8.6f Evdw: %8.6f' % (Etotal, Ebp, Ecc, Evdw)))
            print('Unit of diple: a.u')
            print('Dipole: ', mol_dipole)
            print('Unit of diple: Joules/Angstrom')
            print('force:', force)
            total_e.append(Etotal)
        np.savetxt('EElearning.dat', np.asarray(total_e))
|
class GroundTruthDatasetFactory(Dataset):
    """Factory to create projection datasets from any 2D image-data.

    This is essentially a simple version of dival [1] without any noise
    contribution.

    References:
        [1] Johannes Leuschner, Maximilian Schmidt, Daniel Otero Baguer, and
        Peter Maass. The lodopab-ct dataset: A benchmark dataset for
        low-dose ct reconstruction methods. arXiv preprint arXiv:1910.01113,
        2019.
    """

    def __init__(self, train_gt_images, val_gt_images, test_gt_images, inner_circle=True):
        """
        Note: Currently only odd sized images are supported.

        :param train_gt_images: training ground-truth images, shape (N, H, H)
        :param val_gt_images: validation ground-truth images
        :param test_gt_images: test ground-truth images
        :param inner_circle: all pixels outside the largest circle around the
            center are set to zero i.e. the detector length is equal to the
            image height
        """
        self.train_gt_images = train_gt_images
        self.val_gt_images = val_gt_images
        self.test_gt_images = test_gt_images
        # All three splits must be square and odd-sized.
        assert (self.train_gt_images.shape[1] == self.train_gt_images.shape[2]), 'Train images are not square.'
        assert ((self.train_gt_images.shape[1] % 2) == 1), 'Train image size has to be odd.'
        assert (self.val_gt_images.shape[1] == self.val_gt_images.shape[2]), 'Val images are not square.'
        assert ((self.val_gt_images.shape[1] % 2) == 1), 'Val image size has to be odd.'
        assert (self.test_gt_images.shape[1] == self.test_gt_images.shape[2]), 'Test images are not square.'
        assert ((self.test_gt_images.shape[1] % 2) == 1), 'Test image size has to be odd.'
        self.shape = (self.train_gt_images.shape[1], self.train_gt_images.shape[2])
        self.inner_circle = inner_circle
        if self.inner_circle:
            # Half side of the square inscribed in the inner circle:
            # sqrt((H/2)^2 / 2) == (H/2) / sqrt(2).
            circ_space = np.sqrt((((self.shape[0] / 2.0) ** 2) / 2.0))
            min_pt = [(- circ_space), (- circ_space)]
            max_pt = [circ_space, circ_space]
        else:
            min_pt = [((- self.shape[0]) / 2.0), ((- self.shape[1]) / 2.0)]
            max_pt = [(self.shape[0] / 2.0), (self.shape[1] / 2.0)]
        space = uniform_discr(min_pt, max_pt, self.shape, dtype=np.float32)
        self.train_len = self.train_gt_images.shape[0]
        self.validation_len = self.val_gt_images.shape[0]
        self.test_len = self.test_gt_images.shape[0]
        self.random_access = True
        super().__init__(space=space)

    def _create_pair_dataset(self, forward_op, post_processor=None, noise_type=None, noise_kwargs=None, noise_seeds=None):
        """Wrap this factory's generator into an (observation, ground-truth) pair dataset."""
        dataset = ObservationGroundTruthPairDataset(self.generator, forward_op, post_processor=post_processor, train_len=self.train_len, validation_len=self.validation_len, test_len=self.test_len, noise_type=noise_type, noise_kwargs=noise_kwargs, noise_seeds=noise_seeds)
        return dataset

    def build_projection_dataset(self, num_angles, upscale_shape=70, impl='astra_cpu'):
        """
        Builds the forward projection operator. The ground truth images are
        upscaled during the forward operation to avoid the
        [inverse crime](https://arxiv.org/abs/math-ph/0401050).

        :param num_angles: number of projection angles
        :param upscale_shape: to avoid inverse crime
        :param impl: radon transform implementation
        :return: noise-free pair dataset with `get_ray_trafo`/`ray_trafo`
            attached for reconstruction.
        """
        (forward_op, get_reco_ray_trafo, reco_ray_trafo) = self._build_forward_op(upscale_shape, impl, num_angles)
        ds = self._create_pair_dataset(forward_op=forward_op, noise_type=None)
        ds.get_ray_trafo = get_reco_ray_trafo
        ds.ray_trafo = reco_ray_trafo
        return ds

    def _build_forward_op(self, upscale_shape, impl, num_angles):
        """Build (forward_op, get_reco_ray_trafo, reco_ray_trafo) for parallel-beam projection."""
        reco_space = self.space
        # The projection runs on an upscaled copy of the reconstruction space
        # (identical in both branches below, hence hoisted out of the if/else).
        space = odl.uniform_discr(min_pt=reco_space.min_pt, max_pt=reco_space.max_pt, shape=(upscale_shape, upscale_shape), dtype=np.float32)
        min_pt = reco_space.min_pt
        max_pt = reco_space.max_pt
        if self.inner_circle:
            proj_space = odl.uniform_discr(min_pt, max_pt, (2 * (((2 * int(reco_space.max_pt[0])) - 1),)), dtype=np.float32)
            detector_length = get_detector_length(proj_space)
            # Detector half-width matches the inner-circle geometry chosen in __init__.
            half_width = np.sqrt((((reco_space.shape[0] / 2.0) ** 2) / 2))
            det_partition = odl.uniform_partition((- half_width), half_width, detector_length)
        else:
            proj_space = odl.uniform_discr(min_pt, max_pt, (2 * (reco_space.shape[0],)), dtype=np.float32)
            detector_length = get_detector_length(proj_space)
            det_partition = odl.uniform_partition(((- reco_space.shape[0]) / 2.0), (reco_space.shape[0] / 2.0), detector_length)
        angle_partition = odl.uniform_partition(0, np.pi, num_angles)
        reco_geometry = odl.tomo.Parallel2dGeometry(angle_partition, det_partition)
        ray_trafo = odl.tomo.RayTransform(space, reco_geometry, impl=impl)

        def get_reco_ray_trafo(**kwargs):
            # Ray transform on the (non-upscaled) reconstruction space.
            return odl.tomo.RayTransform(reco_space, reco_geometry, **kwargs)
        reco_ray_trafo = get_reco_ray_trafo(impl=impl)

        class _ResizeOperator(odl.Operator):
            # Upscales a reconstruction-space element into the projection space.
            def __init__(self):
                super().__init__(reco_space, space)

            def _call(self, x, out, **kwargs):
                out.assign(space.element(resize(x, (upscale_shape, upscale_shape), order=1)))
        resize_op = _ResizeOperator()
        forward_op = (ray_trafo * resize_op)
        return (forward_op, get_reco_ray_trafo, reco_ray_trafo)

    def generator(self, part='train'):
        """Yield float32 ground-truth images for `part` in ('train', 'validation', 'test')."""
        if (part == 'train'):
            gen = self._train_generator()
        elif (part == 'validation'):
            gen = self._val_generator()
        elif (part == 'test'):
            gen = self._test_generator()
        else:
            raise NotImplementedError
        for gt in gen:
            yield gt

    def _train_generator(self):
        for i in range(self.train_len):
            yield self.train_gt_images[i].type(torch.float32)

    def _test_generator(self):
        for i in range(self.test_len):
            yield self.test_gt_images[i].type(torch.float32)

    def _val_generator(self):
        for i in range(self.validation_len):
            yield self.val_gt_images[i].type(torch.float32)

    def get_sample(self, index, part='train', out=None):
        """Return a single float32 ground-truth image for `part`.

        NOTE(review): when `out` is given, the original code only rebound the
        local name `out` and returned None — the caller's buffer was never
        filled.  That behavior is preserved here; confirm against the
        Dataset API before changing it.
        """
        if (out is None):  # fixed: was `out == None`, ambiguous for tensors
            if (part == 'train'):
                return self.train_gt_images[index].type(torch.float32)
            elif (part == 'validation'):
                return self.val_gt_images[index].type(torch.float32)
            elif (part == 'test'):
                return self.test_gt_images[index].type(torch.float32)
            else:
                raise NotImplementedError
        elif (part == 'train'):
            out = self.train_gt_images[index].type(torch.float32)
        elif (part == 'validation'):
            out = self.val_gt_images[index].type(torch.float32)
        elif (part == 'test'):
            out = self.test_gt_images[index].type(torch.float32)
        else:
            raise NotImplementedError
|
class SResFourierCoefficientDataset(Dataset):
    """Dataset yielding normalized, flattened rFFT coefficients of 2D images.

    If `amp_min`/`amp_max` are not supplied, the normalization bounds are
    estimated from the log-amplitudes of up to 200 randomly chosen images.
    """

    def __init__(self, ds, amp_min, amp_max):
        """
        :param ds: underlying dataset of 2D images
        :param amp_min: lower log-amplitude bound, or None to estimate
        :param amp_max: upper log-amplitude bound, or None to estimate
        """
        self.ds = ds
        # `is None` instead of `== None`: tensors overload `==` elementwise.
        if ((amp_min is None) and (amp_max is None)):
            # Estimate normalization bounds from a random subsample.
            tmp_imgs = []
            for i in np.random.permutation(len(self.ds))[:200]:
                img = self.ds[i]
                tmp_imgs.append(img)
            tmp_imgs = torch.stack(tmp_imgs)
            tmp_ffts = torch.fft.rfftn(tmp_imgs, dim=[1, 2])
            log_amps = log_amplitudes(tmp_ffts.abs())
            self.amp_min = log_amps.min()
            self.amp_max = log_amps.max()
        else:
            self.amp_min = amp_min
            self.amp_max = amp_max

    def __getitem__(self, item):
        """Return (flattened (amp, phi) coefficient pairs, (amp_min, amp_max))."""
        img = self.ds[item]
        img_fft = torch.fft.rfftn(img, dim=[0, 1])
        (img_amp, img_phi) = normalize_FC(img_fft, amp_min=self.amp_min, amp_max=self.amp_max)
        # Stack amplitude and phase as the last dimension: (n_coeffs, 2).
        img_fft = torch.stack([img_amp.flatten(), img_phi.flatten()], dim=(- 1))
        return (img_fft, (self.amp_min.unsqueeze((- 1)), self.amp_max.unsqueeze((- 1))))

    def __len__(self):
        return len(self.ds)
|
class TRecFourierCoefficientDataset(Dataset):
    """Dataset yielding normalized Fourier coefficients of (sinogram, FBP, image) triples.

    For each sample it returns the flattened (amp, phi) coefficients of the
    sinogram, of a filtered-back-projection reconstruction, and of the
    ground-truth image, plus the image itself and the normalization bounds.

    NOTE: the constructor parameters are named `mag_min`/`mag_max` but are
    stored as `self.amp_min`/`self.amp_max` (kept for caller compatibility).
    """

    def __init__(self, ds, angles, mag_min, mag_max, img_shape=42, inner_circle=True):
        """
        :param ds: dataset of (sinogram, image) pairs
        :param angles: projection angles in radians
        :param mag_min: lower log-amplitude bound, or None to estimate
        :param mag_max: upper log-amplitude bound, or None to estimate
        :param img_shape: FBP output size
        :param inner_circle: passed to skimage's iradon `circle` argument
        """
        self.ds = ds
        self.img_shape = img_shape
        self.inner_circle = inner_circle
        self.angles = angles
        # `is None` instead of `== None`: tensors overload `==` elementwise.
        if ((mag_min is None) and (mag_max is None)):
            # Estimate normalization bounds from up to 200 random sinograms.
            tmp_sinos = []
            for i in np.random.permutation(len(self.ds))[:200]:
                (sino, _) = self.ds[i]
                tmp_sinos.append(sino)
            tmp_sinos = torch.stack(tmp_sinos)
            tmp_ffts = torch.fft.rfftn(tmp_sinos, dim=[1, 2])
            tmp_amps = log_amplitudes(tmp_ffts.abs())
            self.amp_min = tmp_amps.min()
            self.amp_max = tmp_amps.max()
        else:
            self.amp_min = mag_min
            self.amp_max = mag_max

    def __getitem__(self, item):
        (sino, img) = self.ds[item]
        # Classical FBP reconstruction as an additional network input.
        fbp = torch.from_numpy(np.array(iradon(sino.numpy().T, theta=np.rad2deg((- self.angles)), circle=self.inner_circle, output_size=self.img_shape).astype(np.float32).T))
        # Roll centers the data before the FFT so phases are well-behaved.
        sino_fft = torch.fft.rfftn(torch.roll(sino, ((sino.shape[1] // 2) + 1), 1), dim=[(- 1)])
        fbp_fft = torch.fft.rfftn(torch.roll(fbp, (2 * (((img.shape[0] // 2) + 1),)), (0, 1)), dim=[0, 1])
        img_fft = torch.fft.rfftn(torch.roll(img, (2 * (((img.shape[0] // 2) + 1),)), (0, 1)), dim=[0, 1])
        (sino_amp, sino_phi) = normalize_FC(sino_fft, amp_min=self.amp_min, amp_max=self.amp_max)
        (fbp_amp, fbp_phi) = normalize_FC(fbp_fft, amp_min=self.amp_min, amp_max=self.amp_max)
        (img_amp, img_phi) = normalize_FC(img_fft, amp_min=self.amp_min, amp_max=self.amp_max)
        sino_fc = torch.stack([sino_amp.flatten(), sino_phi.flatten()], dim=(- 1))
        fbp_fc = torch.stack([fbp_amp.flatten(), fbp_phi.flatten()], dim=(- 1))
        img_fc = torch.stack([img_amp.flatten(), img_phi.flatten()], dim=(- 1))
        return (sino_fc, fbp_fc, img_fc, img, (self.amp_min.unsqueeze((- 1)), self.amp_max.unsqueeze((- 1))))

    def __len__(self):
        return len(self.ds)
|
def _fc_prod_loss(pred_fc, target_fc, amp_min, amp_max):
    """Product-combined amplitude/phase loss on denormalized Fourier coefficients.

    Returns (total, amplitude term mean, phase term mean), where the total is
    the mean of (1 + amp_err^2) * (2 - cos(phase_err)).
    """
    amp_pred, amp_tgt = (denormalize_amp(fc[..., 0], amp_min=amp_min, amp_max=amp_max)
                         for fc in (pred_fc, target_fc))
    phi_pred, phi_tgt = (denormalize_phi(fc[..., 1]) for fc in (pred_fc, target_fc))
    amp_term = 1 + (amp_pred - amp_tgt) ** 2
    phi_term = 2 - torch.cos(phi_pred - phi_tgt)
    return torch.mean(amp_term * phi_term), torch.mean(amp_term), torch.mean(phi_term)
|
def _fc_sum_loss(pred_fc, target_fc, amp_min, amp_max):
    """Sum-combined amplitude/phase loss on denormalized Fourier coefficients.

    Returns (total, amplitude term mean, phase term mean), where the total is
    the mean of amp_err^2 + (1 - cos(phase_err)).
    """
    amp_pred, amp_tgt = (denormalize_amp(fc[..., 0], amp_min=amp_min, amp_max=amp_max)
                         for fc in (pred_fc, target_fc))
    phi_pred, phi_tgt = (denormalize_phi(fc[..., 1]) for fc in (pred_fc, target_fc))
    amp_term = (amp_pred - amp_tgt) ** 2
    phi_term = 1 - torch.cos(phi_pred - phi_tgt)
    return torch.mean(amp_term + phi_term), torch.mean(amp_term), torch.mean(phi_term)
|
class SResTransformerTrain(torch.nn.Module):
    """Causally-masked transformer over flattened Fourier coefficients (training mode).

    Input tokens are (amplitude, phase) pairs; the model predicts the next
    (amplitude, tanh-bounded phase) pair at every position.
    """

    def __init__(self, d_model, coords, flatten_order, attention_type='linear',
                 n_layers=4, n_heads=4, d_query=32, dropout=0.1, attention_dropout=0.1):
        super(SResTransformerTrain, self).__init__()
        embed_dim = d_model // 2
        model_dim = n_heads * d_query
        self.fourier_coefficient_embedding = torch.nn.Linear(2, embed_dim)
        self.pos_embedding = PositionalEncoding2D(embed_dim, coords=coords,
                                                  flatten_order=flatten_order,
                                                  persistent=False)
        self.encoder = TransformerEncoderBuilder.from_kwargs(
            attention_type=attention_type,
            n_layers=n_layers,
            n_heads=n_heads,
            feed_forward_dimensions=model_dim * 4,
            query_dimensions=d_query,
            value_dimensions=d_query,
            dropout=dropout,
            attention_dropout=attention_dropout,
        ).get()
        # Separate linear heads for amplitude and phase.
        self.predictor_amp = torch.nn.Linear(model_dim, 1)
        self.predictor_phase = torch.nn.Linear(model_dim, 1)

    def forward(self, x):
        """x: (batch, seq, 2) -> (batch, seq, 2) next-token predictions."""
        tokens = self.pos_embedding(self.fourier_coefficient_embedding(x))
        causal_mask = TriangularCausalMask(tokens.shape[1], device=tokens.device)
        hidden = self.encoder(tokens, attn_mask=causal_mask)
        amp = self.predictor_amp(hidden)
        phase = torch.tanh(self.predictor_phase(hidden))
        return torch.cat([amp, phase], dim=-1)
|
class SResTransformerPredict(torch.nn.Module):
    """Recurrent (token-by-token) counterpart of SResTransformerTrain for inference.

    Uses a recurrent encoder so previously computed attention state is carried
    in `memory` instead of re-attending over the full prefix each step.
    """

    def __init__(self, d_model, coords, flatten_order, attention_type='full',
                 n_layers=4, n_heads=4, d_query=32, dropout=0.1, attention_dropout=0.1):
        super(SResTransformerPredict, self).__init__()
        embed_dim = d_model // 2
        model_dim = n_heads * d_query
        self.fourier_coefficient_embedding = torch.nn.Linear(2, embed_dim)
        self.pos_embedding = PositionalEncoding2D(embed_dim, coords=coords,
                                                  flatten_order=flatten_order,
                                                  persistent=False)
        self.encoder = RecurrentEncoderBuilder.from_kwargs(
            attention_type=attention_type,
            n_layers=n_layers,
            n_heads=n_heads,
            feed_forward_dimensions=model_dim * 4,
            query_dimensions=d_query,
            value_dimensions=d_query,
            dropout=dropout,
            attention_dropout=attention_dropout,
        ).get()
        # Separate linear heads for amplitude and phase.
        self.predictor_amp = torch.nn.Linear(model_dim, 1)
        self.predictor_phase = torch.nn.Linear(model_dim, 1)

    def forward(self, x, i=0, memory=None):
        """Predict the token following position `i`; returns (prediction, memory)."""
        flat = x.view(x.shape[0], -1)
        embedded = self.pos_embedding.forward_i(self.fourier_coefficient_embedding(flat), i)
        hidden, memory = self.encoder(embedded, memory)
        amp = self.predictor_amp(hidden)
        phase = torch.tanh(self.predictor_phase(hidden))
        return torch.cat([amp, phase], dim=-1), memory
|
class RAdam(Optimizer):
    """Rectified Adam (RAdam) with optional per-parameter gradient clipping.

    Implements "On the Variance of the Adaptive Learning Rate and Beyond"
    (Liu et al., 2020).  While the rectification term N_sma is below 5 the
    adaptive learning-rate variance is considered untrusted and the update
    degenerates to bias-corrected SGD if ``degenerated_to_sgd`` is set
    (otherwise the step is skipped).

    Fix vs. original: the deprecated positional ``Tensor.add_(scalar, tensor)``
    / ``addcmul_(scalar, t, t)`` / ``addcdiv_(scalar, t, t)`` overloads were
    removed in recent PyTorch; the keyword ``alpha=``/``value=`` forms used
    here are numerically identical.
    """

    def __init__(self, params, lr=0.001, max_grad_norm=1.0, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, degenerated_to_sgd=True):
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        self.degenerated_to_sgd = degenerated_to_sgd
        if (isinstance(params, (list, tuple)) and (len(params) > 0) and isinstance(params[0], dict)):
            # Param groups with their own betas need a private rectification
            # cache, since the cached step size depends on beta2.
            for param in params:
                if (('betas' in param) and ((param['betas'][0] != betas[0]) or (param['betas'][1] != betas[1]))):
                    param['buffer'] = [[None, None, None] for _ in range(10)]
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, max_grad_norm=max_grad_norm, buffer=[[None, None, None] for _ in range(10)])
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)

    def set_lr(self, lr):
        """Set the learning rate once training has started.

        NOTE(review): returns after inspecting only the first parameter of the
        first group (and is a no-op before any state exists) — behavior kept
        from the original; confirm whether all groups should be updated.
        """
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if (not state):
                    return
                else:
                    group['lr'] = lr
                    return

    def get_lr(self):
        """Get learning rate in training; returns [0] before any step has run."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if (not state):
                    return [0]
                else:
                    lr.append(group['lr'])
        return lr

    def step(self, closure=None):
        """Perform a single optimization step.

        :param closure: optional callable that re-evaluates the model and
            returns the loss.
        :return: the loss returned by `closure`, or None.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                if (group['max_grad_norm'] > 0):
                    # Clip this parameter's gradient in place before reading it.
                    clip_grad_norm_(p, group['max_grad_norm'])
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if (len(state) == 0):
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                (beta1, beta2) = group['betas']
                # Adam moment updates (modern keyword API, see class docstring).
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                state['step'] += 1
                # Cache (step, N_sma, step_size) per step modulo 10: all
                # parameters of a group share the same step sequence.
                buffered = group['buffer'][int((state['step'] % 10))]
                if (state['step'] == buffered[0]):
                    (N_sma, step_size) = (buffered[1], buffered[2])
                else:
                    buffered[0] = state['step']
                    beta2_t = (beta2 ** state['step'])
                    N_sma_max = ((2 / (1 - beta2)) - 1)
                    N_sma = (N_sma_max - (((2 * state['step']) * beta2_t) / (1 - beta2_t)))
                    buffered[1] = N_sma
                    if (N_sma >= 5):
                        # Variance rectification term of the RAdam paper.
                        step_size = (math.sqrt((((((((1 - beta2_t) * (N_sma - 4)) / (N_sma_max - 4)) * (N_sma - 2)) / N_sma) * N_sma_max) / (N_sma_max - 2))) / (1 - (beta1 ** state['step'])))
                    elif self.degenerated_to_sgd:
                        # Bias-corrected SGD fallback for early steps.
                        step_size = (1.0 / (1 - (beta1 ** state['step'])))
                    else:
                        step_size = (- 1)  # sentinel: skip the update
                    buffered[2] = step_size
                if (N_sma >= 5):
                    if (group['weight_decay'] != 0):
                        # L2 decay: p <- p - wd * lr * p.
                        p_data_fp32.add_(p_data_fp32, alpha=((- group['weight_decay']) * group['lr']))
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=((- step_size) * group['lr']))
                    p.data.copy_(p_data_fp32)
                elif (step_size > 0):
                    if (group['weight_decay'] != 0):
                        p_data_fp32.add_(p_data_fp32, alpha=((- group['weight_decay']) * group['lr']))
                    p_data_fp32.add_(exp_avg, alpha=((- step_size) * group['lr']))
                    p.data.copy_(p_data_fp32)
        return loss
|
class PlainRAdam(Optimizer):
    """RAdam without the per-step rectification cache or gradient clipping.

    Same algorithm as :class:`RAdam` (Liu et al., 2020) but recomputes the
    rectification term every step instead of caching it.

    Fix vs. original: deprecated positional ``Tensor.add_(scalar, tensor)`` /
    ``addcmul_`` / ``addcdiv_`` overloads (removed in recent PyTorch)
    replaced by the numerically identical keyword ``alpha=``/``value=`` forms.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, degenerated_to_sgd=True):
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        self.degenerated_to_sgd = degenerated_to_sgd
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(PlainRAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(PlainRAdam, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step; returns closure() if given."""
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if (len(state) == 0):
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                (beta1, beta2) = group['betas']
                # Adam moment updates (modern keyword API, see class docstring).
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                state['step'] += 1
                beta2_t = (beta2 ** state['step'])
                N_sma_max = ((2 / (1 - beta2)) - 1)
                N_sma = (N_sma_max - (((2 * state['step']) * beta2_t) / (1 - beta2_t)))
                if (N_sma >= 5):
                    if (group['weight_decay'] != 0):
                        # L2 decay: p <- p - wd * lr * p.
                        p_data_fp32.add_(p_data_fp32, alpha=((- group['weight_decay']) * group['lr']))
                    # Rectified adaptive step (lr folded into step_size here).
                    step_size = ((group['lr'] * math.sqrt((((((((1 - beta2_t) * (N_sma - 4)) / (N_sma_max - 4)) * (N_sma - 2)) / N_sma) * N_sma_max) / (N_sma_max - 2)))) / (1 - (beta1 ** state['step'])))
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=(- step_size))
                    p.data.copy_(p_data_fp32)
                elif self.degenerated_to_sgd:
                    if (group['weight_decay'] != 0):
                        p_data_fp32.add_(p_data_fp32, alpha=((- group['weight_decay']) * group['lr']))
                    # Bias-corrected SGD fallback for early steps.
                    step_size = (group['lr'] / (1 - (beta1 ** state['step'])))
                    p_data_fp32.add_(exp_avg, alpha=(- step_size))
                    p.data.copy_(p_data_fp32)
        return loss
|
class AdamW(Optimizer):
    """Adam with optional linear warmup; weight decay applied as p -= wd*lr*p.

    NOTE(review): despite the name, the decay here scales with the scheduled
    learning rate like classic L2 (it is applied to the fp32 copy before the
    Adam update) — kept as-is from the original.

    Fix vs. original: deprecated positional ``Tensor.add_(scalar, tensor)`` /
    ``addcmul_`` / ``addcdiv_`` overloads (removed in recent PyTorch)
    replaced by the numerically identical keyword ``alpha=``/``value=`` forms.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, warmup=0):
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, warmup=warmup)
        super(AdamW, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdamW, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step; returns closure() if given."""
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if (len(state) == 0):
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                (beta1, beta2) = group['betas']
                state['step'] += 1
                # Adam moment updates (modern keyword API, see class docstring).
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = (1 - (beta1 ** state['step']))
                bias_correction2 = (1 - (beta2 ** state['step']))
                if (group['warmup'] > state['step']):
                    # Linear warmup from ~0 to lr over `warmup` steps.
                    scheduled_lr = (1e-08 + ((state['step'] * group['lr']) / group['warmup']))
                else:
                    scheduled_lr = group['lr']
                step_size = ((scheduled_lr * math.sqrt(bias_correction2)) / bias_correction1)
                if (group['weight_decay'] != 0):
                    # L2 decay: p <- p - wd * scheduled_lr * p.
                    p_data_fp32.add_(p_data_fp32, alpha=((- group['weight_decay']) * scheduled_lr))
                p_data_fp32.addcdiv_(exp_avg, denom, value=(- step_size))
                p.data.copy_(p_data_fp32)
        return loss
|
class TestTomoUtils(unittest.TestCase):
    """Tests for the rFFT coordinate helpers (cartesian and polar, image and sinogram)."""

    def setUp(self) -> None:
        # Odd image size, as required by the coordinate helpers.
        self.img_shape = 27
        self.angles = np.array([0, (np.pi / 2), np.pi])

    def test_cartesian_rfft_2D(self):
        """Scattering the flat coords back via flatten_indices must recover the meshgrid."""
        (x, y, flatten_indices, order) = get_cartesian_rfft_coords_2D(self.img_shape)
        x_ordered = torch.zeros_like(x)
        x_ordered[flatten_indices] = x
        x_ordered = x_ordered.reshape(self.img_shape, (- 1))
        y_ordered = torch.zeros_like(y)
        y_ordered[flatten_indices] = y
        y_ordered = y_ordered.reshape(self.img_shape, (- 1))
        # Undo the fft-style row shift so rows run 0..img_shape-1.
        y_ordered = torch.roll(y_ordered, (- ((self.img_shape // 2) + 1)), 0)
        # rFFT grid: full height, (img_shape // 2) + 1 columns.
        (y_target, x_target) = torch.meshgrid(torch.arange(self.img_shape), torch.arange(((self.img_shape // 2) + 1)))
        self.assertEqual(order[(0, 0)], 0, 'Top left pixel should have index 0.')
        self.assertTrue((torch.all((x_target == x_ordered)) and torch.all((y_target == y_ordered))), 'rFFT coordinates are wrong.')

    def test_polar_rfft_2D(self):
        """Polar coords: DC term at radius 0, phi 0; phi signs by half-plane."""
        (r, phi, flatten_indices, order) = get_polar_rfft_coords_2D(img_shape=self.img_shape)
        self.assertEqual(order[(0, 0)], 0, 'Top left pixel should have index 0.')
        r_ordered = torch.zeros_like(r)
        r_ordered[flatten_indices] = r
        r_ordered = r_ordered.reshape(self.img_shape, (- 1))
        self.assertEqual(r_ordered[(0, 0)], 0, 'Top left pixel does not have radius 0.')
        phi_ordered = torch.zeros_like(phi)
        phi_ordered[flatten_indices] = phi
        phi_ordered = phi_ordered.reshape(self.img_shape, (- 1))
        self.assertEqual(phi_ordered[(0, 0)], 0, 'Top left pixel angle does not correspond to 0.')
        # Middle row of the first column lies on the +y axis (phi = pi/2),
        # last row on the -y axis (phi = -pi/2).
        self.assertEqual(phi_ordered[((self.img_shape // 2), 0)], (np.pi / 2), 'Phi component is of (test 1).')
        self.assertEqual(phi_ordered[((self.img_shape - 1), 0)], ((- np.pi) / 2), 'Phi component is of (test 2).')

    def test_polar_sinogram(self):
        """Sinogram polar coords: same radii per angle; phi constant per projection."""
        (r, phi, flatten_indices) = get_polar_rfft_coords_sinogram(self.angles, self.img_shape)
        # Three angles => radii repeat with stride 3.
        self.assertTrue(torch.all(((r[0::3] == r[1::3]) == (r[1::3] == r[2::3]))), 'Radii of polar sinogram coords are off.')
        phi_ordered = torch.zeros_like(phi)
        phi_ordered[flatten_indices] = phi
        # Blocks of (img_shape // 2) + 1 coefficients per angle: pi/2, 0, -pi/2.
        self.assertTrue(torch.all((phi_ordered[:((self.img_shape // 2) + 1)] == (np.pi / 2.0))), 'Phi of polar sinogram coords are off (test1).')
        self.assertTrue(torch.all((phi_ordered[((self.img_shape // 2) + 1):(- ((self.img_shape // 2) + 1))] == 0)), 'Phi of polar sinogram coords are off (test1).')
        self.assertTrue(torch.all((phi_ordered[(- ((self.img_shape // 2) + 1)):] == ((- np.pi) / 2.0))), 'Phi of polar sinogram coords are off (test2).')

    def test_cartesian_sinogram(self):
        """Cartesian sinogram coords must fall inside the rFFT half-plane bounds."""
        (x, y, flatten_indices) = get_cartesian_rfft_coords_sinogram(self.angles, self.img_shape)
        print(x)
        self.assertTrue(torch.all((x <= ((self.img_shape // 2) + 1))))
        self.assertTrue(torch.all((x >= 0)))
        self.assertTrue(torch.all((y <= self.img_shape)))
        self.assertTrue(torch.all((y >= 0)))
|
class TestUtils(unittest.TestCase):
    """Round-trip tests for the coordinate and normalization helpers."""

    def test_cart2pol2cart(self):
        """cart2pol followed by pol2cart must be the identity."""
        x = torch.arange(1, 6, dtype=torch.float32)
        y = torch.arange((- 2), 3, dtype=torch.float32)
        (r, phi) = cart2pol(x, y)
        (x_, y_) = pol2cart(r, phi)
        self.assertTrue((torch.allclose(x, x_) and torch.allclose(y, y_)), 'Cartesian to polar coordinate transformations are broken.')

    def test_normlize_denormalize_realspace(self):
        """normalize gives zero mean / unit std; denormalize inverts it."""
        data = torch.from_numpy(np.array([(- 1), 2, 4, 0, (- 5)], dtype=np.float32))
        mean = torch.mean(data)
        std = torch.std(data)
        data_n = normalize(data, mean, std)
        self.assertAlmostEqual(torch.mean(data_n).item(), 0, 7)
        self.assertAlmostEqual(torch.std(data_n).item(), 1, 7)
        data_dn = denormalize(data_n, mean, std)
        self.assertTrue(torch.allclose(data, data_dn))

    def test_normalize_denormalize_amplitudes(self):
        """Amplitude normalization round-trips through the log-amplitude bounds."""
        amps = torch.exp(torch.arange(6, dtype=torch.float32))
        log_amps = log_amplitudes(amps)
        min_amp = log_amps.min()
        max_amp = log_amps.max()
        n_amps = normalize_amp(amps, amp_min=min_amp, amp_max=max_amp)
        amps_ = denormalize_amp(n_amps, amp_min=min_amp, amp_max=max_amp)
        self.assertTrue(torch.allclose(amps, amps_))

    def test_normalize_denormalize_phases(self):
        """Phase normalization round-trips over the full [-pi, pi] range."""
        phases = torch.linspace((- np.pi), np.pi, 10)
        phases_n = normalize_phi(phases)
        phases_ = denormalize_phi(phases_n)
        self.assertTrue(torch.allclose(phases, phases_))

    def test_normalize_denormalize_FC(self):
        """normalize_FC / denormalize_FC round-trip on a real PSF's rFFT."""
        img = psf_real(7, 27)
        rfft = torch.fft.rfftn(img)
        log_amps = log_amplitudes(rfft.abs())
        min_amp = log_amps.min()
        max_amp = log_amps.max()
        (amp_n, phi_n) = normalize_FC(rfft, amp_min=min_amp, amp_max=max_amp)
        fc_n = torch.stack([amp_n, phi_n], (- 1))
        rfft_ = denormalize_FC(fc_n, amp_min=min_amp, amp_max=max_amp)
        self.assertTrue(torch.allclose(rfft, rfft_))

    def test_convert2DFT(self):
        """convert2DFT must undo an arbitrary flatten permutation: irfftn recovers the image."""
        img = psf_real(7, 27)
        rfft = torch.fft.rfftn(img)
        log_amps = log_amplitudes(rfft.abs())
        min_amp = log_amps.min()
        max_amp = log_amps.max()
        # Random flattening order over the 27 x 14 rFFT coefficients.
        order = torch.from_numpy(np.random.permutation((27 * 14)))
        (amp_n, phi_n) = normalize_FC(rfft, amp_min=min_amp, amp_max=max_amp)
        fc_n = torch.stack([amp_n.flatten(), phi_n.flatten()], dim=(- 1))[order]
        dft = convert2DFT(fc_n.unsqueeze(0), amp_min=min_amp, amp_max=max_amp, dst_flatten_order=order, img_shape=27)
        img_ = torch.fft.irfftn(dft, s=(27, 27))
        self.assertTrue(torch.allclose(img, img_))
|
def get_data(params, dataset_name, subset=None):
    """Create train and validation DataLoaders for a named dataset.

    Args:
        params: Hyperparameter dict. Uses 'train_batch_size' and
            'val_batch_size'; if 'train_all' is truthy the official test
            split is used for validation, otherwise 'val_split' controls
            the held-out fraction of the training set.
        dataset_name: One of 'mnist', 'mnist-fashion', 'cifar-10'.
        subset: Optional number of training samples to randomly keep.

    Returns:
        (train_loader, val_loader) tuple.

    Raises:
        AttributeError: If `dataset_name` is not recognized.
    """
    # Dispatch table instead of copy-pasted if/elif branches per dataset.
    dataset_classes = {
        'mnist': datasets.mnist.MNIST,
        'mnist-fashion': datasets.mnist.FashionMNIST,
        'cifar-10': datasets.cifar.CIFAR10,
    }
    try:
        dataset_cls = dataset_classes[dataset_name]
    except KeyError:
        # Keep the original exception type for callers that catch it.
        raise AttributeError('Dataset not found') from None
    load_test = (('train_all' in params) and params['train_all'])
    transform = transforms.Compose([transforms.ToTensor()])
    dataset = dataset_cls(root=config.DATASETS_DIR, download=True, transform=transform)
    test_dataset = None
    if load_test:
        test_dataset = dataset_cls(root=config.DATASETS_DIR, download=True, train=False, transform=transform)
    if ((subset is not None) and (subset > 0)):
        # Random sample without replacement; raises if subset > len(dataset).
        dataset = Subset(dataset, random.sample(range(len(dataset)), subset))
    if load_test:
        # Final runs: validate on the official test split.
        train_loader = DataLoader(dataset, batch_size=params['train_batch_size'], shuffle=True)
        val_loader = DataLoader(test_dataset, batch_size=params['val_batch_size'], shuffle=False)
    else:
        # Development runs: hold out a fraction of the training set.
        (train_dataset, val_dataset) = utils.split_dataset(dataset, val_split=params['val_split'])
        train_loader = DataLoader(train_dataset, batch_size=params['train_batch_size'], shuffle=True)
        val_loader = DataLoader(val_dataset, batch_size=params['val_batch_size'], shuffle=False)
    # Log basic statistics of one batch as a sanity check.
    data_batch = next(iter(train_loader))[0]
    logging.debug('Data batch min: {:.4f}, max: {:.4f}.'.format(torch.min(data_batch), torch.max(data_batch)))
    # torch.std_mean returns (std, mean); the indexed fields map accordingly.
    logging.debug('Data batch mean: {1:.4f}, std: {0:.4f}.'.format(*torch.std_mean(data_batch)))
    return (train_loader, val_loader)
|
def attach_handlers(run, model, optimizer, learning_rule, trainer, evaluator, train_loader, val_loader, params):
    """Wire metrics, progress logging, LR scheduling, checkpointing and
    TensorBoard onto the Hebbian trainer/evaluator pair.

    Args:
        run: Run identifier used for checkpoint and TensorBoard paths.
        model: The model being trained (model[0] is the monitored layer).
        optimizer: The local optimizer whose LR is scheduled/logged.
        learning_rule: Supplies the Lebesgue norm for UnitConvergence.
        trainer: HebbianTrainer whose engine receives the handlers.
        evaluator: HebbianEvaluator run periodically on train/val data.
        train_loader, val_loader: Data loaders forwarded to the evaluator.
        params: Hyperparameters; uses 'epochs' for the LR decay schedule.

    Returns:
        The TensorboardLogger; the caller is responsible for closing it.
    """
    # Track the fraction of converged units in the first layer as a metric.
    UnitConvergence(model[0], learning_rule.norm).attach(trainer.engine, 'unit_conv')
    pbar = ProgressBar(persist=True, bar_format=config.IGNITE_BAR_FORMAT)
    pbar.attach(trainer.engine, metric_names='all')
    tqdm_logger = TqdmLogger(pbar=pbar)
    tqdm_logger.attach_output_handler(evaluator.engine, event_name=Events.COMPLETED, tag='validation', global_step_transform=global_step_from_engine(trainer.engine))
    # Full Hebbian evaluation (head retraining) is expensive: every 100 epochs.
    evaluator.attach(trainer.engine, Events.EPOCH_COMPLETED(every=100), train_loader, val_loader)
    # Linearly decay the local learning rate to zero over the whole run.
    lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer=optimizer, lr_lambda=(lambda epoch: (1 - (epoch / params['epochs']))))
    lr_scheduler = LRScheduler(lr_scheduler)
    trainer.engine.add_event_handler(Events.EPOCH_COMPLETED, lr_scheduler)
    # Keep only the most recent checkpoint (n_saved=1), named after the run.
    mc_handler = ModelCheckpoint(config.MODELS_DIR, run.replace('/', '-'), n_saved=1, create_dir=True, require_empty=False, global_step_transform=global_step_from_engine(trainer.engine))
    trainer.engine.add_event_handler(Events.EPOCH_COMPLETED, mc_handler, {'m': model})
    tb_logger = TensorboardLogger(log_dir=os.path.join(config.TENSORBOARD_DIR, run))
    (images, labels) = next(iter(train_loader))
    # Deep-copy to CPU so graph tracing does not disturb the training model.
    tb_logger.writer.add_graph(copy.deepcopy(model).cpu(), images)
    tb_logger.writer.add_hparams(params, {})
    tb_logger.attach_output_handler(evaluator.engine, event_name=Events.COMPLETED, tag='validation', metric_names='all', global_step_transform=global_step_from_engine(trainer.engine))
    tb_logger.attach_output_handler(trainer.engine, event_name=Events.EPOCH_COMPLETED, tag='train', metric_names=['unit_conv'])
    # Visualize first-layer weights as images each epoch.
    input_shape = tuple(next(iter(train_loader))[0].shape[1:])
    tb_logger.attach(trainer.engine, log_handler=WeightsImageHandler(model, input_shape), event_name=Events.EPOCH_COMPLETED)
    tb_logger.attach(trainer.engine, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.EPOCH_STARTED)
    return tb_logger
|
def main(args: Namespace, params: dict, dataset_name, run_postfix=''):
    """Run a Hebbian training experiment on `dataset_name`.

    Args:
        args: CLI namespace; uses `initial_weights` (optional warm start)
            and `device`.
        params: Hyperparameters (delta, k, norm, lr, epochs, batch sizes...).
        dataset_name: Dataset key understood by data.get_data.
        run_postfix: Optional suffix appended to the generated run name.
    """
    identifier = time.strftime('%Y%m%d-%H%M%S')
    run = '{}/heb/{}'.format(dataset_name, identifier)
    if run_postfix:
        run += ('-' + run_postfix)
    print("Starting run '{}'".format(run))
    model = models.create_conv1_model(28, 1, num_kernels=400, n=1, batch_norm=True)
    if (args.initial_weights is not None):
        # Warm start from saved weights and freeze the classifier head so
        # only the Hebbian layers keep learning.
        model = utils.load_weights(model, os.path.join(PATH, args.initial_weights))
        freeze_layers = ['linear1']
    else:
        freeze_layers = None
    device = utils.get_device(args.device)
    model.to(device)
    print("Device set to '{}'.".format(device))
    # NOTE(review): training subset hard-coded to 10000 samples — confirm.
    (train_loader, val_loader) = data.get_data(params, dataset_name, subset=10000)
    learning_rule = KrotovsRule(delta=params['delta'], k=params['k'], norm=params['norm'], normalize=False)
    optimizer = Local(named_params=model.named_parameters(), lr=params['lr'])
    def init_function(h_model):
        # Builds the supervised trainer/evaluators used by HebbianEvaluator
        # to score the Hebbian features (head retraining).
        h_criterion = torch.nn.CrossEntropyLoss()
        h_evaluator = SupervisedEvaluator(model=h_model, criterion=h_criterion, device=device)
        h_train_evaluator = SupervisedEvaluator(model=h_model, criterion=h_criterion, device=device)
        h_optimizer = torch.optim.Adam(params=h_model.parameters(), lr=0.001)
        h_lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(h_optimizer, 'max', verbose=True, patience=5, factor=0.5)
        h_trainer = SupervisedTrainer(model=h_model, optimizer=h_optimizer, criterion=h_criterion, device=device)
        h_pbar = ProgressBar(persist=False, bar_format=config.IGNITE_BAR_FORMAT)
        h_pbar.attach(h_trainer.engine, metric_names='all')
        h_tqdm_logger = TqdmLogger(pbar=h_pbar)
        h_tqdm_logger.attach_output_handler(h_evaluator.engine, event_name=Events.COMPLETED, tag='validation', global_step_transform=global_step_from_engine(h_trainer.engine))
        h_tqdm_logger.attach_output_handler(h_train_evaluator.engine, event_name=Events.COMPLETED, tag='train', global_step_transform=global_step_from_engine(h_trainer.engine))
        # Reduce the head's LR when validation accuracy plateaus.
        h_evaluator.engine.add_event_handler(Events.COMPLETED, (lambda engine: h_lr_scheduler.step(engine.state.metrics['accuracy'])))
        # NOTE(review): this checkpoints the OUTER `model` and steps on the
        # OUTER Hebbian `trainer` (defined later in main; resolved lazily by
        # the closure when init_function is eventually called) — presumably
        # intentional since h_model is the same object, but confirm.
        h_handler = ModelCheckpoint(config.MODELS_DIR, run.replace('/', '-'), n_saved=1, create_dir=True, require_empty=False, score_name='acc', score_function=(lambda engine: engine.state.metrics['accuracy']), global_step_transform=global_step_from_engine(trainer.engine))
        h_evaluator.engine.add_event_handler(Events.EPOCH_COMPLETED, h_handler, {'m': model})
        # Stop head retraining once accuracy stops improving.
        h_es_handler = EarlyStopping(patience=15, min_delta=0.0001, score_function=(lambda engine: engine.state.metrics['accuracy']), trainer=h_trainer.engine, cumulative_delta=True)
        h_es_handler.logger.setLevel(logging.DEBUG)
        h_evaluator.engine.add_event_handler(Events.COMPLETED, h_es_handler)
        return (h_trainer, h_train_evaluator, h_evaluator)
    evaluator = HebbianEvaluator(model=model, score_name='accuracy', score_function=(lambda engine: engine.state.metrics['accuracy']), epochs=500, init_function=init_function, supervised_from=(- 1))
    trainer = HebbianTrainer(model=model, learning_rule=learning_rule, optimizer=optimizer, supervised_from=(- 1), freeze_layers=freeze_layers, device=device)
    tb_logger = attach_handlers(run, model, optimizer, learning_rule, trainer, evaluator, train_loader, val_loader, params)
    trainer.run(train_loader=train_loader, epochs=params['epochs'])
    tb_logger.close()
|
def main():
    """Minimal example: Hebbian-train a one-hidden-layer FC model on MNIST."""
    net = models.create_fc1_model([(28 ** 2), 2000])
    to_tensor = transforms.Compose([transforms.ToTensor()])
    mnist = datasets.mnist.MNIST(root=config.DATASETS_DIR, download=True, transform=to_tensor)
    loader = DataLoader(mnist, batch_size=1024, shuffle=True)
    rule = KrotovsRule()
    opt = Local(named_params=net.named_parameters(), lr=0.01)
    hebbian_trainer = HebbianTrainer(model=net, learning_rule=rule, optimizer=opt)
    hebbian_trainer.run(train_loader=loader, epochs=10)
|
def create_fc1_model(hu: List, n: float=1.0, batch_norm=False):
    """Build a one-hidden-layer fully-connected classifier (10 classes).

    Args:
        hu: [input_size, hidden_size] unit counts.
        n: Exponent of the RePU activation.
        batch_norm: Insert BatchNorm1d after the hidden linear layer.
    """
    layers = [('flatten', Flatten()), ('linear1', nn.Linear(hu[0], hu[1], bias=False))]
    if batch_norm:
        layers.append(('batch_norm', nn.BatchNorm1d(num_features=hu[1])))
    layers += [('repu', RePU(n)), ('linear2', nn.Linear(hu[1], 10))]
    return nn.Sequential(OrderedDict(layers))
|
def create_fc2_model(hu: List, n: float=1.0, batch_norm=False):
    """Build a two-hidden-layer fully-connected classifier (10 classes).

    Args:
        hu: [input_size, hidden1_size, hidden2_size] unit counts.
        n: Exponent of the RePU activations.
        batch_norm: Insert BatchNorm1d after the first linear layer.
    """
    layers = [('flatten', Flatten()), ('linear1', nn.Linear(hu[0], hu[1], bias=False))]
    if batch_norm:
        layers.append(('batch_norm', nn.BatchNorm1d(num_features=hu[1])))
    layers += [
        ('repu1', RePU(n)),
        ('linear2', nn.Linear(hu[1], hu[2], bias=False)),
        ('repu2', RePU(n)),
        ('linear3', nn.Linear(hu[2], 10)),
    ]
    return nn.Sequential(OrderedDict(layers))
|
def create_conv1_model(input_dim, input_channels=1, num_kernels=8, kernel_size=5, pool_size=2, n=1, batch_norm=False, dropout=None):
    """Build a single-conv-layer classifier: conv -> [BN] -> RePU -> pool -> [dropout] -> linear(10).

    Args:
        input_dim: Spatial size of the (square) input.
        input_channels: Number of input channels.
        num_kernels: Number of conv filters.
        kernel_size: Conv kernel size (no padding, stride 1).
        pool_size: MaxPool2d kernel/stride.
        n: Exponent of the RePU activation.
        batch_norm: Insert BatchNorm2d after the conv layer.
        dropout: Optional Dropout2d probability after pooling.

    Bug fix: the linear layer's in-features previously hard-coded a pool
    stride of 2 (`int(conv_dim / 2)`), which produced a wrong size — and a
    runtime shape error — whenever pool_size != 2. The divisor is now
    `pool_size`; behavior is unchanged for the default pool_size=2.
    """
    modules = [('conv1', nn.Conv2d(input_channels, num_kernels, kernel_size, bias=False))]
    if batch_norm:
        modules.append(('batch_norm', nn.BatchNorm2d(num_features=num_kernels)))
    modules.extend([('repu', RePU(n)), ('pool1', nn.MaxPool2d(pool_size))])
    if (dropout is not None):
        modules.append(('dropout1', nn.Dropout2d(dropout)))
    # Spatial size after the unpadded conv, then after the max-pool.
    conv_dim = input_dim - (kernel_size - 1)
    pooled_dim = conv_dim // pool_size
    modules.extend([('flatten', Flatten()), ('linear1', nn.Linear((num_kernels * (pooled_dim ** 2)), 10))])
    return nn.Sequential(OrderedDict(modules))
|
def create_conv2_model(input_dim, input_channels=1, num_kernels=None, kernel_size=4, pool_size=2, n=1):
    """Build a two-conv-layer classifier: (conv -> RePU -> pool) x2 -> linear(10).

    Args:
        input_dim: Spatial size of the (square) input.
        input_channels: Number of input channels.
        num_kernels: [filters_conv1, filters_conv2]; defaults to [8, 16].
        kernel_size: Conv kernel size for both layers (no padding, stride 1).
        pool_size: MaxPool2d kernel/stride for both pools.
        n: Exponent of the RePU activations.

    Bug fix: the linear in-features computation hard-coded the pool stride
    as 2 in both stages; it now uses `pool_size` (unchanged for the
    default pool_size=2, matching MaxPool2d's floor semantics).
    """
    if (num_kernels is None):
        num_kernels = [8, 16]
    # Spatial size after conv1+pool1, then after conv2+pool2 (floor each pool).
    dim = (input_dim - (kernel_size - 1)) // pool_size
    dim = (dim - (kernel_size - 1)) // pool_size
    modules = [
        ('conv1', nn.Conv2d(input_channels, num_kernels[0], kernel_size, bias=False)),
        ('repu1', RePU(n)),
        ('pool1', nn.MaxPool2d(pool_size)),
        ('conv2', nn.Conv2d(num_kernels[0], num_kernels[1], kernel_size, bias=False)),
        ('repu2', RePU(n)),
        ('pool2', nn.MaxPool2d(pool_size)),
        ('flatten', Flatten()),
        ('linear1', nn.Linear((num_kernels[1] * (dim ** 2)), 10)),
    ]
    return nn.Sequential(OrderedDict(modules))
|
def attach_handlers(run, model, optimizer, trainer, train_evaluator, evaluator, train_loader, val_loader, params):
    """Wire progress logging, evaluation, LR scheduling, early stopping,
    checkpointing and TensorBoard onto a supervised trainer.

    Args:
        run: Run identifier used for checkpoint and TensorBoard paths.
        model: The model being trained (checkpointed on best accuracy).
        optimizer: Optimizer whose LR is reduced on accuracy plateaus.
        trainer: SupervisedTrainer whose engine receives the handlers.
        train_evaluator, evaluator: Evaluators for the train/validation sets.
        train_loader, val_loader: Corresponding data loaders.
        params: Hyperparameters; 'train_all' switches early stopping to the
            train metric (no held-out validation set in that mode).

    Returns:
        (es_handler, tb_logger) — the EarlyStopping handler (exposes
        best_score) and the TensorboardLogger (caller must close it).
    """
    pbar = ProgressBar(persist=True, bar_format=config.IGNITE_BAR_FORMAT)
    pbar.attach(trainer.engine, metric_names='all')
    tqdm_logger = TqdmLogger(pbar=pbar)
    tqdm_logger.attach_output_handler(evaluator.engine, event_name=Events.COMPLETED, tag='validation', global_step_transform=global_step_from_engine(trainer.engine))
    tqdm_logger.attach_output_handler(train_evaluator.engine, event_name=Events.COMPLETED, tag='train', global_step_transform=global_step_from_engine(trainer.engine))
    # Evaluate on both splits at the end of every epoch.
    train_evaluator.attach(trainer.engine, Events.EPOCH_COMPLETED, train_loader)
    evaluator.attach(trainer.engine, Events.EPOCH_COMPLETED, data=val_loader)
    # Reduce LR when validation accuracy plateaus.
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', verbose=True, patience=5, factor=0.5)
    evaluator.engine.add_event_handler(Events.COMPLETED, (lambda engine: lr_scheduler.step(engine.state.metrics['accuracy'])))
    es_handler = EarlyStopping(patience=15, score_function=(lambda engine: engine.state.metrics['accuracy']), trainer=trainer.engine, cumulative_delta=True, min_delta=0.0001)
    # In 'train_all' mode there is no held-out val set: stop on train accuracy.
    if (('train_all' in params) and params['train_all']):
        train_evaluator.engine.add_event_handler(Events.COMPLETED, es_handler)
    else:
        evaluator.engine.add_event_handler(Events.COMPLETED, es_handler)
    es_handler.logger.setLevel(logging.DEBUG)
    name = run.replace('/', '-')
    # Keep only the best-accuracy checkpoint (n_saved=1).
    mc_handler = ModelCheckpoint(config.MODELS_DIR, name, n_saved=1, create_dir=True, require_empty=False, score_name='acc', score_function=(lambda engine: engine.state.metrics['accuracy']), global_step_transform=global_step_from_engine(trainer.engine))
    evaluator.engine.add_event_handler(Events.EPOCH_COMPLETED, mc_handler, {'m': model})
    tb_logger = TensorboardLogger(log_dir=os.path.join(config.TENSORBOARD_DIR, run))
    (images, labels) = next(iter(train_loader))
    # Deep-copy to CPU so graph tracing does not disturb the training model.
    tb_logger.writer.add_graph(copy.deepcopy(model).cpu(), images)
    tb_logger.writer.add_hparams(params, {'hparam/dummy': 0})
    tb_logger.attach_output_handler(train_evaluator.engine, event_name=Events.COMPLETED, tag='train', metric_names='all', global_step_transform=global_step_from_engine(trainer.engine))
    tb_logger.attach_output_handler(evaluator.engine, event_name=Events.COMPLETED, tag='validation', metric_names='all', global_step_transform=global_step_from_engine(trainer.engine))
    # Visualize first-layer weights as images each epoch.
    input_shape = tuple(next(iter(train_loader))[0].shape[1:])
    tb_logger.attach(trainer.engine, log_handler=WeightsImageHandler(model, input_shape), event_name=Events.EPOCH_COMPLETED)
    tb_logger.attach(trainer.engine, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.EPOCH_STARTED)
    return (es_handler, tb_logger)
|
def main(params, dataset_name, transfer_learning=False):
    """Run a supervised training experiment on `dataset_name`.

    Args:
        params: Hyperparameters (lr, epochs, batch sizes, train_subset, ...).
        dataset_name: Dataset key understood by data.get_data.
        transfer_learning: If True, load frozen Hebbian-trained conv weights
            before supervised training of the remaining layers.
    """
    identifier = time.strftime('%Y%m%d-%H%M%S')
    run = '{}/sup/{}'.format(dataset_name, identifier)
    if transfer_learning:
        run += '-tl'
    if (('train_all' in params) and params['train_all']):
        run += '-test'
    print("Starting run '{}'".format(run))
    model = models.create_conv1_model(28, 1, num_kernels=400, n=1, batch_norm=True)
    if transfer_learning:
        # Reuse Hebbian-trained conv features and keep them frozen.
        weights_path = '../output/models/heb-mnist-fashion-20200607-015911_m_100_acc=0.855.pth'
        model = utils.load_weights(model, os.path.join(PATH, weights_path), layer_names=['conv1'], freeze=True)
    device = utils.get_device()
    model.to(device)
    print("Device set to '{}'.".format(device))
    (train_loader, val_loader) = data.get_data(params, dataset_name, subset=params['train_subset'])
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=params['lr'])
    # Consistency fix: pass the resolved device to the evaluators as well as
    # the trainer (previously the evaluators silently resolved their own
    # default device; utils.get_device() is the shared default, so behavior
    # is unchanged but the wiring is now explicit and uniform).
    train_evaluator = SupervisedEvaluator(model=model, criterion=criterion, device=device)
    evaluator = SupervisedEvaluator(model=model, criterion=criterion, device=device)
    trainer = SupervisedTrainer(model=model, optimizer=optimizer, criterion=criterion, device=device)
    (es_handler, tb_logger) = attach_handlers(run, model, optimizer, trainer, train_evaluator, evaluator, train_loader, val_loader, params)
    trainer.run(train_loader=train_loader, epochs=params['epochs'])
    # Record the best validation accuracy alongside the hyperparameters.
    tb_logger.writer.add_hparams(params, {'hparam/accuracy': es_handler.best_score})
    tb_logger.close()
|
def main(params):
    """Evaluate a saved Hebbian-trained FC model on the FashionMNIST test split."""
    net = models.create_fc1_model([(28 ** 2), 2000], n=1, batch_norm=True)
    weights_path = '../output/models/heb-mnist-fashion-20200426-101420_m_500_acc=0.852.pth'
    net = load_weights(net, os.path.join(PATH, weights_path))
    # Derive the run name from the checkpoint file name.
    run = os.path.splitext(os.path.basename(weights_path))[0].split('_')[0] + '/test'
    logging.info("Starting run '{}'.".format(run))
    to_tensor = transforms.Compose([transforms.ToTensor()])
    test_set = datasets.mnist.FashionMNIST(root=config.DATASETS_DIR, download=True, transform=to_tensor, train=False)
    test_loader = DataLoader(test_set, batch_size=params['val_batch_size'], shuffle=False)
    evaluator = SupervisedEvaluator(model=net, criterion=torch.nn.CrossEntropyLoss())
    evaluator.run(test_loader)
    print(evaluator.metrics)
|
class SimpleEngine(Engine):
    """Custom engine with custom run function.

    This engine has only metrics in its state and only fires 2 events
    (STARTED and COMPLETED).
    """
    def __init__(self, run_function: Callable):
        # The process_function is never invoked because run() is overridden;
        # a no-op lambda satisfies Engine's constructor.
        super().__init__(process_function=(lambda x, y: None))
        # NOTE(review): relies on ignite's private `_allowed_events` /
        # `_fire_event` API — may break across ignite versions; confirm.
        self._allowed_events = [Events.STARTED, Events.COMPLETED]
        self._run_function = run_function
    def run(self, *args, **kwargs):
        """Fire STARTED, execute the wrapped function, then fire COMPLETED."""
        self._fire_event(Events.STARTED)
        self._run_function(*args, **kwargs)
        self._fire_event(Events.COMPLETED)
|
class Evaluator(ABC):
    """Base class wrapping an ignite engine for evaluation.

    Subclasses are expected to create ``self.engine``. Instances are
    callable so they can be registered directly as ignite event handlers.
    """
    def __init__(self):
        # Concrete engine supplied by subclasses.
        self.engine = None
        self.logger = logging.getLogger(f"{__name__}.{type(self).__name__}")
    def attach(self, engine, event_name, *args, **kwargs):
        """Register this evaluator as a handler for `event_name` on `engine`."""
        if event_name in State.event_to_attr:
            return engine.add_event_handler(event_name, self, *args, **kwargs)
        raise RuntimeError(f"Unknown event name '{event_name}'")
    def run(self, *args, **kwargs):
        """Run the wrapped engine."""
        self.engine.run(*args, **kwargs)
    def __call__(self, engine, *args, **kwargs):
        # The triggering engine is ignored; only forwarded args matter.
        self.run(*args, **kwargs)
|
class HebbianEvaluator(Evaluator):
    """Evaluates a Hebbian-trained model by retraining its supervised head.

    On each evaluation, layers below `supervised_from` are frozen, the head
    layers are re-initialized and trained with the supplied (or default)
    supervised setup; the best validation score observed is stored on this
    engine's metrics.
    """
    def __init__(self, model: torch.nn.Module, score_name: str, score_function: Callable, init_function: Callable[[torch.nn.Module], tuple]=None, epochs: int=100, supervised_from: int=None):
        super().__init__()
        self.model = model
        self.score_name = score_name
        self.score_function = score_function
        if (init_function is None):
            self.init_function = self._init_function
        else:
            self.init_function = init_function
        self.epochs = epochs
        self.supervised_from = supervised_from
        self.engine = self.create_hebbian_evaluator(self._run)
        self._init_metrics()
    @staticmethod
    def create_hebbian_evaluator(run_function) -> Engine:
        # SimpleEngine only fires STARTED/COMPLETED around run_function.
        return SimpleEngine(run_function=run_function)
    @staticmethod
    def _init_function(model):
        'Default initialization function.'
        criterion = torch.nn.CrossEntropyLoss()
        evaluator = SupervisedEvaluator(model=model, criterion=criterion)
        train_evaluator = SupervisedEvaluator(model=model, criterion=criterion)
        optimizer = torch.optim.Adam(params=model.parameters())
        trainer = SupervisedTrainer(model=model, optimizer=optimizer, criterion=criterion)
        # Stop head retraining when validation loss stops decreasing.
        es_handler = EarlyStopping(patience=5, min_delta=0.0001, score_function=(lambda engine: (- engine.state.metrics['loss'])), trainer=trainer.engine, cumulative_delta=True)
        evaluator.engine.add_event_handler(Events.COMPLETED, es_handler)
        return (trainer, train_evaluator, evaluator)
    def _init_metrics(self):
        # Reset the best score tracked across head-retraining epochs.
        self.best_score = None
    def _init(self, train_loader, val_loader):
        # Build a fresh supervised trainer/evaluator pair for this round.
        (self._trainer, self._train_evaluator, self._evaluator) = self.init_function(self.model)
        self._train_evaluator.attach(self._trainer.engine, Events.EPOCH_COMPLETED, train_loader)
        self._evaluator.attach(self._trainer.engine, Events.EPOCH_COMPLETED, val_loader)
        @self._evaluator.engine.on(Events.COMPLETED)
        def save_best_metrics(eval_engine):
            # Keep the metrics from the best validation epoch on this engine.
            current_score = self.score_function(eval_engine)
            if ((self.best_score is None) or (current_score > self.best_score)):
                self.best_score = current_score
                self.engine.state.metrics = eval_engine.state.metrics
                self.logger.info('New best validation {} = {:.4f}.'.format(self.score_name, self.best_score))
        self._init_metrics()
    def _run(self, train_loader, val_loader):
        self.logger.info("Supervised training from layer '{}'.".format(list(self.model.named_children())[self.supervised_from][0]))
        self._init(train_loader, val_loader)
        layers = list(self.model.children())
        # Freeze the Hebbian-trained feature layers...
        for layer in layers[:self.supervised_from]:
            for param in layer.parameters():
                param.requires_grad = False
        # ...and re-initialize the supervised head (layers without
        # reset_parameters, e.g. activations, are skipped).
        for lyr in layers[self.supervised_from:]:
            try:
                lyr.reset_parameters()
            except AttributeError:
                pass
        self._trainer.run(train_loader=train_loader, epochs=self.epochs)
|
class SupervisedEvaluator(Evaluator):
    """Evaluator wrapping ignite's create_supervised_evaluator.

    Args:
        model: The model to evaluate.
        criterion: Loss function used for the 'loss' metric.
        metrics: Optional metric dict; defaults to accuracy and loss.
        device: Optional device; resolved via utils.get_device.
    """
    def __init__(self, model, criterion, metrics=None, device=None):
        super().__init__()
        self.device = utils.get_device(device)
        if (metrics is None):
            metrics = {'accuracy': Accuracy(), 'loss': Loss(criterion)}
        self.engine = create_supervised_evaluator(model, metrics=metrics, device=self.device)
|
class OutputHandler(BaseOutputHandler):
    """Helper handler to log engine's output and/or metrics via a TqdmLogger.

    Args:
        tag (str): common title for all produced plots. For example, 'training'
        metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
            metrics.
        output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
            This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot
            with corresponding keys.
        global_step_transform (callable, optional): global step transform function to output a desired global step.
    """
    def __init__(self, tag, metric_names='all', output_transform=None, global_step_transform=None):
        super(OutputHandler, self).__init__(tag, metric_names, output_transform, global_step_transform)
    def __call__(self, engine, logger, event_name):
        if not isinstance(logger, TqdmLogger):
            raise RuntimeError("Handler 'OutputHandler' works only with TqdmLogger")
        metrics = self._setup_output_metrics(engine)
        global_step = self.global_step_transform(engine, event_name)
        if not isinstance(global_step, int):
            raise TypeError('global_step must be int, got {}. Please check the output of global_step_transform.'.format(type(global_step)))
        parts = []
        for key, value in metrics.items():
            scalar_tensor = isinstance(value, torch.Tensor) and (value.ndimension() == 0)
            if isinstance(value, numbers.Number) or scalar_tensor:
                # Switch to scientific notation for very large values.
                fmt = '{}={:.4e}' if (value > 10000.0) else '{}={:.4f}'
                parts.append(fmt.format(key, value))
            elif isinstance(value, torch.Tensor) and (value.ndimension() == 1):
                # Log each component of a 1-D metric tensor separately.
                parts.extend('{}{}={}'.format(key, i, v.item()) for (i, v) in enumerate(value))
            else:
                warnings.warn('TqdmLogger output_handler can not log metrics value type {}'.format(type(value)))
        header = '{} epoch {}: '.format(self.tag.capitalize(), global_step)
        logger.pbar.log_message(header + ', '.join(parts))
|
class TqdmLogger(BaseLogger):
    """Routes log messages through a tqdm progress bar so they do not clobber it."""
    def __init__(self, pbar):
        # The wrapped progress bar; set to None once closed.
        self.pbar = pbar
    def close(self):
        """Close and release the wrapped progress bar, if one is attached."""
        if self.pbar:
            self.pbar.close()
            self.pbar = None
    def _create_output_handler(self, *args, **kwargs):
        # Factory hook used by BaseLogger.attach_output_handler.
        return OutputHandler(*args, **kwargs)
    def _create_opt_params_handler(self, *args, **kwargs):
        """Intentionally empty: optimizer parameters are not logged via tqdm."""
        pass
|
class HebbsRule(LearningRule):
    """Plain Hebbian rule: d_w[i, j] = c * y[i] * x[j], averaged over the batch.

    Args:
        c: Learning-strength coefficient.
    """
    def __init__(self, c=0.1):
        super().__init__()
        self.c = c
    def update(self, inputs, w):
        """Compute the batch-averaged Hebbian weight update.

        Args:
            inputs: Batch of flattened input vectors, shape (batch, in_features).
            w: Weight matrix, shape (out_features, in_features).

        Returns:
            Update tensor with the same shape as `w`.

        Bug fixes vs the previous version (mirroring OjasRule):
        * the accumulator was allocated with shape (batch,) instead of
          (batch, *w.shape), so assigning a full update matrix crashed;
        * torch.dot only accepts 1-D tensors, so the activation against the
          2-D weight matrix failed; torch.matmul is used instead.
        """
        d_ws = torch.zeros(inputs.size(0), *w.shape)
        for (idx, x) in enumerate(inputs):
            y = torch.matmul(w, x)  # per-unit activations, shape (out_features,)
            # Outer product realizes d_w[i, j] = c * y[i] * x[j] in one shot.
            d_ws[idx] = self.c * (y.unsqueeze(1) * x.unsqueeze(0))
        return torch.mean(d_ws, dim=0)
|
class KrotovsRule(LearningRule):
    """Krotov-Hopfield Hebbian learning rule fast implementation.

    Original source: https://github.com/DimaKrotov/Biological_Learning

    Args:
        precision: Numerical precision of the weight updates.
        delta: Anti-hebbian learning strength.
        norm: Lebesgue norm of the weights.
        k: Ranking parameter
        normalize: Scale each input sample to unit L2 norm before the update.
    """
    def __init__(self, precision=1e-30, delta=0.4, norm=2, k=2, normalize=False):
        super().__init__()
        self.precision = precision
        self.delta = delta
        self.norm = norm
        self.k = k
        self.normalize = normalize
    def init_layers(self, layers: list):
        # Re-initialize trainable layer weights from a standard normal.
        for layer in [lyr.layer for lyr in layers]:
            if ((type(layer) == torch.nn.Linear) or (type(layer) == torch.nn.Conv2d)):
                layer.weight.data.normal_(mean=0.0, std=1.0)
    def update(self, inputs: torch.Tensor, weights: torch.Tensor):
        """Compute the Krotov weight update for a batch of flattened inputs.

        Args:
            inputs: Shape (batch, input_size).
            weights: Shape (num_hidden_units, input_size).

        Returns:
            Update tensor with the same shape as `weights`, scaled so the
            largest absolute entry is 1 (or bounded by `precision`).
        """
        batch_size = inputs.shape[0]
        num_hidden_units = weights.shape[0]
        input_size = inputs[0].shape[0]
        assert (self.k <= num_hidden_units), 'The amount of hidden units should be larger or equal to k!'
        if self.normalize:
            # Unit-normalize each sample; zero vectors are left unchanged.
            norm = torch.norm(inputs, dim=1)
            norm[(norm == 0)] = 1
            inputs = torch.div(inputs, norm.view((- 1), 1))
        inputs = torch.t(inputs)
        # Input currents under the Lebesgue-p metric:
        # <w, x>_p = sum_j sign(w_j) * |w_j|^(p-1) * x_j
        tot_input = torch.matmul((torch.sign(weights) * (torch.abs(weights) ** (self.norm - 1))), inputs)
        # Per sample, the strongest unit is driven Hebbian (+1) and the k-th
        # strongest anti-Hebbian (-delta); all other units get 0.
        (_, indices) = torch.topk(tot_input, k=self.k, dim=0)
        activations = torch.zeros((num_hidden_units, batch_size))
        activations[(indices[0], torch.arange(batch_size))] = 1.0
        activations[(indices[(self.k - 1)], torch.arange(batch_size))] = (- self.delta)
        # Weight-decay term that keeps each unit's Lebesgue norm bounded.
        xx = torch.sum(torch.mul(activations, tot_input), 1)
        norm_factor = torch.mul(xx.view(xx.shape[0], 1).repeat((1, input_size)), weights)
        ds = (torch.matmul(activations, torch.t(inputs)) - norm_factor)
        # Normalize by the largest absolute update for a stable step size,
        # guarding against division by (near-)zero.
        nc = torch.max(torch.abs(ds))
        if (nc < self.precision):
            nc = self.precision
        d_w = torch.true_divide(ds, nc)
        return d_w
|
class LearningRule(ABC):
    """Abstract base class for local (Hebbian-style) learning rules."""
    def __init__(self):
        # Logger named after the concrete rule subclass.
        self.logger = logging.getLogger(f"{__name__}.{type(self).__name__}")
    def init_layers(self, model):
        """Optional hook to initialize layer weights; default is a no-op."""
        pass
    @abstractmethod
    def update(self, x, w):
        """Compute the weight update for inputs `x` and weights `w`."""
        pass
|
class OjasRule(LearningRule):
    """Oja's rule: d_w[i, j] = c * y[i] * (x[j] - y[i] * w[i, j]), batch-averaged.

    Args:
        c: Learning-strength coefficient.
    """
    def __init__(self, c=0.1):
        super().__init__()
        self.c = c
    def update(self, inputs, w):
        """Compute the batch-averaged Oja weight update.

        Args:
            inputs: Batch of flattened input vectors, shape (batch, in_features).
            w: Weight matrix, shape (out_features, in_features).

        Returns:
            Update tensor with the same shape as `w`.

        The per-element Python double loop was replaced by equivalent
        broadcasting (same formula, computed in one vectorized expression
        per sample) — an O(out*in) Python loop is accidental overhead.
        """
        d_ws = torch.zeros(inputs.size(0), *w.shape)
        for (idx, x) in enumerate(inputs):
            y = torch.matmul(w, x)  # activations, shape (out_features,)
            # Broadcasting form of d_w[i, j] = c * y[i] * (x[j] - y[i] * w[i, j]).
            d_ws[idx] = self.c * y.unsqueeze(1) * (x.unsqueeze(0) - y.unsqueeze(1) * w)
        return torch.mean(d_ws, dim=0)
|
class UnitConvergence(Metric):
    """Fraction of units whose incoming weights have Lebesgue-p norm below 1 + tolerance.

    Args:
        layer: Linear or Conv2d layer whose live weights are inspected.
        norm: Lebesgue norm order p.
        tolerance: Slack added to the unit-norm bound.
    """
    def __init__(self, layer: torch.nn.Module, norm: int, tolerance: int=0.1, output_transform=(lambda x: x), device=None):
        # Store attributes before Metric.__init__, which may call reset().
        self.layer = layer
        self.norm = norm
        self.tolerance = tolerance
        super(UnitConvergence, self).__init__(output_transform=output_transform, device=device)
    def reset(self):
        super(UnitConvergence, self).reset()
    def update(self, output):
        # Nothing accumulates per batch; compute() reads the live weights.
        pass
    def compute(self):
        """Return the converged-unit fraction computed from the layer weights."""
        layer = self.layer
        if (type(layer) == torch.nn.Linear):
            weights = layer.weight.detach()
        elif (type(layer) == torch.nn.Conv2d):
            # Treat each filter's kernel as one unit's weight vector.
            weights = layer.weight.detach().view((- 1), (layer.kernel_size[0] * layer.kernel_size[1]))
        else:
            raise TypeError("Layer type '{}' not supported!".format(type(layer)))
        unit_norms = torch.sum(torch.abs(weights) ** self.norm, 1)
        converged = torch.sum(unit_norms < (1 + self.tolerance))
        return (float(converged) / unit_norms.shape[0])
|
class Flatten(nn.Module):
    """Flattens every dimension except the batch dimension."""
    def forward(self, x: torch.Tensor):
        batch_size = x.size(0)
        return x.view(batch_size, (- 1))
|
class RePU(nn.ReLU):
    """Rectified power unit: relu(x) ** n. With n=1 this reduces to plain ReLU."""
    def __init__(self, n):
        super(RePU, self).__init__()
        # Exponent applied after rectification.
        self.n = n
    def forward(self, x: torch.Tensor):
        rectified = torch.relu(x)
        return rectified ** self.n
|
class SPELoss(Module):
    """Power-error loss on squashed logits: sum(|tanh(beta*out) - (2*onehot - 1)|**m).

    Args:
        m: Power applied to the absolute error.
        beta: Slope of the tanh squashing applied to the logits.
    """
    def __init__(self, m=1, beta=0.1):
        super(SPELoss, self).__init__()
        self.m = m
        self.beta = beta
    def forward(self, output, target):
        squashed = torch.tanh(self.beta * output)
        # Encode class labels as +/-1 vectors.
        signed = (2 * torch.nn.functional.one_hot(target, num_classes=output.shape[1])) - 1
        return torch.sum(torch.abs(squashed - signed) ** self.m)
|
class Local(Optimizer):
    """Optimizer that applies externally-computed local (Hebbian) weight updates.

    Parameters are registered together with their names so `local_step`
    can target a single layer's weight tensor by name.
    """
    def __init__(self, named_params, lr=required):
        # Keep parameter names aligned (by index) with the params passed on
        # to the base Optimizer.
        (self.param_names, params) = zip(*named_params)
        if ((lr is not required) and (lr < 0.0)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        defaults = dict(lr=lr)
        super(Local, self).__init__(params, defaults)
    def local_step(self, d_p, layer_name, closure=None):
        'Performs a single local optimization step.'
        loss = None
        if (closure is not None):
            loss = closure()
        # NOTE(review): indexing group['params'] with an index into the
        # global param_names list assumes a single param group; with
        # multiple groups the same update would be applied once per group
        # with possibly wrong indices — confirm single-group usage.
        for group in self.param_groups:
            layer_index = self.param_names.index((layer_name + '.weight'))
            p = group['params'][layer_index]
            # Gradient-free in-place update: w += lr * d_p.
            p.data.add_((group['lr'] * d_p))
        try:
            # Keep LR schedulers' bookkeeping consistent; attribute may not
            # exist on all torch versions, hence the guard.
            self._step_count += 1
        except AttributeError:
            pass
        return loss
|
class Trainer(ABC):
    """Abstract base trainer wrapping an ignite engine.

    Holds the engine, the model and the resolved device; subclasses build
    the engine and pass it up. Supports (optional) evaluating and
    visualizing by default.
    """
    def __init__(self, engine, model: torch.nn.Module, device: Optional[Union[(str, torch.device)]]=None):
        self.engine = engine
        self.model = model
        # Resolve the device through the project helper (may pick CUDA).
        self.device = utils.get_device(device)
        self.logger = logging.getLogger(f"{__name__}.{type(self).__name__}")
    def run(self, train_loader: DataLoader, epochs: int=10):
        """Run the wrapped engine over `train_loader` for `epochs` epochs."""
        self.engine.run(train_loader, max_epochs=epochs)
|
class SupervisedTrainer(Trainer):
    """Trains a model using classical supervised backpropagation.

    Args:
        model: The model to be trained.
        optimizer: The optimizer used to train the model.
        criterion: The criterion used for calculating the loss.
        device: The device to be used.
    """
    def __init__(self, model: torch.nn.Module, optimizer: Optimizer, criterion, device: Optional[Union[(str, torch.device)]]=None):
        device = utils.get_device(device)
        engine = create_supervised_trainer(model, optimizer, criterion, device=device)
        # Expose the running batch loss as the engine's 'loss' metric.
        RunningAverage(output_transform=(lambda x: x)).attach(engine, 'loss')
        super().__init__(engine=engine, model=model, device=device)
|
class HebbianTrainer(Trainer):
'Trains a model using unsupervised local learning rules also known as Hebbian learning.\n\n The specified learning rule is used to perform local weight updates after each batch of data. Per batch all\n trainable layers are updated in sequence.\n\n Args:\n model (torch.nn.Sequential): The model to be trained.\n learning_rule (LearningRule | Dict[str, LearningRule]):\n The learning rule(s) used to update the model weights.\n optimizer (Optimizer): The optimizer used to perform the weight updates.\n supervised_from (int): From which layer (name) the training should be performed supervised.\n freeze_layers (list): Layers (names) to freeze during training.\n device (Optional[Union[str, torch.device]]): The device to perform the training on.\n\n Attributes:\n supervised_from: See the supervised_from arg.\n freeze_layers: See the freeze_layers arg.\n layers: The Hebbian trainable layers.\n '
def __init__(self, model: torch.nn.Sequential, learning_rule: Union[(LearningRule, Dict[(str, LearningRule)])], optimizer: Optimizer, supervised_from: int=(- 1), freeze_layers: List[str]=None, complete_forward: bool=False, single_forward: bool=False, device: Optional[Union[(str, torch.device)]]=None):
device = utils.get_device(device)
engine = self.create_hebbian_trainer(model, learning_rule, optimizer, device=device)
self.supervised_from = supervised_from
self.freeze_layers = freeze_layers
self.complete_forward = complete_forward
self.single_forward = single_forward
if (self.freeze_layers is None):
self.freeze_layers = []
Layer = namedtuple('Layer', ['idx', 'name', 'layer'])
self.layers = []
for (idx, (name, layer)) in enumerate(list(model.named_children())[:self.supervised_from]):
if (((type(layer) == torch.nn.Linear) or (type(layer) == torch.nn.Conv2d)) and (name not in self.freeze_layers)):
self.layers.append(Layer(idx, name, layer))
self.learning_rule = learning_rule
if (type(self.learning_rule) == dict):
for rule in self.learning_rule.values():
rule.init_layers(self.layers)
else:
self.learning_rule.init_layers(self.layers)
super().__init__(engine=engine, model=model, device=device)
self.logger.info('Received {} trainable layer(s): {}.'.format(len(self.layers), [lyr.name for lyr in self.layers]))
if self.single_forward:
self._hooks = {}
self._inputs = {}
self._outputs = {}
for lyr in self.layers:
self._hooks[lyr.name] = lyr.layer.register_forward_hook(partial(self._store_data_hook, layer_name=lyr.name))
def _store_data_hook(self, _, inp, output, layer_name):
self._inputs[layer_name] = inp[0]
self._outputs[layer_name] = output
def _prepare_data(self, inputs, model, layer_index):
'Prepare the inputs and layer weights to be passed to the learning rule.\n\n Args:\n inputs: The input to the model.\n model: The model to be trained.\n layer_index: The index of the layer currently being trained.\n '
layers = list(model.children())
layer = layers[layer_index]
if (layer_index == 0):
x = inputs
else:
x = inputs
for lyr in layers[:layer_index]:
x = lyr(x)
if self.complete_forward:
for lyr in layers[layer_index:]:
x = lyr(x)
if (type(layer) == torch.nn.Linear):
w = layer.weight
elif (type(layer) == torch.nn.Conv2d):
w = layer.weight
w = w.view((- 1), (layer.kernel_size[0] * layer.kernel_size[1]))
x = utils.extract_image_patches(x, kernel_size=layer.kernel_size, stride=layer.stride, padding=layer.padding, dilation=layer.dilation)
else:
raise TypeError('Unsupported layer type!')
x = x.view((x.shape[0], (- 1)))
self.logger.debug('Prepared inputs and weights with shapes {} and {}.'.format(list(x.shape), list(w.shape)))
return (x, w)
def _prepare_data2(self, layer, layer_name):
x = self._inputs[layer_name]
y = self._outputs[layer_name]
if (type(layer) == torch.nn.Linear):
w = layer.weight
elif (type(layer) == torch.nn.Conv2d):
w = layer.weight
w = w.view((- 1), (layer.kernel_size[0] * layer.kernel_size[1]))
x = utils.extract_image_patches(x, kernel_size=layer.kernel_size, stride=layer.stride, padding=layer.padding, dilation=layer.dilation)
else:
raise TypeError('Unsupported layer type!')
x = x.view((x.shape[0], (- 1)))
self.logger.debug('Prepared inputs and weights with shapes {} and {}.'.format(list(x.shape), list(w.shape)))
return (x, y, w)
def _forward(self, inputs, model):
if self.complete_forward:
model(inputs)
else:
layers = list(model.children())
x = inputs
for lyr in layers[:(self.supervised_from - 1)]:
x = lyr(x)
def create_hebbian_trainer(self, model: torch.nn.Module, learning_rule, optimizer, device=None, non_blocking=False, prepare_batch=utils.prepare_batch, output_transform=(lambda x, y: 0)):
    """Build an ignite ``Engine`` whose update step trains each registered
    layer with a local (Hebbian) learning rule instead of backprop.

    Args:
        model: The model whose layers are trained.
        learning_rule: A single rule, or a dict mapping layer names to rules.
        optimizer: Optimizer exposing ``local_step`` for per-layer updates.
        device: Device batches are moved to.
        non_blocking: Passed through to the tensor transfer.
        prepare_batch: Callable moving one batch to the device.
        output_transform: Maps (x, y) to the engine's per-iteration output.
    """
    def _update(_, batch: Sequence[torch.Tensor]):
        model.train()
        # Local learning needs no autograd graph.
        with torch.no_grad():
            (x, y) = prepare_batch(batch, device=device, non_blocking=non_blocking)
            if self.single_forward:
                # One shared forward pass; hooks capture per-layer activations.
                self._forward(x, model)
            for (layer_index, layer_name, layer) in self.layers:
                self.logger.debug("Updating layer '{}' with shape {}.".format(layer, layer.weight.shape))
                if self.single_forward:
                    # Use the activations stored by the forward hooks.
                    (inputs, _, weights) = self._prepare_data2(layer, layer_name)
                else:
                    # Re-run a partial forward for this specific layer.
                    (inputs, weights) = self._prepare_data(x, model, layer_index)
                if (type(learning_rule) == dict):
                    try:
                        rule = learning_rule[layer_name]
                    except KeyError:
                        self.logger.error("No learning rule was specified for layer '{}'!".format(layer_name))
                        raise
                else:
                    rule = learning_rule
                d_p = rule.update(inputs, weights)
                # Reshape the flat update back to the layer's weight shape.
                d_p = d_p.view(*layer.weight.size())
                optimizer.local_step(d_p, layer_name=layer_name)
        return output_transform(x, y)
    return Engine(_update)
|
def plot_to_img(fig):
    """Convert a matplotlib figure into a CHW float numpy array in [0, 1]
    suitable for TensorBoard's ``add_image``; closes the figure afterwards.
    """
    fig.canvas.draw()
    # np.frombuffer replaces the removed/deprecated np.fromstring (same bytes).
    img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    img = img.reshape((fig.canvas.get_width_height()[::(- 1)] + (3,)))
    # The division also produces a fresh writable array (frombuffer is read-only).
    img = (img / 255.0)
    # HWC -> CHW, as expected by add_image.
    img = np.transpose(img, (2, 0, 1))
    plt.close(fig)
    return img
|
def extract_image_patches(x, kernel_size, stride=(1, 1), dilation=1, padding=0):
    """Cut a batched image tensor (B, C, H, W) into kernel-sized patches.

    Bug fix: ``padding`` used to be accepted but silently ignored; it is now
    applied symmetrically to H and W before unfolding. ``dilation`` is still
    accepted but unsupported and must be 1 for correct results —
    NOTE(review): confirm all callers pass dilation == 1.

    Returns:
        Tensor of shape (-1, kernel_size[0], kernel_size[1]); the leading dim
        flattens batch, patch positions and channels in the original's
        (b, kH, kW, c, H', W') memory order.
    """
    if isinstance(padding, int):
        padding = (padding, padding)
    if padding[0] or padding[1]:
        # F.pad order for the last two dims is (left, right, top, bottom).
        x = torch.nn.functional.pad(x, (padding[1], padding[1], padding[0], padding[0]))
    (b, c, h, w) = x.shape
    patches = x.unfold(2, kernel_size[0], stride[0]).unfold(3, kernel_size[1], stride[1])
    # (B, C, H', W', kH, kW) -> (B, kH, kW, C, H', W'), then flatten.
    patches = patches.permute(0, 4, 5, 1, 2, 3).contiguous()
    return patches.view((- 1), kernel_size[0], kernel_size[1])
|
def split_dataset(dataset, val_split):
    """Randomly split *dataset* into train and validation subsets.

    Args:
        dataset: Any sized dataset accepted by ``torch.utils.data.random_split``.
        val_split: Fraction (0-1) of samples assigned to validation.

    Returns:
        Tuple ``(train_dataset, val_dataset)``.
    """
    n_val = int(val_split * len(dataset))
    n_train = len(dataset) - n_val
    train_ds, val_ds = random_split(dataset, [n_train, n_val])
    return (train_ds, val_ds)
|
def prepare_batch(batch, device=None, non_blocking=False):
    """Prepare batch for training: pass to a device with options."""
    x, y = batch
    x = convert_tensor(x, device=device, non_blocking=non_blocking)
    y = convert_tensor(y, device=device, non_blocking=non_blocking)
    return (x, y)
|
def load_weights(model: torch.nn.Module, state_dict_path, layer_names: List=None, freeze=False):
    """Load model weights from a stored state dict. Optionally only load weights for the specified layer.

    Args:
        model: The model acquiring the weights.
        state_dict_path: The path of the source state dict.
        layer_names: The names of the layers to load. Each name can also be a
            tuple specifying a (source, destination) weight-name mapping.
        freeze: Freeze the loaded weights.

    Returns:
        The model (modified in place).
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    state_dict = torch.load(state_dict_path, map_location=torch.device(device))
    if (layer_names is not None):
        state_dict = extract_layers_from_state_dict(state_dict, layer_names=layer_names)
    # Partial loads must not be strict, otherwise missing keys would raise.
    model.load_state_dict(state_dict, strict=(layer_names is None))
    logger.info("Loaded initial model weights for layer(s) {} from '{}'.".format(layer_names, state_dict_path))
    if freeze:
        if (layer_names is None):
            # Bug fix: freezing without explicit layer names used to crash by
            # iterating over None. A full load freezes every child module.
            named = list(model.named_children())
            layers = [child for (_, child) in named]
            frozen_names = [name for (name, _) in named]
        else:
            children = dict(model.named_children())
            # A tuple name maps (source, destination); freeze the destination.
            layers = [children[(ln[1] if (type(ln) == tuple) else ln)] for ln in layer_names]
            frozen_names = [(ln[0] if (type(ln) == tuple) else ln) for ln in layer_names]
        for layer in layers:
            for param in layer.parameters():
                param.requires_grad = False
        logger.info('Freezed layer(s) {}.'.format(frozen_names))
    return model
|
def extract_layers_from_state_dict(state_dict: dict, layer_names: List[str]):
    """Extract layers from a state dict.

    Each entry of *layer_names* is either a plain layer name or a
    (source, destination) tuple used to rename the weight on the way out.
    """
    extracted = {}
    for name in layer_names:
        if (type(name) == tuple):
            src, dst = name[0], name[1]
        else:
            src = dst = name
        extracted['{}.weight'.format(dst)] = state_dict['{}.weight'.format(src)]
    return extracted
|
def get_device(device=None):
    """Resolve the compute device string.

    ``None`` or ``'cuda'`` select CUDA when available (also switching the
    default tensor type to CUDA floats); any other value falls back to 'cpu'.
    """
    wants_cuda = device in (None, 'cuda')
    if wants_cuda and torch.cuda.is_available():
        device = 'cuda'
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    else:
        device = 'cpu'
    if (device == 'cuda'):
        logger.info("CUDA device set to '{}'.".format(torch.cuda.get_device_name(0)))
    return device
|
def has_file_allowed_extension(filename: str, extensions: Tuple[(str, ...)]) -> bool:
    """Case-insensitively check whether *filename* ends in one of *extensions*."""
    lowered = filename.lower()
    return lowered.endswith(extensions)
|
def make_subsampled_dataset(directory, class_to_idx, extensions=None, is_valid_file=None, sampling_ratio=1.0, nb_classes=None):
    """Build a list of (path, class_index) samples, keeping only a fraction
    ``sampling_ratio`` of each class directory and at most ``nb_classes``
    classes. Exactly one of ``extensions`` / ``is_valid_file`` must be given.
    """
    instances = []
    directory = os.path.expanduser(directory)
    both_none = ((extensions is None) and (is_valid_file is None))
    both_something = ((extensions is not None) and (is_valid_file is not None))
    if (both_none or both_something):
        raise ValueError('Both extensions and is_valid_file cannot be None or not None at the same time')
    if (extensions is not None):
        # Derive the validity predicate from the allowed extensions.
        def is_valid_file(x: str) -> bool:
            return has_file_allowed_extension(x, cast(Tuple[(str, ...)], extensions))
    is_valid_file = cast(Callable[([str], bool)], is_valid_file)
    # Classes are visited in sorted-name order so the nb_classes cut is stable.
    for (i, target_class) in enumerate(sorted(class_to_idx.keys())):
        if ((nb_classes is not None) and (i >= nb_classes)):
            break
        class_index = class_to_idx[target_class]
        target_dir = os.path.join(directory, target_class)
        if (not os.path.isdir(target_dir)):
            continue
        # Per-class sample budget derived from the subsampling ratio; counts
        # ALL directory entries, not only valid files -- TODO confirm intended.
        num_imgs = int((len(os.listdir(target_dir)) * sampling_ratio))
        imgs = 0
        for (root, _, fnames) in sorted(os.walk(target_dir, followlinks=True)):
            for fname in sorted(fnames):
                if (imgs == num_imgs):
                    # Budget exhausted; this only breaks the inner loop, but
                    # the check re-fires for every later directory of the walk.
                    break
                path = os.path.join(root, fname)
                if is_valid_file(path):
                    item = (path, class_index)
                    instances.append(item)
                    imgs += 1
    return instances
|
class INatDataset(ImageFolder):
    """iNaturalist dataset reader.

    Parses the iNat json annotation files instead of scanning folders; note
    that it deliberately does NOT call ImageFolder.__init__ and instead fills
    ``samples`` itself from the annotations.
    """
    def __init__(self, root, train=True, year=2018, transform=None, target_transform=None, category='name', loader=default_loader):
        self.transform = transform
        self.loader = loader
        self.target_transform = target_transform
        self.year = year
        path_json = os.path.join(root, f"{('train' if train else 'val')}{year}.json")
        with open(path_json) as json_file:
            data = json.load(json_file)
        with open(os.path.join(root, 'categories.json')) as json_file:
            data_catg = json.load(json_file)
        # Class indices are always derived from the *train* split so that the
        # train and val splits share one consistent label mapping.
        path_json_for_targeter = os.path.join(root, f'train{year}.json')
        with open(path_json_for_targeter) as json_file:
            data_for_targeter = json.load(json_file)
        targeter = {}
        indexer = 0
        for elem in data_for_targeter['annotations']:
            king = []
            king.append(data_catg[int(elem['category_id'])][category])
            if (king[0] not in targeter.keys()):
                # First occurrence of this category value gets the next index.
                targeter[king[0]] = indexer
                indexer += 1
        self.nb_classes = len(targeter)
        self.samples = []
        for elem in data['images']:
            # file_name looks like '<split>/<x>/<category_id>/<file>'; cut[2]
            # is treated as the category id -- assumes this fixed 4-part
            # layout, TODO confirm against the actual json files.
            cut = elem['file_name'].split('/')
            target_current = int(cut[2])
            path_current = os.path.join(root, cut[0], cut[2], cut[3])
            categors = data_catg[target_current]
            target_current_true = targeter[categors[category]]
            self.samples.append((path_current, target_current_true))
|
class SubsampledDatasetFolder(DatasetFolder):
    """DatasetFolder variant that keeps only ``sampling_ratio`` of each class
    and at most ``nb_classes`` classes (see ``make_subsampled_dataset``)."""
    def __init__(self, root, loader, extensions=None, transform=None, target_transform=None, is_valid_file=None, sampling_ratio=1.0, nb_classes=None):
        # NOTE: super(DatasetFolder, self) intentionally skips DatasetFolder's
        # own __init__ (which would scan the full dataset) and calls its base
        # (VisionDataset) directly -- confirm against the torchvision version
        # in use.
        super(DatasetFolder, self).__init__(root, transform=transform, target_transform=target_transform)
        (classes, class_to_idx) = self._find_classes(self.root)
        samples = make_subsampled_dataset(self.root, class_to_idx, extensions, is_valid_file, sampling_ratio=sampling_ratio, nb_classes=nb_classes)
        if (len(samples) == 0):
            msg = 'Found 0 files in subfolders of: {}\n'.format(self.root)
            if (extensions is not None):
                msg += 'Supported extensions are: {}'.format(','.join(extensions))
            raise RuntimeError(msg)
        self.loader = loader
        self.extensions = extensions
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        # Targets mirror the class index of every kept sample.
        self.targets = [s[1] for s in samples]
|
class ImageNetDataset(SubsampledDatasetFolder):
    """ImageNet loaded via SubsampledDatasetFolder, restricted to the default
    torchvision image extensions unless a custom validity predicate is given."""
    def __init__(self, root, loader=default_loader, is_valid_file=None, **kwargs):
        extensions = IMG_EXTENSIONS if is_valid_file is None else None
        super(ImageNetDataset, self).__init__(root, loader, extensions, is_valid_file=is_valid_file, **kwargs)
        # Keep the torchvision ImageFolder alias for compatibility.
        self.imgs = self.samples
|
def build_dataset(is_train, args):
    """Instantiate the dataset selected by ``args.data_set``.

    Args:
        is_train: Whether to build the training or the validation split.
        args: Parsed CLI arguments (data_set, data_path, ...).

    Returns:
        Tuple ``(dataset, nb_classes)``.

    Raises:
        ValueError: For an unknown ``args.data_set`` (previously this fell
            through and crashed with an UnboundLocalError).
    """
    transform = build_transform(is_train, args)
    if (args.data_set == 'CIFAR10'):
        dataset = datasets.CIFAR10(args.data_path, train=is_train, transform=transform, download=True)
        nb_classes = 10
    # Was a stand-alone `if`; `elif` keeps the dispatch a single chain.
    elif (args.data_set == 'CIFAR100'):
        dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform, download=True)
        nb_classes = 100
    elif (args.data_set == 'IMNET'):
        root = os.path.join(args.data_path, ('train' if is_train else 'val'))
        # Subsample only the training split; always evaluate on the full set.
        dataset = ImageNetDataset(root, transform=transform, sampling_ratio=(args.sampling_ratio if is_train else 1.0), nb_classes=args.nb_classes)
        nb_classes = (args.nb_classes if (args.nb_classes is not None) else 1000)
    elif (args.data_set == 'INAT'):
        dataset = INatDataset(args.data_path, train=is_train, year=2018, category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    elif (args.data_set == 'INAT19'):
        args.data_path = '/datasets01/inaturalist/090619/'
        dataset = INatDataset(args.data_path, train=is_train, year=2019, category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    else:
        raise ValueError('Unknown dataset: {}'.format(args.data_set))
    return (dataset, nb_classes)
|
def build_transform(is_train, args):
    """Build the train-time (timm augmentation pipeline) or eval-time
    torchvision transform for the configured input size."""
    resize_im = args.input_size > 32
    if is_train:
        transform = create_transform(input_size=args.input_size, is_training=True, color_jitter=args.color_jitter, auto_augment=args.aa, interpolation=args.train_interpolation, re_prob=args.reprob, re_mode=args.remode, re_count=args.recount)
        if not resize_im:
            # Small images (e.g. CIFAR): padded random crop instead of RRC.
            transform.transforms[0] = transforms.RandomCrop(args.input_size, padding=4)
        return transform
    steps = []
    if resize_im:
        # Standard ImageNet eval: resize to 256/224 of target, center crop.
        size = int((256 / 224) * args.input_size)
        steps.append(transforms.Resize(size, interpolation=3))
        steps.append(transforms.CenterCrop(args.input_size))
    steps.append(transforms.ToTensor())
    steps.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
    return transforms.Compose(steps)
|
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, max_norm: float=0, model_ema: Optional[ModelEma]=None, mixup_fn: Optional[Mixup]=None):
    """Run one mixed-precision training epoch.

    Returns:
        Dict mapping meter names (e.g. 'loss', 'lr') to their global averages.
    """
    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    # Wall-clock seconds per batch (collected locally, not returned).
    per_batch_time = []
    for (samples, targets) in metric_logger.log_every(data_loader, print_freq, header):
        s = time.time()
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        if (mixup_fn is not None):
            (samples, targets) = mixup_fn(samples, targets)
        # Forward in autocast (mixed precision).
        with torch.cuda.amp.autocast():
            outputs = model(samples)
            # (outputs != outputs) is true only for NaNs; abort on any NaN.
            if ((outputs != outputs).nonzero().size(0) > 0):
                print('output is NaN')
                exit()
            loss = criterion(outputs, targets)
        loss_value = loss.item()
        if (not math.isfinite(loss_value)):
            print('Loss is {}, stopping training'.format(loss_value))
            sys.exit(1)
        optimizer.zero_grad()
        # Second-order optimizers need create_graph=True in backward.
        is_second_order = (hasattr(optimizer, 'is_second_order') and optimizer.is_second_order)
        # loss_scaler performs backward, (optional) grad clipping and the step.
        loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=is_second_order)
        torch.cuda.synchronize()
        if (model_ema is not None):
            model_ema.update(model)
        metric_logger.update(loss=loss_value)
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
        per_batch_time.append((time.time() - s))
    # Aggregate meters across distributed workers before reporting.
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
|
@torch.no_grad()
def evaluate(data_loader, model, device):
    """Evaluate *model* on *data_loader*.

    Returns:
        Dict with global averages of 'loss', 'acc1' and 'acc5'.
    """
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = utils.MetricLogger(delimiter=' ')
    header = 'Test:'
    model.eval()
    for (images, target) in metric_logger.log_every(data_loader, 10, header):
        images = images.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        with torch.cuda.amp.autocast():
            output = model(images)
            loss = criterion(output, target)
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        batch_size = images.shape[0]
        metric_logger.update(loss=loss.item())
        # Weight accuracy meters by batch size so partial batches average right.
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
|
def get_args_parser():
    """Build the argument parser for PerViT training/evaluation.

    Returns:
        argparse.ArgumentParser with all training, augmentation, dataset and
        distributed options registered.
    """
    parser = argparse.ArgumentParser('PerViT training and evaluation script', add_help=False)
    # Core training setup.
    parser.add_argument('--batch-size', default=256, type=int)
    parser.add_argument('--epochs', default=300, type=int)
    parser.add_argument('--model', default='pervit_tiny', type=str, metavar='MODEL', help='Name of model to train')
    parser.add_argument('--load', type=str, default='')
    parser.add_argument('--pretrained', action='store_true')
    parser.add_argument('--input-size', default=224, type=int, help='images input size')
    # Regularization.
    parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)')
    parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)')
    parser.add_argument('--drop-block', type=float, default=None, metavar='PCT', help='Drop block rate (default: None)')
    # Model EMA.
    parser.add_argument('--model-ema', action='store_true')
    parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
    parser.set_defaults(model_ema=False)
    parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
    parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
    parser.add_argument('--use_pos_embed', action='store_true', help='Absolute positional embedding')
    # Optimizer.
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw"')
    parser.add_argument('--opt-eps', default=1e-08, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight-decay', type=float, default=0.05, help='weight decay (default: 0.05)')
    # LR schedule.
    parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER', help='LR scheduler (default: "cosine"')
    parser.add_argument('--lr', type=float, default=0.0005, metavar='LR', help='learning rate (default: 5e-4)')
    parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', help='learning rate noise on/off epoch percentages')
    parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', help='learning rate noise limit percent (default: 0.67)')
    parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', help='learning rate noise std-dev (default: 1.0)')
    parser.add_argument('--warmup-lr', type=float, default=1e-06, metavar='LR', help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min-lr', type=float, default=1e-05, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
    parser.add_argument('--decay-epochs', type=float, default=30, metavar='N', help='epoch interval to decay LR')
    parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
    parser.add_argument('--patience-epochs', type=int, default=10, metavar='N', help='patience epochs for Plateau LR scheduler (default: 10')
    parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', help='LR decay rate (default: 0.1)')
    # Augmentation.
    parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)')
    # Fixed: the original wrapped this call in a spurious one-element tuple.
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
    parser.add_argument('--train-interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
    parser.add_argument('--repeated-aug', action='store_true')
    parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
    parser.set_defaults(repeated_aug=True)
    # Random erasing.
    parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob (default: 0.25)')
    parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")')
    parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)')
    parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split')
    # Mixup / CutMix.
    parser.add_argument('--mixup', type=float, default=0.8, help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
    parser.add_argument('--cutmix', type=float, default=1.0, help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
    parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup-prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup-switch-prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup-mode', type=str, default='batch', help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
    # Dataset.
    parser.add_argument('--data-path', default='../Datasets_CLS/ILSVRC2012/', type=str, help='dataset path')
    parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100', 'IMNET', 'INAT', 'INAT19'], type=str, help='Image Net dataset path')
    parser.add_argument('--sampling_ratio', default=1.0, type=float, help='fraction of samples to keep in the training set of imagenet')
    parser.add_argument('--nb_classes', default=1000, type=int, help='number of classes in imagenet')
    parser.add_argument('--inat-category', default='name', choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'], type=str, help='semantic granularity')
    # Runtime / checkpointing.
    parser.add_argument('--output_dir', default='logs/test/', help='path where to save, empty for no saving')
    parser.add_argument('--device', default='cuda', help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='yes', help='resume from checkpoint')
    parser.add_argument('--save_every', default=1, type=int, help='save model every epochs')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin-mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem', help='')
    parser.set_defaults(pin_mem=True)
    # Distributed.
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--visualize', action='store_true', help='Visualize learned attentions')
    return parser
|
def main(args):
    """Training/evaluation entry point: builds data, model and optimizer,
    optionally resumes a checkpoint, then either evaluates once or trains
    for ``args.epochs`` epochs."""
    utils.init_distributed_mode(args)
    if utils.is_main_process():
        # TensorBoard writer exists only on the main process.
        tbd_writer = SummaryWriter(os.path.join(args.output_dir, 'tbd/runs'))
    print(args)
    device = torch.device(args.device)
    # Per-process seed so each worker draws a different augmentation stream.
    seed = (args.seed + utils.get_rank())
    print('seed: ', seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    cudnn.benchmark = True
    if (not args.eval):
        (dataset_train, args.nb_classes) = build_dataset(is_train=True, args=args)
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        if args.repeated_aug:
            # Repeated-augmentation sampler (3x per sample, DeiT-style).
            sampler_train = RASampler(dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True)
        else:
            sampler_train = torch.utils.data.DistributedSampler(dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True)
        data_loader_train = torch.utils.data.DataLoader(dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True)
    (dataset_val, _) = build_dataset(is_train=False, args=args)
    data_loader_val = torch.utils.data.DataLoader(dataset_val, batch_size=int((1.5 * args.batch_size)), shuffle=False, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False)
    mixup_fn = None
    mixup_active = ((args.mixup > 0) or (args.cutmix > 0.0) or (args.cutmix_minmax is not None))
    if mixup_active:
        mixup_fn = Mixup(mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.nb_classes)
    print(f'Creating model: {args.model}')
    model = create_model(args.model, pretrained=args.pretrained, num_classes=args.nb_classes, drop_rate=args.drop, drop_path_rate=args.drop_path, drop_block_rate=args.drop_block, use_pos_embed=args.use_pos_embed)
    print(model)
    model.to(device)
    if args.pretrained:
        # Load a full checkpoint and re-initialize relative position encodings.
        checkpoint = torch.load(args.load)
        model.load_state_dict(checkpoint['model'])
        model.init_rpe()
    with torch.cuda.device(0):
        # FLOPs / parameter-count report (ptflops).
        (macs, params) = get_model_complexity_info(model, (3, 224, 224), as_strings=True, print_per_layer_stat=True, verbose=True)
        print('{:<30} {:<8}'.format('Computational complexity: ', macs))
        print('{:<30} {:<8}'.format('Number of parameters: ', params))
    if args.visualize:
        basepath = ('vis/%s' % args.load.split('/')[(- 2)])
        os.makedirs(('%s/weight' % basepath), exist_ok=True)
        vis_attention(model, basepath, side=14, q=(7, 7))
        print(('Attention visualized at %s' % basepath))
    model_ema = None
    if args.model_ema:
        # EMA is created after the model is on device but before DDP wrapping.
        model_ema = ModelEma(model, decay=args.model_ema_decay, device=('cpu' if args.model_ema_force_cpu else ''), resume='')
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
    print('number of params:', n_parameters)
    # Linear LR scaling rule: lr * total_batch_size / 512.
    linear_scaled_lr = (((args.lr * args.batch_size) * utils.get_world_size()) / 512.0)
    args.lr = linear_scaled_lr
    optimizer = create_optimizer(args, model)
    loss_scaler = NativeScaler()
    (lr_scheduler, _) = create_scheduler(args, optimizer)
    criterion = LabelSmoothingCrossEntropy()
    if (args.mixup > 0.0):
        # Mixup produces soft targets.
        criterion = SoftTargetCrossEntropy()
    elif args.smoothing:
        criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
    else:
        criterion = torch.nn.CrossEntropyLoss()
    output_dir = Path(args.output_dir)
    if utils.is_main_process():
        torch.save(args, (output_dir / 'args.pyT'))
    if (args.resume and utils.is_main_process()):
        if str(args.resume).startswith('https'):
            # Remote resume: wipe stale local logs, then fetch the checkpoint
            # from Google Drive (the file id follows the '//' in args.resume).
            shutil.rmtree(('%s/tbd/' % args.output_dir))
            os.remove(('%s/args.pyT' % args.output_dir))
            file_id = args.resume[(args.resume.index('/') + 2):]
            utils.download_from_google_drive(file_id, args.output_dir)
        resume_path = os.path.join(args.output_dir, 'checkpoint_latest.pth')
        latest_exist = os.path.exists(resume_path)
        if latest_exist:
            checkpoint = torch.load(resume_path, map_location='cpu')
        if (latest_exist and (not args.eval)):
            model_without_ddp.load_state_dict(checkpoint['model'])
            if ((not args.eval) and ('optimizer' in checkpoint) and ('lr_scheduler' in checkpoint) and ('epoch' in checkpoint)):
                # Resume training state alongside the weights.
                optimizer.load_state_dict(checkpoint['optimizer'])
                lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
                args.start_epoch = (checkpoint['epoch'] + 1)
                if args.model_ema:
                    utils._load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
    if args.eval:
        throughput = utils.compute_throughput(model, resolution=args.input_size)
        print(f'Throughput : {throughput:.2f}')
        model.initialize()
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        return
    print('Start training')
    start_time = time.time()
    max_accuracy = 0.0
    for epoch in range(args.start_epoch, args.epochs):
        gc.collect()
        if args.distributed:
            # Re-seed the sampler so every epoch shuffles differently.
            data_loader_train.sampler.set_epoch(epoch)
        train_stats = train_one_epoch(model, criterion, data_loader_train, optimizer, device, epoch, loss_scaler, args.clip_grad, model_ema, mixup_fn)
        lr_scheduler.step(epoch)
        if args.output_dir:
            checkpoint_paths = [(output_dir / 'checkpoint.pth')]
            if (args.save_every is not None):
                if ((epoch % args.save_every) == 0):
                    checkpoint_paths.append((output_dir / 'checkpoint_latest.pth'))
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master({'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'epoch': epoch, 'model_ema': (get_state_dict(model_ema) if model_ema else None), 'args': args}, checkpoint_path)
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        max_accuracy = max(max_accuracy, test_stats['acc1'])
        print(f'Max accuracy: {max_accuracy:.2f}%')
        if utils.is_main_process():
            # Log the per-epoch curves to TensorBoard.
            tbd_writer.add_scalars('data/loss', {'trn_loss': train_stats['loss'], 'test_loss': test_stats['loss']}, (epoch + 1))
            tbd_writer.add_scalars('data/acc1', {'test_acc1': test_stats['acc1']}, (epoch + 1))
            tbd_writer.add_scalars('data/acc5', {'test_acc5': test_stats['acc5']}, (epoch + 1))
            tbd_writer.add_scalars('data/lr', {'lr': train_stats['lr']}, (epoch + 1))
            tbd_writer.flush()
    if utils.is_main_process():
        total_time = (time.time() - start_time)
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        tbd_writer.close()
        print('Training time {}'.format(total_time_str))
|
@register_model
def pervit_tiny(pretrained=False, **kwargs):
    """PerViT-Tiny: 4 attention heads with the smallest stage widths."""
    kwargs['emb_dims'] = [128, 192, 224, 280]
    kwargs['convstem_dims'] = [3, 48, 64, 96, 128]
    model = VisionTransformer(num_heads=4, norm_layer=partial(nn.LayerNorm, eps=1e-06), **kwargs)
    model.default_cfg = _cfg()
    return model
|
@register_model
def pervit_small(pretrained=False, **kwargs):
    """PerViT-Small: 8 attention heads with mid-sized stage widths."""
    kwargs['emb_dims'] = [272, 320, 368, 464]
    kwargs['convstem_dims'] = [3, 64, 128, 192, 272]
    model = VisionTransformer(num_heads=8, norm_layer=partial(nn.LayerNorm, eps=1e-06), **kwargs)
    model.default_cfg = _cfg()
    return model
|
@register_model
def pervit_medium(pretrained=False, **kwargs):
    """PerViT-Medium: 12 attention heads with the largest stage widths."""
    kwargs['emb_dims'] = [312, 468, 540, 684]
    kwargs['convstem_dims'] = [3, 64, 192, 256, 312]
    model = VisionTransformer(num_heads=12, norm_layer=partial(nn.LayerNorm, eps=1e-06), **kwargs)
    model.default_cfg = _cfg()
    return model
|
class OffsetGenerator():
    """Precomputes, once per run, the 2-D offset vector between every pair of
    patch positions on a (possibly padded) square token grid. The result is
    cached on the class itself (``cls.qk_vec``); note the hard-coded CUDA
    placement."""
    @classmethod
    def initialize(cls, n_patch_side, pad_size):
        # 1-D coordinates in [-1, 1] for the unpadded grid.
        grid_1d = torch.linspace((- 1), 1, n_patch_side).to('cuda')
        if (pad_size > 0):
            # Extend the grid outwards on both sides with the same spacing as
            # between the last two interior points.
            pad_dist = torch.cumsum((grid_1d[(- 1)] - grid_1d[(- 2)]).repeat(pad_size), dim=0)
            grid_1d = torch.cat([((- 1) - pad_dist).flip(dims=[0]), grid_1d, (1 + pad_dist)])
            n_patch_side += (pad_size * 2)
        n_tokens = (n_patch_side ** 2)
        # (y, x) coordinates for every token, flattened row-major.
        grid_y = grid_1d.view((- 1), 1).repeat(1, n_patch_side)
        grid_x = grid_1d.view(1, (- 1)).repeat(n_patch_side, 1)
        grid = torch.stack([grid_y, grid_x], dim=(- 1)).view((- 1), 2)
        # qk_vec[q, k] = position(k) - position(q): relative offset per pair.
        grid_q = grid.view((- 1), 1, 2).repeat(1, n_tokens, 1)
        grid_k = grid.view(1, (- 1), 2).repeat(n_tokens, 1, 1)
        cls.qk_vec = (grid_k - grid_q)
    @classmethod
    def get_qk_vec(cls):
        # Clone so callers cannot mutate the cached tensor in place.
        return cls.qk_vec.clone()
|
class RASampler(torch.utils.data.Sampler):
    """Sampler that restricts data loading to a subset of the dataset for
    distributed training, with repeated augmentation.

    It ensures that each augmented version of a sample is visible to a
    different process (GPU). Heavily based on
    ``torch.utils.data.DistributedSampler``.
    """
    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        if (num_replicas is None):
            if (not dist.is_available()):
                raise RuntimeError('Requires distributed package to be available')
            num_replicas = dist.get_world_size()
        if (rank is None):
            if (not dist.is_available()):
                raise RuntimeError('Requires distributed package to be available')
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Each sample is repeated 3x, hence the factor 3.0.
        self.num_samples = int(math.ceil(((len(self.dataset) * 3.0) / self.num_replicas)))
        self.total_size = (self.num_samples * self.num_replicas)
        # Per epoch each process actually yields only this many samples
        # (dataset size floored to a multiple of 256, split over replicas).
        self.num_selected_samples = int(math.floor((((len(self.dataset) // 256) * 256) / self.num_replicas)))
        self.shuffle = shuffle
    def __iter__(self):
        # Deterministic shuffle keyed on the epoch so all processes agree.
        g = torch.Generator()
        g.manual_seed(self.epoch)
        if self.shuffle:
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = list(range(len(self.dataset)))
        # Repeat every index 3x (repeated augmentation), then pad to total_size.
        indices = [ele for ele in indices for i in range(3)]
        indices += indices[:(self.total_size - len(indices))]
        assert (len(indices) == self.total_size)
        # Round-robin split: consecutive copies of a sample land on different replicas.
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert (len(indices) == self.num_samples)
        return iter(indices[:self.num_selected_samples])
    def __len__(self):
        return self.num_selected_samples
    def set_epoch(self, epoch):
        # Called by the training loop once per epoch to vary the shuffle.
        self.epoch = epoch
|
class SmoothedValue(object):
    """Track a series of values and expose smoothed statistics over a sliding
    window as well as the global average of everything seen so far."""

    def __init__(self, window_size=20, fmt=None):
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt if fmt is not None else '{median:.4f} ({global_avg:.4f})'

    def update(self, value, n=1):
        # The window remembers the value once; running totals weight it by n.
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """Sum count/total across workers.

        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        count, total = stats.tolist()
        self.count = int(count)
        self.total = total

    @property
    def median(self):
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value)
|
class MetricLogger(object):
    """Collects named SmoothedValue meters and pretty-prints training progress."""
    def __init__(self, delimiter='\t'):
        # Unknown meter names are created lazily on first update.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        for (k, v) in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def __getattr__(self, attr):
        # Expose meters as attributes (e.g. metric_logger.loss).
        if (attr in self.meters):
            return self.meters[attr]
        if (attr in self.__dict__):
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
    def __str__(self):
        loss_str = []
        for (name, meter) in self.meters.items():
            loss_str.append('{}: {}'.format(name, str(meter)))
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        # Reduce every meter's count/total across distributed workers.
        for meter in self.meters.values():
            meter.synchronize_between_processes()
    def add_meter(self, name, meter):
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None):
        """Generator that yields from *iterable* while periodically printing
        progress, ETA, per-iteration timings and (if CUDA) peak GPU memory."""
        i = 0
        if (not header):
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Pad the iteration counter to the width of the total count.
        space_fmt = ((':' + str(len(str(len(iterable))))) + 'd')
        log_msg = [header, (('[{0' + space_fmt) + '}/{1}]'), 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}']
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = (1024.0 * 1024.0)
        for obj in iterable:
            # data_time: waiting on the loader; iter_time: full iteration.
            data_time.update((time.time() - end))
            (yield obj)
            iter_time.update((time.time() - end))
            if (((i % print_freq) == 0) or (i == (len(iterable) - 1))):
                eta_seconds = (iter_time.global_avg * (len(iterable) - i))
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=(torch.cuda.max_memory_allocated() / MB)))
                else:
                    print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = (time.time() - start_time)
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(header, total_time_str, (total_time / len(iterable))))
|
def _load_checkpoint_for_ema(model_ema, checkpoint):
'\n Workaround for ModelEma._load_checkpoint to accept an already-loaded object\n '
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file)
|
def setup_for_distributed(is_master):
    """Disable the builtin ``print`` on non-master processes.

    A silenced process can still emit output by passing ``force=True``.
    """
    import builtins
    original_print = builtins.print

    def print(*args, **kwargs):
        # 'force' is consumed here; it is never forwarded to the real print.
        forced = kwargs.pop('force', False)
        if is_master or forced:
            original_print(*args, **kwargs)

    builtins.print = print
|
def is_dist_avail_and_initialized():
    """Return True only when torch.distributed is both compiled in and the
    default process group has been initialized."""
    return dist.is_available() and dist.is_initialized()
|
def get_world_size():
    """Number of distributed processes; 1 when not running distributed."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
|
def get_rank():
    """Rank of this process; 0 when not running distributed."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
|
def is_main_process():
    """True only on rank 0 (the master process)."""
    return not get_rank()
|
def save_on_master(*args, **kwargs):
    """``torch.save`` that is a no-op on non-master ranks, so only one
    process writes the checkpoint."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
|
def init_distributed_mode(args):
    """Configure ``args`` for distributed training from the environment.

    Recognizes torchrun-style variables (RANK / WORLD_SIZE / LOCAL_RANK) and
    SLURM (SLURM_PROCID); otherwise distributed mode is disabled and the
    function returns immediately.
    """
    env = os.environ
    if 'RANK' in env and 'WORLD_SIZE' in env:
        args.rank = int(env['RANK'])
        args.world_size = int(env['WORLD_SIZE'])
        args.gpu = int(env['LOCAL_RANK'])
    elif 'SLURM_PROCID' in env:
        # NOTE(review): this branch does not set args.world_size; it is
        # assumed to already be present on args — confirm with callers.
        args.rank = int(env['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
|
@torch.no_grad()
def compute_throughput(model, batch_size=128, resolution=224):
    """Measure forward-pass throughput of ``model`` on the GPU.

    Returns images per second as a scalar tensor (batch_size divided by the
    mean latency over 30 timed iterations, after 3 warm-up passes).
    """
    torch.cuda.empty_cache()
    model.eval()
    model.to('cuda')
    inputs = torch.randn(batch_size, 3, resolution, resolution, device='cuda')
    # Warm-up passes are excluded from the measurement.
    for _ in range(3):
        model(inputs)
    torch.cuda.synchronize()
    samples = []
    for _ in range(30):
        tic = time.time()
        model(inputs)
        # Synchronize so the wall-clock delta covers the full kernel time.
        torch.cuda.synchronize()
        samples.append(time.time() - tic)
    mean_latency = torch.as_tensor(samples, dtype=torch.float32).mean()
    return batch_size / mean_latency
|
def download_from_google_drive(file_id, output_dir):
    """Download a ``.tar.gz`` archive from Google Drive, extract it into
    ``output_dir`` and flatten the single top-level directory it contains."""
    url = 'https://drive.google.com/uc?id=%s' % file_id
    archive_path = os.path.join(output_dir, 'tmp.tar.gz')
    gdown.download(url, archive_path, quiet=False)
    with tarfile.open(archive_path, 'r:gz') as archive:
        archive.extractall(output_dir)
    os.remove(archive_path)
    # The archive ships one top-level folder; move its contents up a level.
    extracted_root = glob.glob('%s/*' % output_dir)[0]
    for entry in glob.glob('%s/*' % extracted_root):
        shutil.move(entry, output_dir)
    os.rmdir(extracted_root)
    print()
|
def align_and_split(files):
    # Motion-correct each movie with MotionCor2, then write even/odd frame
    # sums as two independent noisy observations (noise2noise-style input).
    # NOTE(review): data_path and pixel_spacing are module-level globals —
    # confirm they are defined before calling.
    for f in tqdm(files):
        # NOTE(review): the {f} placeholder relies on IPython's variable
        # expansion inside system() calls — this only works from a notebook.
        get_ipython().system('/notebooks/MotionCor2_1.3.0-Cuda101 -InMrc {f} -OutMrc tmp/aligned.mrc -Patch 5 5 5 -OutStack 1 >> motioncor2.log')
        # MotionCor2 wrote the aligned frame stack as tmp/*_Stk.mrc.
        aligned_stack = mrcfile.open(glob('tmp/*_Stk.mrc')[0], permissive=True)
        # Sum even and odd frames separately into the two output half-sets.
        save_mrc(join(data_path, 'even', basename(f)), np.sum(aligned_stack.data[::2], axis=0), pixel_spacing)
        save_mrc(join(data_path, 'odd', basename(f)), np.sum(aligned_stack.data[1::2], axis=0), pixel_spacing)
        # Clean the scratch directory for the next movie.
        remove_files('tmp', extension='.mrc')
|
def copy_etomo_files(src, name, target):
    """Copy the etomo alignment/reconstruction files for tomogram ``name``
    from ``src`` into ``target``."""
    # The local alignment transform is optional and copied only if present.
    if exists(join(src, name + 'local.xf')):
        cp(join(src, name + 'local.xf'), target)
    # Remaining files are required; order matches the original call sequence.
    for filename in (name + '.xf', 'eraser.com', 'ctfcorrection.com',
                     'tilt.com', 'newst.com', name + '.xtilt', name + '.tlt',
                     name + '.defocus', 'rotation.xf'):
        cp(join(src, filename), target)
|
def augment(x, y):
    """Randomly rotate each paired sample by 0-3 quarter turns in the
    (axis 0, axis 2) plane, swapping the pair's roles half of the time
    (noise2noise-style augmentation).

    ``x`` and ``y`` are batches of 3D volumes — assumed shape
    (N, D1, D2, D3); TODO confirm with callers. Returns the augmented
    pair as new arrays; the inputs are not modified.
    """
    n_samples = x.shape[0]
    # One rotation count per sample, drawn up front (keeps RNG order stable).
    quarter_turns = np.random.randint(0, 4, n_samples)
    out_x = x.copy()
    out_y = y.copy()
    for idx in range(n_samples):
        swap = np.random.rand() >= 0.5
        src_a, src_b = (y, x) if swap else (x, y)
        out_x[idx] = np.rot90(src_a[idx], k=quarter_turns[idx], axes=(0, 2))
        out_y[idx] = np.rot90(src_b[idx], k=quarter_turns[idx], axes=(0, 2))
    return (out_x, out_y)
|
class CryoDataWrapper(Sequence):
    """Keras ``Sequence`` yielding shuffled, augmented (X, Y) batches."""

    def __init__(self, X, Y, batch_size):
        self.X = X
        self.Y = Y
        self.batch_size = batch_size
        self.perm = np.random.permutation(len(self.X))

    def __len__(self):
        # Number of batches, counting a trailing partial batch.
        return int(np.ceil(len(self.X) / float(self.batch_size)))

    def on_epoch_end(self):
        # Reshuffle the sample order between epochs.
        self.perm = np.random.permutation(len(self.X))

    def __getitem__(self, i):
        start = i * self.batch_size
        batch_idx = self.perm[start:start + self.batch_size]
        return self.__augment__(self.X[batch_idx], self.Y[batch_idx])

    def __augment__(self, x, y):
        # Delegates to the module-level rot90/swap augmentation.
        return augment(x, y)
|
class CryoCARE(CARE):
    def train(self, X, Y, validation_data, epochs=None, steps_per_epoch=None):
        """Train the neural network with the given data.

        Parameters
        ----------
        X : :class:`numpy.ndarray`
            Array of source images.
        Y : :class:`numpy.ndarray`
            Array of target images.
        validation_data : tuple(:class:`numpy.ndarray`, :class:`numpy.ndarray`)
            Tuple of arrays for source and target validation images.
        epochs : int
            Optional argument to use instead of the value from ``config``.
        steps_per_epoch : int
            Optional argument to use instead of the value from ``config``.

        Returns
        -------
        ``History`` object
            See `Keras training history <https://keras.io/models/model/#fit>`_.
        """
        # Expression-as-validation idiom: _raise throws unless validation_data
        # is a (source, target) pair.
        ((isinstance(validation_data, (list, tuple)) and (len(validation_data) == 2)) or _raise(ValueError('validation_data must be a pair of numpy arrays')))
        (n_train, n_val) = (len(X), len(validation_data[0]))
        frac_val = ((1.0 * n_val) / (n_train + n_val))
        frac_warn = 0.05
        if (frac_val < frac_warn):
            warnings.warn(('small number of validation images (only %.1f%% of all images)' % (100 * frac_val)))
        axes = axes_check_and_normalize(('S' + self.config.axes), X.ndim)
        ax = axes_dict(axes)
        # Every spatial axis must be divisible by the network's downsampling
        # factor, otherwise the U-Net skip connections cannot be aligned.
        for (a, div_by) in zip(axes, self._axes_div_by(axes)):
            n = X.shape[ax[a]]
            if ((n % div_by) != 0):
                raise ValueError(('training images must be evenly divisible by %d along axis %s (which has incompatible size %d)' % (div_by, a, n)))
        if (epochs is None):
            epochs = self.config.train_epochs
        if (steps_per_epoch is None):
            steps_per_epoch = self.config.train_steps_per_epoch
        if (not self._model_prepared):
            self.prepare_for_training()
        # Difference from stock CARE.train: batches come from CryoDataWrapper,
        # which applies the rot90/swap augmentation defined in this module.
        training_data = CryoDataWrapper(X, Y, self.config.train_batch_size)
        history = self.keras_model.fit_generator(generator=training_data, validation_data=validation_data, epochs=epochs, steps_per_epoch=steps_per_epoch, callbacks=self.callbacks, verbose=1)
        if (self.basedir is not None):
            self.keras_model.save_weights(str((self.logdir / 'weights_last.h5')))
            if (self.config.train_checkpoint is not None):
                print()
                # Reload the best checkpointed weights and drop the temporary
                # per-epoch weights file if it exists.
                self._find_and_load_weights(self.config.train_checkpoint)
                try:
                    (self.logdir / 'weights_now.h5').unlink()
                except FileNotFoundError:
                    pass
        return history
|
@contextmanager
def cd(newdir):
    """Context manager to temporarily change the working directory."""
    saved = os.getcwd()
    os.chdir(os.path.expanduser(newdir))
    try:
        yield
    finally:
        # Always restore the previous directory, even if the body raised.
        os.chdir(saved)
|
def save_mrc(path, data, pixel_spacing):
    """Save ``data`` in an mrc-file and stamp the pixel spacing (Angstrom)
    into its header using IMOD's ``alterheader`` command.

    Raises CalledProcessError when alterheader fails.
    """
    with mrcfile.open(path, mode='w+') as mrc:
        mrc.set_data(data)
    spacing = '{0},{0},{0}'.format(pixel_spacing)
    result = subprocess.run(['alterheader', '-del', spacing, path],
                            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    result.check_returncode()
|
def remove_files(dir, extension='.mrc'):
    """Delete every file in ``dir`` whose name ends with ``extension``
    (default ``'.mrc'``)."""
    for path in glob(join(dir, '*' + extension)):
        os.remove(path)
|
def modify_newst(path, bin_factor):
    """Rewrite the ``BinByFactor`` entry of a newst.com file in place.

    Note: this overwrites the file and echoes every line to stdout.
    """
    with open(path, 'r') as fh:
        content = [line.strip() for line in fh]
    # IndexError (as in the original) if no BinByFactor line exists.
    matches = [i for i, line in enumerate(content) if 'BinByFactor' in line]
    content[matches[0]] = 'BinByFactor ' + str(bin_factor)
    with open(path, 'w') as fh:
        for line in content:
            fh.writelines('%s\n' % line)
            print(line)
|
def modify_ctfcorrection(path, bin_factor, pixel_spacing):
    """Rewrite the ``PixelSize`` entry of a ctfcorrection.com file in place.

    The new pixel size is ``bin_factor * pixel_spacing`` (Angstrom), rounded
    to three decimals. Note: this overwrites the file and echoes every line.
    """
    with open(path, 'r') as fh:
        content = [line.strip() for line in fh]
    matches = [i for i, line in enumerate(content) if 'PixelSize' in line]
    content[matches[0]] = 'PixelSize ' + str(np.round(bin_factor * pixel_spacing, decimals=3))
    with open(path, 'w') as fh:
        for line in content:
            fh.writelines('%s\n' % line)
            print(line)
|
def modify_tilt(path, bin_factor, exclude_angles=[]):
    """Rewrite ``IMAGEBINNED`` (and optionally ``EXCLUDELIST2``) of a
    tilt.com file in place, forcing CPU reconstruction via ``UseGPU 0``.

    Note: this overwrites the file and echoes every line to stdout.
    """
    with open(path, 'r') as fh:
        content = [line.strip() for line in fh]
    if 'UseGPU 0' not in content:
        # Insert just before the final line of the script.
        content.insert(len(content) - 1, 'UseGPU 0')
    binned = [i for i, line in enumerate(content) if 'IMAGEBINNED' in line]
    content[binned[0]] = 'IMAGEBINNED ' + str(bin_factor)
    if len(exclude_angles) > 0:
        hits = [i for i, line in enumerate(content) if 'EXCLUDELIST2 ' in line]
        # NOTE(review): when no EXCLUDELIST2 entry exists this REPLACES the
        # last line of the file rather than appending — behavior preserved.
        target = (len(content) - 1) if len(hits) == 0 else hits[0]
        content[target] = 'EXCLUDELIST2 ' + str(exclude_angles)[1:-1]
    with open(path, 'w') as fh:
        for line in content:
            fh.writelines('%s\n' % line)
            print(line)
|
def modify_com_scripts(path, bin_factor, pixel_spacing, exclude_angles=[]):
    """Apply the bin-factor (and exclude-angle) updates to the newst.com,
    ctfcorrection.com and tilt.com scripts under ``path``.

    Note: this overwrites the files!
    """
    divider = '------------------------------------------------------------------------'
    print("Modified 'newst.com' file:")
    modify_newst(join(path, 'newst.com'), bin_factor)
    print('')
    print(divider)
    print('')
    print("Modified 'ctfcorrection.com' file:")
    modify_ctfcorrection(join(path, 'ctfcorrection.com'), bin_factor, pixel_spacing)
    print('')
    print(divider)
    print('')
    print("Modified 'tilt.com' file:")
    modify_tilt(join(path, 'tilt.com'), bin_factor, exclude_angles)
|
def reconstruct_tomo(path, name, dfix, init, volt=300, rotate_X=True):
    """Reconstruct a tomogram with IMOD-com scripts. This also applies
    mtffilter after ctfcorrection.

    A reconstruction log will be placed in the reconstruction-directory.

    Parameters
    ----------
    path : str
        Path to the reconstruction-directory.
    name : str
        Name of the tomogram (the prefix).
    dfix : float
        dfixed parameter of mtffilter: fixed dose for each image of the
        input file, in electrons/square Angstrom.
    init : float
        initial parameter of mtffilter: dose applied before any of the
        images in the input file were taken.
    volt : int
        volt parameter of mtffilter. Microscope voltage in kV; must be
        either 200 or 300. Default: ``300``
    rotate_X : bool
        If the reconstructed tomogram should be rotated 90 degree about X.
        Default: ``True``
    """
    # NOTE(review): mv appears to be a shell-style move helper imported at
    # module level — confirm its provenance before refactoring.
    with cd(path):
        # Collect the per-tilt projections in sorted (tilt-angle) order.
        mrc_files = glob('*.mrc')
        mrc_files.sort()
        with open((name + '_reconstruction.log'), 'a') as log:
            # 1) Assemble the projections into a single stack <name>.st.
            cmd = ((['newstack'] + mrc_files) + [(name + '.st')])
            print(' '.join(cmd))
            result = subprocess.run(cmd, stdout=log, stderr=log)
            result.check_returncode()
            # 2) Erase hot pixels; eraser.com writes <name>_fixed.st.
            cmd = ['submfg', 'eraser.com']
            print(' '.join(cmd))
            result = subprocess.run(cmd, stdout=log, stderr=log)
            result.check_returncode()
            # Keep the raw stack aside and promote the fixed one.
            mv((name + '.st'), (name + '_orig.st'))
            mv((name + '_fixed.st'), (name + '.st'))
            # 3) Align the stack (newst.com).
            cmd = ['submfg', 'newst.com']
            print(' '.join(cmd))
            result = subprocess.run(cmd, stdout=log, stderr=log)
            result.check_returncode()
            # 4) CTF correction.
            cmd = ['submfg', 'ctfcorrection.com']
            print(' '.join(cmd))
            result = subprocess.run(cmd, stdout=log, stderr=log)
            result.check_returncode()
            # 5) Dose-weighting filter on the CTF-corrected aligned stack.
            cmd = ['mtffilter', '-dfixed', str(dfix), '-initial', str(init), '-volt', str(volt), (name + '_ctfcorr.ali'), (name + '.ali')]
            print(' '.join(cmd))
            result = subprocess.run(cmd, stdout=log, stderr=log)
            result.check_returncode()
            # 6) Back-projection reconstruction (tilt.com) -> <name>_full.rec.
            cmd = ['submfg', 'tilt.com']
            print(' '.join(cmd))
            result = subprocess.run(cmd, stdout=log, stderr=log)
            result.check_returncode()
            # 7) Optionally rotate the volume about X into standard orientation.
            if rotate_X:
                cmd = ['trimvol', '-rx', (name + '_full.rec'), (name + '.rec')]
                print(' '.join(cmd))
                result = subprocess.run(cmd, stdout=log, stderr=log)
                result.check_returncode()
            else:
                print('mv {0}_full.rec {0}.rec'.format(name))
                mv((name + '_full.rec'), (name + '.rec'))
        # Clean up intermediates and restore the original raw stack.
        os.remove((name + '.st'))
        os.remove((name + '_full.rec'))
        mv((name + '_orig.st'), (name + '.st'))
        os.remove((name + '.ali'))
        os.remove((name + '_ctfcorr.ali'))
        # Tag the final reconstruction with the directory name (e.g. even/odd).
        tomName = (name + '.rec')
        split_name = os.path.basename(os.path.normpath(path))
        tomRename = (((name + '_') + split_name) + '.rec')
        mv(tomName, tomRename)
|
def get_datasets():
    """Return a fresh instance of every supported hate-speech dataset."""
    return [
        ousidhoum2019.Ousidhoum2019(),
        mulki2019.Mulki2019(),
        mubarak2017twitter.Mubarak2017twitter(),
        mubarak2017aljazeera.Mubarak2017aljazeera(),
        davidson2017.Davidson2017(),
        gibert2018.Gibert2018(),
        gao2018.Gao2018(),
        chung2019.Chung2019(),
        qian2019.Qian2019(),
        waseem2016.Waseem2016(),
        jha2017.Jha2017(),
        elsherief2018.Elsherief2018(),
        mandl2019en.Mandl2019en(),
        mandl2019ger.Mandl2019ger(),
        mandl2019hind.Mandl2019hind(),
        bretschneider2017.Bretschneider2017(),
        ross2017.Ross2017(),
        wiegand2018.Wiegand2018(),
        pitenis2020.Pitenis2020(),
        mathur2018.Mathur2018(),
        alfina2017.Alfina2017(),
        ibrohim2019.Ibrohim2019(),
        ibrohim2018.Ibrohim2018(),
        sanguinetti2018.Sanguinetti2018(),
        fortuna2019.Fortuna2019(),
        coltekin2019.Coltekin2019(),
        albadi2018.Albadi2018(),
        basile2019.Basile2019(),
        founta2018.Founta2018(),
        wulczyn2017toxic.Wulczyn2017toxic(),
        wulczyn2017aggressive.Wulczyn2017aggressive(),
        wulczyn2017attack.Wulczyn2017attack(),
        sigurbergsson2019.Sigurbergsson2019(),
        kulkarni2021.Kulkarni2021(),
        novak2021.Novak2021(),
        kumar2018.Kumar2018(),
        zampieri2019.Zampieri2019(),
        bretschneider2016wow.Bretschneider2016wow(),
        bretschneider2016lol.Bretschneider2016lol(),
    ]
|
def get_dataset_by_name(name):
    """Return the dataset whose ``name`` attribute matches, or None."""
    for candidate in get_datasets():
        if candidate.name == name:
            return candidate
    return None
|
class Albadi2018(dataset.Dataset):
    """Arabic religious hate-speech tweets (Albadi et al., 2018)."""
    name = 'albadi2018'
    url = 'https://github.com/nuhaalbadi/Arabic_hatespeech/archive/refs/heads/master.zip'
    hash = '7f7d87384b4b715655ec0e2d329bc234bbc965ad116290f2e2d0b11e26e272b3'
    files = [{'name': 'albadi2018ar_train.csv', 'language': 'ar', 'type': 'training', 'platform': 'twitter'}, {'name': 'albadi2018ar_test.csv', 'language': 'ar', 'type': 'test', 'platform': 'twitter'}]
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        # The archive ships only tweet IDs; hydrate the text via the API.
        extracted = helpers.unzip_file(tmp_file_path)
        hydrated_train = helpers.download_tweets_for_csv(os.path.join(extracted, 'Arabic_hatespeech-master/train.csv'), 'id', api_config)
        hydrated_test = helpers.download_tweets_for_csv(os.path.join(extracted, 'Arabic_hatespeech-master/test.csv'), 'id', api_config)
        helpers.copy_file(hydrated_train, os.path.join(dataset_folder, 'albadi2018ar_train.csv'))
        helpers.copy_file(hydrated_test, os.path.join(dataset_folder, 'albadi2018ar_test.csv'))

    @classmethod
    def unify_row(cls, row):
        # Binary annotation: 1 -> 'hate', anything else -> 'noHate'.
        row['labels'] = ['hate'] if row['hate'] == 1 else ['noHate']
        row = row.drop(['hate'])
        return row
|
class Alfina2017(dataset.Dataset):
    """Indonesian hate-speech tweets (Alfina et al., 2017)."""
    name = 'alfina2017'
    url = 'https://github.com/ialfina/id-hatespeech-detection/raw/master/IDHSD_RIO_unbalanced_713_2017.txt'
    hash = '4ee1d9cc1f1fdd27fb4298207fabb717f4e09281bd68fa5dcbcf720d75f1d4ed'
    files = [{'name': 'alfina2017id.csv', 'language': 'id', 'type': 'training', 'platform': 'twitter'}]
    comment = ' '
    license = 'The dataset may be used freely, but if you want to publish paper/publication using the dataset, please cite this publication:\nIka Alfina, Rio Mulia, Mohamad Ivan Fanany, and Yudo Ekanata, "Hate Speech Detection in Indonesian Language: A Dataset and Preliminary Study ", in Proceeding of 9th International Conference on Advanced Computer Science and Information Systems 2017(ICACSIS 2017). '

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        # The source file is tab-separated; normalize it to a clean CSV.
        cleaned = helpers.clean_csv(tmp_file_path, sep='\t')
        helpers.copy_file(cleaned, os.path.join(dataset_folder, 'alfina2017id.csv'))

    @classmethod
    def unify_row(cls, row):
        row['text'] = row['Tweet']
        row['labels'] = [row['Label']]
        row = row.drop(['Label', 'Tweet'])
        return row
|
class Basile2019(dataset.Dataset):
    """HatEval 2019 English tweets (hate against women/immigrants)."""
    name = 'basile2019'
    url = 'https://github.com/cicl2018/HateEvalTeam/raw/master/Data%20Files/Data%20Files/%232%20Development-English-A/train_dev_en_merged.tsv'
    hash = 'fdd34bf56f0afa744ee7484774d259d83a756033cd8049ded81bd55d2fcb1272'
    files = [{'name': 'basile2019en.csv', 'language': 'en', 'type': 'training', 'platform': 'twitter'}]
    comment = 'HS - a binary value indicating if HS is occurring against one of the given targets (women or immigrants): 1 if occurs, 0 if not.\nTarget Range - if HS occurs (i.e. the value for the feature HS is 1), a binary value indicating if the target is a generic group of people (0) or a specific individual (1).\nAggressiveness- if HS occurs (i.e. the value for the feature HS is 1), a binary value indicating if the tweeter is aggressive (1) or not (0).'
    license = ''

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        # The source file is tab-separated; normalize it to a clean CSV.
        cleaned = helpers.clean_csv(tmp_file_path, sep='\t')
        helpers.copy_file(cleaned, os.path.join(dataset_folder, 'basile2019en.csv'))

    @classmethod
    def unify_row(cls, row):
        # Collect whichever of the three binary annotations are set,
        # preserving the HS/TR/AG order.
        row['labels'] = [tag for tag in ('HS', 'TR', 'AG') if row[tag] == 1]
        row = row.drop(['HS', 'TR', 'AG', 'id'])
        return row
|
class Bretschneider2016lol(dataset.Dataset):
    """League of Legends forum posts with offensiveness annotations."""
    name = 'bretschneider2016lol'
    url = 'http://ub-web.de/research/resources/lol_anonymized.zip'
    hash = '901e0d51428f34b94bf6b3f59b0e9cf71dabe94fc74fd81fd1e9be199d2902bc'
    files = [{'name': 'bretschneider2016en_lol.csv', 'language': 'en', 'type': 'training', 'platform': 'League of Legends'}]
    comment = ' '
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        extracted = helpers.unzip_file(tmp_file_path)
        tables = helpers.extract_sql_tables(os.path.join(extracted, 'lol_anonymized.sql'))
        # Left-join posts with their (possibly missing) annotations.
        joined = helpers.join_csvs(os.path.join(tables, 'posts.csv'), ['topic_id', 'post_number'], os.path.join(tables, 'annotations.csv'), ['topic_id', 'post_number'], how='left')
        helpers.copy_file(joined, os.path.join(dataset_folder, 'bretschneider2016en_lol.csv'))

    @classmethod
    def unify_row(cls, row):
        row['text'] = row['html_message']
        # Annotated rows carry an offender id; unannotated rows from the
        # left join hold a float NaN, which marks them as non-offensive.
        row['labels'] = ['offensive'] if type(row['offender']) != float else []
        row = row.drop(['topic_id', 'post_number', 'annotator', 'offender', 'victim', 'author', 'html_message', 'timestamp'])
        return row
|
class Bretschneider2016wow(dataset.Dataset):
    """World of Warcraft forum posts with offensiveness annotations."""
    name = 'bretschneider2016wow'
    url = 'http://www.ub-web.de/research/resources/wow_anonymized.zip'
    hash = '0f5d67879306cd67154c31583b6e8750b9290f54c0065cc8cdf11ab6a8d1a26d'
    files = [{'name': 'bretschneider2016en_wow.csv', 'language': 'en', 'type': 'training', 'platform': 'World of Warcraft'}]
    comment = ' '
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        extracted = helpers.unzip_file(tmp_file_path)
        tables = helpers.extract_sql_tables(os.path.join(extracted, 'wow_anonymized.sql'))
        # Left-join posts with their (possibly missing) annotations.
        joined = helpers.join_csvs(os.path.join(tables, 'posts.csv'), ['topic_id', 'post_number'], os.path.join(tables, 'annotations.csv'), ['topic_id', 'post_number'], how='left')
        helpers.copy_file(joined, os.path.join(dataset_folder, 'bretschneider2016en_wow.csv'))

    @classmethod
    def unify_row(cls, row):
        row['text'] = row['html_message']
        # Annotated rows carry an offender id; unannotated rows from the
        # left join hold a float NaN, which marks them as non-offensive.
        row['labels'] = ['offensive'] if type(row['offender']) != float else []
        row = row.drop(['topic_id', 'post_number', 'annotator', 'offender', 'victim', 'author', 'html_message', 'timestamp'])
        return row
|
# (extraction artifact — dataset-viewer boilerplate preserved as comments)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.