code stringlengths 101 5.91M |
|---|
def _create_losses(input_queue, create_model_fn):
    """Build detection-model losses from a queue of batched inputs.

    Side effect: every loss tensor is registered via tf.losses.add_loss so
    that tf.losses.get_total_loss() picks it up; nothing is returned.

    Args:
        input_queue: queue object consumed by `_get_inputs` (defined elsewhere
            in this file); yields images and groundtruth lists.
        create_model_fn: zero-arg factory returning a detection model.
    """
    detection_model = create_model_fn()
    (images, groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list) = _get_inputs(input_queue, detection_model.num_classes)
    images = [detection_model.preprocess(image) for image in images]
    images = tf.concat(images, 0)
    # If any image lacks a mask, masks cannot be batched consistently, so
    # mask supervision is dropped for the whole batch.
    if any(((mask is None) for mask in groundtruth_masks_list)):
        groundtruth_masks_list = None
    detection_model.provide_groundtruth(groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list)
    prediction_dict = detection_model.predict(images)
    losses_dict = detection_model.loss(prediction_dict)
    for loss_tensor in losses_dict.values():
        tf.losses.add_loss(loss_tensor)
def var(xNp, volatile=False, cuda=False):
    """Wrap a numpy array in an autograd Variable, optionally moved to GPU.

    Args:
        xNp: numpy array to wrap.
        volatile: forwarded to Variable (legacy inference-mode flag).
        cuda: when True, move the resulting Variable to the default GPU.
    """
    wrapped = Variable(t.from_numpy(xNp), volatile=volatile)
    return wrapped.cuda() if cuda else wrapped
class TarDataset(data.Dataset):
    """Dataset whose raw files ship as a tar archive fetched from `cls.url`.

    Subclasses are expected to define `url`, `filename` (the archive file
    name) and `dirname` (the directory created by extraction).
    """

    # Bug fix: the method was declared with `cls` but lacked the
    # @classmethod decorator, so class-level calls required an explicit
    # first argument and instance calls passed the instance as `cls`.
    @classmethod
    def download_or_unzip(cls, root):
        """Download (if missing) and extract the archive under `root`.

        Returns the extraction directory path with a trailing separator.
        """
        path = os.path.join(root, cls.dirname)
        if (not os.path.isdir(path)):
            tpath = os.path.join(root, cls.filename)
            os.makedirs(root, exist_ok=True)
            if (not os.path.isfile(tpath)):
                print('downloading')
                urllib.request.urlretrieve(cls.url, tpath)
            # NOTE(review): extractall trusts member paths of a downloaded
            # archive; consider validating members before extraction.
            with tarfile.open(tpath, 'r') as tfile:
                print('extracting')
                tfile.extractall(root)
        return os.path.join(path, '')
def prod(iterable):
    """Return the product of all elements of `iterable`, or 1 when empty.

    Bug fix: the original called `len(list(iterable))` and then reduced over
    `iterable` again — for one-shot iterators (generators) the emptiness
    check exhausted the input, so the reduce raised or returned garbage.
    Materializing once handles both sequences and iterators correctly.
    """
    items = list(iterable)
    if items:
        return reduce(operator.mul, items)
    return 1
class AvgPool2dSame(nn.AvgPool2d):
    """AvgPool2d with TensorFlow-style 'SAME' dynamic padding.

    The base class is configured with zero padding because the actual
    padding is computed from the input size at forward time by
    `avg_pool2d_same` (defined elsewhere in this file).
    """

    def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
        # `padding` is accepted for interface compatibility but not passed
        # through; same-padding is applied dynamically in forward.
        kernel_size = tup_pair(kernel_size)
        stride = tup_pair(stride)
        super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad)

    def forward(self, x):
        return avg_pool2d_same(x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad)
class SkipConnectRNNCell(VarRNNCellBase):
    """Elman RNN cell with a skip (extra previous-state) connection and
    variational dropout on inputs and hidden state.

    The hidden-to-hidden weight has shape (hidden_size, hidden_size * 2)
    because the recurrent input is the concatenation of the previous hidden
    state `hx` and the skip state `hs`.  Dropout masks are resampled once
    per sequence via `reset_noise` and reused for every step (variational
    dropout), with inverted scaling 1/(1-p) so expectations are unchanged.
    """

    def __init__(self, input_size, hidden_size, bias=True, nonlinearity='tanh', p=(0.5, 0.5)):
        super(SkipConnectRNNCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.nonlinearity = nonlinearity
        self.weight_ih = Parameter(torch.Tensor(hidden_size, input_size))
        # Recurrent weight consumes [hx; hs], hence the * 2 input dimension.
        self.weight_hh = Parameter(torch.Tensor(hidden_size, (hidden_size * 2)))
        if bias:
            self.bias_ih = Parameter(torch.Tensor(hidden_size))
            self.bias_hh = Parameter(torch.Tensor(hidden_size))
        else:
            self.register_parameter('bias_ih', None)
            self.register_parameter('bias_hh', None)
        self.reset_parameters()
        # p is a pair (input dropout, hidden-state dropout), each in [0, 1].
        (p_in, p_hidden) = p
        if ((p_in < 0) or (p_in > 1)):
            raise ValueError('input dropout probability has to be between 0 and 1, but got {}'.format(p_in))
        if ((p_hidden < 0) or (p_hidden > 1)):
            raise ValueError('hidden state dropout probability has to be between 0 and 1, but got {}'.format(p_hidden))
        self.p_in = p_in
        self.p_hidden = p_hidden
        self.noise_in = None
        self.noise_hidden = None

    def reset_parameters(self):
        """Xavier-uniform weights; zero biases when present."""
        nn.init.xavier_uniform_(self.weight_hh)
        nn.init.xavier_uniform_(self.weight_ih)
        if self.bias:
            nn.init.constant_(self.bias_hh, 0.0)
            nn.init.constant_(self.bias_ih, 0.0)

    def reset_noise(self, batch_size):
        """Resample the per-sequence dropout masks (training mode only)."""
        if self.training:
            if self.p_in:
                noise = self.weight_ih.new_empty(batch_size, self.input_size)
                self.noise_in = (noise.bernoulli_((1.0 - self.p_in)) / (1.0 - self.p_in))
            else:
                self.noise_in = None
            if self.p_hidden:
                # Mask covers the concatenated [hx; hs] recurrent input.
                noise = self.weight_hh.new_empty(batch_size, (self.hidden_size * 2))
                self.noise_hidden = (noise.bernoulli_((1.0 - self.p_hidden)) / (1.0 - self.p_hidden))
            else:
                self.noise_hidden = None
        else:
            # Evaluation: no dropout at all.
            self.noise_in = None
            self.noise_hidden = None

    def forward(self, input, hx, hs):
        """One recurrent step combining `input`, hidden `hx` and skip `hs`.

        Dispatches to the functional cell (`rnn_F`, defined elsewhere)
        matching the configured nonlinearity.
        """
        if (self.nonlinearity == 'tanh'):
            func = rnn_F.SkipConnectRNNTanhCell
        elif (self.nonlinearity == 'relu'):
            func = rnn_F.SkipConnectRNNReLUCell
        else:
            raise RuntimeError('Unknown nonlinearity: {}'.format(self.nonlinearity))
        return func(input, hx, hs, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh, self.noise_in, self.noise_hidden)
class r2plus1d_18(nn.Module):
    """torchvision R(2+1)D-18 video backbone with its ReLUs converted to
    Swish (via `convert_relu_to_swish`, defined elsewhere) and a fresh
    dropout + linear classification head."""

    def __init__(self, pretrained=True, num_classes=500, dropout_p=0.5):
        super(r2plus1d_18, self).__init__()
        self.pretrained = pretrained
        self.num_classes = num_classes
        model = torchvision.models.video.r2plus1d_18(pretrained=self.pretrained)
        # Drop the original fc layer; keep everything up to pooled features.
        modules = list(model.children())[:(- 1)]
        self.r2plus1d_18 = nn.Sequential(*modules)
        convert_relu_to_swish(self.r2plus1d_18)
        # New head sized from the discarded fc's input features.
        self.fc1 = nn.Linear(model.fc.in_features, self.num_classes)
        self.dropout = nn.Dropout(dropout_p, inplace=True)

    def forward(self, x):
        # Backbone output is flattened to (batch, features) before the head.
        out = self.r2plus1d_18(x)
        out = out.flatten(1)
        out = self.dropout(out)
        out = self.fc1(out)
        return out
def mandelbrot(render_size, center, zoom, cycles):
    """Render Mandelbrot iteration counts for a window of the complex plane.

    Args:
        render_size: (width, height) in pixels.
        center: (real, imag) midpoint of the rendered window.
        zoom: plane-width covered by the full image width.
        cycles: iteration budget passed to `mandelbrot_helper`.

    Returns the per-pixel counts as a numpy array (eager TF is assumed,
    given the .numpy() call).
    """
    # Plane units per pixel; the same step is used on both axes.
    f = (zoom / render_size[0])
    real_start = (center[0] - ((render_size[0] / 2) * f))
    real_end = (real_start + (render_size[0] * f))
    imag_start = (center[1] - ((render_size[1] / 2) * f))
    imag_end = (imag_start + (render_size[1] * f))
    real_range = tf.range(real_start, real_end, f, dtype=tf.float64)
    imag_range = tf.range(imag_start, imag_end, f, dtype=tf.float64)
    (real, imag) = tf.meshgrid(real_range, imag_range)
    grid_c = tf.constant(tf.complex(real, imag))
    current_values = tf.Variable(grid_c)
    counts = tf.Variable(tf.zeros_like(grid_c, tf.float32))
    # Iterates in place, filling `counts`; helper is defined elsewhere.
    mandelbrot_helper(grid_c, current_values, counts, cycles)
    return counts.numpy()
def dump_result(args, sample_id, feat_pred):
    """Persist a predicted feature matrix to <results_path>/feat/<sample_id>.npy.

    The array's first two axes are swapped (transpose(1, 0)) before saving.
    """
    feat_dir = Path(args.results_path) / 'feat'
    feat_dir.mkdir(parents=True, exist_ok=True)
    np.save(feat_dir / f'{sample_id}.npy', feat_pred.transpose(1, 0))
def test_eval_hmean():
    """eval_hmean must validate its arguments and score exact matches as 1."""
    metrics = set(['hmean-iou', 'hmean-ic13'])
    # One image with two predicted boundaries (8 polygon coords + score),
    # assumed to match the dummy annotations exactly.
    results = [{'boundary_result': [[50, 70, 80, 70, 80, 100, 50, 100, 1], [120, 140, 200, 140, 200, 200, 120, 200, 1]]}]
    img_infos = [{'file_name': 'sample1.jpg'}]
    ann_infos = _create_dummy_ann_infos()
    # Malformed img_infos / ann_infos / results / metrics must be rejected.
    with pytest.raises(AssertionError):
        eval_hmean(results, [[]], ann_infos, metrics=metrics)
    with pytest.raises(AssertionError):
        eval_hmean(results, img_infos, [[]], metrics=metrics)
    with pytest.raises(AssertionError):
        eval_hmean([[]], img_infos, ann_infos, metrics=metrics)
    with pytest.raises(AssertionError):
        # metrics must be a collection, not a bare string.
        eval_hmean(results, img_infos, ann_infos, metrics='hmean-iou')
    eval_results = eval_hmean(results, img_infos, ann_infos, metrics=metrics)
    assert (eval_results['hmean-iou:hmean'] == 1)
    assert (eval_results['hmean-ic13:hmean'] == 1)
def node_name_from_input(node_name):
    """Normalize a TF graph input name to a bare node name.

    Strips a leading control-dependency marker ('^') and a trailing
    output-port suffix (':<digits>') when present.
    """
    stripped = node_name[1:] if node_name.startswith('^') else node_name
    match = re.search('(.*):\\d+$', stripped)
    return match.group(1) if match else stripped
class UVTrianglesRenderer():
    """Renders triangles in UV space, sampling colors from a texture, into an
    offscreen float32 framebuffer via ModernGL."""

    def __init__(self, ctx: MGL.Context, output_size: Tuple[(int, int)]):
        self.ctx = ctx
        self.output_size = output_size
        self.shader = self.ctx.program(vertex_shader=VERTEX_SHADER, fragment_shader=FRAGMENT_SHADER)
        self.fbo = self.ctx.framebuffer(self.ctx.renderbuffer(self.output_size, dtype='f4'))
        self.background_color = (0, 0, 0)

    def __del__(self):
        # Bug fix: release the GL objects owned by the context before the
        # context itself; the original released `ctx` first, so the
        # subsequent fbo/shader releases ran against a destroyed context.
        self.fbo.release()
        self.shader.release()
        self.ctx.release()

    # Bug fix: both factories take no `self` and were clearly meant to be
    # called on the class; @staticmethod also makes instance-level calls safe.
    @staticmethod
    def with_standalone_ctx(output_size: Tuple[(int, int)]) -> 'UVTrianglesRenderer':
        """Factory: renderer backed by a headless (standalone) GL 3.3 context."""
        return UVTrianglesRenderer(MGL.create_standalone_context(require=330), output_size)

    @staticmethod
    def with_window_ctx(output_size: Tuple[(int, int)]) -> 'UVTrianglesRenderer':
        """Factory: renderer attached to the current window's GL 3.3 context."""
        return UVTrianglesRenderer(MGL.create_context(require=330), output_size)

    def _init_ctx_object(self, tex_coords, tri_indices, texture):
        """Upload buffers/texture and build the VAO.

        Returns (vao, resources) where `resources` lists every GL object the
        caller must release.
        """
        resources = []
        tex_coords_buffer = self.ctx.buffer(tex_coords.astype('f4').tobytes())
        resources.append(tex_coords_buffer)
        tri_indices_buffer = self.ctx.buffer(tri_indices.astype('i4').tobytes())
        resources.append(tri_indices_buffer)
        texture_height = texture.shape[0]
        texture_width = texture.shape[1]
        # Grayscale textures come in as 2-D arrays (single component).
        if (len(texture.shape) == 3):
            components = texture.shape[2]
        else:
            components = 1
        texture_object = self.ctx.texture((texture_width, texture_height), components, texture.astype('f4').tobytes(), dtype='f4')
        resources.append(texture_object)
        content = (tex_coords_buffer, '2f4', 'point_uv')
        # Bind the texture to unit 0, matching the sampler uniform.
        self.shader['texture_color'] = 0
        texture_object.use(0)
        vao = self.ctx.vertex_array(self.shader, [content], tri_indices_buffer)
        resources.append(vao)
        return (vao, resources)

    def _render(self, vao):
        """Clear to the background color and draw the VAO into the FBO."""
        self.fbo.use()
        self.ctx.clear(*self.background_color)
        vao.render()
        self.ctx.finish()

    def _get_fbo_image(self):
        """Read the FBO back as an (H, W, 3) float32 image with top-left origin."""
        fbo_image = self.fbo.read(dtype='f4')
        fbo_image = np.frombuffer(fbo_image, dtype='f4').reshape(self.output_size[1], self.output_size[0], 3)
        # GL framebuffers are bottom-up; flip to conventional image layout.
        fbo_image = np.flipud(fbo_image)
        return fbo_image

    def render(self, tex_coords: NDArray[((Any, 2), float)], tri_indices: NDArray[((Any, 3), int)], texture: NDArray[((Any, Any, 3), float)], flip_y: bool=True) -> NDArray[((Any, Any, 3), float)]:
        """Render the indexed UV triangles over `texture` and return the image.

        GL resources created for this call are always released, even when
        rendering fails.
        """
        assert isinstance(tex_coords, NDArray[((Any, 2), float)])
        assert isinstance(tri_indices, NDArray[((Any, 3), int)])
        assert isinstance(texture, NDArray[((Any, Any, 3), float)])
        if flip_y:
            texture = np.flipud(texture)
        resources = []
        try:
            (vao, resources) = self._init_ctx_object(tex_coords, tri_indices, texture)
            self._render(vao)
            result = self._get_fbo_image()
            return result
        finally:
            for resource in resources:
                resource.release()
class FMASmall(Dataset):
    """FMA-small audio dataset: mono waveforms resampled to 16 kHz, with
    genre labels encoded via a scikit-learn LabelEncoder.

    `subset` selects the split; non-testing subsets load a random 3-second
    slice of each track, the testing subset loads the full track.
    """
    # Extension of the raw audio files on disk.
    _ext_audio = '.mp3'

    def __init__(self, root: Union[(str, Path)], audio_transform: Callable=None, subset: Optional[str]='training') -> None:
        super().__init__()
        self.subset = subset
        # Random cropping everywhere except the test split.
        self.random_crop = (self.subset != 'testing')
        assert ((subset is None) or (subset in ['training', 'validation', 'testing'])), ('When `subset` not None, it must take a value from ' + "{'training', 'validation', 'testing'}.")
        self._path = os.fspath(root)
        if (not os.path.isdir(self._path)):
            raise RuntimeError('Dataset not found. Please use `download=True` to download it.')
        tracks = parse_annotation_file(os.path.join(self._path, 'fma_metadata/tracks.csv'))
        # Restrict to the 'small' FMA subset; this local mask deliberately
        # replaces the `subset` parameter from here on.
        subset = (tracks[('set', 'subset')] <= 'small')
        tracks = tracks.loc[subset]
        if (self.subset == 'training'):
            self.file_list = tracks.index[(tracks[('set', 'split')] == 'training')]
        elif (self.subset == 'validation'):
            self.file_list = tracks.index[(tracks[('set', 'split')] == 'validation')]
        elif (self.subset == 'testing'):
            # NOTE(review): the 'testing' subset also reads the 'validation'
            # split — confirm whether this should be the 'test' split.
            self.file_list = tracks.index[(tracks[('set', 'split')] == 'validation')]
        # Drop known-problematic track ids.
        self.file_list = [i for i in self.file_list if (i not in [108925, 98567, 98569, 133297, 98565, 99134])]
        self.annotations = tracks[('track', 'genre_top')]
        self.lb = preprocessing.LabelEncoder()
        self.lb.fit(self.annotations)

    def load_audio(self, sample_id):
        """Load one track as a mono waveform, resampled to 16 kHz if needed."""
        tid_str = '{:06d}'.format(sample_id)
        # FMA shards audio files into directories named by the id's first
        # three digits.
        path_to_audio = os.path.join(self._path, 'fma_small', tid_str[:3], (tid_str + FMASmall._ext_audio))
        if self.random_crop:
            (waveform, sample_rate) = load_random_slice(path_to_audio, slice_length=3)
        else:
            (waveform, sample_rate) = torchaudio.load(path_to_audio)
        # Downmix to mono by averaging channels.
        waveform = torch.mean(waveform, dim=0)
        if (sample_rate != 16000):
            waveform = resample(waveform, sample_rate)
        return waveform

    def get_label(self, sample_id):
        """Integer-encoded genre label for a track id."""
        label = self.lb.transform([self.annotations[sample_id]])[0]
        return label

    def __len__(self) -> int:
        return len(self.file_list)

    def __getitem__(self, n: int) -> Tuple[(Tensor, int, str)]:
        sample_id = self.file_list[n]
        waveform = self.load_audio(sample_id)
        label = self.get_label(sample_id)
        return (waveform, label)

    # Bug fix: declared with `cls` but missing @classmethod, so class-level
    # calls (FMASmall.num_classes()) would fail with a missing argument.
    @classmethod
    def num_classes(cls):
        return 8
class PixelNormLayer(nn.Module):
    """Pixel-wise feature normalization (ProGAN-style): scales each feature
    vector along dim 1 to approximately unit RMS, with a small epsilon for
    numerical stability."""

    def __init__(self):
        super(PixelNormLayer, self).__init__()

    def forward(self, x):
        # Mean of squares over the channel dimension, kept for broadcasting.
        mean_sq = torch.mean(x ** 2, dim=1, keepdim=True)
        return x * torch.rsqrt(mean_sq + 1e-08)

    def __repr__(self):
        return self.__class__.__name__
def train(loader_src, loader_tgt, net, opt_net, opt_dis, epoch):
    """One ADDA training epoch: alternate discriminator and generator steps.

    The discriminator learns to separate source (label 1) from target
    (label 0) features; once its batch accuracy exceeds 60%, the target
    network is updated to fool it.

    Returns the index of the last batch in which the generator was updated
    (-1 if the discriminator never crossed the accuracy threshold).
    """
    log_interval = 100
    N = min(len(loader_src.dataset), len(loader_tgt.dataset))
    joint_loader = zip(loader_src, loader_tgt)
    net.train()
    last_update = (- 1)
    for (batch_idx, ((data_s, _), (data_t, _))) in enumerate(joint_loader):
        info_str = '[Train Adda] Epoch: {} [{}/{} ({:.2f}%)]'.format(epoch, (batch_idx * len(data_t)), N, ((100 * batch_idx) / N))
        data_s = make_variable(data_s, requires_grad=False)
        data_t = make_variable(data_t, requires_grad=False)
        # --- Discriminator step: classify source vs target features. ---
        opt_dis.zero_grad()
        score_s = net.src_net(data_s)
        score_t = net.tgt_net(data_t)
        f = torch.cat((score_s, score_t), 0)
        pred_concat = net.discriminator(f)
        target_dom_s = make_variable(torch.ones(len(data_s)).long(), requires_grad=False)
        target_dom_t = make_variable(torch.zeros(len(data_t)).long(), requires_grad=False)
        label_concat = torch.cat((target_dom_s, target_dom_t), 0)
        loss_dis = net.gan_criterion(pred_concat, label_concat)
        loss_dis.backward()
        opt_dis.step()
        pred_dis = torch.squeeze(pred_concat.max(1)[1])
        acc = (pred_dis == label_concat).float().mean()
        info_str += ' acc: {:0.1f} D: {:.3f}'.format((acc.item() * 100), loss_dis.item())
        # --- Generator step: only when the discriminator is confident; the
        # target net is trained to make its features look like source (1). ---
        if (acc.item() > 0.6):
            last_update = batch_idx
            opt_dis.zero_grad()
            opt_net.zero_grad()
            score_t = net.tgt_net(data_t)
            pred_tgt = net.discriminator(score_t)
            label_tgt = make_variable(torch.ones(pred_tgt.size(0)).long(), requires_grad=False)
            loss_gan_t = net.gan_criterion(pred_tgt, label_tgt)
            loss_gan_t.backward()
            opt_net.step()
            info_str += ' G: {:.3f}'.format(loss_gan_t.item())
        if ((batch_idx % log_interval) == 0):
            print(info_str)
    return last_update
class TextModelTrainer(object):
    """Trainer for text-classification models.

    Optional features, all toggled through the `hparams` dict: MODALS
    feature-space augmentation policies, mixup / manifold mixup, an
    adversarial prior on the latent space (discriminator `D`), and online
    triplet metric learning.
    """

    def __init__(self, hparams, name=''):
        self.hparams = hparams
        print(hparams)
        self.name = name
        # Fixed seed only around data loading so splits are reproducible;
        # reseeded from entropy immediately afterwards.
        random.seed(0)
        (self.train_loader, self.valid_loader, self.test_loader, self.classes, self.vocab) = get_text_dataloaders(hparams['dataset_name'], valid_size=hparams['valid_size'], batch_size=hparams['batch_size'], subtrain_ratio=hparams['subtrain_ratio'], dataroot=hparams['dataset_dir'])
        random.seed()
        self.device = torch.device((hparams['gpu_device'] if torch.cuda.is_available() else 'cpu'))
        print()
        print('### Device ###')
        print(self.device)
        (self.net, self.z_size, self.file_name) = build_model(hparams['model_name'], self.vocab, len(self.classes))
        self.net = self.net.to(self.device)
        self.criterion = nn.CrossEntropyLoss()
        # Optimizer and auxiliary losses are only needed for train/search.
        if (hparams['mode'] in ['train', 'search']):
            self.optimizer = optim.Adam(self.net.parameters(), 0.001)
            self.loss_dict = {'train': [], 'valid': []}
            if hparams['use_modals']:
                print('\n=> ### Policy ###')
                raw_policy = RawPolicy(mode=hparams['mode'], num_epochs=hparams['num_epochs'], hp_policy=hparams['hp_policy'], policy_path=hparams['policy_path'])
                transformations = aug_trans
                self.pm = PolicyManager(transformations, raw_policy, len(self.classes), self.device)
            print('\n### Loss ###')
            print('Classification Loss')
            if hparams['mixup']:
                print('Mixup')
            if hparams['enforce_prior']:
                # Adversarial prior: discriminator D pushes latent features
                # toward the prior distribution sampled in _train.
                print('Adversarial Loss')
                self.EPS = 1e-15
                self.D = Discriminator(self.z_size)
                self.D = self.D.to(self.device)
                self.D_optimizer = optim.SGD(self.D.parameters(), lr=0.01, momentum=hparams['momentum'], weight_decay=hparams['wd'])
            if hparams['metric_learning']:
                margin = hparams['metric_margin']
                metric_loss = hparams['metric_loss']
                metric_weight = hparams['metric_weight']
                print(f'Metric Loss (margin: {margin} loss: {metric_loss} weight: {metric_weight})')
                self.M_optimizer = optim.SGD(self.net.parameters(), momentum=0.9, lr=0.001, weight_decay=1e-08)
                self.metric_weight = hparams['metric_weight']
                # Triplet-selection strategy for the online triplet loss.
                if (metric_loss == 'random'):
                    self.metric_loss = OnlineTripletLoss(margin, RandomNegativeTripletSelector(margin))
                elif (metric_loss == 'hardest'):
                    self.metric_loss = OnlineTripletLoss(margin, HardestNegativeTripletSelector(margin))
                elif (metric_loss == 'semihard'):
                    self.metric_loss = OnlineTripletLoss(margin, SemihardNegativeTripletSelector(margin))

    def reset_model(self, z_size=256):
        """Rebuild the network (and its optimizer/loss history) from scratch."""
        (self.net, self.z_size, self.file_name) = build_model(self.hparams['model_name'], self.vocab, len(self.classes), z_size)
        self.net = self.net.to(self.device)
        self.optimizer = optim.Adam(self.net.parameters(), 0.001)
        self.loss_dict = {'train': [], 'valid': []}

    def reset_discriminator(self, z_size=256):
        """Rebuild the adversarial-prior discriminator and its optimizer."""
        self.D = Discriminator(z_size)
        self.D = self.D.to(self.device)
        self.D_optimizer = optim.SGD(self.D.parameters(), lr=0.01, momentum=self.hparams['momentum'], weight_decay=self.hparams['wd'])

    def update_policy(self, policy):
        """Swap in a new single-epoch augmentation policy."""
        raw_policy = RawPolicy(mode='train', num_epochs=1, hp_policy=policy, policy_path=None)
        self.pm.update_policy(raw_policy)

    def _train(self, cur_epoch):
        """Run one training epoch; returns (accuracy, mean loss per sample)."""
        self.net.train()
        self.net.training = True
        self.scheduler = lr_scheduler.CosineAnnealingLR(self.optimizer, len(self.train_loader))
        train_losses = 0.0
        clf_losses = 0.0
        metric_losses = 0.0
        d_losses = 0.0
        g_losses = 0.0
        correct = 0
        total = 0
        n_batch = len(self.train_loader)
        print(f'''
=> Training Epoch #{cur_epoch}''')
        for (batch_idx, batch) in enumerate(self.train_loader):
            (inputs, seq_lens, labels) = (batch.text[0].to(self.device), batch.text[1].to(self.device), batch.label.to(self.device))
            # Labels come 1-based from the loader; shift to 0-based.
            labels -= 1
            seed_features = self.net.extract_features(inputs, seq_lens)
            features = seed_features
            if self.hparams['manifold_mixup']:
                (features, targets_a, targets_b, lam) = mixup_data(features, labels, 0.2, use_cuda=True)
                (features, targets_a, targets_b) = map(Variable, (features, targets_a, targets_b))
            if self.hparams['use_modals']:
                # MODALS: augment in feature space according to the policy.
                features = self.pm.apply_policy(features, labels, cur_epoch, batch_idx, verbose=1).to(self.device)
            outputs = self.net.classify(features)
            if self.hparams['mixup']:
                # NOTE(review): this mixes the model *outputs* (and assigns
                # them to `inputs`), not the raw inputs — confirm intended.
                (inputs, targets_a, targets_b, lam) = mixup_data(outputs, labels, self.hparams['alpha'], use_cuda=True)
                (inputs, targets_a, targets_b) = map(Variable, (outputs, targets_a, targets_b))
            if self.hparams['enforce_prior']:
                # Freeze D while computing the generator-side loss.
                for p in self.D.parameters():
                    p.requires_grad = False
            if (self.hparams['mixup'] or self.hparams['manifold_mixup']):
                c_loss = mixup_criterion(self.criterion, outputs, targets_a, targets_b, lam)
            else:
                c_loss = self.criterion(outputs, labels)
            clf_losses += c_loss.item()
            loss = c_loss
            if self.hparams['metric_learning']:
                # Triplet loss on the pre-augmentation features, blended
                # with the classification loss.
                m_loss = self.metric_loss(seed_features, labels)[0]
                metric_losses += m_loss.item()
                loss = ((self.metric_weight * m_loss) + ((1 - self.metric_weight) * c_loss))
            train_losses += loss.item()
            if self.hparams['enforce_prior']:
                # NOTE(review): duplicates the requires_grad=False loop from
                # just above — possibly redundant; preserved as-is.
                for p in self.D.parameters():
                    p.requires_grad = False
                self.net.train()
                d_fake = self.D(features)
                g_loss = (self.hparams['prior_weight'] * adverserial_loss(d_fake, self.EPS))
                g_losses += g_loss.item()
                loss += g_loss
            self.optimizer.zero_grad()
            loss.backward()
            clip_grad_norm_(self.net.parameters(), 5.0)
            self.optimizer.step()
            if self.hparams['enforce_prior']:
                # Discriminator step: real = samples from the prior (randn),
                # fake = current latent features.
                for p in self.D.parameters():
                    p.requires_grad = True
                features = self.net.extract_features(inputs, seq_lens)
                d_real = self.D(torch.randn(features.size()).to(self.device))
                d_fake = self.D(F.softmax(features, dim=0))
                d_loss = discriminator_loss(d_real, d_fake, self.EPS)
                self.D_optimizer.zero_grad()
                d_loss.backward()
                self.D_optimizer.step()
                d_losses += d_loss.item()
            (_, predicted) = torch.max(outputs.data, 1)
            total += labels.size(0)
            if self.hparams['mixup']:
                # Mixup accuracy: weighted agreement with both mixed targets.
                correct += ((lam * predicted.eq(targets_a.data).cpu().sum().float()) + ((1 - lam) * predicted.eq(targets_b.data).cpu().sum().float()))
            else:
                correct += (predicted == labels).sum().item()
            step = (((cur_epoch - 1) * len(self.train_loader)) + batch_idx)
            total_steps = (self.hparams['num_epochs'] * len(self.train_loader))
            # NOTE(review): the display averages losses by n_batch while the
            # return value averages by sample count — preserved as-is.
            display = f"| Epoch [{cur_epoch}/{self.hparams['num_epochs']}] Iter[{step}/{total_steps}] Loss: {(train_losses / n_batch):.4f} : {(correct / total):.4f} clf_loss: {(clf_losses / n_batch):.4f}"
            if self.hparams['enforce_prior']:
                display += f' d_loss: {(d_losses / n_batch):.4f} g_loss: {(g_losses / n_batch):.4f}'
            if self.hparams['metric_learning']:
                display += f' metric_loss: {(metric_losses / n_batch):.4f}'
            print(display)
        return ((correct / total), (train_losses / total))

    def _test(self, cur_epoch, mode):
        """Evaluate on the validation or test loader; returns (acc, mean loss)."""
        self.net.eval()
        self.net.training = False
        correct = 0
        total = 0
        test_loss = 0.0
        data_loader = (self.valid_loader if (mode == 'valid') else self.test_loader)
        with torch.no_grad():
            for (batch_idx, batch) in enumerate(data_loader):
                (inputs, seq_lens, labels) = (batch.text[0].to(self.device), batch.text[1].to(self.device), batch.label.to(self.device))
                # Same 1-based-to-0-based label shift as in training.
                labels -= 1
                outputs = self.net(inputs, seq_lens)
                loss = self.criterion(outputs, labels)
                test_loss += loss.item()
                (_, predicted) = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
                torch.cuda.empty_cache()
        print(f'| ({mode}) Epoch #{cur_epoch} Loss: {(test_loss / total):.4f} : {(correct / total):.4f}')
        return ((correct / total), (test_loss / total))

    def run_model(self, epoch):
        """One epoch of train (+ optional validation); returns (train_acc, val_acc)."""
        if self.hparams['use_modals']:
            # Refresh the MODALS data pool from the current network state.
            self.pm.reset_text_data_pool(self.net, self.train_loader, self.hparams['temperature'], self.hparams['distance_metric'], self.hparams['dataset_name'])
        (train_acc, tl) = self._train(epoch)
        self.loss_dict['train'].append(tl)
        if (self.hparams['valid_size'] > 0):
            (val_acc, vl) = self._test(epoch, mode='valid')
            self.loss_dict['valid'].append(vl)
        else:
            val_acc = 0.0
        return (train_acc, val_acc)

    def save_checkpoint(self, ckpt_dir, epoch):
        """Save net/optimizer/scheduler state under a dataset-specific name."""
        path = os.path.join(ckpt_dir, self.hparams['dataset_name'], f'{self.name}_{self.file_name}')
        if (not os.path.exists(ckpt_dir)):
            os.makedirs(ckpt_dir)
        torch.save({'state': self.net.state_dict(), 'epoch': epoch, 'loss': self.loss_dict, 'optimizer': self.optimizer.state_dict(), 'scheduler': self.scheduler.state_dict()}, path)
        print(f'=> saved the model {self.file_name} to {path}')
        return path

    def save_model(self, ckpt_dir, epoch):
        """Save the same state dict as save_checkpoint, directly in ckpt_dir."""
        print(self.file_name)
        print(ckpt_dir)
        path = os.path.join(ckpt_dir, self.file_name)
        if (not os.path.exists(ckpt_dir)):
            os.makedirs(ckpt_dir)
        torch.save({'state': self.net.state_dict(), 'epoch': epoch, 'loss': self.loss_dict, 'optimizer': self.optimizer.state_dict(), 'scheduler': self.scheduler.state_dict()}, path)
        print(f'=> saved the model {self.file_name} to {path}')
        return path

    def load_model(self, ckpt):
        """Restore a checkpoint; optimizer/scheduler only outside test mode.

        Returns (epoch, loss_dict) from the checkpoint.
        """
        checkpoint = torch.load(ckpt, map_location=torch.device('cpu'))
        self.net.load_state_dict(checkpoint['state'])
        self.loss_dict = checkpoint['loss']
        if (self.hparams['mode'] != 'test'):
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.scheduler.load_state_dict(checkpoint['scheduler'])
        print(f'=> loaded checkpoint of {self.file_name} from {ckpt}')
        return (checkpoint['epoch'], checkpoint['loss'])

    def reset_config(self, new_hparams):
        """Replace hparams and rebuild the augmentation policy accordingly."""
        self.hparams = new_hparams
        new_policy = RawPolicy(mode=self.hparams['mode'], num_epochs=self.hparams['num_epochs'], hp_policy=self.hparams['hp_policy'])
        self.pm.update_policy(new_policy)
        return
class TestOffPolicyVectorizedSampler(TfGraphTestCase):
    """With no_reset=True, the sampler must continue the in-progress path
    across obtain_samples calls instead of resetting the environment."""

    # Bug fix: the source contained a bare `.mujoco` token (invalid syntax),
    # evidently the tail of a stripped marker decorator; restored as
    # @pytest.mark.mujoco — TODO confirm against the project's marker names.
    @pytest.mark.mujoco
    def test_no_reset(self):
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
            policy = ContinuousMLPPolicy(env_spec=env.spec, hidden_sizes=[64, 64], hidden_nonlinearity=tf.nn.relu, output_nonlinearity=tf.nn.tanh)
            exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec, policy, sigma=0.2)
            qf = ContinuousMLPQFunction(env_spec=env.spec, hidden_sizes=[64, 64], hidden_nonlinearity=tf.nn.relu)
            replay_buffer = PathBuffer(capacity_in_transitions=int(1000000.0))
            algo = DDPG(env_spec=env.spec, policy=policy, policy_lr=0.0001, qf_lr=0.001, qf=qf, replay_buffer=replay_buffer, target_update_tau=0.01, n_train_steps=50, discount=0.9, min_buffer_size=int(10000.0), exploration_policy=exploration_policy)
            sampler = OffPolicyVectorizedSampler(algo, env, 1, no_reset=True)
            sampler.start_worker()
            runner.initialize_tf_vars()
            paths1 = sampler.obtain_samples(0, 5)
            paths2 = sampler.obtain_samples(0, 5)
            len1 = sum([len(path['rewards']) for path in paths1])
            len2 = sum([len(path['rewards']) for path in paths2])
            assert ((len1 == 5) and (len2 == 5)), 'Sampler should respect batch_size'
            # If the last path of batch 1 did not terminate, the first path
            # of batch 2 continues it (case1); otherwise it is fresh (case2).
            case1 = ((len(paths1[(- 1)]['rewards']) + len(paths2[0]['rewards'])) == paths2[0]['running_length'])
            case2 = (len(paths2[0]['rewards']) == paths2[0]['running_length'])
            done = paths1[(- 1)]['dones'][(- 1)]
            assert (((not done) and case1) or (done and case2)), 'Running length should be the length of full path'
            case1 = np.isclose((paths1[(- 1)]['rewards'].sum() + paths2[0]['rewards'].sum()), paths2[0]['undiscounted_return'])
            case2 = np.isclose(paths2[0]['rewards'].sum(), paths2[0]['undiscounted_return'])
            assert (((not done) and case1) or (done and case2)), 'Undiscounted_return should be the sum of rewards of full path'
class PrioritizedReplayBuffer():
    """Replay buffer that indexes transitions per task and per binarized
    reward (0/1) so batches can over-sample rewarding transitions.

    A transition is any indexable with at least 5 fields where index 1 is
    the task id and index 4 the (float) reward; rewards <= 0 bucket to 0.
    """

    def __init__(self, max_length, task_ids, p1=0.8):
        self.task_ids = task_ids
        # memory_task[task_id][r]: transitions of that task whose binarized
        # reward is r, in insertion order.
        self.memory_task = dict(((task_id, {0: [], 1: []}) for task_id in self.task_ids))
        # `stack` mirrors every stored transition in global insertion order
        # and drives FIFO eviction.
        self.stack = []
        self.max_length = max_length
        # Per-slot probability of sampling from the reward-1 pool.
        self.p1 = p1

    def get_memory_length(self):
        """Total number of stored transitions."""
        return len(self.stack)

    def append_trace(self, trace):
        """Append all transitions of a trace, evicting the oldest when full."""
        # Renamed from `tuple` to avoid shadowing the builtin.
        for transition in trace:
            reward = (0 if (transition[4] <= 0.0) else 1)
            if (len(self.stack) >= self.max_length):
                # Evict the globally oldest transition from both indexes.
                t_id = self.stack[0][1]
                r = (0 if (self.stack[0][4] <= 0.0) else 1)
                del self.memory_task[t_id][r][0]
                del self.stack[0]
            task_id = transition[1]
            self.memory_task[task_id][reward].append(transition)
            self.stack.append(transition)

    def _sample_sub_batch(self, batch_size, memory):
        """Sample `batch_size` transitions (with replacement when the pool is
        smaller) and return them as 5 parallel columns."""
        indices = np.arange(len(memory))
        sampled_indices = np.random.choice(indices, size=batch_size, replace=(batch_size > len(memory)))
        batch = [[], [], [], [], []]
        for i in sampled_indices:
            for k in range(5):
                batch[k].append(memory[i][k])
        return batch

    def sample_batch(self, batch_size):
        """Sample a batch (5 parallel columns of length `batch_size`), mixing
        reward-1 and reward-0 transitions with probability `p1` per slot.
        Returns None when nothing is eligible."""
        memory_0 = []
        memory_1 = []
        for task_id in self.memory_task:
            # NOTE(review): a task's reward-0 transitions are only eligible
            # when the task also has at least one reward-1 transition; this
            # preserves the original behavior — confirm it is intentional.
            if (len(self.memory_task[task_id][1]) > 0):
                memory_0 += self.memory_task[task_id][0]
                memory_1 += self.memory_task[task_id][1]
        if ((len(memory_0) == 0) and (len(memory_1) == 0)):
            return None
        elif ((len(memory_1) > 0) and (len(memory_0) == 0)):
            batch = self._sample_sub_batch(batch_size, memory_1)
        elif ((len(memory_0) > 0) and (len(memory_1) == 0)):
            batch = self._sample_sub_batch(batch_size, memory_0)
        else:
            buffer_binomial_distrib = np.random.binomial(1, self.p1, batch_size)
            sub_batch_r1_size = sum(buffer_binomial_distrib)
            sub_batch_r0_size = (batch_size - sub_batch_r1_size)
            assert ((sub_batch_r1_size + sub_batch_r0_size) == batch_size), 'problem with batch sizes!'
            batch_r1 = self._sample_sub_batch(sub_batch_r1_size, memory_1)
            batch_r0 = self._sample_sub_batch(sub_batch_r0_size, memory_0)
            # Bug fix: the original did `batch += sub_batch`, which produced
            # 10 columns (two 5-column batches concatenated) instead of one
            # batch of 5 columns; merge column-wise instead.
            batch = [batch_r1[k] + batch_r0[k] for k in range(5)]
        return (batch if batch else None)

    def empty_memory(self):
        """Drop all stored transitions."""
        self.memory_task = dict(((task_id, {0: [], 1: []}) for task_id in self.task_ids))
        self.stack = []
class CriterionAdvForG(nn.Module):
    """Generator-side adversarial loss.

    For both supported formulations ('wgan-gp' and 'hinge') the generator
    loss is the negative mean of the discriminator's score on fakes.
    """

    def __init__(self, adv_type):
        super(CriterionAdvForG, self).__init__()
        if adv_type not in ('wgan-gp', 'hinge'):
            raise ValueError('adv_type should be wgan-gp or hinge')
        self.adv_loss = adv_type

    def forward(self, d_out_S):
        # Discriminator output on generated (fake) samples.
        fake_score = d_out_S[0]
        if self.adv_loss in ('wgan-gp', 'hinge'):
            return -fake_score.mean()
        raise ValueError('args.adv_loss should be wgan-gp or hinge')
def aggregate(X, G, F, Y=None):
    """Dispatch feature aggregation to the CPU or GPU kernel based on X's device.

    Y serves as the output buffer: when not given it is allocated as zeros
    of shape F.shape + (X.shape[-1],); when given it is zeroed in place and
    reused. The kernels (`aggregate_cpu` / `aggregate_gpu`, defined
    elsewhere) fill Y by side effect; Y is returned.
    """
    device = X.device
    if (Y is None):
        Y = torch.zeros((F.shape + (X.shape[(- 1)],)), device=device, dtype=X.dtype)
    else:
        Y.zero_()
    if (device.type == 'cpu'):
        aggregate_cpu(X, G, F, Y)
    else:
        aggregate_gpu(X, G, F, Y)
    return Y
def json_dump(obj):
    """Serialize `obj` to compact, deterministically key-sorted JSON."""
    import json
    compact = (',', ':')
    return json.dumps(obj, sort_keys=True, separators=compact)
class TestPytorchWeightOnlyAdaptor(unittest.TestCase):
approach = 'weight_only'
def setUpClass(self):
self.dataloader = SimpleDataLoader()
self.gptj = transformers.AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-GPTJForCausalLM', torchscript=True)
self.gptj_no_jit = transformers.AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-GPTJForCausalLM')
self.llm_dataloader = LLMDataLoader()
self.lm_input = torch.ones([1, 10], dtype=torch.long)
def tearDownClass(self):
shutil.rmtree('./saved', ignore_errors=True)
shutil.rmtree('runs', ignore_errors=True)
def test_RTN_int_quant(self):
input = torch.randn(3, 30)
model = Model()
out1 = model(input)
conf = PostTrainingQuantConfig(approach='weight_only')
q_model = quantization.fit(model, conf)
q_model.save('saved')
out2 = q_model(input)
self.assertTrue(torch.all(torch.isclose(out1, out2, atol=0.5)))
self.assertFalse(torch.all((out1 == out2)))
compressed_model = q_model.export_compressed_model(use_optimum_format=False)
out3 = compressed_model(input)
self.assertTrue(('fc1.qweight' in compressed_model.state_dict().keys()))
self.assertTrue(('fc1.qzeros' not in compressed_model.state_dict().keys()))
shape2 = compressed_model.state_dict()['fc1.scales']
self.assertTrue(torch.all((out3 == out2)))
model = Model()
new_model = load('saved', model, weight_only=True)
inc_model = INCModel(new_model)
inc_model.export_compressed_model(qweight_config_path='saved/qconfig.json', use_optimum_format=True)
out4 = inc_model.model(input)
self.assertTrue(('fc1.qzeros' in inc_model.model.state_dict().keys()))
model = Model()
compressed_model = export_compressed_model(model, saved_dir='saved', use_optimum_format=True)
self.assertTrue(('fc1.qzeros' in inc_model.model.state_dict().keys()))
self.assertTrue(torch.allclose(out3, out4, atol=0.001))
model = Model()
out1 = model(input)
conf = PostTrainingQuantConfig(approach='weight_only', recipes={'rtn_args': {'enable_full_range': True}})
q_model = quantization.fit(model, conf)
out2 = q_model(input)
self.assertTrue(torch.all(torch.isclose(out1, out2, atol=0.5)))
self.assertFalse(torch.all((out1 == out2)))
compressed_model = q_model.export_compressed_model(use_optimum_format=False, enable_full_range=True)
out3 = compressed_model(input)
self.assertTrue(torch.all((out3 == out2)))
model = Model()
out1 = model(input)
conf = PostTrainingQuantConfig(approach='weight_only', op_type_dict={'.*': {'weight': {'dtype': 'int4'}}}, recipes={'rtn_args': {'enable_full_range': True, 'enable_mse_search': True}})
q_model = quantization.fit(model, conf)
out2 = q_model(input)
self.assertTrue(torch.all(torch.isclose(out1, out2, atol=0.5)))
self.assertFalse(torch.all((out1 == out2)))
model = Model()
out1 = model(input)
conf = PostTrainingQuantConfig(approach='weight_only', recipes={'rtn_args': {'group_dim': 0}})
q_model = quantization.fit(model, conf)
out2 = q_model(input)
self.assertTrue(torch.all(torch.isclose(out1, out2, atol=0.5)))
self.assertFalse(torch.all((out1 == out2)))
model = Model()
out1 = model(input)
conf = PostTrainingQuantConfig(approach='weight_only', op_type_dict={'.*': {'weight': {'bits': 8, 'group_size': (- 1), 'scheme': 'sym', 'algorithm': 'RTN'}}}, recipes={'rtn_args': {'return_int': True}})
q_model = quantization.fit(model, conf, eval_func=eval_func)
out2 = q_model(input)
self.assertTrue(isinstance(q_model.model.fc1, WeightOnlyLinear))
self.assertTrue(torch.all(torch.isclose(out1, out2, atol=0.5)))
self.assertFalse(torch.all((out1 == out2)))
model = Model()
out1 = model(input)
conf = PostTrainingQuantConfig(approach='weight_only', op_type_dict={'.*': {'weight': {'bits': 4, 'group_size': 32, 'scheme': 'asym', 'algorithm': 'RTN'}}})
q_model = quantization.fit(model, conf, eval_func=eval_func)
out2 = q_model(input)
self.assertTrue(torch.all(torch.isclose(out1, out2, atol=0.5)))
self.assertFalse(torch.all((out1 == out2)))
model = Model()
out1 = model(input)
conf = PostTrainingQuantConfig(approach='weight_only', op_name_dict={'fc1': {'weight': {'bits': 4, 'group_size': 32, 'scheme': 'sym', 'algorithm': 'RTN'}}, 'fc2': {'weight': {'bits': 3, 'group_size': 16, 'scheme': 'asym', 'algorithm': 'RTN'}}, 'fc3': {'weight': {'dtype': 'fp32'}}})
q_model = quantization.fit(model, conf, eval_func=eval_func)
out2 = q_model(input)
self.assertTrue(torch.all(torch.isclose(out1, out2, atol=0.5)))
self.assertFalse(torch.all((out1 == out2)))
q_model.save('saved')
new_model = load('saved', model, weight_only=True)
out1 = new_model(input)
self.assertTrue(torch.all((out1 == out2)))
model_size1 = (os.path.getsize('saved/best_model.pt') / 1024)
print('FP32 Model size:{:.3f}M'.format(model_size1))
inc_model = INCModel(new_model)
inc_model.export_compressed_model(use_optimum_format=False, qweight_config_path='saved/qconfig.json')
torch.save(inc_model.state_dict(), 'saved/tmp.pt')
model_size2 = (os.path.getsize('saved/tmp.pt') / 1024)
print('WeightOnlyLinear Model size:{:.3f}M'.format(model_size2))
self.assertTrue(isinstance(inc_model.model.fc1, WeightOnlyLinear))
self.assertTrue(((model_size1 / model_size2) > 2))
def test_RTN_4bit_quant(self):
    """RTN weight-only quantization at 4 bits across several low-bit dtypes.

    For each dtype the quantized model must stay close to the FP32
    baseline (atol=0.1) while actually differing from it, and the
    exported compressed model must reproduce the quantized outputs exactly.
    """
    for dtype in ['int4', 'nf4', 'fp4', 'fp4_e2m1_bnb', 'fp4_e2m1']:
        # Fresh copy per dtype so quantization runs never interact.
        model = copy.deepcopy(self.gptj)
        out1 = model(self.lm_input)
        conf = PostTrainingQuantConfig(approach='weight_only', op_type_dict={'.*': {'weight': {'dtype': dtype, 'group_size': 64, 'algorithm': 'RTN'}}})
        q_model = quantization.fit(model, conf)
        out2 = q_model(self.lm_input)
        # Quantization must be lossy-but-close: near FP32, not identical.
        self.assertTrue(torch.all(torch.isclose(out1[0], out2[0], atol=0.1)))
        self.assertFalse(torch.all((out1[0] == out2[0])))
        # Packing into the compressed format must be numerically exact.
        compressed_model = q_model.export_compressed_model(use_optimum_format=False)
        out3 = compressed_model(self.lm_input)
        self.assertTrue(torch.all((out3[0] == out2[0])))
def test_AWQ_quant(self):
    """End-to-end AWQ weight-only quantization on the GPT-J fixture.

    Covers three configurations:
      1. AWQ with per-op-name overrides, plus save/load and
         compressed-model export round trips (atol=1e-05).
      2. The same mix with ``return_int`` so AWQ-scaled layers become
         MulLinear and the RTN-overridden layer packs to WeightOnlyLinear.
      3. AWQ on the model variant that cannot be traced with JIT.
    """
    conf = PostTrainingQuantConfig(approach='weight_only', op_type_dict={'.*': {'weight': {'bits': 4, 'group_size': 32, 'scheme': 'asym', 'algorithm': 'AWQ'}}}, op_name_dict={'.*3.*': {'weight': {'dtype': 'fp32'}}, '.*4.*': {'weight': {'bits': 4, 'group_size': 32, 'scheme': 'asym', 'algorithm': 'RTN'}}, '.*lm_head': {'weight': {'dtype': 'fp32'}}}, recipes={'awq_args': {'enable_auto_scale': True, 'enable_mse_search': True, 'folding': False}})
    fp32_model = copy.deepcopy(self.gptj)
    q_model = quantization.fit(fp32_model, conf, calib_dataloader=self.llm_dataloader)
    q_model.save('saved')
    input = torch.ones([1, 10], dtype=torch.long)
    out1 = q_model(input)
    # Reloading from disk must reproduce the quantized outputs.
    from neural_compressor.utils.pytorch import load
    fp32_model = copy.deepcopy(self.gptj)
    reload_model = load('saved', fp32_model, weight_only=True)
    out2 = reload_model(input)
    # Export mutates q_model in place; outputs must still agree.
    q_model.export_compressed_model(use_optimum_format=False)
    out3 = q_model(input)
    self.assertTrue(torch.allclose(out1[0], out2[0], atol=1e-05))
    self.assertTrue(torch.allclose(out1[0], out3[0], atol=1e-05))
    self.assertTrue(isinstance(q_model.model.transformer.h[0].mlp.fc_in, WeightOnlyLinear))
    # lm_head was excluded via op_name_dict, so it stays a plain Linear.
    self.assertTrue(isinstance(q_model.model.lm_head, torch.nn.Linear))
    # return_int path: AWQ layers gain a Mul wrapper, fp32-excluded layers
    # stay Linear, and the RTN-overridden layer packs to WeightOnlyLinear.
    conf = PostTrainingQuantConfig(approach='weight_only', op_type_dict={'.*': {'weight': {'bits': 4, 'group_size': 32, 'scheme': 'asym', 'algorithm': 'AWQ'}}}, op_name_dict={'.*3.*': {'weight': {'dtype': 'fp32'}}, '.*4.*': {'weight': {'bits': 4, 'group_size': 32, 'scheme': 'asym', 'algorithm': 'RTN'}}, '.*lm_head': {'weight': {'dtype': 'fp32'}}}, recipes={'rtn_args': {'return_int': True}, 'awq_args': {'enable_auto_scale': True, 'enable_mse_search': True, 'folding': False}})
    fp32_model = copy.deepcopy(self.gptj)
    q_model = quantization.fit(fp32_model, conf, calib_dataloader=self.llm_dataloader)
    self.assertTrue(isinstance(q_model.model.transformer.h[0].mlp.fc_out, MulLinear))
    self.assertTrue(isinstance(q_model.model.transformer.h[3].mlp.fc_out, torch.nn.Linear))
    self.assertTrue(isinstance(q_model.model.transformer.h[4].mlp.fc_out, WeightOnlyLinear))
    # AWQ on the no-JIT model variant.
    conf = PostTrainingQuantConfig(approach='weight_only', op_type_dict={'.*': {'weight': {'bits': 4, 'group_size': 32, 'scheme': 'asym', 'algorithm': 'AWQ'}}})
    fp32_model = copy.deepcopy(self.gptj_no_jit)
    q_model = quantization.fit(fp32_model, conf, calib_dataloader=self.llm_dataloader)
    self.assertTrue(isinstance(q_model.model.transformer.h[0].mlp.fc_in, MulLinear))
    self.assertTrue(isinstance(q_model.model.transformer.h[0].mlp.fc_out, MulLinear))
def test_AWQ_nf4_quant(self):
    """nf4 weight-only quantization keeps outputs close and exports exactly.

    NOTE(review): despite the test name, the config requests
    ``algorithm='RTN'`` — confirm whether AWQ was intended here.
    """
    input = torch.ones([1, 10], dtype=torch.long)
    fp32_model = copy.deepcopy(self.gptj)
    out1 = fp32_model(input)
    conf = PostTrainingQuantConfig(approach='weight_only', op_type_dict={'.*': {'weight': {'dtype': 'nf4', 'group_size': 32, 'algorithm': 'RTN'}}}, op_name_dict={'lm_head': {'weight': {'dtype': 'fp32'}}})
    q_model = quantization.fit(fp32_model, conf, calib_dataloader=self.llm_dataloader)
    out2 = q_model(input)
    self.assertTrue(torch.allclose(out1[0], out2[0], atol=0.1))
    # Packing to the compressed format must be lossless w.r.t. the q-model.
    compressed_model = q_model.export_compressed_model(use_optimum_format=False)
    out3 = compressed_model(input)
    self.assertTrue(torch.all((out3[0] == out2[0])))
def test_AWQ_util(self):
    """get_module_input_output must capture per-module activations.

    Wires a two-layer MLP and checks that fc1's recorded output equals
    fc2's recorded input, and fc2's recorded output equals the model output.
    """
    from neural_compressor.adaptor.torch_utils.util import get_module_input_output

    class DemoModel(torch.nn.Module):
        # Minimal two-layer network; only used to exercise the hooks.
        def __init__(self):
            super(DemoModel, self).__init__()
            self.fc1 = torch.nn.Linear(3, 4)
            self.fc2 = torch.nn.Linear(4, 3)

        def forward(self, x):
            out = self.fc1(x)
            out = self.fc2(out)
            return out

    tmp = torch.randn([3, 3])

    class DemoCalibDataloader():
        # Yields the same fixed batch three times so captures are deterministic.
        def __init__(self):
            self.batch_size = 1

        def __iter__(self):
            for i in range(3):
                (yield tmp)

    module_hook_config = {'fc1': ['output'], 'fc2': ['input', 'output']}
    model = DemoModel()
    out = model(tmp)
    values = get_module_input_output(model, module_hook_config, DemoCalibDataloader())
    # fc1's output feeds fc2 directly, and fc2's output is the model output.
    self.assertTrue(torch.allclose(values['fc1']['output'][0], values['fc2']['input'][0]))
    self.assertTrue(torch.allclose(values['fc2']['output'][0], out))
def test_GPTQ_fixed_length_quant(self):
    """GPTQ weight-only quantization with fixed-length calibration samples.

    Exercises three calibration-batch formats (plain tensor, tuple,
    dict with attention mask), each with a save + export round trip.
    """

    class GPTQLLMDataLoader():
        # Plain tensor batches of fixed length 512.
        def __init__(self):
            self.batch_size = 1

        def __iter__(self):
            for i in range(10):
                (yield torch.ones([1, 512], dtype=torch.long))

    class GPTQLLMDataLoaderList():
        # (input_ids, attention_mask) tuple batches.
        def __init__(self):
            self.batch_size = 1

        def __iter__(self):
            for i in range(10):
                (yield (torch.ones([1, 512], dtype=torch.long), torch.ones([1, 512], dtype=torch.long)))

    class GPTQLLMDataLoaderDict():
        # Keyword-style batches matching HF forward signatures.
        def __init__(self):
            self.batch_size = 1

        def __iter__(self):
            for i in range(10):
                (yield {'input_ids': torch.ones([1, 512], dtype=torch.long), 'attention_mask': torch.ones([1, 512], dtype=torch.long)})

    dataloader = GPTQLLMDataLoader()
    dataloader_list = GPTQLLMDataLoaderList()
    dataloader_dict = GPTQLLMDataLoaderDict()
    conf = PostTrainingQuantConfig(approach='weight_only', op_type_dict={'.*': {'weight': {'bits': 4, 'group_size': 8, 'scheme': 'sym', 'algorithm': 'GPTQ'}}}, op_name_dict={'.*lm_head': {'weight': {'dtype': 'fp32'}}}, recipes={'gptq_args': {'percdamp': 0.01, 'act_order': False, 'use_max_length': True, 'pad_max_length': 512}})
    # Case 1: tensor dataloader; quantized output must stay near FP32 and
    # the non-optimum export must be near-exact w.r.t. the q-model.
    model_1 = copy.deepcopy(self.gptj)
    input = torch.ones([1, 512], dtype=torch.long)
    out0 = model_1(input)
    q_model = quantization.fit(model_1, conf, calib_dataloader=dataloader)
    q_model.save('saved')
    out1 = q_model.model(input)
    self.assertTrue(torch.allclose(out1[0], out0[0], atol=0.01))
    compressed_model = q_model.export_compressed_model(use_optimum_format=False)
    out2 = compressed_model(input)
    torch.save(compressed_model.state_dict(), 'saved/compressed_model.pt')
    self.assertTrue(torch.allclose(out1[0], out2[0], atol=1e-05))
    # Case 2: tuple dataloader with asym scheme and optimum export format
    # (looser tolerance than the non-optimum path).
    model_2 = copy.deepcopy(self.gptj)
    input = torch.ones([1, 512], dtype=torch.long)
    conf.op_type_dict = {'.*': {'weight': {'bits': 4, 'group_size': 8, 'scheme': 'asym', 'algorithm': 'GPTQ'}}}
    q_model = quantization.fit(model_2, conf, calib_dataloader=dataloader_list)
    q_model.save('saved')
    out1 = q_model.model(input)
    compressed_model = q_model.export_compressed_model(use_optimum_format=True)
    out2 = compressed_model(input)
    print(out1[0])
    print(out2[0])
    torch.save(compressed_model.state_dict(), 'saved/compressed_model.pt')
    self.assertTrue(torch.allclose(out1[0], out2[0], atol=0.0002))
    # Case 3: dict dataloader, non-optimum export.
    model_3 = copy.deepcopy(self.gptj)
    input = torch.ones([1, 512], dtype=torch.long)
    q_model = quantization.fit(model_3, conf, calib_dataloader=dataloader_dict)
    q_model.save('saved')
    out1 = q_model.model(input)
    compressed_model = q_model.export_compressed_model(use_optimum_format=False)
    out2 = compressed_model(input)
    torch.save(compressed_model.state_dict(), 'saved/compressed_model.pt')
    self.assertTrue(torch.allclose(out1[0], out2[0], atol=1e-05))
    print('GPTQ with fixed length Done')
def test_GPTQ_unfixed_length_quant(self):
    """GPTQ weight-only quantization with variable-length calibration samples.

    NOTE(review): sample lengths come from an unseeded ``random.randint``,
    so calibration differs run to run; the tolerances are expected to
    absorb that — confirm this is intentional.
    """
    import random

    class GPTQLLMDataLoader():
        # Plain tensor batches of random length in [1, 1024].
        def __init__(self):
            self.batch_size = 1

        def __iter__(self):
            for i in range(10):
                length = random.randint(1, 1024)
                (yield torch.ones([1, length], dtype=torch.long))

    class GPTQLLMDataLoaderList():
        # (input_ids, attention_mask) tuple batches of random length.
        def __init__(self):
            self.batch_size = 1

        def __iter__(self):
            for i in range(10):
                length = random.randint(1, 1024)
                (yield (torch.ones([1, length], dtype=torch.long), torch.ones([1, length], dtype=torch.long)))

    class GPTQLLMDataLoaderDict():
        # Keyword-style batches of random length.
        def __init__(self):
            self.batch_size = 1

        def __iter__(self):
            for i in range(10):
                length = random.randint(1, 1024)
                (yield {'input_ids': torch.ones([1, length], dtype=torch.long), 'attention_mask': torch.ones([1, length], dtype=torch.long)})

    dataloader = GPTQLLMDataLoader()
    dataloader_list = GPTQLLMDataLoaderList()
    dataloader_dict = GPTQLLMDataLoaderDict()
    conf = PostTrainingQuantConfig(approach='weight_only', op_type_dict={'.*': {'weight': {'bits': 4, 'group_size': 8, 'scheme': 'sym', 'algorithm': 'GPTQ'}}}, op_name_dict={'.*lm_head': {'weight': {'dtype': 'fp32'}}}, recipes={'gptq_args': {'percdamp': 0.01, 'act_order': False, 'use_max_length': False, 'pad_max_length': 512}})
    # Case 1: tensor dataloader with default export format.
    model_1 = copy.deepcopy(self.gptj)
    input = torch.ones([1, 512], dtype=torch.long)
    out0 = model_1(input)
    q_model = quantization.fit(model_1, conf, calib_dataloader=dataloader)
    q_model.save('saved')
    out1 = q_model.model(input)
    self.assertTrue(torch.allclose(out1[0], out0[0], atol=0.01))
    compressed_model = q_model.export_compressed_model()
    out2 = compressed_model(input)
    torch.save(compressed_model.state_dict(), 'saved/compressed_model.pt')
    self.assertTrue(torch.allclose(out1[0], out2[0], atol=0.0002))
    # Case 2: tuple dataloader; non-optimum export must be near-exact.
    model_2 = copy.deepcopy(self.gptj)
    input = torch.ones([1, 512], dtype=torch.long)
    q_model = quantization.fit(model_2, conf, calib_dataloader=dataloader_list)
    q_model.save('saved')
    out1 = q_model.model(input)
    compressed_model = q_model.export_compressed_model(use_optimum_format=False)
    out2 = compressed_model(input)
    torch.save(compressed_model.state_dict(), 'saved/compressed_model.pt')
    self.assertTrue(torch.allclose(out1[0], out2[0], atol=1e-05))
    # Case 3: dict dataloader with default export format.
    model_3 = copy.deepcopy(self.gptj)
    input = torch.ones([1, 512], dtype=torch.long)
    q_model = quantization.fit(model_3, conf, calib_dataloader=dataloader_dict)
    q_model.save('saved')
    out1 = q_model.model(input)
    compressed_model = q_model.export_compressed_model()
    out2 = compressed_model(input)
    torch.save(compressed_model.state_dict(), 'saved/compressed_model.pt')
    self.assertTrue(torch.allclose(out1[0], out2[0], atol=0.0002))
    print('GPTQ with unfixed length Done')
def test_TEQ_quant(self):
    """TEQ weight-only quantization with folding keeps outputs near FP32."""

    class teq_inc_loader(object):
        # Calibration loader: 32 fixed (input_ids, attention_mask)-style pairs.
        def __init__(self, nsamples=32):
            self.batch_size = 1
            self.nsamples = nsamples

        def __len__(self):
            return (self.nsamples // self.batch_size)

        def __iter__(self):
            for i in range(self.nsamples):
                (yield (torch.ones([1, 512], dtype=torch.long), torch.ones([1, 512], dtype=torch.long)))

    conf = PostTrainingQuantConfig(approach='weight_only', op_type_dict={'.*': {'weight': {'bits': 4, 'group_size': 32, 'scheme': 'sym', 'algorithm': 'TEQ'}}}, op_name_dict={'.*lm_head': {'weight': {'dtype': 'fp32'}}}, recipes={'teq_args': {'folding': True}})
    input = torch.ones([1, 512], dtype=torch.long)
    dataloader = teq_inc_loader()
    fp32_model = copy.deepcopy(self.gptj)
    out1 = fp32_model(input)
    q_model = quantization.fit(fp32_model, conf, calib_dataloader=dataloader)
    out2 = q_model.model(input)
    self.assertTrue(torch.allclose(out1[0], out2[0], atol=0.1))
class SIDER(MoleculeCSVDataset):
    """SIDER adverse-drug-reaction dataset, fetched and parsed on construction.

    Downloads the archived CSV (reusing an existing copy), extracts it,
    and delegates graph construction and caching to MoleculeCSVDataset.
    """

    def __init__(self, smiles_to_graph=smiles_2_dgl, load=False, log_every=1000, cache_file_path='./sider_dglgraph.bin', n_jobs=1):
        self._url = 'dataset/sider.zip'
        archive_path = get_download_dir() + '/sider.zip'
        extract_dir = get_download_dir() + '/sider'
        # Fetch once; an already-downloaded archive is kept (overwrite=False).
        download(_get_dgl_url(self._url), path=archive_path, overwrite=False)
        extract_archive(archive_path, extract_dir)
        frame = pd.read_csv(extract_dir + '/sider.csv')
        super(SIDER, self).__init__(df=frame, smiles_to_graph=smiles_to_graph, smiles_column='smiles', cache_file_path=cache_file_path, load=load, log_every=log_every, init_mask=True, n_jobs=n_jobs)

    def __getitem__(self, item):
        """Return (smiles, graph, label tensor, mask tensor) for one molecule."""
        return (self.smiles[item], self.graphs[item], self.labels[item], self.mask[item])
class LastLevelP6(nn.Module):
    """Appends a single extra P6 level to an FPN output.

    The level is produced by a stride-2, padding-1, 3x3 convolution applied
    to the configured input feature (default ``res5``).
    """

    def __init__(self, in_channels, out_channels, in_features='res5'):
        super().__init__()
        self.num_levels = 1
        self.in_feature = in_features
        self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
        for layer in (self.p6,):
            weight_init.c2_xavier_fill(layer)

    def forward(self, x):
        # One downsampled map, wrapped in a list for FPN compatibility.
        return [self.p6(x)]
class GetDataFrameCallable(Protocol):
    """Structural type for loader callables that read a file into a DataFrame."""

    def __call__(self, filename: str, parse_dates: _ParseDates=False) -> pd.DataFrame:
        """Load ``filename`` and return its contents as a DataFrame.

        ``parse_dates`` follows the pandas reader convention; its exact
        shape is given by the module-level ``_ParseDates`` alias.
        """
        ...
class SoftminusFlow(Flow):
    """Invertible softminus transform.

    ``forward`` maps through the inverse softplus (softminus) and
    ``inverse`` maps back through softplus; a 1e-08 shift guards the
    inverse softplus near zero.
    """

    def __init__(self, set_restrictions=False) -> None:
        super(SoftminusFlow, self).__init__()
        self.softplus = torch.nn.Softplus()
        # BUG FIX: the flag was hard-coded to False, silently discarding
        # the caller's ``set_restrictions`` argument.
        self.set_restrictions = set_restrictions

    def forward(self, f0: torch.tensor, X: torch.tensor=None) -> torch.tensor:
        # ``X`` is accepted for interface compatibility but unused here.
        return gpytorch.utils.transforms.inv_softplus((f0 + 1e-08))

    def inverse(self, f: torch.tensor) -> torch.tensor:
        return self.softplus((f + 1e-08))
def build_linknet(backbone, decoder_block, skip_connection_layers, decoder_filters=(256, 128, 64, 32, 16), n_upsample_blocks=5, classes=1, activation='sigmoid', use_batchnorm=True, dropout=None):
    """Assemble a LinkNet-style decoder on top of ``backbone``.

    Args:
        backbone: encoder model supplying ``input``/``output`` and the
            layers named or indexed in ``skip_connection_layers``.
        decoder_block: factory ``(filters, stage, use_batchnorm)`` returning
            a callable applied as ``block(x, skip)`` per upsampling stage.
        skip_connection_layers: layer names (str) or indices (int) whose
            outputs become decoder skip inputs.
        decoder_filters: filter count per upsampling stage.
        n_upsample_blocks: number of decoder stages.
        classes: output channels of the final convolution.
        activation: name of the final activation layer.
        use_batchnorm: forwarded to center and decoder blocks.
        dropout: optional spatial-dropout rate before the head.

    Returns:
        A ``models.Model`` mapping the backbone input to the prediction map.

    NOTE(review): the center block uses the 2D-named ``Conv3x3BnReLU``
    while the head uses ``SpatialDropout3D``/``Conv3D`` — confirm this
    2D/3D mix is intentional (it looks like a partial 3D adaptation).
    """
    input_ = backbone.input
    x = backbone.output
    # Resolve each skip either by layer name or by positional index.
    skips = [(backbone.get_layer(name=i).output if isinstance(i, str) else backbone.get_layer(index=i).output) for i in skip_connection_layers]
    # Backbones ending in pooling get an extra learned center block.
    if isinstance(backbone.layers[(- 1)], layers.MaxPooling2D):
        x = Conv3x3BnReLU(512, use_batchnorm, name='center_block1')(x)
        x = Conv3x3BnReLU(512, use_batchnorm, name='center_block2')(x)
    for i in range(n_upsample_blocks):
        # Stages beyond the available skips upsample without a skip input.
        if (i < len(skips)):
            skip = skips[i]
        else:
            skip = None
        x = decoder_block(decoder_filters[i], stage=i, use_batchnorm=use_batchnorm)(x, skip)
    if dropout:
        x = layers.SpatialDropout3D(dropout, name='pyramid_dropout')(x)
    # Prediction head: per-voxel class scores followed by the activation.
    x = layers.Conv3D(filters=classes, kernel_size=(3, 3, 3), padding='same', use_bias=True, kernel_initializer='glorot_uniform')(x)
    x = layers.Activation(activation, name=activation)(x)
    model = models.Model(input_, x)
    return model
# NOTE(review): these bare-name statements look like decorator residue from
# an automated transform (e.g. ``@require_faiss`` / ``@require_datasets`` /
# ``@require_torch`` stripped of their prefixes) — each merely evaluates a
# module-level symbol. Confirm against the upstream source.
_faiss
_datasets
_torch
class RagTokenizerTest(TestCase):
    """Tests for RagTokenizer: save/load round trips and pretrained loading.

    NOTE(review): the bare ``_tokenizers`` statement below looks like
    decorator residue (e.g. ``@require_tokenizers``) from an automated
    transform — confirm against the upstream source.
    """

    def setUp(self):
        """Write throwaway DPR (WordPiece) and BART (BPE) tokenizer files."""
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # Minimal WordPiece vocabulary for the DPR question encoder.
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
        dpr_tokenizer_path = os.path.join(self.tmpdirname, 'dpr_tokenizer')
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))
        # Minimal byte-level BPE vocab + merges for the BART generator.
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'G', 'Gl', 'Gn', 'Glo', 'Glow', 'er', 'Glowest', 'Gnewer', 'Gwider', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'G l', 'Gl o', 'Glo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        bart_tokenizer_path = os.path.join(self.tmpdirname, 'bart_tokenizer')
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write((json.dumps(vocab_tokens) + '\n'))
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        # Reload the question-encoder tokenizer written by setUp.
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'dpr_tokenizer'))

    def get_bart_tokenizer(self) -> BartTokenizer:
        # Reload the generator tokenizer written by setUp.
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'bart_tokenizer'))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    _tokenizers  # NOTE(review): probable stripped decorator — see class docstring.

    def test_save_load_pretrained_with_saved_config(self):
        """Round trip: reloading must yield fast tokenizers with identical vocabs."""
        save_dir = os.path.join(self.tmpdirname, 'rag_tokenizer')
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    def test_pretrained_token_nq_tokenizer(self):
        """Smoke test: the published rag-token-nq tokenizer batch-encodes (needs network)."""
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-token-nq')
        input_strings = ['who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium']
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    def test_pretrained_sequence_nq_tokenizer(self):
        """Smoke test: same as above for the rag-sequence-nq checkpoint."""
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-sequence-nq')
        input_strings = ['who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium']
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
def split_data(data, output_file, days_test=DAYS_TEST, last_nth=None):
    """Split a session log into train/test and train_tr/validation partitions.

    Sessions whose last event falls within the final ``days_test`` days
    form the test set; the remainder is the training set. The same rule is
    applied again inside the training set to carve out a validation set.
    Test/validation events are restricted to items seen in training, and
    sessions shorter than 2 events are dropped. All four partitions are
    written as tab-separated files next to ``output_file``.

    Args:
        data: DataFrame with at least SessionId, ItemId and Time columns
            (Time as a POSIX timestamp).
        output_file: path prefix for the emitted files.
        days_test: size of the held-out window in days.
        last_nth: if given, keep only roughly the last 1/last_nth of
            training events (aligned to a session boundary) and tag the
            output filenames with the value.
    """
    # Filename tag, computed once instead of at every to_csv call.
    suffix = str(last_nth) if last_nth is not None else ''
    data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)
    test_from = (data_end - timedelta(days_test))
    session_max_times = data.groupby('SessionId').Time.max()
    session_train = session_max_times[(session_max_times < test_from.timestamp())].index
    session_test = session_max_times[(session_max_times >= test_from.timestamp())].index
    # .copy() so the inplace sort below does not trigger SettingWithCopyWarning.
    train = data[np.in1d(data.SessionId, session_train)].copy()
    if (last_nth is not None):
        train.sort_values(['SessionId', 'Time'], inplace=True)
        session_data = list(train['SessionId'].values)
        length = int((len(session_data) / last_nth))
        session_data = session_data[(- length):]
        # Advance past the (possibly truncated) first session of the tail so
        # no session is split across the cut.
        # BUG FIX: iterate only to len-1 — the original indexed i+1 past the
        # end and raised IndexError whenever the tail held a single session;
        # i is also pre-bound so an empty tail cannot raise NameError below.
        i = 0
        for i in range(len(session_data) - 1):
            if (session_data[i] != session_data[(i + 1)]):
                break
        train = train.reset_index()
        train = train[(((- length) + i) + 1):]
    test = data[np.in1d(data.SessionId, session_test)]
    # Keep only items known to training; drop sessions shorter than 2 events.
    test = test[np.in1d(test.ItemId, train.ItemId)]
    tslength = test.groupby('SessionId').size()
    test = test[np.in1d(test.SessionId, tslength[(tslength >= 2)].index)]
    print('Full train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train), train.SessionId.nunique(), train.ItemId.nunique()))
    train.to_csv((output_file + suffix + '_train_full.txt'), sep='\t', index=False)
    print('Test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique()))
    test.to_csv((output_file + suffix + '_test.txt'), sep='\t', index=False)
    # Repeat the same time-based split inside the training data to obtain
    # a train/validation pair.
    data_end = datetime.fromtimestamp(train.Time.max(), timezone.utc)
    test_from = (data_end - timedelta(days_test))
    session_max_times = train.groupby('SessionId').Time.max()
    session_train = session_max_times[(session_max_times < test_from.timestamp())].index
    session_valid = session_max_times[(session_max_times >= test_from.timestamp())].index
    train_tr = train[np.in1d(train.SessionId, session_train)]
    valid = train[np.in1d(train.SessionId, session_valid)]
    valid = valid[np.in1d(valid.ItemId, train_tr.ItemId)]
    tslength = valid.groupby('SessionId').size()
    valid = valid[np.in1d(valid.SessionId, tslength[(tslength >= 2)].index)]
    print('Train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train_tr), train_tr.SessionId.nunique(), train_tr.ItemId.nunique()))
    train_tr.to_csv((output_file + suffix + '_train_tr.txt'), sep='\t', index=False)
    print('Validation set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(valid), valid.SessionId.nunique(), valid.ItemId.nunique()))
    valid.to_csv((output_file + suffix + '_train_valid.txt'), sep='\t', index=False)
class TextDatasetSplitter(DatasetSplitter):
    """Partitions a text dataset into fixed-size shards, optionally shuffled.

    With shuffling enabled each shard carries an explicit slice of a fresh
    random permutation of record indices; without it, shards carry empty
    index lists and are identified by their [start, end) range alone.
    """

    STORAGE_TYPE = 'text'

    def __init__(self, dataset_name, dataset_size, shard_size, num_epochs, shuffle=False):
        super(TextDatasetSplitter, self).__init__(dataset_name, dataset_size, shard_size, num_epochs)
        self._dataset_name = dataset_name
        self._shuffle = shuffle
        self._shards: List[Shard] = []

    def get_epoch(self):
        """Return the number of epochs sharded so far."""
        return self.epoch

    def get_shards(self) -> List[Shard]:
        """Return the shards produced by the most recent create_shards call."""
        return self._shards

    def create_shards(self):
        """(Re)build the shard list for one epoch and bump the epoch counter."""
        self._shards = self._create_shards_with_indices(0, self._dataset_size)
        self.epoch += 1
        return self._shards

    def _create_shards_with_indices(self, start_idx, end_idx) -> List[Shard]:
        """Build Shard descriptors covering [start_idx, end_idx)."""
        permutation = list(range(self._dataset_size))
        if self._shuffle:
            random.shuffle(permutation)
        result: List[Shard] = []
        consumed = 0  # offset into the permutation already handed out
        for lo in range(start_idx, end_idx, self._shard_size):
            hi = min(lo + self._shard_size, end_idx)
            if self._shuffle:
                count = hi - lo
                indices = permutation[consumed:consumed + count]
                consumed += count
            else:
                indices = []
            result.append(Shard(name=self._dataset_name, start=lo, end=hi, record_indices=indices))
        return result
class PosAttTextualResEncoder(nn.Module):
    """Residual image encoder with word-level attention for text-guided synthesis.

    Encodes a (possibly masked) image through a downsampling residual trunk,
    attends word embeddings against the deepest feature map, and emits latent
    Gaussian distribution parameters. With a complement image ``img_c``
    (training) it runs posterior and prior paths jointly; without one
    (inference) it runs the prior path only.
    """

    def __init__(self, input_nc=3, ngf=32, z_nc=256, img_f=256, L=6, layers=5, norm='none', activation='ReLU', use_spect=True, use_coord=False, image_dim=256, text_dim=256, multi_peak=True, pool_attention='max'):
        super(PosAttTextualResEncoder, self).__init__()
        self.layers = layers  # number of encoder stages (incl. block0)
        self.z_nc = z_nc  # latent channels per Gaussian parameter
        self.L = L  # number of refinement blocks on the prior path
        norm_layer = get_norm_layer(norm_type=norm)
        nonlinearity = get_nonlinearity_layer(activation_type=activation)
        self.block0 = ResBlockEncoderOptimized(input_nc, ngf, norm_layer, nonlinearity, use_spect, use_coord)
        self.word_attention = ImageTextAttention(idf=image_dim, cdf=text_dim, multi_peak=multi_peak, pooling=pool_attention)
        mult = 1
        # Downsampling trunk: channel width grows per stage, capped at img_f.
        for i in range((layers - 1)):
            mult_prev = mult
            mult = min((2 ** (i + 2)), (img_f // ngf))
            block = ResBlock((ngf * mult_prev), (ngf * mult), (ngf * mult_prev), norm_layer, nonlinearity, 'down', use_spect, use_coord)
            setattr(self, ('encoder' + str(i)), block)
        # Prior-path refinement blocks (no resolution change).
        for i in range(self.L):
            block = ResBlock((ngf * mult), (ngf * mult), (ngf * mult), norm_layer, nonlinearity, 'none', use_spect, use_coord)
            setattr(self, ('infer_prior' + str(i)), block)
        # Heads emit 2*z_nc channels: (mu, pre-softplus std) stacked.
        self.posterior = ResBlock(((ngf * mult) + (2 * text_dim)), (2 * z_nc), ((ngf * mult) * 2), norm_layer, nonlinearity, 'none', use_spect, use_coord)
        self.prior = ResBlock(((ngf * mult) + (2 * text_dim)), (2 * z_nc), ((ngf * mult) * 2), norm_layer, nonlinearity, 'none', use_spect, use_coord)

    def forward(self, img_m, sentence_embedding, word_embeddings, text_mask, image_mask, img_c=None):
        """Encode; returns (distribution(s), feature pyramid, text-fused feature).

        ``img_m`` is the masked image; ``img_c`` (training only) its complement.
        """
        # With a complement image, process both halves as one batch.
        if (type(img_c) != type(None)):
            img = torch.cat([img_m, img_c], dim=0)
        else:
            img = img_m
        out = self.block0(img)
        feature = [out]
        for i in range((self.layers - 1)):
            model = getattr(self, ('encoder' + str(i)))
            out = model(out)
            feature.append(out)
        # Resize the mask to the deepest feature resolution; keep one channel.
        image_mask = task.scale_img(image_mask, size=[feature[(- 1)].size(2), feature[(- 1)].size(3)])
        if (image_mask.size(1) == 3):
            image_mask = image_mask.chunk(3, dim=1)[0]
        if (type(img_c) != type(None)):
            # Split the batch back into the two halves.
            (f_m_g, f_m_rec) = feature[(- 1)].chunk(2)
            img_mask_g = image_mask
            img_mask_rec = (1 - img_mask_g)
            # Attend words against each half with complementary masks.
            weighted_word_embedding_rec = self.word_attention(f_m_rec, word_embeddings, mask=text_mask, image_mask=img_mask_rec, inverse_attention=True)
            weighted_word_embedding_g = self.word_attention(f_m_g, word_embeddings, mask=text_mask, image_mask=img_mask_g, inverse_attention=False)
            weighted_word_embedding = torch.cat([weighted_word_embedding_g, weighted_word_embedding_rec])
            (distribution, f_text) = self.two_paths(out, sentence_embedding, weighted_word_embedding)
            return (distribution, feature, f_text)
        else:
            f_m = feature[(- 1)]
            weighted_word_embedding = self.word_attention(f_m, word_embeddings, mask=text_mask, image_mask=image_mask, inverse_attention=False)
            (distribution, f_m_text) = self.one_path(out, sentence_embedding, weighted_word_embedding)
            f_text = torch.cat([f_m_text, weighted_word_embedding], dim=1)
            return (distribution, feature, f_text)

    def one_path(self, f_in, sentence_embedding, weighted_word_embedding):
        """Prior path: refine features, fuse text, emit one [mu, std] pair."""
        f_m = f_in
        distribution = []
        for i in range(self.L):
            infer_prior = getattr(self, ('infer_prior' + str(i)))
            f_m = infer_prior(f_m)
        # Broadcast the sentence vector over the spatial grid before fusing.
        (ix, iw) = (f_m.size(2), f_m.size(3))
        sentence_dim = sentence_embedding.size(1)
        sentence_embedding_replication = sentence_embedding.view((- 1), sentence_dim, 1, 1).repeat(1, 1, ix, iw)
        f_m_sent = torch.cat([f_m, sentence_embedding_replication], dim=1)
        f_m_text = torch.cat([f_m_sent, weighted_word_embedding], dim=1)
        o = self.prior(f_m_text)
        (q_mu, q_std) = torch.split(o, self.z_nc, dim=1)
        # softplus keeps the std strictly positive.
        distribution.append([q_mu, F.softplus(q_std)])
        return (distribution, f_m_sent)

    def two_paths(self, f_in, sentence_embedding, weighted_word_embedding):
        """Training: posterior from the complement half, prior from the masked half."""
        (f_m, f_c) = f_in.chunk(2)
        (weighted_word_embedding_m, weighted_word_embedding_c) = weighted_word_embedding.chunk(2)
        distributions = []
        (ix, iw) = (f_c.size(2), f_c.size(3))
        sentence_dim = sentence_embedding.size(1)
        sentence_embedding_replication = sentence_embedding.view((- 1), sentence_dim, 1, 1).repeat(1, 1, ix, iw)
        f_c_sent = torch.cat([f_c, sentence_embedding_replication], dim=1)
        f_c_text = torch.cat([f_c_sent, weighted_word_embedding_c], dim=1)
        o = self.posterior(f_c_text)
        (p_mu, p_std) = torch.split(o, self.z_nc, dim=1)
        (distribution, f_m_sent) = self.one_path(f_m, sentence_embedding, weighted_word_embedding_m)
        # Bundle posterior and prior parameters together (e.g. for a KL term).
        distributions.append([p_mu, F.softplus(p_std), distribution[0][0], distribution[0][1]])
        f_m_text = torch.cat([f_m_sent, weighted_word_embedding_m], dim=1)
        f_c_text = torch.cat([f_m_sent, weighted_word_embedding_c], dim=1)
        return (distributions, torch.cat([f_m_text, f_c_text], dim=0))
def gen_save_feat(audio_model, val_loader, save_path):
    """Run the model's feature extractor over a loader and save per-clip features.

    For every batch the extracted features are moved to CPU and each clip's
    features are written to ``save_path/<basename-minus-5-chars>.npy``.

    Args:
        audio_model: module exposing ``feature_extract``; wrapped in
            DataParallel if it is not already.
        val_loader: iterable of (audio_input, labels, filename) batches,
            where ``filename`` is a sequence of clip paths.
        save_path: directory that receives the ``.npy`` files.

    Returns:
        0 on completion.
    """
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    # Wrap so the ``.module`` access below is uniform either way.
    if (not isinstance(audio_model, nn.DataParallel)):
        audio_model = nn.DataParallel(audio_model)
    audio_model = audio_model.to(device)
    audio_model.eval()
    with torch.no_grad():
        for (i, (audio_input, labels, filename)) in enumerate(val_loader):
            audio_input = audio_input.to(device)
            audio_output = audio_model.module.feature_extract(audio_input)
            predictions = audio_output.to('cpu').detach()
            for j in range(len(filename)):
                cur_filename = filename[j].split('/')[(- 1)]
                # NOTE(review): assumes a fixed 5-char extension (e.g.
                # '.flac'/'.json') on every filename — confirm with callers.
                cur_filename = (((save_path + '/') + cur_filename[:(- 5)]) + '.npy')
                np.save(cur_filename, predictions[j])
            # BUG FIX: message typo 'processe' corrected.
            print('processed {:d} of {:d}'.format((i * audio_output.shape[0]), (len(val_loader) * audio_output.shape[0])))
    # BUG FIX: removed a stray ``exit()`` that terminated the interpreter
    # here and made this return statement unreachable.
    return 0
class GraphConvolution(object):
    """TF1 graph-convolution layer: output = act(adj @ dropout(x) @ W [+ b]).

    ``placeholders`` must provide ``dropout`` and ``num_features_nonzero``
    (the latter sizes the retention mask for dropout on sparse inputs).
    """

    def __init__(self, input_dim, output_dim, placeholders, dropout=0.0, sparse_inputs=False, act=tf.nn.relu, bias=False, **kwargs):
        # Only 'name' and 'logging' may be passed through **kwargs.
        allowed_kwargs = {'name', 'logging'}
        for kwarg in kwargs.keys():
            assert (kwarg in allowed_kwargs), ('Invalid keyword argument: ' + kwarg)
        name = kwargs.get('name')
        if (not name):
            # Auto-name as '<classname>_<uid>' so variable scopes stay unique.
            layer = self.__class__.__name__.lower()
            name = ((layer + '_') + str(get_layer_uid(layer)))
        self.name = name
        self.vars = {}
        logging = kwargs.get('logging', False)
        self.logging = logging
        if dropout:
            # The dropout rate is fed at run time through the placeholder.
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.0
        self.act = act
        self.sparse_inputs = sparse_inputs
        self.bias = bias
        self.num_features_nonzero = placeholders['num_features_nonzero']
        with tf.variable_scope((self.name + '_vars')):
            self.vars['weights'] = glorot([input_dim, output_dim], name='weights')
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')
        if self.logging:
            self._log_vars()

    def _call(self, inputs, adj):
        """Core computation; ``adj`` is treated as a sparse tensor."""
        x = inputs
        if self.sparse_inputs:
            x = sparse_dropout(x, (1 - self.dropout), self.num_features_nonzero)
        else:
            x = tf.nn.dropout(x, (1 - self.dropout))
        pre_sup = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
        # Neighborhood aggregation through the adjacency matrix.
        output = dot(adj, pre_sup, sparse=True)
        if self.bias:
            output += self.vars['bias']
        return self.act(output)

    def __call__(self, inputs, adj):
        """Wrap _call in a name scope, with optional histogram summaries."""
        with tf.name_scope(self.name):
            if (self.logging and (not self.sparse_inputs)):
                tf.summary.histogram((self.name + '/inputs'), inputs)
            outputs = self._call(inputs, adj)
            if self.logging:
                tf.summary.histogram((self.name + '/outputs'), outputs)
            return outputs

    def _log_vars(self):
        # Record one histogram summary per created variable.
        for var in self.vars:
            tf.summary.histogram(((self.name + '/vars/') + var), self.vars[var])
def VGG(input_shape, nbstages, nblayers, nbfilters, nbclasses, weight_decay=0.0, kernel_constraint=None, kernel_initializer='glorot_uniform', include_top=True, use_batchnorm=True, batchnorm_training=True, use_bias=True, act='relu', dropout=0.0, kernel_size=(3, 3), batchnorm_momentum=0.99, use_skips=False):
    """Build a configurable VGG-style convolutional network.

    Args:
        input_shape: (H, W) or a full shape; a 3-channel axis is added
            when only two dimensions are given.
        nbstages: number of conv stages; each stage except the last is
            followed by a 2x2 max-pool.
        nblayers: per-stage conv-layer counts (len == nbstages).
        nbfilters: per-stage filter counts (len == nbstages).
        nbclasses: output classes for the softmax head (include_top only).
        weight_decay: L2 factor for conv/dense kernels when > 0.
        act: 'leaky' selects LeakyReLU(alpha=0.3); any other value uses
            ReLU. NOTE(review): values other than 'leaky' silently fall
            back to ReLU — confirm arbitrary activations were not intended.
        dropout: per-conv dropout rate when > 0.
        use_skips: adds identity residual connections within a stage
            (skipped on each stage's first layer).

    Returns:
        A Keras ``Model``.

    Raises:
        ValueError: if nblayers or nbfilters length mismatches nbstages.
    """
    # Infer the channel axis and complete a 2D shape with 3 channels.
    if (K.image_data_format() == 'channels_last'):
        if (len(input_shape) == 2):
            input_shape = (input_shape + (3,))
        channel_axis = (- 1)
    elif (K.image_data_format() == 'channels_first'):
        if (len(input_shape) == 2):
            input_shape = ((3,) + input_shape)
        channel_axis = 1
    if (len(nblayers) != nbstages):
        raise ValueError('nblayers should contain one element per stage.')
    if (len(nbfilters) != nbstages):
        raise ValueError('nbfilters should contain one element per stage.')
    regularizer = None
    if (weight_decay > 0.0):
        regularizer = l2(weight_decay)
    input_model = Input(shape=input_shape)
    x = input_model
    for s in range(nbstages):
        for l in range(nblayers[s]):
            inp = x  # saved for the optional residual connection
            x = Conv2D(nbfilters[s], kernel_size=kernel_size, padding='same', name=(((('stage' + str(s)) + '_layer') + str(l)) + '_conv'), kernel_constraint=kernel_constraint, kernel_initializer=kernel_initializer, kernel_regularizer=regularizer, use_bias=use_bias)(x)
            if (dropout > 0.0):
                x = Dropout(dropout)(x)
            # BUG FIX: was `act is not 'leaky'` — identity comparison with a
            # string literal (SyntaxWarning since Python 3.8, works only via
            # CPython interning); use equality instead.
            if (act != 'leaky'):
                x = Activation('relu', name=(((('stage' + str(s)) + '_layer') + str(l)) + '_relu'))(x)
            else:
                x = LeakyReLU(alpha=0.3, name=(((('stage' + str(s)) + '_layer') + str(l)) + '_relu'))(x)
            if use_batchnorm:
                x = BatchNormalization(axis=channel_axis, name=(((('stage' + str(s)) + '_layer') + str(l)) + '_batch'), center=batchnorm_training, scale=batchnorm_training, momentum=batchnorm_momentum)(x)
            # A stage's first layer may change channel count, so the
            # identity add only applies from the second layer on.
            if (use_skips and (l != 0)):
                x = add([x, inp])
        # Downsample between stages, but not after the final stage.
        if (s != (nbstages - 1)):
            x = MaxPooling2D((2, 2), strides=(2, 2), name=(('stage' + str(s)) + '_pool'))(x)
    if include_top:
        x = GlobalAveragePooling2D(name='global_pool')(x)
        x = Dense(nbclasses, name='last_dense', kernel_initializer=kernel_initializer, use_bias=use_bias, kernel_regularizer=regularizer)(x)
        x = Activation('softmax', name='predictions')(x)
    return Model(input_model, x)
def main(config):
    """Evaluate a saved MNIST LeNet classifier against an L-inf attack ensemble.

    Loads the checkpoint at ``config.path/Classifier.pth`` (normalizing several
    historical state-dict key layouts), runs FGSM, PGD-10/100 and
    Carlini-Wagner 100/1000 attacks at eps=0.3, and writes per-attack metrics
    to ``config.path/base_eval_result.txt``.
    """
    model = Classifier(10, classifier_name='lenet', dataset='mnist', pretrained=False)
    data_classifier_state = torch.load(os.path.join(config.path, 'Classifier.pth'), map_location=None)
    # Some checkpoints wrap the weights under a 'state_dict' key.
    if ('state_dict' in data_classifier_state):
        data_classifier_state = data_classifier_state['state_dict']
    # Strip a leading '1.' prefix (leftover from an nn.Sequential wrapper).
    bad_classifier_state = {}
    for (k, v) in data_classifier_state.items():
        if k.startswith('1.'):
            bad_classifier_state[k[2:]] = v
        else:
            bad_classifier_state[k] = v
    # Strip the 'module.' prefix added by nn.DataParallel, if present.
    starts_with_module = False
    for key in bad_classifier_state.keys():
        if key.startswith('module.'):
            starts_with_module = True
            break
    if starts_with_module:
        correct_classifier_state = {k[7:]: v for (k, v) in bad_classifier_state.items()}
    else:
        correct_classifier_state = bad_classifier_state
    # Older checkpoints lack the 'feature_extractor.' namespace the model expects.
    starts_with_feature_extractor = False
    for k in correct_classifier_state.keys():
        if k.startswith('feature_extractor.'):
            starts_with_feature_extractor = True
            break
    if (not starts_with_feature_extractor):
        correct_classifier_state = {('feature_extractor.' + k): v for (k, v) in correct_classifier_state.items()}
    model.load_state_dict(correct_classifier_state)
    # Inputs are assumed already in model space; no normalization applied.
    normalizer = utils.IdentityNormalize()
    adv_eval_object = adveval.AdversarialEvaluation(model, normalizer)
    # White-box setting: the evaluated model is its own attack surrogate.
    surrogate = model
    normalizer_surr = normalizer
    attack_loss = plf.VanillaXentropy(surrogate, normalizer_surr)
    # L-inf additive-perturbation threat model with epsilon 0.3.
    linf_3_threat = ap.ThreatModel(ap.DeltaAddition, {'lp_style': 'inf', 'lp_bound': 0.3})
    # Single-step FGSM at full epsilon.
    fgsm_attack = aa.FGSM(surrogate, normalizer_surr, linf_3_threat, attack_loss)
    fgsm_attack_kwargs = {'step_size': 0.3, 'verbose': False}
    fgsm_attack_params = advtrain.AdversarialAttackParameters(fgsm_attack, attack_specific_params={'attack_kwargs': fgsm_attack_kwargs})
    # PGD with 10 iterations, step eps/4, random start.
    pgd10_attack = aa.PGD(surrogate, normalizer_surr, linf_3_threat, attack_loss)
    pgd10_attack_kwargs = {'step_size': (0.3 / 4.0), 'num_iterations': 10, 'keep_best': True, 'random_init': True, 'verbose': False}
    pgd10_attack_params = advtrain.AdversarialAttackParameters(pgd10_attack, attack_specific_params={'attack_kwargs': pgd10_attack_kwargs})
    # PGD with 100 iterations and a smaller step (eps/12).
    pgd100_attack = aa.PGD(surrogate, normalizer_surr, linf_3_threat, attack_loss)
    pgd100_attack_kwargs = {'step_size': (0.3 / 12.0), 'num_iterations': 100, 'keep_best': True, 'random_init': True, 'verbose': False}
    pgd100_attack_params = advtrain.AdversarialAttackParameters(pgd100_attack, attack_specific_params={'attack_kwargs': pgd100_attack_kwargs})
    # Carlini-Wagner (f6 loss, soft L-inf regularization), 100 optimization steps.
    cwloss6 = lf.CWLossF6
    distance_fxn = lf.SoftLInfRegularization
    cw100_attack = aa.CarliniWagner(surrogate, normalizer_surr, linf_3_threat, distance_fxn, cwloss6)
    cw100_attack_kwargs = {'num_optim_steps': 100, 'verbose': False}
    cw100_attack_params = advtrain.AdversarialAttackParameters(cw100_attack, attack_specific_params={'attack_kwargs': cw100_attack_kwargs})
    # Same CW configuration with 1000 optimization steps.
    cwloss6 = lf.CWLossF6
    distance_fxn = lf.SoftLInfRegularization
    cw1000_attack = aa.CarliniWagner(surrogate, normalizer_surr, linf_3_threat, distance_fxn, cwloss6)
    cw1000_attack_kwargs = {'num_optim_steps': 1000, 'verbose': False}
    cw1000_attack_params = advtrain.AdversarialAttackParameters(cw1000_attack, attack_specific_params={'attack_kwargs': cw1000_attack_kwargs})
    # Metrics collected for every attack in the ensemble.
    to_eval_dict = {'top1': 'top1', 'avg_loss_value': 'avg_loss_value', 'avg_successful_ssim': 'avg_successful_ssim'}
    fgsm_eval = adveval.EvaluationResult(fgsm_attack_params, to_eval=to_eval_dict)
    pgd10_eval = adveval.EvaluationResult(pgd10_attack_params, to_eval=to_eval_dict)
    pgd100_eval = adveval.EvaluationResult(pgd100_attack_params, to_eval=to_eval_dict)
    cw100_eval = adveval.EvaluationResult(cw100_attack_params, to_eval=to_eval_dict)
    cw1000_eval = adveval.EvaluationResult(cw1000_attack_params, to_eval=to_eval_dict)
    attack_ensemble = {'fgsm': fgsm_eval, 'pgd10': pgd10_eval, 'pgd100': pgd100_eval, 'cw100': cw100_eval, 'cw1000': cw1000_eval}
    # NOTE(review): `test_dataloader` is not defined in this function or visible
    # in this file chunk -- presumably a module-level global; confirm it exists.
    ensemble_out = adv_eval_object.evaluate_ensemble(test_dataloader, attack_ensemble, verbose=True, num_minibatches=None)
    # Fixed display order: clean accuracy first, then attacks by strength.
    sort_order = {'ground': 1, 'fgsm': 2, 'pgd10': 3, 'pgd100': 4, 'cw100': 5, 'cw1000': 6}
    def pretty_printer(fd, eval_ensemble, result_type):
        """Print one metric for every attack (sorted) and mirror it to `fd`."""
        print(('~' * 10), result_type, ('~' * 10))
        fd.write((((('~' * 10) + result_type) + ('~' * 10)) + '\n'))
        for key in sorted(list(eval_ensemble.keys()), key=(lambda k: sort_order[k])):
            eval_result = eval_ensemble[key]
            # Padding so the metric values line up in a column.
            pad = (6 - len(key))
            if (result_type not in eval_result.results):
                continue
            avg_result = eval_result.results[result_type].avg
            print(key, (pad * ' '), ': ', avg_result)
            fd.write(((((key + (pad * ' ')) + ': ') + str(avg_result)) + '\n'))
    with open(os.path.join(config.path, 'base_eval_result.txt'), 'w') as fd:
        fd.write(('Result for {}'.format(config.path) + '\n'))
        fd.write('\n')
        pretty_printer(fd, ensemble_out, 'top1')
        pretty_printer(fd, ensemble_out, 'avg_loss_value')
        pretty_printer(fd, ensemble_out, 'avg_successful_ssim')
def main(_):
    """Train/evaluate/predict a BERT multiple-choice model on CommonsenseQA.

    Driven entirely by TF1 ``FLAGS``; at least one of --do_train / --do_eval /
    --do_predict must be set. Uses a TPUEstimator (which also runs on CPU/GPU
    when use_tpu is False) and writes all artifacts under FLAGS.output_dir.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    if ((not FLAGS.do_train) and (not FLAGS.do_eval) and (not FLAGS.do_predict)):
        raise ValueError("At least one of `do_train`, `do_eval` or `do_predict' must be True.")
    bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
    # BERT's learned position embeddings cap the usable sequence length.
    if (FLAGS.max_seq_length > bert_config.max_position_embeddings):
        raise ValueError(('Cannot use sequence length %d because the BERT model was only trained up to sequence length %d' % (FLAGS.max_seq_length, bert_config.max_position_embeddings)))
    tf.gfile.MakeDirs(FLAGS.output_dir)
    processor = CommonsenseQAProcessor(split=FLAGS.split)
    label_list = processor.get_labels()
    tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
    tpu_cluster_resolver = None
    if (FLAGS.use_tpu and FLAGS.tpu_name):
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host))
    train_examples = None
    num_train_steps = None
    num_warmup_steps = None
    if FLAGS.do_train:
        train_examples = processor.get_train_examples(FLAGS.data_dir)
        # Total optimizer steps = examples / batch * epochs; warmup is a fraction.
        num_train_steps = int(((len(train_examples) / FLAGS.train_batch_size) * FLAGS.num_train_epochs))
        num_warmup_steps = int((num_train_steps * FLAGS.warmup_proportion))
    model_fn = model_fn_builder(bert_config=bert_config, num_labels=len(label_list), init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu)
    # TPUEstimator falls back to ordinary CPU/GPU Estimator when use_tpu=False.
    estimator = tf.contrib.tpu.TPUEstimator(use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size)
    if FLAGS.do_train:
        # Serialize features once to a TFRecord so input_fn can stream them.
        train_file = os.path.join(FLAGS.output_dir, 'train.tf_record')
        train_seq_length = file_based_convert_examples_to_features(train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
        tf.logging.info('***** Running training *****')
        tf.logging.info('  Num examples = %d', len(train_examples))
        tf.logging.info('  Batch size = %d', FLAGS.train_batch_size)
        tf.logging.info('  Num steps = %d', num_train_steps)
        tf.logging.info('  Longest training sequence = %d', train_seq_length)
        train_input_fn = file_based_input_fn_builder(input_file=train_file, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True)
        estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
    if FLAGS.do_eval:
        eval_examples = processor.get_dev_examples(FLAGS.data_dir)
        eval_file = os.path.join(FLAGS.output_dir, 'eval.tf_record')
        eval_seq_length = file_based_convert_examples_to_features(eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
        tf.logging.info('***** Running evaluation *****')
        tf.logging.info('  Num examples = %d', len(eval_examples))
        tf.logging.info('  Batch size = %d', FLAGS.eval_batch_size)
        tf.logging.info('  Longest eval sequence = %d', eval_seq_length)
        eval_steps = None
        # TPU requires a fixed step count (and fixed batch shapes, hence
        # drop_remainder); on CPU/GPU the estimator just runs to exhaustion.
        if FLAGS.use_tpu:
            eval_steps = int((len(eval_examples) / FLAGS.eval_batch_size))
        eval_drop_remainder = (True if FLAGS.use_tpu else False)
        eval_input_fn = file_based_input_fn_builder(input_file=eval_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=eval_drop_remainder)
        result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
        output_eval_file = os.path.join(FLAGS.output_dir, 'eval_results.txt')
        with tf.gfile.GFile(output_eval_file, 'w') as writer:
            tf.logging.info('***** Eval results *****')
            for key in sorted(result.keys()):
                tf.logging.info('  %s = %s', key, str(result[key]))
                writer.write(('%s = %s\n' % (key, str(result[key]))))
    if FLAGS.do_predict:
        predict_examples = processor.get_test_examples(FLAGS.data_dir)
        predict_file = os.path.join(FLAGS.output_dir, 'predict.tf_record')
        predict_seq_length = file_based_convert_examples_to_features(predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file)
        tf.logging.info('***** Running prediction*****')
        tf.logging.info('  Num examples = %d', len(predict_examples))
        tf.logging.info('  Batch size = %d', FLAGS.predict_batch_size)
        tf.logging.info('  Longest predict sequence = %d', predict_seq_length)
        if FLAGS.use_tpu:
            raise ValueError('Prediction in TPU not supported')
        predict_drop_remainder = (True if FLAGS.use_tpu else False)
        predict_input_fn = file_based_input_fn_builder(input_file=predict_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=predict_drop_remainder)
        result = estimator.predict(input_fn=predict_input_fn)
        test_predictions_file = os.path.join(FLAGS.output_dir, 'test_results.csv')
        with tf.gfile.GFile(test_predictions_file, 'w') as writer:
            tf.logging.info('***** Predict results *****')
            # CSV row: question id, predicted label, then per-class probabilities.
            for (example, prediction) in zip(predict_examples, result):
                output_line = (','.join(([str(example.qid), str(CommonsenseQAProcessor.LABELS[np.argmax(prediction)])] + [str(class_probability) for class_probability in prediction])) + '\n')
                writer.write(output_line)
def process_single_pred(args):
    """Upsample and align one depth prediction to its target, then save it.

    Args:
        args: 3-tuple ``(target, pred, file)`` -- the reference depth map,
            the raw prediction, and the output file path.
    """
    target, raw_pred, out_file = args
    aligned = align(upsample(raw_pred, target), target)
    save_depth_image(out_file, aligned)
def ensure_list(s: Optional[Union[(str, List[str], Tuple[str], Set[str])]]) -> List[str]:
    """Normalize *s* into a list of strings.

    - a list is returned unchanged (same object),
    - a tuple or set is converted to a new list,
    - ``None`` becomes ``[]``,
    - anything else (typically a single string) is wrapped as ``[s]``.

    Rewritten from a triply-nested conditional expression into guard clauses
    for readability; behavior is unchanged.
    """
    if isinstance(s, list):
        return s
    if isinstance(s, (tuple, set)):
        return list(s)
    if s is None:
        return []
    return [s]
def name_parts(name):
    """Split a path-like name into ``(directory, basename, ':ext')``.

    The extension is everything after the LAST ``':'`` in the final path
    component (returned with the ``':'`` prefix), or ``''`` when no colon
    is present. The directory part excludes the trailing ``'/'``.
    """
    assert isinstance(name, str), 'name must be a str'
    directory, _, leaf = name.rpartition('/')
    stem, colon, tail = leaf.rpartition(':')
    if colon:
        return (directory, stem, colon + tail)
    return (directory, leaf, '')
def get_dataloader(batch_size=64, dataset='co3dv1', category=('apple',), split='train', shuffle=True, num_workers=8, debug=False, num_images=2):
    """Construct a DataLoader over a CO3D-family dataset.

    Args:
        dataset: one of 'co3dv1', 'co3d', 'co3dv2' -- selects the dataset class.
        category: tuple of CO3D category names.
        split: dataset split name, e.g. 'train'.
        debug: forces ``num_workers=0`` for easier debugging and is also
            forwarded to the dataset constructor.
        num_images: images sampled per example.

    Raises:
        Exception: when ``dataset`` names an unknown dataset.
    """
    workers = 0 if debug else num_workers
    if dataset == 'co3dv1':
        ds = Co3dv1Dataset(category=category, split=split, num_images=num_images, debug=debug)
    elif dataset in ('co3d', 'co3dv2'):
        ds = Co3dDataset(category=category, split=split, num_images=num_images, debug=debug)
    else:
        raise Exception(f'Unknown dataset: {dataset}')
    return torch.utils.data.DataLoader(
        ds,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=workers,
        pin_memory=True,
        drop_last=True,
    )
class KernelPCA(AutotabularPreprocessingAlgorithm):
    """Kernel PCA preprocessor wrapping ``sklearn.decomposition.KernelPCA``.

    Raises ValueError from fit/transform when the decomposition removes every
    feature, so the surrounding AutoML search can reject the configuration.
    """

    def __init__(self, n_components, kernel, degree=3, gamma=0.25, coef0=0.0, random_state=None):
        self.n_components = n_components
        self.kernel = kernel
        self.degree = degree
        self.gamma = gamma
        self.coef0 = coef0
        self.random_state = random_state
        # Fix: previously never initialized, so calling transform() before
        # fit() raised AttributeError instead of the intended NotImplementedError.
        self.preprocessor = None

    def fit(self, X, Y=None):
        """Fit KernelPCA on X. Hyperparameters are coerced to their proper
        numeric types because ConfigSpace may hand them over as strings."""
        import scipy.sparse
        import sklearn.decomposition
        self.n_components = int(self.n_components)
        self.degree = int(self.degree)
        self.gamma = float(self.gamma)
        self.coef0 = float(self.coef0)
        self.preprocessor = sklearn.decomposition.KernelPCA(n_components=self.n_components, kernel=self.kernel, degree=self.degree, gamma=self.gamma, coef0=self.coef0, remove_zero_eig=True, random_state=self.random_state)
        if scipy.sparse.issparse(X):
            X = X.astype(np.float64)
        # Escalate numerical warnings from the decomposition to hard errors.
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            self.preprocessor.fit(X)
            # NOTE(review): alphas_/lambdas_ are deprecated aliases in newer
            # scikit-learn (renamed eigenvectors_/eigenvalues_); confirm the
            # pinned sklearn version before upgrading.
            if (len((self.preprocessor.alphas_ / self.preprocessor.lambdas_)) == 0):
                raise ValueError('KernelPCA removed all features!')
        return self

    def transform(self, X):
        """Project X onto the fitted kernel principal components.

        Raises:
            NotImplementedError: if called before :meth:`fit`.
            ValueError: if the projection has zero output features.
        """
        if (self.preprocessor is None):
            raise NotImplementedError()
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            X_new = self.preprocessor.transform(X)
            if (X_new.shape[1] == 0):
                raise ValueError('KernelPCA removed all features!')
            return X_new

    # Fix: these take no `self`; without @staticmethod an instance call would
    # misbind the instance to `dataset_properties`.
    @staticmethod
    def get_properties(dataset_properties=None):
        """Static metadata describing this preprocessor's capabilities."""
        return {'shortname': 'KernelPCA', 'name': 'Kernel Principal Component Analysis', 'handles_regression': True, 'handles_classification': True, 'handles_multiclass': True, 'handles_multilabel': True, 'handles_multioutput': True, 'is_deterministic': False, 'input': (DENSE, SPARSE, UNSIGNED_DATA), 'output': (DENSE, UNSIGNED_DATA)}

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties=None):
        """ConfigSpace with kernel-dependent conditionals: `degree` only for
        'poly', `coef0` for 'poly'/'sigmoid', `gamma` for 'poly'/'rbf'."""
        n_components = UniformIntegerHyperparameter('n_components', 10, 2000, default_value=100)
        kernel = CategoricalHyperparameter('kernel', ['poly', 'rbf', 'sigmoid', 'cosine'], 'rbf')
        gamma = UniformFloatHyperparameter('gamma', 3.e-05, 8, log=True, default_value=0.01)
        degree = UniformIntegerHyperparameter('degree', 2, 5, 3)
        coef0 = UniformFloatHyperparameter('coef0', (- 1), 1, default_value=0)
        cs = ConfigurationSpace()
        cs.add_hyperparameters([n_components, kernel, degree, gamma, coef0])
        degree_depends_on_poly = EqualsCondition(degree, kernel, 'poly')
        coef0_condition = InCondition(coef0, kernel, ['poly', 'sigmoid'])
        gamma_condition = InCondition(gamma, kernel, ['poly', 'rbf'])
        cs.add_conditions([degree_depends_on_poly, coef0_condition, gamma_condition])
        return cs
class MetaConvModel(MetaModule):
    """4-conv-block embedding network with a linear classifier head for
    gradient-based meta-learning; parameters can be overridden per call via
    ``params`` (MAML-style functional forward).

    Args:
        in_channels: channels of the input images.
        out_features: number of output classes (N-way).
        hidden_size: channels in every conv block.
        feature_size: flattened feature dimension fed to the classifier.
        drop_p: dropout probability inside the conv blocks (0 disables it).
    """
    def __init__(self, in_channels, out_features, hidden_size=64, feature_size=64, drop_p=0.0):
        super(MetaConvModel, self).__init__()
        self.in_channels = in_channels
        self.out_features = out_features
        self.hidden_size = hidden_size
        self.feature_size = feature_size
        self.drop_p = drop_p
        # When True, the feature extractor keeps its meta-learned weights and
        # only the classifier receives adapted params (ANIL-style).
        self.anil = False
        kwargs = {}
        if (self.drop_p > 0.0):
            conv = conv_drop_block
            kwargs['drop_p'] = self.drop_p
            # NOTE(review): both branches assign nn.Identity() here; if dropout
            # on the classifier output was intended when drop_p > 0, this may
            # be a latent bug -- confirm against the training script.
            self.drop_classifer = nn.Identity()
        else:
            conv = conv_block
            self.drop_classifer = nn.Identity()
        self.classifier = MetaLinear(feature_size, out_features, bias=True)
        self.features = MetaSequential(OrderedDict([('layer1', conv(in_channels, hidden_size, kernel_size=3, stride=1, padding=1, bias=True, **kwargs)), ('layer2', conv(hidden_size, hidden_size, kernel_size=3, stride=1, padding=1, bias=True, **kwargs)), ('layer3', conv(hidden_size, hidden_size, kernel_size=3, stride=1, padding=1, bias=True, **kwargs)), ('layer4', conv(hidden_size, hidden_size, kernel_size=3, stride=1, padding=1, bias=True, **kwargs))]))
    def forward(self, inputs, params=None):
        """Compute class logits; ``params`` optionally overrides module weights."""
        if self.anil:
            # ANIL: feature extractor uses its stored (meta-learned) weights.
            params_feature = None
        else:
            params_feature = self.get_subdict(params, 'features')
        features = self.features(inputs, params=params_feature)
        # Flatten all feature dims per sample before the linear head.
        features = features.view((features.size(0), (- 1)))
        logits = self.classifier(features, params=self.get_subdict(params, 'classifier'))
        logits = self.drop_classifer(logits)
        return logits
def create_loader(dataset, input_size, batch_size, is_training=False, use_prefetcher=True, no_aug=False, re_prob=0.0, re_mode='const', re_count=1, re_split=False, scale=None, ratio=None, hflip=0.5, vflip=0.0, color_jitter=0.4, auto_augment=None, num_aug_repeats=0, num_aug_splits=0, interpolation='bilinear', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_workers=1, distributed=False, crop_pct=None, collate_fn=None, pin_memory=False, fp16=False, tf_preprocessing=False, use_multi_epochs_loader=False, persistent_workers=True, worker_seeding='all'):
    """Build a DataLoader: installs timm-style transforms on `dataset`, picks a
    (possibly distributed) sampler, and optionally wraps the loader in a
    PrefetchLoader that normalizes and applies random erasing on device.

    The ``re_*`` arguments control Random Erasing; with ``re_split`` the erase
    is confined to a subset of the augmentation splits.
    """
    re_num_splits = 0
    if re_split:
        # Apply random erasing to half of the aug-split batch (default 2 splits).
        re_num_splits = (num_aug_splits or 2)
    # Mutates the dataset in place with the composed transform pipeline.
    dataset.transform = create_transform(input_size, is_training=is_training, use_prefetcher=use_prefetcher, no_aug=no_aug, scale=scale, ratio=ratio, hflip=hflip, vflip=vflip, color_jitter=color_jitter, auto_augment=auto_augment, interpolation=interpolation, mean=mean, std=std, crop_pct=crop_pct, tf_preprocessing=tf_preprocessing, re_prob=re_prob, re_mode=re_mode, re_count=re_count, re_num_splits=re_num_splits, separate=(num_aug_splits > 0))
    sampler = None
    # IterableDatasets do their own sharding; samplers only apply to map-style.
    if (distributed and (not isinstance(dataset, torch.utils.data.IterableDataset))):
        if is_training:
            if num_aug_repeats:
                sampler = RepeatAugSampler(dataset, num_repeats=num_aug_repeats)
            else:
                sampler = torch.utils.data.distributed.DistributedSampler(dataset)
        else:
            # Keeps evaluation order while padding ranks to equal length.
            sampler = OrderedDistributedSampler(dataset)
    else:
        assert (num_aug_repeats == 0), 'RepeatAugment not currently supported in non-distributed or IterableDataset use'
    if (collate_fn is None):
        # fast_collate produces uint8 batches that PrefetchLoader normalizes on GPU.
        collate_fn = (fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate)
    loader_class = torch.utils.data.DataLoader
    if use_multi_epochs_loader:
        loader_class = MultiEpochsDataLoader
    # shuffle and sampler are mutually exclusive in torch's DataLoader.
    loader_args = dict(batch_size=batch_size, shuffle=((not isinstance(dataset, torch.utils.data.IterableDataset)) and (sampler is None) and is_training), num_workers=num_workers, sampler=sampler, collate_fn=collate_fn, pin_memory=pin_memory, drop_last=is_training, worker_init_fn=partial(_worker_init, worker_seeding=worker_seeding), persistent_workers=persistent_workers)
    try:
        loader = loader_class(dataset, **loader_args)
    except TypeError as e:
        # Older torch versions don't accept persistent_workers; retry without it.
        loader_args.pop('persistent_workers')
        loader = loader_class(dataset, **loader_args)
    if use_prefetcher:
        # Random erasing runs on device in the prefetcher, only during training.
        prefetch_re_prob = (re_prob if (is_training and (not no_aug)) else 0.0)
        loader = PrefetchLoader(loader, mean=mean, std=std, fp16=fp16, re_prob=prefetch_re_prob, re_mode=re_mode, re_count=re_count, re_num_splits=re_num_splits)
    return loader
def require_tf2onnx(test_case):
    """Test decorator: skip *test_case* unless the tf2onnx package is available."""
    skip_unless_available = unittest.skipUnless(is_tf2onnx_available(), 'test requires tf2onnx')
    return skip_unless_available(test_case)
def check_optimizer_lr_wd(optimizer, gt_lr_wd):
    """Assert that *optimizer* is an AdamW whose defaults match the module-level
    ``base_lr`` / ``base_wd`` and whose param groups carry the expected
    per-group weight decay and layer-wise lr scaling.

    Args:
        optimizer: the optimizer under test.
        gt_lr_wd: list of dicts with expected 'weight_decay' and 'lr_scale',
            one entry per param group, in order.
    """
    assert isinstance(optimizer, torch.optim.AdamW)
    assert optimizer.defaults['lr'] == base_lr
    assert optimizer.defaults['weight_decay'] == base_wd
    groups = optimizer.param_groups
    print(groups)
    assert len(groups) == len(gt_lr_wd)
    for group, expected in zip(groups, gt_lr_wd):
        assert group['weight_decay'] == expected['weight_decay']
        assert group['lr_scale'] == expected['lr_scale']
        # Layer-wise decayed lr must equal the stored scale in these tests.
        assert group['lr_scale'] == group['lr']
class ROIPool3d(nn.Module):
    """3-D region-of-interest pooling module.

    Thin nn.Module wrapper around the ``roi_pool_3d`` functional: pools each
    ROI to a fixed ``output_size`` after scaling ROI coordinates by
    ``spatial_scale``.
    """

    def __init__(self, output_size, spatial_scale):
        super().__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale

    def forward(self, input, rois):
        # Delegate to the functional implementation.
        return roi_pool_3d(input, rois, self.output_size, self.spatial_scale)

    def __repr__(self):
        attrs = ', '.join([
            'output_size=' + str(self.output_size),
            'spatial_scale=' + str(self.spatial_scale),
        ])
        return '{}({})'.format(self.__class__.__name__, attrs)
def _get_entity_spans(model, input_sentences, prefix_allowed_tokens_fn, redirections=None):
    """Run constrained generation over *input_sentences* and map the generated
    text back to entity spans in the originals.

    Args:
        model: generator exposing ``sample(sentences, prefix_allowed_tokens_fn=...)``.
        prefix_allowed_tokens_fn: trie-style constraint on allowed next tokens.
        redirections: optional mapping applied when finalizing entity names.
    """
    preprocessed = get_entity_spans_pre_processing(input_sentences)
    raw_outputs = model.sample(preprocessed, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn)
    # Keep only the top beam's text for each input sentence.
    top_texts = [beams[0]['text'] for beams in raw_outputs]
    output_sentences = get_entity_spans_post_processing(top_texts)
    return get_entity_spans_finalize(input_sentences, output_sentences, redirections=redirections)
# NOTE(review): the bare `_torch` expression below looks like a truncated
# decorator (likely `@require_torch` split across an extraction boundary);
# as written it raises NameError at import time unless `_torch` is defined
# elsewhere in the file -- confirm against the upstream source.
_torch
class SelectiveCommonTest(unittest.TestCase):
    """Runs only a hand-picked subset of ModelTesterMixin tests for MarianMTModel."""
    all_model_classes = ((MarianMTModel,) if is_torch_available() else ())
    # Borrow a single test method from the mixin rather than inheriting all of it.
    test_save_load__keys_to_ignore_on_save = ModelTesterMixin.test_save_load__keys_to_ignore_on_save
    def setUp(self):
        self.model_tester = ModelTester(self)
def check_col_str_list_exists(df: 'SparkDataFrame', column: Union[(List[str], str)], arg_name: str) -> None:
    """Validate that *column* (a single name or a list of names) exists among
    ``df.columns``; report failures through ``invalidInputError``.

    Args:
        df: the Spark DataFrame to check against.
        column: a column name, or a list of column names.
        arg_name: caller-side argument name, used in the error message.
    """
    if isinstance(column, str):
        invalidInputError(column in df.columns, '{} in {} does not exist in Table'.format(column, arg_name))
    elif isinstance(column, list):
        for col_name in column:
            invalidInputError(col_name in df.columns, '{} in {} does not exist in Table'.format(col_name, arg_name))
    else:
        # Neither str nor list: reject the input type outright.
        invalidInputError(False, 'elements in cat_cols should be str or list of str but get ' + str(column))
def namespace2dict(namespace):
    """Recursively convert a NamespaceMap (and any nested ones) into plain dicts.

    Non-NamespaceMap values are carried over unchanged.
    """
    return {
        key: (namespace2dict(value) if isinstance(value, NamespaceMap) else value)
        for (key, value) in dict(**namespace).items()
    }
class LinearSpectStepper(SpectStepper):
    """Time stepper whose right-hand side is a linear differential operator
    with coefficients stored in ``self.coe``; only the 2-D case is implemented.
    """
    def RightHandItemsSpect(self, u_spect, **kw):
        """Evaluate the linear operator applied to ``u_spect`` (solution in
        spectral representation), summing coe[j, k-j] * D^(j, k-j) u over all
        derivative orders with total order k <= 2, and return the result in
        spectral space. Coefficients may be scalars (constant coefficients)
        or tensors (spatially varying).
        """
        if (self.dim != 2):
            raise NotImplementedError
        coe = self.coe
        rhi = 0
        # Total derivative order k = 0, 1, 2: j derivatives in x, k-j in y.
        for k in range(3):
            for j in range((k + 1)):
                if isinstance(coe[(j, (k - j))], (int, float)):
                    # Skip terms with vanishing constant coefficients.
                    if (coe[(j, (k - j))] == 0):
                        continue
                u_diff = spect_diff(u_spect, signal_ndim=2, order=(j, (k - j)), mesh_bound=self.mesh_bound)
                # Back to physical space for the pointwise accumulation below.
                u_diff = spect2time(u_diff, signal_ndim=2)
                # NOTE(review): first-order (k == 1) terms enter with flipped
                # sign -- presumably a convention of the surrounding solver;
                # confirm against SpectStepper before changing.
                if (k == 1):
                    tmp = (- coe[(j, (k - j))])
                else:
                    tmp = coe[(j, (k - j))]
                if isinstance(tmp, torch.Tensor):
                    # Spatially-varying coefficient: apply via the SpectMul helper.
                    rhi = (rhi + SpectMul(tmp, u_diff, signal_ndim=2))
                else:
                    rhi = (rhi + (u_diff * tmp))
        # Transform the accumulated result back to spectral space.
        return time2spect(rhi, signal_ndim=2)
class GraphModule(object):
    """Base class that wraps subclass ``_build`` in a ``tf.make_template`` so
    that repeated calls to the module share variables under one scope.
    """
    def __init__(self, name):
        self.name = name
        # create_scope_now_=True pins the variable scope at construction time
        # rather than at the first call.
        self._template = tf.make_template(name, self._build, create_scope_now_=True)
        # Surface the subclass _build docstring on the module and on __call__.
        self.__doc__ = self._build.__doc__
        self.__call__.__func__.__doc__ = self._build.__doc__
    def _build(self, *args, **kwargs):
        """Subclasses construct their portion of the graph here."""
        raise NotImplementedError
    def __call__(self, *args, **kwargs):
        # Delegate to the template so variables are reused across calls.
        return self._template(*args, **kwargs)
    def variable_scope(self):
        """Return a tf.variable_scope for the scope owned by this module's template."""
        return tf.variable_scope(self._template.variable_scope)
class DNN(models.Sequential):
    """Fully-connected classifier: two ReLU hidden layers (each followed by
    dropout) and a softmax output, compiled for categorical cross-entropy.

    Args:
        Nin: input feature dimension.
        Nh_l: sizes of the two hidden layers (indexable, length >= 2).
        Pd_l: dropout rates after each hidden layer (indexable, length >= 2).
        Nout: number of output classes.
    """

    def __init__(self, Nin, Nh_l, Pd_l, Nout):
        super().__init__()
        for idx in (0, 1):
            dense_kwargs = {'activation': 'relu', 'name': 'Hidden-%d' % (idx + 1)}
            if idx == 0:
                # Only the first layer declares the input shape.
                dense_kwargs['input_shape'] = (Nin,)
            self.add(layers.Dense(Nh_l[idx], **dense_kwargs))
            self.add(layers.Dropout(Pd_l[idx]))
        self.add(layers.Dense(Nout, activation='softmax'))
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
send_example_telemetry('run_summarization', model_args, data_args, framework='tensorflow')
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
logger.setLevel(logging.INFO)
datasets.utils.logging.set_verbosity(logging.INFO)
transformers.utils.logging.set_verbosity(logging.INFO)
logger.info(f'Training/evaluation parameters {training_args}')
if ((data_args.source_prefix is None) and (model_args.model_name_or_path in ['t5-small', 't5-base', 't5-large', 't5-3b', 't5-11b'])):
logger.warning("You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with `--source_prefix 'summarize: ' `")
last_checkpoint = None
if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
set_seed(training_args.seed)
if (data_args.dataset_name is not None):
raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
else:
data_files = {}
if (data_args.train_file is not None):
data_files['train'] = data_args.train_file
extension = data_args.train_file.split('.')[(- 1)]
if (data_args.validation_file is not None):
data_files['validation'] = data_args.validation_file
extension = data_args.validation_file.split('.')[(- 1)]
if (data_args.test_file is not None):
data_files['test'] = data_args.test_file
extension = data_args.test_file.split('.')[(- 1)]
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
prefix = (data_args.source_prefix if (data_args.source_prefix is not None) else '')
if training_args.do_train:
column_names = raw_datasets['train'].column_names
elif training_args.do_eval:
column_names = raw_datasets['validation'].column_names
else:
logger.info('There is nothing to do. Please pass `do_train`, and/or `do_eval`.')
return
dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None)
if (data_args.text_column is None):
text_column = (dataset_columns[0] if (dataset_columns is not None) else column_names[0])
else:
text_column = data_args.text_column
if (text_column not in column_names):
raise ValueError(f"--text_column' value '{data_args.text_column}' needs to be one of: {', '.join(column_names)}")
if (data_args.summary_column is None):
summary_column = (dataset_columns[1] if (dataset_columns is not None) else column_names[1])
else:
summary_column = data_args.summary_column
if (summary_column not in column_names):
raise ValueError(f"--summary_column' value '{data_args.summary_column}' needs to be one of: {', '.join(column_names)}")
max_target_length = data_args.max_target_length
padding = ('max_length' if data_args.pad_to_max_length else False)
def preprocess_function(examples):
inputs = examples[text_column]
targets = examples[summary_column]
inputs = [(prefix + inp) for inp in inputs]
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True)
if ((padding == 'max_length') and data_args.ignore_pad_token_for_loss):
labels['input_ids'] = [[(l if (l != tokenizer.pad_token_id) else (- 100)) for l in label] for label in labels['input_ids']]
model_inputs['labels'] = labels['input_ids']
return model_inputs
if training_args.do_train:
if ('train' not in raw_datasets):
raise ValueError('--do_train requires a train dataset')
train_dataset = raw_datasets['train']
if (data_args.max_train_samples is not None):
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
with training_args.main_process_first(desc='train dataset map pre-processing'):
train_dataset = train_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on train dataset')
else:
train_dataset = None
if training_args.do_eval:
max_target_length = data_args.val_max_target_length
if ('validation' not in raw_datasets):
raise ValueError('--do_eval requires a validation dataset')
eval_dataset = raw_datasets['validation']
if (data_args.max_eval_samples is not None):
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
with training_args.main_process_first(desc='validation dataset map pre-processing'):
eval_dataset = eval_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on validation dataset')
else:
eval_dataset = None
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [label.strip() for label in labels]
preds = ['\n'.join(nltk.sent_tokenize(pred)) for pred in preds]
labels = ['\n'.join(nltk.sent_tokenize(label)) for label in labels]
return (preds, labels)
with training_args.strategy.scope():
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
embeddings = model.get_input_embeddings()
if hasattr(embeddings, 'embeddings'):
embedding_size = embeddings.embeddings.shape[0]
else:
embedding_size = embeddings.weight.shape[0]
if (len(tokenizer) > embedding_size):
model.resize_token_embeddings(len(tokenizer))
if (model.config.decoder_start_token_id is None):
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')
label_pad_token_id = ((- 100) if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id)
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=128, return_tensors='np')
dataset_options = tf.data.Options()
dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
num_replicas = training_args.strategy.num_replicas_in_sync
total_train_batch_size = (training_args.per_device_train_batch_size * num_replicas)
total_eval_batch_size = (training_args.per_device_eval_batch_size * num_replicas)
tf_train_dataset = model.prepare_tf_dataset(train_dataset, collate_fn=data_collator, batch_size=total_train_batch_size, shuffle=True).with_options(dataset_options)
tf_eval_dataset = model.prepare_tf_dataset(eval_dataset, collate_fn=data_collator, batch_size=total_eval_batch_size, shuffle=False).with_options(dataset_options)
num_train_steps = int((len(tf_train_dataset) * training_args.num_train_epochs))
if (training_args.warmup_steps > 0):
num_warmup_steps = training_args.warmup_steps
elif (training_args.warmup_ratio > 0):
num_warmup_steps = int((num_train_steps * training_args.warmup_ratio))
else:
num_warmup_steps = 0
if training_args.do_train:
(optimizer, lr_schedule) = create_optimizer(init_lr=training_args.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, adam_beta1=training_args.adam_beta1, adam_beta2=training_args.adam_beta2, adam_epsilon=training_args.adam_epsilon, weight_decay_rate=training_args.weight_decay, adam_global_clipnorm=training_args.max_grad_norm)
else:
optimizer = None
if training_args.do_eval:
metric = evaluate.load('rouge')
if (data_args.val_max_target_length is None):
data_args.val_max_target_length = data_args.max_target_length
gen_kwargs = {'max_length': (data_args.val_max_target_length if (data_args is not None) else config.max_length), 'num_beams': data_args.num_beams, 'no_repeat_ngram_size': 0}
def compute_metrics(preds):
(predictions, labels) = preds
if isinstance(predictions, tuple):
predictions = predictions[0]
decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)
labels = np.where((labels != (- 100)), labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
(decoded_preds, decoded_labels) = postprocess_text(decoded_preds, decoded_labels)
metrics = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
metrics = {key: round((val.mid.fmeasure * 100), 4) for (key, val) in metrics.items()}
return metrics
metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_eval_dataset, predict_with_generate=True, use_xla_generation=True, generate_kwargs=gen_kwargs)
callbacks = [metric_callback]
else:
callbacks = []
push_to_hub_model_id = training_args.push_to_hub_model_id
model_name = model_args.model_name_or_path.split('/')[(- 1)]
if (not push_to_hub_model_id):
if (data_args.dataset_name is not None):
push_to_hub_model_id = f'{model_name}-finetuned-{data_args.dataset_name}'
else:
push_to_hub_model_id = f'{model_name}-finetuned-summarization'
model_card_kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'summarization'}
if (data_args.dataset_name is not None):
model_card_kwargs['dataset_tags'] = data_args.dataset_name
if (data_args.dataset_config_name is not None):
model_card_kwargs['dataset_args'] = data_args.dataset_config_name
model_card_kwargs['dataset'] = f'{data_args.dataset_name} {data_args.dataset_config_name}'
else:
model_card_kwargs['dataset'] = data_args.dataset_name
if training_args.push_to_hub:
callbacks.append(PushToHubCallback(output_dir=training_args.output_dir, hub_model_id=push_to_hub_model_id, hub_token=training_args.push_to_hub_token, tokenizer=tokenizer, **model_card_kwargs))
model.compile(optimizer=optimizer, jit_compile=training_args.xla)
eval_metrics = None
if training_args.do_train:
logger.info('***** Running training *****')
logger.info(f' Num examples = {len(train_dataset)}')
logger.info(f' Num Epochs = {training_args.num_train_epochs}')
logger.info(f' Instantaneous batch size per device = {training_args.per_device_train_batch_size}')
logger.info(f' Total train batch size = {total_train_batch_size}')
logger.info(f' Total optimization steps = {num_train_steps}')
if (training_args.xla and (not data_args.pad_to_max_length)):
logger.warning('XLA training may be slow at first when --pad_to_max_length is not set until all possible shapes have been compiled.')
history = model.fit(tf_train_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks)
eval_metrics = {key: val[(- 1)] for (key, val) in history.history.items()}
if (training_args.do_eval and (not training_args.do_train)):
logger.info('Evaluation...')
(jit_compile=True)
def generate(**kwargs):
return model.generate(**kwargs)
for (batch, labels) in tf_eval_dataset:
batch.update(gen_kwargs)
generated_tokens = generate(**batch)
if isinstance(generated_tokens, tuple):
generated_tokens = generated_tokens[0]
decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
labels = np.where((labels != (- 100)), labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
(decoded_preds, decoded_labels) = postprocess_text(decoded_preds, decoded_labels)
metric.add_batch(predictions=decoded_preds, references=decoded_labels)
eval_metrics = metric.compute(use_stemmer=True)
result = {key: round((val.mid.fmeasure * 100), 4) for (key, val) in eval_metrics.items()}
logger.info(result)
if ((training_args.output_dir is not None) and (eval_metrics is not None)):
output_eval_file = os.path.join(training_args.output_dir, 'all_results.json')
with open(output_eval_file, 'w') as writer:
writer.write(json.dumps(eval_metrics))
if ((training_args.output_dir is not None) and (not training_args.push_to_hub)):
model.save_pretrained(training_args.output_dir) |
class GroupedEpochBatchIterator(EpochBatchIterator):
    """Epoch batch iterator over *grouped* batch samplers.

    Each element of ``batch_samplers`` is one group of batches (e.g. one group
    per dataset or language pair). Per epoch, every group is truncated to a
    multiple of ``step_size = mult_rate * num_shards`` so that consecutive runs
    of ``step_size`` batches all come from the same group; those runs are then
    shuffled as indivisible units before sharding across workers.
    """

    def __init__(self, dataset, collate_fn, batch_samplers, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0, mult_rate=1, buffer_size=0, skip_remainder_batch=False):
        super().__init__(dataset, collate_fn, batch_samplers, seed, num_shards, shard_id, num_workers, epoch, buffer_size, skip_remainder_batch=skip_remainder_batch)
        # Freeze every group's batch list so per-epoch shuffles work on copies.
        self._frozen_batches = tuple([tuple(sub_batch) for sub_batch in batch_samplers])
        # Batches are regrouped into contiguous runs of this many.
        self.step_size = (mult_rate * num_shards)
        # Usable batch count per group after truncation to a step_size multiple.
        self.lengths = [((len(x) // self.step_size) * self.step_size) for x in self.frozen_batches]

    def __len__(self):
        # Total number of batches across all groups, after truncation.
        return sum(self.lengths)

    def first_batch(self):
        """Materialize the very first batch (used e.g. for dummy-batch warmup)."""
        if (len(self.frozen_batches) == 0):
            raise Exception('The dataset is empty. This could indicate that all elements in the dataset have been skipped. Try increasing the max number of allowed tokens or using a larger dataset.')
        if self.dataset.supports_fetch_outside_dataloader:
            return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0][0]])
        else:
            # Fallback marker when items cannot be fetched outside the DataLoader.
            return 'DUMMY'

    def _get_iterator_for_epoch(self, epoch, shuffle, fix_batches_to_gpus=False, offset=0):
        # Deterministically shuffle a group of batches with a seeded RNG.
        def shuffle_batches(batches, seed):
            with data_utils.numpy_seed(seed):
                np.random.shuffle(batches)
            return batches
        # Truncate each group, concatenate, then shuffle step_size-sized runs
        # as whole units so every shard sees batches from a single group.
        def return_full_batches(batch_sets, seed, shuffle):
            if shuffle:
                batch_sets = [shuffle_batches(list(x), seed) for x in batch_sets]
            batch_sets = [batch_sets[i][:self.lengths[i]] for i in range(len(batch_sets))]
            batches = list(itertools.chain.from_iterable(batch_sets))
            if shuffle:
                with data_utils.numpy_seed(seed):
                    idx = np.random.permutation((len(batches) // self.step_size))
                    if ((len(idx) * self.step_size) != len(batches)):
                        raise ValueError(('ERROR: %d %d %d %d' % (len(idx), self.step_size, len(batches), self.shard_id)), ':'.join([('%d' % x) for x in self.lengths]))
                    mini_shards = [batches[(i * self.step_size):((i + 1) * self.step_size)] for i in idx]
                    batches = list(itertools.chain.from_iterable(mini_shards))
            return batches
        if self._supports_prefetch:
            raise NotImplementedError('To be implemented')
        else:
            batches = return_full_batches(self.frozen_batches, (self.seed + epoch), shuffle)
            batches = list(ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]))
        if ((offset > 0) and (offset >= len(batches))):
            # Nothing left to iterate after resuming at this offset.
            return None
        if (self.num_workers > 0):
            # Silence a noisy multiprocessing semaphore-tracker warning.
            os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
        itr = torch.utils.data.DataLoader(self.dataset, collate_fn=self.collate_fn, batch_sampler=batches[offset:], num_workers=self.num_workers, persistent_workers=self.persistent_workers)
        if (self.buffer_size > 0):
            itr = BufferedIterator(self.buffer_size, itr)
        return CountingIterator(itr, start=offset)
@register_pruner('pt_pattern_lock')
class PytorchPatternLockPruner(PytorchBasePruner):
    """Pruner that locks in a module's existing sparsity pattern (one-shot).

    NOTE(review): the registration decorator was garbled to a bare
    ``_pruner('pt_pattern_lock')`` expression in the source (a syntax error);
    restored as ``@register_pruner`` — confirm the registry helper's exact
    name elsewhere in this file.
    """

    def __init__(self, config, modules):
        super().__init__(config, modules)
        self.pattern = get_pattern(self.config, modules)
        # One-shot by design: the pattern is captured at a single step.
        assert (self.config.end_step == self.config.start_step), 'pattern_lock pruner only supports one shot mode'

    def update_masks(self, local_step):
        """Capture masks from the modules' current sparsity pattern at the pruning step."""
        if (not self.check_is_pruned_step(self.global_step)):
            return
        self.masks = self.pattern.get_pattern_lock_masks(self.modules)

    def on_after_optimizer_step(self):
        """Re-apply the locked masks after each optimizer update."""
        self.mask_weights()
        self.global_step += 1
def test_get_mseg_label_map_fpath_from_image_info() -> None:
    """Label-map paths must follow the mseg-3m-480 universal naming scheme."""
    expected = '/path/to/label/maps/mseg-3m-480_abc__2020_06_01_ring_rear_left_universal_ss/358/gray/ring_rear_left_9999.png'
    actual = mseg_interface.get_mseg_label_map_fpath_from_image_info(
        '/path/to/label/maps',
        'abc__2020_06_01',
        'ring_rear_left',
        'ring_rear_left_9999',
    )
    assert (actual == expected)
class ConvertTo32Bit(object):
    """Environment wrapper that narrows 64-bit observations and rewards to
    their 32-bit counterparts, rejecting any non-finite values."""

    def __init__(self, env):
        self._env = env

    def __getattr__(self, name):
        # Forward attributes we do not define to the wrapped environment.
        return getattr(self._env, name)

    def step(self, action):
        """Step the wrapped env and narrow the observation and reward."""
        observ, reward, done, info = self._env.step(action)
        return (self._convert_observ(observ), self._convert_reward(reward), done, info)

    def reset(self):
        """Reset the wrapped env and narrow the initial observation."""
        return self._convert_observ(self._env.reset())

    def _convert_observ(self, observ):
        # Fail fast on NaN/inf observations rather than propagating them.
        if (not np.isfinite(observ).all()):
            raise ValueError('Infinite observation encountered.')
        for wide, narrow in ((np.float64, np.float32), (np.int64, np.int32)):
            if (observ.dtype == wide):
                return observ.astype(narrow)
        return observ

    def _convert_reward(self, reward):
        # Fail fast on NaN/inf rewards rather than propagating them.
        if (not np.isfinite(reward).all()):
            raise ValueError('Infinite reward encountered.')
        return np.array(reward, dtype=np.float32)
def compute_tflops(elapsed_time, accelerator, args):
    """Estimate achieved TFLOPs for one training iteration.

    Uses the module-level ``model`` and ``tokenizer``; the formula follows the
    usual transformer FLOPs estimate (24 * ckpt * B * s * L * d^2 with
    attention and logit correction terms).
    """
    config_model = accelerator.unwrap_model(model).config
    # Activation checkpointing recomputes the forward pass: 4 passes vs 3.
    checkpoint_factor = 4 if args.gradient_checkpointing else 3
    batch_size = args.train_batch_size * accelerator.state.num_processes * args.gradient_accumulation_steps
    factor = 24 * checkpoint_factor * batch_size * args.seq_length * config_model.n_layer * (config_model.n_embd ** 2)
    attention_term = args.seq_length / (6.0 * config_model.n_embd)
    logits_term = tokenizer.vocab_size / (16.0 * config_model.n_layer * config_model.n_embd)
    flops_per_iteration = factor * ((1.0 + attention_term) + logits_term)
    return flops_per_iteration / (elapsed_time * accelerator.state.num_processes * (10 ** 12))
def worker_num_per_node():
    """Number of worker processes on this node.

    When coworker mode is enabled, coworker slots are subtracted from the
    total process count; otherwise every process is a worker.
    """
    total = nproc_per_node()
    return (total - coworker_num_per_node()) if use_coworker() else total
def list_models(filter='', module='', pretrained=False, exclude_filters=''):
    """Return sorted model names, optionally narrowed down.

    Args:
        filter: wildcard pattern; only matching names are kept.
        module: restrict to models registered under this module.
        pretrained: keep only models with pretrained weights available.
        exclude_filters: wildcard pattern (or list of patterns) to drop.
    """
    if module:
        candidates = list(_module_to_models[module])
    else:
        candidates = _model_entrypoints.keys()
    if filter:
        candidates = fnmatch.filter(candidates, filter)
    if exclude_filters:
        patterns = exclude_filters if isinstance(exclude_filters, list) else [exclude_filters]
        for pattern in patterns:
            dropped = fnmatch.filter(candidates, pattern)
            if dropped:
                candidates = set(candidates).difference(dropped)
    if pretrained:
        candidates = _model_has_pretrained.intersection(candidates)
    return list(sorted(candidates, key=_natural_key))
def fanin_init_weights_like(tensor):
    """Return a fresh tensor shaped like ``tensor`` with fan-in uniform init.

    Values are drawn from U(-1/sqrt(fan_in), 1/sqrt(fan_in)); fan_in is the
    first dimension for 2-D tensors, otherwise the product of the trailing
    dimensions. Raises for tensors with fewer than two dimensions.
    """
    size = tensor.size()
    if (len(size) < 2):
        raise Exception('Shape must be have dimension at least 2.')
    fan_in = size[0] if (len(size) == 2) else np.prod(size[1:])
    bound = 1.0 / np.sqrt(fan_in)
    result = FloatTensor(tensor.size())
    result.uniform_(-bound, bound)
    return result
class QueryResponseDataset(Dataset):
    """Dataset of left-padded tokenized prompts ("queries") built from a DataFrame.

    Each row is formatted with ``format_prompt``/``prompt_dict``, tokenized,
    and queries longer than ``query_len`` are dropped (training only); the
    survivors are left-padded to exactly ``query_len`` tokens.
    """

    def __init__(self, df: pd.DataFrame, prompt_dict: dict, tokenizer: transformers.PreTrainedTokenizer, query_len: int, df_postprocessor: Optional[Callable]=None):
        super(QueryResponseDataset, self).__init__()
        if (df_postprocessor is not None):
            df = df_postprocessor(df)
        list_dict_data = df.to_dict(orient='records')
        prompts = [format_prompt(example=dict_data, prompt_dict=prompt_dict) for dict_data in list_dict_data]
        # Tokenize without truncation so over-length prompts can be filtered out.
        queries = [tokenizer(prompt, return_tensors='pt', truncation=False).input_ids.squeeze(dim=0) for prompt in prompts]
        filtered_queries = [query for query in queries if (len(query) <= query_len)]
        logger.warning(f'Filtered out {(len(queries) - len(filtered_queries))} instances out of {len(queries)} that exceed length limit. These examples are not used for training, but will still be used in evaluation. ')
        # Left-pad every surviving query to query_len with the pad token.
        queries = torch.stack([torch_ops.left_pad(query, target_size=(query_len,), value=tokenizer.pad_token_id) for query in filtered_queries])
        self.queries = queries
        # 1 for real tokens, 0 for padding positions.
        self.query_attn_masks = queries.ne(tokenizer.pad_token_id).long()
        # Unfiltered prompts/rows are kept for evaluation-time use.
        self.prompts = prompts
        self.list_dict_data = list_dict_data

    def __getitem__(self, i):
        return_dict = dict(queries=self.queries[i], query_attn_masks=self.query_attn_masks[i])
        return return_dict

    def __len__(self):
        return len(self.queries)
def get_events(instrument, filter='note'):
    """Collect events of one kind from a miditoolkit instrument/track.

    Args:
        instrument: Iterable of miditoolkit event objects.
        filter: 'note' for Note events, 'pitch_bends' for PitchBend events.
            (Parameter name kept for caller compatibility even though it
            shadows the builtin.)

    Returns:
        List of matching events in iteration order; empty for an unknown
        filter value or empty input.
    """
    ret = []
    for item in instrument:
        # isinstance replaces the exact `type(x) ==` checks; it also accepts
        # subclasses, which is the conventional Python type test.
        if ((filter == 'note') and isinstance(item, miditoolkit.midi.containers.Note)):
            ret.append(item)
        elif ((filter == 'pitch_bends') and isinstance(item, miditoolkit.midi.containers.PitchBend)):
            ret.append(item)
    return ret
def getHyper_bolT():
    """Default BolT hyper-parameter set, wrapped in an Option object."""
    hyper_params = dict(
        weightDecay=0,
        lr=0.0002,
        minLr=2e-05,
        maxLr=0.0004,
        nOfLayers=4,
        dim=400,
        numHeads=36,
        headDim=20,
        windowSize=20,
        shiftCoeff=2.0 / 5.0,
        fringeCoeff=2,
        focalRule='expand',
        mlpRatio=1.0,
        attentionBias=True,
        drop=0.1,
        attnDrop=0.1,
        lambdaCons=1,
        pooling='cls',
    )
    return Option(hyper_params)
def generate_dummy_code_boost(nclasses=10):
    """Generate C++ source for a dummy Boost.Python module.

    Emits ``nclasses`` classes, each with methods returning/taking pointers to
    randomly chosen classes, plus the matching ``py::class_`` bindings.

    NOTE(review): relies on module-level ``nfns`` (methods per class) and
    ``nargs`` (parameters per method) defined elsewhere in this file — confirm
    they exist before calling.
    """
    decl = ''
    bindings = ''
    # Forward declarations so method signatures can reference any class.
    for cl in range(nclasses):
        decl += ('class cl%03i;\n' % cl)
    decl += '\n'
    for cl in range(nclasses):
        decl += ('class cl%03i {\n' % cl)
        decl += 'public:\n'
        bindings += ('    py::class_<cl%03i>("cl%03i")\n' % (cl, cl))
        for fn in range(nfns):
            # Random return type and parameter types, drawn from the classes.
            ret = random.randint(0, (nclasses - 1))
            params = [random.randint(0, (nclasses - 1)) for i in range(nargs)]
            decl += ('    cl%03i *fn_%03i(' % (ret, fn))
            decl += ', '.join((('cl%03i *' % p) for p in params))
            decl += ');\n'
            bindings += ('        .def("fn_%03i", &cl%03i::fn_%03i, py::return_value_policy<py::manage_new_object>())\n' % (fn, cl, fn))
        decl += '};\n\n'
        bindings += '    ;\n'
    result = '#include <boost/python.hpp>\n\n'
    result += 'namespace py = boost::python;\n\n'
    result += (decl + '\n')
    result += 'BOOST_PYTHON_MODULE(example) {\n'
    result += bindings
    result += '}'
    return result
def main():
    """End-to-end train/eval/predict driver.

    Loads config and model, optionally trains (resuming from the newest
    checkpoint if present), then runs evaluation, prediction, and optional
    multi-testset prediction with shared generation kwargs.
    """
    (cfg, training_args) = prepare_args()
    (model, preprocessor) = load_pretrained(cfg.model_args, training_args)
    # Adapt model/processor to the configured target task before counting params.
    (model, preprocessor) = smart_prepare_target_processor(model, preprocessor, cfg.model_args, training_args)
    print_trainable_params(model)
    collator_kwargs = cfg.data_args.collator_kwargs
    (trainer_cls, data_collator_dict) = prepare_trainer_collator(cfg.model_args, preprocessor, collator_kwargs)
    (dataset, compute_metrics) = prepare_data(cfg.data_args, cfg.model_args, training_args, preprocessor)
    trainer = trainer_cls(model=model, args=training_args, tokenizer=preprocessor['text'], train_dataset=(dataset['train'] if training_args.do_train else None), eval_dataset=(dataset['validation'] if training_args.do_eval else None), compute_metrics=(compute_metrics if training_args.predict_with_generate else None), **data_collator_dict)
    if training_args.do_train:
        try:
            # Resume automatically when any checkpoint-* dir exists.
            if list(pathlib.Path(training_args.output_dir).glob('checkpoint-*')):
                train_result = trainer.train(resume_from_checkpoint=True)
            else:
                train_result = trainer.train()
            trainer.log_metrics('train', train_result.metrics)
            trainer.save_metrics('train', train_result.metrics)
            trainer.save_model()
        except RuntimeError as e:
            # Most RuntimeErrors here are CUDA OOM; dump a memory summary to help debug.
            print(f'got RuntimeError: {e.args}')
            try:
                print(f'''#### device {training_args.local_rank} summary ####
{torch.cuda.memory_summary(training_args.local_rank)}''')
            except Exception as inner_e:
                print(f'get Exception when show cuda summary: {inner_e.args}')
            raise e
        finally:
            # Persist trainer state and loss curve even on failure.
            trainer.save_state()
            trainer.plot_loss()
        try:
            # Best-effort: snapshot the config next to the outputs for reproducibility.
            output_dir = training_args.output_dir
            pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
            cfg.dump(os.path.join(output_dir, 'cfg.py'))
        except Exception as e:
            warnings.warn(f'try to save cfg to output_dir, but get exception {e.args}')
    # Generation kwargs shared by eval/predict; optional token ids come from config flags.
    gen_kwargs = dict(cfg.data_args.gen_kwargs)
    gen_kwargs.setdefault('use_cache', True)
    if (hasattr(cfg.model_args, 'gen_kwargs_set_pad_token_id') and cfg.model_args.gen_kwargs_set_pad_token_id):
        gen_kwargs['pad_token_id'] = preprocessor['text'].pad_token_id
    if (hasattr(cfg.model_args, 'gen_kwargs_set_bos_token_id') and cfg.model_args.gen_kwargs_set_bos_token_id):
        gen_kwargs['bos_token_id'] = preprocessor['text'].bos_token_id
    if (hasattr(cfg.model_args, 'gen_kwargs_set_eos_token_id') and cfg.model_args.gen_kwargs_set_eos_token_id):
        gen_kwargs['eos_token_id'] = preprocessor['text'].eos_token_id
    if training_args.do_eval:
        # Evaluation goes through trainer.predict, which uses the test collator.
        if (hasattr(trainer, '_test_collator') and hasattr(trainer, '_eval_collator') and (trainer._test_collator != trainer._eval_collator)):
            warnings.warn('[WARNING!!!] use different collator for eval and test. but do_eval and do_predict both use trainer.predict (i.e. only test_collator is used.)')
        eval_results = trainer.predict(dataset['validation'], metric_key_prefix='eval', **gen_kwargs)
        trainer.log_metrics('eval', eval_results.metrics)
        trainer.save_metrics('eval', eval_results.metrics)
        trainer.save_prediction(eval_results, file_key_prefix='eval')
    if training_args.do_predict:
        predict_results = trainer.predict(dataset['test'], metric_key_prefix='test', **gen_kwargs)
        trainer.log_metrics('test', predict_results.metrics)
        trainer.save_metrics('test', predict_results.metrics)
        trainer.save_prediction(predict_results, file_key_prefix='test')
    if training_args.do_multi_predict:
        # Run prediction over each named test set with its own metric function,
        # temporarily swapping trainer.compute_metrics.
        old_compute_metrics = trainer.compute_metrics
        multitest = dataset['multitest']
        multitest = typing.cast(dict, multitest)
        for (_idx, (k, item)) in enumerate(multitest.items()):
            print(f'processing multitest set {_idx}/{len(multitest)}: {k}')
            _ds = item['dataset']
            _compute_metrics = item['compute_metric']
            _prefix = f'multitest_{k}'
            trainer.compute_metrics = _compute_metrics
            _pred_results = trainer.predict(_ds, metric_key_prefix=_prefix, **gen_kwargs)
            trainer.log_metrics(_prefix, _pred_results.metrics)
            trainer.save_metrics(_prefix, _pred_results.metrics)
            trainer.save_prediction(_pred_results, file_key_prefix=_prefix)
        trainer.compute_metrics = old_compute_metrics
class NystromformerForMaskedLM(metaclass=DummyObject):
    """Auto-generated placeholder used when the 'torch' backend is missing.

    Instantiating it calls ``requires_backends``, which raises an informative
    error telling the user to install torch.
    """
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class BasicMultiUpdateBlock(nn.Module):
    """Multi-scale GRU update block (RAFT-style) over 1/8, 1/16, 1/32 states.

    Each resolution keeps its own ConvGRU; coarser states receive pooled
    finer states and (when enabled) upsampled coarser states as extra input.
    The finest level also produces a flow delta and an upsampling mask.
    """

    def __init__(self, args, hidden_dims=[]):
        super().__init__()
        self.args = args
        self.encoder = BasicMotionEncoder(args)
        encoder_output_dim = 128
        # Extra input channels are gated by n_gru_layers: the boolean factors
        # zero out the cross-scale channels when fewer GRU levels are used.
        self.gru08 = ConvGRU(hidden_dims[2], (encoder_output_dim + (hidden_dims[1] * (args.n_gru_layers > 1))))
        self.gru16 = ConvGRU(hidden_dims[1], ((hidden_dims[0] * (args.n_gru_layers == 3)) + hidden_dims[2]))
        self.gru32 = ConvGRU(hidden_dims[0], hidden_dims[1])
        self.flow_head = FlowHead(hidden_dims[2], hidden_dim=256, output_dim=2)
        factor = (2 ** self.args.n_downsample)
        # Predicts convex-upsampling weights: 9 taps per upsampled pixel.
        self.mask = nn.Sequential(nn.Conv2d(hidden_dims[2], 256, 3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(256, ((factor ** 2) * 9), 1, padding=0))

    def forward(self, net, inp, corr=None, flow=None, iter08=True, iter16=True, iter32=True, update=True):
        """Update hidden states coarsest-first; optionally emit mask and flow delta.

        net: list of hidden states [1/32, 1/16, 1/8]; inp: per-scale context inputs.
        Returns net only when update=False, else (net, mask, delta_flow).
        """
        if iter32:
            net[2] = self.gru32(net[2], *inp[2], pool2x(net[1]))
        if iter16:
            if (self.args.n_gru_layers > 2):
                # Include the upsampled coarsest state when 3 GRU levels are active.
                net[1] = self.gru16(net[1], *inp[1], pool2x(net[0]), interp(net[2], net[1]))
            else:
                net[1] = self.gru16(net[1], *inp[1], pool2x(net[0]))
        if iter08:
            motion_features = self.encoder(flow, corr)
            if (self.args.n_gru_layers > 1):
                net[0] = self.gru08(net[0], *inp[0], motion_features, interp(net[1], net[0]))
            else:
                net[0] = self.gru08(net[0], *inp[0], motion_features)
        if (not update):
            return net
        delta_flow = self.flow_head(net[0])
        # Scale mask logits to balance gradients (RAFT convention).
        mask = (0.25 * self.mask(net[0]))
        return (net, mask, delta_flow)
def _test():
    """Smoke-test the ZFNet variants: parameter counts and output shape."""
    import torch
    pretrained = False
    models = [zfnet, zfnetb]
    for model in models:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        # Reference parameter counts restored — the numeric constants were
        # lost in extraction; values per the pytorchcv model zoo (verify).
        assert ((model != zfnet) or (weight_count == 62357608))
        assert ((model != zfnetb) or (weight_count == 107627624))
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
def main():
    """Entry point: parse args, set up (optionally distributed) training, and
    dispatch to test / finetune / pretrain."""
    args = parser.get_args()
    args.use_gpu = torch.cuda.is_available()
    if args.use_gpu:
        # Let cuDNN pick the fastest kernels for fixed input shapes.
        torch.backends.cudnn.benchmark = True
    if (args.launcher == 'none'):
        args.distributed = False
    else:
        args.distributed = True
        dist_utils.init_dist(args.launcher)
        (_, world_size) = dist_utils.get_dist_info()
        args.world_size = world_size
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = os.path.join(args.experiment_path, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, name=args.log_name)
    if (not args.test):
        # TensorBoard writers only on the rank-0 process.
        if (args.local_rank == 0):
            train_writer = SummaryWriter(os.path.join(args.tfboard_path, 'train'))
            val_writer = SummaryWriter(os.path.join(args.tfboard_path, 'test'))
        else:
            train_writer = None
            val_writer = None
    config = get_config(args, logger=logger)
    if args.distributed:
        # Split the total batch size evenly across ranks; val/extra_train use 2x.
        assert ((config.total_bs % world_size) == 0)
        config.dataset.train.others.bs = (config.total_bs // world_size)
        if config.dataset.get('extra_train'):
            config.dataset.extra_train.others.bs = ((config.total_bs // world_size) * 2)
        config.dataset.val.others.bs = ((config.total_bs // world_size) * 2)
        if config.dataset.get('test'):
            config.dataset.test.others.bs = (config.total_bs // world_size)
    else:
        config.dataset.train.others.bs = config.total_bs
        if config.dataset.get('extra_train'):
            config.dataset.extra_train.others.bs = (config.total_bs * 2)
        config.dataset.val.others.bs = (config.total_bs * 2)
        if config.dataset.get('test'):
            config.dataset.test.others.bs = config.total_bs
    log_args_to_file(args, 'args', logger=logger)
    log_config_to_file(config, 'config', logger=logger)
    logger.info(f'Distributed training: {args.distributed}')
    if (args.seed is not None):
        logger.info(f'Set random seed to {args.seed}, deterministic: {args.deterministic}')
        # Offset by rank so each process draws a different stream.
        misc.set_random_seed((args.seed + args.local_rank), deterministic=args.deterministic)
    if args.distributed:
        assert (args.local_rank == torch.distributed.get_rank())
    if (args.shot != (- 1)):
        # Few-shot configuration: propagate shot/way/fold into the datasets.
        config.dataset.train.others.shot = args.shot
        config.dataset.train.others.way = args.way
        config.dataset.train.others.fold = args.fold
        config.dataset.val.others.shot = args.shot
        config.dataset.val.others.way = args.way
        config.dataset.val.others.fold = args.fold
    if args.test:
        test_net(args, config)
    elif (args.finetune_model or args.scratch_model):
        finetune(args, config, train_writer, val_writer)
    else:
        pretrain(args, config, train_writer, val_writer)
# NOTE(review): the decorator below was garbled to a bare `(frozen=True)` line
# (a syntax error) and `from_json` had lost its @classmethod; both restored.
# Requires `from dataclasses import dataclass` at the top of the file.
@dataclass(frozen=True)
class SynthesisResult(JsonSerializable):
    """Immutable outcome of a single synthesis attempt."""

    # The synthesized hunk text, or None when nothing was produced.
    hunk: (str | None)
    # True when the search was pruned before completion.
    is_pruned_halfway: bool
    # True when synthesis ran out of budget before finishing.
    is_unfinished: bool

    def to_json(self) -> Any:
        return {'hunk': self.hunk, 'is_pruned_halfway': self.is_pruned_halfway, 'is_unfinished': self.is_unfinished}

    @classmethod
    def from_json(cls, d: Any) -> 'SynthesisResult':
        # Positional arguments follow the field declaration order above.
        return SynthesisResult(d['hunk'], bool(d['is_pruned_halfway']), bool(d['is_unfinished']))
def load_pretrained(cfg, Module, stage, **kwargs):
    """Load the best checkpoint for ``stage`` from the configured pretrained dir.

    Extra keyword arguments are forwarded to ``Module.load_from_checkpoint``.
    """
    pattern = Path(cfg.paths.pretrained.load) / BEST_CHCKPNT.format(stage=stage)
    checkpoint = get_latest_match(pattern)
    return Module.load_from_checkpoint(checkpoint, **kwargs)
def compute_possible_shapes(low, high, depth):
    """Map each shape in [low, high] to its per-level shape list, keeping only
    shapes whose decomposition reaches exactly ``depth`` levels."""
    return {
        shape: shapes
        for shape in range(low, (high + 1))
        if len(shapes := compute_max_depth(shape, max_depth=depth, print_out=False)) == depth
    }
def getBestFont():
    """Return the preferred available fixed-width font face, or None."""
    enumerator = wx.FontEnumerator()
    enumerator.EnumerateFacenames()
    available = enumerator.GetFacenames(fixedWidthOnly=True)
    # Preference order: first match wins.
    return next((name for name in ('DejaVu Sans Mono', 'Courier New') if name in available), None)
def resnet101_ibn_a(pretrained=False, **kwargs):
    """Build a ResNet-101-IBN-a; optionally load pretrained weights from the
    model zoo URL registered under 'resnet101_ibn_a'."""
    model = ResNet_IBN(block=Bottleneck_IBN, layers=[3, 4, 23, 3], ibn_cfg=('a', 'a', 'a', None), **kwargs)
    if not pretrained:
        return model
    state_dict = torch.hub.load_state_dict_from_url(model_urls['resnet101_ibn_a'])
    model.load_state_dict(state_dict)
    return model
def evaluate(data_source):
    """Average per-token loss of the module-level ``model`` over ``data_source``.

    Uses module-level ``model``, ``corpus``, ``criterion``, ``args`` and
    ``eval_batch_size``. NOTE(review): written against a legacy PyTorch API —
    ``evaluation=True`` presumably builds volatile Variables and
    ``total_loss[0]`` indexes a 1-element loss tensor; confirm against the
    framework version in use.
    """
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(eval_batch_size)
    # Walk the source in BPTT-sized chunks, carrying the hidden state over.
    for i in range(0, (data_source.size(0) - 1), args.bptt):
        (data, targets) = get_batch(data_source, i, evaluation=True)
        (output, hidden) = model(data, hidden)
        output_flat = output.view((- 1), ntokens)
        # Weight each chunk's mean loss by its length.
        total_loss += (len(data) * criterion(output_flat, targets).data)
        # Detach the hidden state so the graph does not grow across chunks.
        hidden = repackage_hidden(hidden)
    return (total_loss[0] / len(data_source))
def string_tuple_to_string(strings):
    """Join a sequence of items into one space-separated string.

    Every element is converted with ``str()``; an empty sequence yields ''.
    Fixes an inconsistency in the original: a single-element input was
    returned unconverted, so a non-string element leaked through as-is.
    """
    return ' '.join(str(s) for s in strings)
class EarlyStopping(Callback):
    """Stop training when a monitored metric has stopped improving.

    Args:
        monitor: Name of the metric to look up in ``logs``.
        min_delta: Minimum change that counts as an improvement.
        patience: Epochs without improvement before stopping.
        verbose: Verbosity level (>0 prints stop/restore messages).
        mode: 'min', 'max', or 'auto' (direction inferred via ``_is_metric``).
        baseline: If set, the monitor must improve past this value.
        restore_best_weights: Restore the best epoch's weights on stop.
    """

    def __init__(self, monitor: str='val_loss', min_delta: float=0.0, patience: int=10, verbose: int=0, mode: str='auto', baseline: Optional[float]=None, restore_best_weights: bool=False):
        super(EarlyStopping, self).__init__()
        self.monitor = monitor
        self.min_delta = min_delta
        self.patience = patience
        self.verbose = verbose
        self.mode = mode
        self.baseline = baseline
        self.restore_best_weights = restore_best_weights
        self.wait = 0
        self.stopped_epoch = 0
        self.state_dict = None  # snapshot of the best weights, if requested
        if (self.mode not in ['auto', 'min', 'max']):
            warnings.warn(('EarlyStopping mode %s is unknown, fallback to auto mode.' % self.mode), RuntimeWarning)
            self.mode = 'auto'
        # Resolve comparison direction: np.less means "smaller is better".
        if (self.mode == 'min'):
            self.monitor_op = np.less
        elif (self.mode == 'max'):
            self.monitor_op = np.greater
        elif _is_metric(self.monitor):
            self.monitor_op = np.greater
        else:
            self.monitor_op = np.less
        # Sign min_delta so `current - min_delta` always tightens the test.
        # (The original's no-op `*= 1` branch for np.greater is dropped.)
        if (self.monitor_op == np.less):
            self.min_delta *= (- 1)

    def on_train_begin(self, logs: Optional[Dict]=None):
        """Reset counters and the best-so-far value at the start of training."""
        self.wait = 0
        self.stopped_epoch = 0
        if (self.baseline is not None):
            self.best = self.baseline
        else:
            # np.inf replaces np.Inf, which was removed in NumPy 2.0.
            self.best = (np.inf if (self.monitor_op == np.less) else (- np.inf))

    def on_epoch_end(self, epoch: int, logs: Optional[Dict]=None, metric: Optional[float]=None):
        """Track improvement; request early stop once patience is exhausted."""
        current = self.get_monitor_value(logs)
        if (current is None):
            return
        if self.monitor_op((current - self.min_delta), self.best):
            self.best = current
            self.wait = 0
            self.best_epoch = epoch
            if self.restore_best_weights:
                # Deep-copy so later training does not mutate the snapshot.
                self.state_dict = copy.deepcopy(self.model.state_dict())
        else:
            self.wait += 1
            if (self.wait >= self.patience):
                self.stopped_epoch = epoch
                self.trainer.early_stop = True

    def on_train_end(self, logs: Optional[Dict]=None):
        """Report the best epoch and optionally restore its weights."""
        if ((self.stopped_epoch > 0) and (self.verbose > 0)):
            print(f'Best Epoch: {(self.best_epoch + 1)}. Best {self.monitor}: {self.best:.5f}')
        if (self.restore_best_weights and (self.state_dict is not None)):
            if (self.verbose > 0):
                print('Restoring model weights from the end of the best epoch')
            self.model.load_state_dict(self.state_dict)

    def get_monitor_value(self, logs):
        """Fetch the monitored value from ``logs``, warning when it is absent."""
        monitor_value = logs.get(self.monitor)
        if (monitor_value is None):
            warnings.warn(('Early stopping conditioned on metric `%s` which is not available. Available metrics are: %s' % (self.monitor, ','.join(list(logs.keys())))), RuntimeWarning)
        return monitor_value

    def __getstate__(self):
        # Drop unpicklable trainer/model references when serializing.
        d = self.__dict__
        self_dict = {k: d[k] for k in d if (k not in ['trainer', 'model'])}
        return self_dict

    def __setstate__(self, state):
        self.__dict__ = state
def _dist_train(model, dataset, cfg, validate=False):
    """Distributed training loop built on mmcv's Runner.

    Wraps the model in MMDistributedDataParallel, registers optimizer /
    checkpoint / logging hooks (fp16-aware), optionally an evaluation hook,
    resumes or loads weights per config, then runs the workflow.
    """
    data_loaders = [build_dataloader(dataset, cfg.data.imgs_per_gpu, cfg.data.workers_per_gpu, dist=True)]
    model = MMDistributedDataParallel(model.cuda())
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(model, batch_processor, optimizer, cfg.work_dir, cfg.log_level, mean_teacher=cfg.mean_teacher)
    # Use the fp16-aware optimizer hook when mixed precision is configured.
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg)
    else:
        optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
    runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config)
    # Reseed the distributed sampler each epoch.
    runner.register_hook(DistSamplerSeedHook())
    if validate:
        val_dataset_cfg = cfg.data.val
        eval_cfg = cfg.get('evaluation', {})
        # Evaluation hook class is looked up by name on the `core` module.
        eval_hook = eval_cfg.pop('eval_hook', 'CocoDistEvalmAPHook')
        EvalHook = getattr(core, eval_hook)
        runner.register_hook(EvalHook(val_dataset_cfg, **eval_cfg))
    # resume_from restores full training state; load_from only loads weights.
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
        if cfg.mean_teacher:
            runner.load_mean_teacher_checkpoint(cfg)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
# NOTE(review): the session decorator was garbled to a bare `(reuse_venv=True)`
# line and the serve command had lost its 'http.server' argument; both restored.
@nox.session(reuse_venv=True)
def docs(session: nox.Session) -> None:
    """Build the docs. Posargs: 'pdf' for LaTeX/PDF, 'serve' to serve HTML."""
    session.install('-r', 'docs/requirements.txt')
    session.chdir('docs')
    if ('pdf' in session.posargs):
        session.run('sphinx-build', '-M', 'latexpdf', '.', '_build')
        return
    session.run('sphinx-build', '-M', 'html', '.', '_build')
    if ('serve' in session.posargs):
        session.log('Launching docs at http://localhost:8000/ - use Ctrl-C to quit')
        # Serve the built HTML with the stdlib server (-d requires Python 3.7+).
        session.run('python', '-m', 'http.server', '8000', '-d', '_build/html')
    elif session.posargs:
        session.error('Unsupported argument to docs')
def load_gin_dataset(args):
    """Build train/validation loaders for the GIN dataset named in ``args``."""
    dataset = GINDataset(args.dataset, self_loop=True)
    loader = GraphDataLoader(
        dataset,
        batch_size=args.batch_size,
        collate_fn=collate,
        seed=args.seed,
        shuffle=True,
        split_name='fold10',
        fold_idx=args.fold_idx,
    )
    return loader.train_valid_loader()
class ResBase(nn.Module):
    """Torchvision ResNet backbone truncated before the classifier head.

    Exposes the flattened pooled features; ``in_features`` records the width
    the removed fully-connected layer expected.
    """

    # Sub-modules copied from the torchvision model, in forward order.
    _STAGES = ('conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4', 'avgpool')

    def __init__(self, res_name, pretrained=True):
        super(ResBase, self).__init__()
        backbone = res_dict[res_name](pretrained=pretrained)
        for stage in self._STAGES:
            setattr(self, stage, getattr(backbone, stage))
        self.in_features = backbone.fc.in_features

    def forward(self, x):
        """Run all stages and flatten the pooled output to (N, in_features)."""
        for stage in self._STAGES:
            x = getattr(self, stage)(x)
        return x.view(x.size(0), (- 1))
def _create_Siamese_network(A, P, N, NOT_FISRT_CLONE):
    """Build the three weight-sharing feature branches of a triplet network.

    Args:
        A, P, N: anchor, positive, and negative input images.
        NOT_FISRT_CLONE: truthy when variables already exist and must be
            reused. (Name kept — typo and all — for caller compatibility.)

    Returns:
        Tuple of (anchor, positive, negative) feature tensors.

    The two original branches differed only in the anchor's ``reuse`` flag;
    they are collapsed into a single call.
    """
    # Only the very first clone creates the variables; every later branch reuses them.
    featA = _image_to_feat(A, is_training=True, reuse=bool(NOT_FISRT_CLONE))
    featP = _image_to_feat(P, is_training=True, reuse=True)
    featN = _image_to_feat(N, is_training=True, reuse=True)
    return (featA, featP, featN)
def render(pieces, style):
    """Render version ``pieces`` into the requested style.

    Returns a dict with version, full-revisionid, dirty, error, and date;
    on an error in ``pieces`` the version is 'unknown'. Raises ValueError for
    an unrecognized style.
    """
    if pieces['error']:
        return {'version': 'unknown', 'full-revisionid': pieces.get('long'), 'dirty': None, 'error': pieces['error'], 'date': None}
    if ((not style) or (style == 'default')):
        style = 'pep440'
    # Dispatch table replaces the if/elif chain; same renderers, same styles.
    renderers = {
        'pep440': render_pep440,
        'pep440-pre': render_pep440_pre,
        'pep440-post': render_pep440_post,
        'pep440-old': render_pep440_old,
        'git-describe': render_git_describe,
        'git-describe-long': render_git_describe_long,
    }
    if (style not in renderers):
        raise ValueError(("unknown style '%s'" % style))
    rendered = renderers[style](pieces)
    return {'version': rendered, 'full-revisionid': pieces['long'], 'dirty': pieces['dirty'], 'error': None, 'date': pieces.get('date')}
class Market1501(dataset.Dataset):
    """Market-1501 person re-identification dataset.

    File names encode identity and camera, e.g. ``0002_c1s1_000451_03.jpg``:
    the leading field is the person id and the character after ``c`` is the
    camera index. Distractor images use id ``-1`` and are filtered out.

    NOTE(review): the ``@staticmethod``/``@property`` decorators below were
    restored — the bodies use ``self.id(path)`` and ``self.unique_ids`` in
    ways that only work with them (otherwise ``self`` would bind to
    ``file_path`` and the calls would fail).
    """

    @staticmethod
    def id(file_path):
        """Person id: the first '_'-separated field of the file name."""
        return int(file_path.split('/')[(- 1)].split('_')[0])

    @staticmethod
    def camera(file_path):
        """Camera index: the digit right after 'c' in the second field."""
        return int(file_path.split('/')[(- 1)].split('_')[1][1])

    @property
    def ids(self):
        """Person id for every image, in dataset order."""
        return [self.id(path) for path in self.imgs]

    @property
    def unique_ids(self):
        """Sorted distinct person ids."""
        return sorted(set(self.ids))

    @property
    def cameras(self):
        """Camera index for every image, in dataset order."""
        return [self.camera(path) for path in self.imgs]

    def __init__(self, root, transform=None, target_transform=None, loader=default_loader):
        self.root = root
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader
        # Drop junk/distractor images, which carry person id -1.
        self.imgs = [path for path in list_pictures(self.root) if (self.id(path) != (- 1))]
        # Map raw person ids to contiguous class labels.
        self._id2label = {_id: idx for (idx, _id) in enumerate(self.unique_ids)}

    def __getitem__(self, index):
        path = self.imgs[index]
        target = self._id2label[self.id(path)]
        img = self.loader(path)
        if (self.transform is not None):
            img = self.transform(img)
        if (self.target_transform is not None):
            target = self.target_transform(target)
        return (img, target)

    def __len__(self):
        return len(self.imgs)
def get_model(args, eval=False, eval_path_weights=''):
    """Build the model described by ``args`` and optionally load weights.

    Dispatches on ``args.task`` (STD_CL / F_CL / NEGEV / SEG) and
    ``args.method`` to ``create_model`` with the matching keyword set, then:
      1. loads a full pre-trained checkpoint when
         ``args.model['path_pre_trained']`` is set;
      2. loads classifier weights from ``args.model['folder_pre_trained_cl']``
         (F_CL / NEGEV tasks only), checking the source tag matches;
      3. optionally freezes the classifier;
      4. in ``eval`` mode, reloads the task-specific weight files from
         ``eval_path_weights`` (or from ``args.outd`` + checkpoint tag).

    Returns the constructed ``model``.

    NOTE(review): the parameter name ``eval`` shadows the builtin; kept as-is
    for interface compatibility.
    """
    p = Dict2Obj(args.model)
    encoder_weights = p.encoder_weights
    # The config stores the string 'None' rather than the value None.
    if (encoder_weights == 'None'):
        encoder_weights = None
    classes = args.num_classes
    (encoder_depth, decoder_channels) = get_encoder_d_c(p.encoder_name)
    # Methods with dedicated architectures (created with their own kwargs,
    # without the generic auxiliary classification head).
    spec_mth = [constants.METHOD_SPG, constants.METHOD_ACOL, constants.METHOD_ADL, constants.METHOD_TSCAM]
    method = ''
    # NOTE(review): the trailing comma makes this a 1-tuple, and the variable
    # is never used below — looks like leftover code; confirm before removing.
    support_background = (args.model['support_background'],)
    if (args.task == constants.STD_CL):
        # Standard classification task.
        aux_params = None
        if (args.method in spec_mth):
            # Each special method carries its own hyper-parameters.
            if (args.method == constants.METHOD_ACOL):
                model = create_model(task=args.task, arch=p.arch, method=args.method, encoder_name=p.encoder_name, encoder_weights=encoder_weights, in_channels=p.in_channels, num_classes=args.num_classes, acol_drop_threshold=args.acol_drop_threshold, large_feature_map=args.acol_large_feature_map, scale_in=p.scale_in)
            elif (args.method == constants.METHOD_SPG):
                model = create_model(task=args.task, arch=p.arch, method=args.method, encoder_name=p.encoder_name, encoder_weights=encoder_weights, in_channels=p.in_channels, num_classes=args.num_classes, large_feature_map=args.spg_large_feature_map, scale_in=p.scale_in)
            elif (args.method == constants.METHOD_ADL):
                model = create_model(task=args.task, arch=p.arch, method=args.method, encoder_name=p.encoder_name, encoder_weights=encoder_weights, in_channels=p.in_channels, num_classes=args.num_classes, adl_drop_rate=args.adl_drop_rate, adl_drop_threshold=args.adl_drop_threshold, large_feature_map=args.adl_large_feature_map, scale_in=p.scale_in)
            elif (args.method == constants.METHOD_TSCAM):
                model = create_model(task=args.task, arch=p.arch, method=args.method, encoder_name=p.encoder_name, encoder_weights=encoder_weights, num_classes=args.num_classes)
            else:
                raise ValueError
        elif (args.method == constants.METHOD_MAXMIN):
            aux_params = get_aux_params(args)
            model = create_model(task=args.task, arch=p.arch, method=method, encoder_name=p.encoder_name, encoder_weights=encoder_weights, in_channels=p.in_channels, encoder_depth=encoder_depth, scale_in=p.scale_in, aux_params=aux_params, w=args.maxmin_w, dataset_name=args.dataset)
        else:
            # Generic classifier with an auxiliary classification head.
            aux_params = get_aux_params(args)
            model = create_model(task=args.task, arch=p.arch, method=method, encoder_name=p.encoder_name, encoder_weights=encoder_weights, in_channels=p.in_channels, encoder_depth=encoder_depth, scale_in=p.scale_in, aux_params=aux_params)
    elif (args.task == constants.F_CL):
        # Classification + segmentation head (binary mode only).
        aux_params = get_aux_params(args)
        assert (args.seg_mode == constants.BINARY_MODE)
        seg_h_out_channels = 2
        model = create_model(task=args.task, arch=p.arch, method=method, encoder_name=p.encoder_name, encoder_weights=encoder_weights, encoder_depth=encoder_depth, decoder_channels=decoder_channels, in_channels=p.in_channels, seg_h_out_channels=seg_h_out_channels, scale_in=p.scale_in, aux_params=aux_params, freeze_cl=p.freeze_cl, im_rec=args.im_rec, img_range=args.img_range)
    elif (args.task == constants.NEGEV):
        # Same construction as F_CL but for the NEGEV task.
        aux_params = get_aux_params(args)
        assert (args.seg_mode == constants.BINARY_MODE)
        seg_h_out_channels = 2
        model = create_model(task=args.task, arch=p.arch, method=method, encoder_name=p.encoder_name, encoder_weights=encoder_weights, encoder_depth=encoder_depth, decoder_channels=decoder_channels, in_channels=p.in_channels, seg_h_out_channels=seg_h_out_channels, scale_in=p.scale_in, aux_params=aux_params, freeze_cl=p.freeze_cl, im_rec=args.im_rec, img_range=args.img_range)
    elif (args.task == constants.SEG):
        # Pure binary segmentation; only supported on these datasets.
        assert (args.dataset in [constants.GLAS, constants.CAMELYON512])
        assert (args.seg_mode == constants.BINARY_MODE)
        assert (classes == 2)
        aux_params = None
        model = create_model(task=args.task, arch=p.arch, method=method, encoder_name=p.encoder_name, encoder_depth=encoder_depth, encoder_weights=encoder_weights, decoder_channels=decoder_channels, in_channels=p.in_channels, classes=classes)
    else:
        raise NotImplementedError
    DLLogger.log('`{}` was created. Nbr.params: {}'.format(model, count_nb_params(model)))
    log = 'Arch: {}\ntask: {}\nencoder_name: {}\nencoder_weights: {}\nclasses: {}\naux_params: \n{}\nscale_in: {}\nfreeze_cl: {}\nim_rec: {}\nimg_range: {} \n'.format(p.arch, args.task, p.encoder_name, encoder_weights, classes, (format_dict_2_str(aux_params) if (aux_params is not None) else None), p.scale_in, p.freeze_cl, args.im_rec, args.img_range)
    DLLogger.log(log)
    DLLogger.log(model.get_info_nbr_params())
    # --- Optional: restore a full pre-trained state dict. ---
    path_file = args.model['path_pre_trained']
    if (path_file not in [None, 'None']):
        msg = 'You have asked to load a specific pre-trained model from {} .... [OK]'.format(path_file)
        warnings.warn(msg)
        DLLogger.log(msg)
        pre_tr_state = torch.load(path_file, map_location=get_cpu_device())
        model.load_state_dict(pre_tr_state, strict=args.model['strict'])
    # --- Optional: restore only the classifier from a previous STD_CL run. ---
    path_cl = args.model['folder_pre_trained_cl']
    if (path_cl not in [None, 'None', '']):
        assert (args.task in [constants.F_CL, constants.NEGEV])
        msg = "You have asked to set the classifier's weights from {} .... [OK]".format(path_cl)
        warnings.warn(msg)
        DLLogger.log(msg)
        if (args.task == constants.NEGEV):
            # The classifier was trained as a STD_CL task; rebuild that tag.
            cl_cp = args.negev_ptretrained_cl_cp
            std_cl_args = deepcopy(args)
            std_cl_args.task = constants.STD_CL
            tag = get_tag(std_cl_args, checkpoint_type=cl_cp)
        else:
            tag = get_tag(args)
        if path_cl.endswith(os.sep):
            source_tag = basename(path_cl[:(- 1)])
        else:
            source_tag = basename(path_cl)
        # Guard against loading weights from a mismatched experiment.
        assert (tag == source_tag), f'{tag}, {source_tag}'
        if (args.method in spec_mth):
            # Special methods store one monolithic checkpoint.
            weights = torch.load(join(path_cl, 'model.pt'), map_location=get_cpu_device())
            model.load_state_dict(weights, strict=True)
        else:
            # Generic models store encoder and classification head separately.
            encoder_w = torch.load(join(path_cl, 'encoder.pt'), map_location=get_cpu_device())
            model.encoder.super_load_state_dict(encoder_w, strict=True)
            header_w = torch.load(join(path_cl, 'classification_head.pt'), map_location=get_cpu_device())
            model.classification_head.load_state_dict(header_w, strict=True)
    # --- Optional: freeze the classifier during training. ---
    if (args.model['freeze_cl'] and (not eval)):
        assert (args.task in [constants.F_CL, constants.NEGEV])
        assert (args.model['folder_pre_trained_cl'] not in [None, 'None', ''])
        model.freeze_classifier()
        model.assert_cl_is_frozen()
    # --- Eval mode: reload the task-specific weight files. ---
    if eval:
        if os.path.isdir(eval_path_weights):
            path = eval_path_weights
        else:
            assert os.path.isdir(args.outd)
            tag = get_tag(args, checkpoint_type=args.eval_checkpoint_type)
            path = join(args.outd, tag)
        cpu_device = get_cpu_device()
        if (args.task == constants.STD_CL):
            if (args.method in spec_mth):
                weights = torch.load(join(path, 'model.pt'), map_location=get_cpu_device())
                model.load_state_dict(weights, strict=True)
            else:
                weights = torch.load(join(path, 'encoder.pt'), map_location=cpu_device)
                model.encoder.super_load_state_dict(weights, strict=True)
                weights = torch.load(join(path, 'classification_head.pt'), map_location=cpu_device)
                model.classification_head.load_state_dict(weights, strict=True)
        elif (args.task == constants.F_CL):
            weights = torch.load(join(path, 'encoder.pt'), map_location=cpu_device)
            model.encoder.super_load_state_dict(weights, strict=True)
            weights = torch.load(join(path, 'decoder.pt'), map_location=cpu_device)
            model.decoder.load_state_dict(weights, strict=True)
            weights = torch.load(join(path, 'segmentation_head.pt'), map_location=cpu_device)
            model.segmentation_head.load_state_dict(weights, strict=True)
            # The reconstruction head only exists when im_rec was enabled.
            if (model.reconstruction_head is not None):
                weights = torch.load(join(path, 'reconstruction_head.pt'), map_location=cpu_device)
                model.reconstruction_head.load_state_dict(weights, strict=True)
        elif (args.task == constants.NEGEV):
            weights = torch.load(join(path, 'encoder.pt'), map_location=cpu_device)
            model.encoder.super_load_state_dict(weights, strict=True)
            weights = torch.load(join(path, 'decoder.pt'), map_location=cpu_device)
            model.decoder.load_state_dict(weights, strict=True)
            weights = torch.load(join(path, 'classification_head.pt'), map_location=cpu_device)
            model.classification_head.load_state_dict(weights, strict=True)
            weights = torch.load(join(path, 'segmentation_head.pt'), map_location=cpu_device)
            model.segmentation_head.load_state_dict(weights, strict=True)
        elif (args.task == constants.SEG):
            weights = torch.load(join(path, 'encoder.pt'), map_location=cpu_device)
            model.encoder.super_load_state_dict(weights, strict=True)
            weights = torch.load(join(path, 'decoder.pt'), map_location=cpu_device)
            model.decoder.load_state_dict(weights, strict=True)
            weights = torch.load(join(path, 'segmentation_head.pt'), map_location=cpu_device)
            model.segmentation_head.load_state_dict(weights, strict=True)
        else:
            raise NotImplementedError
        msg = 'EVAL-mode. Reset model weights to: {}'.format(path)
        warnings.warn(msg)
        DLLogger.log(msg)
    return model |
class Classifier_Concat(nn.Module):
    """Late-fusion classifier: concatenates image and sound features along
    the last dimension and maps them to class scores with one linear layer.

    NOTE(review): the fused dimension is hard-coded to 1024 — presumably
    512-dim features per modality; confirm against the feature extractors.
    """

    def __init__(self, cls_num):
        super(Classifier_Concat, self).__init__()
        self.fc1 = nn.Linear(1024, cls_num)

    def forward(self, feat_img, feat_sound):
        # Fuse both modalities along the feature (last) axis, then classify.
        fused = torch.cat((feat_img, feat_sound), dim=(-1))
        return self.fc1(fused)
def find_labels(model_class):
    """Return the label-argument names of *model_class*'s forward signature.

    Inspects ``call`` for Keras models, ``forward`` for PyTorch modules and
    ``__call__`` otherwise, then keeps parameters whose name contains
    ``'label'`` (plus ``start_positions``/``end_positions`` for
    question-answering models).
    """
    # Detect the framework from the textual MRO rather than importing it.
    mro_repr = str(inspect.getmro(model_class))
    if 'keras.engine.training.Model' in mro_repr:
        entry_point = model_class.call
    elif 'torch.nn.modules.module.Module' in mro_repr:
        entry_point = model_class.forward
    else:
        entry_point = model_class.__call__
    params = inspect.signature(entry_point).parameters
    if 'QuestionAnswering' in model_class.__name__:
        # QA models name their span targets instead of using 'labels'.
        return [name for name in params
                if ('label' in name) or (name in ('start_positions', 'end_positions'))]
    return [name for name in params if 'label' in name]
def window_func(x, y, window, func):
    """Apply *func* over a sliding window of *y*, aligned with *x*.

    Returns a pair: *x* trimmed so each remaining point has a full window of
    history, and *func* applied along the last axis of the rolling-window
    view of *y*.
    """
    windows = rolling_window(y, window)
    aggregated = func(windows, axis=(-1))
    # The first (window - 1) samples lack a complete window behind them.
    return (x[(window - 1):], aggregated)
def note_representation_processor_chain(features, codec: Codec, note_representation_config: NoteRepresentationConfig):
    """Post-process note features through the standard pipeline.

    Extracts the event sequence (terminating state events with the 'tie'
    token when ties are enabled), remaps MIDI programs, and run-length
    encodes shift events for 'velocity' and 'program' state changes.
    """
    tie_token = codec.encode_event(Event('tie', 0))
    # Only terminate state events with the tie token when ties are enabled.
    end_token = tie_token if note_representation_config.include_ties else None
    features = extract_sequence_with_indices(
        features, state_events_end_token=end_token, feature_key='inputs')
    features = map_midi_programs(features, codec)
    return run_length_encode_shifts_fn(
        features, codec, state_change_event_types=['velocity', 'program'])
class MountainCarEnv(gym.Env):
    """Classic MountainCar environment (discrete actions).

    State is ``(position, velocity)``; actions are 0 (push left), 1 (no
    push), 2 (push right).  Each step costs reward -1 until the car reaches
    ``goal_position`` on the right hill.
    """
    metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 30}
    def __init__(self):
        # Physical limits of the track and the car.
        self.min_position = (- 1.2)
        self.max_position = 0.6
        self.max_speed = 0.07
        self.goal_position = 0.5
        # Observation bounds: [position, velocity].
        self.low = np.array([self.min_position, (- self.max_speed)])
        self.high = np.array([self.max_position, self.max_speed])
        self.viewer = None
        self.action_space = spaces.Discrete(3)
        self.observation_space = spaces.Box(self.low, self.high)
        self.seed()
        self.reset()
    def seed(self, seed=None):
        """Seed the environment's RNG; returns the list of seeds used."""
        (self.np_random, seed) = seeding.np_random(seed)
        return [seed]
    def step(self, action):
        """Advance one timestep; returns (obs, reward, done, info)."""
        assert self.action_space.contains(action), ('%r (%s) invalid' % (action, type(action)))
        (position, velocity) = self.state
        # Force from the action (action-1 in {-1,0,1}) plus gravity along
        # the slope (proportional to cos(3*position)).
        velocity += (((action - 1) * 0.001) + (math.cos((3 * position)) * (- 0.0025)))
        velocity = np.clip(velocity, (- self.max_speed), self.max_speed)
        position += velocity
        position = np.clip(position, self.min_position, self.max_position)
        # Hitting the left wall kills leftward momentum.
        if ((position == self.min_position) and (velocity < 0)):
            velocity = 0
        done = bool((position >= self.goal_position))
        # Constant -1 per step: the agent is rewarded for finishing quickly.
        reward = (- 1.0)
        self.state = (position, velocity)
        return (np.array(self.state), reward, done, {})
    def reset(self):
        """Reset to a random position in [-0.6, -0.4) with zero velocity."""
        self.state = np.array([self.np_random.uniform(low=(- 0.6), high=(- 0.4)), 0])
        return np.array(self.state)
    def _height(self, xs):
        # Hill profile used both by the physics intuition and the renderer.
        return ((np.sin((3 * xs)) * 0.45) + 0.55)
    def render(self, mode='human'):
        """Draw the track, car and goal flag with gym's classic renderer."""
        screen_width = 600
        screen_height = 400
        world_width = (self.max_position - self.min_position)
        scale = (screen_width / world_width)
        carwidth = 40
        carheight = 20
        if (self.viewer is None):
            # Lazily build all static geometry on first render.
            from gym.envs.classic_control import rendering
            self.viewer = rendering.Viewer(screen_width, screen_height)
            xs = np.linspace(self.min_position, self.max_position, 100)
            ys = self._height(xs)
            xys = list(zip(((xs - self.min_position) * scale), (ys * scale)))
            self.track = rendering.make_polyline(xys)
            self.track.set_linewidth(4)
            self.viewer.add_geom(self.track)
            clearance = 10
            # Car body as a rectangle, lifted above the track by `clearance`.
            (l, r, t, b) = (((- carwidth) / 2), (carwidth / 2), carheight, 0)
            car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
            car.add_attr(rendering.Transform(translation=(0, clearance)))
            self.cartrans = rendering.Transform()
            car.add_attr(self.cartrans)
            self.viewer.add_geom(car)
            frontwheel = rendering.make_circle((carheight / 2.5))
            frontwheel.set_color(0.5, 0.5, 0.5)
            frontwheel.add_attr(rendering.Transform(translation=((carwidth / 4), clearance)))
            frontwheel.add_attr(self.cartrans)
            self.viewer.add_geom(frontwheel)
            backwheel = rendering.make_circle((carheight / 2.5))
            backwheel.add_attr(rendering.Transform(translation=(((- carwidth) / 4), clearance)))
            backwheel.add_attr(self.cartrans)
            backwheel.set_color(0.5, 0.5, 0.5)
            self.viewer.add_geom(backwheel)
            # Goal flag at goal_position.
            flagx = ((self.goal_position - self.min_position) * scale)
            flagy1 = (self._height(self.goal_position) * scale)
            flagy2 = (flagy1 + 50)
            flagpole = rendering.Line((flagx, flagy1), (flagx, flagy2))
            self.viewer.add_geom(flagpole)
            flag = rendering.FilledPolygon([(flagx, flagy2), (flagx, (flagy2 - 10)), ((flagx + 25), (flagy2 - 5))])
            flag.set_color(0.8, 0.8, 0)
            self.viewer.add_geom(flag)
        # Move/tilt the car to match the current state each frame.
        pos = self.state[0]
        self.cartrans.set_translation(((pos - self.min_position) * scale), (self._height(pos) * scale))
        self.cartrans.set_rotation(math.cos((3 * pos)))
        return self.viewer.render(return_rgb_array=(mode == 'rgb_array'))
    def close(self):
        """Dispose of the rendering viewer if one was created."""
        if self.viewer:
            self.viewer.close() |
class TestBarrierBeforeMeasuremetsWhenABarrierIsAlreadyThere(QiskitTestCase):
    """Tests for BarrierBeforeFinalMeasurements when barriers already exist.

    NOTE(review): the class name misspells "Measurements"; kept as-is since
    renaming would change the test identifier.
    """
    def test_handle_redundancy(self):
        """A barrier already in front of the final measure is left as-is."""
        qr = QuantumRegister(1, 'q')
        cr = ClassicalRegister(1, 'c')
        circuit = QuantumCircuit(qr, cr)
        circuit.barrier(qr)
        circuit.measure(qr, cr)
        expected = QuantumCircuit(qr, cr)
        expected.barrier(qr)
        expected.measure(qr, cr)
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(circuit))
        self.assertEqual(result, circuit_to_dag(expected))
    def test_remove_barrier_in_different_qregs(self):
        """Per-register barriers are merged into one full-width barrier."""
        qr0 = QuantumRegister(1, 'q0')
        qr1 = QuantumRegister(1, 'q1')
        cr0 = ClassicalRegister(2, 'c0')
        circuit = QuantumCircuit(qr0, qr1, cr0)
        circuit.barrier(qr0)
        circuit.barrier(qr1)
        circuit.measure(qr0, cr0[0])
        circuit.measure(qr1, cr0[1])
        expected = QuantumCircuit(qr0, qr1, cr0)
        expected.barrier(qr0, qr1)
        expected.measure(qr0, cr0[0])
        expected.measure(qr1, cr0[1])
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(circuit))
        self.assertEqual(result, circuit_to_dag(expected))
    def test_preserve_barriers_for_measurement_ordering(self):
        """A barrier separating two measurements must be kept (ordering)."""
        qr = QuantumRegister(2, 'q')
        cr = ClassicalRegister(2, 'c')
        circuit = QuantumCircuit(qr, cr)
        circuit.measure(qr[0], cr[0])
        circuit.barrier(qr)
        circuit.measure(qr[1], cr[1])
        expected = QuantumCircuit(qr, cr)
        expected.barrier(qr)
        expected.measure(qr[0], cr[0])
        expected.barrier(qr)
        expected.measure(qr[1], cr[1])
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(circuit))
        self.assertEqual(result, circuit_to_dag(expected))
    def test_measures_followed_by_barriers_should_be_final(self):
        """Measures separated only by barriers still count as final."""
        qr = QuantumRegister(2, 'q')
        cr = ClassicalRegister(2, 'c')
        circuit = QuantumCircuit(qr, cr)
        circuit.h(qr)
        circuit.barrier(qr)
        circuit.measure(qr[0], cr[0])
        circuit.barrier(qr)
        circuit.measure(qr[1], cr[1])
        expected = QuantumCircuit(qr, cr)
        expected.h(qr)
        expected.barrier(qr)
        expected.measure(qr[0], cr[0])
        expected.barrier(qr)
        expected.measure(qr[1], cr[1])
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(circuit))
        self.assertEqual(result, circuit_to_dag(expected))
    def test_should_merge_with_smaller_duplicate_barrier(self):
        """A narrower pre-existing barrier is widened to cover all qubits."""
        qr = QuantumRegister(3, 'q')
        cr = ClassicalRegister(3, 'c')
        circuit = QuantumCircuit(qr, cr)
        circuit.barrier(qr[0], qr[1])
        circuit.measure(qr, cr)
        expected = QuantumCircuit(qr, cr)
        expected.barrier(qr)
        expected.measure(qr, cr)
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(circuit))
        self.assertEqual(result, circuit_to_dag(expected))
    def test_should_merge_with_larger_duplicate_barrier(self):
        """Full-width barriers already present leave the circuit unchanged."""
        qr = QuantumRegister(3, 'q')
        cr = ClassicalRegister(3, 'c')
        circuit = QuantumCircuit(qr, cr)
        circuit.barrier(qr)
        circuit.measure(qr[0], cr[0])
        circuit.barrier(qr)
        circuit.measure(qr[1], cr[1])
        expected = circuit
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(circuit))
        self.assertEqual(result, circuit_to_dag(expected))
    def test_barrier_doesnt_reorder_gates(self):
        """Inserting the final barrier must not reorder preceding gates."""
        qr = QuantumRegister(4)
        cr = ClassicalRegister(4)
        circuit = QuantumCircuit(qr, cr)
        circuit.u1(0, qr[0])
        circuit.u1(1, qr[1])
        circuit.u1(2, qr[2])
        circuit.barrier(qr[2], qr[3])
        circuit.u1(3, qr[3])
        test_circuit = circuit.copy()
        test_circuit.measure(qr, cr)
        expected = circuit.copy()
        expected.barrier(qr)
        expected.measure(qr, cr)
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(test_circuit))
        self.assertEqual(result, circuit_to_dag(expected)) |
def load_progress(progress_csv_path):
    """Load a progress CSV into a dict of column name -> numpy float array.

    Cells that cannot be parsed as floats (non-numeric strings, missing
    values) are recorded as 0.0 so every column keeps one entry per row.

    Args:
        progress_csv_path: path to a CSV file with a header row.

    Returns:
        dict mapping each column name to a 1-D ``np.ndarray`` of floats.
    """
    print('Reading %s' % progress_csv_path)
    entries = {}
    with open(progress_csv_path, 'r') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            for (k, v) in row.items():
                column = entries.setdefault(k, [])
                try:
                    column.append(float(v))
                # BUG FIX: was a bare `except:` that also swallowed
                # KeyboardInterrupt/SystemExit; only parsing errors are
                # expected here (TypeError covers v is None).
                except (TypeError, ValueError):
                    column.append(0.0)
    return {k: np.array(v) for (k, v) in entries.items()}
def run(args):
    """Assemble a submission CSV from pickled predictions.

    Loads pdb identifiers plus predicted C-alpha distance matrices and
    psi/phi angle lists, flattens them into 1-indexed (Id, Predicted) rows
    (``<pdb>_d_<i>_<j>``, ``<pdb>_psi_<i>``, ``<pdb>_phi_<i>``) and writes
    ``submission.csv`` under ``args.output_dir``.
    """
    os.makedirs(args.output_dir, exist_ok=True)
    output_path = os.path.join(args.output_dir, 'submission.csv')

    def _unpickle(path):
        # Small helper: load one pickle file.
        with open(path, 'rb') as fin:
            return pickle.load(fin)

    # The test pickle stores the pdb name list in its second slot.
    pdbs = _unpickle(args.testpkl_path)[1]
    dcalphas = _unpickle(args.dcalphas_path)
    psis = _unpickle(args.psis_path)
    phis = _unpickle(args.phis_path)

    rows = []
    for (pdb, dmat, psi, phi) in zip(pdbs, dcalphas, psis, phis):
        # Distance-matrix entries, both indices 1-based.
        for (i, drow) in enumerate(dmat, start=1):
            for (j, val) in enumerate(drow, start=1):
                rows.append(('{}_d_{}_{}'.format(pdb, i, j), val))
        # Torsion angles, 1-based.
        for (i, val) in enumerate(psi, start=1):
            rows.append(('{}_psi_{}'.format(pdb, i), val))
        for (i, val) in enumerate(phi, start=1):
            rows.append(('{}_phi_{}'.format(pdb, i), val))
    pd.DataFrame(rows, columns=['Id', 'Predicted']).to_csv(output_path, index=False)
def set_seed(seed):
    """Seed numpy and torch (CPU and CUDA) RNGs for reproducibility.

    Note: Python's ``random`` module is not seeded here.
    """
    np.random.seed(seed)
    torch.manual_seed(seed)
    # No-op (deferred) on CPU-only builds.
    torch.cuda.manual_seed(seed)
class EqualizedLinear(ConstrainedLayer):
    """A fully-connected layer wrapped in ConstrainedLayer's constraint
    (equalized learning rate, per the ProGAN-style setup — confirm against
    ConstrainedLayer's definition)."""

    def __init__(self, nChannelsPrevious, nChannels, bias=True, **kwargs):
        # Build the underlying linear module first, then hand it to the
        # constraint wrapper along with any extra wrapper options.
        module = nn.Linear(nChannelsPrevious, nChannels, bias=bias)
        ConstrainedLayer.__init__(self, module, **kwargs)
def assign_tp_fp_fn_tn(y_true: np.ndarray, y_pred: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Elementwise confusion-matrix membership masks for binary labels.

    Args:
        y_true: ground-truth binary labels (0/1).
        y_pred: predicted binary labels (0/1), broadcast-compatible with
            ``y_true``.

    Returns:
        Boolean arrays ``(is_TP, is_FP, is_FN, is_TN)``; exactly one of the
        four is True at each position.

    BUG FIX: the return annotation claimed ``Tuple[(int, int, int, int)]``,
    but the function returns boolean ndarrays, not ints.
    """
    is_tp = np.logical_and((y_true == y_pred), (y_pred == 1))
    is_fp = np.logical_and((y_true != y_pred), (y_pred == 1))
    is_fn = np.logical_and((y_true != y_pred), (y_pred == 0))
    is_tn = np.logical_and((y_true == y_pred), (y_pred == 0))
    return (is_tp, is_fp, is_fn, is_tn)
class Roomba(object):
    """High-level driver for an iRobot Roomba over the SCI serial protocol."""

    def __init__(self):
        self.tty = None
        self.sci = None
        # True -> 'safe' mode (sensor cut-offs active); False -> 'full' mode.
        self.safe = True

    def start(self, tty='/dev/ttyUSB0', baudrate=57600):
        """Open the serial command interface on *tty*."""
        self.tty = tty
        self.sci = SerialCommandInterface(tty, baudrate)
        self.sci.add_opcodes(ROOMBA_OPCODES)

    def change_baud_rate(self, baud_rate):
        """Switch the robot and the local serial port to *baud_rate*.

        Raises DriverError for unsupported rates.
        """
        if (baud_rate not in BAUD_RATES):
            raise DriverError('Invalid baud rate specified.')
        self.sci.baud(baud_rate)
        # Reopen the local port at the new rate.
        self.sci = SerialCommandInterface(self.tty, baud_rate)

    def passive(self):
        """Put the robot into passive (listening) mode."""
        self.sci.start()
        time.sleep(0.5)

    def control(self):
        """Take control of the robot in safe or full mode per ``self.safe``."""
        self.passive()
        if (not self.safe):
            self.sci.full()
        else:
            self.sci.safe()
        time.sleep(0.5)

    def direct_drive(self, velocity_left, velocity_right):
        """Drive each wheel independently (16-bit two's-complement values)."""
        vl = (int(velocity_left) & 65535)
        vr = (int(velocity_right) & 65535)
        # Pack as two big-endian 16-bit words (right wheel first), sent as
        # four individual bytes.
        self.sci.direct_drive(*struct.unpack('4B', struct.pack('>2H', vr, vl)))

    def drive(self, velocity, radius):
        """Drive along an arc of *radius* at *velocity* (16-bit values)."""
        velocity = (int(velocity) & 65535)
        radius = (int(radius) & 65535)
        # BUG FIX: the local used to be named `bytes`, shadowing the builtin.
        payload = struct.unpack('4B', struct.pack('>2H', velocity, radius))
        self.sci.drive(*payload)

    def stop(self):
        """Stop both wheels immediately."""
        self.drive(0, 0)

    def slow_stop(self, velocity):
        """Ramp speed toward zero in 25-unit steps, then stop."""
        # BUG FIX: `xrange` is Python 2 only (NameError on Python 3, which
        # this file otherwise targets); use `range`.
        if (velocity < 0):
            velocities = range(velocity, (- VELOCITY_SLOW), 25)
        else:
            velocities = range(velocity, VELOCITY_SLOW, (- 25))
        for v in velocities:
            self.drive(v, RADIUS_STRAIGHT)
            time.sleep(0.05)
        self.stop()

    def drive_straight(self, velocity):
        """Drive straight ahead at *velocity*."""
        self.drive(velocity, RADIUS_STRAIGHT)

    def turn_in_place(self, velocity, direction):
        """Spin in place; *direction* is 'cw' or 'ccw' (KeyError otherwise)."""
        valid_directions = {'cw': RADIUS_TURN_IN_PLACE_CW, 'ccw': RADIUS_TURN_IN_PLACE_CCW}
        self.drive(velocity, valid_directions[direction])

    def dock(self):
        """Start the self-docking (force-seeking-dock) behavior."""
        self.sci.start()
        time.sleep(0.5)
        self.sci.force_seeking_dock()
def quad_double_newton_at_series(pols, lser, idx=1, maxdeg=4, nbr=4, checkin=True, vrblvl=0):
    """Run Newton's method on a power-series solution in quad double precision.

    Parameters (meanings inferred from usage — confirm against phcpy docs):
        pols: string representations of the polynomial system.
        lser: strings with the leading terms of the series (the start value);
            returned unchanged if the check-in fails.
        idx: index of the series variable/parameter (default 1).
        maxdeg: presumably the maximal series degree (default 4).
        nbr: presumably the number of Newton steps (default 4).
        checkin: if True, validate (nbsym, lser, idx) before calling phc.
        vrblvl: verbosity level; > 0 prints diagnostics.

    Returns the computed series with the series symbol substituted at
    position *idx*, or *lser* when the check-in fails.
    """
    nbsym = number_of_symbols(pols)
    if (vrblvl > 0):
        print('the polynomials :')
        for pol in pols:
            print(pol)
        print('Number of variables :', nbsym)
    if checkin:
        # Bail out early (returning the input series) on invalid input.
        if (not checkin_newton_at_series(nbsym, lser, idx)):
            return lser
    # Stage the start series into slot 1 of the quad double systems pool.
    set_quad_double_system(1, lser)
    initialize_quad_double_syspool(1, vrblvl)
    copy_to_quad_double_syspool(1, vrblvl)
    # Then install the polynomial system itself (nbsym variables).
    set_quad_double_system(nbsym, pols)
    phc = get_phcfun()
    # Pack the integer parameters (idx, maxdeg, nbr) for the C interface.
    apars = int4a2nbr([idx, maxdeg, nbr], (vrblvl > 0))
    bbb = pointer(c_int32(vrblvl))
    ccc = pointer(c_double(0.0))
    vrb = c_int32(vrblvl)
    if (vrblvl > 0):
        print('-> quad_double_newton_at_series calls phc ...')
        print('apars =', nbr2int4a(apars))
    # Job code 696 runs Newton at the series in quad double precision.
    retval = phc(696, apars, bbb, ccc, vrb)
    fail = (retval > 0)
    size = ((- 1) if fail else size_quad_double_syspool(vrblvl))
    if (vrblvl > 0):
        if (size == (- 1)):
            print("An error occurred in the execution of Newton's method.")
        else:
            print('Computed one series solution.')
    # Retrieve the result series from pool slot 1 and clean up the pool.
    copy_from_quad_double_syspool(1, vrblvl)
    result = get_quad_double_system()
    result = substitute_symbol(result, idx)
    clear_quad_double_syspool(vrblvl)
    return result |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.