code stringlengths 17 6.64M |
|---|
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors).
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if (world_size == 1):
        # Not distributed: the "gather" is just the local object.
        return [data]
    cpu_group = None
    if (os.getenv('MDETR_CPU_REDUCE') == '1'):
        # Opt-in: gather over a gloo (CPU) group instead of the default group.
        cpu_group = _get_global_gloo_group()
    # Serialize the object to a flat uint8 tensor via torch.save.
    buffer = io.BytesIO()
    torch.save(data, buffer)
    data_view = buffer.getbuffer()
    device = ('cuda' if (cpu_group is None) else 'cpu')
    tensor = torch.ByteTensor(data_view).to(device)
    # First exchange payload sizes so every rank knows the maximum length.
    local_size = torch.tensor([tensor.numel()], device=device, dtype=torch.long)
    size_list = [torch.tensor([0], device=device, dtype=torch.long) for _ in range(world_size)]
    if (cpu_group is None):
        dist.all_gather(size_list, local_size)
    else:
        print('gathering on cpu')
        dist.all_gather(size_list, local_size, group=cpu_group)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    assert isinstance(local_size.item(), int)
    local_size = int(local_size.item())
    # all_gather requires equal tensor shapes on every rank, so pad the
    # local payload up to max_size before the second collective.
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=device))
    if (local_size != max_size):
        padding = torch.empty(size=((max_size - local_size),), dtype=torch.uint8, device=device)
        tensor = torch.cat((tensor, padding), dim=0)
    if (cpu_group is None):
        dist.all_gather(tensor_list, tensor)
    else:
        dist.all_gather(tensor_list, tensor, group=cpu_group)
    # Strip each rank's padding (using the true sizes) and deserialize.
    data_list = []
    for (size, tensor) in zip(size_list, tensor_list):
        tensor = torch.split(tensor, [size, (max_size - size)], dim=0)[0]
        buffer = io.BytesIO(tensor.cpu().numpy())
        obj = torch.load(buffer)
        data_list.append(obj)
    return data_list
|
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all
    processes have the averaged results. Returns a dict with the same
    fields as input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # Nothing to reduce when running single-process.
        return input_dict
    with torch.no_grad():
        # Sort keys so every rank stacks the values in the same order.
        keys = sorted(input_dict.keys())
        stacked = torch.stack([input_dict[k] for k in keys], dim=0)
        dist.all_reduce(stacked)
        if average:
            stacked /= world_size
        return {k: v for (k, v) in zip(keys, stacked)}
|
def setup_for_distributed(is_master):
    """Replace the builtin print so only the master process prints.

    Non-master processes stay silent unless the caller passes force=True.
    """
    import builtins
    original_print = builtins.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            original_print(*args, **kwargs)
    builtins.print = print
|
def is_dist_avail_and_initialized():
    """
    Returns:
        True if distributed training is enabled
    """
    # Both conditions must hold; short-circuits when dist is unavailable.
    return dist.is_available() and dist.is_initialized()
|
def get_world_size():
    """
    Returns:
        The number of processes in the process group (1 when not distributed)
    """
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
|
def get_rank():
    """
    Returns:
        The rank of the current process within the global process group
        (0 when not distributed).
    """
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
|
def get_local_rank() -> int:
    """
    Returns:
        The rank of the current process within the local (per-machine)
        process group, or 0 when distributed is unavailable/uninitialized.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    # The local group must have been created by the launcher before this call.
    assert (_LOCAL_PROCESS_GROUP is not None)
    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
|
def get_local_size() -> int:
    """
    Returns:
        The size of the per-machine process group, i.e. the number of
        processes per machine (1 when distributed is unavailable/uninitialized).
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
|
def is_main_process():
'Return true if the current process is the main one'
return (get_rank() == 0)
|
def save_on_master(*args, **kwargs):
    """torch.save, executed only on the main process."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
|
def init_distributed_mode(args):
    """Initialize distributed training, if appropriate.

    Mutates `args` in place: sets args.rank / args.world_size / args.gpu /
    args.distributed / args.dist_backend from the environment, then creates
    the NCCL process group and installs rank-aware printing.
    """
    if (('RANK' in os.environ) and ('WORLD_SIZE' in os.environ)):
        # torchrun / torch.distributed.launch style environment variables.
        args.rank = int(os.environ['RANK'])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif ('SLURM_PROCID' in os.environ):
        # SLURM: derive the local GPU index from the global rank.
        # NOTE(review): this branch leaves args.world_size unset — presumably
        # the caller provides it; confirm before relying on it.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = (args.rank % torch.cuda.device_count())
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(args.rank, args.dist_url), flush=True)
    # 7200 s (2 h) timeout for collective operations.
    dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank, timeout=datetime.timedelta(0, 7200))
    dist.barrier()
    # Only rank 0 prints from here on, unless args.debug is set.
    setup_for_distributed((args.debug or (args.rank == 0)))
|
def set_seed(args):
    """Seed python, numpy and torch RNGs from args.seed (CUDA too when args.n_gpu > 0)."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
|
def train(args, train_dataset, model, tokenizer, eval_dataset=None):
    """Train the model.

    Runs the epoch loop with gradient accumulation, optional apex fp16 or
    torch autocast, periodic logging/checkpointing, and a per-epoch save +
    evaluation on rank <= 0. Returns (global_step, mean training loss).
    """
    args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
    train_sampler = (RandomSampler(train_dataset) if (args.local_rank == (- 1)) else DistributedSampler(train_dataset))
    train_dataloader = DataLoader(train_dataset, collate_fn=train_dataset.collate_fn, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=args.num_workers)
    # t_total = total number of optimizer steps; max_steps overrides epochs.
    if (args.max_steps > 0):
        t_total = args.max_steps
        args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1)
    else:
        t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
    # Biases and LayerNorm weights are excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    if (args.resume_dir is not None):
        print('Resume training from: ', args.resume_dir)
        # NOTE(review): a resume_dir ending in '--1' appears to mean "no
        # checkpoint to load" — confirm against the launcher's convention.
        if (not args.resume_dir.endswith('--1')):
            args.model_name_or_path = args.resume_dir
            print('Load Model Weight')
            model.load_state_dict(torch.load((args.resume_dir + '/pytorch_model.bin'), map_location='cpu'))
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    # args.warmup_steps is treated as a *fraction* of t_total here.
    num_warmup_steps = int((t_total * args.warmup_steps))
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)
    # Restore optimizer/scheduler state when resuming from a checkpoint dir.
    if (os.path.isfile(os.path.join(args.model_name_or_path, 'optimizer.pt')) and os.path.isfile(os.path.join(args.model_name_or_path, 'scheduler.pt'))):
        print('Loading optimizer and scheduler from checkpoints', args.model_name_or_path)
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'optimizer.pt'), map_location='cpu'))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'scheduler.pt'), map_location='cpu'))
    # Multi-GPU (DataParallel) and distributed (DDP) wrapping.
    if (args.n_gpu > 1):
        model = torch.nn.DataParallel(model)
    if (args.local_rank != (- 1)):
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    print('***** Running training *****')
    print('  Num examples = %d', len(train_dataset))
    print('  Num Epochs = %d', args.num_train_epochs)
    print('  Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
    print('  Total train batch size (w. parallel, distributed & accumulation) = %d', ((args.train_batch_size * args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (args.local_rank != (- 1)) else 1)))
    print('  Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
    print('  Total optimization steps = %d', t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    if os.path.exists(args.model_name_or_path):
        # Parse the epoch number out of a 'checkpoint-N' style directory name
        # and fast-forward global_step accordingly.
        epochs_trained = int(args.model_name_or_path.split('-')[(- 1)].split('/')[0])
        epochs_trained += 1
        global_step = ((epochs_trained * len(train_dataloader)) * args.gradient_accumulation_steps)
        print('  Continuing training from checkpoint, will skip to saved global_step')
        print('  Continuing training starting from epoch %d', epochs_trained)
        print('  Continuing training from global step %d', global_step)
    (tr_loss, logging_loss) = (0.0, 0.0)
    model.zero_grad()
    train_iterator = trange(epochs_trained, int(args.num_train_epochs), desc='Epoch', disable=(args.local_rank not in [(- 1), 0]))
    set_seed(args)
    train_meter = TrainingMeter()
    epoch_num = epochs_trained
    for _ in train_iterator:
        epoch_iterator = train_dataloader
        for (step, batch) in enumerate(tqdm(epoch_iterator)):
            # The collate_fn appends the raw examples as the last element.
            (batch, examples) = (batch[:(- 1)], batch[(- 1)])
            if args.skip_training:
                break
            if (steps_trained_in_current_epoch > 0):
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple((t.to(args.device) for t in batch))
            # batch layout: [input_ids, attention_mask, token_type_ids, labels, ...]
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
            if ((args.model_type != 'distilbert') and (args.model_type != 't5')):
                # XLM/RoBERTa don't use token_type_ids.
                inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'xlnet', 'albert']) else None)
            with torch.cuda.amp.autocast(enabled=args.use_autocast):
                outputs = model(**inputs)
                loss = outputs[0]
            if (args.n_gpu > 1):
                # DataParallel returns one loss per GPU; reduce to a scalar.
                loss = loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                # apex amp path (requires `amp` to be initialized elsewhere).
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            logits = outputs[1]
            labels = batch[3]
            # Token-level argmax accuracy over the flattened labels.
            acc = ((logits.argmax((- 1)) == labels).sum().float() / labels.view((- 1)).size(0))
            train_meter.update({'loss': loss.item(), 'acc': acc.item()})
            tr_loss += loss.item()
            # Step the optimizer every gradient_accumulation_steps micro-batches
            # (or at the end of a too-short epoch).
            if ((((step + 1) % args.gradient_accumulation_steps) == 0) or ((len(epoch_iterator) <= args.gradient_accumulation_steps) and ((step + 1) == len(epoch_iterator)))):
                if ((not args.fp16) and (not args.use_autocast)):
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                # Periodic logging (and optional mid-training evaluation).
                if ((args.local_rank in [(- 1), 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)):
                    logs = {}
                    if ((args.local_rank == (- 1)) and args.evaluate_during_training):
                        results = evaluate(args, model, tokenizer)
                        for (key, value) in results.items():
                            eval_key = 'eval_{}'.format(key)
                            logs[eval_key] = value
                    loss_scalar = ((tr_loss - logging_loss) / args.logging_steps)
                    learning_rate_scalar = scheduler.get_lr()[0]
                    logs['learning_rate'] = learning_rate_scalar
                    logs['loss'] = loss_scalar
                    logging_loss = tr_loss
                    print(json.dumps({**logs, **{'step': global_step}, **{'step_per_epoch': (len(train_dataloader) // args.gradient_accumulation_steps)}}))
                    train_meter.report()
                    train_meter.clean()
                # Periodic checkpointing by optimizer step.
                if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if (not os.path.exists(output_dir)):
                        os.makedirs(output_dir)
                    # Unwrap DataParallel/DDP before saving.
                    model_to_save = (model.module if hasattr(model, 'module') else model)
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    print('Saving model checkpoint to %s', output_dir)
            if ((args.max_steps > 0) and (global_step > args.max_steps)):
                epoch_iterator.close()
                break
        if ((args.max_steps > 0) and (global_step > args.max_steps)):
            train_iterator.close()
            break
        # End-of-epoch checkpoint + evaluation (rank <= 0 only).
        if (args.local_rank <= 0):
            output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(epoch_num))
            print('Saving model checkpoint to ', output_dir)
            if (not os.path.exists(output_dir)):
                os.makedirs(output_dir)
            model_to_save = (model.module if hasattr(model, 'module') else model)
            model_to_save.save_pretrained(output_dir)
            torch.save(args, os.path.join(output_dir, 'training_args.bin'))
            torch.save(optimizer.state_dict(), os.path.join(output_dir, 'optimizer.pt'))
            torch.save(scheduler.state_dict(), os.path.join(output_dir, 'scheduler.pt'))
            evaluate(args, model, tokenizer, eval_dataset=eval_dataset)
        epoch_num += 1
    # NOTE(review): raises ZeroDivisionError when no optimizer step ran
    # (e.g. with --skip_training) — confirm callers never hit that path.
    return (global_step, (tr_loss / global_step))
|
def evaluate(args, model, tokenizer, prefix='', eval_dataset=None):
    """Evaluate `model` on `eval_dataset` and return a results dict with 'acc'.

    T5 models are scored by exact string match of generated vs. gold decoded
    sequences; all other models by argmax accuracy over logits. When
    args.report_example_length is set, accuracy is also broken down by the
    examples' reasoning depth.
    """
    results = {}
    args.eval_batch_size = (args.per_gpu_eval_batch_size * max(1, args.n_gpu))
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, collate_fn=eval_dataset.collate_fn, sampler=eval_sampler, batch_size=args.eval_batch_size)
    if ((args.n_gpu > 1) and (not isinstance(model, torch.nn.DataParallel))):
        model = torch.nn.DataParallel(model)
    print('***** Running evaluation {} *****'.format(prefix))
    print('  Num examples = %d', len(eval_dataset))
    print('  Batch size = %d', args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    output_strings = []
    label_strings = []
    # Per-depth accuracy bookkeeping (used with --report_example_length).
    results_by_reasoning_depth = defaultdict(int)
    counter_by_reasoning_depth = defaultdict(int)
    # Named factory (instead of a lambda) for the nested defaultdict.
    def nested_defaultdict():
        return defaultdict(int)
    label_distribution_by_reasoning_depth = defaultdict(nested_defaultdict)
    correct_or_not_all = defaultdict(list)
    correct_counter = 0
    total_counter = 0
    for (_, batch) in enumerate(tqdm(eval_dataloader, desc='Evaluating')):
        model.eval()
        # The collate_fn appends the raw examples as the last element.
        (batch, examples) = (batch[:(- 1)], batch[(- 1)])
        batch = tuple((t.to(args.device) for t in batch))
        with torch.no_grad():
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
            if ((args.model_type != 'distilbert') and (args.model_type != 't5')):
                inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'xlnet', 'albert']) else None)
            if (args.model_type == 't5'):
                # Greedy generation + string comparison against decoded labels.
                if hasattr(model, 'module'):
                    _module = model.module
                else:
                    _module = model
                output_sequences = _module.generate(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'], do_sample=False)
                outputs = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
                output_strings.extend(outputs)
                label_strings.extend(tokenizer.batch_decode(inputs['labels'], skip_special_tokens=True))
                nb_eval_steps += 1
            else:
                # Classification path: accumulate loss and logits.
                outputs = model(**inputs)
                (tmp_eval_loss, logits) = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
                nb_eval_steps += 1
                if (preds is None):
                    preds = logits.detach().cpu().numpy()
                    out_label_ids = inputs['labels'].detach().cpu().numpy()
                else:
                    preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                    out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
        if (args.model_type == 't5'):
            # Exact-match accuracy, recomputed over all strings so far.
            correct_or_not = [(output_strings[i] == label_strings[i]) for i in range(len(output_strings))]
            correct_counter += sum(correct_or_not)
            total_counter += len(correct_or_not)
        if args.report_example_length:
            # NOTE(review): `logits` is only defined on the non-T5 path above —
            # this option looks incompatible with T5 models; confirm.
            correct_or_not = (logits.argmax((- 1)) == inputs['labels'].detach()).cpu().tolist()
            for index in range(len(examples)):
                results_by_reasoning_depth[examples[index]['depth']] += correct_or_not[index]
                counter_by_reasoning_depth[examples[index]['depth']] += 1
                label_distribution_by_reasoning_depth[examples[index]['depth']][examples[index]['label']] += 1
            for index in range(len(examples)):
                correct_or_not_all[examples[index]['example_index']].append(correct_or_not[index])
    if args.report_example_length:
        print()
        keys = list(results_by_reasoning_depth.keys())
        keys.sort()
        for key in keys:
            if (args.local_rank <= 0):
                print('  Depth {}: {}'.format(key, (results_by_reasoning_depth[key] / counter_by_reasoning_depth[key])))
                print('  Label_distribution {} : {}'.format(key, label_distribution_by_reasoning_depth[key]))
    if ('t5' in args.model_name_or_path):
        result = {'acc': (correct_counter / total_counter)}
        results.update(result)
    else:
        eval_loss = (eval_loss / nb_eval_steps)
        preds = np.argmax(preds, axis=1)
        result = {'acc': (out_label_ids == preds).mean()}
        results.update(result)
    return results
|
def main():
    """Entry point: parse CLI args, build model/tokenizer, train and/or evaluate.

    Side effects: writes checkpoints under --output_dir, appends evaluation
    results to eval_result.txt, and (in distributed mode) initializes the
    NCCL process group.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_type', default=None, type=str, required=True)
    parser.add_argument('--model_name_or_path', default=None, type=str, required=True)
    parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
    parser.add_argument('--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name')
    parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument('--do_visualization', action='store_true')
    parser.add_argument('--evaluate_during_training', action='store_true', help='Run evaluation during training at each logging step.')
    parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
    parser.add_argument('--per_gpu_train_batch_size', default=8, type=int, help='Batch size per GPU/CPU for training.')
    parser.add_argument('--per_gpu_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
    parser.add_argument('--adam_epsilon', default=1e-06, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
    parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
    parser.add_argument('--max_steps', default=(- 1), type=int, help='If > 0: set total number of training steps to perform. Override num_train_epochs.')
    parser.add_argument('--warmup_steps', default=0, type=float, help='Linear warmup over warmup_steps.')
    parser.add_argument('--logging_steps', type=int, default=500, help='Log every X updates steps.')
    parser.add_argument('--save_steps', type=int, default=500, help='Save checkpoint every X updates steps.')
    parser.add_argument('--eval_all_checkpoints', action='store_true', help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number')
    parser.add_argument('--no_cuda', action='store_true', help='Avoid using CUDA when available')
    parser.add_argument('--from_scratch', action='store_true', help='Avoid using CUDA when available')
    parser.add_argument('--overwrite_output_dir', action='store_true', help='Overwrite the content of the output directory')
    parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
    parser.add_argument('--nopooler', action='store_true', help='Do not load the pooler')
    parser.add_argument('--seed', type=int, default=9595, help='random seed for initialization')
    parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
    parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument('--custom_weight', type=str, default=None)
    parser.add_argument('--custom_config', type=str, default=None)
    parser.add_argument('--local_rank', type=int, default=(- 1), help='For distributed training: local_rank')
    parser.add_argument('--server_ip', type=str, default='', help='For distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='For distant debugging.')
    parser.add_argument('--max_length', type=int, default=128)
    parser.add_argument('--file_root', type=str, default=None)
    parser.add_argument('--file_path', type=str)
    parser.add_argument('--num_workers', type=int, default=2)
    parser.add_argument('--start_gradual_index', type=int, default=1)
    parser.add_argument('--load_bert_weight', type=str, default=None)
    parser.add_argument('--use_gradual_sampler', action='store_true')
    parser.add_argument('--limit_to_negative_examples', action='store_true')
    parser.add_argument('--limit_to_positive_examples', action='store_true')
    parser.add_argument('--skip_training', action='store_true')
    parser.add_argument('--further_split', action='store_true')
    parser.add_argument('--further_further_split', action='store_true')
    parser.add_argument('--report_example_length', action='store_true')
    parser.add_argument('--ignore_fact', action='store_true')
    parser.add_argument('--ignore_both', action='store_true')
    parser.add_argument('--ignore_query', action='store_true')
    parser.add_argument('--change_positional_embedding_after_loading', action='store_true')
    parser.add_argument('--change_positional_embedding_before_loading', action='store_true')
    parser.add_argument('--shorten_input', action='store_true')
    parser.add_argument('--shrink_ratio', default=1, type=int)
    parser.add_argument('--use_autocast', action='store_true')
    parser.add_argument('--max_depth_during_train', default=1000, type=int)
    parser.add_argument('--train_file_path', default=None)
    parser.add_argument('--val_file_path', default=None)
    parser.add_argument('--group_by_which_depth', default='depth')
    parser.add_argument('--keep_only_negative', action='store_true')
    parser.add_argument('--limit_report_depth', default=(- 1), type=int)
    parser.add_argument('--limit_report_max_depth', default=100, type=int)
    parser.add_argument('--skip_long_examples', action='store_true')
    parser.add_argument('--limit_example_num', default=(- 1), type=int)
    parser.add_argument('--resume_dir', default=None)
    args = parser.parse_args()
    if (os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and (not args.overwrite_output_dir)):
        raise ValueError('Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.'.format(args.output_dir))
    # Device / distributed setup.
    if ((args.local_rank == (- 1)) or args.no_cuda):
        device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
        args.n_gpu = (0 if args.no_cuda else torch.cuda.device_count())
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device
    # Only rank <= 0 prints from here on.
    setup_for_distributed((args.local_rank <= 0))
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', args.local_rank, device, args.n_gpu, bool((args.local_rank != (- 1))), args.fp16)
    set_seed(args)
    num_labels = 2
    # Barrier so only rank 0 downloads the pretrained files; others wait.
    if (args.local_rank not in [(- 1), 0]):
        torch.distributed.barrier()
    args.model_type = args.model_type.lower()
    config = AutoConfig.from_pretrained((args.config_name if args.config_name else args.model_name_or_path), num_labels=num_labels, cache_dir=(args.cache_dir if args.cache_dir else None))
    tokenizer = AutoTokenizer.from_pretrained((args.tokenizer_name if args.tokenizer_name else args.model_name_or_path), do_lower_case=args.do_lower_case, cache_dir=(args.cache_dir if args.cache_dir else None))
    # FIX: this used to be `if bert / if t5 ... else`, so a 'bert' model was
    # loaded and then immediately re-loaded (and overwritten) by the `else`
    # AutoModel branch. Chaining with elif loads the model exactly once.
    if ('bert' in args.model_name_or_path):
        model = BertForSequenceClassification.from_pretrained(args.model_name_or_path, config=config, cache_dir=(args.cache_dir if args.cache_dir else None))
    elif ('t5' in args.model_name_or_path):
        from transformers import T5Tokenizer, T5ForConditionalGeneration
        model = T5ForConditionalGeneration.from_pretrained(args.model_name_or_path)
    else:
        model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, config=config, cache_dir=(args.cache_dir if args.cache_dir else None))
    if args.change_positional_embedding_before_loading:
        expand_position_embeddings(model, args.max_length, args.model_name_or_path)
    if (args.custom_weight is not None):
        # Re-initialize, then load the custom weights (stripping DDP's
        # 'module.' prefix from the keys).
        model.apply(model._init_weights)
        custom_state_dict = torch.load(args.custom_weight, map_location='cpu')
        for key in list(custom_state_dict.keys()):
            custom_state_dict[key.replace('module.', '')] = custom_state_dict[key]
        load_state_dict_flexible(model, custom_state_dict)
        print('\n\nLoaded {}'.format(args.custom_weight))
    if (args.load_bert_weight is not None):
        # Old TF-converted checkpoints use gamma/beta for LayerNorm params.
        original_bert_weight = torch.load(args.load_bert_weight, map_location='cpu')
        old_keys = []
        new_keys = []
        for key in original_bert_weight.keys():
            new_key = None
            if ('gamma' in key):
                new_key = key.replace('gamma', 'weight')
            if ('beta' in key):
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for (old_key, new_key) in zip(old_keys, new_keys):
            original_bert_weight[new_key] = original_bert_weight.pop(old_key)
        load_state_dict_flexible(model, original_bert_weight)
    if args.change_positional_embedding_after_loading:
        expand_position_embeddings(model, args.max_length, args.model_name_or_path)
    if args.nopooler:
        model.bert.pooler.apply(model._init_weights)
    if args.from_scratch:
        print('\n\nReinitializing parameters\n\n')
        model.bert.apply(model._init_weights)
    # Release the download barrier for the non-zero ranks.
    if (args.local_rank == 0):
        torch.distributed.barrier()
    model.to(args.device)
    print('Training/evaluation parameters %s', args)
    if args.do_train:
        train_dataset = LogicDataset.initialze_from_file(args.train_file_path, args)
        train_dataset.report_length()
        val_dataset = LogicDataset.initialze_from_file(args.val_file_path, args)
        (global_step, tr_loss) = train(args, train_dataset, model, tokenizer, val_dataset)
        print(' global_step = %s, average loss = %s', global_step, tr_loss)
    # Save the final model (rank 0 only) and reload the tokenizer from it.
    if (args.do_train and ((args.local_rank == (- 1)) or (torch.distributed.get_rank() == 0))):
        if ((not os.path.exists(args.output_dir)) and (args.local_rank in [(- 1), 0])):
            os.makedirs(args.output_dir)
        print('Saving model checkpoint to %s', args.output_dir)
        model_to_save = (model.module if hasattr(model, 'module') else model)
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
        tokenizer = AutoTokenizer.from_pretrained(args.output_dir)
    print('Enterring evaluation')
    if (args.do_eval and (args.local_rank in [(- 1), 0])):
        model.eval()
        # --val_file_path may be a comma-separated list of files.
        if (',' in args.val_file_path):
            val_files = args.val_file_path.split(',')
        else:
            val_files = [args.val_file_path]
        all_results = {}
        all_kinds_of_results = []
        results_string_final = ''
        for val_file in val_files:
            results_string = {}
            results = []
            print('\n\n', val_file)
            val_dataset = LogicDataset.initialze_from_file(val_file, args)
            val_dataset.report_allkinds_of_stats()
            # Evaluate separately on each reasoning-depth bucket.
            datasets = LogicDataset.initialize_from_file_by_depth(val_file, args)
            depths = list(datasets.keys())
            depths.sort()
            total_example = sum([len(datasets[i]) for i in datasets])
            for depth in depths:
                print('\n\n')
                print('Evaluating examples of depth ', depth)
                result = evaluate(args, model, tokenizer, eval_dataset=datasets[depth])
                results_string[depth] = 'Acc: {} ; Percentage {}'.format(result['acc'], (len(datasets[depth]) / total_example))
                all_kinds_of_results.append(result['acc'])
                # Only depths inside [limit_report_depth, limit_report_max_depth]
                # count toward the headline average.
                if ((depth >= args.limit_report_depth) and (depth <= args.limit_report_max_depth)):
                    results.append(result['acc'])
            pprint.pprint(results_string)
            results_string_final += (val_file + '\n\n')
            results_string_final += pprint.pformat(results_string)
            results_string_final += '\n\n\n'
            all_kinds_of_results.insert(0, (sum(all_kinds_of_results) / len(all_kinds_of_results)))
            all_results[val_file] = '{:.3f}'.format(((sum(results) / len(results)) * 100))
            all_kinds_of_results.insert(1, (sum(results) / len(results)))
        print('Final Reporting')
        for key in sorted(list(all_results.keys())):
            print(key)
        print()
        for key in sorted(list(all_results.keys())):
            print(all_results[key])
        pprint.pprint(all_results)
        # NOTE(review): f.write(args.custom_weight) raises TypeError when
        # --custom_weight was not given — presumably eval runs always set it;
        # confirm before relying on this path.
        with open('eval_result.txt', 'a+') as f:
            f.write(args.custom_weight)
            f.write('\n\n')
            f.write(results_string_final)
            f.write('\n\n\n\n\n')
|
def setup_for_distributed(is_master):
    """Silence the builtin print on non-master processes (force=True overrides)."""
    import builtins as _builtins
    _original = _builtins.print

    def print(*args, **kwargs):
        if kwargs.pop('force', False) or is_master:
            _original(*args, **kwargs)
    _builtins.print = print
|
class TrainingMeter():
    """Accumulates running means of scalar training metrics (loss, acc, ...)."""

    def __init__(self):
        # Per-metric observation counts and value sums.
        self.counter_dict = defaultdict(float)
        self.true_dict = defaultdict(float)

    def update(self, loss_dict):
        """Record one observation for each metric in loss_dict."""
        for key, value in loss_dict.items():
            self.counter_dict[key] += 1
            self.true_dict[key] += value

    def report(self):
        """Print the mean of every tracked metric, sorted by name."""
        for key in sorted(self.counter_dict.keys()):
            print(' {} : {:.7}'.format(key, (self.true_dict[key] / self.counter_dict[key])))

    def clean(self):
        """Reset all accumulators."""
        self.counter_dict = defaultdict(float)
        self.true_dict = defaultdict(float)
|
def load_state_dict_flexible(model, state_dict):
    """Load `state_dict` into `model`, falling back to per-parameter loading.

    A strict load is attempted first; if it fails (missing/unexpected keys,
    shape mismatch), each parameter is copied individually, skipping keys
    the model does not have and reporting any per-parameter failure.
    """
    try:
        model.load_state_dict(state_dict)
    # FIX: was a bare `except:` which also swallowed KeyboardInterrupt /
    # SystemExit; `Exception` still covers load_state_dict's RuntimeError.
    except Exception:
        print('Full loading failed!! Try partial loading!!')
        own_state = model.state_dict()
        for (name, param) in state_dict.items():
            if (name not in own_state):
                print(('Skipped: ' + name))
                continue
            if isinstance(param, torch.nn.Parameter):
                # Parameters wrap tensors; copy the underlying data.
                param = param.data
            try:
                own_state[name].copy_(param)
                print(('Successfully loaded: ' + name))
            except Exception:
                # Typically a shape mismatch; leave the model's value as-is.
                print(('Part load failed: ' + name))
    print('\n\n')
|
def expand_position_embeddings(model, length=None, model_type='bert'):
    """Grow a BERT model's position-embedding table to support longer inputs.

    Freshly initialized rows are appended after the original table and the
    position_ids buffer is re-registered. No-op for non-BERT model types.
    """
    if ('bert' in model_type):
        embedding_model = model.bert.embeddings
        original_embedding = embedding_model.position_embeddings.weight.data
        # NOTE(review): appends (length - 500) new rows, which only yields a
        # `length`-row table if the original had exactly 500 rows — BERT's
        # default is 512; confirm the intended arithmetic.
        new_embedding = nn.Embedding((length - 500), (1024 if ('large' in model_type) else 768))
        _init_weights(new_embedding, model.config)
        new_embedding = torch.cat((original_embedding, new_embedding.weight.data), dim=0)
        embedding_model.position_embeddings.weight = torch.nn.Parameter(new_embedding)
        # Hard-coded 3000 caps the usable sequence length via position_ids.
        embedding_model.register_buffer('position_ids', torch.arange(3000).expand((1, (- 1))))
|
def _init_weights(module, config):
' Initialize the weights '
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if (isinstance(module, nn.Linear) and (module.bias is not None)):
module.bias.data.zero_()
|
def init():
    """Parse command-line arguments for the example-balancing script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_file', default='input.json', type=str)
    parser.add_argument('--output_file', default='output.json', type=str)
    parser.add_argument('--min_rule_num', default=0, type=int)
    parser.add_argument('--max_rule_num', default=80, type=int)
    return parser.parse_args()
|
def stats(examples):
    """Print aggregate statistics (label rate, average depths) for examples."""
    example_num = len(examples)
    if (example_num == 0):
        # Nothing to report for an empty list.
        return
    totals = {'label': 0.0, 'depth': 0.0, 'backward_depth': 0.0,
              'max_tree_depth': 0.0, 'tree_depth': 0.0}
    for example in examples:
        for field in totals:
            totals[field] += example[field]
    print('# of examples:', example_num)
    print('percentage of positive example:', (totals['label'] / example_num))
    print('avg depth:', (totals['depth'] / example_num))
    print('avg backward_depth:', (totals['backward_depth'] / example_num))
    print('avg max_tree_depth:', (totals['max_tree_depth'] / example_num))
    print('avg tree_depth:', (totals['tree_depth'] / example_num))
|
def main():
    """Balance a JSON dataset so that, within each rule-count bucket,
    positive and negative labels appear in (roughly) equal numbers, keeping
    the same balanced fraction across all buckets in the requested range."""
    args = init()
    with open(args.input_file, 'r') as fin:
        examples = json.load(fin)
    random.shuffle(examples)
    print('loaded')
    # balanced_examples[rule_count] = [negative_examples, positive_examples].
    # NOTE(review): buckets cover 0..120 rules; an example with more rules
    # would raise KeyError below -- presumably the generator caps rule counts.
    balanced_examples = {}
    for key in range(0, 121):
        balanced_examples[key] = [[], []]
    threshold = 1.0
    for example in examples:
        rule_num = len(example['rules'])
        balanced_examples[rule_num][example['label']].append(example)
    # threshold = worst-case achievable balanced fraction over the requested
    # rule-count range: 2*min(|neg|,|pos|)/(|neg|+|pos|).
    # NOTE(review): an empty bucket inside the range raises ZeroDivisionError
    # -- verify min/max_rule_num always select populated buckets.
    for key in balanced_examples:
        if ((args.min_rule_num <= key) and (key <= args.max_rule_num)):
            l0 = len(balanced_examples[key][0])
            l1 = len(balanced_examples[key][1])
            threshold = min(threshold, ((min(l0, l1) * 2.0) / (l0 + l1)))
    balanced_examples_ = []
    for key in balanced_examples:
        l0 = len(balanced_examples[key][0])
        l1 = len(balanced_examples[key][1])
        # take `l` from each label; if one label runs short, top up from the
        # other so the bucket still contributes about 2*l examples
        l = math.ceil((((l0 + l1) * threshold) / 2.0))
        balanced_examples_.extend(balanced_examples[key][0][:l])
        balanced_examples_.extend(balanced_examples[key][1][:l])
        if (l0 < l):
            balanced_examples_.extend(balanced_examples[key][1][l:((l + l) - l0)])
        if (l1 < l):
            balanced_examples_.extend(balanced_examples[key][0][l:((l + l) - l1)])
    balanced_examples = balanced_examples_
    print(f'threshold: {threshold}')
    print(f'# examples after balance: {len(balanced_examples)}')
    with open(args.output_file, 'w') as fout:
        json.dump(balanced_examples, fout, indent=2)
|
def init(argv=None):
    """Parse command-line options for the propositional-example generator.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:].
            Passing an explicit list makes the parser testable without
            touching the process arguments (backward compatible).

    Returns:
        argparse.Namespace with vocab_file, output_file, example_num,
        min/max_pred_num, balance_by_depth, max_depth and algo.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--vocab_file', default='vocab.txt', type=str)
    arg_parser.add_argument('--output_file', default='prop_examples.txt', type=str)
    arg_parser.add_argument('--example_num', default=1000, type=int)
    arg_parser.add_argument('--min_pred_num', default=5, type=int)
    arg_parser.add_argument('--max_pred_num', default=30, type=int)
    arg_parser.add_argument('--balance_by_depth', action='store_true')
    arg_parser.add_argument('--max_depth', default=6, type=int)
    # algo selects the generation strategy: RP, LP or LP_STAR
    arg_parser.add_argument('--algo', default='RP', type=str)
    return arg_parser.parse_args(argv)
|
def read_vocab(vocab_file):
    """Load a predicate vocabulary, one token per line, stripping
    surrounding whitespace. Prints the vocabulary size as a side effect."""
    with open(vocab_file, 'r') as fin:
        vocab = [line.strip() for line in fin]
    print('vocabulary size: ', len(vocab))
    return vocab
|
def sample_one_rule(preds):
head_num = random.randint(1, 3)
lits = random.sample(preds, min((head_num + 1), len(preds)))
random.shuffle(lits)
return (lits[:(- 1)], lits[(- 1)])
|
def sample_rule_priority(preds):
    """Generate one (rules, facts, query) theory "rule-priority" style:
    rules are sampled uniformly at random (deduplicated via a canonical
    premises+conclusion key), then facts and the query are drawn
    independently of the rules."""
    n = len(preds)
    rule_target = random.randint(0, 4 * n)
    fact_count = random.randint(0, n)
    seen = set()
    rules = []
    while len(rules) < rule_target:
        # resample until a rule not seen before is produced
        while True:
            candidate = sample_one_rule(preds)
            key = ' '.join(sorted(candidate[0])) + ' ' + candidate[1]
            if key not in seen:
                seen.add(key)
                break
        rules.append(candidate)
    facts = random.sample(preds, fact_count)
    query = random.sample(preds, 1)[0]
    return (rules, facts, query)
|
def sample_label_priority(preds):
    """Generate one (rules, facts, query) theory "label-priority" style.

    First assigns a truth label to every predicate, arranged layer by layer,
    then samples rules consistent with those labels (never all-true premises
    with a false conclusion), plus label-consistent distractor rules.
    Returns (rules, facts, query) where rules are (premises, conclusion).
    """
    preds_ = preds[:]
    random.shuffle(preds_)
    pred_num = len(preds)
    graph_depth = random.randint(1, (pred_num // 2))
    width = (pred_num // graph_depth)
    # preds_0 holds the remainder predicates; one is appended to each later level
    preds_0 = preds_[:(pred_num % graph_depth)]
    preds_ = preds_[(pred_num % graph_depth):]
    rules = []
    levels = []
    # level nodes are [predicate, label] pairs; force at least one 0 and one 1
    prev_level = [[x, random.randint(0, 1)] for x in preds_[:width]]
    if (graph_depth > 1):
        (prev_level[0][1], prev_level[1][1]) = (0, 1)
    else:
        # NOTE(review): the single-level case pins FOUR labels, which assumes
        # width >= 4 (i.e. pred_num >= 8 when graph_depth == 1) -- TODO confirm
        (prev_level[0][1], prev_level[1][1], prev_level[2][1], prev_level[3][1]) = (0, 1, 0, 1)
    preds_ = preds_[width:]
    levels.append(prev_level)
    for d in range(0, (graph_depth - 1)):
        level = [[x, random.randint(0, 1)] for x in preds_[:width]]
        preds_ = preds_[width:]
        if (len(preds_0) != 0):
            # appended as a tuple; unlike the list nodes it is never re-labelled
            level.append((preds_0[0], random.randint(0, 1)))
            preds_0 = preds_0[1:]
        (level[0][1], level[1][1]) = (0, 1)
        for node in level:
            (lit, label) = (node[0], node[1])
            # connect each node to same-label parents in the previous level
            # (prev_level always contains both labels, so head_cand is nonempty)
            head_cand = [x[0] for x in prev_level if (x[1] == label)]
            head_num = random.randint(1, min(3, len(head_cand)))
            head = random.sample(head_cand, head_num)
            rules.append((head, lit))
        levels.append(level)
        prev_level = level
    # distractor rules: anything except all-true premises with a false conclusion
    rule_num = random.randint((0 * pred_num), (3 * pred_num))
    nodes = [x for y in levels for x in y]
    neg_nodes = [x for x in nodes if (x[1] == 0)]
    rule_cnt = 0
    while (rule_cnt < rule_num):
        tail_node = random.sample(nodes, 1)[0]
        tail = tail_node[0]
        head_cand = [x for x in nodes if (x[0] != tail)]
        while True:
            head_num = random.randint(1, min(3, len(head_cand)))
            head_nodes = None
            head_nodes = random.sample(head_cand, head_num)
            # reject rules that would derive a false conclusion
            if (not (all([(x[1] == 1) for x in head_nodes]) and (tail_node[1] == 0))):
                break
        head = [x[0] for x in head_nodes]
        rules.append((head, tail))
        rule_cnt += 1
        # pair each all-true-premise rule with an all-false one so the rule
        # distribution does not leak label information
        if all(((x[1] == 1) for x in head_nodes)):
            neg_tail = random.sample(neg_nodes, 1)[0][0]
            neg_head_cand = [x for x in neg_nodes if (x[0] != neg_tail)]
            neg_head_num = random.randint(1, min(3, len(neg_head_cand)))
            neg_head_nodes = random.sample(neg_head_cand, neg_head_num)
            neg_head = [x[0] for x in neg_head_nodes]
            rules.append((neg_head, neg_tail))
            rule_cnt += 1
    # only the TRUE nodes of the input level become facts
    facts = [x[0] for x in levels[0] if (x[1] == 1)]
    query = random.sample([x[0] for x in nodes], 1)[0]
    return (rules, facts, query)
|
def sample_lp_star(preds):
preds_ = preds[:]
pred_num = len(preds)
graph_depth = random.randint(2, (pred_num // 2))
width = (pred_num // graph_depth)
preds_0 = preds_[:(pred_num % graph_depth)]
preds_ = preds_[(pred_num % graph_depth):]
rules = []
levels = []
prev_level = [[x, random.randint(0, 1)] for x in preds_[:width]]
(prev_level[0][1], prev_level[1][1]) = (0, 1)
preds_ = preds_[width:]
levels.append(prev_level)
for d in range(0, (graph_depth - 1)):
level = [[x, random.randint(0, 1)] for x in preds_[:width]]
if (preds_0 != []):
level.append((preds_0[0], random.randint(0, 1)))
preds_0 = preds_0[1:]
(level[0][1], level[1][1]) = (0, 1)
preds_ = preds_[width:]
for node in level:
(lit, label) = (node[0], node[1])
head_nodes_cand = prev_level
if (label == 1):
head_nodes_cand = [x for x in prev_level if (x[1] == 1)]
head_num = random.randint(1, min(3, len(head_nodes_cand)))
while True:
head_nodes = random.sample(head_nodes_cand, head_num)
if (not (all([x[1] for x in head_nodes]) and (label == 0))):
break
head = [x[0] for x in head_nodes]
rules.append((head, lit))
levels.append(level)
prev_level = level
rule_num = random.randint((0 * pred_num), (3 * pred_num))
nodes = [x for y in levels for x in y]
for _ in range(0, rule_num):
tail_d = random.randint(0, (len(levels) - 2))
tail_level = levels[tail_d]
tail_node = random.sample([x for x in tail_level if (x[1] == 1)], 1)[0]
tail = tail_node[0]
head_cand = [x for y in levels[tail_d:] for x in y if (x[0] != tail)]
head_num = random.randint(1, min(3, len(head_cand)))
while True:
head_nodes = random.sample(head_cand, head_num)
if (not all([x[1] for x in head_nodes])):
break
head_nodes = random.sample(head_cand, head_num)
head = [x[0] for x in head_nodes]
rules.append((head, tail))
facts = [x[0] for x in levels[0] if (x[1] == 1)]
query = random.sample([x[0] for x in nodes], 1)[0]
return (rules, facts, query)
|
def forward_chain(rules, facts):
    """Exhaustive forward chaining over propositional Horn rules.

    Args:
        rules: iterable of (premises, conclusion) pairs.
        facts: initially-true literals.

    Returns:
        dict mapping every derivable literal to the iteration at which it
        was first derived; the initial facts get depth 0.
    """
    derived = {fact: 0 for fact in facts}
    depth = 1
    previous_size = 0
    # iterate to a fixed point: stop when a full pass adds nothing new
    while len(derived) > previous_size:
        previous_size = len(derived)
        fresh = [conclusion for (premises, conclusion) in rules
                 if all(lit in derived for lit in premises)]
        for lit in fresh:
            if lit not in derived:
                derived[lit] = depth
        depth += 1
    return derived
|
def backward_chain_(u, depth, rules, facts, max_depth, ances):
    """Recursive helper: estimate the backward-proof depth of literal `u`.

    Returns a large sentinel (INF) when `u` is a known fact (zero remaining
    cost); otherwise returns `depth` when a cycle (`u` in `ances`) or the
    depth cap is hit, and the max over rules of the cheapest premise branch
    otherwise. `ances` is the set of ancestors on the current path.
    """
    INF = 100000000
    if u in facts:
        return INF
    if u in ances or depth == max_depth:
        return depth
    best = depth
    for premises, _ in (r for r in rules if r[1] == u):
        branch = INF
        for lit in premises:
            ances.add(u)
            branch = min(branch, backward_chain_(lit, depth + 1, rules, facts, max_depth, ances))
        ances.remove(u)
        best = max(best, branch)
    return best
|
def backward_chain(query, rules, facts, max_depth):
    """Estimate the backward-proof depth of `query`, starting the recursion
    at depth 0 with an empty ancestor set."""
    ancestors = set()
    return backward_chain_(query, 0, rules, facts, max_depth, ancestors)
|
def process_example(example, max_depth):
    """Label one example in place.

    Shuffles rule premises, rule order and fact order, runs forward
    chaining to decide example['label'], and records a proof-depth estimate
    in example['depth'] (forward depth for positives, backward-chaining
    estimate capped at max_depth+1 for negatives).
    """
    for rule in example['rules']:
        random.shuffle(rule[0])
    random.shuffle(example['rules'])
    random.shuffle(example['facts'])
    derived = forward_chain(example['rules'], example['facts'])
    example['label'] = 1 if example['query'] in derived else 0
    if example['label'] == 0:
        example['depth'] = backward_chain(example['query'], example['rules'], example['facts'], max_depth + 1)
    else:
        example['depth'] = derived[example['query']]
|
def sample_one_example(vocab, min_pred_num, max_pred_num, max_depth, algo):
    """Sample and label a single example with the chosen generation strategy.

    Args:
        vocab: predicate vocabulary to draw from.
        min_pred_num / max_pred_num: inclusive bounds on predicate count.
        max_depth: depth cap forwarded to process_example.
        algo: one of 'RP', 'LP', 'LP_STAR'.

    Returns:
        the labelled example dict, or None when generation failed.

    Raises:
        ValueError: for an unknown `algo` (the original code fell through to
        a NameError on the unbound `rules` in that case).
    """
    pred_num = random.randint(min_pred_num, max_pred_num)
    preds = random.sample(vocab, pred_num)
    if algo == 'RP':
        (rules, facts, query) = sample_rule_priority(preds)
    elif algo == 'LP':
        (rules, facts, query) = sample_label_priority(preds)
    elif algo == 'LP_STAR':
        (rules, facts, query) = sample_lp_star(preds)
    else:
        raise ValueError(f'unknown sampling algorithm: {algo!r}')
    if query is None:
        return None
    example = {'preds': preds, 'rules': rules, 'facts': facts, 'query': query}
    process_example(example, max_depth)
    return example
|
def sample_examples(example_num, vocab, min_pred_num, max_pred_num, max_depth, algo):
    """Draw `example_num` labelled examples, retrying each slot until
    sampling succeeds (sample_one_example may return None)."""
    collected = []
    for _ in tqdm(range(example_num)):
        candidate = None
        while candidate is None:
            candidate = sample_one_example(vocab, min_pred_num, max_pred_num, max_depth, algo)
        collected.append(candidate)
    return collected
|
def stats(examples):
    """Print example count, positive-label rate and mean proof depth.
    Silently returns on an empty list."""
    total = len(examples)
    if total == 0:
        return
    label_total = sum(ex['label'] for ex in examples)
    depth_total = sum(ex['depth'] for ex in examples)
    print('# of examples:', total)
    print('percentage of positive example:', label_total / total)
    print('avg depth:', depth_total / total)
|
def write_examples(examples, output_file):
    """Shuffle `examples` in place, then serialize them to `output_file`
    as a single JSON array."""
    random.shuffle(examples)
    with open(output_file, 'w') as fout:
        json.dump(examples, fout)
|
def main():
    """Generate synthetic propositional-logic examples and write them to disk.

    With --balance_by_depth, rejection-samples in batches of 1000 until
    exactly args.example_num examples exist for EVERY proof depth in
    0..max_depth; otherwise draws args.example_num examples unconditionally.
    """
    args = init()
    vocab = read_vocab(args.vocab_file)
    if args.balance_by_depth:
        # per-depth buckets, filled by rejection sampling
        examples = {}
        example_num = args.example_num
        # NOTE(review): `example_num` above is never used -- dead local.
        keys = [x for x in range(0, (args.max_depth + 1))]
        for k in keys:
            examples[k] = []
        while True:
            examples_ = sample_examples(1000, vocab, args.min_pred_num, args.max_pred_num, args.max_depth, args.algo)
            for example in examples_:
                # discard examples deeper than the cap
                if (example['depth'] > args.max_depth):
                    continue
                key = example['depth']
                if (len(examples[key]) < args.example_num):
                    examples[key].append(example)
            # done only when every depth bucket is full
            if all([(len(examples[k]) == args.example_num) for k in keys]):
                break
        # flatten the per-depth buckets into one list, shallow depths first
        examples = [x for k in keys for x in examples[k]]
    else:
        examples = sample_examples(args.example_num, vocab, args.min_pred_num, args.max_pred_num, args.max_depth, args.algo)
    stats(examples)
    write_examples(examples, args.output_file)
|
class CurveBall(Optimizer):
    """CurveBall optimizer.

    Second-order optimizer keeping a search direction `z` per parameter,
    updated with curvature information obtained via forward-mode products
    (see `fmad`) and double-backward. Unlike first-order optimizers, `step`
    takes closures for the model and the loss so it can rebuild the graph.
    """

    def __init__(self, params, lr=None, momentum=None, auto_lambda=True, lambd=10.0, lambda_factor=0.999, lambda_low=0.5, lambda_high=1.5, lambda_interval=5):
        # lambd is the damping/trust-region coefficient; with auto_lambda it
        # is re-estimated every lambda_interval steps. Negative lr/momentum
        # trigger the automatic (lr, momentum) solve inside step().
        defaults = dict(lr=lr, momentum=momentum, auto_lambda=auto_lambda, lambd=lambd, lambda_factor=lambda_factor, lambda_low=lambda_low, lambda_high=lambda_high, lambda_interval=lambda_interval)
        super().__init__(params, defaults)

    def step(self, model_fn, loss_fn):
        """Performs a single optimization step.

        Args:
            model_fn: zero-argument closure returning model predictions.
            loss_fn: closure mapping predictions to a scalar loss.

        Returns:
            (loss, predictions) from the forward pass used for this update.
        """
        if (len(self.param_groups) != 1):
            raise ValueError('Since the hyper-parameters are set automatically, only one parameter group (with the same hyper-parameters) is supported.')
        group = self.param_groups[0]
        parameters = group['params']
        state = self.state
        # lazily create the z buffer (CurveBall's search direction)
        for p in parameters:
            if (p not in state):
                state[p] = {'z': t.zeros_like(p)}
        zs = [state[p]['z'] for p in parameters]
        # optimizer-wide scalars are stashed in the first parameter's state
        global_state = state[parameters[0]]
        global_state.setdefault('count', 0)
        lambd = global_state.get('lambd', group['lambd'])
        predictions = model_fn()
        # detach so loss derivatives are taken w.r.t. predictions only
        predictions_d = predictions.detach().requires_grad_(True)
        loss = loss_fn(predictions_d)
        # Jz = J @ z (forward-mode Jacobian-vector product); Jl = dL/dpred
        (Jz,) = fmad(predictions, parameters, zs)
        (Jl,) = grad(loss, predictions_d, create_graph=True)
        Jl_d = Jl.detach()
        # Hl_Jz = H_loss @ (J z): Gauss-Newton curvature applied to z
        (Hl_Jz,) = grad(Jl, predictions_d, grad_outputs=Jz, retain_graph=True)
        # delta_zs = J^T (H J z + Jl): the proposed update direction
        delta_zs = grad(predictions, parameters, (Hl_Jz + Jl_d), retain_graph=True)
        for (z, dz) in zip(zs, delta_zs):
            # damping term: dz += lambd * z
            # NOTE(review): add_(scalar, tensor) is the deprecated overload;
            # newer torch spells this add_(z, alpha=lambd).
            dz.data.add_(lambd, z)
        lr = group['lr']
        momentum = group['momentum']
        if ((momentum < 0) or (lr < 0) or group['auto_lambda']):
            # Automatic (lr, momentum): solve the 2x2 system minimizing the
            # local quadratic model over span{z, delta_z}.
            (Jdeltaz,) = fmad(predictions, parameters, delta_zs)
            (Hl_Jdeltaz,) = grad(Jl, predictions_d, grad_outputs=Jdeltaz)
            z_vec = t.cat([z.flatten() for z in zs])
            dz_vec = t.cat([dz.flatten() for dz in delta_zs])
            a11 = ((lambd * (dz_vec * dz_vec).sum()) + (Jdeltaz * Hl_Jdeltaz).sum())
            a12 = ((lambd * (dz_vec * z_vec).sum()) + (Jz * Hl_Jdeltaz).sum())
            a22 = ((lambd * (z_vec * z_vec).sum()) + (Jz * Hl_Jz).sum())
            b1 = (Jl_d * Jdeltaz).sum()
            b2 = (Jl_d * Jz).sum()
            A = t.tensor([[a11.item(), a12.item()], [a12.item(), a22.item()]])
            b = t.tensor([[b1.item()], [b2.item()]])
            # pinverse tolerates a singular system (e.g. z == 0 on step one)
            auto_params = (A.pinverse() @ b)
            lr = auto_params[0].item()
            momentum = (- auto_params[1].item())
        # z <- momentum * z - lr * dz ; p <- p + z
        for (p, z, dz) in zip(parameters, zs, delta_zs):
            z.data.mul_(momentum).add_((- lr), dz)
            p.data.add_(z)
        if group['auto_lambda']:
            # trust-region-style adaptation: compare actual loss change with
            # the quadratic model's predicted change every lambda_interval steps
            if ((global_state['count'] % group['lambda_interval']) == 0):
                with t.no_grad():
                    new_loss = loss_fn(model_fn())
                # auto_params/b exist here because auto_lambda forces the
                # automatic-hyperparameter branch above to run
                quadratic_change = ((- 0.5) * (auto_params * b).sum())
                ratio = ((new_loss - loss) / quadratic_change)
                factor = (group['lambda_factor'] ** group['lambda_interval'])
                if (ratio < group['lambda_low']):
                    lambd /= factor
                if (ratio > group['lambda_high']):
                    lambd *= factor
                global_state['lambd'] = lambd
        global_state['count'] += 1
        return (loss, predictions)
|
def fmad(ys, xs, dxs):
    """Forward-mode automatic differentiation via the double-backward trick.

    Computes the Jacobian-vector products J @ dxs (J being the Jacobian of
    ys w.r.t. xs) by differentiating a dummy vector-Jacobian product a
    second time with respect to the dummy vector.
    """
    dummy = t.zeros_like(ys, requires_grad=True)
    vjp = grad(ys, xs, grad_outputs=dummy, create_graph=True)
    return grad(vjp, dummy, grad_outputs=dxs)
|
def train(args, net, device, train_loader, optimizer, epoch, logger):
    """Run one training epoch over `train_loader`.

    CurveBall receives model/loss closures (it performs its own forward and
    backward passes); every other optimizer follows the standard
    zero_grad/backward/step protocol. Per-batch loss/accuracy go to `logger`
    when present, otherwise to stdout.
    """
    net.train()
    for (batch_idx, (data, target)) in enumerate(train_loader):
        start = time()
        (data, target) = (data.to(device), target.to(device))
        # closures capture the current batch for CurveBall's internal passes
        model_fn = (lambda : net(data))
        loss_fn = (lambda pred: F.cross_entropy(pred, target))
        if isinstance(optimizer, CurveBall):
            (loss, predictions) = optimizer.step(model_fn, loss_fn)
        else:
            optimizer.zero_grad()
            predictions = model_fn()
            loss = loss_fn(predictions)
            loss.backward()
            optimizer.step()
        pred = predictions.max(1, keepdim=True)[1]
        accuracy = pred.eq(target.view_as(pred)).double().mean()
        stats = {'train.loss': loss.item(), 'train.accuracy': accuracy.item()}
        if logger:
            logger.update_average(stats)
            # skip the first few batches when averaging time (warm-up)
            if (logger.avg_count['train.loss'] > 3):
                logger.update_average({'train.time': (time() - start)})
            logger.print(line_prefix=('ep %i ' % epoch), prefix='train')
        else:
            print(stats)
|
def test(args, net, device, test_loader, logger):
    """Evaluate `net` on `test_loader` with gradients disabled.

    Per-batch validation loss/accuracy go to `logger` when present,
    otherwise to stdout.
    """
    net.eval()
    with torch.no_grad():
        for (data, target) in test_loader:
            start = time()
            (data, target) = (data.to(device), target.to(device))
            predictions = net(data)
            loss = F.cross_entropy(predictions, target)
            pred = predictions.max(1, keepdim=True)[1]
            accuracy = pred.eq(target.view_as(pred)).double().mean()
            stats = {'val.loss': loss.item(), 'val.accuracy': accuracy.item()}
            if logger:
                logger.update_average(stats)
                # skip the first few batches when averaging time (warm-up)
                if (logger.avg_count['val.loss'] > 3):
                    logger.update_average({'val.time': (time() - start)})
                logger.print(prefix='val')
            else:
                print(stats)
|
def main():
    """Entry point: train a CIFAR-10 model with SGD, Adam or CurveBall."""
    # every callable attribute of the models module is offered as a -model choice
    all_models = [name for name in dir(models) if callable(getattr(models, name))]
    parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
    parser.add_argument('experiment', nargs='?', default='test')
    parser.add_argument('-model', choices=all_models, default='BasicNetBN')
    parser.add_argument('-optimizer', choices=['sgd', 'adam', 'curveball'], default='curveball')
    # -1 means "use the optimizer-specific default chosen below" / "auto"
    parser.add_argument('-lr', default=(- 1), type=float, help='learning rate')
    parser.add_argument('-momentum', type=float, default=(- 1), metavar='M')
    parser.add_argument('-lambda', type=float, default=1.0)
    parser.add_argument('--no-auto-lambda', action='store_true', default=False, help='disables automatic lambda estimation')
    parser.add_argument('-batch-size', default=128, type=int)
    parser.add_argument('-epochs', default=200, type=int)
    parser.add_argument('-save-interval', default=10, type=int)
    parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
    parser.add_argument('-outputdir', default='data/cifar-experiments', type=str)
    parser.add_argument('-datadir', default='data/cifar', type=str)
    parser.add_argument('-device', default='cuda', type=str)
    parser.add_argument('--parallel', action='store_true', default=False)
    args = parser.parse_args()
    # results land in <outputdir>/<model>/<optimizer>/<experiment>
    args.outputdir += ((((('/' + args.model) + '/') + args.optimizer) + '/') + args.experiment)
    if os.path.isdir(args.outputdir):
        input('Directory already exists. Press Enter to overwrite or Ctrl+C to cancel.')
    if (not torch.cuda.is_available()):
        args.device = 'cpu'
    best_acc = 0
    start_epoch = 0
    # standard CIFAR-10 augmentation + per-channel normalization
    transform_train = transforms.Compose([transforms.RandomCrop(32, padding=2, fill=(128, 128, 128)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
    transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
    train_set = torchvision.datasets.CIFAR10(root=args.datadir, train=True, download=True, transform=transform_train)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, num_workers=2, shuffle=True)
    test_set = torchvision.datasets.CIFAR10(root=args.datadir, train=False, download=True, transform=transform_test)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, num_workers=2, shuffle=False)
    net = getattr(models, args.model)()
    net = net.to(args.device)
    if ((args.device != 'cpu') and args.parallel):
        net = torch.nn.DataParallel(net)
        torch.backends.cudnn.benchmark = True
    if args.resume:
        print('Resuming from checkpoint..')
        assert os.path.isdir(args.outputdir), 'Error: no checkpoint directory found!'
        checkpoint = torch.load((args.outputdir + '/last.t7'))
        net.load_state_dict(checkpoint['net'])
        # NOTE(review): the checkpoint also stores the optimizer state (see
        # the save below) but it is never restored here -- confirm intent.
        best_acc = checkpoint['acc']
        start_epoch = checkpoint['epoch']
    if (args.optimizer == 'sgd'):
        if (args.lr < 0):
            args.lr = 0.1
        optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9)
    elif (args.optimizer == 'adam'):
        if (args.lr < 0):
            args.lr = 0.001
        optimizer = optim.Adam(net.parameters(), lr=args.lr)
    elif (args.optimizer == 'curveball'):
        # 'lambda' is a Python keyword, so the parsed value needs getattr
        lambd = getattr(args, 'lambda')
        optimizer = CurveBall(net.parameters(), lr=args.lr, momentum=args.momentum, lambd=lambd, auto_lambda=(not args.no_auto_lambda))
    logger = None
    if Logger:
        logger = Logger(args.outputdir, meta=args, resume=args.resume)
    for epoch in range(start_epoch, args.epochs):
        train(args, net, args.device, train_loader, optimizer, epoch, logger)
        test(args, net, args.device, test_loader, logger)
        if logger:
            acc = logger.average()['val.accuracy']
            logger.append()
        if ((epoch % args.save_interval) == 0):
            print('Saving..')
            # NOTE(review): `acc` is only bound when a logger exists; saving
            # without one would raise NameError -- verify.
            state = {'net': net.state_dict(), 'optimizer': optimizer.state_dict(), 'acc': acc, 'epoch': epoch}
            if (not os.path.isdir(args.outputdir)):
                os.mkdir(args.outputdir)
            torch.save(state, (args.outputdir + '/last.t7'))
            # keep a copy of the best checkpoint so far
            if (logger and (acc > best_acc)):
                shutil.copyfile((args.outputdir + '/last.t7'), (args.outputdir + '/best.t7'))
                best_acc = acc
|
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
|
def onehot(target, like):
    """Transform integer class labels into one-hot regression targets.

    Args:
        target: (N,) integer labels.
        like: (N, C) tensor used only as a template for the output's shape,
            dtype and device.

    Returns:
        a new (N, C) tensor with a 1.0 at each label position, zeros elsewhere.
    """
    result = torch.zeros_like(like)
    result.scatter_(1, target.unsqueeze(1), 1.0)
    return result
|
def train(args, model, device, train_loader, optimizer, epoch, logger):
    """Run one training epoch over `train_loader` (MNIST variant).

    CurveBall receives model/loss closures (it performs its own forward and
    backward passes); every other optimizer follows the standard
    zero_grad/backward/step protocol. Per-batch stats go to `logger` when
    present, otherwise to stdout.
    """
    model.train()
    for (batch_idx, (data, target)) in enumerate(train_loader):
        start = time()
        (data, target) = (data.to(device), target.to(device))
        # closures capture the current batch for CurveBall's internal passes
        model_fn = (lambda : model(data))
        loss_fn = (lambda pred: F.cross_entropy(pred, target))
        if isinstance(optimizer, CurveBall):
            (loss, predictions) = optimizer.step(model_fn, loss_fn)
        else:
            optimizer.zero_grad()
            predictions = model_fn()
            loss = loss_fn(predictions)
            loss.backward()
            optimizer.step()
        pred = predictions.max(1, keepdim=True)[1]
        accuracy = pred.eq(target.view_as(pred)).double().mean()
        stats = {'train.loss': loss.item(), 'train.accuracy': accuracy.item()}
        if logger:
            logger.update_average(stats)
            # skip the first few batches when averaging time (warm-up)
            if (logger.avg_count['train.loss'] > 3):
                logger.update_average({'train.time': (time() - start)})
            logger.print(line_prefix=('ep %i ' % epoch), prefix='train')
        else:
            print(stats)
|
def test(args, model, device, test_loader, logger):
    """Evaluate `model` on `test_loader` with gradients disabled.

    Unlike the CIFAR variant, the logger's averages are printed once after
    the whole loop rather than per batch.
    """
    model.eval()
    with torch.no_grad():
        for (data, target) in test_loader:
            start = time()
            (data, target) = (data.to(device), target.to(device))
            predictions = model(data)
            loss = F.cross_entropy(predictions, target)
            pred = predictions.max(1, keepdim=True)[1]
            accuracy = pred.eq(target.view_as(pred)).double().mean()
            stats = {'val.loss': loss.item(), 'val.accuracy': accuracy.item()}
            if logger:
                logger.update_average(stats)
                # skip the first few batches when averaging time (warm-up)
                if (logger.avg_count['val.loss'] > 3):
                    logger.update_average({'val.time': (time() - start)})
            else:
                print(stats)
    if logger:
        logger.print(prefix='val')
|
def main():
    """Entry point: train a small MNIST convnet with SGD, Adam or CurveBall."""
    parser = argparse.ArgumentParser()
    parser.add_argument('experiment', nargs='?', default='test')
    parser.add_argument('-batch-size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)')
    parser.add_argument('-test-batch-size', type=int, default=1000, help='input batch size for testing (default: 1000)')
    parser.add_argument('-epochs', type=int, default=10, help='number of epochs to train (default: 10)')
    parser.add_argument('-optimizer', choices=['sgd', 'adam', 'curveball'], default='curveball', help='optimizer (sgd, adam, or curveball)')
    # -1 means "use the optimizer-specific default chosen below" / "auto"
    parser.add_argument('-lr', type=float, default=(- 1), metavar='LR', help='learning rate (default: 0.01 for SGD, 0.001 for Adam, 1 for CurveBall)')
    parser.add_argument('-momentum', type=float, default=(- 1), metavar='M', help='momentum (default: 0.5)')
    parser.add_argument('-lambda', type=float, default=1.0, help='lambda')
    parser.add_argument('--no-auto-lambda', action='store_true', default=False, help='disables automatic lambda estimation')
    parser.add_argument('--no-batch-norm', action='store_true', default=False)
    parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
    parser.add_argument('-seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
    parser.add_argument('-datadir', type=str, default='data/mnist', help='MNIST data directory')
    parser.add_argument('-outputdir', type=str, default='data/mnist-experiments', help='output directory')
    args = parser.parse_args()
    use_cuda = ((not args.no_cuda) and torch.cuda.is_available())
    # results land in <outputdir>/<optimizer>/<experiment>
    args.outputdir += ((('/' + args.optimizer) + '/') + args.experiment)
    if os.path.isdir(args.outputdir):
        input('Directory already exists. Press Enter to overwrite or Ctrl+C to cancel.')
    torch.manual_seed(args.seed)
    device = torch.device(('cuda' if use_cuda else 'cpu'))
    kwargs = ({'num_workers': 1, 'pin_memory': True} if use_cuda else {})
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(args.datadir, train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(args.datadir, train=False, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.test_batch_size, shuffle=True, **kwargs)
    # small LeNet-style conv stack; Flatten bridges conv and linear parts
    layers = [nn.Conv2d(1, 10, kernel_size=5), nn.MaxPool2d(kernel_size=2), nn.ReLU(), nn.Conv2d(10, 20, kernel_size=5), nn.MaxPool2d(kernel_size=2), nn.ReLU(), nn.Dropout2d(), Flatten(), nn.Linear(320, 50), nn.ReLU(), nn.Dropout(), nn.Linear(50, 10)]
    if (not args.no_batch_norm):
        insert_bnorm(layers)
    model = nn.Sequential(*layers)
    model.to(device)
    if (args.optimizer == 'sgd'):
        if (args.lr < 0):
            args.lr = 0.01
        if (args.momentum < 0):
            args.momentum = 0.5
        optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    elif (args.optimizer == 'adam'):
        if (args.lr < 0):
            args.lr = 0.001
        optimizer = optim.Adam(model.parameters(), lr=args.lr)
    elif (args.optimizer == 'curveball'):
        # 'lambda' is a Python keyword, so the parsed value needs getattr;
        # negative lr/momentum tell CurveBall to estimate them automatically
        lambd = getattr(args, 'lambda')
        optimizer = CurveBall(model.parameters(), lr=args.lr, momentum=args.momentum, lambd=lambd, auto_lambda=(not args.no_auto_lambda))
    with Logger(args.outputdir, meta=args) as logger:
        for epoch in range(1, (args.epochs + 1)):
            train(args, model, device, train_loader, optimizer, epoch, logger)
            test(args, model, device, test_loader, logger)
            logger.append()
|
class Flatten(nn.Module):
    """Reshape (N, *dims) activations to (N, prod(dims)) for linear layers."""

    def forward(self, input):
        return input.view(input.size(0), -1)
|
def BasicNetBN():
    """BasicNet variant with batch normalization enabled."""
    return BasicNet(batch_norm=True)
|
def BasicNet(batch_norm=False):
    """Basic network for CIFAR: three conv/pool stages followed by two
    fully-connected layers.

    Args:
        batch_norm: when True, insert batch-norm after each conv/linear
            layer except the final classifier (with gain init and eps=1e-4).
    """
    layers = [
        nn.Conv2d(3, 32, kernel_size=5, padding=2),
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        nn.Conv2d(32, 32, kernel_size=5, padding=2),
        nn.ReLU(),
        nn.AvgPool2d(kernel_size=3, stride=2, padding=1),
        nn.Conv2d(32, 64, kernel_size=5, padding=2),
        nn.ReLU(),
        nn.AvgPool2d(kernel_size=3, stride=2, padding=1),
        Flatten(),
        nn.Linear(4 * 4 * 64, 64),
        nn.ReLU(),
        nn.Linear(64, 10),
    ]
    if batch_norm:
        insert_bnorm(layers, init_gain=True, eps=0.0001)
    return nn.Sequential(*layers)
|
def insert_bnorm(layers, init_gain=False, eps=1e-05, ignore_last_layer=True):
    """Insert a batch-norm layer right after every conv/linear in `layers`.

    Walks the list back-to-front so insertions do not shift indices that are
    still to be visited. The final conv/linear (the classifier) is skipped
    unless ignore_last_layer is False. Mutates `layers` in place and also
    returns it.
    """
    pending_last = True
    for idx in range(len(layers) - 1, -1, -1):
        layer = layers[idx]
        if not isinstance(layer, (nn.Conv2d, nn.Linear)):
            continue
        if ignore_last_layer and pending_last:
            pending_last = False
            continue
        if isinstance(layer, nn.Conv2d):
            bnorm = nn.BatchNorm2d(layer.out_channels, eps=eps)
        else:
            bnorm = nn.BatchNorm1d(layer.out_features, eps=eps)
        if init_gain:
            bnorm.weight.data[:] = 1.0
        layers.insert(idx + 1, bnorm)
    return layers
|
class Bottleneck(nn.Module):
    """DenseNet bottleneck layer: BN-ReLU-1x1 conv then BN-ReLU-3x3 conv,
    with the input concatenated after the new feature maps (dense
    connectivity), so output channels = in_planes + growth_rate."""

    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        inner = 4 * growth_rate  # bottleneck width
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, inner, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(inner)
        self.conv2 = nn.Conv2d(inner, growth_rate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        y = self.conv1(F.relu(self.bn1(x)))
        y = self.conv2(F.relu(self.bn2(y)))
        # new features first, then the untouched input channels
        return torch.cat([y, x], 1)
|
class Transition(nn.Module):
    """DenseNet transition layer: BN-ReLU-1x1 conv to change the channel
    count, then 2x2 average pooling to halve the spatial resolution."""

    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)

    def forward(self, x):
        squeezed = self.conv(F.relu(self.bn(x)))
        return F.avg_pool2d(squeezed, 2)
|
class DenseNet(nn.Module):
    """DenseNet for CIFAR-sized (32x32) inputs.

    Four dense blocks separated by transition layers that compress channels
    by `reduction` and halve the spatial size; each dense layer adds
    `growth_rate` channels to the running feature map.
    """

    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        # block: dense-layer class (e.g. Bottleneck);
        # nblocks: number of layers in each of the four dense blocks
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate
        num_planes = (2 * growth_rate)
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += (nblocks[0] * growth_rate)
        # compress channels by `reduction` between dense blocks
        out_planes = int(math.floor((num_planes * reduction)))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += (nblocks[1] * growth_rate)
        out_planes = int(math.floor((num_planes * reduction)))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += (nblocks[2] * growth_rate)
        out_planes = int(math.floor((num_planes * reduction)))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += (nblocks[3] * growth_rate)
        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)

    def _make_dense_layers(self, block, in_planes, nblock):
        # each layer consumes all previous features and emits growth_rate more
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        # final BN-ReLU, 4x4 average pool, then the linear classifier
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), (- 1))
        out = self.linear(out)
        return out
|
def DenseNet121():
    """DenseNet-121 configuration (growth rate 32)."""
    return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=32)
|
def DenseNet169():
    """DenseNet-169 configuration (growth rate 32)."""
    return DenseNet(Bottleneck, [6, 12, 32, 32], growth_rate=32)
|
def DenseNet201():
    """DenseNet-201 configuration (growth rate 32)."""
    return DenseNet(Bottleneck, [6, 12, 48, 32], growth_rate=32)
|
def DenseNet161():
    """DenseNet-161 configuration (growth rate 48)."""
    return DenseNet(Bottleneck, [6, 12, 36, 24], growth_rate=48)
|
def densenet_cifar():
    """Small DenseNet for CIFAR experiments (growth rate 12)."""
    return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=12)
|
def test():
    """Smoke test: run one random CIFAR-shaped input through densenet_cifar
    and print the raw output tensor."""
    net = densenet_cifar()
    sample = torch.randn(1, 3, 32, 32)
    print(net(sample))
|
class Inception(nn.Module):
    """GoogLeNet inception block with four parallel branches, concatenated
    along the channel axis. The "5x5" branch is implemented as two stacked
    3x3 convs (same receptive field, fewer parameters).

    Output channels = n1x1 + n3x3 + n5x5 + pool_planes.
    """

    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
        super(Inception, self).__init__()
        # branch 1: plain 1x1 conv
        self.b1 = nn.Sequential(
            nn.Conv2d(in_planes, n1x1, kernel_size=1),
            nn.BatchNorm2d(n1x1),
            nn.ReLU(True),
        )
        # branch 2: 1x1 channel reduction followed by a 3x3 conv
        self.b2 = nn.Sequential(
            nn.Conv2d(in_planes, n3x3red, kernel_size=1),
            nn.BatchNorm2d(n3x3red),
            nn.ReLU(True),
            nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
            nn.BatchNorm2d(n3x3),
            nn.ReLU(True),
        )
        # branch 3: 1x1 reduction then two 3x3 convs (5x5 receptive field)
        self.b3 = nn.Sequential(
            nn.Conv2d(in_planes, n5x5red, kernel_size=1),
            nn.BatchNorm2d(n5x5red),
            nn.ReLU(True),
            nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
            nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
        )
        # branch 4: 3x3 max-pool then a 1x1 conv
        self.b4 = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            nn.Conv2d(in_planes, pool_planes, kernel_size=1),
            nn.BatchNorm2d(pool_planes),
            nn.ReLU(True),
        )

    def forward(self, x):
        branches = [self.b1(x), self.b2(x), self.b3(x), self.b4(x)]
        return torch.cat(branches, 1)
|
class GoogLeNet(nn.Module):
    """GoogLeNet (Inception v1) adapted to 32x32 CIFAR inputs: the stem is a
    single 3x3 conv and the classifier pools an 8x8 map down to 1x1."""

    def __init__(self):
        super(GoogLeNet, self).__init__()
        self.pre_layers = nn.Sequential(nn.Conv2d(3, 192, kernel_size=3, padding=1), nn.BatchNorm2d(192), nn.ReLU(True))
        # Inception(in, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes);
        # output channels = n1x1 + n3x3 + n5x5 + pool_planes
        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, 10)

    def forward(self, x):
        out = self.pre_layers(x)
        out = self.a3(out)
        out = self.b3(out)
        out = self.maxpool(out)  # 32x32 -> 16x16
        out = self.a4(out)
        out = self.b4(out)
        out = self.c4(out)
        out = self.d4(out)
        out = self.e4(out)
        out = self.maxpool(out)  # 16x16 -> 8x8
        out = self.a5(out)
        out = self.b5(out)
        out = self.avgpool(out)  # 8x8 -> 1x1
        out = out.view(out.size(0), (- 1))
        out = self.linear(out)
        return out
|
def test():
    """Smoke test: forward one random CIFAR-shaped batch through GoogLeNet
    and print the output size."""
    net = GoogLeNet()
    sample = torch.randn(1, 3, 32, 32)
    print(net(sample).size())
|
class LeNet(nn.Module):
    """Classic LeNet-5-style convnet adapted to 3-channel 32x32 inputs,
    producing 10 class logits."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        h = F.max_pool2d(F.relu(self.conv1(x)), 2)
        h = F.max_pool2d(F.relu(self.conv2(h)), 2)
        h = h.view(h.size(0), -1)
        h = F.relu(self.fc1(h))
        h = F.relu(self.fc2(h))
        return self.fc3(h)
|
class Block(nn.Module):
    """Depthwise conv + Pointwise conv (MobileNet v1 building block),
    each followed by batch-norm and ReLU."""

    def __init__(self, in_planes, out_planes, stride=1):
        super(Block, self).__init__()
        # groups=in_planes makes conv1 depthwise (one filter per channel)
        self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3,
                               stride=stride, padding=1, groups=in_planes, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        # 1x1 pointwise conv mixes channels and sets the output width
        self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        depthwise = F.relu(self.bn1(self.conv1(x)))
        pointwise = F.relu(self.bn2(self.conv2(depthwise)))
        return pointwise
|
class MobileNet(nn.Module):
    """MobileNet (v1) for CIFAR-sized inputs.

    cfg entries are either an int (output channels, stride 1) or a
    (channels, stride) tuple; each entry becomes one depthwise-separable
    Block.
    """
    cfg = [64, (128, 2), 128, (256, 2), 256, (512, 2), 512, 512, 512, 512, 512, (1024, 2), 1024]

    def __init__(self, num_classes=10):
        super(MobileNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.linear = nn.Linear(1024, num_classes)

    def _make_layers(self, in_planes):
        # expand cfg into a sequence of Blocks, threading the channel count
        layers = []
        for x in self.cfg:
            out_planes = (x if isinstance(x, int) else x[0])
            stride = (1 if isinstance(x, int) else x[1])
            layers.append(Block(in_planes, out_planes, stride))
            in_planes = out_planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.avg_pool2d(out, 2)
        out = out.view(out.size(0), (- 1))
        out = self.linear(out)
        return out
|
def test():
    """Smoke-test MobileNet on one CIFAR-sized input and print the output size."""
    model = MobileNet()
    sample = torch.randn(1, 3, 32, 32)
    print(model(sample).size())
|
class Block(nn.Module):
    """MobileNetV2 inverted residual: expand (1x1) + depthwise (3x3) + project (1x1)."""

    def __init__(self, in_planes, out_planes, expansion, stride):
        super(Block, self).__init__()
        self.stride = stride
        hidden = expansion * in_planes
        # 1x1 expansion.
        self.conv1 = nn.Conv2d(in_planes, hidden, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(hidden)
        # 3x3 depthwise (groups == channels).
        self.conv2 = nn.Conv2d(hidden, hidden, kernel_size=3, stride=stride,
                               padding=1, groups=hidden, bias=False)
        self.bn2 = nn.BatchNorm2d(hidden)
        # 1x1 linear projection -- no activation after bn3.
        self.conv3 = nn.Conv2d(hidden, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        # Residual path only when spatial size is preserved (stride 1); a 1x1
        # conv matches channel widths when they differ.
        self.shortcut = nn.Sequential()
        if stride == 1 and in_planes != out_planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(out_planes),
            )

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        if self.stride == 1:
            h = h + self.shortcut(x)
        return h
|
class MobileNetV2(nn.Module):
    """MobileNetV2 for CIFAR-10 built from inverted-residual blocks.

    cfg rows are (expansion, out_planes, num_blocks, stride).
    """

    cfg = [(1, 16, 1, 1), (6, 24, 2, 1), (6, 32, 3, 2), (6, 64, 4, 2),
           (6, 96, 3, 1), (6, 160, 3, 2), (6, 320, 1, 1)]

    def __init__(self, num_classes=10):
        super(MobileNetV2, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        # Head: 1x1 conv widens 320 -> 1280 before pooling and classification.
        self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(1280)
        self.linear = nn.Linear(1280, num_classes)

    def _make_layers(self, in_planes):
        blocks = []
        for expansion, out_planes, num_blocks, stride in self.cfg:
            # Only the first block of each stage may downsample.
            for s in [stride] + [1] * (num_blocks - 1):
                blocks.append(Block(in_planes, out_planes, expansion, s))
                in_planes = out_planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.layers(h)
        h = F.relu(self.bn2(self.conv2(h)))
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
|
def test():
    """Smoke-test MobileNetV2 on a batch of two CIFAR-sized inputs."""
    model = MobileNetV2()
    sample = torch.randn(2, 3, 32, 32)
    print(model(sample).size())
|
class SepConv(nn.Module):
    """Depthwise ("separable") convolution followed by batch-norm.

    Because groups == in_planes, PyTorch requires out_planes to be a
    multiple of in_planes.
    """

    def __init__(self, in_planes, out_planes, kernel_size, stride):
        super(SepConv, self).__init__()
        pad = (kernel_size - 1) // 2  # "same" padding for odd kernels
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size, stride,
                               padding=pad, bias=False, groups=in_planes)
        self.bn1 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        return self.bn1(self.conv1(x))
|
class CellA(nn.Module):
    """PNASNet cell summing two branches: a 7x7 separable conv and a max-pool.

    When stride == 2 the pooled branch is projected with a 1x1 conv + BN so
    its channel count matches the conv branch before the sum.
    """

    def __init__(self, in_planes, out_planes, stride=1):
        super(CellA, self).__init__()
        self.stride = stride
        self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride)
        if stride == 2:
            self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                                   stride=1, padding=0, bias=False)
            self.bn1 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        conv_branch = self.sep_conv1(x)
        pool_branch = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
        if self.stride == 2:
            pool_branch = self.bn1(self.conv1(pool_branch))
        return F.relu(conv_branch + pool_branch)
|
class CellB(nn.Module):
    """PNASNet cell with four branches, combined pairwise and fused by a 1x1 conv."""

    def __init__(self, in_planes, out_planes, stride=1):
        super(CellB, self).__init__()
        self.stride = stride
        self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride)
        self.sep_conv2 = SepConv(in_planes, out_planes, kernel_size=3, stride=stride)
        self.sep_conv3 = SepConv(in_planes, out_planes, kernel_size=5, stride=stride)
        if stride == 2:
            # Project the pooled branch so its width matches the conv branches.
            self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                                   stride=1, padding=0, bias=False)
            self.bn1 = nn.BatchNorm2d(out_planes)
        # Fuse the concatenated pair outputs back down to out_planes channels.
        self.conv2 = nn.Conv2d(2 * out_planes, out_planes, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        y1 = self.sep_conv1(x)
        y2 = self.sep_conv2(x)
        y3 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
        if self.stride == 2:
            y3 = self.bn1(self.conv1(y3))
        y4 = self.sep_conv3(x)
        pair1 = F.relu(y1 + y2)
        pair2 = F.relu(y3 + y4)
        fused = torch.cat([pair1, pair2], 1)
        return F.relu(self.bn2(self.conv2(fused)))
|
class PNASNet(nn.Module):
    """PNASNet-style network: three cell stages separated by stride-2 downsample cells.

    Args:
        cell_type: cell class (e.g. CellA or CellB) constructed as
            cell_type(in_planes, out_planes, stride).
        num_cells: number of stride-1 cells in each of the three stages.
        num_planes: channel width of the first stage; doubled at each downsample.
    """

    def __init__(self, cell_type, num_cells, num_planes):
        super(PNASNet, self).__init__()
        self.in_planes = num_planes
        self.cell_type = cell_type
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(num_planes)
        # BUG FIX: the original hard-coded num_cells=6 in these three calls,
        # silently ignoring the ``num_cells`` constructor argument.
        self.layer1 = self._make_layer(num_planes, num_cells=num_cells)
        self.layer2 = self._downsample(num_planes * 2)
        self.layer3 = self._make_layer(num_planes * 2, num_cells=num_cells)
        self.layer4 = self._downsample(num_planes * 4)
        self.layer5 = self._make_layer(num_planes * 4, num_cells=num_cells)
        self.linear = nn.Linear(num_planes * 4, 10)

    def _make_layer(self, planes, num_cells):
        # One stage: ``num_cells`` stride-1 cells at a fixed width.
        layers = []
        for _ in range(num_cells):
            layers.append(self.cell_type(self.in_planes, planes, stride=1))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def _downsample(self, planes):
        # Single stride-2 cell that also changes the channel width.
        layer = self.cell_type(self.in_planes, planes, stride=2)
        self.in_planes = planes
        return layer

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.layer5(out)
        # A 32x32 input is 8x8 after the two downsamples; pool to 1x1 and classify.
        out = F.avg_pool2d(out, 8)
        out = self.linear(out.view(out.size(0), -1))
        return out
|
def PNASNetA():
    """Build the CellA variant of PNASNet: 6 cells per stage, width 44."""
    return PNASNet(cell_type=CellA, num_cells=6, num_planes=44)
|
def PNASNetB():
    """Build the CellB variant of PNASNet: 6 cells per stage, width 32."""
    return PNASNet(cell_type=CellB, num_cells=6, num_planes=32)
|
def test():
    """Smoke-test PNASNetB on one CIFAR-sized input and print the raw logits."""
    model = PNASNetB()
    sample = torch.randn(1, 3, 32, 32)
    print(model(sample))
|
class PreActBlock(nn.Module):
    """Pre-activation version of the ResNet BasicBlock (BN/ReLU before conv)."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        # The shortcut attribute exists only when a projection is required;
        # forward() checks for it with hasattr().
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
            )

    def forward(self, x):
        pre = F.relu(self.bn1(x))
        # Projection shortcuts consume the pre-activated input; identity
        # shortcuts pass the raw input through.
        residual = self.shortcut(pre) if hasattr(self, 'shortcut') else x
        out = self.conv1(pre)
        out = self.conv2(F.relu(self.bn2(out)))
        return out + residual
|
class PreActBottleneck(nn.Module):
    """Pre-activation version of the original Bottleneck module."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        # Defined only when a projection is needed; forward() uses hasattr().
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
            )

    def forward(self, x):
        pre = F.relu(self.bn1(x))
        residual = self.shortcut(pre) if hasattr(self, 'shortcut') else x
        out = self.conv1(pre)
        out = self.conv2(F.relu(self.bn2(out)))
        out = self.conv3(F.relu(self.bn3(out)))
        return out + residual
|
class PreActResNet(nn.Module):
    """Pre-activation ResNet for CIFAR (no BN/ReLU after the stem conv)."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(PreActResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # First block of a stage downsamples; the rest keep stride 1.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        return self.linear(out)
|
def PreActResNet18():
    """18-layer pre-activation ResNet: basic blocks, stage depths 2-2-2-2."""
    return PreActResNet(PreActBlock, num_blocks=[2, 2, 2, 2])
|
def PreActResNet34():
    """34-layer pre-activation ResNet: basic blocks, stage depths 3-4-6-3."""
    return PreActResNet(PreActBlock, num_blocks=[3, 4, 6, 3])
|
def PreActResNet50():
    """50-layer pre-activation ResNet: bottleneck blocks, stage depths 3-4-6-3."""
    return PreActResNet(PreActBottleneck, num_blocks=[3, 4, 6, 3])
|
def PreActResNet101():
    """101-layer pre-activation ResNet: bottleneck blocks, stage depths 3-4-23-3."""
    return PreActResNet(PreActBottleneck, num_blocks=[3, 4, 23, 3])
|
def PreActResNet152():
    """152-layer pre-activation ResNet: bottleneck blocks, stage depths 3-8-36-3."""
    return PreActResNet(PreActBottleneck, num_blocks=[3, 8, 36, 3])
|
def test():
    """Smoke-test PreActResNet18 and print the output size."""
    model = PreActResNet18()
    print(model(torch.randn(1, 3, 32, 32)).size())
|
class BasicBlock(nn.Module):
    """Standard ResNet basic block: two 3x3 convs plus a residual connection."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut unless the shape changes, then a 1x1 projection + BN.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        return F.relu(h + self.shortcut(x))
|
class Bottleneck(nn.Module):
    """Standard ResNet bottleneck: 1x1 reduce, 3x3, then 1x1 expand (x4)."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        # Identity shortcut unless the shape changes, then a 1x1 projection + BN.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        return F.relu(h + self.shortcut(x))
|
class ResNet(nn.Module):
    """ResNet for CIFAR: 3x3 stem, four block stages, average pool, linear head."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # First block of a stage downsamples; the rest keep stride 1.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        return self.linear(out)
|
def ResNet18():
    """ResNet-18: basic blocks, stage depths 2-2-2-2."""
    return ResNet(BasicBlock, num_blocks=[2, 2, 2, 2])
|
def ResNet34():
    """ResNet-34: basic blocks, stage depths 3-4-6-3."""
    return ResNet(BasicBlock, num_blocks=[3, 4, 6, 3])
|
def ResNet50():
    """ResNet-50: bottleneck blocks, stage depths 3-4-6-3."""
    return ResNet(Bottleneck, num_blocks=[3, 4, 6, 3])
|
def ResNet101():
    """ResNet-101: bottleneck blocks, stage depths 3-4-23-3."""
    return ResNet(Bottleneck, num_blocks=[3, 4, 23, 3])
|
def ResNet152():
    """ResNet-152: bottleneck blocks, stage depths 3-8-36-3."""
    return ResNet(Bottleneck, num_blocks=[3, 8, 36, 3])
|
def test():
    """Smoke-test ResNet18 and print the output size."""
    model = ResNet18()
    print(model(torch.randn(1, 3, 32, 32)).size())
|
class Block(nn.Module):
    """ResNeXt grouped-convolution block (1x1 -> grouped 3x3 -> 1x1, x2 expand)."""
    expansion = 2

    def __init__(self, in_planes, cardinality=32, bottleneck_width=4, stride=1):
        super(Block, self).__init__()
        group_width = cardinality * bottleneck_width
        self.conv1 = nn.Conv2d(in_planes, group_width, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(group_width)
        # Grouped 3x3: ``cardinality`` parallel paths of ``bottleneck_width`` channels.
        self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride,
                               padding=1, groups=cardinality, bias=False)
        self.bn2 = nn.BatchNorm2d(group_width)
        self.conv3 = nn.Conv2d(group_width, self.expansion * group_width, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * group_width)
        # Identity shortcut unless the shape changes, then a 1x1 projection + BN.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * group_width:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * group_width, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * group_width),
            )

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        return F.relu(h + self.shortcut(x))
|
class ResNeXt(nn.Module):
    """ResNeXt for CIFAR: three stages of grouped-convolution blocks.

    ``self.bottleneck_width`` is doubled by _make_layer after each stage,
    while ``cardinality`` stays fixed.
    """

    def __init__(self, num_blocks, cardinality, bottleneck_width, num_classes=10):
        super(ResNeXt, self).__init__()
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(num_blocks[0], 1)
        self.layer2 = self._make_layer(num_blocks[1], 2)
        self.layer3 = self._make_layer(num_blocks[2], 2)
        # Final feature width equals cardinality * bottleneck_width * 8
        # (expansion 2 times the stage-3 group width).
        self.linear = nn.Linear(cardinality * bottleneck_width * 8, num_classes)

    def _make_layer(self, num_blocks, stride):
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(Block(self.in_planes, self.cardinality, self.bottleneck_width, s))
            self.in_planes = Block.expansion * self.cardinality * self.bottleneck_width
        # Widen the bottleneck for the next stage.
        self.bottleneck_width *= 2
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3):
            out = stage(out)
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        return self.linear(out)
|
def ResNeXt29_2x64d():
    """ResNeXt-29 (2x64d): three stages of 3 blocks, cardinality 2, width 64."""
    return ResNeXt(cardinality=2, bottleneck_width=64, num_blocks=[3, 3, 3])
|
def ResNeXt29_4x64d():
    """ResNeXt-29 (4x64d): three stages of 3 blocks, cardinality 4, width 64."""
    return ResNeXt(cardinality=4, bottleneck_width=64, num_blocks=[3, 3, 3])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.