# NOTE: aggregated training/evaluation utilities for HBI video-text retrieval.
class AverageMeter(object):
    """Tracks the latest value and a running average of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Return every statistic to its initial, empty state.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # Record `val` observed `n` times, then refresh the running mean.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
|
def accuracy(output, target, topk=(1,)):
    """Compute top-k precision (percent) of `output` logits against `target` labels."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # Indices of the maxk highest-scoring classes: (batch, maxk) -> (maxk, batch).
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        # Boolean grid: row r, col b is True when the rank-r prediction matches label b.
        correct = pred.eq(target.view(1, -1).expand_as(pred)).contiguous()
        res = []
        for k in topk:
            hits_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(hits_k.mul_(100.0 / batch_size))
        return res
|
def get_a_var(obj):
    """Return the first torch.Tensor found inside (possibly nested) `obj`, else None."""
    if isinstance(obj, torch.Tensor):
        return obj
    if isinstance(obj, (list, tuple)):
        for item in obj:
            found = get_a_var(item)
            if isinstance(found, torch.Tensor):
                return found
    if isinstance(obj, dict):
        # items() yields (key, value) tuples, which the tuple branch above unpacks.
        for pair in obj.items():
            found = get_a_var(pair)
            if isinstance(found, torch.Tensor):
                return found
    return None
|
def parallel_apply(fct, model, inputs, device_ids):
    """Replicate `model` across `device_ids` and run `fct(replica, *input)` on each.

    Variant of torch.nn.parallel.parallel_apply that applies an arbitrary
    callable `fct` to each replica rather than calling the module directly.

    Args:
        fct: callable invoked as fct(module_replica, *input_i).
        model: module to replicate onto each device.
        inputs: per-replica inputs; must have the same length as device_ids.
        device_ids: CUDA device ids, one per replica.

    Returns:
        List of per-replica outputs, in input order.

    Raises:
        Re-raises (via ExceptionWrapper.reraise) any exception thrown in a worker.
    """
    modules = nn.parallel.replicate(model, device_ids)
    assert (len(modules) == len(inputs))
    lock = threading.Lock()
    results = {}
    # Capture the caller's grad mode; each thread has its own grad-enabled
    # flag, so workers must re-apply it explicitly.
    grad_enabled = torch.is_grad_enabled()
    def _worker(i, module, input):
        torch.set_grad_enabled(grad_enabled)
        # Infer the target device from the first tensor found in this input.
        device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                if (not isinstance(input, (list, tuple))):
                    input = (input,)
                output = fct(module, *input)
            with lock:
                results[i] = output
        except Exception:
            with lock:
                # Defer the exception so it can be re-raised in the main thread.
                results[i] = ExceptionWrapper(where='in replica {} on device {}'.format(i, device))
    if (len(modules) > 1):
        # One thread per replica; the heavy work runs on CUDA, so Python
        # threads are sufficient here.
        threads = [threading.Thread(target=_worker, args=(i, module, input)) for (i, (module, input)) in enumerate(zip(modules, inputs))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single replica: run inline, no threading needed.
        _worker(0, modules[0], inputs[0])
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, ExceptionWrapper):
            output.reraise()
        outputs.append(output)
    return outputs
|
def get_logger(filename=None):
    """Return the shared 'logger', optionally mirroring records to `filename`."""
    logger = logging.getLogger('logger')
    logger.setLevel(logging.DEBUG)
    # Configure console output on the root logger (no-op if already configured).
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO,
    )
    if filename is not None:
        file_handler = logging.FileHandler(filename)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
        # Attach to the root logger so every logger's records reach the file.
        logging.getLogger().addHandler(file_handler)
    return logger
|
def get_args(description='Video-Text as Game Players: Hierarchical Banzhaf Interaction for Cross-Modal Representation Learning'):
    """Parse command-line arguments for retrieval training/evaluation.

    Args:
        description: text shown in the argparse help header.

    Returns:
        The parsed argparse.Namespace (requires --output_dir).
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('--do_train', type=int, default=0, help='Whether to run training.')
    parser.add_argument('--do_eval', type=int, default=0, help='Whether to run evaluation.')
    parser.add_argument('--datatype', default='msrvtt', type=str, help='Point the dataset to finetune.')
    parser.add_argument('--anno_path', type=str, default='data/MSR-VTT/anns', help='annotation path')
    parser.add_argument('--video_path', type=str, default='data/MSR-VTT/videos', help='video path')
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--workers', default=4, type=int, help='number of data loading workers (default: 4)')
    parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
    parser.add_argument('--coef_lr', type=float, default=0.001, help='coefficient for bert branch.')
    # Fix: '%' must be doubled in argparse help strings — a single '%' makes
    # help formatting raise ValueError (the duplicate parser below already
    # escapes it as '10%%').
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.')
    parser.add_argument('--weight_decay', type=float, default=0.2, help='weight decay')
    parser.add_argument('--epochs', type=int, default=5, help='upper epoch limit')
    parser.add_argument('--batch_size', type=int, default=128, help='batch size')
    parser.add_argument('--batch_size_val', type=int, default=128, help='batch size eval')
    parser.add_argument('--max_words', type=int, default=32, help='max text token number')
    parser.add_argument('--max_frames', type=int, default=12, help='max key frames')
    parser.add_argument('--video_framerate', type=int, default=1, help='framerate to sample video frame')
    parser.add_argument('--device', default='cpu', type=str, help='cpu/cuda')
    parser.add_argument('--world_size', default=1, type=int, help='distribted training')
    parser.add_argument('--local_rank', default=0, type=int, help='distribted training')
    parser.add_argument('--distributed', default=0, type=int, help='multi machine DDP')
    parser.add_argument('--n_display', type=int, default=50, help='Information display frequence')
    parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--base_encoder', default='ViT-B/32', type=str, help='Choose a CLIP version')
    parser.add_argument('--agg_module', type=str, default='seqTransf', choices=['None', 'seqLSTM', 'seqTransf'], help='choice a feature aggregation module for video.')
    parser.add_argument('--interaction', type=str, default='wti', help='interaction type for retrieval.')
    parser.add_argument('--num_hidden_layers', type=int, default=4)
    parser.add_argument('--init_model', default=None, type=str, required=False, help='Initial model.')
    args = parser.parse_args()
    return args
|
def set_seed_logger(args):
    """Seed all RNGs for reproducibility and initialize distributed training.

    Side effects: seeds python/numpy/torch RNGs, disables cudnn autotuning in
    favour of determinism, initializes the NCCL process group and pins the
    CUDA device when CUDA is available, and sets args.device / args.world_size.

    Raises:
        ValueError: when batch_size or batch_size_val is not divisible by the
            world size.
    """
    global logger
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    # Trade cudnn autotuning for deterministic kernels.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    if torch.cuda.is_available():
        torch.distributed.init_process_group(backend='nccl')
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('cuda', args.local_rank)
    # WORLD_SIZE is set by the distributed launcher; default to single process.
    args.world_size = (int(os.environ['WORLD_SIZE']) if ('WORLD_SIZE' in os.environ) else 1)
    if torch.cuda.is_available():
        # Ensure all ranks finish setup before proceeding.
        torch.distributed.barrier()
    logger.info('local_rank: {} world_size: {}'.format(args.local_rank, args.world_size))
    if (((args.batch_size % args.world_size) != 0) or ((args.batch_size_val % args.world_size) != 0)):
        raise ValueError('Invalid batch_size/batch_size_val and world_size parameter: {}%{} and {}%{}, should be == 0'.format(args.batch_size, args.world_size, args.batch_size_val, args.world_size))
    logger.info('Effective parameters:')
    for key in sorted(args.__dict__):
        logger.info(' <<< {}: {}'.format(key, args.__dict__[key]))
    return args
|
def build_model(args):
    """Construct an HBI model, optionally warm-started from a checkpoint.

    Args:
        args: parsed namespace; uses `init_model` (checkpoint path, may be
            None/empty) and `device` (target device).

    Returns:
        The model moved to `args.device`.

    Raises:
        FileNotFoundError: if `args.init_model` is set but the file is missing.
    """
    model = HBI(args)
    if args.init_model:
        if (not exists(args.init_model)):
            # Include the offending path so the failure is actionable
            # (previously raised with no message at all).
            raise FileNotFoundError(args.init_model)
        model_state_dict = torch.load(args.init_model, map_location='cpu')
        # strict=False: the checkpoint may cover only a subset of parameters.
        model.load_state_dict(model_state_dict, strict=False)
    model.to(args.device)
    return model
|
def build_dataloader(args):
    """Build test/val/train dataloaders for `args.datatype` via DATALOADER_DICT.

    Falls back to the test split for validation when no val builder exists
    (and vice versa). Training loaders are built only when args.do_train.

    Returns:
        (test_dataloader, val_dataloader, train_dataloader, train_sampler);
        the last two are None when not training.
    """
    tokenizer = ClipTokenizer()
    assert (args.datatype in DATALOADER_DICT)
    # At least one of the test/val builders must exist for this dataset.
    assert ((DATALOADER_DICT[args.datatype]['test'] is not None) or (DATALOADER_DICT[args.datatype]['val'] is not None))
    (test_dataloader, test_length) = (None, 0)
    if (DATALOADER_DICT[args.datatype]['test'] is not None):
        (test_dataloader, test_length) = DATALOADER_DICT[args.datatype]['test'](args, tokenizer)
    if (DATALOADER_DICT[args.datatype]['val'] is not None):
        (val_dataloader, val_length) = DATALOADER_DICT[args.datatype]['val'](args, tokenizer, subset='val')
    else:
        # No val split: reuse the test split for validation.
        (val_dataloader, val_length) = (test_dataloader, test_length)
    if (test_dataloader is None):
        (test_dataloader, test_length) = (val_dataloader, val_length)
    # Some dataset builders report a single int size, others a pair of sizes
    # (presumably text/video counts — confirm against the dataloader module);
    # log accordingly.
    if isinstance(test_length, int):
        logger.info('***** Running test *****')
        logger.info('  Num examples = %d', test_length)
        logger.info('  Batch size = %d', args.batch_size_val)
        logger.info('  Num steps = %d', len(test_dataloader))
        logger.info('***** Running val *****')
        logger.info('  Num examples = %d', val_length)
    elif (len(test_length) == 2):
        logger.info('***** Running test *****')
        logger.info('  Num examples = %dt %dv', test_length[0], test_length[1])
        logger.info('  Batch size = %d', args.batch_size_val)
        logger.info('  Num steps = %d %d', len(test_dataloader[0]), len(test_dataloader[1]))
        logger.info('***** Running val *****')
        logger.info('  Num examples = %dt %dv', val_length[0], val_length[1])
    if args.do_train:
        (train_dataloader, train_length, train_sampler) = DATALOADER_DICT[args.datatype]['train'](args, tokenizer)
        logger.info('***** Running training *****')
        logger.info('  Num examples = %d', train_length)
        logger.info('  Batch size = %d', args.batch_size)
        logger.info('  Num steps = %d', (len(train_dataloader) * args.epochs))
    else:
        (train_dataloader, train_sampler) = (None, None)
    return (test_dataloader, val_dataloader, train_dataloader, train_sampler)
|
def prep_optimizer(args, model, num_train_optimization_steps, local_rank):
    """Build a BertAdam optimizer with per-group lr/weight-decay, wrap the model in DDP.

    CLIP-branch parameters ('clip.' in name) train at args.lr * args.coef_lr;
    bias/LayerNorm parameters are exempt from weight decay.
    """
    if hasattr(model, 'module'):
        model = model.module
    lr = args.lr
    coef_lr = args.coef_lr
    weight_decay = args.weight_decay
    warmup_proportion = args.warmup_proportion
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    named = list(model.named_parameters())

    def _pick(want_decay, want_clip):
        # Select parameters by weight-decay eligibility and CLIP-branch membership.
        chosen = []
        for name, param in named:
            exempt = any(nd in name for nd in no_decay)
            if exempt == want_decay:
                continue
            if ('clip.' in name) != want_clip:
                continue
            chosen.append(param)
        return chosen

    optimizer_grouped_parameters = [
        {'params': _pick(True, True), 'weight_decay': weight_decay, 'lr': lr * coef_lr},
        {'params': _pick(True, False), 'weight_decay': weight_decay},
        {'params': _pick(False, True), 'weight_decay': 0.0, 'lr': lr * coef_lr},
        {'params': _pick(False, False), 'weight_decay': 0.0},
    ]
    scheduler = None
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=warmup_proportion,
                         schedule='warmup_cosine', b1=0.9, b2=0.98, e=1e-06,
                         t_total=num_train_optimization_steps, weight_decay=weight_decay,
                         max_grad_norm=1.0)
    if torch.cuda.is_available():
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank], output_device=local_rank,
            find_unused_parameters=True)
    return (optimizer, scheduler, model)
|
def save_model(epoch, args, model, type_name=''):
    """Persist the banzhafteacher sub-module's weights for `epoch`; return the file path."""
    core = model.module.banzhafteacher if hasattr(model, 'module') else model.banzhafteacher
    suffix = '' if type_name == '' else type_name + '.'
    output_model_file = join(args.output_dir, 'pytorch_model.bin.{}{}'.format(suffix, epoch))
    torch.save(core.state_dict(), output_model_file)
    logger.info('Model saved to %s', output_model_file)
    return output_model_file
|
def reduce_loss(loss, args):
    """Average `loss` across distributed workers onto rank 0; identity when single-process."""
    if args.world_size < 2:
        # Nothing to reduce in a single-process run.
        return loss
    with torch.no_grad():
        torch.distributed.reduce(loss, dst=0)
        if torch.distributed.get_rank() == 0:
            # Only rank 0 holds the summed value; turn it into a mean there.
            loss /= args.world_size
    return loss
|
def train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, max_steps, val_dataloader):
    """Run one training epoch over `train_dataloader`.

    Args:
        epoch: current epoch index (for logging).
        args: parsed namespace (n_display, epochs used here).
        model: trainable model; called with the unpacked batch.
        train_dataloader: iterable of training batches.
        device: target device for batch tensors.
        n_gpu: number of devices; ==1 moves tensors here, >1 means loss per replica.
        optimizer / scheduler: optimizer and optional LR scheduler.
        global_step: running step counter across epochs.
        max_steps: total planned steps, for the ETA estimate.
        val_dataloader: unused here; kept for interface compatibility.

    Returns:
        (mean loss over the epoch, updated global_step).
    """
    global logger
    global best_score
    global meters
    torch.cuda.empty_cache()
    model.train()
    log_step = args.n_display
    total_loss = 0
    end = time.time()
    logit_scale = 0
    for (step, batch) in enumerate(train_dataloader, start=1):
        global_step += 1
        data_time = (time.time() - end)
        if (n_gpu == 1):
            # Single-device: move tensors explicitly (DDP handles multi-GPU).
            batch = tuple((t.to(device=device, non_blocking=True) for t in batch))
        (text_ids, text_mask, video, video_mask, inds, idx) = batch
        loss = model(text_ids, text_mask, video, video_mask, idx, global_step)
        if (n_gpu > 1):
            loss = loss.mean()
        # NOTE(review): detect_anomaly adds large overhead; consider disabling
        # outside of debugging sessions.
        with torch.autograd.detect_anomaly():
            loss.backward()
        # Fix: accumulate the epoch loss. Previously total_loss stayed 0, so the
        # function always returned 0 (the sibling train_epoch below does
        # `total_loss += float(loss)`).
        total_loss += float(loss)
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        if (scheduler is not None):
            scheduler.step()
        optimizer.zero_grad()
        batch_time = (time.time() - end)
        end = time.time()
        reduced_l = reduce_loss(loss, args)
        meters.update(time=batch_time, data=data_time, loss=float(reduced_l))
        eta_seconds = (meters.time.global_avg * (max_steps - global_step))
        eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
        if ((((global_step % log_step) == 0) or (global_step == 1)) and is_main_process()):
            logger.info(meters.delimiter.join(['eta: {eta}', 'epoch: {epoch}/{max_epoch}', 'iteration: {iteration}/{max_iteration}', '{meters}', 'lr: {lr}', 'logit_scale: {logit_scale:.2f}max mem: {memory:.0f}']).format(eta=eta_string, epoch=epoch, max_epoch=args.epochs, iteration=global_step, max_iteration=max_steps, meters=str(meters), lr='/'.join([str(('%.9f' % itm)) for itm in sorted(list(set(optimizer.get_lr())))]), logit_scale=logit_scale, memory=((torch.cuda.max_memory_allocated() / 1024.0) / 1024.0)))
    total_loss = (total_loss / len(train_dataloader))
    return (total_loss, global_step)
|
def main():
    """Entry point for retrieval training: setup, build, train, and time the run."""
    global logger
    global best_score
    global meters
    meters = MetricLogger(delimiter=' ')
    args = get_args()
    if (not exists(args.output_dir)):
        os.makedirs(args.output_dir, exist_ok=True)
    logger = setup_logger('tvr', args.output_dir, args.local_rank)
    args = set_seed_logger(args)
    model = build_model(args)
    (test_dataloader, val_dataloader, train_dataloader, train_sampler) = build_dataloader(args)
    if args.do_train:
        tic = time.time()
        max_steps = (len(train_dataloader) * args.epochs)
        # NOTE(review): the optimizer's t_total is fixed at 5 epochs' worth of
        # steps regardless of args.epochs — confirm this is intentional.
        _max_steps = (len(train_dataloader) * 5)
        (optimizer, scheduler, model) = prep_optimizer(args, model, _max_steps, args.local_rank)
        best_score = 1e-05
        best_output_model_file = 'None'
        global_step = 0
        for epoch in range(args.epochs):
            if (train_sampler is not None):
                # Reshuffle the distributed sampler differently each epoch.
                train_sampler.set_epoch(epoch)
            synchronize()
            torch.cuda.empty_cache()
            (tr_loss, global_step) = train_epoch(epoch, args, model, train_dataloader, args.device, args.world_size, optimizer, scheduler, global_step, max_steps, val_dataloader)
            if (args.local_rank == 0):
                # Only the main process writes checkpoints.
                output_model_file = save_model(epoch, args, model, type_name='')
            synchronize()
        toc = (time.time() - tic)
        training_time = time.strftime('%Hh %Mmin %Ss', time.gmtime(toc))
        logger.info(((((('*' * 20) + '\n') + f'training finished with {training_time}') + ('*' * 20)) + '\n'))
|
def get_args(description='Video-Text as Game Players: Hierarchical Banzhaf Interaction for Cross-Modal Representation Learning'):
    """Parse command-line arguments for VideoQA fine-tuning.

    Args:
        description: text shown in the argparse help header.

    Returns:
        The parsed argparse.Namespace, after validating the training flags and
        dividing batch_size by gradient_accumulation_steps (so batch_size
        becomes the per-step micro-batch size).

    Raises:
        ValueError: if gradient_accumulation_steps < 1, or neither --do_train
            nor --do_eval is set.
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('--do_pretrain', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument('--train_csv', type=str, default='data/.train.csv', help='')
    parser.add_argument('--val_csv', type=str, default='data/.val.csv', help='')
    parser.add_argument('--data_path', type=str, default='train_ans2label.json', help='data pickle file path')
    parser.add_argument('--features_path', type=str, default='MSRVTT_Videos', help='feature path')
    parser.add_argument('--num_thread_reader', type=int, default=1, help='')
    parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
    parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit')
    parser.add_argument('--batch_size', type=int, default=256, help='batch size')
    parser.add_argument('--batch_size_val', type=int, default=3500, help='batch size eval')
    parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate exp epoch decay')
    parser.add_argument('--n_display', type=int, default=100, help='Information display frequence')
    parser.add_argument('--video_dim', type=int, default=1024, help='video feature dimension')
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--max_words', type=int, default=20, help='')
    parser.add_argument('--max_frames', type=int, default=100, help='')
    parser.add_argument('--feature_framerate', type=int, default=1, help='')
    parser.add_argument('--margin', type=float, default=0.1, help='margin for loss')
    parser.add_argument('--hard_negative_rate', type=float, default=0.5, help='rate of intra negative sample')
    parser.add_argument('--negative_weighting', type=int, default=1, help='Weight the loss for intra negative')
    parser.add_argument('--n_pair', type=int, default=1, help='Num of pair to output from data loader')
    parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--cross_model', default='cross-base', type=str, required=False, help='Cross module')
    parser.add_argument('--init_model', default=None, type=str, required=False, help='Initial model.')
    parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--n_gpu', type=int, default=1, help='Changed in the execute process.')
    parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
    parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
    parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument('--task_type', default='retrieval', type=str, help='Point the task `retrieval` to finetune.')
    parser.add_argument('--datatype', default='msrvtt', type=str, help='Point the dataset to finetune.')
    parser.add_argument('--world_size', default=0, type=int, help='distribted training')
    parser.add_argument('--local_rank', default=0, type=int, help='distribted training')
    parser.add_argument('--rank', default=0, type=int, help='distribted training')
    parser.add_argument('--coef_lr', type=float, default=0.001, help='coefficient for bert branch.')
    parser.add_argument('--use_mil', action='store_true', help='Whether use MIL as Miech et. al. (2020).')
    parser.add_argument('--sampled_use_mil', action='store_true', help='Whether MIL, has a high priority than use_mil.')
    parser.add_argument('--text_num_hidden_layers', type=int, default=12, help='Layer NO. of text.')
    parser.add_argument('--visual_num_hidden_layers', type=int, default=12, help='Layer NO. of visual.')
    parser.add_argument('--cross_num_hidden_layers', type=int, default=4, help='Layer NO. of cross.')
    parser.add_argument('--loose_type', action='store_true', help='Default using tight type for retrieval.')
    parser.add_argument('--expand_msrvtt_sentences', action='store_true', help='')
    parser.add_argument('--train_frame_order', type=int, default=0, choices=[0, 1, 2], help='Frame order, 0: ordinary order; 1: reverse order; 2: random order.')
    parser.add_argument('--eval_frame_order', type=int, default=0, choices=[0, 1, 2], help='Frame order, 0: ordinary order; 1: reverse order; 2: random order.')
    parser.add_argument('--freeze_layer_num', type=int, default=0, help='Layer NO. of CLIP need to freeze.')
    parser.add_argument('--slice_framepos', type=int, default=0, choices=[0, 1, 2], help='0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly.')
    parser.add_argument('--linear_patch', type=str, default='2d', choices=['2d', '3d'], help='linear projection of flattened patches.')
    parser.add_argument('--sim_header', type=str, default='meanP', choices=['meanP', 'seqLSTM', 'seqTransf', 'tightTransf', 'BTransf', 'denseTransf'], help='choice a similarity header.')
    parser.add_argument('--loss', type=str, default='CrossEn', choices=['CrossEn', 'DualLoss', 'CrossCLR', 'MarginLoss', 'DCWLoss'])
    parser.add_argument('--estimator', default='None', type=str, required=False, help=' Banzhaf Interaction Estimator.')
    parser.add_argument('--kl', type=float, default=0.2)
    parser.add_argument('--skl', type=float, default=1)
    parser.add_argument('--num_labels', type=int, default=1000)
    # NOTE(review): the default is a 3-element list but type=float with no
    # nargs, so passing --rate on the CLI yields a single float instead of a
    # list — confirm intended usage (likely needs nargs=3).
    parser.add_argument('--rate', type=float, default=[0.4, 0.4, 0.2])
    args = parser.parse_args()
    if (args.sim_header == 'tightTransf'):
        # The tight transformer header requires the tight (non-loose) pathway.
        args.loose_type = False
    if (args.gradient_accumulation_steps < 1):
        raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(args.gradient_accumulation_steps))
    if ((not args.do_train) and (not args.do_eval)):
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')
    # batch_size becomes the per-step micro-batch size under accumulation.
    args.batch_size = int((args.batch_size / args.gradient_accumulation_steps))
    return args
|
def set_seed_logger(args):
    """Seed every RNG, bind this process to its GPU, and set up file logging.

    Assumes torch.distributed has already been initialized (e.g. by the
    launcher); reads world size and rank from the process group and stores
    them on `args`. Creates the output directory and a 'log.txt' logger.
    """
    global logger
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    # Trade cudnn autotuning for deterministic kernels.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    world_size = torch.distributed.get_world_size()
    torch.cuda.set_device(args.local_rank)
    args.world_size = world_size
    rank = torch.distributed.get_rank()
    args.rank = rank
    if (not os.path.exists(args.output_dir)):
        os.makedirs(args.output_dir, exist_ok=True)
    logger = get_logger(os.path.join(args.output_dir, 'log.txt'))
    if (args.local_rank == 0):
        # Only the main process dumps the full configuration.
        logger.info('Effective parameters:')
        for key in sorted(args.__dict__):
            logger.info(' <<< {}: {}'.format(key, args.__dict__[key]))
    return args
|
def init_device(args, local_rank):
    """Resolve the torch device for this process and validate batch sizes.

    Args:
        args: parsed namespace; sets args.n_gpu as a side effect.
        local_rank: device index for this process.

    Returns:
        (device, n_gpu) tuple.

    Raises:
        ValueError: if batch_size or batch_size_val is not divisible by the
            GPU count (checked only when GPUs are present).
    """
    global logger
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'), local_rank)
    n_gpu = torch.cuda.device_count()
    logger.info('device: {} n_gpu: {}'.format(device, n_gpu))
    args.n_gpu = n_gpu
    # Fix: guard the divisibility check — on CPU-only machines n_gpu == 0 and
    # the modulo previously raised ZeroDivisionError.
    if (args.n_gpu > 0) and (((args.batch_size % args.n_gpu) != 0) or ((args.batch_size_val % args.n_gpu) != 0)):
        raise ValueError('Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0'.format(args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu))
    return (device, n_gpu)
|
def init_model(args, device, n_gpu, local_rank):
    """Instantiate HBI from pretrained weights, optionally seeded by args.init_model."""
    state_dict = torch.load(args.init_model, map_location='cpu') if args.init_model else None
    if args.cache_dir:
        cache_dir = args.cache_dir
    else:
        cache_dir = os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
    model = HBI.from_pretrained(args.cross_model, cache_dir=cache_dir, state_dict=state_dict, task_config=args)
    model.to(device)
    return model
|
def prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, local_rank, coef_lr=1.0):
    """Group parameters (decay/no-decay x CLIP/non-CLIP), build BertAdam, wrap in DDP.

    CLIP-branch parameters ('clip.' in name) train at args.lr * coef_lr;
    bias/LayerNorm parameters are exempt from weight decay.
    """
    if hasattr(model, 'module'):
        model = model.module
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    named = list(model.named_parameters())
    weight_decay = 0.2

    def _pick(want_decay, want_clip):
        # Select parameters by weight-decay eligibility and CLIP-branch membership.
        chosen = []
        for name, param in named:
            exempt = any(nd in name for nd in no_decay)
            if exempt == want_decay:
                continue
            if ('clip.' in name) != want_clip:
                continue
            chosen.append(param)
        return chosen

    optimizer_grouped_parameters = [
        {'params': _pick(True, True), 'weight_decay': weight_decay, 'lr': args.lr * coef_lr},
        {'params': _pick(True, False), 'weight_decay': weight_decay},
        {'params': _pick(False, True), 'weight_decay': 0.0, 'lr': args.lr * coef_lr},
        {'params': _pick(False, False), 'weight_decay': 0.0},
    ]
    scheduler = None
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup_proportion,
                         schedule='warmup_cosine', b1=0.9, b2=0.98, e=1e-06,
                         t_total=num_train_optimization_steps, weight_decay=weight_decay,
                         max_grad_norm=1.0)
    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
    return (optimizer, scheduler, model)
|
def dataloader_msrvtt_train(args, tokenizer):
    """Build the MSR-VTT training dataset, its DistributedSampler, and DataLoader."""
    dataset = MSRVTT_TrainDataLoader(
        jsonl_path=args.train_csv,
        ans2label_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
        unfold_sentences=args.expand_msrvtt_sentences,
        frame_order=args.train_frame_order,
        slice_framepos=args.slice_framepos,
        use_num=args.num_labels,
    )
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    loader = DataLoader(
        dataset,
        batch_size=(args.batch_size // args.n_gpu),  # per-GPU micro-batch
        num_workers=args.num_thread_reader,
        pin_memory=True,
        shuffle=(sampler is None),
        sampler=sampler,
        drop_last=True,
    )
    return (loader, len(dataset), sampler)
|
def dataloader_msrvtt_test(args, tokenizer):
    """Build the MSR-VTT evaluation dataset and a sequential DataLoader."""
    testset = MSRVTT_DataLoader(
        jsonl_path=args.val_csv,
        train_jsonl=args.train_csv,
        ans2label_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
        unfold_sentences=args.expand_msrvtt_sentences,
        frame_order=args.train_frame_order,
        slice_framepos=args.slice_framepos,
        use_num=args.num_labels,
    )
    loader = DataLoader(
        testset,
        batch_size=args.batch_size_val,
        num_workers=args.num_thread_reader,
        shuffle=False,
        drop_last=False,
    )
    return (loader, len(testset))
|
def save_model(epoch, args, model, type_name=''):
    """Write the (DDP-unwrapped) model's state_dict for `epoch`; return the path."""
    core = model.module if hasattr(model, 'module') else model
    suffix = '' if type_name == '' else type_name + '.'
    output_model_file = os.path.join(args.output_dir, 'pytorch_model.bin.{}{}'.format(suffix, epoch))
    torch.save(core.state_dict(), output_model_file)
    logger.info('Model saved to %s', output_model_file)
    return output_model_file
|
def load_model(epoch, args, n_gpu, device, model_file=None):
    """Load a saved checkpoint into a freshly constructed model.

    Args:
        epoch: epoch suffix used to derive the default checkpoint filename.
        args: parsed namespace (output_dir, cache_dir, cross_model, local_rank).
        n_gpu: unused here; kept for interface compatibility.
        device: device to move the loaded model onto.
        model_file: explicit checkpoint path; falls back to
            output_dir/pytorch_model.bin.<epoch> when None/empty.

    Returns:
        The loaded model on `device`, or None when the checkpoint is missing.
    """
    if ((model_file is None) or (len(model_file) == 0)):
        model_file = os.path.join(args.output_dir, 'pytorch_model.bin.{}'.format(epoch))
    if os.path.exists(model_file):
        model_state_dict = torch.load(model_file, map_location='cpu')
        if (args.local_rank == 0):
            logger.info('Model loaded from %s', model_file)
        cache_dir = (args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed'))
        # NOTE(review): this constructs EMCL4QA while the rest of the file
        # builds HBI — confirm which class these checkpoints belong to.
        model = EMCL4QA.from_pretrained(args.cross_model, cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
        model.to(device)
    else:
        model = None
    return model
|
def train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=0, tokenizer=ClipTokenizer()):
    """Run one VideoQA training epoch with gradient accumulation.

    Returns:
        (mean loss over the epoch, updated global_step).

    NOTE(review): the default `tokenizer=ClipTokenizer()` is evaluated once at
    definition time and shared across calls — confirm the tokenizer is
    stateless.
    """
    global logger
    torch.cuda.empty_cache()
    model.train()
    log_step = args.n_display
    start_time = time.time()
    total_loss = 0
    for (step, batch) in enumerate(train_dataloader):
        if (n_gpu == 1):
            # Single-device: move tensors explicitly (DDP handles multi-GPU).
            batch = tuple((t.to(device=device, non_blocking=True) for t in batch))
        (input_ids, input_mask, segment_ids, video, video_mask, labels) = batch
        ce_loss = model(input_ids, segment_ids, input_mask, video, video_mask, labels)
        if (n_gpu > 1):
            ce_loss = ce_loss.mean()
        if (args.gradient_accumulation_steps > 1):
            # Scale so accumulated gradients average over micro-batches.
            ce_loss = (ce_loss / args.gradient_accumulation_steps)
        loss = ce_loss
        loss.backward()
        total_loss += float(loss)
        if (((step + 1) % args.gradient_accumulation_steps) == 0):
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            # NOTE(review): scheduler.step() runs before optimizer.step(); with
            # BertAdam's internal schedule this may be intentional — confirm.
            if (scheduler is not None):
                scheduler.step()
            optimizer.step()
            optimizer.zero_grad()
            # Clamp CLIP's learned temperature to at most log(100), as in CLIP.
            if hasattr(model, 'module'):
                torch.clamp_(model.module.clip.logit_scale.data, max=np.log(100))
            else:
                torch.clamp_(model.clip.logit_scale.data, max=np.log(100))
            global_step += 1
            if (((global_step % log_step) == 0) and (local_rank == 0)):
                logger.info('Epoch: %d/%s, Step: %d/%d, Lr: %s, CeLoss: %f, Time/step: %f', (epoch + 1), args.epochs, (step + 1), len(train_dataloader), '-'.join([str(('%.9f' % itm)) for itm in sorted(list(set(optimizer.get_lr())))]), float(ce_loss), ((time.time() - start_time) / (log_step * args.gradient_accumulation_steps)))
                start_time = time.time()
    total_loss = (total_loss / len(train_dataloader))
    return (total_loss, global_step)
|
def eval_epoch(args, model, test_dataloader, device, n_gpu):
    """Evaluate VideoQA precision@1/@5 over `test_dataloader`; return precision@1."""
    top1 = AverageMeter()
    top5 = AverageMeter()
    # Unwrap DDP if needed, then move to the evaluation device.
    core = model.module if hasattr(model, 'module') else model
    model = core.to(device)
    model.eval()
    with torch.no_grad():
        for bid, batch in enumerate(test_dataloader):
            moved = tuple(t.to(device) for t in batch)
            input_ids, input_mask, segment_ids, video, video_mask, labels = moved
            output = model(input_ids, segment_ids, input_mask, video, video_mask, labels)
            prec1, prec5 = accuracy(output, labels, topk=(1, 5))
            batch_count = input_ids.size(0)
            top1.update(prec1[0], batch_count)
            top5.update(prec5[0], batch_count)
            # Lightweight in-place progress indicator.
            print('{}/{}\r'.format(bid, len(test_dataloader)), end='')
    logger.info('Video QA:')
    logger.info('\t>>> Prec@1: {top1.avg:.3f} - Prec@5: {top5.avg:.3f}'.format(top1=top1, top5=top5))
    R1 = top1.avg
    return R1
|
def main():
    """Entry point: train and/or evaluate the VideoQA model.

    Workflow: seed + distributed setup, build model, optionally freeze lower
    CLIP layers, build dataloaders, then either run the train loop (saving and
    validating every epoch, finally testing the best checkpoint) or a one-off
    evaluation.
    """
    global logger
    args = get_args()
    args = set_seed_logger(args)
    (device, n_gpu) = init_device(args, args.local_rank)
    tokenizer = ClipTokenizer()
    assert (args.task_type == 'retrieval')
    # Fixed answer-vocabulary size used by the QA head.
    args.num_labels = 1500
    model = init_model(args, device, n_gpu, args.local_rank)
    assert ((args.freeze_layer_num <= 12) and (args.freeze_layer_num >= (- 1)))
    if (hasattr(model, 'clip') and (args.freeze_layer_num > (- 1))):
        for (name, param) in model.clip.named_parameters():
            # Projection heads, final LayerNorms and the logit scale always
            # stay trainable.
            if ((name.find('ln_final.') == 0) or (name.find('text_projection') == 0) or (name.find('logit_scale') == 0) or (name.find('visual.ln_post.') == 0) or (name.find('visual.proj') == 0)):
                continue
            elif ((name.find('visual.transformer.resblocks.') == 0) or (name.find('transformer.resblocks.') == 0)):
                layer_num = int(name.split('.resblocks.')[1].split('.')[0])
                # Blocks at or above the freeze threshold remain trainable.
                if (layer_num >= args.freeze_layer_num):
                    continue
            # NOTE(review): name.find('conv2.') is truthy both when 'conv2.'
            # occurs past position 0 AND when absent (find returns -1), so this
            # branch likely fires for unrelated parameters — confirm against
            # the upstream CLIP4Clip implementation.
            if ((args.linear_patch == '3d') and name.find('conv2.')):
                continue
            else:
                param.requires_grad = False
    assert (args.datatype in DATALOADER_DICT)
    (test_dataloader, test_length) = DATALOADER_DICT[args.datatype]['test'](args, tokenizer)
    if (DATALOADER_DICT[args.datatype]['val'] is not None):
        (val_dataloader, val_length) = DATALOADER_DICT[args.datatype]['val'](args, tokenizer, subset='val')
    else:
        # No val split: validate on the test split.
        (val_dataloader, val_length) = (test_dataloader, test_length)
    if (args.local_rank == 0):
        logger.info('***** Running test *****')
        logger.info('  Num examples = %d', test_length)
        logger.info('  Batch size = %d', args.batch_size_val)
        logger.info('  Num steps = %d', len(test_dataloader))
        logger.info('***** Running val *****')
        logger.info('  Num examples = %d', val_length)
    if args.do_train:
        (train_dataloader, train_length, train_sampler) = DATALOADER_DICT[args.datatype]['train'](args, tokenizer)
        # Ceiling-divide so a partial accumulation window still counts as a step.
        num_train_optimization_steps = ((int(((len(train_dataloader) + args.gradient_accumulation_steps) - 1)) / args.gradient_accumulation_steps) * args.epochs)
        coef_lr = args.coef_lr
        (optimizer, scheduler, model) = prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, args.local_rank, coef_lr=coef_lr)
        if (args.local_rank == 0):
            logger.info('***** Running training *****')
            logger.info('  Num examples = %d', train_length)
            logger.info('  Batch size = %d', args.batch_size)
            logger.info('  Num steps = %d', (num_train_optimization_steps * args.gradient_accumulation_steps))
        best_score = 1e-05
        best_output_model_file = 'None'
        global_step = 0
        for epoch in range(args.epochs):
            # Reshuffle the distributed sampler differently each epoch.
            train_sampler.set_epoch(epoch)
            (tr_loss, global_step) = train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=args.local_rank, tokenizer=tokenizer)
            if (args.local_rank == 0):
                logger.info('Epoch %d/%s Finished, Train Loss: %f', (epoch + 1), args.epochs, tr_loss)
                output_model_file = save_model(epoch, args, model, type_name='')
                logger.info('Eval on val dataset')
                R1 = eval_epoch(args, model, val_dataloader, device, n_gpu)
                if (best_score <= R1):
                    best_score = R1
                    best_output_model_file = output_model_file
                logger.info('The best model is: {}, the Top1 Acc is: {:.4f}'.format(best_output_model_file, best_score))
        if (args.local_rank == 0):
            # Reload the best checkpoint and report final test accuracy.
            model = load_model((- 1), args, n_gpu, device, model_file=best_output_model_file)
            eval_epoch(args, model, test_dataloader, device, n_gpu)
    elif args.do_eval:
        if (args.local_rank == 0):
            eval_epoch(args, model, test_dataloader, device, n_gpu)
|
def compress(paras):
    """Re-encode one video with ffmpeg: scale the short side to 224 px
    (keeping aspect ratio, forced to even dimensions) and resample to 3 fps.

    paras: (input_video_path, output_video_path) tuple, so the function can
    be mapped over a worker pool.

    Returns ffmpeg's return code (0 on success). Raises if the subprocess
    itself cannot be spawned (e.g. ffmpeg not installed).
    """
    (input_video_path, output_video_path) = paras
    command = ['ffmpeg', '-y', '-i', input_video_path,
               '-filter:v',
               "scale='if(gt(a,1),trunc(oh*a/2)*2,224)':'if(gt(a,1),224,trunc(ow*a/2)*2)'",
               '-map', '0:v', '-r', '3', output_video_path]
    ffmpeg = subprocess.Popen(command, stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() waits for completion and drains both pipes so ffmpeg
    # cannot dead-lock on a full stderr buffer.
    (out, err) = ffmpeg.communicate()
    # BUGFIX: the original wrapped everything in `except Exception as e:
    # raise e` (a no-op handler) and silently dropped the return code;
    # surface it to the caller instead.
    retcode = ffmpeg.poll()
    return retcode
|
def prepare_input_output_pairs(input_root, output_root):
    """Walk input_root and collect matching (input, output) video path lists.

    A pair is skipped when the output file already exists and is non-empty,
    so interrupted runs can be resumed. Outputs are flattened into
    output_root regardless of the input sub-directory.
    """
    inputs = []
    outputs = []
    for root, _dirs, files in os.walk(input_root):
        for file_name in files:
            dst = os.path.join(output_root, file_name)
            already_done = os.path.exists(dst) and os.path.getsize(dst) > 0
            if not already_done:
                inputs.append(os.path.join(root, file_name))
                outputs.append(dst)
    return (inputs, outputs)
|
class QuadKey():
    """A Bing-Maps-style quadkey: a base-4 string addressing one map tile.

    Each character encodes one zoom level, so the key length is the level.
    """

    @precondition((lambda c, key: valid_key(key)))
    def __init__(self, key):
        """
        A quadkey must be between 1 and 23 digits and can only contain digit[0-3]
        """
        self.key = key
        self.level = len(key)

    def children(self):
        """Return the four child quadkeys one level deeper, or [] at max depth."""
        if self.level >= 23:
            return []
        return [QuadKey(self.key + str(k)) for k in [0, 1, 2, 3]]

    def parent(self):
        """Return the quadkey one level up (drops the last digit)."""
        return QuadKey(self.key[:-1])

    def nearby(self):
        """Return quadkeys of the 8 neighbouring tiles at the same level.

        NOTE(review): abs() reflects negative tile coordinates back into
        range, folding out-of-bounds neighbours onto mirrored tiles rather
        than wrapping -- confirm this edge behaviour is intended.
        """
        (tile, level) = TileSystem.quadkey_to_tile(self.key)
        perms = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]
        tiles = set(map((lambda perm: (abs(tile[0] + perm[0]), abs(tile[1] + perm[1]))), perms))
        return [TileSystem.tile_to_quadkey(tile, level) for tile in tiles]

    def is_ancestor(self, node):
        """
        If node is ancestor of self
        Get the difference in level
        If not, None
        """
        if (self.level <= node.level) or (self.key[:len(node.key)] != node.key):
            return None
        return self.level - node.level

    def is_descendent(self, node):
        """
        If node is descendent of self
        Get the difference in level
        If not, None
        """
        return node.is_ancestor(self)

    def area(self):
        """Approximate tile area in square metres, evaluated at latitude 0."""
        size = TileSystem.map_size(self.level)
        LAT = 0
        res = TileSystem.ground_resolution(LAT, self.level)
        side = (size / 2) * res
        return side * side

    def xdifference(self, to):
        """ Generator
        Gives the difference of quadkeys between self and to
        Generator in case done on a low level
        Only works with quadkeys of same level
        """
        (x, y) = (0, 1)
        assert self.level == to.level
        self_tile = list(self.to_tile()[0])
        to_tile = list(to.to_tile()[0])
        # BUGFIX: the second clause used to compare self_tile[y] with itself
        # (always True); compare against to_tile[y] so the north-east /
        # south-west corner labelling is chosen correctly.
        if (self_tile[x] >= to_tile[x]) and (self_tile[y] <= to_tile[y]):
            (ne_tile, sw_tile) = (self_tile, to_tile)
        else:
            (sw_tile, ne_tile) = (self_tile, to_tile)
        cur = ne_tile[:]
        while cur[x] >= sw_tile[x]:
            while cur[y] <= sw_tile[y]:
                yield from_tile(tuple(cur), self.level)
                cur[y] += 1
            cur[x] -= 1
            cur[y] = ne_tile[y]

    def difference(self, to):
        """ Non generator version of xdifference
        """
        return [qk for qk in self.xdifference(to)]

    def unwind(self):
        """ Get a list of all ancestors in descending order of level, including a new instance of self
        """
        return [QuadKey(self.key[:(l + 1)]) for l in reversed(range(len(self.key)))]

    def to_tile(self):
        """Return ((tile_x, tile_y), level) for this key."""
        return TileSystem.quadkey_to_tile(self.key)

    def to_geo(self, centered=False):
        """Return (lat, lon) of the tile's corner (or centre when centered)."""
        ret = TileSystem.quadkey_to_tile(self.key)
        tile = ret[0]
        lvl = ret[1]
        pixel = TileSystem.tile_to_pixel(tile, centered)
        return TileSystem.pixel_to_geo(pixel, lvl)

    def __eq__(self, other):
        return self.key == other.key

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return self.key

    def __repr__(self):
        return self.key
|
def from_geo(geo, level):
    """
    Constucts a quadkey representation from geo and level
    geo => (lat, lon)
    If lat or lon are outside of bounds, they will be clipped
    If level is outside of bounds, an AssertionError is raised

    """
    pixel = TileSystem.geo_to_pixel(geo, level)
    tile = TileSystem.pixel_to_tile(pixel)
    return QuadKey(TileSystem.tile_to_quadkey(tile, level))
|
def from_tile(tile, level):
    """Construct a QuadKey from (tile_x, tile_y) coordinates at a level."""
    key = TileSystem.tile_to_quadkey(tile, level)
    return QuadKey(key)
|
def from_str(qk_str):
    """Wrap an existing quadkey string in a QuadKey (validated by __init__)."""
    return QuadKey(qk_str)
|
def geo_to_dict(geo):
    """ Take a geo tuple and return a labeled dict
    (lat, lon) -> {'lat': lat, 'lon', lon}
    """
    (lat, lon) = (geo[0], geo[1])
    return {LAT_STR: lat, LON_STR: lon}
|
def valid_level(level):
    """True when level lies in the supported quadkey range [1, 23]."""
    return 1 <= level <= 23
|
@precondition((lambda key: valid_level(len(key))))
def valid_key(key):
    """True when key consists only of digits 0-3 (length is asserted by the
    precondition via valid_level)."""
    return bool(TileSystem.KEY_PATTERN.match(key))
|
class TileSystem():
    """
    Class with static method to build quadkeys from lat, lon, levels
    see http://msdn.microsoft.com/en-us/library/bb259689.aspx
    """
    import re
    KEY_PATTERN = re.compile('^[0-3]+$')
    EARTH_RADIUS = 6378137
    LATITUDE_RANGE = ((- 85.05112878), 85.05112878)
    LONGITUDE_RANGE = ((- 180.0), 180.0)

    @staticmethod
    @precondition((lambda n, minMax: (minMax[0] <= minMax[1])))
    def clip(n, minMax):
        """Clips number n to the inclusive (min, max) range."""
        return min(max(n, minMax[0]), minMax[1])

    @staticmethod
    @precondition(valid_level)
    def map_size(level):
        """Determines map height and width in pixel space at level."""
        return (256 << level)

    @staticmethod
    @precondition((lambda lat, lvl: valid_level(lvl)))
    def ground_resolution(lat, level):
        """Gets ground resolution in meters / pixel at the given latitude."""
        lat = TileSystem.clip(lat, TileSystem.LATITUDE_RANGE)
        return ((((cos(((lat * pi) / 180)) * 2) * pi) * TileSystem.EARTH_RADIUS) / TileSystem.map_size(level))

    @staticmethod
    @precondition((lambda lat, lvl, dpi: valid_level(lvl)))
    def map_scale(lat, level, dpi):
        """Gets the scale of the map expressed as ratio 1 : N. Returns N."""
        return ((TileSystem.ground_resolution(lat, level) * dpi) / 0.0254)

    @staticmethod
    @precondition((lambda geo, lvl: valid_level(lvl)))
    def geo_to_pixel(geo, level):
        """Transform from geo coordinates to pixel coordinates."""
        (lat, lon) = (float(geo[0]), float(geo[1]))
        lat = TileSystem.clip(lat, TileSystem.LATITUDE_RANGE)
        lon = TileSystem.clip(lon, TileSystem.LONGITUDE_RANGE)
        x = ((lon + 180) / 360)
        sin_lat = sin(((lat * pi) / 180))
        # Web-Mercator projection of latitude into the unit interval.
        y = (0.5 - (log(((1 + sin_lat) / (1 - sin_lat))) / (4 * pi)))
        map_size = TileSystem.map_size(level)
        pixel_x = int(TileSystem.clip(((x * map_size) + 0.5), (0, (map_size - 1))))
        pixel_y = int(TileSystem.clip(((y * map_size) + 0.5), (0, (map_size - 1))))
        return (pixel_x, pixel_y)

    @staticmethod
    @precondition((lambda pix, lvl: valid_level(lvl)))
    def pixel_to_geo(pixel, level):
        """Transform from pixel to geo coordinates (rounded to 6 decimals)."""
        pixel_x = pixel[0]
        pixel_y = pixel[1]
        map_size = float(TileSystem.map_size(level))
        x = ((TileSystem.clip(pixel_x, (0, (map_size - 1))) / map_size) - 0.5)
        y = (0.5 - (TileSystem.clip(pixel_y, (0, (map_size - 1))) / map_size))
        lat = (90 - ((360 * atan(exp((((- y) * 2) * pi)))) / pi))
        lon = (360 * x)
        return (round(lat, 6), round(lon, 6))

    @staticmethod
    def pixel_to_tile(pixel):
        """Transform pixel to tile coordinates (tiles are 256 px square)."""
        return ((pixel[0] // 256), (pixel[1] // 256))

    @staticmethod
    def tile_to_pixel(tile, centered=False):
        """Transform tile to pixel coordinates (corner, or centre if centered)."""
        pixel = [(tile[0] * 256), (tile[1] * 256)]
        if centered:
            pixel = [(pix + 128) for pix in pixel]
        return (pixel[0], pixel[1])

    @staticmethod
    @precondition((lambda tile, lvl: valid_level(lvl)))
    def tile_to_quadkey(tile, level):
        """Transform tile coordinates to a quadkey string."""
        tile_x = tile[0]
        tile_y = tile[1]
        quadkey = ''
        # BUGFIX: xrange is Python 2 only; this file uses Python 3 features
        # (f-strings elsewhere), so xrange raised NameError. Use range.
        for i in range(level):
            bit = (level - i)
            digit = ord('0')
            mask = (1 << (bit - 1))
            # BUGFIX: '(x & mask) is not 0' tested object identity, not
            # value (and emits a SyntaxWarning on modern Python); use != 0.
            if ((tile_x & mask) != 0):
                digit += 1
            if ((tile_y & mask) != 0):
                digit += 2
            quadkey += chr(digit)
        return quadkey

    @staticmethod
    def quadkey_to_tile(quadkey):
        """Transform quadkey to [(tile_x, tile_y), level]."""
        (tile_x, tile_y) = (0, 0)
        level = len(quadkey)
        # BUGFIX: xrange -> range (Python 3).
        for i in range(level):
            bit = (level - i)
            mask = (1 << (bit - 1))
            if (quadkey[(level - bit)] == '1'):
                tile_x |= mask
            if (quadkey[(level - bit)] == '2'):
                tile_y |= mask
            if (quadkey[(level - bit)] == '3'):
                tile_x |= mask
                tile_y |= mask
        return [(tile_x, tile_y), level]
|
def condition(precondition=None, postcondition=None):
    """Decorator factory asserting predicates around a call.

    precondition receives the wrapped function's arguments; postcondition
    receives its return value. Either may be None to skip that check.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if precondition is not None:
                assert precondition(*args, **kwargs)
            result = func(*args, **kwargs)
            if postcondition is not None:
                assert postcondition(result)
            return result
        return wrapper
    return decorator
|
def precondition(check):
    """Decorator: assert check(*args, **kwargs) before invoking the function."""
    return condition(check, None)
|
def postcondition(check):
    """Decorator: assert check(return_value) after invoking the function."""
    return condition(None, check)
|
def run():
    """Entry point: discover and run all unittest TestCases in this module."""
    unittest.main()
|
class QuadkeyTest(TestCase):
    """Behavioural tests for the quadkey module's QuadKey type."""

    def testInit(self):
        qk = quadkey.from_str('0321201120')
        with self.assertRaises(AssertionError):
            qk = quadkey.from_str('')
        with self.assertRaises(AssertionError):
            qk = quadkey.from_str('0156510012')

    def testFromGeo(self):
        geo = (40, (- 105))
        level = 7
        key = quadkey.from_str('0231010')
        self.assertEqual(key, quadkey.from_geo(geo, level))

    def testEquality(self):
        one = quadkey.from_str('00')
        two = quadkey.from_str('00')
        self.assertEqual(one, two)
        three = quadkey.from_str('0')
        self.assertNotEqual(one, three)

    def testChildren(self):
        qk = quadkey.from_str('0')
        self.assertEqual([c.key for c in qk.children()], ['00', '01', '02', '03'])
        # BUGFIX: xrange does not exist in Python 3 (this raised NameError);
        # use range to build the maximum-depth key.
        qk = quadkey.from_str(''.join(['0' for x in range(23)]))
        self.assertEqual(qk.children(), [])

    def testAncestry(self):
        one = quadkey.from_str('0')
        two = quadkey.from_str('0101')
        self.assertEqual(3, one.is_descendent(two))
        self.assertIsNone(two.is_descendent(one))
        self.assertEqual(3, two.is_ancestor(one))
        three = quadkey.from_str('1')
        self.assertIsNone(three.is_ancestor(one))

    def testNearby(self):
        qk = quadkey.from_str('0')
        self.assertEqual(set(['1', '2', '3']), set(qk.nearby()))

    def testUnwind(self):
        qk = quadkey.from_str('0123')
        self.assertEqual(['0123', '012', '01', '0'], [qk.key for qk in qk.unwind()])

    def testDifference(self):
        _from = quadkey.from_str('0320101102')
        _to = quadkey.from_str('0320101110')
        diff = set(['0320101102', '0320101100', '0320101103', '0320101101', '0320101112', '0320101110'])
        self.assertEqual(diff, set([qk.key for qk in _to.difference(_from)]))
        self.assertEqual(diff, set([qk.key for qk in _from.difference(_to)]))
|
class TileSystemTest(TestCase):
    """Unit tests for the TileSystem static conversion helpers."""

    def testClip(self):
        self.assertEqual(1, TileSystem.clip(0, (1, 5)))
        self.assertEqual(5, TileSystem.clip(10, (1, 5)))
        self.assertEqual(3, TileSystem.clip(3, (1, 5)))
        with self.assertRaises(AssertionError):
            TileSystem.clip(7, (5, 1))

    def testMapSize(self):
        self.assertEqual(512, TileSystem.map_size(1))
        with self.assertRaises(AssertionError):
            TileSystem.map_size(0)

    def testGroundResolution(self):
        geo = (40.0, (- 105.0))
        res = 936.8665722621985
        # BUGFIX: the computed resolution was previously discarded, so this
        # test asserted nothing; compare against the expected value
        # (assertAlmostEqual to be robust to float rounding).
        self.assertAlmostEqual(res, TileSystem.ground_resolution(geo[0], 7), places=6)

    def testMapScale(self):
        geo = (40.0, (- 105.0))
        level = 7
        dpi = 96
        scale = 3540913.029022482
        self.assertEqual(scale, TileSystem.map_scale(geo[0], level, dpi))

    def testGeoToPixel(self):
        geo = (40.0, (- 105.0))
        level = 7
        pixel = (6827, 12405)
        self.assertEqual(pixel, TileSystem.geo_to_pixel(geo, level))

    def testPixelToGeo(self):
        pixel = (6827, 12405)
        level = 7
        geo = (40.002372, (- 104.996338))
        self.assertEqual(geo, TileSystem.pixel_to_geo(pixel, level))

    def testPixelToTile(self):
        pixel = (6827, 12405)
        tile = (26, 48)
        self.assertEqual(tile, TileSystem.pixel_to_tile(pixel))

    def testTileToPixel(self):
        tile = (26, 48)
        pixel = (6656, 12288)
        self.assertEqual(pixel, TileSystem.tile_to_pixel(tile))

    def testTileToQuadkey(self):
        tile = (26, 48)
        level = 7
        key = '0231010'
        self.assertEqual(key, TileSystem.tile_to_quadkey(tile, level))

    def testQuadkeyToTile(self):
        tile = (26, 48)
        level = 7
        key = '0231010'
        self.assertEqual([tile, level], TileSystem.quadkey_to_tile(key))
|
class UtilTest(TestCase):
    """Tests for the condition/precondition/postcondition decorators."""

    def testPrecondition(self):
        self.assertTrue(self.pre(True))
        with self.assertRaises(AssertionError):
            self.pre(False)

    def testPostcondition(self):
        # BUGFIX: this test was an empty placeholder; exercise the
        # postcondition decorator the same way testPrecondition does.
        self.assertTrue(self.post(True))
        with self.assertRaises(AssertionError):
            self.post(False)

    @precondition((lambda c, x: (x is True)))
    def pre(self, x):
        return x

    @postcondition((lambda ret: (ret is True)))
    def post(self, x):
        return x
|
def makeOsmFileName(fileNumber):
    """Return the path anomaly/reviewed_NN.osm for a zero-padded file number."""
    fname = 'reviewed_{:02d}.osm'.format(fileNumber)
    return os.path.join('anomaly', fname)
|
def saveOsmData(query):
    """Run an Overpass query and dump each returned way to disk.

    For every way: write its node coordinates as a tab-separated .csv and a
    polygon .GeoJSON under cfg.rootOsmDir/<sport tag>/<way id>, and count
    ways per sport tag in the module-level `summary` dict.

    NOTE(review): assumes every way carries a 'sport' tag; a missing tag
    makes featureDirectoryName None and breaks os.path.join -- confirm the
    queries always filter on sport.
    """
    result = api.query(query)
    for way in result.ways:
        featureDirectoryName = way.tags.get('sport')
        outputDirectoryName = os.path.join(cfg.rootOsmDir, featureDirectoryName)
        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(outputDirectoryName, exist_ok=True)
        summary[featureDirectoryName] = summary.get(featureDirectoryName, 0) + 1
        filenameBase = os.path.join(cfg.rootOsmDir, featureDirectoryName, str(way.id))
        with open(('%s.csv' % filenameBase), 'wt') as text_file:
            for node in way.nodes:
                text_file.write(('%0.7f\t%0.7f\n' % (node.lat, node.lon)))
        with open(('%s.GeoJSON' % filenameBase), 'wt') as text_file:
            rawNodes = [(node.lon, node.lat) for node in way.nodes]
            try:
                geom = shapely.geometry.Polygon(rawNodes)
                tags = way.tags
                tags['wayOSMId'] = way.id
                features = [geojson.Feature(geometry=geom, properties=tags)]
                featureC = geojson.FeatureCollection(features)
                text_file.write(geojson.dumps(featureC))
            except Exception as e:
                # Degenerate geometries (e.g. fewer than 3 points) are
                # reported and skipped rather than aborting the export.
                print(e)
|
def _find_getch():
try:
import termios
except ImportError:
import msvcrt
return msvcrt.getch
import sys, tty
def _getch():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(fd)
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
return _getch
|
def find_in_path(name, path):
    """Find a file in a search path.

    path is a PATH-style string of directories separated by os.pathsep.
    Returns the absolute path of the first match, or None.
    """
    candidates = (os.path.join(directory, name) for directory in path.split(os.pathsep))
    for candidate in candidates:
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
|
def get_cuda_sm_list(cuda_ver):
if ('CUDA_SM_LIST' in os.environ):
sm_list = os.environ['CUDA_SM_LIST'].split(',')
else:
sm_list = ['30', '52', '60', '61', '70', '75', '80', '86']
if (cuda_ver >= 110):
filter_list = ['30']
if (cuda_ver == 110):
filter_list += ['86']
else:
filter_list = ['80', '86']
if (cuda_ver < 100):
filter_list += ['75']
if (cuda_ver < 90):
filter_list += ['70']
if (cuda_ver < 80):
filter_list += ['60', '61']
sm_list = [sm for sm in sm_list if (sm not in filter_list)]
return sm_list
|
def get_cuda_compute(cuda_ver):
    """Return the PTX compute capability embedded for forward compatibility.

    The CUDA_COMPUTE env var overrides; otherwise the value is picked from
    the toolkit version (cuda_ver is 10 * major + minor). Unsupported
    versions leave `compute` unbound, as in the original version table.
    """
    if 'CUDA_COMPUTE' in os.environ:
        return os.environ['CUDA_COMPUTE']
    table = (
        ((lambda v: 70 <= v < 80), '52'),
        ((lambda v: 80 <= v < 90), '61'),
        ((lambda v: 90 <= v < 100), '70'),
        ((lambda v: 100 <= v < 110), '75'),
        ((lambda v: v == 110), '80'),
        ((lambda v: v == 111), '86'),
    )
    for matches, value in table:
        if matches(cuda_ver):
            compute = value
    return compute
|
def get_cuda_arch(cuda_ver):
    """Return the real architecture for nvcc's -arch=sm_XX flag.

    The CUDA_ARCH env var overrides; otherwise the value is picked from the
    toolkit version (cuda_ver is 10 * major + minor). Unsupported versions
    leave `arch` unbound, matching the original version table.
    """
    if 'CUDA_ARCH' in os.environ:
        return os.environ['CUDA_ARCH']
    table = (
        ((lambda v: 70 <= v < 92), '30'),
        ((lambda v: 92 <= v < 110), '50'),
        ((lambda v: v == 110), '52'),
        ((lambda v: v == 111), '80'),
    )
    for matches, value in table:
        if matches(cuda_ver):
            arch = value
    return arch
|
def locate_cuda():
    "Locate the CUDA environment on the system\n If a valid cuda installation is found\n this returns a dict with keys 'home', 'nvcc', 'include',\n and 'lib64' and values giving the absolute path to each directory.\n Starts by looking for the CUDAHOME env variable.\n If not found, everything is based on finding\n 'nvcc' in the PATH.\n If nvcc can't be found, this returns None\n "
    nvcc_bin = 'nvcc'
    if sys.platform.startswith('win'):
        nvcc_bin = 'nvcc.exe'
    # Prefer explicit environment overrides; first matching variable wins.
    found = False
    for env_name in ['CUDA_PATH', 'CUDAHOME', 'CUDA_HOME']:
        if (env_name not in os.environ):
            continue
        found = True
        home = os.environ[env_name]
        nvcc = os.path.join(home, 'bin', nvcc_bin)
        break
    if (not found):
        # Fall back to searching PATH for the nvcc binary.
        nvcc = find_in_path(nvcc_bin, os.environ['PATH'])
        if (nvcc is None):
            logging.warning('The nvcc binary could not be located in your $PATH. Either add it to your path, or set $CUDA_HOME to enable CUDA extensions')
            return None
        home = os.path.dirname(os.path.dirname(nvcc))
    cudaconfig = {'home': home, 'nvcc': nvcc, 'include': os.path.join(home, 'include'), 'lib64': os.path.join(home, 'lib64')}
    # Derive the toolkit version as 10*major + minor from the realpath
    # basename (e.g. /usr/local/cuda -> cuda-11.1 -> 111).
    # NOTE(review): assumes the install dir is named like 'cuda-<ver>';
    # a plain 'cuda' directory would raise IndexError here -- confirm.
    cuda_ver = os.path.basename(os.path.realpath(home)).split('-')[1].split('.')
    (major, minor) = (int(cuda_ver[0]), int(cuda_ver[1]))
    cuda_ver = ((10 * major) + minor)
    assert (cuda_ver >= 70), f'too low cuda ver {major}.{minor}'
    print(f'cuda_ver: {major}.{minor}')
    arch = get_cuda_arch(cuda_ver)
    sm_list = get_cuda_sm_list(cuda_ver)
    compute = get_cuda_compute(cuda_ver)
    # One real SM code object per target plus a PTX fallback for forward
    # compatibility with newer GPUs.
    post_args = (([f'-arch=sm_{arch}'] + [f'-gencode=arch=compute_{sm},code=sm_{sm}' for sm in sm_list]) + [f'-gencode=arch=compute_{compute},code=compute_{compute}', '--ptxas-options=-v', '-O2'])
    print(f'nvcc post args: {post_args}')
    if HALF_PRECISION:
        # Drop any flag mentioning '52' -- presumably because those targets
        # lack fp16 support; confirm against the kernels' requirements.
        post_args = [flag for flag in post_args if ('52' not in flag)]
    if (sys.platform == 'win32'):
        cudaconfig['lib64'] = os.path.join(home, 'lib', 'x64')
        post_args += ['-Xcompiler', '/MD', '-std=c++14', '-Xcompiler', '/openmp']
        if HALF_PRECISION:
            post_args += ['-Xcompiler', '/D HALF_PRECISION']
    else:
        post_args += ['-c', '--compiler-options', "'-fPIC'", '--compiler-options', "'-std=c++14'"]
        if HALF_PRECISION:
            post_args += ['--compiler-options', "'-D HALF_PRECISION'"]
    # Validate that every configured path actually exists.
    for (k, val) in cudaconfig.items():
        if (not os.path.exists(val)):
            logging.warning('The CUDA %s path could not be located in %s', k, val)
            return None
    cudaconfig['post_args'] = post_args
    return cudaconfig
|
class _UnixCCompiler(unixccompiler.UnixCCompiler):
    """UnixCCompiler that routes .cu sources through nvcc.

    Non-.cu files compile exactly as in the base class; .cu files are
    compiled by temporarily swapping compiler_so for nvcc and passing the
    CUDA-specific post args from the module-level CUDA config.
    """
    src_extensions = list(unixccompiler.UnixCCompiler.src_extensions)
    src_extensions.append('.cu')
    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        if (os.path.splitext(src)[1] != '.cu'):
            return unixccompiler.UnixCCompiler._compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts)
        # Swap in nvcc just for this file; finally restores the original
        # compiler even if compilation raises.
        _compiler_so = self.compiler_so
        try:
            nvcc_path = CUDA['nvcc']
            post_args = CUDA['post_args']
            self.set_executable('compiler_so', nvcc_path)
            return unixccompiler.UnixCCompiler._compile(self, obj, src, ext, cc_args, post_args, pp_opts)
        finally:
            self.compiler_so = _compiler_so
|
class _MSVCCompiler(msvccompiler.MSVCCompiler):
    """MSVCCompiler extended to compile .cu sources with nvcc.

    compile() splits sources into .cu and everything else; normal sources
    go through MSVC, .cu files are spawned through nvcc directly.
    """
    _cu_extensions = ['.cu']
    # NOTE(review): the extension list is seeded from UnixCCompiler even
    # though this is the MSVC subclass -- looks intentional (broad source
    # list), but confirm.
    src_extensions = list(unixccompiler.UnixCCompiler.src_extensions)
    src_extensions.extend(_cu_extensions)
    def _compile_cu(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
        """Compile .cu sources by invoking nvcc once per object file."""
        (macros, objects, extra_postargs, pp_opts, _build) = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs)
        compiler_so = CUDA['nvcc']
        cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
        post_args = CUDA['post_args']
        for obj in objects:
            try:
                (src, _) = _build[obj]
            except KeyError:
                # Object with no recorded source (already up to date); skip.
                continue
            try:
                self.spawn(((([compiler_so] + cc_args) + [src, '-o', obj]) + post_args))
            except errors.DistutilsExecError as e:
                raise errors.CompileError(str(e))
        return objects
    def compile(self, sources, **kwargs):
        """Compile a mixed source list, dispatching .cu files to nvcc."""
        cu_sources = []
        other_sources = []
        for source in sources:
            if (os.path.splitext(source)[1] == '.cu'):
                cu_sources.append(source)
            else:
                other_sources.append(source)
        other_objects = msvccompiler.MSVCCompiler.compile(self, other_sources, **kwargs)
        cu_objects = self._compile_cu(cu_sources, **kwargs)
        return (other_objects + cu_objects)
|
class CudaBuildExt(setuptools_build_ext):
    'Custom `build_ext` command to include CUDA C source files.'
    def run(self):
        # When CUDA is available, monkey-patch distutils' compiler factory:
        # an unknown compiler name makes new_compiler raise, and the wrapper
        # then substitutes our CUDA-aware compiler for the current platform.
        if (CUDA is not None):
            def wrap_new_compiler(func):
                def _wrap_new_compiler(*args, **kwargs):
                    try:
                        return func(*args, **kwargs)
                    except errors.DistutilsPlatformError:
                        if (sys.platform != 'win32'):
                            CCompiler = _UnixCCompiler
                        else:
                            CCompiler = _MSVCCompiler
                        return CCompiler(None, kwargs['dry_run'], kwargs['force'])
                return _wrap_new_compiler
            ccompiler.new_compiler = wrap_new_compiler(ccompiler.new_compiler)
            # 'nvidia' is not a real compiler name, which forces the
            # DistutilsPlatformError path handled by the wrapper above.
            self.compiler = 'nvidia'
        setuptools_build_ext.run(self)
|
def get_logger(name=__file__, level=2):
    """Return a configured stream logger; idempotent per logger name.

    level: 1 -> WARNING, 2 -> INFO, 3 -> DEBUG (any other value is passed
    to logging unchanged). If the named logger already has handlers it is
    returned untouched, so repeated calls do not duplicate output.
    """
    level = {1: logging.WARNING, 2: logging.INFO, 3: logging.DEBUG}.get(level, level)
    logger = logging.getLogger(name)
    if logger.handlers:
        return logger
    logger.setLevel(level)
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter(
        '[%(levelname)-8s] %(asctime)s [%(filename)s] [%(funcName)s:%(lineno)d]%(message)s',
        '%Y-%m-%d %H:%M:%S'))
    logger.addHandler(handler)
    return logger
|
def load_json_string(cont):
    """Parse lenient JSON: comments are stripped via jsmin and trailing
    commas before '}' or ']' are removed before handing off to json.loads."""
    stripped = jsmin.jsmin(cont)
    stripped = re.sub(',[ \t\r\n]*}', '}', stripped)
    stripped = re.sub(',[ \t\r\n]*\\]', ']', stripped)
    return json.loads(stripped)
|
def load_json_file(fname):
    """Read a file and parse it with the lenient JSON loader."""
    with open(fname, 'r') as fin:
        return load_json_string(fin.read())
|
def get_opt_as_proto(raw, proto_type=ConfigProto):
    """Convert raw options (a dict or a JSON file path, normalised through
    Option) into a protobuf message of proto_type, asserting that every
    required field ended up set."""
    proto = proto_type()
    Parse(json.dumps(Option(raw)), proto)
    # IsInitialized fills err with the names of any missing required fields.
    err = []
    assert proto.IsInitialized(err), f'''some required fields are missing in proto {err}
{proto}'''
    return proto
|
def proto_to_dict(proto):
    """Convert a protobuf message to a dict, keeping the original proto
    field names and including fields still at their default values."""
    return MessageToDict(proto, including_default_value_fields=True, preserving_proto_field_name=True)
|
def copy_proto(proto):
    """Deep-copy a protobuf message by round-tripping it through JSON."""
    clone = type(proto)()
    Parse(json.dumps(proto_to_dict(proto)), clone)
    return clone
|
class Option(dict):
    """dict subclass with attribute-style access.

    Positional args may be dicts or JSON file paths (loaded via
    load_json_file); nested dicts are recursively wrapped as Options.
    Item and attribute namespaces are kept in sync, and missing attributes
    read as None (dict.get semantics).
    """

    def __init__(self, *args, **kwargs):
        # Normalise every positional arg to a dict first (paths -> JSON).
        loaded = [a if isinstance(a, dict) else load_json_file(a) for a in args]
        super().__init__(*loaded, **kwargs)
        for mapping in loaded:
            self._absorb(mapping)
        if kwargs:
            self._absorb(kwargs)

    def _absorb(self, mapping):
        # Store each entry through __setitem__ so __dict__ stays in sync;
        # nested dicts become Options for chained attribute access.
        for key, value in mapping.items():
            self[key] = Option(value) if isinstance(value, dict) else value

    def __getattr__(self, attr):
        # Only called when normal lookup fails: fall back to item access,
        # returning None for unknown keys.
        return self.get(attr)

    def __setattr__(self, key, value):
        self[key] = value

    def __setitem__(self, key, value):
        dict.__setitem__(self, key, value)
        self.__dict__[key] = value

    def __delattr__(self, item):
        del self[item]

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        del self.__dict__[key]

    def __getstate__(self):
        return vars(self)

    def __setstate__(self, state):
        vars(self).update(state)
|
def get_extend_compile_flags():
    """Extra host-compiler flags for native builds."""
    return ['-march=native']
|
class CMakeExtension(Extension):
    """setuptools Extension marker whose actual build is delegated to CMake.

    extension_type is inspected by BuildExtension.run to decide whether to
    invoke cmake instead of the regular compiler pipeline.
    """
    # Tag checked by the custom build_ext command.
    extension_type = 'cmake'
    def __init__(self, name):
        # No direct sources: CMake produces the library artifacts.
        super().__init__(name, sources=[])
|
def git_version():
    """Return the current git HEAD sha1, or 'Unknown' when git is missing.

    The subprocess runs with a minimal environment (only SYSTEMROOT and
    PATH are forwarded) so the output is reproducible.
    """
    def _run(cmd):
        env = {k: os.environ[k] for k in ('SYSTEMROOT', 'PATH') if k in os.environ}
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
        return proc.communicate()[0]

    try:
        revision = _run(['git', 'rev-parse', 'HEAD']).strip().decode('ascii')
    except OSError:
        # git binary not found (or not executable).
        revision = 'Unknown'
    return revision
|
def write_version_py(filename='cuhnsw/version.py'):
    """Generate the package version module embedding VERSION and the
    current git revision."""
    template = "\nshort_version = '%(version)s'\ngit_revision = '%(git_revision)s'\n"
    revision = git_version()
    with open(filename, 'w') as fout:
        fout.write(template % {'version': VERSION, 'git_revision': revision})
|
class BuildExtension(BUILDEXT):
    """build_ext command that runs CMake for CMakeExtension entries before
    the regular extension build."""
    def run(self):
        for ext in self.extensions:
            print(ext.name)
            if (hasattr(ext, 'extension_type') and (ext.extension_type == 'cmake')):
                self.cmake()
        super().run()
    def cmake(self):
        """Configure and build the CMake project inside build_temp,
        emitting shared libraries into CLIB_DIR."""
        cwd = pathlib.Path().absolute()
        build_temp = pathlib.Path(self.build_temp)
        build_temp.mkdir(parents=True, exist_ok=True)
        build_type = ('Debug' if self.debug else 'Release')
        cmake_args = [('-DCMAKE_BUILD_TYPE=' + build_type), ('-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + CLIB_DIR)]
        build_args = []
        # cmake runs from the build dir; the original cwd is restored below.
        os.chdir(str(build_temp))
        self.spawn((['cmake', str(cwd)] + cmake_args))
        if (not self.dry_run):
            self.spawn((['cmake', '--build', '.'] + build_args))
        os.chdir(str(cwd))
|
def setup_package():
    """Assemble metadata and run the setuptools setup() call for cuhnsw.

    Writes cuhnsw/version.py first so the built package embeds the current
    git revision; relies on module-level VERSION, DOCLINES, CLASSIFIERS,
    extensions and INSTALL_REQUIRES.
    """
    write_version_py()
    cmdclass = {'build_ext': BuildExtension}
    metadata = dict(name='cuhnsw', maintainer='Jisang Yoon', maintainer_email='vjs10101v@gmail.com', author='Jisang Yoon', author_email='vjs10101v@gmail.com', description=DOCLINES[0], long_description='\n'.join(DOCLINES[2:]), url='https://github.com/js1010/cuhnsw', download_url='https://github.com/js1010/cuhnsw/releases', include_package_data=False, license='Apache2', packages=['cuhnsw/'], cmdclass=cmdclass, classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], platforms=['Linux', 'Mac OSX', 'Unix'], ext_modules=extensions, install_requires=INSTALL_REQUIRES, entry_points={'console_scripts': []}, python_requires='>=3.6')
    metadata['version'] = VERSION
    setup(**metadata)
|
class VOCSegGroupLoader(mx.io.DataIter):
    """Batch loader for VOC semantic segmentation that emits batches built
    from groups of images sharing at least one annotated class (e.g. pairs
    for co-attention training).

    Batch layout: batch_size = num_block * group_size * meta_length, where
    num_block should equal the number of GPUs (see the asserts below).
    """
    def __init__(self, image_root, label_root, annotation_root, data_list, batch_size, group_size, num_block, target_size, pad=False, shuffle=False, rand_scale=False, rand_mirror=False, rand_crop=False, downsample=None):
        assert (group_size >= 2), "'group_size': # common-class images, typical value is 2 for pairs"
        assert (num_block >= 1), "'num_block': should equal # GPU"
        assert ((batch_size % (group_size * num_block)) == 0)
        with open(data_list, 'r') as f:
            data_names = [x.strip() for x in f.readlines()]
        # Optionally pad the epoch with repeated head items so the final
        # batch is full.
        if (pad and ((len(data_names) % batch_size) > 0)):
            pad_num = (batch_size - (len(data_names) % batch_size))
            data_names = (data_names + data_names[:pad_num])
        self.image_src_list = [os.path.join(image_root, (x + '.jpg')) for x in data_names]
        # label_root may be None (e.g. inference); labels are then all None.
        self.label_src_list = ([os.path.join(label_root, (x + '.png')) for x in data_names] if (label_root is not None) else ([None] * len(data_names)))
        # Per-image class annotations used for group compatibility checks.
        self.ann_list = [VOC.get_annotation(os.path.join(annotation_root, (x + '.xml'))) for x in data_names]
        self.batch_size = batch_size
        self.group_size = group_size
        self.num_block = num_block
        # Number of groups assigned to each device block within a batch.
        self.meta_length = (self.batch_size // (self.num_block * self.group_size))
        self.target_size = target_size
        self.shuffle = shuffle
        self.rand_scale = rand_scale
        self.rand_mirror = rand_mirror
        self.rand_crop = rand_crop
        self.downsample = downsample
        scale_pool = [0.5, 0.75, 1, 1.25, 1.5]
        self.scale_sampler = (lambda : np.random.choice(scale_pool))
        self.index = list(range(len(data_names)))
        self.num_batch = (len(data_names) // self.batch_size)
        self.reset()
    def reset(self):
        """Start a new epoch: rewind the cursor, drop cached rejects,
        and reshuffle if requested."""
        self.index_pointer = 0
        self.cache = []
        if self.shuffle:
            np.random.shuffle(self.index)
    def pop(self):
        """Return the next candidate index, preferring previously rejected
        (cached) indices; raises StopIteration when the epoch is exhausted."""
        if (len(self.cache) > 0):
            index = self.cache.pop()
        elif (self.index_pointer < len(self.index)):
            index = self.index[self.index_pointer]
            self.index_pointer += 1
        else:
            raise StopIteration
        return index
    def is_ok(self, a, b):
        """True when images a and b share at least one annotated class.
        NOTE(review): compares the set difference size against the raw list
        length, so duplicate labels in lbl_a could make this True without a
        real overlap -- presumably annotation lists are duplicate-free;
        confirm against VOC.get_annotation.
        """
        lbl_a = self.ann_list[a]
        lbl_b = self.ann_list[b]
        return (len((set(lbl_a) - set(lbl_b))) < len(lbl_a))
    def next(self):
        """Assemble the next batch of class-compatible groups and load it."""
        indices = []
        while (len(indices) < (self.batch_size // self.group_size)):
            cache = []
            # Greedily grow one group; candidates incompatible with any
            # current member are set aside and retried for later groups.
            partners = [self.pop()]
            while (len(partners) < self.group_size):
                this = self.pop()
                while (not all([self.is_ok(prev, this) for prev in partners])):
                    cache.append(this)
                    this = self.pop()
                partners.append(this)
            indices.append(partners)
            # Rejected candidates go to the front so they are retried first.
            self.cache = (cache[::(- 1)] + self.cache)
        # Interleave the groups: within each chunk of meta_length groups the
        # k-th members of all groups are laid out consecutively, so each
        # device block receives aligned group members.
        indices = sum([sum(zip(*indices[i:(i + self.meta_length)]), tuple()) for i in range(0, len(indices), self.meta_length)], tuple())
        image_src_list = [self.image_src_list[i] for i in indices]
        label_src_list = [self.label_src_list[i] for i in indices]
        self.cache_image_src_list = image_src_list
        batch = load_batch_semantic(image_src_list, label_src_list, self.target_size, self.scale_sampler, self.rand_scale, self.rand_mirror, self.rand_crop, self.downsample)
        return batch
|
def resnet101_largefov_SA(x, num_cls, is_downsample=True, in_embed_type='conv', out_embed_type='convbn', sim_type='dot', use_global_stats_backbone=False, use_global_stats_affinity=False, lr_mult=10, reuse=None, **kwargs):
    """ResNet-101 large-FOV head with a self-affinity residual branch.

    Backbone features plus the self-affinity residual are classified by a
    dilated 3x3 convolution ('fc1') into num_cls score maps.
    """
    backbone = _Resnet(x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True,
                       use_global_stats=use_global_stats_backbone,
                       strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4),
                       lr_mult=1, reuse=reuse)
    residual = build_self_affinity(backbone, 1024, 2048, is_downsample,
                                   in_embed_type, out_embed_type, sim_type,
                                   use_global_stats_affinity, lr_mult, reuse)
    fused = backbone + residual
    return Conv(fused, num_cls, (3, 3), (1, 1), dilate=(12, 12), pad=(12, 12),
                no_bias=True, name='fc1', lr_mult=lr_mult, reuse=reuse)
|
def resnet101_largefov_CA(x, num_cls, is_downsample=True, in_embed_type='conv', out_embed_type='convbn', sim_type='dot', group_size=2, merge_type='max', merge_self=True, use_global_stats_backbone=False, use_global_stats_affinity=False, lr_mult=10, reuse=None):
    """ResNet-101 large-FOV head with cross-image (group) affinity.

    Returns (x_self, x_cross): per-class score maps classified from the
    self-affinity and cross-affinity residuals respectively; both branches
    use a dilated 3x3 classifier named 'fc1'.
    """
    x_raw = _Resnet(x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True, use_global_stats=use_global_stats_backbone, strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4), lr_mult=1, reuse=reuse)
    (x_res_self, x_res_cross) = build_cross_affinity(x_raw, 1024, 2048, is_downsample, in_embed_type, out_embed_type, sim_type, group_size, merge_type, merge_self, use_global_stats_affinity, lr_mult, reuse)
    x_self = (x_raw + x_res_self)
    x_self = Conv(x_self, num_cls, (3, 3), (1, 1), dilate=(12, 12), pad=(12, 12), no_bias=True, name='fc1', lr_mult=lr_mult, reuse=reuse)
    x_cross = (x_raw + x_res_cross)
    # NOTE(review): reuse=x_self passes a symbol (truthy) where other calls
    # pass a flag -- presumably to force weight sharing of 'fc1' between the
    # two branches; confirm how Conv interprets reuse here.
    x_cross = Conv(x_cross, num_cls, (3, 3), (1, 1), dilate=(12, 12), pad=(12, 12), no_bias=True, name='fc1', lr_mult=lr_mult, reuse=x_self)
    return (x_self, x_cross)
|
def resnet50_largefov_SA(x, num_cls, is_downsample=True, in_embed_type='conv', out_embed_type='convbn', sim_type='dot', use_global_stats_backbone=False, use_global_stats_affinity=False, lr_mult=10, reuse=None, **kwargs):
    """ResNet-50 large-FOV head with a self-affinity residual branch.

    Backbone features plus the self-affinity residual are classified by a
    dilated 3x3 convolution ('fc1') into num_cls score maps.
    """
    backbone = _Resnet(x, (3, 4, 6, 3), (64, 256, 512, 1024, 2048), True,
                       use_global_stats=use_global_stats_backbone,
                       strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4),
                       lr_mult=1, reuse=reuse)
    residual = build_self_affinity(backbone, 1024, 2048, is_downsample,
                                   in_embed_type, out_embed_type, sim_type,
                                   use_global_stats_affinity, lr_mult, reuse)
    fused = backbone + residual
    return Conv(fused, num_cls, (3, 3), (1, 1), dilate=(12, 12), pad=(12, 12),
                no_bias=True, name='fc1', lr_mult=lr_mult, reuse=reuse)
|
def resnet50_largefov_CA(x, num_cls, is_downsample=True, in_embed_type='conv', out_embed_type='convbn', sim_type='dot', group_size=2, merge_type='max', merge_self=True, use_global_stats_backbone=False, use_global_stats_affinity=False, lr_mult=10, reuse=None):
    """ResNet-50 large-FOV head with cross-image (group) affinity.

    Returns (x_self, x_cross): per-class score maps classified from the
    self-affinity and cross-affinity residuals respectively; both branches
    use a dilated 3x3 classifier named 'fc1'.
    """
    x_raw = _Resnet(x, (3, 4, 6, 3), (64, 256, 512, 1024, 2048), True, use_global_stats=use_global_stats_backbone, strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4), lr_mult=1, reuse=reuse)
    (x_res_self, x_res_cross) = build_cross_affinity(x_raw, 1024, 2048, is_downsample, in_embed_type, out_embed_type, sim_type, group_size, merge_type, merge_self, use_global_stats_affinity, lr_mult, reuse)
    x_self = (x_raw + x_res_self)
    x_self = Conv(x_self, num_cls, (3, 3), (1, 1), dilate=(12, 12), pad=(12, 12), no_bias=True, name='fc1', lr_mult=lr_mult, reuse=reuse)
    x_cross = (x_raw + x_res_cross)
    # NOTE(review): reuse=x_self passes a symbol (truthy) where other calls
    # pass a flag -- presumably to force weight sharing of 'fc1' between the
    # two branches; confirm how Conv interprets reuse here.
    x_cross = Conv(x_cross, num_cls, (3, 3), (1, 1), dilate=(12, 12), pad=(12, 12), no_bias=True, name='fc1', lr_mult=lr_mult, reuse=x_self)
    return (x_self, x_cross)
|
def in_embedding_conv(x_feat, num_filter_hidden, is_downsample=True, lr_mult=1, reuse=None):
    """Project features into query/key/value embeddings with 1x1 convs;
    when is_downsample, halve the spatial resolution of key and value."""
    query = Conv(x_feat, num_filter_hidden, (1, 1), no_bias=True, name='conv_embed_q', lr_mult=lr_mult, reuse=reuse)
    key = Conv(x_feat, num_filter_hidden, (1, 1), no_bias=True, name='conv_embed_k', lr_mult=lr_mult, reuse=reuse)
    value = Conv(x_feat, num_filter_hidden, (1, 1), no_bias=True, name='conv_embed_v', lr_mult=lr_mult, reuse=reuse)
    if not is_downsample:
        return (query, key, value)
    return (query,
            Pool(key, (3, 3), (2, 2), (1, 1)),
            Pool(value, (3, 3), (2, 2), (1, 1)))
|
def out_embedding_convbn(x_res, num_filter_out, use_global_stats=False, lr_mult=1, reuse=None):
    """Project the affinity residual to num_filter_out channels with a 1x1
    conv followed by batch norm."""
    projected = Conv(x_res, num_filter_out, (1, 1), no_bias=True, name='conv_out', lr_mult=lr_mult, reuse=reuse)
    return BN(projected, fix_gamma=False, use_global_stats=use_global_stats, name='bn_out', lr_mult=lr_mult, reuse=reuse)
|
def compute_sim_mat(x_key, x_query, sim_type):
    """Key-query similarity matrix: raw dot product, or cosine (channel-wise
    L2-normalised dot product). Raises ValueError for unknown sim_type."""
    if sim_type == 'dot':
        return mx.sym.batch_dot(x_key, x_query, transpose_a=True)
    if sim_type == 'cosine':
        key_norm = mx.sym.L2Normalization(x_key, mode='channel')
        query_norm = mx.sym.L2Normalization(x_query, mode='channel')
        return mx.sym.batch_dot(key_norm, query_norm, transpose_a=True)
    raise ValueError(sim_type)
|
def build_self_affinity(x_feat, num_filter_hidden, num_filter_out, is_downsample=True, in_embed_type='conv', out_embed_type='convbn', sim_type='dot', use_global_stats=False, lr_mult=1, reuse=None, return_internals=False):
    """Self-attention-style affinity block over a feature map.

    Projects x_feat into query/key/value embeddings, softmax-normalises the
    key-query similarity over the key axis, aggregates the values, and
    projects the result back to num_filter_out channels.

    Returns x_out, or the tuple (x_out, x_query, x_key, x_value, sim_mat,
    x_res) when return_internals is True (used by build_cross_affinity).
    """
    # Dispatch by name through the module namespace instead of eval():
    # same lookup, no arbitrary-code-execution surface, and a clear
    # KeyError on an unknown embedding type.
    get_embedding_in = globals()['in_embedding_' + in_embed_type]
    get_embedding_out = globals()['out_embedding_' + out_embed_type]
    (x_query, x_key, x_value) = get_embedding_in(x_feat, num_filter_hidden, is_downsample, lr_mult, reuse)
    # Flatten the trailing spatial dims into one axis for batched matmuls.
    x_query = mx.sym.reshape(x_query, (0, 0, (- 3)))
    x_key = mx.sym.reshape(x_key, (0, 0, (- 3)))
    x_value = mx.sym.reshape(x_value, (0, 0, (- 3)))
    sim_mat = compute_sim_mat(x_key, x_query, sim_type)
    # Normalise attention weights over the key positions.
    sim_mat = mx.sym.softmax(sim_mat, axis=1)
    x_res = mx.sym.batch_dot(x_value, sim_mat)
    # Restore the spatial layout of x_feat on the aggregated values.
    x_res = mx.sym.reshape_like(x_res, x_feat, lhs_begin=2, lhs_end=3, rhs_begin=2, rhs_end=4)
    x_out = get_embedding_out(x_res, num_filter_out, use_global_stats, lr_mult, reuse)
    if return_internals:
        return (x_out, x_query, x_key, x_value, sim_mat, x_res)
    return x_out
|
def build_cross_affinity(x_feat, num_filter_hidden, num_filter_out, is_downsample=True, in_embed_type='conv', out_embed_type='convbn', sim_type='dot', group_size=2, merge_type='max', merge_self=True, use_global_stats=False, lr_mult=1, reuse=None):
    """Cross-sample affinity: each sample's queries attend to the keys/values
    of the other members of its batch group, optionally merged with the
    self-affinity result.

    Parameters
    ----------
    group_size : int
        Number of chunks the batch is split into along axis 0; each chunk is
        rotated against the others, yielding group_size - 1 cross terms.
    merge_type : str
        'max' for element-wise maximum over the residuals, 'avg' for their
        mean.
    merge_self : bool
        When True, the self-affinity residual joins the merge.

    Returns
    -------
    (x_out_self, x_out_cross) : tuple of Symbol

    Raises
    ------
    ValueError
        If `merge_type` is neither 'max' nor 'avg'.
    """
    # Only the output embedding is needed here; the input embedding is built
    # inside build_self_affinity (the original also resolved -- and never
    # used -- get_embedding_in). Namespace lookup instead of eval().
    get_embedding_out = globals()['out_embedding_' + out_embed_type]
    (x_out_self, x_query, x_key, x_value, sim_mat_self, x_res_self) = build_self_affinity(x_feat, num_filter_hidden, num_filter_out, is_downsample, in_embed_type, out_embed_type, sim_type, use_global_stats, lr_mult, reuse, True)
    # Split the batch into group_size chunks so they can be rotated.
    x_key_sp = list(mx.sym.split(x_key, num_outputs=group_size, axis=0))
    x_value_sp = list(mx.sym.split(x_value, num_outputs=group_size, axis=0))
    x_res_list = []
    for i in range(group_size - 1):
        # Rotate key/value chunks by one so each sample attends to a
        # different group member on every iteration.
        x_key_sp = x_key_sp[1:] + x_key_sp[0:1]
        x_value_sp = x_value_sp[1:] + x_value_sp[0:1]
        x_key_roll = mx.sym.concat(*x_key_sp, dim=0)
        x_value_roll = mx.sym.concat(*x_value_sp, dim=0)
        sim_mat = compute_sim_mat(x_key_roll, x_query, sim_type)
        sim_mat = mx.sym.softmax(sim_mat, axis=1)
        x_res = mx.sym.batch_dot(x_value_roll, sim_mat)
        x_res = mx.sym.reshape_like(x_res, x_feat, lhs_begin=2, lhs_end=3, rhs_begin=2, rhs_end=4)
        x_res_list.append(x_res)
    if merge_self:
        x_res_list.append(x_res_self)
    if merge_type == 'max':
        x_res_cross = x_res_list[0]
        for x_res in x_res_list[1:]:
            x_res_cross = mx.sym.maximum(x_res_cross, x_res)
    elif merge_type == 'avg':
        x_res_cross = sum(x_res_list) / len(x_res_list)
    else:
        # Fail fast with the offending value instead of a confusing
        # NameError on x_res_cross below (original had no else branch).
        raise ValueError(merge_type)
    # The self-branch output symbol is passed as `reuse` so conv_out/bn_out
    # weights are shared between self and cross branches -- presumably
    # intentional weight tying; confirm against training code.
    x_out_cross = get_embedding_out(x_res_cross, num_filter_out, use_global_stats, lr_mult, x_out_self)
    return (x_out_self, x_out_cross)
|
def Convolution(data, num_filter, kernel, stride=None, dilate=None, pad=None, num_group=1, no_bias=False, weight=None, bias=None, name=None, lr_mult=1, reuse=None, **kwargs):
    """Convolution wrapper: auto-generated names, optional weight reuse from an
    existing symbol, and string-typed padding (e.g. pad='same').

    When `pad` is a string, `kwargs` must carry `input_size` (int or (h, w))
    so the possibly asymmetric pad amounts can be computed via
    `padding_helper` and applied with an explicit mx.sym.pad before the
    convolution. `reuse` is a symbol whose internals supply the weight/bias
    variables; it requires an explicit `name`.
    """
    if (reuse is not None):
        assert (name is not None)
    name = (GetLayerName.get('conv') if (name is None) else name)
    stride = (((1,) * len(kernel)) if (stride is None) else stride)
    dilate = (((1,) * len(kernel)) if (dilate is None) else dilate)
    if isinstance(pad, str):
        # String padding needs the spatial input size to compute pad amounts.
        input_size = kwargs.get('input_size', None)
        if (input_size is None):
            raise ValueError('`input_size` is needed for padding')
        del kwargs['input_size']
        if isinstance(input_size, int):
            in_size_h = in_size_w = input_size
        else:
            (in_size_h, in_size_w) = input_size
        (ph0, ph1) = padding_helper(in_size_h, kernel[0], stride[0], pad)
        (pw0, pw1) = padding_helper(in_size_w, kernel[1], stride[1], pad)
        # Pad explicitly, then use zero padding inside the convolution itself.
        data = mx.sym.pad(data, mode='constant', pad_width=(0, 0, 0, 0, ph0, ph1, pw0, pw1))
        pad = ((0,) * len(kernel))
    else:
        pad = (((0,) * len(kernel)) if (pad is None) else pad)
    # Any leftover kwargs are unsupported arguments; fail loudly.
    assert (len(kwargs) == 0), sorted(kwargs)
    W = (get_variable((name + '_weight'), lr_mult, reuse) if (weight is None) else weight)
    if no_bias:
        x = mx.sym.Convolution(data, num_filter=num_filter, kernel=kernel, stride=stride, dilate=dilate, pad=pad, num_group=num_group, no_bias=no_bias, name=(name if (reuse is None) else None), weight=W)
    else:
        B = (get_variable((name + '_bias'), lr_mult, reuse) if (bias is None) else bias)
        x = mx.sym.Convolution(data, num_filter=num_filter, kernel=kernel, stride=stride, dilate=dilate, pad=pad, num_group=num_group, no_bias=no_bias, name=(name if (reuse is None) else None), weight=W, bias=B)
    return x
|
def Deconvolution(data, num_filter, kernel, stride=None, dilate=None, pad=None, adj=None, target_shape=None, num_group=1, no_bias=False, weight=None, bias=None, name=None, lr_mult=1, reuse=None):
    """Transposed-convolution wrapper with auto naming and optional weight reuse."""
    if reuse is not None:
        assert name is not None
    if name is None:
        name = GetLayerName.get('deconv')
    ndim = len(kernel)
    if stride is None:
        stride = (1,) * ndim
    if dilate is None:
        dilate = (1,) * ndim
    if pad is None:
        pad = (0,) * ndim
    if adj is None:
        adj = (0,) * ndim
    if target_shape is None:
        target_shape = tuple([])
    if weight is None:
        weight = get_variable(name + '_weight', lr_mult, reuse)
    # Reused layers get anonymous ops: the parameters carry the identity.
    sym_name = name if reuse is None else None
    if no_bias:
        return mx.sym.Deconvolution(data, num_filter=num_filter, kernel=kernel, stride=stride, dilate=dilate, pad=pad, adj=adj, target_shape=target_shape, num_group=num_group, no_bias=no_bias, name=sym_name, weight=weight)
    if bias is None:
        bias = get_variable(name + '_bias', lr_mult, reuse)
    return mx.sym.Deconvolution(data, num_filter=num_filter, kernel=kernel, stride=stride, dilate=dilate, pad=pad, adj=adj, target_shape=target_shape, num_group=num_group, no_bias=no_bias, name=sym_name, weight=weight, bias=bias)
|
def FullyConnected(data, num_hidden, flatten=True, no_bias=False, weight=None, bias=None, name=None, lr_mult=1, reuse=None):
    """Fully-connected wrapper with auto naming and optional weight reuse."""
    if reuse is not None:
        assert name is not None
    if name is None:
        name = GetLayerName.get('fc')
    if weight is None:
        weight = get_variable(name + '_weight', lr_mult, reuse)
    sym_name = name if reuse is None else None
    if no_bias:
        return mx.sym.FullyConnected(data, num_hidden=num_hidden, flatten=flatten, no_bias=no_bias, weight=weight, name=sym_name)
    if bias is None:
        bias = get_variable(name + '_bias', lr_mult, reuse)
    return mx.sym.FullyConnected(data, num_hidden=num_hidden, flatten=flatten, no_bias=no_bias, weight=weight, bias=bias, name=sym_name)
|
def Relu(data, name=None):
    """ReLU activation; generates a unique name when none is supplied."""
    if name is None:
        name = GetLayerName.get('relu')
    return mx.sym.Activation(data, act_type='relu', name=name)
|
def LeakyRelu(data, slope=0.25, name=None):
    """Leaky ReLU with configurable negative-side slope."""
    if name is None:
        name = GetLayerName.get('leakyRelu')
    return mx.sym.LeakyReLU(data, slope=slope, act_type='leaky', name=name)
|
def Tanh(data, name=None):
    """Hyperbolic-tangent activation with auto naming."""
    if name is None:
        name = GetLayerName.get('tanh')
    return mx.sym.tanh(data, name=name)
|
def Swish(data, name=None):
    """Swish activation: x * sigmoid(x).

    NOTE(review): the generated name is never attached to the output symbol;
    only the GetLayerName counter side effect is kept, matching the original.
    """
    if name is None:
        name = GetLayerName.get('swish')
    return data * mx.sym.sigmoid(data)
|
def Pooling(data, kernel, stride=None, pad=None, pool_type='max', global_pool=False, name=None):
    """Pooling wrapper; stride defaults to the kernel size (non-overlapping)."""
    if name is None:
        name = GetLayerName.get('pool')
    if stride is None:
        stride = kernel
    if pad is None:
        pad = (0,) * len(kernel)
    return mx.sym.Pooling(data, kernel=kernel, stride=stride, pad=pad, pool_type=pool_type, global_pool=global_pool, name=name)
|
def Dropout(data, p, name=None):
    """Dropout with probability p and auto naming."""
    if name is None:
        name = GetLayerName.get('drop')
    return mx.sym.Dropout(data, p=p, name=name)
|
def BatchNorm(data, fix_gamma=False, momentum=0.9, eps=1e-05, use_global_stats=False, gamma=None, beta=None, moving_mean=None, moving_var=None, name=None, lr_mult=1, reuse=None):
    """Batch-norm wrapper: auto naming, optional reuse of all four parameters.

    Moving statistics are created with lr_mult=1 since they are updated by
    running averages rather than gradients.
    """
    if reuse is not None:
        assert name is not None
    if name is None:
        name = GetLayerName.get('bn')
    if gamma is None:
        gamma = get_variable(name + '_gamma', lr_mult, reuse)
    if beta is None:
        beta = get_variable(name + '_beta', lr_mult, reuse)
    if moving_mean is None:
        moving_mean = get_variable(name + '_moving_mean', 1, reuse)
    if moving_var is None:
        moving_var = get_variable(name + '_moving_var', 1, reuse)
    sym_name = name if reuse is None else None
    return mx.sym.BatchNorm(data, fix_gamma=fix_gamma, momentum=momentum, eps=eps, use_global_stats=use_global_stats, gamma=gamma, beta=beta, moving_mean=moving_mean, moving_var=moving_var, name=sym_name)
|
def InstanceNorm(data, eps=1e-05, gamma=None, beta=None, name=None, lr_mult=1, reuse=None):
    """Instance-norm wrapper with auto naming and optional parameter reuse."""
    if reuse is not None:
        assert name is not None
    if name is None:
        name = GetLayerName.get('in')
    if gamma is None:
        gamma = get_variable(name + '_gamma', lr_mult, reuse)
    if beta is None:
        beta = get_variable(name + '_beta', lr_mult, reuse)
    sym_name = name if reuse is None else None
    return mx.sym.InstanceNorm(data, eps=eps, gamma=gamma, beta=beta, name=sym_name)
|
def Flatten(data, name=None):
    """Flatten all dims after the batch axis, with auto naming."""
    if name is None:
        name = GetLayerName.get('flatten')
    return mx.sym.flatten(data, name=name)
|
def ConvRelu(*args, **kwargs):
    """Convolution followed by a ReLU named after the conv output."""
    out = Conv(*args, **kwargs)
    return Relu(out, out.name + '_relu')
|
def BNRelu(*args, **kwargs):
    """Batch norm followed by a ReLU named after the BN output."""
    out = BN(*args, **kwargs)
    return Relu(out, out.name + '_relu')
|
def FCRelu(*args, **kwargs):
    """Fully-connected layer followed by a ReLU named after the FC output."""
    out = FC(*args, **kwargs)
    return Relu(out, out.name + '_relu')
|
def ConvBNRelu(*args, **kwargs):
    """Conv -> BN -> ReLU; lr_mult/reuse kwargs are forwarded into the BN."""
    out = Conv(*args, **kwargs)
    out = BN(out, name=out.name + '_bn', lr_mult=kwargs.get('lr_mult', 1), reuse=kwargs.get('reuse', None))
    return Relu(out, out.name + '_relu')
|
def get_variable(name, lr_mult=1, reuse=None):
    """Create a fresh Variable symbol, or fetch the same-named one from a
    reused symbol's internals (lr_mult only applies to fresh variables)."""
    if reuse is not None:
        return reuse.get_internals()[name]
    return mx.sym.Variable(name, lr_mult=lr_mult)
|
class GetLayerName(object):
    """Process-wide generator of unique layer names per prefix ('conv0', 'conv1', ...)."""

    # Shared prefix -> next-index counters.
    _name_count = {}

    @classmethod
    def get(cls, name_prefix):
        """Return name_prefix + a 0-based running index, bumping the counter."""
        index = cls._name_count.setdefault(name_prefix, 0)
        cls._name_count[name_prefix] = index + 1
        return name_prefix + str(index)
|
def padding_helper(in_size, kernel_size, stride, pad_type='same'):
    """Compute (pad_before, pad_after) for TF-style 'same' padding.

    The output size is ceil(in_size / stride); the total pad is whatever is
    needed to achieve it (never negative), split with the extra pixel on the
    trailing side. Raises ValueError for any pad_type other than 'same'.
    """
    pad_type = pad_type.lower()
    if pad_type != 'same':
        raise ValueError(pad_type)
    out_size = -(-in_size // stride)  # ceiling division
    total_pad = max((out_size - 1) * stride + kernel_size - in_size, 0)
    before = total_pad // 2
    return (before, total_pad - before)
|
class OpConstant(mx.operator.CustomOp):
    """Custom op that emits a fixed tensor and propagates no gradient."""

    def __init__(self, val):
        # Payload copied into the output on every forward pass.
        self.val = val

    def forward(self, is_train, req, in_data, out_data, aux):
        """Write the stored constant to the single output."""
        self.assign(out_data[0], req[0], self.val)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        """No inputs, hence nothing to differentiate."""
        pass
|
@mx.operator.register('Constant')
class OpConstantProp(mx.operator.CustomOpProp):
    """Registration/inference glue for the Constant custom op.

    The constant arrives as comma-separated value and shape strings (the
    custom-op protocol only passes strings) and is decoded into an NDArray.
    """

    def __init__(self, val_str, shape_str, type_str='float32'):
        super(OpConstantProp, self).__init__(need_top_grad=False)
        values = [float(v) for v in val_str.split(',')]
        dims = [int(d) for d in shape_str.split(',')]
        self.val = mx.nd.array(values, dtype=type_str).reshape(dims)

    def list_arguments(self):
        """The op consumes no inputs."""
        return []

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        # Output shape is fixed by the decoded constant; no aux states.
        return (in_shape, [self.val.shape], [])

    def infer_type(self, in_type):
        return (in_type, [self.val.dtype], [])

    def create_operator(self, ctx, shapes, dtypes):
        # Move the constant to the execution context up front.
        return OpConstant(self.val.as_in_context(ctx))
|
def CustomConstantEncoder(value, dtype='float32'):
    """Serialize a scalar, sequence, or ndarray into the (values_csv,
    shape_csv) string pair expected by the Constant custom op."""
    if not isinstance(value, np.ndarray):
        # Promote scalars to a 1-element list, then to an ndarray of `dtype`.
        if not isinstance(value, (list, tuple)):
            value = [value]
        value = np.array(value, dtype=dtype)
    flat = ','.join(str(v) for v in value.ravel())
    dims = ','.join(str(d) for d in value.shape)
    return (flat, dims)
|
def Constant(value, dtype='float32'):
    """Build a Constant custom-op symbol from a python value."""
    assert isinstance(dtype, str), dtype
    val_str, shape_str = CustomConstantEncoder(value, dtype)
    return mx.sym.Custom(val_str=val_str, shape_str=shape_str, type_str=dtype, op_type='Constant')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.