dianecy committed (verified)
Commit 4b408c6 · 1 Parent(s): 0bc00f1

Upload ./ASDA/train_gref_selffilter.py with huggingface_hub

Files changed (1)
  1. ASDA/train_gref_selffilter.py +293 -0
ASDA/train_gref_selffilter.py ADDED
@@ -0,0 +1,293 @@
+ import os
+ import sys
+ import argparse
+ import random
+ import datetime
+ import matplotlib as mpl
+ mpl.use('Agg')
+
+ import numpy as np
+ import torch
+ import torch.nn.parallel
+ import torch.backends.cudnn as cudnn
+ import torch.distributed as dist
+ import torch.optim
+ import torch.utils.data.distributed
+ from torch.utils.data import DataLoader
+ from torchvision.transforms import Compose, ToTensor, Normalize
+
+ import torch.multiprocessing as mp
+ from torch.nn.parallel import DistributedDataParallel as DDP
+
+ from tensorboardX import SummaryWriter
+
+ # import apex.amp as amp
+ from torch.cuda.amp import autocast, GradScaler
+
+ from model.model_ca_fuser import Model_CAFilter
+ from model.model_ca_fuser import *
+ from engine.engine_gref_selffilter import *
+
+ from dataset.data_loader_gref_sbert import *
+ from utils.losses import *
+ from utils.parsing_metrics import *
+ from utils.utils import *
+ from utils.checkpoint import save_checkpoint, load_pretrain, load_resume
+ from utils.logger import setup_logger
+
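+ # Training entry point: parses args (get_args), then spawns one DDP process per GPU
+ # (main -> run); mixed precision is handled by torch.cuda.amp autocast/GradScaler.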
+ def get_args():
+     parser = argparse.ArgumentParser(description='Train gRefCOCO referring segmentation with self-filtering')
+     parser.add_argument('--gpu', default='2', help='gpu id')
+     parser.add_argument('--ngpu', default=2, type=int, help='number of gpus')
+     parser.add_argument('--workers', default=4, type=int, help='num workers for data loading')
+     parser.add_argument('--seed', default=0, type=int, help='random seed')
+
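+     # model and schedule hyper-parameters; the base --lr assumes a global batch of 16
+     # and is rescaled for the actual batch size and GPU count further below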
+     parser.add_argument('--clip_model', default='ViT-B/16', type=str, help='CLIP model: RN50 / RN101 / ViT-B/32 / ViT-B/16')
+     parser.add_argument('--nb_epoch', default=32, type=int, help='number of training epochs')
+     parser.add_argument('--lr', default=0.000025, type=float, help='learning rate for batch size 16')
+     parser.add_argument('--power', default=0.1, type=float, help='lr poly power')
+     parser.add_argument('--steps', default=[18, 28], type=int, nargs='+', help='epochs at which the lr decays by power')
+     parser.add_argument('--batch_size', default=16, type=int, help='batch size')
+     parser.add_argument('--size', default=416, type=int, help='image size')
+     parser.add_argument('--dataset', default='grefcoco', type=str,
+                         help='refcoco/refcoco+/refcocog/grefcoco')
+
+     parser.add_argument('--splitBy', default='umd', type=str,
+                         help='unc/umd/google')
+
+     parser.add_argument('--num_query', default=16, type=int, help='number of queries')
+     parser.add_argument('--w_seg', default=0.1, type=float, help='weight of the seg loss')
+     parser.add_argument('--w_coord', default=5, type=float, help='weight of the reg loss')
+     parser.add_argument('--tunelang', dest='tunelang', default=True, action='store_true', help='whether to finetune the language model')
+     parser.add_argument('--anchor_imsize', default=416, type=int,
+                         help='scale used to calculate anchors defined in the model cfg file')
+     parser.add_argument('--data_root', type=str, default='./ln_data',
+                         help='path to the ReferIt splits data folder')
+     parser.add_argument('--split_root', type=str, default='./data',
+                         help='location of pre-parsed dataset info')
+     parser.add_argument('--time', default=17, type=int,
+                         help='maximum time steps (lang length) per batch')
+     parser.add_argument('--log_dir', type=str, default='./logs',
+                         help='path to the log folder')
+
+     parser.add_argument('--fusion_dim', default=768, type=int,
+                         help='fusion module embedding dimensions')
+     parser.add_argument('--resume', default='', type=str, metavar='PATH',
+                         help='path to the latest checkpoint (default: none)')
+     parser.add_argument('--pretrain', default='', type=str, metavar='PATH',
+                         help='pretrain supports loading a non-identical state_dict, but unlike resume has no saved loss')
+     parser.add_argument('--print_freq', '-p', default=100, type=int,
+                         metavar='N', help='print frequency (default: 100)')
+     parser.add_argument('--savename', default='default', type=str, help='name head for the saved model')
+
+     parser.add_argument('--seg_thresh', default=0.35, type=float, help='seg scores above this value count as foreground')
+     parser.add_argument('--seg_out_stride', default=2, type=int, help='the seg output stride')
+     parser.add_argument('--best_iou', default=-float('Inf'), type=float, help='the best accuracy so far')
+
+     # metric-loss related options
+     parser.add_argument('--use_projections', action='store_true', help='whether to use projections in the metric loss')
+     parser.add_argument('--metric_learning', action='store_true', help='whether to use metric learning')
+     parser.add_argument('--metric_loss_weight', default=0.1, type=float, help='weight for the metric loss')
+     parser.add_argument('--metric_mode', default='hardpos_rev3', help='metric-learning mode')
+     # always pass --exclude_pos
+     parser.add_argument('--exclude_pos', action='store_true', help='exclude images with over 2 objects or positional phrases')
+     parser.add_argument('--exclude_multiobj', action='store_true', help='exclude multi-object images')
+     parser.add_argument('--hp_selection', default='strict', help='hard-positive selection mode')
+     parser.add_argument('--margin_value', default=10, type=float, help='margin for the metric loss')
+     parser.add_argument('--temperature', default=0.05, type=float, help='temperature for the metric loss')
+     parser.add_argument('--filter_thres', default=0.5, type=float, help='SBERT similarity threshold')
+     parser.add_argument('--fuse_mode', default='fine')
+
+     # new arguments for CA-feature based self-filtering
+     parser.add_argument('--metric_tensor_option', default='use_fuser', help='metric tensor (fusing) option: "plain", "use_multiple", "use_fuser"')
+     parser.add_argument('--self_filter', action='store_true', help='enable self-filtering')
+     parser.add_argument('--self_fthres', default=0.5, type=float, help='self-filtering similarity threshold')
+
+     # parser.add_argument('--addzero', action='store_true', help='test options..')
+     # parser.add_argument('--get_all_verbs', action='store_true', help='test options..')
+
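+     # derived fields: grid size, run date, default save name, and linear LR scaling
+     # by the effective global batch size (batch_size * ngpu, relative to 16)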
+     global args, anchors_full, writer, logger
+     args = parser.parse_args()
+     args.gsize = 32
+     args.date = datetime.datetime.now().strftime('%Y%m%d')
+     if args.savename == 'default':
+         args.savename = 'model_v1_%s_batch%d_%s' % (args.dataset, args.batch_size, args.date)
+     os.makedirs(args.log_dir, exist_ok=True)
+     args.lr = round(args.lr * (args.batch_size * args.ngpu / 16), 6)
+     print('----------------------------------------------------------------------')
+     print(sys.argv[0])
+     print(args)
+     print('----------------------------------------------------------------------')
+
+     return args
+
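+ # Spawn one worker process per visible GPU; each worker executes run(rank, ...).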
+ def main(args):
+     os.environ['MASTER_ADDR'] = 'localhost'
+     # env:// rendezvous requires MASTER_PORT as well; keep it overridable from the shell
+     os.environ['MASTER_PORT'] = os.environ.get('MASTER_PORT', '12356')
+
+     if torch.cuda.is_available():
+         n_gpus = torch.cuda.device_count()
+         print("Running DDP with {} GPUs".format(n_gpus))
+         mp.spawn(run, nprocs=n_gpus, args=(n_gpus, args,))
+     else:
+         print("Please use a GPU for training")
+
+ def run(rank, n_gpus, args):
+     dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
+     torch.cuda.set_device(rank)
+
+     ## fix seeds for reproducibility
+     cudnn.benchmark = False
+     cudnn.deterministic = True
+     random.seed(args.seed)
+     np.random.seed(args.seed + 1)
+     torch.manual_seed(args.seed + 2)
+     torch.cuda.manual_seed_all(args.seed + 3)
+
+     ## save logs
+     logger = setup_logger(output=os.path.join(args.log_dir, args.savename), distributed_rank=rank, color=False, name="model-v1")
+
+     logger.info(str(sys.argv))
+     logger.info(str(args))
+     if rank == 0:
+         writer = SummaryWriter(comment=args.savename)
+
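+     # CLIP pixel normalization statistics, matching the CLIP visual backbone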
+     input_transform = Compose([
+         ToTensor(),
+         Normalize(
+             mean=[0.48145466, 0.4578275, 0.40821073],
+             std=[0.26862954, 0.26130258, 0.27577711]
+         )
+     ])
+
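+     # Training data is sharded across ranks by DistributedSampler, which also
+     # shuffles per epoch, so the DataLoader itself keeps shuffle=False.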
+     train_dataset = ReferDataset(data_root=args.data_root,
+                                  dataset=args.dataset,
+                                  split_root=args.split_root,
+                                  split='train',
+                                  splitby=args.splitBy,
+                                  imsize=args.size,
+                                  transform=input_transform,
+                                  max_query_len=args.time,
+                                  augment=True,
+                                  metric_learning=args.metric_learning)
+
+     train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, num_replicas=n_gpus, rank=rank, shuffle=True)
+
+     train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=False,
+                               pin_memory=True, drop_last=True, num_workers=args.workers, sampler=train_sampler)
+
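+     # Validation runs on rank 0 only, one sample at a time.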
+     if rank == 0:
+         val_dataset = ReferDataset(data_root=args.data_root,
+                                    dataset=args.dataset,
+                                    split_root=args.split_root,
+                                    split='val',
+                                    splitby=args.splitBy,
+                                    imsize=args.size,
+                                    transform=input_transform,
+                                    max_query_len=args.time)
+
+         val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False,
+                                 pin_memory=True, drop_last=True, num_workers=args.workers)
+
+     ## Model
+
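+     # Wrap the per-rank model in DDP; find_unused_parameters=True tolerates optional
+     # branches that may not contribute to every forward pass.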
+     model = Model_CAFilter(clip_model=args.clip_model, tunelang=args.tunelang, num_query=args.num_query,
+                            fusion_dim=args.fusion_dim, fuse_mode=args.fuse_mode,
+                            metric_tensor_option=args.metric_tensor_option,
+                            use_projections=args.use_projections).cuda(rank)
+     model = DDP(model, device_ids=[rank], find_unused_parameters=True)
+     model_without_ddp = model.module
+
+     args.start_epoch = 0
+     if args.pretrain and os.path.isfile(args.pretrain):
+         model = load_pretrain(model, args, logger, rank)
+         model.to(rank)
+
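+     # Split parameters into visual-backbone, text-encoder, and fusion groups so the
+     # pretrained encoders can train with a 10x smaller learning rate below.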
+     visu_param = [param for name, param in model_without_ddp.named_parameters() if 'visumodel' in name]
+     text_param = [param for name, param in model_without_ddp.named_parameters() if 'textmodel' in name]
+     rest_param = [param for name, param in model_without_ddp.named_parameters() if 'textmodel' not in name and 'visumodel' not in name]
+
+     sum_visu = sum(param.nelement() for param in visu_param)
+     sum_text = sum(param.nelement() for param in text_param)
+     sum_fusion = sum(param.nelement() for param in rest_param)
+     if rank == 0:
+         print('Num of parameters:', sum(param.nelement() for param in model_without_ddp.parameters()))
+         logger.info('Num of parameters: %d' % int(sum(param.nelement() for param in model_without_ddp.parameters())))
+         print('visu, text, fusion module parameters:', sum_visu, sum_text, sum_fusion)
+
+     ## optimizer; Adam by default
+     if args.tunelang:
+         optimizer = torch.optim.Adam([{'params': rest_param, 'lr': args.lr},
+                                       {'params': visu_param, 'lr': args.lr / 10.},
+                                       {'params': text_param, 'lr': args.lr / 10.}])
+     else:
+         optimizer = torch.optim.Adam([{'params': rest_param},
+                                       {'params': visu_param, 'lr': args.lr / 10.}], lr=args.lr)
+
+     # mixed-precision loss scaler
+     scaler = GradScaler()
+
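+     # Track the best mean IoU and overall IoU; the best-oIoU weights are saved
+     # under a separate filename.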
+     best_miou_seg = -float('Inf')
+     best_oiou_seg = -float('Inf')
+     best_oiou_filename = args.savename + '_best_oiou'
+
+     if args.resume:
+         model = load_resume(model, optimizer, args, logger, rank)
+         model.to(rank)
+         best_miou_seg = args.best_iou
+         print(best_miou_seg)
+
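+     # Main loop: train every epoch; run validation at epoch 0 and after epoch 8.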
+     for epoch in range(args.start_epoch, args.nb_epoch):
+         adjust_learning_rate(args, optimizer, epoch)
+         loss = train_epoch(rank, args, train_loader, model, optimizer, epoch, scaler, logger)
+         if rank == 0:
+             writer.add_scalar('loss', loss, global_step=epoch)
+             miou_seg = 0
+             oiou_seg = 0  # keep defined on epochs where validation is skipped
+             if epoch == 0 or epoch > 8:
+                 miou_seg, oiou_seg, prec = validate_epoch(args, val_loader, model, logger, 'Val')
+                 writer.add_scalar('miou_seg', miou_seg, global_step=epoch)
+                 writer.add_scalar('oiou_seg', oiou_seg, global_step=epoch)
+                 thresholds = np.arange(0.5, 1, 0.05)
+                 for thresh in thresholds:
+                     writer.add_scalar('prec@%f' % thresh, prec[thresh].avg, global_step=epoch)
+
+             ## remember the best accuracy and save checkpoints
+             is_best = miou_seg > best_miou_seg
+             is_best_oiou = oiou_seg > best_oiou_seg
+             best_miou_seg = max(miou_seg, best_miou_seg)
+             best_oiou_seg = max(oiou_seg, best_oiou_seg)
+
+             save_checkpoint({
+                 'epoch': epoch + 1,
+                 'state_dict': model.module.state_dict(),
+                 'best_iou': best_miou_seg,
+                 'best_oiou': best_oiou_seg,
+                 'optimizer': optimizer.state_dict(),
+             }, is_best, args, filename=args.savename)
+
+             if is_best_oiou:
+                 save_checkpoint({
+                     'epoch': epoch + 1,
+                     'state_dict': model.module.state_dict(),
+                     'best_iou': best_miou_seg,
+                     'best_oiou': best_oiou_seg,
+                     'optimizer': optimizer.state_dict(),
+                 }, is_best_oiou, args=args, filename=best_oiou_filename)
+
+     if rank == 0:
+         print('\nBest Accuracy: %f\n' % best_miou_seg)
+         logger.info('\nBest Accuracy: %f\n' % best_miou_seg)
+
+ if __name__ == "__main__":
+     args = get_args()
+     main(args)
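+ # Example launch (hypothetical setup: two visible GPUs and a free MASTER_PORT):
+ #   CUDA_VISIBLE_DEVICES=0,1 python ASDA/train_gref_selffilter.py \
+ #       --dataset grefcoco --splitBy umd --batch_size 16 --ngpu 2 --self_filter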