theachyuttiwari committed
Commit 5bb64ce · 1 Parent(s): 80a1730

Upload run_retriever_no_trainer.py

Files changed (1)
  1. run_retriever_no_trainer.py +381 -0
run_retriever_no_trainer.py ADDED
@@ -0,0 +1,381 @@
import argparse
import functools
import logging
import math
from random import choice, randint

import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from datasets import load_dataset
from torch.utils import checkpoint
from torch.utils.data import Dataset, RandomSampler, DataLoader, SequentialSampler
from tqdm.auto import tqdm
from transformers import get_scheduler, AutoTokenizer, AdamW, SchedulerType, AutoModel

logger = logging.getLogger(__name__)


def get_parser():
    parser = argparse.ArgumentParser(description="Train ELI5 retriever")
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="vblagoje/lfqa",
        help="The name of the dataset to use (via the datasets library).",
    )
    parser.add_argument(
        "--per_device_train_batch_size",
        type=int,
        default=1024,
        help="Batch size (per device) for the training dataloader.",
    )
    parser.add_argument(
        "--per_device_eval_batch_size",
        type=int,
        default=1024,
        help="Batch size (per device) for the evaluation dataloader.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=128,
        help="Maximum tokenized length for questions and answers.",
    )
    parser.add_argument(
        "--checkpoint_batch_size",
        type=int,
        default=32,
        help="Mini-batch size used inside gradient checkpointing (-1 disables checkpointing).",
    )
    parser.add_argument(
        "--pretrained_model_name",
        type=str,
        default="google/bert_uncased_L-8_H-768_A-12",
    )
    parser.add_argument(
        "--model_save_name",
        type=str,
        default="eli5_retriever_model_l-12_h-768_b-512-512",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=2e-4,
    )
    parser.add_argument(
        "--weight_decay",
        type=float,
        default=0.2,
    )
    parser.add_argument(
        "--log_freq",
        type=int,
        default=500,
        help="Log train/validation loss every log_freq update steps",
    )
    parser.add_argument(
        "--num_train_epochs",
        type=int,
        default=4,
    )
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--lr_scheduler_type",
        type=SchedulerType,
        default="linear",  # this is linear with warmup
        help="The scheduler type to use.",
        choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
    )
    parser.add_argument(
        "--num_warmup_steps",
        type=int,
        default=100,
        help="Number of steps for the warmup in the lr scheduler.",
    )
    parser.add_argument(
        "--warmup_percentage",
        type=float,
        default=0.08,
        help="Fraction of max_train_steps used for warmup when --num_warmup_steps is 0.",
    )
    return parser


class RetrievalQAEmbedder(torch.nn.Module):
    def __init__(self, sent_encoder):
        super(RetrievalQAEmbedder, self).__init__()
        dim = sent_encoder.config.hidden_size
        self.bert_query = sent_encoder
        self.output_dim = 128
        self.project_query = torch.nn.Linear(dim, self.output_dim, bias=False)
        self.project_doc = torch.nn.Linear(dim, self.output_dim, bias=False)
        self.ce_loss = torch.nn.CrossEntropyLoss(reduction="mean")

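    # Memory-saving scheme used below: the embedding layer runs on the full batch,
    # then the encoder and pooler run on checkpoint_batch_size-sized slices under
    # torch.utils.checkpoint, so activations are recomputed in the backward pass
    # instead of being stored for the whole batch.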
    def embed_sentences_checkpointed(self, input_ids, attention_mask, checkpoint_batch_size=-1):
        # reproduces BERT forward pass with checkpointing
        if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size:
            return self.bert_query(input_ids, attention_mask=attention_mask)[1]
        else:
            # prepare implicit variables
            device = input_ids.device
            input_shape = input_ids.size()
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
            head_mask = [None] * self.bert_query.config.num_hidden_layers
            extended_attention_mask: torch.Tensor = self.bert_query.get_extended_attention_mask(
                attention_mask, input_shape, device
            )

            # define function for checkpointing
            def partial_encode(*inputs):
                encoder_outputs = self.bert_query.encoder(inputs[0], attention_mask=inputs[1], head_mask=head_mask)
                sequence_output = encoder_outputs[0]
                pooled_output = self.bert_query.pooler(sequence_output)
                return pooled_output

            # run embedding layer on everything at once
            embedding_output = self.bert_query.embeddings(
                input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None
            )
            # run encoding and pooling on one mini-batch at a time
            pooled_output_list = []
            for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)):
                b_embedding_output = embedding_output[b * checkpoint_batch_size: (b + 1) * checkpoint_batch_size]
                b_attention_mask = extended_attention_mask[b * checkpoint_batch_size: (b + 1) * checkpoint_batch_size]
                pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask)
                pooled_output_list.append(pooled_output)
            return torch.cat(pooled_output_list, dim=0)

    def embed_questions(self, q_ids, q_mask, checkpoint_batch_size=-1):
        q_reps = self.embed_sentences_checkpointed(q_ids, q_mask, checkpoint_batch_size)
        return self.project_query(q_reps)

    def embed_answers(self, a_ids, a_mask, checkpoint_batch_size=-1):
        a_reps = self.embed_sentences_checkpointed(a_ids, a_mask, checkpoint_batch_size)
        return self.project_doc(a_reps)

    def forward(self, q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=-1):
        device = q_ids.device
        q_reps = self.embed_questions(q_ids, q_mask, checkpoint_batch_size)
        a_reps = self.embed_answers(a_ids, a_mask, checkpoint_batch_size)
        compare_scores = torch.mm(q_reps, a_reps.t())
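        # compare_scores is (num questions x num answers); matching pairs sit on
        # the diagonal, so cross-entropy against arange labels treats every other
        # answer (or question) in the batch as an in-batch negative.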
        loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device))
        loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device))
        loss = (loss_qa + loss_aq) / 2
        return loss


class ELI5DatasetQARetriever(Dataset):
    def __init__(self, examples_array, extra_answer_threshold=3, min_answer_length=64, training=True, n_samples=None):
        self.data = examples_array
        self.answer_thres = extra_answer_threshold
        self.min_length = min_answer_length
        self.training = training
        self.n_samples = self.data.num_rows if n_samples is None else n_samples

    def __len__(self):
        return self.n_samples

    def make_example(self, idx):
        example = self.data[idx]
        question = example["title"]
        if self.training:
            # scores are zipped alongside the answer texts, but no score threshold is applied here
            answers = [a for a, sc in zip(example["answers"]["text"], example["answers"]["score"])]
            answer_tab = choice(answers).split(" ")
            start_idx = randint(0, max(0, len(answer_tab) - self.min_length))
            answer_span = " ".join(answer_tab[start_idx:])
        else:
            answer_span = example["answers"]["text"][0]
        return question, answer_span

    def __getitem__(self, idx):
        return self.make_example(idx % self.data.num_rows)


def make_qa_retriever_batch(qa_list, tokenizer, max_len=64):
    q_ls = [q for q, a in qa_list]
    a_ls = [a for q, a in qa_list]
    q_toks = tokenizer(q_ls, padding="max_length", max_length=max_len, truncation=True)
    q_ids, q_mask = (
        torch.LongTensor(q_toks["input_ids"]),
        torch.LongTensor(q_toks["attention_mask"]),
    )
    a_toks = tokenizer(a_ls, padding="max_length", max_length=max_len, truncation=True)
    a_ids, a_mask = (
        torch.LongTensor(a_toks["input_ids"]),
        torch.LongTensor(a_toks["attention_mask"]),
    )
    return q_ids, q_mask, a_ids, a_mask
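
# Each collated batch is four LongTensors of shape (batch_size, max_len):
# question input ids and attention mask, then answer input ids and attention mask.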


def evaluate_qa_retriever(model, data_loader):
    # make iterator
    epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True)
    tot_loss = 0.0
    with torch.no_grad():
        for step, batch in enumerate(epoch_iterator):
            q_ids, q_mask, a_ids, a_mask = batch
            loss = model(q_ids, q_mask, a_ids, a_mask)
            tot_loss += loss.item()
    return tot_loss / (step + 1)


def train(config):
    set_seed(42)
    args = config["args"]
    data_files = {"train": "train.json", "validation": "validation.json", "test": "test.json"}
    eli5 = load_dataset(args.dataset_name, data_files=data_files)

    # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    logger.info(accelerator.state)

    # prepare torch Dataset objects
    train_dataset = ELI5DatasetQARetriever(eli5['train'], training=True)
    valid_dataset = ELI5DatasetQARetriever(eli5['validation'], training=False)

    tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_name)
    base_model = AutoModel.from_pretrained(args.pretrained_model_name)

    model = RetrievalQAEmbedder(base_model)
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
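    # bias and LayerNorm weights are excluded from weight decay above, a common
    # convention when fine-tuning transformer encoders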
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, weight_decay=args.weight_decay)

    model_collate_fn = functools.partial(make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length)
    train_dataloader = DataLoader(train_dataset, batch_size=args.per_device_train_batch_size,
                                  sampler=RandomSampler(train_dataset), collate_fn=model_collate_fn)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.per_device_eval_batch_size,
                                 sampler=SequentialSampler(valid_dataset), collate_fn=model_collate_fn)

    # train the model
    model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(model, optimizer,
                                                                              train_dataloader, eval_dataloader)
    # Scheduler and math around the number of training steps.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    else:
        args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    num_warmup_steps = args.num_warmup_steps if args.num_warmup_steps else math.ceil(args.max_train_steps *
                                                                                     args.warmup_percentage)
    scheduler = get_scheduler(
        name=args.lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=args.max_train_steps,
    )

    # Train!
    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.per_device_train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")
    logger.info(f"  Warmup steps = {num_warmup_steps}")
    logger.info(f"  Logging training progress every {args.log_freq} optimization steps")

    loc_loss = 0.0
    current_loss = 0.0
    checkpoint_step = 0

    completed_steps = checkpoint_step
    progress_bar = tqdm(range(args.max_train_steps), initial=checkpoint_step,
                        disable=not accelerator.is_local_main_process)
    for epoch in range(args.num_train_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader, start=checkpoint_step):
            # model inputs
            q_ids, q_mask, a_ids, a_mask = batch
            pre_loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size)
            loss = pre_loss.sum() / args.gradient_accumulation_steps
            accelerator.backward(loss)
            loc_loss += loss.item()
            if ((step + 1) % args.gradient_accumulation_steps == 0) or (step + 1 == len(train_dataloader)):
                current_loss = loc_loss
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                progress_bar.update(1)
                progress_bar.set_postfix(loss=loc_loss)
                loc_loss = 0
                completed_steps += 1

            if step % (args.log_freq * args.gradient_accumulation_steps) == 0:
                accelerator.wait_for_everyone()
                unwrapped_model = accelerator.unwrap_model(model)
                eval_loss = evaluate_qa_retriever(unwrapped_model, eval_dataloader)
                logger.info(f"Train loss {current_loss} , eval loss {eval_loss}")
                if args.wandb and accelerator.is_local_main_process:
                    import wandb

                    wandb.log({"loss": current_loss, "eval_loss": eval_loss, "step": completed_steps})

            if completed_steps >= args.max_train_steps:
                break

        logger.info("Saving model {}".format(args.model_save_name))
        accelerator.wait_for_everyone()
        unwrapped_model = accelerator.unwrap_model(model)
        accelerator.save(unwrapped_model.state_dict(), "{}_{}.bin".format(args.model_save_name, epoch))
        eval_loss = evaluate_qa_retriever(unwrapped_model, eval_dataloader)
        logger.info("Evaluation loss epoch {:4d}: {:.3f}".format(epoch, eval_loss))


if __name__ == "__main__":
    parser = get_parser()
    parser.add_argument(
        "--wandb",
        action="store_true",
        help="Whether to use W&B logging",
    )
    main_args, _ = parser.parse_known_args()
    config = {"args": main_args}
    if main_args.wandb:
        import wandb

        wandb.init(project="Retriever")

    train(config=config)
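
# Example launch (a sketch; adjust batch sizes and the accelerate config to your hardware):
#   accelerate launch run_retriever_no_trainer.py \
#       --dataset_name vblagoje/lfqa \
#       --per_device_train_batch_size 512 \
#       --per_device_eval_batch_size 512 \
#       --num_train_epochs 4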