combi2k2 committed
Commit f0f242a · 1 Parent(s): 37eb052

Add my custom Trainer for the Question Answering problem

Files changed (1):
  1. trainer_qa.py +75 -0
trainer_qa.py ADDED
@@ -0,0 +1,75 @@
+from transformers import Trainer
+from transformers.trainer_utils import PredictionOutput
+
+
+class QuestionAnsweringTrainer(Trainer):
+    """Trainer subclass that post-processes raw predictions before computing QA metrics."""
+
+    def __init__(self, *args, post_process_function=None, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.post_process_function = post_process_function
+
+    def evaluate(self, eval_dataset=None, ignore_keys=None, metric_key_prefix: str = "eval"):
+        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
+        eval_dataloader = self.get_eval_dataloader(eval_dataset)
+
+        # Temporarily disable metric computation; we run it ourselves after post-processing below.
+        compute_metrics = self.compute_metrics
+        self.compute_metrics = None
+        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
+        try:
+            output = eval_loop(
+                eval_dataloader,
+                description="Evaluation",
+                # No point gathering the predictions if there are no metrics, otherwise we defer to
+                # self.args.prediction_loss_only
+                prediction_loss_only=True if compute_metrics is None else None,
+                ignore_keys=ignore_keys,
+            )
+        finally:
+            self.compute_metrics = compute_metrics
+
+        if self.post_process_function is not None and self.compute_metrics is not None:
+            eval_preds = self.post_process_function(eval_dataset, self.tokenizer, output.predictions)
+            metrics = self.compute_metrics(eval_preds)
+
+            # Prefix all keys with metric_key_prefix + '_'
+            for key in list(metrics.keys()):
+                if not key.startswith(f"{metric_key_prefix}_"):
+                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
+
+            self.log(metrics)
+        else:
+            metrics = {}
+
+        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
+        return metrics
+
+    def predict(self, predict_dataset, ignore_keys=None, metric_key_prefix: str = "test"):
+        predict_dataloader = self.get_test_dataloader(predict_dataset)
+
+        # Temporarily disable metric computation; we run it ourselves after post-processing below.
+        compute_metrics = self.compute_metrics
+        self.compute_metrics = None
+        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
+        try:
+            output = eval_loop(
+                predict_dataloader,
+                description="Prediction",
+                # No point gathering the predictions if there are no metrics, otherwise we defer to
+                # self.args.prediction_loss_only
+                prediction_loss_only=True if compute_metrics is None else None,
+                ignore_keys=ignore_keys,
+            )
+        finally:
+            self.compute_metrics = compute_metrics
+
+        if self.post_process_function is None or self.compute_metrics is None:
+            return output
+
+        predictions = self.post_process_function(predict_dataset, self.tokenizer, output.predictions, "predict")
+        metrics = self.compute_metrics(predictions)
+
+        # Prefix all keys with metric_key_prefix + '_'
+        for key in list(metrics.keys()):
+            if not key.startswith(f"{metric_key_prefix}_"):
+                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
+
+        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
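
For reference, a minimal usage sketch of the new trainer (not part of the commit). It assumes a hypothetical decode_answers helper and pre-tokenized train_features / eval_features / test_features datasets; the only contract the trainer actually imposes is that post_process_function accept (examples, tokenizer, predictions) plus an optional stage argument and return an object exposing .predictions and .label_ids, such as transformers.trainer_utils.EvalPrediction, which compute_metrics can then consume.

# Minimal usage sketch (illustrative assumptions marked in comments).
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, TrainingArguments
from transformers.trainer_utils import EvalPrediction

from trainer_qa import QuestionAnsweringTrainer


def post_process_function(examples, tokenizer, predictions, stage="eval"):
    # Hypothetical decoding step: turn the model's raw predictions into answer
    # strings and pair them with references. decode_answers is assumed here.
    answers, references = decode_answers(examples, tokenizer, predictions)
    return EvalPrediction(predictions=answers, label_ids=references)


def compute_metrics(eval_pred):
    # Toy exact-match over answer strings; a SQuAD-style metric would go here.
    matches = sum(p == r for p, r in zip(eval_pred.predictions, eval_pred.label_ids))
    return {"exact_match": 100.0 * matches / max(len(eval_pred.label_ids), 1)}


checkpoint = "distilbert-base-uncased"  # assumed checkpoint
trainer = QuestionAnsweringTrainer(
    model=AutoModelForQuestionAnswering.from_pretrained(checkpoint),
    args=TrainingArguments(output_dir="qa_out"),
    train_dataset=train_features,  # assumed pre-tokenized datasets
    eval_dataset=eval_features,
    tokenizer=AutoTokenizer.from_pretrained(checkpoint),
    post_process_function=post_process_function,
    compute_metrics=compute_metrics,
)

metrics = trainer.evaluate()                 # runs the custom post-processing path
test_output = trainer.predict(test_features)  # returns a PredictionOutput

The design point of the subclass is that QA metrics need decoded answers rather than raw model outputs, so evaluate and predict disable compute_metrics during the prediction loop and apply it only after post_process_function has run.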