| """ |
| A subclass of `Trainer` specific to Question-Answering tasks |
| """ |
|
|
| import math |
| import time |
|
|
| from torch.utils.data import Dataset |
|
|
| from transformers import Seq2SeqTrainer, is_torch_xla_available |
| from transformers.trainer_utils import PredictionOutput, speed_metrics |
|
|
|
|
| if is_torch_xla_available(): |
| import torch_xla.core.xla_model as xm |
| import torch_xla.debug.metrics as met |
|
|
|
|
| class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer): |
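    """
    A `Seq2SeqTrainer` that defers metric computation until the generated outputs have been
    post-processed back into answer strings.

    On top of the `Seq2SeqTrainer` arguments, the constructor accepts `eval_examples`, the raw
    (untokenized) examples matching `eval_dataset`, and `post_process_function`, a callable
    `(examples, features, output[, stage])` that turns generated token ids into the prediction
    format expected by `compute_metrics`.
    """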

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Dataset | None = None,
        eval_examples=None,
        ignore_keys: list[str] | None = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> dict[str, float]:
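        """
        Run evaluation on `eval_dataset` and return the metrics.

        Generation-specific options such as `max_length` and `num_beams` can be passed via
        `**gen_kwargs`; when omitted, the corresponding values from the training arguments
        are used instead.
        """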
        gen_kwargs = gen_kwargs.copy()

        # Fall back to the legacy generation options from the training arguments when they
        # are not passed explicitly as keyword arguments.
        if gen_kwargs.get("max_length") is None and self.args.generation_max_length is not None:
            gen_kwargs["max_length"] = self.args.generation_max_length
        if gen_kwargs.get("num_beams") is None and self.args.generation_num_beams is not None:
            gen_kwargs["num_beams"] = self.args.generation_num_beams
        # Stash the generation kwargs where `Seq2SeqTrainer.prediction_step` expects them.
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation; we run it manually after post-processing below.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        try:
            output = self.evaluation_loop(
                eval_dataloader,
                description="Evaluation",
                # No point gathering the predictions if there are no metrics; otherwise we
                # defer to self.args.prediction_loss_only.
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main process writes the results by default.
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Make sure every metric key carries the `metric_key_prefix` prefix.
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main process logs the results by default.
            self.log(metrics)

        if self.args.debug and is_torch_xla_available():
            # tpu-comment: log PyTorch/XLA debug metrics (compile/execute times, ops, etc.).
            # The availability guard avoids a NameError on `xm`/`met` in non-XLA runs.
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
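        """
        Run prediction on `predict_dataset` and return a `PredictionOutput` whose metrics
        are computed after post-processing, mirroring `evaluate` above.
        """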
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation; we run it manually after post-processing below.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        try:
            output = self.evaluation_loop(
                predict_dataloader,
                description="Prediction",
                # No point gathering the predictions if there are no metrics; otherwise we
                # defer to self.args.prediction_loss_only.
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Make sure every metric key carries the `metric_key_prefix` prefix.
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
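
# -----------------------------------------------------------------------------
# Usage sketch (illustrative only): wiring this trainer into a SQuAD-style run.
# `model`, `training_args`, `eval_dataset`, `eval_examples`, and the helpers
# `post_processing_function` / `compute_metrics` are hypothetical stand-ins for
# whatever the training script defines (e.g. along the lines of
# examples/pytorch/question-answering/run_seq2seq_qa.py).
#
# trainer = QuestionAnsweringSeq2SeqTrainer(
#     model=model,
#     args=training_args,
#     eval_dataset=eval_dataset,            # tokenized features
#     eval_examples=eval_examples,          # raw examples aligned with the features
#     post_process_function=post_processing_function,
#     compute_metrics=compute_metrics,
# )
# metrics = trainer.evaluate(max_length=30, num_beams=4)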