from types import MethodType
from typing import TYPE_CHECKING, Optional

import torch
from transformers import Trainer
from typing_extensions import override

from ...extras.packages import is_transformers_version_greater_than
from ..callbacks import SaveProcessorCallback
from ..trainer_utils import create_custom_optimizer, create_custom_scheduler


if TYPE_CHECKING:
    from transformers import ProcessorMixin

    from ...hparams import FinetuningArguments


class CustomTrainer(Trainer):
    r"""Inherit Trainer for custom optimizer."""

    def __init__(
        self, finetuning_args: "FinetuningArguments", processor: Optional["ProcessorMixin"], **kwargs
    ) -> None:
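        # transformers >= 4.46 expects `processing_class` in place of the deprecated `tokenizer` argument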
        if is_transformers_version_greater_than("4.46"):
            kwargs["processing_class"] = kwargs.pop("tokenizer")

        super().__init__(**kwargs)
        if processor is not None:
            # let the trainer normalize the loss itself instead of forwarding loss kwargs
            # (e.g. num_items_in_batch) to the model, which can skew the loss under gradient accumulation
            self.model_accepts_loss_kwargs = False

        self.finetuning_args = finetuning_args

        if processor is not None:
            self.add_callback(SaveProcessorCallback(processor))
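
        # bind BAdam's gradient clipping implementation to the accelerator and register its callback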
        if finetuning_args.use_badam:
            from badam import BAdamCallback, clip_grad_norm_old_version

            self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, self.accelerator)
            self.add_callback(BAdamCallback)
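
    # build a custom optimizer when the finetuning arguments request one; otherwise fall back to the default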
    @override
    def create_optimizer(self) -> "torch.optim.Optimizer":
        if self.optimizer is None:
            self.optimizer = create_custom_optimizer(self.model, self.args, self.finetuning_args)
        return super().create_optimizer()
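
    # attach any custom learning-rate scheduling behavior before delegating to the parent implementation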
    @override
    def create_scheduler(
        self, num_training_steps: int, optimizer: Optional["torch.optim.Optimizer"] = None
    ) -> "torch.optim.lr_scheduler.LRScheduler":
        create_custom_scheduler(self.args, num_training_steps, optimizer)
        return super().create_scheduler(num_training_steps, optimizer)
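
    # iterate over the dataset in order when shuffling is disabled; otherwise use the default sampler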
    @override
    def _get_train_sampler(self, *args, **kwargs) -> Optional["torch.utils.data.Sampler"]:
        if self.finetuning_args.disable_shuffling:
            return torch.utils.data.SequentialSampler(self.train_dataset)

        return super()._get_train_sampler(*args, **kwargs)
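
    # loss computation is unchanged; the override simply delegates to the parent Trainer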
    @override
    def compute_loss(self, model, inputs, *args, **kwargs):
        return super().compute_loss(model, inputs, *args, **kwargs)