# Copyright (c) Alibaba, Inc. and its affiliates.
from typing import TYPE_CHECKING
from transformers.trainer_callback import TrainerCallback
from transformers.trainer_utils import (EvaluationStrategy, FSDPOption, HPSearchBackend, HubStrategy, IntervalStrategy,
SchedulerType)
from swift.utils.import_utils import _LazyModule
from . import callback
# ShardedDDPOption was removed from transformers in
# https://github.com/huggingface/transformers/pull/25702; fall back to None
# on newer versions so downstream code can feature-check it.
ShardedDDPOption = None
try:
    from transformers.trainer_utils import ShardedDDPOption
except ImportError:
    pass
if TYPE_CHECKING:
    # Real imports for static type-checkers / IDEs only; at runtime the
    # submodules are loaded lazily via _LazyModule below.
    from .arguments import Seq2SeqTrainingArguments, TrainingArguments
    from .rlhf_trainer import (CPOTrainer, DPOTrainer, KTOTrainer, ORPOTrainer, RLHFTrainerMixin, PPOTrainer,
                               RewardTrainer, GRPOTrainer)
    # GRPOConfig is exported via _import_structure but was missing here,
    # breaking static resolution of `GRPOConfig`; keep both lists in sync.
    from .rlhf_arguments import (DPOConfig, CPOConfig, KTOConfig, ORPOConfig, PPOConfig, RewardConfig,
                                 GRPOConfig)
    from .trainer_factory import TrainerFactory
    from .trainers import Seq2SeqTrainer, Trainer, EmbeddingTrainer
    from .mixin import SwiftMixin
else:
    # Preserve names already bound at module top (TrainerCallback, the
    # transformers enums, ShardedDDPOption, ...) so the lazy module keeps
    # exposing them after sys.modules is replaced.
    _extra_objects = {k: v for k, v in globals().items() if not k.startswith('_')}
    # Map submodule -> public names; _LazyModule imports a submodule only on
    # first attribute access. Must mirror the TYPE_CHECKING imports above.
    _import_structure = {
        'arguments': ['Seq2SeqTrainingArguments', 'TrainingArguments'],
        'rlhf_arguments':
        ['DPOConfig', 'CPOConfig', 'KTOConfig', 'ORPOConfig', 'PPOConfig', 'RewardConfig', 'GRPOConfig'],
        'rlhf_trainer': [
            'CPOTrainer', 'DPOTrainer', 'KTOTrainer', 'ORPOTrainer', 'RLHFTrainerMixin', 'PPOTrainer', 'RewardTrainer',
            'GRPOTrainer'
        ],
        'trainer_factory': ['TrainerFactory'],
        'trainers': ['Seq2SeqTrainer', 'Trainer', 'EmbeddingTrainer'],
        'mixin': ['SwiftMixin'],
    }
    import sys
    # Replace this module with a lazy proxy; submodules import on demand.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()['__file__'],
        _import_structure,
        module_spec=__spec__,
        extra_objects=_extra_objects,
    )