yingyingzhang committed on
Commit e22faba · verified · 1 Parent(s): a86e25e

Upload 5 files

Files changed (5)
  1. my_ppo_config.py +240 -0
  2. my_ppo_trainer_v1.py +716 -0
  3. ppo_trainer.py +1646 -0
  4. step_dpo_ppo.py +344 -0
  5. step_dpo_reward.py +140 -0
my_ppo_config.py ADDED
@@ -0,0 +1,240 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import json
15
+ import os
16
+ import sys
17
+ import warnings
18
+ from dataclasses import dataclass, field
19
+ from typing import Literal, Optional
20
+
21
+ import numpy as np
22
+ import tyro
23
+ from transformers import is_wandb_available
24
+ from typing_extensions import Annotated
25
+
26
+ from trl.trainer.utils import exact_div
27
+
28
+ from trl.core import flatten_dict
29
+
30
+
31
+ JSONDict = Annotated[Optional[dict], tyro.conf.arg(metavar="JSON", constructor=json.loads)]
32
+
33
+
34
+ @dataclass
35
+ class PPOConfig:
36
+ r"""
37
+ Configuration class for the [`PPOTrainer`].
38
+
39
+ Using [`~transformers.HfArgumentParser`] we can turn this class into
40
+ [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
41
+ command line.
42
+
43
+ Parameters:
44
+ exp_name (`str`, *optional*, defaults to `os.path.basename(__file__)[: -len(".py")]`):
45
+ Name of this experiment.
46
+ seed (`int`, *optional*, defaults to `0`):
47
+ Random seed.
48
+ log_with (`Optional[Literal["wandb", "tensorboard"]]`, *optional*, defaults to `None`):
49
+ Log with either `"wandb"` or `"tensorboard"`. Check
50
+ [tracking](https://huggingface.co/docs/accelerate/usage_guides/tracking) for more details.
51
+ task_name (`Optional[str]`, *optional*, defaults to `None`):
52
+ Name of task to use - used only for tracking purposes.
53
+ model_name (`Optional[str]`, *optional*, defaults to `"gpt2"`):
54
+ Name of model to use - used only for tracking purposes.
55
+ query_dataset (`Optional[str]`, *optional*, defaults to `"stanfordnlp/imdb"`):
56
+ Name of dataset to query - used only for tracking purposes.
57
+ reward_model (`Optional[str]`, *optional*, defaults to `"sentiment-analysis:lvwerra/distilbert-imdb"`):
58
+ Reward model to use - used only for tracking purposes.
59
+ remove_unused_columns (`bool`, *optional*, defaults to `True`):
60
+ Remove unused columns from the dataset.
61
+ tracker_kwargs (`JSONDict`, *optional*, defaults to `{}`):
62
+ Keyword arguments for the tracker (e.g. `python ppo.py --tracker_kwargs='{"wandb": {"entity": "my_wandb_entity", "name": "my_exp_name"}}'`).
63
+ accelerator_kwargs (`JSONDict`, *optional*, defaults to `{}`):
64
+ Keyword arguments for the accelerator.
65
+ project_kwargs (`JSONDict`, *optional*, defaults to `{}`):
66
+ Keyword arguments for the accelerator project config (e.g. `logging_dir`).
67
+ tracker_project_name (`str`, *optional*, defaults to `"trl"`):
68
+ Name of project to use for tracking.
69
+ push_to_hub_if_best_kwargs (`JSONDict`, *optional*, defaults to `{}`):
70
+ Keyword arguments for pushing model to the hub during training (e.g. repo_id).
71
+ steps (`int`, *optional*, defaults to `20000`):
72
+ Number of training steps.
73
+ learning_rate (`float`, *optional*, defaults to `1.41e-5`):
74
+ Learning rate for the optimizer.
75
+ adap_kl_ctrl (`bool`, *optional*, defaults to `True`):
76
+ Use adaptive KL control, otherwise linear.
77
+ init_kl_coef (`Optional[float]`, *optional*, defaults to `0.2`):
78
+ Initial KL penalty coefficient (used for adaptive and linear control).
79
+ kl_penalty (`Literal["kl", "abs", "mse", "full"]`, *optional*, defaults to `"kl"`):
80
+ KL penalty options. Possible values are:
81
+
82
+ - `"kl"`: model_logp - ref_logp
83
+ - `"abs"`: abs(kl)
84
+ - `"mse"`: mean squared error mse(kl)
85
+ - `"full"`: the actual kl for all tokens in the distribution.
86
+
87
+ target (`float`, *optional*, defaults to `6.0`):
88
+ Target KL value for adaptive KL control.
89
+ horizon (`float`, *optional*, defaults to `10000.0`):
90
+ Horizon for adaptive KL control.
91
+ gamma (`float`, *optional*, defaults to `1.0`):
92
+ Gamma parameter for advantage calculation.
93
+ lam (`float`, *optional*, defaults to `0.95`):
94
+ Lambda parameter for advantage calculation.
95
+ cliprange (`float`, *optional*, defaults to `0.2`):
96
+ Range for clipping in PPO policy gradient loss.
97
+ cliprange_value (`float`, *optional*, defaults to `0.2`):
98
+ Range for clipping values in loss calculation.
99
+ vf_coef (`float`, *optional*, defaults to `0.1`):
100
+ Scaling factor for value loss.
101
+ batch_size (`int`, *optional*, defaults to `128`):
102
+ Number of samples per optimisation step.
103
+ forward_batch_size (`Optional[int]`, *optional*, defaults to `None`):
104
+ DEPRECATED: use `mini_batch_size` instead, which does the same thing.
105
+ mini_batch_size (`int`, *optional*, defaults to `128`):
106
+ Number of samples optimized in each mini batch.
107
+ gradient_accumulation_steps (`int`, *optional*, defaults to `1`):
108
+ Number of gradient accumulation steps.
109
+ world_size (`Optional[int]`, *optional*, defaults to `None`):
110
+ Number of processes to use for distributed training.
111
+ ppo_epochs (`int`, *optional*, defaults to `4`):
112
+ Number of optimisation epochs per batch of samples.
113
+ optimize_device_cache (`bool`, *optional*, defaults to `False`):
114
+ Optimize device cache for slightly more memory-efficient training.
115
+ early_stopping (`bool`, *optional*, defaults to `False`):
116
+ Whether to stop the PPO optimization loop early if the KL is too high.
117
+ target_kl (`float`, *optional*, defaults to `1.0`):
118
+ Stop early if we exceed this value by over 50%.
119
+ compare_steps (`int`, *optional*, defaults to `1`):
120
+ Compare the current step with the previous `compare_steps` steps.
121
+ ratio_threshold (`float`, *optional*, defaults to `10.0`):
122
+ Skip mini-batches with high PPO ratios that can cause loss spikes.
123
+ use_score_scaling (`bool`, *optional*, defaults to `False`):
124
+ Use score scaling.
125
+ use_score_norm (`bool`, *optional*, defaults to `False`):
126
+ Use score normalization. Only applicable if `use_score_scaling` is True.
127
+ score_clip (`Optional[float]`, *optional*, defaults to `None`):
128
+ Score clipping.
129
+ whiten_rewards (`bool`, *optional*, defaults to `False`):
130
+ Whiten the rewards before computing advantages.
131
+ is_encoder_decoder (`Optional[bool]`, *optional*, defaults to `None`):
132
+ When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
133
+ you need to specify if the model returned by the callable is an encoder-decoder model.
134
+ is_peft_model (`Optional[bool]`, *optional*, defaults to `None`):
135
+ Whether the model is a PEFT model.
136
+ backward_batch_size (`Optional[int]`, *optional*, defaults to `None`):
137
+ Number of samples optimized in an `optimizer.step()` call.
138
+ global_backward_batch_size (`Optional[int]`, *optional*, defaults to `None`):
139
+ Effective `backward_batch_size` across all processes.
140
+ global_batch_size (`Optional[int]`, *optional*, defaults to `None`):
141
+ Effective `batch_size` across all processes.
142
+ dataset_num_proc (`Optional[int]`, *optional*, defaults to `None`):
143
+ Number of processes to use for processing the dataset.
144
+ """
145
+
146
+ exp_name: str = os.path.basename(sys.argv[0])[: -len(".py")]
147
+ seed: int = 0
148
+ log_with: Optional[Literal["wandb", "tensorboard"]] = None
149
+ task_name: Optional[str] = None
150
+ model_name: str = "gpt2"
151
+ query_dataset: str = "stanfordnlp/imdb"
152
+ reward_model: str = "sentiment-analysis:lvwerra/distilbert-imdb"
153
+ remove_unused_columns: bool = True
154
+ tracker_kwargs: JSONDict = field(default_factory=dict)
155
+ accelerator_kwargs: JSONDict = field(default_factory=dict)
156
+ project_kwargs: JSONDict = field(default_factory=dict)
157
+ tracker_project_name: str = "trl"
158
+ push_to_hub_if_best_kwargs: JSONDict = field(default_factory=dict)
159
+ steps: int = 20000
160
+ learning_rate: float = 1.41e-5
161
+ adap_kl_ctrl: bool = True
162
+ init_kl_coef: float = 0.2
163
+ kl_penalty: Literal["kl", "abs", "mse", "full", "control_variate", "seq_control_variate"] = "kl"
164
+ target: float = 6.0
165
+ horizon: float = 10000.0
166
+ gamma: float = 1.0
167
+ lam: float = 0.95
168
+ cliprange: float = 0.2
169
+ cliprange_value: float = 0.2
170
+ vf_coef: float = 0.1
171
+ batch_size: int = 128
172
+ forward_batch_size: Optional[int] = None
173
+ mini_batch_size: int = 128
174
+ gradient_accumulation_steps: int = 1
175
+ world_size: tyro.conf.Suppress[int] = None
176
+ ppo_epochs: int = 4
177
+ max_grad_norm: Optional[float] = None
178
+ optimize_cuda_cache: Optional[bool] = None
179
+ optimize_device_cache: bool = False
180
+ early_stopping: bool = False
181
+ target_kl: float = 1.0
182
+ compare_steps: int = 1
183
+ ratio_threshold: float = 10.0
184
+ use_score_scaling: bool = False
185
+ use_score_norm: bool = False
186
+ score_clip: Optional[float] = None
187
+ whiten_rewards: bool = False
188
+ whiten_advantages: bool = False
189
+ gradient_checkpointing: bool = False
190
+ is_encoder_decoder: Optional[tyro.conf.Suppress[bool]] = None
191
+ is_peft_model: Optional[tyro.conf.Suppress[bool]] = None
192
+ backward_batch_size: tyro.conf.Suppress[int] = None
193
+ global_backward_batch_size: Optional[tyro.conf.Suppress[int]] = None
194
+ global_batch_size: tyro.conf.Suppress[int] = None
195
+ dataset_num_proc: Optional[int] = None
196
+
197
+ if optimize_cuda_cache is not None:
198
+ warnings.warn(
199
+ "The `optimize_cuda_cache` argument will be deprecated soon, please use `optimize_device_cache` instead."
200
+ )
201
+
202
+ if optimize_device_cache is True:
203
+ raise ValueError("Both `optimize_device_cache` and `optimize_cuda_cache` were provided")
204
+
205
+ optimize_device_cache = optimize_cuda_cache
206
+
207
+ def __post_init__(self):
208
+ warnings.warn(
209
+ "`PPOConfig` is deprecated and will be removed in the future. Please use `PPOv2Config` with `PPOv2Trainer` instead.",
210
+ FutureWarning,
211
+ )
212
+ if self.forward_batch_size is not None:
213
+ warnings.warn(
214
+ "Note that using `forward_batch_size` is deprecated, use `mini_batch_size` instead. By setting it you overwrite `mini_batch_size` which affects both the batch size during forward passes and also the mini batch size for PPO optimization."
215
+ )
216
+ self.mini_batch_size = self.forward_batch_size
217
+
218
+ self.backward_batch_size = self.mini_batch_size * self.gradient_accumulation_steps
219
+ exact_div(
220
+ self.batch_size,
221
+ self.backward_batch_size,
222
+ "`batch_size` must be a multiple of `mini_batch_size * gradient_accumulation_steps`",
223
+ )
224
+
225
+ # check if wandb is installed
226
+ if self.log_with == "wandb":
227
+ # raise error if wandb is not installed
228
+ if not is_wandb_available():
229
+ raise ImportError(
230
+ "Please install wandb to use wandb logging. You can do this by running `pip install wandb`."
231
+ )
232
+
233
+ self.total_ppo_epochs = int(np.ceil(self.steps / self.batch_size))
234
+ assert self.kl_penalty in ["kl", "abs", "mse", "full", "control_variate", "seq_control_variate"]
235
+
236
+ def to_dict(self):
237
+ output_dict = {}
238
+ for key, value in self.__dict__.items():
239
+ output_dict[key] = value
240
+ return flatten_dict(output_dict)
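
A note on the batch-size bookkeeping in this config: `__post_init__` sets `backward_batch_size = mini_batch_size * gradient_accumulation_steps` and requires `batch_size` to be an exact multiple of it (via `exact_div`), while `total_ppo_epochs` is derived from `steps / batch_size`. The following is a minimal sketch of exercising those invariants, assuming the file above is importable as `my_ppo_config`; all values are illustrative only.

```python
# Minimal sketch (not part of the uploaded files): exercising the PPOConfig
# invariants enforced in __post_init__.
from my_ppo_config import PPOConfig  # assumes this file is on the Python path

config = PPOConfig(
    steps=20000,
    batch_size=128,
    mini_batch_size=16,
    gradient_accumulation_steps=4,
)

# backward_batch_size = 16 * 4 = 64, and 128 % 64 == 0, so the exact_div check passes.
assert config.batch_size % config.backward_batch_size == 0
# total_ppo_epochs = ceil(steps / batch_size) = ceil(20000 / 128) = 157
print(config.backward_batch_size, config.total_ppo_epochs)
```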
my_ppo_trainer_v1.py ADDED
@@ -0,0 +1,716 @@
1
+ # Copyright 2024 HuggingFace Inc. and the LlamaFactory team.
2
+ #
3
+ # This code is inspired by the HuggingFace's TRL library.
4
+ # https://github.com/huggingface/trl/blob/v0.8.0/trl/trainer/ppo_trainer.py
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ import math
19
+ import json
20
+ import os
21
+ import sys
22
+ import copy
23
+ import warnings
24
+ from types import MethodType
25
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
26
+ from accelerate.utils import DummyOptim, DummyScheduler
27
+
28
+ import torch
29
+ from accelerate.utils import DistributedDataParallelKwargs
30
+ from tqdm import tqdm
31
+ from transformers import GenerationConfig, Trainer, TrainerControl, TrainerState
32
+ from transformers.optimization import get_scheduler
33
+ from transformers.trainer import DEFAULT_CALLBACKS
34
+ from transformers.trainer_callback import CallbackHandler
35
+ from transformers.trainer_pt_utils import remove_dummy_checkpoint
36
+ from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
37
+ from transformers.utils import SAFE_WEIGHTS_NAME, WEIGHTS_NAME
38
+ from trl.core import PPODecorators, logprobs_from_logits
39
+ from trl.models.utils import unwrap_model_for_generation
40
+ from typing_extensions import override
41
+
42
+ from my_ppo_config import PPOConfig
43
+ from ppo_trainer import PPOTrainer
44
+ from ppo_trainer_v1 import PPOTrainerV1
45
+ from trl.core import PPODecorators, logprobs_from_logits
46
+ from trl.models.utils import unwrap_model_for_generation
47
+ from typing_extensions import override
48
+
49
+ from llamafactory.extras.logging import get_logger
50
+ from llamafactory.extras.misc import AverageMeter, count_parameters, get_current_device, get_logits_processor
51
+ from llamafactory.train.callbacks import SaveProcessorCallback
52
+ from llamafactory.train.trainer_utils import create_custom_optimizer, create_custom_scheduler
53
+ from llamafactory.train.ppo.ppo_utils import dump_layernorm, replace_model, restore_layernorm
54
+ from iTrainingLogger import iSummaryWriter
55
+ from ppo_save_utils import FixValueHeadModelCallback
56
+ import torch.distributed as dist
57
+ import numpy as np
58
+ from transformers.integrations.deepspeed import deepspeed_init
59
+
60
+ if TYPE_CHECKING:
61
+ from datasets import Dataset
62
+ from transformers import (
63
+ DataCollatorWithPadding,
64
+ PreTrainedTokenizer,
65
+ ProcessorMixin,
66
+ Seq2SeqTrainingArguments,
67
+ TrainerCallback,
68
+ )
69
+ from trl import AutoModelForCausalLMWithValueHead
70
+
71
+ from llamafactory.hparams import FinetuningArguments, GeneratingArguments, ModelArguments
72
+
73
+ import logging
74
+
75
+ logging.basicConfig()
76
+ logger = logging.getLogger(__name__)
77
+ logger.setLevel(logging.INFO)
78
+
79
+ from step_dpo_reward import get_rewards_from_server, get_rewards_from_rule
80
+ import re
81
+ import numpy as np
82
+ from trl.trainer.utils import (
83
+ disable_dropout_in_model
84
+ )
85
+
86
+ RM_TYPE = os.environ.get('RM_TYPE', 'RULE')
87
+ if RM_TYPE == 'RULE':
88
+ get_rewards_from_fn = get_rewards_from_rule
89
+ elif RM_TYPE == 'BT_RM':
90
+ get_rewards_from_fn = get_rewards_from_server
91
+ else:
92
+ get_rewards_from_fn = get_rewards_from_rule
93
+
94
+ print(get_rewards_from_fn, '==get_rewards_from_fn==', RM_TYPE)
95
+
96
+
97
+ class CustomPPOTrainer(PPOTrainer, Trainer):
98
+ r"""
99
+ Inherits PPOTrainer.
100
+ """
101
+
102
+ def __init__(
103
+ self,
104
+ model_args: "ModelArguments",
105
+ training_args: "Seq2SeqTrainingArguments",
106
+ finetuning_args: "FinetuningArguments",
107
+ generating_args: "GeneratingArguments",
108
+ callbacks: Optional[List["TrainerCallback"]],
109
+ model: "AutoModelForCausalLMWithValueHead",
110
+ reward_model: Optional["AutoModelForCausalLMWithValueHead"],
111
+ ref_model: Optional["AutoModelForCausalLMWithValueHead"],
112
+ tokenizer: "PreTrainedTokenizer",
113
+ processor: Optional["ProcessorMixin"],
114
+ dataset: "Dataset",
115
+ data_collator: "DataCollatorWithPadding"
116
+ ) -> None:
117
+ # if eval_dataset is not None:
118
+ # raise NotImplementedError("PPOTrainer does not support eval dataset yet.")
119
+
120
+ self.training_args = training_args
121
+ print(self.training_args, '===training_args===')
122
+ backward_batch_size = training_args.per_device_train_batch_size * training_args.gradient_accumulation_steps
123
+ ppo_config = PPOConfig(
124
+ gradient_checkpointing=training_args.gradient_checkpointing,
125
+ model_name=model_args.model_name_or_path,
126
+ learning_rate=training_args.learning_rate,
127
+ mini_batch_size=training_args.per_device_train_batch_size,
128
+ batch_size=backward_batch_size * finetuning_args.ppo_buffer_size,
129
+ gradient_accumulation_steps=training_args.gradient_accumulation_steps,
130
+ ppo_epochs=finetuning_args.ppo_epochs,
131
+ max_grad_norm=training_args.max_grad_norm,
132
+ seed=training_args.seed,
133
+ optimize_device_cache=True,
134
+ target=finetuning_args.ppo_target,
135
+ lam=training_args.lam,
136
+ whiten_advantages=training_args.ppo_whiten_advantages,
137
+ init_kl_coef=training_args.init_kl_coef,
138
+ adap_kl_ctrl=training_args.adap_kl_ctrl,
139
+ kl_penalty=training_args.kl_penalty,
140
+ use_score_scaling=finetuning_args.ppo_score_norm,
141
+ use_score_norm=finetuning_args.ppo_score_norm,
142
+ whiten_rewards=finetuning_args.ppo_whiten_rewards,
143
+ accelerator_kwargs={"step_scheduler_with_optimizer": True},
144
+ log_with='tensorboard',
145
+ project_kwargs={"logging_dir": training_args.logging_dir},
146
+ )
147
+
148
+ logger.info("####training_args####")
149
+ logger.info(training_args)
150
+
151
+ logger.info("####ppo_config####")
152
+ logger.info(ppo_config)
153
+
154
+ # Create optimizer and scheduler
155
+ if training_args.max_steps > 0:
156
+ num_training_steps = training_args.max_steps
157
+ else:
158
+ total_train_batch_size = backward_batch_size * finetuning_args.ppo_buffer_size * training_args.world_size
159
+ num_training_steps = training_args.num_train_epochs * math.ceil(
160
+ len(dataset) / total_train_batch_size
161
+ )
162
+
163
+ # Add deepspeed config
164
+ if training_args.deepspeed_plugin is not None:
165
+ ppo_config.accelerator_kwargs["kwargs_handlers"] = [
166
+ DistributedDataParallelKwargs(find_unused_parameters=training_args.ddp_find_unused_parameters)
167
+ ]
168
+
169
+ self.propagate_args_to_deepspeed()
170
+
171
+ hf_deepspeed_config = training_args.deepspeed_plugin.hf_ds_config
172
+ hf_deepspeed_config.trainer_config_finalize(self.training_args,
173
+ model, num_training_steps)
174
+
175
+ logger.info("####trainer_config_finalize####")
176
+ logger.info(hf_deepspeed_config.config)
177
+
178
+ logger.info("####ppo_config.log_with####")
179
+ logger.info(ppo_config.log_with)
180
+
181
+ ppo_config.accelerator_kwargs["deepspeed_plugin"] = training_args.deepspeed_plugin
182
+ # if ppo_config.log_with is not None:
183
+ # logger.warning("PPOTrainer cannot use external logger when DeepSpeed is enabled.")
184
+ # ppo_config.log_with = None
185
+
186
+ model_parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
187
+ if "optimizer" in hf_deepspeed_config.config:
188
+ optimizer = DummyOptim(params=model_parameters)
189
+ logger.info('###optimizer in hf_deepspeed_config###')
190
+ logger.info(optimizer)
191
+ else:
192
+ optimizer = self.create_optimizer(model, training_args, finetuning_args)
193
+ hf_deepspeed_config.config["zero_allow_untested_optimizer"] = True
194
+
195
+ if "scheduler" in hf_deepspeed_config.config:
196
+ scheduler = DummyScheduler(optimizer)
197
+ logger.info('###scheduler in hf_deepspeed_config###')
198
+ logger.info(scheduler)
199
+ else:
200
+ if isinstance(optimizer, DummyOptim):
201
+ def _lr_scheduler_callable(optimizer):
202
+ # create a shallow copy first, so later modifications do not affect original trainer
203
+ trainer_copy = copy.copy(self)
204
+ # at the time _lr_scheduler_callable is called, trainer.lr_scheduler has been set
205
+ # update it to None so that we can re-create a new scheduler
206
+ trainer_copy.lr_scheduler = None
207
+ lr_scheduler = trainer_copy.create_scheduler(
208
+ training_args=training_args,
209
+ num_training_steps=num_training_steps, optimizer=optimizer
210
+ )
211
+ logger.info('###lr_scheduler in _lr_scheduler_callable###')
212
+ logger.info(lr_scheduler)
213
+ return lr_scheduler
214
+ scheduler = DummyScheduler(optimizer, lr_scheduler_callable=_lr_scheduler_callable)
215
+ else:
216
+ scheduler = self.create_scheduler(training_args, num_training_steps, optimizer)
217
+
218
+ logger.info("#####optimizer#####")
219
+ logger.info(optimizer)
220
+ logger.info("#####scheduler#####")
221
+ logger.info(scheduler)
222
+
223
+ dist.barrier()
224
+ disable_dropout_in_model(model)
225
+ disable_dropout_in_model(ref_model)
226
+
227
+ PPOTrainer.__init__(
228
+ self,
229
+ training_args=training_args,
230
+ config=ppo_config,
231
+ model=model,
232
+ ref_model=ref_model,
233
+ tokenizer=tokenizer,
234
+ dataset=dataset,
235
+ optimizer=optimizer,
236
+ data_collator=data_collator,
237
+ lr_scheduler=scheduler,
238
+ num_training_steps=num_training_steps
239
+ )
240
+
241
+ dist.barrier()
242
+
243
+ self.args = training_args
244
+ self.model_args = model_args
245
+ self.finetuning_args = finetuning_args
246
+ self.reward_model = reward_model
247
+ self.current_device = get_current_device() # patch for deepspeed training
248
+
249
+ self.generation_config = GenerationConfig(
250
+ pad_token_id=self.tokenizer.pad_token_id,
251
+ eos_token_id=[self.tokenizer.eos_token_id] + self.tokenizer.additional_special_tokens_ids,
252
+ **generating_args.to_dict(),
253
+ )
254
+
255
+ self.state = TrainerState()
256
+ self.control = TrainerControl()
257
+ self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None
258
+ self.is_fsdp_enabled = getattr(self.accelerator.state, "fsdp_plugin", None) is not None
259
+ callbacks = DEFAULT_CALLBACKS if callbacks is None else DEFAULT_CALLBACKS + callbacks
260
+ self.callback_handler = CallbackHandler(
261
+ callbacks, self.accelerator.unwrap_model(self.model), self.tokenizer, self.optimizer, self.lr_scheduler
262
+ )
263
+ if self.args.max_steps > 0:
264
+ logger.info("max_steps is given, it will override any value given in num_train_epochs")
265
+
266
+ self.amp_context = torch.autocast(self.current_device.type)
267
+ warnings.simplefilter("ignore") # remove gc warnings on ref model
268
+
269
+ if finetuning_args.reward_model_type == "full":
270
+ if self.is_deepspeed_enabled:
271
+ if not (
272
+ getattr(reward_model.pretrained_model, "is_loaded_in_8bit", False)
273
+ or getattr(reward_model.pretrained_model, "is_loaded_in_4bit", False)
274
+ ): # quantized models are already set on the correct device
275
+ self.reward_model = self._prepare_deepspeed(self.reward_model)
276
+ else:
277
+ self.reward_model = self.accelerator.prepare_model(self.reward_model, evaluation_mode=True)
278
+
279
+ self.add_callback(FixValueHeadModelCallback)
280
+
281
+ dist.barrier()
282
+
283
+ if processor is not None:
284
+ self.add_callback(SaveProcessorCallback(processor))
285
+
286
+ if finetuning_args.use_badam:
287
+ from badam import BAdamCallback, clip_grad_norm_old_version
288
+
289
+ self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, self.accelerator)
290
+ self.add_callback(BAdamCallback)
291
+
292
+
293
+ def ppo_train(self, resume_from_checkpoint: Optional[str] = None) -> None:
294
+ r"""
295
+ Implements the training loop for the PPO stage, similar to _inner_training_loop() in Hugging Face's Trainer.
296
+ """
297
+ if resume_from_checkpoint is not None:
298
+ raise ValueError("`resume_from_checkpoint` will be supported in the future version.")
299
+
300
+ total_train_batch_size = (
301
+ self.args.per_device_train_batch_size
302
+ * self.args.gradient_accumulation_steps
303
+ * self.finetuning_args.ppo_buffer_size
304
+ * self.args.world_size
305
+ )
306
+ if self.args.max_steps > 0:
307
+ num_examples = total_train_batch_size * self.args.max_steps
308
+ num_train_epochs = sys.maxsize
309
+ max_steps = self.args.max_steps
310
+ steps_in_epoch = self.args.max_steps
311
+ else:
312
+ len_dataloader = len(self.dataloader)
313
+ num_examples = len(self.dataset)
314
+ num_train_epochs = self.args.num_train_epochs
315
+ max_steps = math.ceil(num_train_epochs * len_dataloader)
316
+ steps_in_epoch = len_dataloader
317
+
318
+ self.state.max_steps = max_steps
319
+ self.state.num_train_epochs = num_train_epochs
320
+ self.state.is_local_process_zero = self.is_local_process_zero()
321
+ self.state.is_world_process_zero = self.is_world_process_zero()
322
+
323
+ if self.is_world_process_zero():
324
+ logger.info("***** Running training *****")
325
+ logger.info(" Num examples = {:,}".format(num_examples))
326
+ logger.info(" Num Epochs = {:,}".format(num_train_epochs))
327
+ logger.info(" Instantaneous batch size per device = {:,}".format(self.args.per_device_train_batch_size))
328
+ logger.info(
329
+ " Total train batch size (w. parallel, buffer, distributed & accumulation) = {:,}".format(
330
+ total_train_batch_size
331
+ )
332
+ )
333
+ logger.info(" Gradient Accumulation steps = {:,}".format(self.args.gradient_accumulation_steps))
334
+ logger.info(" Num optimization epochs per batch = {:,}".format(self.finetuning_args.ppo_epochs))
335
+ logger.info(" Total training steps = {:,}".format(max_steps))
336
+ logger.info(" Number of trainable parameters = {:,}".format(count_parameters(self.model)[0]))
337
+
338
+ if self.is_world_process_zero():
339
+ self.writer = iSummaryWriter(
340
+ log_path=os.path.join(self.args.logging_dir, 'my_log'),
341
+ log_name='step_dpo')
342
+ logger.info("#######iSummaryWriter#######")
343
+
344
+ dataiter = iter(self.dataloader)
345
+ loss_meter = AverageMeter()
346
+ reward_meter = AverageMeter()
347
+ gold_reward_meter = AverageMeter()
348
+ self.callback_handler.on_train_begin(self.args, self.state, self.control)
349
+
350
+ for step in tqdm(range(max_steps), disable=not self.is_local_process_zero()):
351
+ try:
352
+ batch = next(dataiter)
353
+ except StopIteration:
354
+ dataiter = iter(self.dataloader)
355
+ batch = next(dataiter)
356
+
357
+ dist.barrier()
358
+
359
+ # Get inputs
360
+ self.model.eval()
361
+ self.tokenizer.padding_side = "right" # change padding side
362
+ queries, responses, rewards = [], [], []
363
+ for idx in range(0, self.config.batch_size, self.config.mini_batch_size):
364
+ mini_batch_queries, mini_batch_responses = self.get_inputs(
365
+ batch[idx : idx + self.config.mini_batch_size]
366
+ )
367
+ mini_batch_rewards = self.get_rewards(mini_batch_queries, mini_batch_responses)
368
+ queries.extend(mini_batch_queries)
369
+ responses.extend(mini_batch_responses)
370
+ rewards.extend(mini_batch_rewards)
371
+
372
+ if self.is_world_process_zero():
373
+ logger.info("#######queries#######")
374
+ logger.info(self.tokenizer.batch_decode(queries, skip_special_tokens=False)[0])
375
+ logger.info("#######responses#######")
376
+ logger.info(self.tokenizer.batch_decode(responses, skip_special_tokens=False)[0])
377
+ logger.info("#######rewards#######")
378
+ logger.info(rewards)
379
+
380
+ dist.barrier()
381
+ logger.info("#######begin ppo-step#######")
382
+
383
+ # Run PPO step
384
+ self.model.train()
385
+ # with torch.autocast(device_type='cuda'):
386
+ with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
387
+ stats = self.step(queries, responses, rewards)
388
+
389
+ # stats = self.step(queries, responses, rewards)
390
+
391
+ logger.info("#######end ppo-step#######")
392
+
393
+ self.tokenizer.padding_side = "left" # restore padding side
394
+ loss_meter.update(float(stats["ppo/loss/total"]), n=len(rewards))
395
+ reward_meter.update(torch.stack(rewards).mean().item(), n=len(rewards))
396
+
397
+ logger.info("#######log_with#######")
398
+ logger.info(self.config.log_with)
399
+
400
+ gold_rewards = None
401
+ if self.config.log_with is not None:
402
+ try:
403
+ batch["query"] = self.tokenizer.batch_decode(queries, skip_special_tokens=True)
404
+ batch["response"] = self.tokenizer.batch_decode(responses, skip_special_tokens=True)
405
+ gold_rewards = get_rewards_from_rule(batch["query"],
406
+ batch["response"])
407
+ gold_reward_meter.update(gold_rewards.mean().cpu().numpy().item(), n=len(gold_rewards))
408
+ self.log_stats(stats, batch, rewards)
409
+ except Exception:
410
+ gold_rewards = None
411
+ logger.warning("Failed to save stats due to unknown errors.")
412
+
413
+ if self.is_world_process_zero():
414
+ logger.info("#######begin ppo-info-log#######")
415
+ for key in stats:
416
+ if key in ['ppo/loss/policy', 'ppo/loss/value',
417
+ 'ppo/loss/total', 'ppo/policy/entropy',
418
+ 'ppo/policy/policykl']:
419
+ self.writer.add_scalar(key,
420
+ np.mean(stats[key]).item(),
421
+ self.state.global_step)
422
+ self.writer.add_scalar('env/reward_mean',
423
+ torch.mean(torch.tensor(rewards)).cpu().numpy().item(),
424
+ self.state.global_step)
425
+ if gold_rewards is not None:
426
+ self.writer.add_scalar('env/gold_reward_mean',
427
+ torch.mean(torch.tensor(gold_rewards)).cpu().numpy().item(),
428
+ self.state.global_step)
429
+ self.writer.record()
430
+ logger.info("#######end ppo-info-log#######")
431
+
432
+ self.state.global_step += 1
433
+ self.callback_handler.on_step_end(self.args, self.state, self.control)
434
+
435
+ if self.is_local_process_zero() and (step + 1) % self.args.logging_steps == 0:
436
+ logger.info("#######begin info-log#######")
437
+ logs = dict(
438
+ loss=round(loss_meter.avg, 4),
439
+ reward=round(reward_meter.avg, 4),
440
+ learning_rate=stats["ppo/learning_rate"],
441
+ learning_rate_optimizer=stats["ppo/learning_rate_optimizer"],
442
+ clipfrac=float(np.mean(stats["ppo/policy/clipfrac"])),
443
+ advantages_mean=float(np.mean(stats["ppo/policy/advantages_mean"])),
444
+ entropy=float(np.mean(stats["ppo/policy/entropy"])),
445
+ val_error=float(np.mean(stats["ppo/val/error"])),
446
+ approxkl=float(np.mean(stats["ppo/policy/approxkl"])),
447
+ policykl=float(np.mean(stats["ppo/policy/policykl"])),
448
+ value_loss=float(np.mean(stats["ppo/loss/value"])),
449
+ kl_coef=float(np.mean(stats["objective/kl_coef"])),
450
+ objective_kl=float(np.mean(stats["objective/kl"])),
451
+ pg_loss=float(np.mean(stats["ppo/loss/policy"])),
452
+ epoch=round(step / steps_in_epoch, 2),
453
+ gold_reward_meter=round(gold_reward_meter.avg, 4),
454
+ )
455
+ tqdm.write(str(logs))
456
+ logs["step"] = step
457
+ self.state.log_history.append(logs)
458
+ self.callback_handler.on_log(self.args, self.state, self.control, logs)
459
+ loss_meter.reset()
460
+ reward_meter.reset()
461
+
462
+ if (step + 1) % self.args.save_steps == 0: # save checkpoint
463
+ self.save_model(
464
+ os.path.join(self.args.output_dir, "{}-{}".format(PREFIX_CHECKPOINT_DIR, self.state.global_step))
465
+ )
466
+ self.callback_handler.on_save(self.args, self.state, self.control)
467
+
468
+ if self.control.should_epoch_stop or self.control.should_training_stop:
469
+ break
470
+
471
+ self.callback_handler.on_train_end(self.args, self.state, self.control)
472
+
473
+ @override
474
+ def create_optimizer(
475
+ self,
476
+ model: "AutoModelForCausalLMWithValueHead",
477
+ training_args: "Seq2SeqTrainingArguments",
478
+ finetuning_args: "FinetuningArguments",
479
+ ) -> "torch.optim.Optimizer":
480
+ optimizer = create_custom_optimizer(model, training_args, finetuning_args)
481
+ if optimizer is None:
482
+ decay_params, nodecay_params = [], []
483
+ decay_param_names = self.get_decay_parameter_names(model)
484
+ for name, param in model.named_parameters():
485
+ if param.requires_grad:
486
+ if name in decay_param_names:
487
+ decay_params.append(param)
488
+ else:
489
+ nodecay_params.append(param)
490
+
491
+ optim_class, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args)
492
+ param_groups = [
493
+ dict(params=nodecay_params, weight_decay=0.0),
494
+ dict(params=decay_params, weight_decay=training_args.weight_decay),
495
+ ]
496
+ optimizer = optim_class(param_groups, **optim_kwargs)
497
+ logger.info('#######optimizer######')
498
+ logger.info(optim_class)
499
+ logger.info('#######optim_kwargs######')
500
+ logger.info(optim_kwargs)
501
+
502
+ return optimizer
503
+
504
+ @override
505
+ def create_scheduler(
506
+ self, training_args: "Seq2SeqTrainingArguments", num_training_steps: int, optimizer: "torch.optim.Optimizer"
507
+ ) -> "torch.optim.lr_scheduler.LRScheduler":
508
+ # create_custom_scheduler(training_args, num_training_steps, optimizer)
509
+ lr_scheduler = get_scheduler(
510
+ training_args.lr_scheduler_type,
511
+ optimizer=optimizer,
512
+ num_warmup_steps=training_args.get_warmup_steps(num_training_steps),
513
+ num_training_steps=num_training_steps,
514
+ )
515
+ return lr_scheduler
516
+
517
+ @torch.no_grad()
518
+ def get_inputs(self, batch: Dict[str, "torch.Tensor"]) -> Tuple[List["torch.Tensor"], List["torch.Tensor"]]:
519
+ r"""
520
+ Generates the model's responses for the given queries.
521
+ """
522
+ if batch["input_ids"].size(0) == 1: # handle llama2 ppo with gradient accumulation > 1
523
+ start_index = (batch["input_ids"][0] != self.tokenizer.pad_token_id).nonzero()[0].item()
524
+ for k, v in batch.items():
525
+ batch[k] = v[:, start_index:]
526
+
527
+ with unwrap_model_for_generation(self.model, self.accelerator) as unwrapped_model:
528
+ unwrapped_model: "AutoModelForCausalLMWithValueHead" = self.accelerator.unwrap_model(self.model)
529
+ if self.model_args.upcast_layernorm:
530
+ layernorm_params = dump_layernorm(unwrapped_model)
531
+
532
+ generate_output: "torch.Tensor" = unwrapped_model.generate(
533
+ generation_config=self.generation_config, logits_processor=get_logits_processor(),
534
+ input_ids=batch['input_ids']
535
+ )
536
+ if self.model_args.upcast_layernorm:
537
+ restore_layernorm(unwrapped_model, layernorm_params)
538
+
539
+ query = batch["input_ids"].detach().cpu()
540
+ response = generate_output[:, batch["input_ids"].size(-1) :].detach().cpu()
541
+ queries, responses = [], []
542
+ for i in range(len(query)):
543
+ query_start_index = (query[i] != self.tokenizer.pad_token_id).nonzero()[0].item()
544
+ response_indexes = (response[i] != self.tokenizer.pad_token_id).nonzero()
545
+
546
+ if len(response_indexes) == 0: # allow empty response
547
+ response_length = 1
548
+ elif self.tokenizer.eos_token_id == self.tokenizer.pad_token_id: # include eos token
549
+ response_length = response_indexes[-1].item() + 2
550
+ else:
551
+ response_length = response_indexes[-1].item() + 1
552
+
553
+ queries.append(query[i, query_start_index:]) # remove padding from left
554
+ responses.append(response[i, :response_length]) # remove padding from right
555
+
556
+ return queries, responses
557
+
558
+ @torch.no_grad()
559
+ def get_rewards(
560
+ self,
561
+ queries: List["torch.Tensor"],
562
+ responses: List["torch.Tensor"],
563
+ **kwargs
564
+ ) -> List["torch.Tensor"]:
565
+ r"""
566
+ Computes scores using the given reward model.
567
+
568
+ Both inputs and outputs are put on CPU.
569
+ """
570
+ if self.finetuning_args.reward_model_type == "api":
571
+ query_messages = self.tokenizer.batch_decode(queries,
572
+ skip_special_tokens=True)
573
+ response_messages = self.tokenizer.batch_decode(responses,
574
+ skip_special_tokens=True)
575
+ if 'mini_batch_answers' in kwargs:
576
+ mini_batch_answers = kwargs['mini_batch_answers'].detach().cpu()
577
+ answer_messages = self.tokenizer.batch_decode(mini_batch_answers,
578
+ skip_special_tokens=True)
579
+ return get_rewards_from_fn(query_messages,
580
+ response_messages,
581
+ answers=answer_messages)
582
+ else:
583
+ return get_rewards_from_fn(query_messages,
584
+ response_messages)
585
+
586
+ batch: Dict[str, "torch.Tensor"] = self.prepare_model_inputs(queries, responses)
587
+ unwrapped_model: "AutoModelForCausalLMWithValueHead" = self.accelerator.unwrap_model(self.model)
588
+
589
+ if self.finetuning_args.reward_model_type == "lora":
590
+ replace_model(unwrapped_model, target="reward")
591
+ reward_model = self.model
592
+ else:
593
+ reward_model = self.reward_model
594
+
595
+ with unwrap_model_for_generation(reward_model, self.accelerator), self.amp_context: # support bf16
596
+ values: "torch.Tensor" = reward_model(**batch, return_dict=True, use_cache=False)[-1]
597
+
598
+ if self.finetuning_args.reward_model_type == "lora":
599
+ replace_model(unwrapped_model, target="default")
600
+
601
+ rewards = values.gather(dim=-1, index=(batch["attention_mask"].sum(dim=-1, keepdim=True) - 1))
602
+ return rewards.float().detach() # use fp32 type
603
+
604
+ @override
605
+ @PPODecorators.empty_device_cache()
606
+ def batched_forward_pass(
607
+ self,
608
+ model: "AutoModelForCausalLMWithValueHead",
609
+ queries: "torch.Tensor",
610
+ responses: "torch.Tensor",
611
+ model_inputs: Dict[str, Any],
612
+ return_logits: bool = False,
613
+ response_masks: Optional["torch.Tensor"] = None,
614
+ ) -> Tuple["torch.Tensor", Optional["torch.Tensor"], "torch.Tensor", "torch.Tensor"]:
615
+ r"""
616
+ Calculates model outputs in multiple batches.
617
+
618
+ Subclass and override to inject custom behavior.
619
+ """
620
+ bs = len(queries)
621
+ fbs = self.config.mini_batch_size
622
+ all_logprobs = []
623
+ all_logits = []
624
+ all_masks = []
625
+ all_values = []
626
+
627
+ for i in range(math.ceil(bs / fbs)):
628
+ input_kwargs = {key: value[i * fbs : (i + 1) * fbs] for key, value in model_inputs.items()}
629
+ query_batch = queries[i * fbs : (i + 1) * fbs]
630
+ response_batch = responses[i * fbs : (i + 1) * fbs]
631
+ if response_masks is not None:
632
+ response_masks_batch = response_masks[i * fbs : (i + 1) * fbs]
633
+ input_ids = input_kwargs["input_ids"]
634
+ attention_mask = input_kwargs["attention_mask"]
635
+
636
+ with self.amp_context: # support bf16
637
+ logits, _, values = model(**input_kwargs, return_dict=True, use_cache=False)
638
+
639
+ logprobs = logprobs_from_logits(logits[:, :-1, :], input_ids[:, 1:])
640
+ masks = torch.zeros_like(attention_mask)
641
+ masks[:, :-1] = attention_mask[:, 1:]
642
+
643
+ for j in range(len(query_batch)):
644
+ start = len(query_batch[j]) - 1
645
+ if attention_mask[j, 0] == 0: # offset left padding
646
+ start += attention_mask[j, :].nonzero()[0].item()
647
+ end = start + len(response_batch[j])
648
+
649
+ if response_masks is not None:
650
+ response_masks_batch = torch.cat((torch.zeros_like(query_batch[j]), response_masks_batch[j]))[1:]
651
+
652
+ masks[j, :start] = 0
653
+ masks[j, end:] = 0
654
+ if response_masks is not None:
655
+ masks[j, start:end] = masks[j, start:end] * response_masks_batch[j][start:end]
656
+
657
+ if return_logits:
658
+ all_logits.append(logits)
659
+ else:
660
+ del logits
661
+
662
+ all_values.append(values)
663
+ all_logprobs.append(logprobs)
664
+ all_masks.append(masks)
665
+
666
+ return (
667
+ torch.cat(all_logprobs),
668
+ torch.cat(all_logits)[:, :-1] if return_logits else None,
669
+ torch.cat(all_values)[:, :-1],
670
+ torch.cat(all_masks)[:, :-1],
671
+ )
672
+
673
+ @override
674
+ def save_model(self, output_dir: Optional[str] = None) -> None:
675
+ r"""
676
+ Saves model checkpoint.
677
+
678
+ Subclass and override to inject custom behavior.
679
+ """
680
+ if output_dir is None:
681
+ output_dir = self.args.output_dir
682
+
683
+ if self.is_fsdp_enabled or self.is_deepspeed_enabled:
684
+ try:
685
+ state_dict = self.accelerator.get_state_dict(self.model) # must be called at all ranks
686
+ if self.args.should_save:
687
+ self._save(output_dir, state_dict=state_dict)
688
+ except ValueError:
689
+ logger.warning(
690
+ " stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead,"
691
+ " use zero_to_fp32.py to recover weights"
692
+ )
693
+ if self.args.should_save:
694
+ self._save(output_dir, state_dict={})
695
+ # remove the dummy state_dict
696
+ remove_dummy_checkpoint(self.args.should_save, output_dir, [WEIGHTS_NAME, SAFE_WEIGHTS_NAME])
697
+ self.model.save_checkpoint(output_dir)
698
+
699
+ elif self.args.should_save:
700
+ unwrapped_model: "AutoModelForCausalLMWithValueHead" = self.accelerator.unwrap_model(self.model)
701
+ self._save(output_dir, state_dict=unwrapped_model.state_dict())
702
+
703
+ def propagate_args_to_deepspeed(self, auto_find_batch_size=False):
704
+ """
705
+ Sets values in the deepspeed plugin based on the Trainer args
706
+ """
707
+ from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig
708
+
709
+ ds_plugin = self.training_args.deepspeed_plugin
710
+
711
+ ds_plugin.hf_ds_config = HfTrainerDeepSpeedConfig(ds_plugin.hf_ds_config.config)
712
+ ds_plugin.deepspeed_config = ds_plugin.hf_ds_config.config
713
+ ds_plugin.hf_ds_config.trainer_config_process(self.training_args, auto_find_batch_size)
714
+
715
+ logger.info("####trainer_config_process####")
716
+ logger.info(self.training_args.deepspeed_plugin.deepspeed_config)
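
For context on `get_rewards` above: when a local reward model is used, the scalar reward for each sequence is read from the value-head output at the last non-padded position (the `values.gather(...)` on `attention_mask.sum(-1) - 1`). Below is a small, self-contained sketch of that indexing; the tensors are made up and not part of the uploaded code.

```python
import torch

# Hedged sketch (not part of the uploaded files): picking one scalar per sequence
# from a [batch, seq_len] value-head output at the last attended token, mirroring
# the gather used in CustomPPOTrainer.get_rewards.
values = torch.randn(4, 10)                          # value-head output per token
attention_mask = torch.ones(4, 10, dtype=torch.long)
attention_mask[0, 7:] = 0                            # pretend sequence 0 is padded after 7 tokens

last_index = attention_mask.sum(dim=-1, keepdim=True) - 1   # [batch, 1] index of the last real token
rewards = values.gather(dim=-1, index=last_index)           # [batch, 1] scalar reward per sequence
print(rewards.squeeze(-1).float())
```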
ppo_trainer.py ADDED
@@ -0,0 +1,1646 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import inspect
15
+ import math
16
+ import json
17
+ import os
18
+ import time
19
+ import typing
20
+ import warnings
21
+ import copy
22
+ from contextlib import nullcontext
23
+ from typing import Callable, List, Optional, Union
24
+ from transformers.optimization import get_scheduler
25
+
26
+ import datasets
27
+ import numpy as np
28
+ import torch
29
+ import torch.nn.functional as F
30
+ from accelerate import Accelerator
31
+ from accelerate.utils import ProjectConfiguration, gather_object, is_deepspeed_available
32
+ from datasets import Dataset
33
+ from huggingface_hub import whoami
34
+ from packaging import version
35
+ from torch.optim import Adam
36
+ from transformers import (
37
+ DataCollatorForLanguageModeling,
38
+ PreTrainedTokenizer,
39
+ PreTrainedTokenizerBase,
40
+ PreTrainedTokenizerFast,
41
+ )
42
+
43
+ from trl.core import (
44
+ WANDB_PADDING,
45
+ PPODecorators,
46
+ clip_by_value,
47
+ convert_to_scalar,
48
+ entropy_from_logits,
49
+ flatten_dict,
50
+ logprobs_from_logits,
51
+ masked_mean,
52
+ masked_var,
53
+ masked_whiten,
54
+ set_seed,
55
+ stack_dicts,
56
+ stats_to_np,
57
+ )
58
+ from trl.import_utils import is_npu_available, is_torch_greater_2_0, is_xpu_available
59
+ from trl.models import (
60
+ SUPPORTED_ARCHITECTURES,
61
+ PreTrainedModelWrapper,
62
+ create_reference_model,
63
+ unwrap_model_for_generation,
64
+ )
65
+ from trl.trainer import AdaptiveKLController, BaseTrainer, FixedKLController, RunningMoments
66
+ from my_ppo_config import PPOConfig
67
+ from transformers.trainer_pt_utils import _get_learning_rate
68
+
69
+ if is_deepspeed_available():
70
+ import deepspeed
71
+
72
+ import logging
73
+
74
+ from accelerate.state import AcceleratorState
75
+ from accelerate.utils import DeepSpeedOptimizerWrapper
76
+ from transformers import (
77
+ DataCollatorWithPadding,
78
+ PreTrainedTokenizer,
79
+ ProcessorMixin,
80
+ Seq2SeqTrainingArguments,
81
+ TrainerCallback,
82
+ )
83
+
84
+ logging.basicConfig()
85
+ logger = logging.getLogger(__name__)
86
+ logger.setLevel(logging.INFO)
87
+
88
+ MODEL_CARD_TEMPLATE = """---
89
+ license: apache-2.0
90
+ library_name: transformers
91
+ tags:
92
+ - trl
93
+ - ppo
94
+ - transformers
95
+ - reinforcement-learning
96
+ ---
97
+
98
+ # {model_name}
99
+
100
+ This is a [TRL language model](https://github.com/huggingface/trl) that has been fine-tuned with reinforcement learning to
101
+ guide the model outputs according to a value, function, or human feedback. The model can be used for text generation.
102
+
103
+ ## Usage
104
+
105
+ To use this model for inference, first install the TRL library:
106
+
107
+ ```bash
108
+ python -m pip install trl
109
+ ```
110
+
111
+ You can then generate text as follows:
112
+
113
+ ```python
114
+ from transformers import pipeline
115
+
116
+ generator = pipeline("text-generation", model="{model_id}")
117
+ outputs = generator("Hello, my llama is cute")
118
+ ```
119
+
120
+ If you want to use the model for training or to obtain the outputs from the value head, load the model as follows:
121
+
122
+ ```python
123
+ from transformers import AutoTokenizer
124
+ from trl import AutoModelForCausalLMWithValueHead
125
+
126
+ tokenizer = AutoTokenizer.from_pretrained("{model_id}")
127
+ model = AutoModelForCausalLMWithValueHead.from_pretrained("{model_id}")
128
+
129
+ inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
130
+ outputs = model(**inputs, labels=inputs["input_ids"])
131
+ ```
132
+ """
133
+
134
+ def get_eval_ds_config(deepspeed_states, offload=None, stage=3):
135
+ deepspeed_states = AcceleratorState().deepspeed_plugin
136
+
137
+ device = "cpu" if offload else "none"
138
+ zero_opt_dict = {
139
+ "stage": stage,
140
+ "stage3_param_persistence_threshold": 1e4,
141
+ "offload_param": {
142
+ "device": device
143
+ }
144
+ }
145
+ return {
146
+ "train_micro_batch_size_per_gpu": deepspeed_states.deepspeed_config['train_micro_batch_size_per_gpu'],
147
+ "steps_per_print": 10,
148
+ "zero_optimization": zero_opt_dict,
149
+ "bf16": {
150
+ "enabled": True
151
+ },
152
+ "gradient_clipping": 1.0,
153
+ "prescale_gradients": False,
154
+ "wall_clock_breakdown": False
155
+ }
156
+
157
+
158
+ class PPOTrainer(BaseTrainer):
159
+ """
160
+ The PPOTrainer uses Proximal Policy Optimization to optimise language models.
161
+ Note, this trainer is heavily inspired by the original OpenAI learning to summarize work here:
162
+ https://github.com/openai/summarize-from-feedback
163
+
164
+ Attributes:
165
+ **config** (`PPOConfig`) -- Configuration object for PPOTrainer. Check the documentation of `PPOConfig` for more
166
+ details.
167
+ **model** (`PreTrainedModelWrapper`) -- Model to be optimized, Hugging Face transformer model with a value head.
168
+ Check the documentation of `PreTrainedModelWrapper` for more details.
169
+ **ref_model** (`PreTrainedModelWrapper`, *optional*) -- Reference model to be used for KL penalty, Hugging Face
170
+ transformer model with a causal language modelling head. Check the documentation of `PreTrainedModelWrapper`
171
+ for more details. If no reference model is provided, the trainer will create a reference model with the same
172
+ architecture as the model to be optimized with shared layers.
173
+ **tokenizer** (`PreTrainedTokenizerBase`) -- Tokenizer to be used for encoding the
174
+ data. Check the documentation of `transformers.PreTrainedTokenizer` and
175
+ `transformers.PreTrainedTokenizerFast` for more details.
176
+ **dataset** (Union[`torch.utils.data.Dataset`, `datasets.Dataset`], *optional*) -- PyTorch dataset or Hugging
177
+ Face dataset. This is used to create a PyTorch dataloader. If no dataset is provided, the dataloader must be
178
+ created outside the trainer; users need to design their own dataloader and make sure the batch
179
+ size that is used is the same as the one specified in the configuration object.
180
+ **optimizer** (`torch.optim.Optimizer`, *optional*) -- Optimizer to be used for training. If no optimizer is
181
+ provided, the trainer will create an Adam optimizer with the learning rate specified in the configuration
182
+ object.
183
+ **data_collator** (DataCollatorForLanguageModeling, *optional*) -- Data collator to be used for training and
184
+ passed along the dataloader
185
+ **num_shared_layers** (int, *optional*) -- Number of layers to be shared between the model and the reference
186
+ model, if no reference model is passed. If no number is provided, all the layers will be shared.
187
+ **lr_scheduler** (`torch.optim.lr_scheduler`, *optional*) -- Learning rate scheduler to be used for training.
188
+ """
189
+
190
+ _tag_names = ["trl", "ppo"]
191
+
192
+ def __init__(
193
+ self,
194
+ training_args: Optional[Seq2SeqTrainingArguments] = None,
195
+ config: Optional[PPOConfig] = None,
196
+ model: Optional[PreTrainedModelWrapper] = None,
197
+ ref_model: Optional[PreTrainedModelWrapper] = None,
198
+ tokenizer: Optional[PreTrainedTokenizerBase] = None,
199
+ dataset: Optional[Union[torch.utils.data.Dataset, Dataset]] = None,
200
+ optimizer: Optional[torch.optim.Optimizer] = None,
201
+ data_collator: Optional[typing.Callable] = None,
202
+ num_shared_layers: Optional[int] = None,
203
+ lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
204
+ training_data_collator: Optional[typing.Callable] = None,
205
+ num_training_steps: Optional[int] = None
206
+ ):
207
+ """
208
+ Initialize PPOTrainer.
209
+
210
+ Args:
211
+ config (`PPOConfig`):
212
+ Configuration object for PPOTrainer. Check the documentation of `PPOConfig` for more details.
213
+ model (`PreTrainedModelWrapper`):
214
+ Hugging Face transformer model with a value head.
215
+ ref_model (`PreTrainedModelWrapper`):
216
+ Hugging Face transformer model with a causal language modelling head. Used for KL penalty
217
+ tokenizer (`transformers.PreTrainedTokenizerBase`):
218
+ Hugging Face tokenizer
219
+ dataset (Optional[Union[`torch.utils.data.Dataset`, `datasets.Dataset`]]):
220
+ PyTorch dataset or Hugging Face dataset. If a Hugging Face dataset is passed, the dataset
221
+ will be preprocessed by removing the columns that are not used by the model. If none is passed,
222
+ a warning will be raised in a multi-GPU setting.
223
+ optimizer (`Optional[torch.optim.Optimizer]`):
224
+ Optimizer used for training. If `None`, `Adam` is used by default.
225
+ data_collator (Optional[function]):
226
+ Data collator function that is going to be used for `prepare_dataloader` method. Note this collator
227
+ is different from the one we use for training. Pass a valid `training_data_collator` instead.
228
+ num_shared_layers (Optional[int]):
229
+ Number of shared layers between the model and the reference model. If `None`, all layers are shared.
230
+ Used only if `ref_model` is `None`.
231
+ lr_scheduler (`Optional[torch.optim.lr_scheduler]`):
232
+ Learning rate scheduler used for training.
233
+ training_data_collator (Optional[function]):
234
+ Custom data collator used for training.
235
+ """
236
+ warnings.warn(
237
+ "`PPOTrainer` is deprecated and will be removed in trl v0.12. Please use `PPOv2Trainer` instead.",
238
+ FutureWarning,
239
+ )
240
+ super().__init__(config)
241
+
242
+ # initial seed for reproducible experiments
243
+ set_seed(config.seed)
244
+ self.training_args = training_args
245
+ self.num_training_steps = num_training_steps
246
+
247
+ # Step 0: check positional arguments validity
248
+ if not isinstance(config, PPOConfig):
249
+ raise ValueError(f"config must be a PPOConfig, got {type(config)}")
250
+ if not isinstance(tokenizer, (PreTrainedTokenizerBase)):
251
+ raise ValueError(
252
+ f"tokenizer must be a PreTrainedTokenizerBase like a PreTrainedTokenizer or a PreTrainedTokenizerFast, got {type(tokenizer)}"
253
+ )
254
+ if not isinstance(model, (SUPPORTED_ARCHITECTURES)):
255
+ raise ValueError(
256
+ f"model must be a PreTrainedModelWrapper, got {type(model)} - supported architectures are: {SUPPORTED_ARCHITECTURES}"
257
+ )
258
+ # Step 1: Initialize Accelerator
259
+ self.accelerator = Accelerator(
260
+ log_with=config.log_with,
261
+ gradient_accumulation_steps=config.gradient_accumulation_steps,
262
+ project_config=ProjectConfiguration(**config.project_kwargs),
263
+ **config.accelerator_kwargs,
264
+ )
265
+
266
+ logger.info("##step_scheduler_with_optimizer##")
267
+ logger.info(self.accelerator.step_scheduler_with_optimizer)
268
+
269
+ logger.info("###distributed_type###")
270
+ logger.info(self.accelerator.distributed_type)
271
+
272
+ self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None
273
+ if self.is_deepspeed_enabled and getattr(self.training_args, "hf_deepspeed_config", None) is None:
274
+ self.propagate_args_to_deepspeed()
275
+
276
+ # Step 1.1 Runtime variables filled by the accelerator
277
+ config.world_size = self.accelerator.num_processes
278
+ config.global_backward_batch_size = config.backward_batch_size * config.world_size
279
+ config.global_batch_size = config.batch_size * config.world_size
280
+
281
+ self.model = model
282
+ self.model_params = filter(lambda p: p.requires_grad, self.model.parameters())
283
+ self.is_encoder_decoder = hasattr(self.model, "is_encoder_decoder")
284
+ self.is_peft_model = getattr(self.model, "is_peft_model", False)
285
+ config.is_encoder_decoder = self.is_encoder_decoder
286
+ config.is_peft_model = self.is_peft_model
287
+
288
+ is_using_tensorboard = config.log_with is not None and config.log_with == "tensorboard"
289
+ # self.accelerator.init_trackers(
290
+ # config.tracker_project_name,
291
+ # config=dict(trl_ppo_trainer_config=config.to_dict()) if not is_using_tensorboard else config.to_dict(),
292
+ # init_kwargs=config.tracker_kwargs,
293
+ # )
294
+ self.is_using_text_environment = getattr(config, "use_text_environment", False)
295
+
296
+ if isinstance(ref_model, SUPPORTED_ARCHITECTURES):
297
+ self.ref_model = ref_model
298
+ if num_shared_layers is not None:
299
+ warnings.warn(
300
+ "num_shared_layers is ignored when ref_model is provided. Two different models are used for the "
301
+ "model and the reference model and no layers are shared.",
302
+ UserWarning,
303
+ )
304
+ elif ref_model is None and not self.is_peft_model:
305
+ self.ref_model = create_reference_model(self.model, num_shared_layers=num_shared_layers)
306
+ elif self.is_peft_model:
307
+ self.ref_model = None
308
+ else:
309
+ raise ValueError(
310
+ f"ref_model must be a PreTrainedModelWrapper or `None`, got {type(ref_model)} - supported "
311
+ f"architectures are: {SUPPORTED_ARCHITECTURES} "
312
+ )
313
+ self.optional_peft_ctx = (
314
+ self.accelerator.unwrap_model(self.model).pretrained_model.disable_adapter
315
+ if self.is_peft_model
316
+ else nullcontext
317
+ )
318
+
319
+ if not (isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast)):
320
+ raise ValueError(
321
+ "tokenizer must be a transformers.PreTrainedTokenizer or transformers.PreTrainedTokenizerFast"
322
+ )
323
+ self.tokenizer = tokenizer
324
+
325
+ if dataset is not None and not (isinstance(dataset, torch.utils.data.Dataset) or isinstance(dataset, Dataset)):
326
+ raise ValueError("dataset must be a torch.utils.data.Dataset or datasets.Dataset")
327
+ elif dataset is None:
328
+ warnings.warn(
329
+ "No dataset is provided. Make sure to set config.batch_size to the correct value before training.",
330
+ UserWarning,
331
+ )
332
+ self.dataset = dataset
333
+ self._signature_columns = None
334
+ if self.dataset is not None:
335
+ self.dataloader = self.prepare_dataloader(self.dataset, data_collator)
336
+ elif self.dataset is None and self.accelerator.num_processes > 1:
337
+ warnings.warn(
338
+ "No dataset is provided. In a multi-GPU setting, this will lead to an error. You should"
339
+ " prepare your dataloader yourself with `dataloader = ppo_trainer.accelerator.prepare(dataloader)`"
340
+ " and using `torch.utils.data.DataLoader`, or pass a dataset to the `PPOTrainer`. Please "
341
+ " refer to the documentation for more details.",
342
+ UserWarning,
343
+ )
344
+ self.dataloader = None
345
+ else:
346
+ self.dataloader = None
347
+
348
+ # Step 3: Initialize optimizer and data collator
349
+ if training_data_collator is None:
350
+ self.data_collator = DataCollatorForLanguageModeling(self.tokenizer, mlm=False)
351
+ else:
352
+ self.data_collator = training_data_collator
353
+ if optimizer is None:
354
+ self.optimizer = Adam(
355
+ filter(lambda p: p.requires_grad, self.model.parameters()),
356
+ lr=self.config.learning_rate,
357
+ )
358
+ else:
359
+ self.optimizer = optimizer
360
+
361
+ self.lr_scheduler = lr_scheduler
362
+ if self.lr_scheduler is not None:
363
+ lr_scheduler_class = (
364
+ torch.optim.lr_scheduler._LRScheduler
365
+ if not is_torch_greater_2_0()
366
+ else torch.optim.lr_scheduler.LRScheduler
367
+ )
368
+
369
+ # if not isinstance(self.lr_scheduler, lr_scheduler_class):
370
+ # raise ValueError(
371
+ # "lr_scheduler must be a torch.optim.lr_scheduler._LRScheduler or torch.optim.lr_scheduler.LRScheduler (for torch >= 2.0)"
372
+ # )
373
+
374
+ if self.config.adap_kl_ctrl:
375
+ self.kl_ctl = AdaptiveKLController(self.config.init_kl_coef, self.config.target, self.config.horizon)
376
+ else:
377
+ self.kl_ctl = FixedKLController(self.config.init_kl_coef)
378
+
379
+ # Safety checkers for DS integration
380
+ is_deepspeed_used = self.accelerator.distributed_type == "DEEPSPEED" and hasattr(
381
+ self.accelerator.state, "deepspeed_plugin"
382
+ )
383
+
384
+ self.is_deepspeed_used = is_deepspeed_used
385
+ logger.info('#### is_deepspeed_used ####')
386
+ logger.info(self.is_deepspeed_used)
387
+
388
+ if config.gradient_checkpointing:
389
+ self.model.gradient_checkpointing_enable()
390
+
391
+ if hasattr(self.model, "enable_input_require_grads"):
392
+ self.model.enable_input_require_grads()
393
+ else:
394
+ # For backward compatibility with older versions of transformers
395
+ def make_inputs_require_grad(module, input, output):
396
+ output.requires_grad_(True)
397
+
398
+ self.model.pretrained_model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
399
+
400
+ if hasattr(self.lr_scheduler, "step"):
401
+ logger.info('###self.lr_scheduler has step###')
402
+ (
403
+ self.model,
404
+ self.optimizer,
405
+ self.data_collator,
406
+ self.dataloader,
407
+ ) = self.accelerator.prepare(
408
+ self.model,
409
+ self.optimizer,
410
+ self.data_collator,
411
+ self.dataloader,
412
+ )
413
+ else:
414
+ logger.info('###self.lr_scheduler has no step###')
415
+ (
416
+ self.model,
417
+ self.optimizer,
418
+ self.data_collator,
419
+ self.dataloader,
420
+ # self.lr_scheduler,
421
+ ) = self.accelerator.prepare(
422
+ self.model,
423
+ self.optimizer,
424
+ self.data_collator,
425
+ self.dataloader,
426
+ # self.lr_scheduler,
427
+ )
428
+
429
+ logger.info('#####self.optimizer-defaults#####')
430
+ logger.info(self.optimizer.optimizer.optimizer.defaults)
431
+ logger.info('#####self.optimizer#####')
432
+ logger.info(self.optimizer)
433
+ logger.info('#####self.optimizer.optimizer#####')
434
+ logger.info(self.optimizer.optimizer)
435
+
436
+ if is_deepspeed_used and isinstance(self.optimizer, DeepSpeedOptimizerWrapper):
437
+ self.lr_scheduler = get_scheduler(
438
+ training_args.lr_scheduler_type,
439
+ optimizer=self.optimizer.optimizer.optimizer,
440
+ num_warmup_steps=self.training_args.get_warmup_steps(self.num_training_steps),
441
+ num_training_steps=self.num_training_steps
442
+ )
443
+ logger.info('#####self.lr_scheduler.lr_lambdas#####')
444
+ logger.info(self.lr_scheduler.lr_lambdas)
445
+
446
+ if is_deepspeed_used:
447
+ # Quantized models are already set on the correct device
448
+ if not self.is_peft_model and not (
449
+ getattr(self.ref_model.pretrained_model, "is_loaded_in_8bit", False)
450
+ or getattr(self.ref_model.pretrained_model, "is_loaded_in_4bit", False)
451
+ ):
452
+ self.ref_model = self.my_prepare_deepspeed(self.ref_model)
453
+ else:
454
+ self.ref_model = self.accelerator.prepare(self.ref_model)
455
+
456
+ logger.info('#####self.lr_scheduler#####')
457
+ logger.info(self.lr_scheduler)
458
+
459
+ # In a distributed setup, only logging needs to be performed on the main process
460
+ # check: https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html
461
+ # or: https://discuss.pytorch.org/t/use-distributed-data-parallel-correctly/82500/11
462
+ self.is_distributed = self.accelerator.num_processes > 1
463
+
464
+ # init the current step
465
+ self.current_step = 0
466
+
467
+ # init variables for pushing model to hub
468
+ if config.push_to_hub_if_best_kwargs:
469
+ if "repo_id" not in config.push_to_hub_if_best_kwargs:
470
+ raise ValueError("You have to specify repo_id in order to push the model to the hub!")
471
+ self.push_to_hub_kwargs = config.push_to_hub_if_best_kwargs
472
+ self.compare_step = 0
473
+ self.highest_reward = torch.tensor(-float("inf"))
474
+
475
+ # post process for PP
476
+ if not getattr(self.model, "is_sequential_parallel", False):
477
+ self.current_device = self.accelerator.device
478
+ else:
479
+ if is_torch_xpu_available():
480
+ self.current_device = torch.device("xpu:0")
481
+ elif is_torch_npu_available():
482
+ self.current_device = torch.device("npu:0")
483
+ else:
484
+ self.current_device = torch.device("cuda:0")
485
+
486
+ PPODecorators.optimize_device_cache = self.config.optimize_device_cache
487
+
488
+ self.running = RunningMoments(self.accelerator)
489
+
490
+ def _filter_kwargs(self, kwargs, target_func):
491
+ """
492
+ filter the keyword arguments that are supported by the target function.
493
+
494
+ Args:
495
+ kwargs (dict):
496
+ Keyword arguments
497
+ target_func (function):
498
+ Target function
499
+ """
500
+ return {k: v for k, v in kwargs.items() if k in inspect.signature(target_func).parameters.keys()}
501
+
502
+ def prepare_dataloader(self, dataset: Union[torch.utils.data.Dataset, Dataset], data_collator=None):
503
+ """
504
+ Prepare the dataloader for training.
505
+
506
+ Args:
507
+ dataset (Union[`torch.utils.data.Dataset`, `datasets.Dataset`]):
508
+ PyTorch dataset or Hugging Face dataset. If a Hugging Face dataset is passed, the dataset
509
+ will be preprocessed by removing the columns that are not used by the model.
510
+ data_collator (Optional[function]):
511
+ Data collator function.
512
+
513
+ Returns:
514
+ `torch.utils.data.DataLoader`: PyTorch dataloader
515
+ """
516
+ if isinstance(dataset, Dataset):
517
+ dataset = self._remove_unused_columns(dataset)
518
+ dataloader = torch.utils.data.DataLoader(
519
+ dataset,
520
+ batch_size=self.config.batch_size,
521
+ collate_fn=data_collator,
522
+ shuffle=True,
523
+ drop_last=True,
524
+ )
525
+ return dataloader
526
+
527
+ # Adapted from transformers.Trainer._set_signature_columns_if_needed
528
+ def _set_signature_columns_if_needed(self):
529
+ if self._signature_columns is None:
530
+ # Inspect model forward signature to keep only the arguments it accepts.
531
+ signature = inspect.signature(self.model.forward)
532
+ self._signature_columns = list(signature.parameters.keys())
533
+ # label => sentiment | we need query and response for logging purpose
534
+ self._signature_columns += ["label", "query", "response"]
535
+
536
+ # Adapted from transformers.Trainer._remove_unused_columns
537
+ def _remove_unused_columns(self, dataset: "Dataset"):
538
+ if not self.config.remove_unused_columns:
539
+ return dataset
540
+ self._set_signature_columns_if_needed()
541
+ signature_columns = self._signature_columns
542
+
543
+ ignored_columns = list(set(dataset.column_names) - set(signature_columns))
544
+
545
+ columns = [k for k in signature_columns if k in dataset.column_names]
546
+
547
+ if version.parse(datasets.__version__) < version.parse("1.4.0"):
548
+ dataset.set_format(
549
+ type=dataset.format["type"],
550
+ columns=columns,
551
+ format_kwargs=dataset.format["format_kwargs"],
552
+ )
553
+ return dataset
554
+ else:
555
+ return dataset.remove_columns(ignored_columns)
556
+
557
+ def generate(
558
+ self,
559
+ query_tensor: Union[torch.Tensor, List[torch.Tensor]],
560
+ length_sampler: Optional[Callable] = None,
561
+ batch_size: int = 4,
562
+ return_prompt: bool = True,
563
+ generate_ref_response: bool = False,
564
+ **generation_kwargs,
565
+ ):
566
+ """
567
+ Generate response with the model given the query tensor.
568
+ call the `generate` method of the model.
569
+
570
+ Args:
571
+ query_tensor (`torch.LongTensor`):
572
+ A tensor of shape (`seq_len`) containing query tokens or a list of tensors of shape (`seq_len`).
573
+ length_sampler (`Callable`, *optional*):
574
+ Callable that returns the number of newly generated tokens.
575
+ batch_size (`int`, *optional*):
576
+ Batch size used for generation, defaults to `4`.
577
+ return_prompt (`bool`, *optional*):
578
+ If set to `False` the prompt is not returned but only the newly generated tokens, defaults to `True`.
579
+ generate_ref_response (`bool`, *optional*):
580
+ If set to `True` the reference response is also generated, defaults to `False`.
581
+ generation_kwargs (dict[str, Any]):
582
+ Keyword arguments for generation.
583
+
584
+ Returns:
585
+ `torch.LongTensor`: A tensor of shape (`batch_size`, `gen_len`) containing response tokens.
586
+ """
587
+ if generate_ref_response:
588
+ ref_model = self.model if self.is_peft_model else self.ref_model
589
+ if isinstance(query_tensor, List):
590
+ response = self._generate_batched(
591
+ self.model,
592
+ query_tensor,
593
+ length_sampler=length_sampler,
594
+ batch_size=batch_size,
595
+ return_prompt=return_prompt,
596
+ **generation_kwargs,
597
+ )
598
+ if generate_ref_response:
599
+ ref_response = self._generate_batched(
600
+ ref_model,
601
+ query_tensor,
602
+ length_sampler=length_sampler,
603
+ batch_size=batch_size,
604
+ return_prompt=return_prompt,
605
+ **generation_kwargs,
606
+ )
607
+
608
+ else:
609
+ if len(query_tensor.shape) == 2:
610
+ raise ValueError(
611
+ "query_tensor must be a tensor of shape (`seq_len`) or a list of tensors of shape (`seq_len`)"
612
+ )
613
+
614
+ if length_sampler is not None:
615
+ generation_kwargs["max_new_tokens"] = length_sampler()
616
+
617
+ with unwrap_model_for_generation(self.model, self.accelerator) as unwrapped_model:
618
+ response = unwrapped_model.generate(input_ids=query_tensor.unsqueeze(dim=0), **generation_kwargs)
619
+
620
+ if generate_ref_response:
621
+ with unwrap_model_for_generation(
622
+ ref_model, self.accelerator, is_peft_model=self.is_peft_model
623
+ ) as unwrapped_model:
624
+ ref_response = unwrapped_model.generate(
625
+ input_ids=query_tensor.unsqueeze(dim=0), **generation_kwargs
626
+ )
627
+
628
+ if not return_prompt and not self.is_encoder_decoder:
629
+ response = response[:, query_tensor.shape[0] :]
630
+ if generate_ref_response:
631
+ ref_response = ref_response[:, query_tensor.shape[0] :]
632
+
633
+ if generate_ref_response:
634
+ return response, ref_response
635
+ return response
636
+
637
+ def _generate_batched(
638
+ self,
639
+ model: PreTrainedModelWrapper,
640
+ query_tensors: List[torch.Tensor],
641
+ length_sampler: Optional[Callable] = None,
642
+ batch_size: int = 4,
643
+ return_prompt: bool = True,
644
+ pad_to_multiple_of: Optional[int] = None,
645
+ remove_padding: bool = True,
646
+ **generation_kwargs,
647
+ ):
648
+ outputs = []
649
+
650
+ padding_side_default = self.tokenizer.padding_side
651
+ if not self.is_encoder_decoder:
652
+ self.tokenizer.padding_side = "left"
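+ # Decoder-only models are padded on the left so every prompt ends at the same position;
+ # generation then appends new tokens right after each prompt, and the attention-mask sums
+ # below can be used to strip the padding (and optionally the prompt) from each output.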
653
+
654
+ # in case we have fewer examples than bs
655
+ batch_size = min(len(query_tensors), batch_size)
656
+
657
+ for i in range(0, len(query_tensors), batch_size):
658
+ if length_sampler is not None:
659
+ generation_kwargs["max_new_tokens"] = length_sampler()
660
+
661
+ # prevent overflow if query tensors are not even multiple of bs
662
+ end_index = min(len(query_tensors), i + batch_size)
663
+
664
+ batch = query_tensors[i:end_index]
665
+ batch_mask = [torch.ones_like(element) for element in batch]
666
+ inputs = {"input_ids": batch, "attention_mask": batch_mask}
667
+
668
+ padded_inputs = self.tokenizer.pad(
669
+ inputs,
670
+ padding=True,
671
+ max_length=None,
672
+ pad_to_multiple_of=pad_to_multiple_of,
673
+ return_tensors="pt",
674
+ ).to(self.current_device)
675
+
676
+ with unwrap_model_for_generation(model, self.accelerator) as unwrapped_model:
677
+ generations = unwrapped_model.generate(**padded_inputs, **generation_kwargs)
678
+
679
+ for generation, mask in zip(generations, padded_inputs["attention_mask"]):
680
+ if not self.is_encoder_decoder:
681
+ output = generation[(1 - mask).sum() :] # remove padding
682
+ else:
683
+ output = generation
684
+
685
+ if not return_prompt and not self.is_encoder_decoder:
686
+ output = output[(mask).sum() :] # remove prompt
687
+
688
+ if remove_padding and self.tokenizer.eos_token_id in output:
689
+ pad_mask = output == self.tokenizer.eos_token_id
690
+ pad_start = torch.nonzero(pad_mask, as_tuple=False)[0, 0].item()
691
+ output = output[: pad_start + 1] # keep the eos token at the end
692
+
693
+ outputs.append(output)
694
+
695
+ self.tokenizer.padding_side = padding_side_default
696
+ return outputs
697
+
698
+ def _step_safety_checker(
699
+ self,
700
+ batch_size: int,
701
+ queries: List[torch.LongTensor],
702
+ responses: List[torch.LongTensor],
703
+ scores: List[torch.FloatTensor],
704
+ masks: Optional[List[torch.LongTensor]] = None,
705
+ ):
706
+ """
707
+ Check if the input data is valid for training.
708
+
709
+ Args:
710
+ batch_size (int):
711
+ Batch size from the config file.
712
+ queries (List[`torch.LongTensor`]):
713
+ List of tensors containing the encoded queries of shape (`query_length`)
714
+ responses (List[`torch.LongTensor`]):
715
+ List of tensors containing the encoded responses of shape (`response_length`)
716
+ scores (List[`torch.FloatTensor`]):
717
+ List of tensors containing the scores.
718
+ masks (List[`torch.LongTensor`], *optional*):
719
+ list of optional tensors containing the masks of shape (`response_length`)
720
+
721
+ Returns:
722
+ `tuple`: The input processed data.
723
+ """
724
+ for name, tensor_list in zip(["queries", "responses", "scores"], [queries, responses, scores]):
725
+ if not isinstance(tensor_list, list):
726
+ raise ValueError(f"{name} must be a list of tensors - got {type(tensor_list)}")
727
+ if not isinstance(tensor_list[0], torch.Tensor):
728
+ raise ValueError(f"Elements in {name} must be tensors - got {type(tensor_list[0])}")
729
+ if batch_size is not None and len(tensor_list) != batch_size:
730
+ raise ValueError(
731
+ f"Batch size ({batch_size}) does not match number of examples - but got {len(tensor_list)} for: {name}"
732
+ )
733
+
734
+ # add queries, scores and responses on the correct device
735
+ queries = [tensor.to(self.current_device) for tensor in queries]
736
+ responses = [tensor.to(self.current_device) for tensor in responses]
737
+ scores = [tensor.to(self.current_device) for tensor in scores]
738
+ masks = [tensor.to(self.current_device) for tensor in masks] if masks is not None else None
739
+
740
+ # squeeze scores if needed
741
+ for i, score in enumerate(scores):
742
+ if score.dim() > 1:
743
+ raise ValueError(f"Scores must be 1-dimensional - got {score.dim()} for {score}")
744
+ elif score.dim() == 1:
745
+ scores[i] = score.squeeze()
746
+
747
+ return queries, responses, scores, masks
748
+
749
+ @PPODecorators.empty_device_cache()
750
+ def step(
751
+ self,
752
+ queries: List[torch.LongTensor],
753
+ responses: List[torch.LongTensor],
754
+ scores: List[torch.FloatTensor],
755
+ response_masks: Optional[List[torch.LongTensor]] = None,
756
+ ):
757
+ """
758
+ Run a PPO optimisation step given a list of queries, model responses, and rewards.
759
+
760
+ Args:
761
+ queries (List[`torch.LongTensor`]):
762
+ List of tensors containing the encoded queries of shape (`query_length`)
763
+ responses (List[`torch.LongTensor`]):
764
+ List of tensors containing the encoded responses of shape (`response_length`)
765
+ scores (List[`torch.FloatTensor`]):
766
+ List of tensors containing the scores.
767
+ response_masks (List[`torch.LongTensor`], *optional*):
768
+ List of tensors containing masks of the response tokens.
769
+
770
+ Returns:
771
+ `dict[str, Any]`: A summary of the training statistics
772
+ """
773
+ bs = self.config.batch_size
774
+
775
+ queries, responses, scores, response_masks = self._step_safety_checker(
776
+ bs, queries, responses, scores, response_masks
777
+ )
778
+ scores = torch.tensor(scores, device=self.current_device)
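+ # The blocks below optionally stabilise the raw reward-model scores: with use_score_scaling
+ # the scores are divided by a running std (plus eps); with use_score_norm the running mean is
+ # subtracted first; score_clip finally clips the result to [-score_clip, score_clip].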
779
+ if self.config.use_score_scaling:
780
+ # Score scaling
781
+ scores_mean, scores_std = self.running.update(scores)
782
+ tensor_to_kwargs = dict(dtype=scores.dtype, device=scores.device)
783
+ score_scaling_factor = self.running.std.to(**tensor_to_kwargs) + torch.finfo(scores.dtype).eps
784
+ if self.config.use_score_norm:
785
+ scores = (scores - self.running.mean.to(**tensor_to_kwargs)) / score_scaling_factor
786
+ else:
787
+ scores /= score_scaling_factor
788
+
789
+ if self.config.score_clip is not None:
790
+ # Score clipping
791
+ scores_dtype = scores.dtype
792
+ scores = torch.clip(scores.float(), -self.config.score_clip, self.config.score_clip).to(dtype=scores_dtype)
793
+
794
+ # if we want to push best model to the hub
795
+ if hasattr(self, "highest_reward"):
796
+ if self.compare_step % self.config.compare_steps == 0:
797
+ curr_mean_reward = scores.mean()
798
+ # if the best reward ever seen
799
+ if curr_mean_reward > self.highest_reward:
800
+ self.highest_reward = curr_mean_reward
801
+ # push model to hub
802
+ self.push_to_hub(**self.push_to_hub_kwargs)
803
+ self.compare_step += 1
804
+
805
+ timing = dict()
806
+ t0 = time.time()
807
+
808
+ t = time.time()
809
+
810
+ model_inputs = self.prepare_model_inputs(queries, responses)
811
+
812
+ if self.is_distributed:
813
+ pad_first = self.tokenizer.padding_side == "left"
814
+
815
+ model_inputs["input_ids"] = self.accelerator.pad_across_processes(
816
+ model_inputs["input_ids"],
817
+ dim=1,
818
+ pad_index=self.tokenizer.pad_token_id,
819
+ pad_first=pad_first,
820
+ )
821
+ model_inputs["attention_mask"] = self.accelerator.pad_across_processes(
822
+ model_inputs["attention_mask"], dim=1, pad_index=0, pad_first=pad_first
823
+ )
824
+ if self.is_encoder_decoder:
825
+ model_inputs["decoder_input_ids"] = self.accelerator.pad_across_processes(
826
+ model_inputs["decoder_input_ids"],
827
+ dim=1,
828
+ pad_index=self.tokenizer.pad_token_id,
829
+ pad_first=pad_first,
830
+ )
831
+ model_inputs["decoder_attention_mask"] = self.accelerator.pad_across_processes(
832
+ model_inputs["decoder_attention_mask"],
833
+ dim=1,
834
+ pad_index=0,
835
+ pad_first=pad_first,
836
+ )
837
+
838
+ model_inputs_names = list(model_inputs.keys())
839
+
840
+ full_kl_penalty = self.config.kl_penalty == "full"
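+ # When kl_penalty == "full" the exact KL over the whole vocabulary is needed, so the forward
+ # passes below also return the logits; otherwise only the log-probs of the sampled tokens are
+ # kept, which is considerably cheaper in memory.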
841
+
842
+ with torch.no_grad():
843
+ all_logprobs, logits_or_none, values, masks = self.batched_forward_pass(
844
+ self.model,
845
+ queries,
846
+ responses,
847
+ model_inputs,
848
+ response_masks=response_masks,
849
+ return_logits=full_kl_penalty,
850
+ )
851
+ with self.optional_peft_ctx():
852
+ ref_logprobs, ref_logits_or_none, _, _ = self.batched_forward_pass(
853
+ self.model if self.is_peft_model else self.ref_model,
854
+ queries,
855
+ responses,
856
+ model_inputs,
857
+ return_logits=full_kl_penalty,
858
+ )
859
+
860
+ timing["time/ppo/forward_pass"] = time.time() - t
861
+
862
+ with torch.no_grad():
863
+ t = time.time()
864
+ if full_kl_penalty:
865
+ active_full_logprobs = logprobs_from_logits(logits_or_none, None, gather=False)
866
+ ref_full_logprobs = logprobs_from_logits(ref_logits_or_none, None, gather=False)
867
+
868
+ rewards, non_score_reward, kls = self.compute_rewards(
869
+ scores, active_full_logprobs, ref_full_logprobs, masks
870
+ )
871
+ else:
872
+ rewards, non_score_reward, kls = self.compute_rewards(scores, all_logprobs, ref_logprobs, masks)
873
+ timing["time/ppo/compute_rewards"] = time.time() - t
874
+
875
+ t = time.time()
876
+ values, advantages, returns = self.compute_advantages(values, rewards, masks)
877
+ timing["time/ppo/compute_advantages"] = time.time() - t
878
+
879
+ # upcast to float32 to avoid dataset issues
880
+ batch_dict = {
881
+ "queries": queries,
882
+ "responses": responses,
883
+ "logprobs": all_logprobs.to(torch.float32),
884
+ "values": values.to(torch.float32),
885
+ "masks": masks,
886
+ "advantages": advantages,
887
+ "returns": returns,
888
+ }
889
+ batch_dict.update(model_inputs)
890
+
891
+ t = time.time()
892
+ all_stats = []
893
+ early_stop = False
894
+ for ppo_epoch in range(self.config.ppo_epochs):
895
+ if early_stop:
896
+ break
897
+ b_inds = np.random.permutation(bs)
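+ # The nested loops below shuffle the rollout batch once per PPO epoch, slice it into backward
+ # batches and then into mini-batches of config.mini_batch_size; optimizer.step() runs for every
+ # mini-batch, but accelerator.accumulate only applies an update at gradient-accumulation
+ # boundaries, i.e. roughly once per backward batch.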
898
+ for backward_batch_start in range(0, bs, self.config.backward_batch_size):
899
+ backward_batch_end = backward_batch_start + self.config.backward_batch_size
900
+ backward_batch_inds = b_inds[backward_batch_start:backward_batch_end]
901
+
902
+ for mini_batch_start in range(0, self.config.backward_batch_size, self.config.mini_batch_size):
903
+ mini_batch_end = mini_batch_start + self.config.mini_batch_size
904
+ mini_batch_inds = backward_batch_inds[mini_batch_start:mini_batch_end]
905
+ mini_batch_dict = {
906
+ "logprobs": batch_dict["logprobs"][mini_batch_inds],
907
+ "values": batch_dict["values"][mini_batch_inds],
908
+ "masks": batch_dict["masks"][mini_batch_inds],
909
+ # hacks: the queries and responses are ragged.
910
+ "queries": [batch_dict["queries"][i] for i in mini_batch_inds],
911
+ "responses": [batch_dict["responses"][i] for i in mini_batch_inds],
912
+ "advantages": batch_dict["advantages"][mini_batch_inds],
913
+ "returns": batch_dict["returns"][mini_batch_inds],
914
+ }
915
+ for k in model_inputs_names:
916
+ mini_batch_dict[k] = batch_dict[k][mini_batch_inds]
917
+ with self.accelerator.accumulate(self.model):
918
+ model_inputs = {k: mini_batch_dict[k] for k in model_inputs_names}
919
+
920
+ logprobs, logits, vpreds, _ = self.batched_forward_pass(
921
+ self.model,
922
+ mini_batch_dict["queries"],
923
+ mini_batch_dict["responses"],
924
+ model_inputs,
925
+ return_logits=True,
926
+ )
927
+
928
+ tmp = {
929
+ 'learning_rate': self._get_learning_rate(),
930
+ 'ppo_epoch': ppo_epoch,
931
+ 'backward_batch_start': backward_batch_start,
932
+ 'mini_batch_start': mini_batch_start,
933
+ 'key': "before optimizer-step",
934
+ }
935
+
936
+ logger.info("####before step####")
937
+ # logger.info(self.lr_scheduler.last_batch_iteration)
938
+ logger.info(self.lr_scheduler.lr_lambdas)
939
+ logger.info(json.dumps(tmp))
940
+
941
+ train_stats = self.train_minibatch(
942
+ mini_batch_dict["logprobs"],
943
+ mini_batch_dict["values"],
944
+ logprobs,
945
+ logits,
946
+ vpreds,
947
+ mini_batch_dict["masks"],
948
+ mini_batch_dict["advantages"],
949
+ mini_batch_dict["returns"],
950
+ )
951
+ all_stats.append(train_stats)
952
+
953
+ tmp = {
954
+ 'learning_rate': self._get_learning_rate(),
955
+ 'ppo_epoch': ppo_epoch,
956
+ 'backward_batch_start': backward_batch_start,
957
+ 'mini_batch_start': mini_batch_start,
958
+ 'key': "after optimizer-step",
959
+ }
960
+ logger.info("####after step####")
961
+ # logger.info(self.lr_scheduler.last_batch_iteration)
962
+ logger.info(self.lr_scheduler.lr_lambdas)
963
+ logger.info(json.dumps(tmp))
964
+
965
+ # typically, early stopping is done at the epoch level
966
+ if self.config.early_stopping:
967
+ policykl = train_stats["policy/policykl"]
968
+ early_stop = self._early_stop(policykl)
969
+ if early_stop:
970
+ break
971
+
972
+ logger.info('===finished ppo-step===')
973
+
974
+ timing["time/ppo/optimize_step"] = time.time() - t
975
+
976
+ t = time.time()
977
+ train_stats = stack_dicts(all_stats)
978
+
979
+ # reshape advantages/ratios such that they are not averaged.
980
+ train_stats["policy/advantages"] = torch.flatten(train_stats["policy/advantages"]).unsqueeze(0)
981
+ train_stats["policy/advantages"] = torch.nan_to_num(train_stats["policy/advantages"], WANDB_PADDING)
982
+ train_stats["policy/ratio"] = torch.flatten(train_stats["policy/ratio"]).unsqueeze(0)
983
+
984
+ stats = self.record_step_stats(
985
+ scores=scores,
986
+ logprobs=all_logprobs,
987
+ ref_logprobs=ref_logprobs,
988
+ non_score_reward=non_score_reward,
989
+ train_stats=train_stats,
990
+ kl_coef=self.kl_ctl.value,
991
+ masks=masks,
992
+ queries=queries,
993
+ responses=responses,
994
+ kls=kls,
995
+ )
996
+ # Gather/Reduce stats from all processes
997
+ if self.is_distributed:
998
+ stats = self.gather_stats(stats)
999
+ stats = stats_to_np(stats)
1000
+ timing["time/ppo/calc_stats"] = time.time() - t
1001
+ if self.is_deepspeed_used:
1002
+ stats["ppo/learning_rate"] = self._get_learning_rate()
1003
+ stats["ppo/learning_rate_optimizer"] = self.optimizer.param_groups[0]["lr"]
1004
+ else:
1005
+ stats["ppo/learning_rate"] = self.optimizer.param_groups[0]["lr"]
1006
+
1007
+ # Update the KL control - multiply the batch_size by the number of processes
1008
+ self.kl_ctl.update(
1009
+ stats["objective/kl"],
1010
+ self.config.batch_size * self.accelerator.num_processes,
1011
+ )
1012
+
1013
+ # Log the total ppo time
1014
+ timing["time/ppo/total"] = time.time() - t0
1015
+ stats.update(timing)
1016
+
1017
+ # post-process stats for tensorboard and other loggers
1018
+ if self.config.log_with != "wandb":
1019
+ stats = convert_to_scalar(stats)
1020
+
1021
+ if self.lr_scheduler is not None:
1022
+ logger.info("###before-lr_scheduler_step###")
1023
+ logger.info(self._get_learning_rate())
1024
+ self.lr_scheduler.step()
1025
+ logger.info("###after-lr_scheduler_step###")
1026
+ logger.info(self._get_learning_rate())
1027
+
1028
+ return stats
1029
+
1030
+ def _early_stop(self, policykl):
1031
+ r"""
1032
+ Handles the early stopping logic. If the policy KL is greater than the target KL, then the gradient is zeroed and
1033
+ the optimization step is skipped.
1034
+ This also handles the multi-gpu case where the policy KL is averaged across all processes.
1035
+
1036
+ Args:
1037
+ policy_kl (torch.Tensor):
1038
+ the policy KL
1039
+
1040
+ Returns:
1041
+ `bool`: whether to early stop or not
1042
+ """
1043
+ early_stop = False
1044
+ if not self.config.early_stopping:
1045
+ return early_stop
1046
+
1047
+ if not self.is_distributed and policykl > 1.5 * self.config.target_kl:
1048
+ self.optimizer.zero_grad()
1049
+ early_stop = True
1050
+ elif self.is_distributed:
1051
+ import torch.distributed as dist
1052
+
1053
+ # Wait for all processes to finish
1054
+ dist.barrier()
1055
+
1056
+ # all gather the policykl
1057
+ dist.all_reduce(policykl, dist.ReduceOp.SUM)
1058
+ policykl /= self.accelerator.num_processes
1059
+
1060
+ if policykl > 1.5 * self.config.target_kl:
1061
+ self.optimizer.zero_grad()
1062
+ early_stop = True
1063
+ return early_stop
1064
+
1065
+ def gather_stats(self, stats):
1066
+ """
1067
+ Gather stats from all processes. Useful in the context of distributed training.
1068
+
1069
+ Args:
1070
+ stats (dict[str, Any]):
1071
+ a dictionary of stats to be gathered. The stats should contain torch tensors.
1072
+
1073
+ Returns:
1074
+ `dict[str, Any]`: A dictionary of stats with the tensors gathered.
1075
+ """
1076
+ import torch.distributed as dist
1077
+
1078
+ # Wait for all processes to finish
1079
+ dist.barrier()
1080
+
1081
+ for k, v in stats.items():
1082
+ if isinstance(v, torch.Tensor):
1083
+ dist.all_reduce(v.to(self.accelerator.device), dist.ReduceOp.SUM)
1084
+ v /= self.accelerator.num_processes
1085
+ stats[k] = v
1086
+ return stats
1087
+
1088
+ def prepare_model_inputs(self, queries: torch.Tensor, responses: torch.Tensor):
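+ # For encoder-decoder models, queries and responses are collated and padded separately as
+ # encoder and decoder inputs; for decoder-only models each query is concatenated with its
+ # response into a single sequence before padding. Labels are dropped since PPO computes its
+ # own loss.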
1089
+ if self.is_encoder_decoder:
1090
+ input_data = self.data_collator(
1091
+ [{"input_ids": q, "attention_mask": torch.ones_like(q)} for q in queries]
1092
+ ).to(self.current_device)
1093
+
1094
+ decoder_inputs = self.data_collator(
1095
+ [{"input_ids": r, "attention_mask": torch.ones_like(r)} for r in responses]
1096
+ ).to(self.current_device)
1097
+
1098
+ input_data["decoder_input_ids"] = decoder_inputs["input_ids"]
1099
+ input_data["decoder_attention_mask"] = decoder_inputs["attention_mask"]
1100
+ else:
1101
+ input_ids = [torch.cat([q, r]) for q, r in zip(queries, responses)]
1102
+ input_data = self.data_collator(
1103
+ [{"input_ids": ids, "attention_mask": torch.ones_like(ids)} for ids in input_ids]
1104
+ ).to(self.current_device)
1105
+
1106
+ input_data.pop("labels", None) # we don't want to compute LM losses
1107
+ return input_data
1108
+
1109
+ @PPODecorators.empty_device_cache()
1110
+ def batched_forward_pass(
1111
+ self,
1112
+ model: PreTrainedModelWrapper,
1113
+ queries: torch.Tensor,
1114
+ responses: torch.Tensor,
1115
+ model_inputs: dict,
1116
+ return_logits: bool = False,
1117
+ response_masks: Optional[torch.Tensor] = None,
1118
+ ):
1119
+ """
1120
+ Calculate model outputs in multiple batches.
1121
+
1122
+ Args:
1123
+ queries (`torch.LongTensor`):
1124
+ List of tensors containing the encoded queries, shape (`batch_size`, `query_length`)
1125
+ responses (`torch.LongTensor`):
1126
+ List of tensors containing the encoded responses, shape (`batch_size`, `response_length`)
1127
+ return_logits (`bool`, *optional*, defaults to `False`):
1128
+ Whether to return all_logits. Set to `False` if logits are not needed to reduce memory consumption.
1129
+
1130
+ Returns:
1131
+ (tuple):
1132
+ - all_logprobs (`torch.FloatTensor`): Log probabilities of the responses,
1133
+ shape (`batch_size`, `response_length`)
1134
+ - all_ref_logprobs (`torch.FloatTensor`): Log probabilities of the responses,
1135
+ shape (`batch_size`, `response_length`)
1136
+ - all_values (`torch.FloatTensor`): Values of the responses, shape (`batch_size`, `response_length`)
1137
+ """
1138
+ bs = len(queries)
1139
+ fbs = self.config.mini_batch_size
1140
+ all_logprobs = []
1141
+ all_logits = []
1142
+ all_masks = []
1143
+ all_values = []
1144
+
1145
+ model.eval()
1146
+
1147
+ for i in range(math.ceil(bs / fbs)):
1148
+ input_kwargs = {key: value[i * fbs : (i + 1) * fbs] for key, value in model_inputs.items()}
1149
+ query_batch = queries[i * fbs : (i + 1) * fbs]
1150
+ response_batch = responses[i * fbs : (i + 1) * fbs]
1151
+ if response_masks is not None:
1152
+ response_masks_batch = response_masks[i * fbs : (i + 1) * fbs]
1153
+ logits, _, values = model(**input_kwargs)
1154
+
1155
+ if self.is_encoder_decoder:
1156
+ input_ids = input_kwargs["decoder_input_ids"]
1157
+ attention_mask = input_kwargs["decoder_attention_mask"]
1158
+ else:
1159
+ input_ids = input_kwargs["input_ids"]
1160
+ attention_mask = input_kwargs["attention_mask"]
1161
+
1162
+ logprobs = logprobs_from_logits(logits[:, :-1, :], input_ids[:, 1:])
1163
+ masks = torch.zeros_like(attention_mask)
1164
+ masks[:, :-1] = attention_mask[:, 1:]
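+ # logprobs are shifted by one position (the logit at step t predicts token t + 1), so the
+ # attention mask is shifted the same way; the loop below then zeroes out query and padding
+ # positions so that only response tokens contribute to the PPO objective.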
1165
+
1166
+ for j in range(len(query_batch)):
1167
+ if self.is_encoder_decoder:
1168
+ # For encoder-decoder models the decoder sequence always starts at index 1, right after the start/padding token
1169
+ start = 1
1170
+ end = attention_mask[j, :].sum() - 1
1171
+ else:
1172
+ start = len(query_batch[j]) - 1 # logprobs start from the second query token
1173
+ if attention_mask[j, 0] == 0: # offset left padding
1174
+ start += attention_mask[j, :].nonzero()[0]
1175
+ end = start + len(response_batch[j])
1176
+
1177
+ masks[j, :start] = 0
1178
+ masks[j, end:] = 0
1179
+ if response_masks is not None:
1180
+ masks[j, start:end] = masks[j, start:end] * response_masks_batch[j]
1181
+
1182
+ if return_logits:
1183
+ all_logits.append(logits)
1184
+ else:
1185
+ del logits
1186
+ all_values.append(values)
1187
+ all_logprobs.append(logprobs)
1188
+ all_masks.append(masks)
1189
+
1190
+ return (
1191
+ torch.cat(all_logprobs),
1192
+ torch.cat(all_logits)[:, :-1] if return_logits else None,
1193
+ torch.cat(all_values)[:, :-1],
1194
+ torch.cat(all_masks)[:, :-1],
1195
+ )
1196
+
1197
+ @PPODecorators.empty_device_cache()
1198
+ def train_minibatch(
1199
+ self,
1200
+ old_logprobs: torch.FloatTensor,
1201
+ values: torch.FloatTensor,
1202
+ logprobs: torch.FloatTensor,
1203
+ logits: torch.FloatTensor,
1204
+ vpreds: torch.FloatTensor,
1205
+ mask: torch.LongTensor,
1206
+ advantages: torch.FloatTensor,
1207
+ returns: torch.FloatTensor,
1208
+ ):
1209
+ """
1210
+ Train one PPO minibatch
1211
+
1212
+ Args:
1213
+ logprobs (`torch.FloatTensor`):
1214
+ Log probabilities of the model, shape [mini_batch_size, response_length]
1215
+ values (`torch.FloatTensor`):
1216
+ Values of the value head, shape [mini_batch_size, response_length]
1217
+ query (`torch.LongTensor`):
1218
+ Encoded queries, shape [mini_batch_size, query_length]
1219
+ response (`torch.LongTensor`):
1220
+ Encoded responses, shape [mini_batch_size, response_length]
1221
+ model_input (`torch.LongTensor`):
1222
+ Concatenated queries and responses, shape [mini_batch_size, query_length+response_length]
1223
+
1224
+ Returns:
1225
+ train_stats (dict[str, `torch.Tensor`]):
1226
+ Dictionary of training statistics
1227
+ """
1228
+ self.model.train()
1229
+ loss_p, loss_v, train_stats = self.loss(
1230
+ old_logprobs, values, logits, vpreds, logprobs, mask, advantages, returns
1231
+ )
1232
+ loss = loss_p + loss_v
1233
+ self.accelerator.backward(loss)
1234
+ if self.config.max_grad_norm is not None:
1235
+ if self.accelerator.sync_gradients:
1236
+ self.accelerator.clip_grad_norm_(self.model_params, self.config.max_grad_norm)
1237
+ self.optimizer.step()
1238
+ # we call optimizer.zero_grad() every time and let `accelerator` handle accumulation
1239
+ # see https://huggingface.co/docs/accelerate/usage_guides/gradient_accumulation#the-finished-code
1240
+ self.optimizer.zero_grad()
1241
+ return train_stats
1242
+
1243
+ def compute_rewards(
1244
+ self,
1245
+ scores: torch.FloatTensor,
1246
+ logprobs: torch.FloatTensor,
1247
+ ref_logprobs: torch.FloatTensor,
1248
+ masks: torch.LongTensor,
1249
+ ):
1250
+ """
1251
+ Compute per token rewards from scores and KL-penalty.
1252
+
1253
+ Args:
1254
+ scores (`torch.FloatTensor`):
1255
+ Scores from the reward model, shape (`batch_size`)
1256
+ logprobs (`torch.FloatTensor`):
1257
+ Log probabilities of the model, shape (`batch_size`, `response_length`)
1258
+ ref_logprobs (`torch.FloatTensor`):
1259
+ Log probabilities of the reference model, shape (`batch_size`, `response_length`)
1260
+
1261
+ Returns:
1262
+ `torch.FloatTensor`: Per token rewards, shape (`batch_size`, `response_length`)
1263
+ `torch.FloatTensor`: Non score rewards, shape (`batch_size`, `response_length`)
1264
+ `torch.FloatTensor`: KL penalty, shape (`batch_size`, `response_length`)
1265
+ """
1266
+ rewards, non_score_rewards, kls = [], [], []
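+ # Per-token reward: -kl_ctl.value * KL(policy || ref) at every response token, with the scalar
+ # reward-model score added only at the last non-masked (final response) token.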
1267
+ for score, logprob, ref_logprob, mask in zip(scores, logprobs, ref_logprobs, masks):
1268
+ # compute KL penalty (from difference in logprobs)
1269
+ kl = self._kl_penalty(logprob, ref_logprob)
1270
+ kls.append(kl)
1271
+ non_score_reward = -self.kl_ctl.value * kl
1272
+ non_score_rewards.append(non_score_reward)
1273
+ reward = non_score_reward.clone()
1274
+ last_non_masked_index = mask.nonzero()[-1]
1275
+
1276
+ # reward is preference model score + KL penalty
1277
+ reward[last_non_masked_index] += score
1278
+ rewards.append(reward)
1279
+ return torch.stack(rewards), torch.stack(non_score_rewards), torch.stack(kls)
1280
+
1281
+ def _kl_penalty(self, logprob: torch.FloatTensor, ref_logprob: torch.FloatTensor) -> torch.FloatTensor:
1282
+ if self.config.kl_penalty == "kl":
1283
+ return logprob - ref_logprob
1284
+
1285
+ if self.config.kl_penalty == "abs":
1286
+ return (logprob - ref_logprob).abs()
1287
+
1288
+ if self.config.kl_penalty == "mse":
1289
+ return 0.5 * (logprob - ref_logprob).square()
1290
+
1291
+ if self.config.kl_penalty == "full":
1292
+ # Arguments are flipped because of this issue: https://github.com/pytorch/pytorch/issues/57459
1293
+ return F.kl_div(ref_logprob, logprob, log_target=True, reduction="none").sum(-1)
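+ # The two "control variate" options below use the low-variance, always non-negative KL
+ # estimator exp(log_ratio) - log_ratio - 1 (often called the k3 estimator), either per token
+ # or with the probability ratio taken over the whole sequence.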
1294
+ if self.config.kl_penalty == "control_variate":
1295
+ log_ratio = ref_logprob - logprob
1296
+ control_variate = torch.exp(log_ratio) - log_ratio - 1
1297
+ return control_variate
1298
+ if self.config.kl_penalty == "seq_control_variate":
1299
+ log_ratio = ref_logprob - logprob
1300
+ prob_ratio = torch.exp(log_ratio.sum(dim=-1, keepdim=True))
1301
+ seq_control_variate = prob_ratio - log_ratio - 1
1302
+ return seq_control_variate
1303
+
1304
+ raise NotImplementedError
1305
+
1306
+ def compute_advantages(
1307
+ self,
1308
+ values: torch.FloatTensor,
1309
+ rewards: torch.FloatTensor,
1310
+ mask: torch.FloatTensor,
1311
+ ):
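+ # Generalized Advantage Estimation (GAE), computed right-to-left over the response:
+ #   delta_t = r_t + gamma * V_{t+1} - V_t
+ #   A_t     = delta_t + gamma * lam * A_{t+1}
+ # returns = advantages + values then serves as the regression target for the value head.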
1312
+ lastgaelam = 0
1313
+ advantages_reversed = []
1314
+ gen_len = rewards.shape[-1]
1315
+
1316
+ values = values * mask
1317
+ rewards = rewards * mask
1318
+
1319
+ if self.config.whiten_rewards:
1320
+ rewards = masked_whiten(rewards, mask, shift_mean=False)
1321
+
1322
+ for t in reversed(range(gen_len)):
1323
+ nextvalues = values[:, t + 1] if t < gen_len - 1 else 0.0
1324
+ delta = rewards[:, t] + self.config.gamma * nextvalues - values[:, t]
1325
+ lastgaelam = delta + self.config.gamma * self.config.lam * lastgaelam
1326
+ advantages_reversed.append(lastgaelam)
1327
+ advantages = torch.stack(advantages_reversed[::-1]).transpose(0, 1)
1328
+
1329
+ returns = advantages + values
1330
+ if self.config.whiten_advantages:
1331
+ advantages = masked_whiten(advantages, mask)
1332
+ advantages = advantages.detach()
1333
+ return values, advantages, returns
1334
+
1335
+ def loss(
1336
+ self,
1337
+ old_logprobs: torch.FloatTensor,
1338
+ values: torch.FloatTensor,
1339
+ logits: torch.FloatTensor,
1340
+ vpreds: torch.FloatTensor,
1341
+ logprobs: torch.FloatTensor,
1342
+ mask: torch.LongTensor,
1343
+ advantages: torch.FloatTensor,
1344
+ returns: torch.FloatTensor,
1345
+ ):
1346
+ """
1347
+ Calculate policy and value losses.
1348
+
1349
+ Args:
1350
+ old_logprobs (`torch.FloatTensor`):
1351
+ Log probabilities of the model, shape (`batch_size`, `response_length`)
1352
+ values (`torch.FloatTensor`):
1353
+ Values of the value head, shape (`batch_size`, `response_length`)
1354
+ rewards (`torch.FloatTensor`):
1355
+ Rewards from the reward model, shape (`batch_size`, `response_length`)
1356
+ logits (`torch.FloatTensor`):
1357
+ Logits of the model, shape (`batch_size`, `response_length`, `vocab_size`)
1358
+ v_pred (`torch.FloatTensor`):
1359
+ Values of the value head, shape (`batch_size`, `response_length`)
1360
+ logprobs (`torch.FloatTensor`):
1361
+ Log probabilities of the model, shape (`batch_size`, `response_length`)
1362
+ """
1363
+
1364
+ vpredclipped = clip_by_value(
1365
+ vpreds,
1366
+ values - self.config.cliprange_value,
1367
+ values + self.config.cliprange_value,
1368
+ )
1369
+
1370
+ vf_losses1 = (vpreds - returns) ** 2
1371
+ vf_losses2 = (vpredclipped - returns) ** 2
1372
+ vf_loss = 0.5 * masked_mean(torch.max(vf_losses1, vf_losses2), mask)
1373
+ vf_clipfrac = masked_mean(torch.gt(vf_losses2, vf_losses1).float(), mask)
1374
+
1375
+ ratio = torch.exp(logprobs - old_logprobs)
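+ # Clipped surrogate objective: take the elementwise maximum of the unclipped and clipped
+ # policy losses, which pessimistically bounds the update once the probability ratio leaves
+ # [1 - cliprange, 1 + cliprange].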
1376
+
1377
+ pg_losses = -advantages * ratio
1378
+ pg_losses2 = -advantages * torch.clamp(ratio, 1.0 - self.config.cliprange, 1.0 + self.config.cliprange)
1379
+
1380
+ pg_loss = masked_mean(torch.max(pg_losses, pg_losses2), mask)
1381
+ pg_clipfrac = masked_mean(torch.gt(pg_losses2, pg_losses).float(), mask)
1382
+
1383
+ loss = pg_loss + self.config.vf_coef * vf_loss
1384
+
1385
+ avg_ratio = masked_mean(ratio, mask).item()
1386
+ if avg_ratio > self.config.ratio_threshold:
1387
+ warnings.warn(
1388
+ f"The average ratio of batch ({avg_ratio:.2f}) exceeds threshold {self.config.ratio_threshold:.2f}. Skipping batch."
1389
+ )
1390
+ pg_loss = pg_loss * 0.0
1391
+ vf_loss = vf_loss * 0.0
1392
+ loss = loss * 0.0
1393
+
1394
+ entropy = masked_mean(entropy_from_logits(logits), mask)
1395
+
1396
+ approxkl = 0.5 * masked_mean((logprobs - old_logprobs) ** 2, mask)
1397
+ policykl = masked_mean(old_logprobs - logprobs, mask)
1398
+
1399
+ return_mean, return_var = masked_mean(returns, mask), masked_var(returns, mask)
1400
+ value_mean, value_var = masked_mean(values, mask), masked_var(values, mask)
1401
+
1402
+ stats = dict(
1403
+ loss=dict(policy=pg_loss.detach(), value=vf_loss.detach(), total=loss.detach()),
1404
+ policy=dict(
1405
+ entropy=entropy.detach(),
1406
+ approxkl=approxkl.detach(),
1407
+ policykl=policykl.detach(),
1408
+ clipfrac=pg_clipfrac.detach(),
1409
+ advantages=advantages.detach(),
1410
+ advantages_mean=masked_mean(advantages, mask).detach(),
1411
+ ratio=ratio.detach(),
1412
+ ),
1413
+ returns=dict(mean=return_mean.detach(), var=return_var.detach()),
1414
+ val=dict(
1415
+ vpred=masked_mean(vpreds, mask).detach(),
1416
+ error=masked_mean((vpreds - returns) ** 2, mask).detach(),
1417
+ clipfrac=vf_clipfrac.detach(),
1418
+ mean=value_mean.detach(),
1419
+ var=value_var.detach(),
1420
+ ),
1421
+ )
1422
+ return pg_loss, self.config.vf_coef * vf_loss, flatten_dict(stats)
1423
+
1424
+ def record_step_stats(self, kl_coef: float, **data):
1425
+ """
1426
+ Record training step statistics.
1427
+
1428
+
1429
+ Args:
1430
+ kl_coef (`float`):
1431
+ KL coefficient
1432
+ data (`dict`):
1433
+ Dictionary of training step data
1434
+
1435
+ Returns:
1436
+ stats (`dict`):
1437
+ Dictionary of training step statistics
1438
+ """
1439
+ mask = data.pop("masks")
1440
+
1441
+ kls = data.pop("kls")
1442
+ kl_list = ((kls) * mask).sum(axis=-1)
1443
+ mean_kl = kl_list.mean()
1444
+ mean_entropy = (-data["logprobs"] * mask).sum(axis=-1).mean()
1445
+
1446
+ mean_non_score_reward = masked_mean(
1447
+ data["non_score_reward"], mask
1448
+ ) # non_score_reward is size `batch_size`, `response_length`
1449
+ mean_scores = data["scores"].mean() # scores is size `batch_size`
1450
+ std_scores = data["scores"].std()
1451
+
1452
+ if mean_kl.item() < -1.0:
1453
+ # warn users
1454
+ warnings.warn(
1455
+ f"KL divergence is starting to become negative: {mean_kl.item():.2f} - this might be a precursor for failed training."
1456
+ " sometimes this happens because the generation kwargs are not correctly set. Please make sure"
1457
+ " that the generation kwargs are set correctly, or review your training hyperparameters."
1458
+ )
1459
+
1460
+ stats = {
1461
+ "objective/kl": mean_kl,
1462
+ "objective/kl_dist": kl_list,
1463
+ "objective/logprobs": data["logprobs"],
1464
+ "objective/ref_logprobs": data["ref_logprobs"],
1465
+ "objective/kl_coef": kl_coef,
1466
+ "objective/entropy": mean_entropy,
1467
+ "ppo/mean_non_score_reward": mean_non_score_reward,
1468
+ "ppo/mean_scores": mean_scores,
1469
+ "ppo/std_scores": std_scores,
1470
+ }
1471
+
1472
+ # Log text properties
1473
+ query_lens = torch.tensor([len(query) for query in data["queries"]], dtype=torch.float)
1474
+ response_lens = torch.tensor([len(response) for response in data["responses"]], dtype=torch.float)
1475
+
1476
+ stats["tokens/queries_len_mean"] = torch.mean(query_lens).cpu().numpy().item()
1477
+ stats["tokens/queries_len_std"] = torch.std(query_lens).cpu().numpy().item()
1478
+ stats["tokens/queries_dist"] = query_lens.cpu().numpy()
1479
+ stats["tokens/responses_len_mean"] = torch.mean(response_lens).cpu().numpy().item()
1480
+ stats["tokens/responses_len_std"] = torch.std(response_lens).cpu().numpy().item()
1481
+ stats["tokens/responses_dist"] = response_lens.cpu().numpy()
1482
+
1483
+ for k, v in data["train_stats"].items():
1484
+ stats[f"ppo/{k}"] = torch.mean(v, axis=0)
1485
+ stats["ppo/val/var_explained"] = 1 - stats["ppo/val/error"] / stats["ppo/returns/var"]
1486
+ return stats
1487
+
1488
+ def log_stats(
1489
+ self,
1490
+ stats: dict,
1491
+ batch: dict,
1492
+ rewards: List[torch.FloatTensor],
1493
+ columns_to_log: typing.Iterable[str] = ("query", "response"),
1494
+ ):
1495
+ """
1496
+ A function that logs all the training stats. Call it at the end of each epoch.
1497
+
1498
+ Args:
1499
+ stats (dict[str, Any]):
1500
+ A dictionary of training stats.
1501
+ batch (dict[str, Any]):
1502
+ A dictionary of batch data, this contains the queries and responses.
1503
+ rewards (`List[torch.FloatTensor]`):
1504
+ A tensor of rewards.
1505
+ """
1506
+
1507
+ # all gather stats
1508
+ if not isinstance(rewards, torch.Tensor):
1509
+ rewards = torch.tensor(rewards).to(self.current_device)
1510
+ rewards = self.accelerator.gather(rewards).flatten()
1511
+
1512
+ if self.config.log_with == "wandb":
1513
+ import wandb
1514
+
1515
+ if any(column_to_log not in batch.keys() for column_to_log in columns_to_log):
1516
+ raise ValueError(f"Columns to log {columns_to_log} are not present in the batch {batch.keys()}.")
1517
+
1518
+ batch_list = [batch[column_to_log] for column_to_log in columns_to_log]
1519
+ if self.is_distributed:
1520
+ gathered_batch_list = []
1521
+ for b in batch_list:
1522
+ flattened = gather_object(b)
1523
+ gathered_batch_list.append(flattened)
1524
+ batch_list = gathered_batch_list
1525
+
1526
+ # Log only if we are in the main process
1527
+ if self.accelerator.is_main_process:
1528
+ logs = {}
1529
+
1530
+ # Log stats
1531
+ if "query" not in batch.keys() and "response" not in batch.keys():
1532
+ # warn the user that the game logs will not be logged
1533
+ warnings.warn(
1534
+ "The game logs will not be logged because the batch does not contain the keys 'query' and "
1535
+ "'response'. "
1536
+ )
1537
+ elif self.config.log_with == "wandb":
1538
+ table_rows = [list(r) for r in zip(*batch_list, rewards.cpu().tolist())]
1539
+ logs.update({"game_log": wandb.Table(columns=[*columns_to_log, "reward"], rows=table_rows)})
1540
+
1541
+ logs.update(stats)
1542
+
1543
+ # manually cast in fp32 for bf16 torch tensors
1544
+ for k, v in logs.items():
1545
+ if isinstance(v, torch.Tensor) and v.dtype == torch.bfloat16:
1546
+ logs[k] = v.float()
1547
+
1548
+ logs["env/reward_mean"] = torch.mean(rewards).cpu().numpy().item()
1549
+ logs["env/reward_std"] = torch.std(rewards).cpu().numpy().item()
1550
+ logs["env/reward_dist"] = rewards.cpu().numpy()
1551
+
1552
+ if self.config.log_with == "tensorboard":
1553
+ # update the current step
1554
+ self.current_step += 1
1555
+
1556
+ self.accelerator.log(
1557
+ logs,
1558
+ step=self.current_step if self.config.log_with == "tensorboard" else None,
1559
+ )
1560
+
1561
+ def create_model_card(self, path: str, model_name: Optional[str] = "TRL Model") -> None:
1562
+ """Creates and saves a model card for a TRL model.
1563
+
1564
+ Args:
1565
+ path (`str`): The path to save the model card to.
1566
+ model_name (`str`, *optional*): The name of the model, defaults to `TRL Model`.
1567
+ """
1568
+ try:
1569
+ user = whoami()["name"]
1570
+ # handle the offline case
1571
+ except Exception:
1572
+ warnings.warn("Cannot retrieve user information assuming you are running in offline mode.")
1573
+ return
1574
+
1575
+ if not os.path.exists(path):
1576
+ os.makedirs(path)
1577
+
1578
+ model_card_content = MODEL_CARD_TEMPLATE.format(model_name=model_name, model_id=f"{user}/{path}")
1579
+ with open(os.path.join(path, "README.md"), "w", encoding="utf-8") as f:
1580
+ f.write(model_card_content)
1581
+
1582
+ def _save_pretrained(self, save_directory: str) -> None:
1583
+ self.accelerator.unwrap_model(self.model).save_pretrained(save_directory)
1584
+ self.tokenizer.save_pretrained(save_directory)
1585
+ self.create_model_card(save_directory)
1586
+
1587
+ def _show_tokens(self, tokens, masks):
1588
+ from rich import print
1589
+ from rich.text import Text
1590
+
1591
+ text = Text()
1592
+
1593
+ for _i, (token, mask) in enumerate(zip(tokens, masks)):
1594
+ if mask == 1:
1595
+ text.append(self.tokenizer.decode(token.item()), style="black on deep_sky_blue1")
1596
+ text.append(" ")
1597
+ else:
1598
+ text.append(self.tokenizer.decode(token.item()), style="black on cyan3")
1599
+ text.append(" ")
1600
+ print(text)
1601
+
1602
+ def my_prepare_deepspeed(self, model):
1603
+ deepspeed_states = self.accelerator.state.deepspeed_plugin
1604
+ config_kwargs = deepspeed_states.deepspeed_config
1605
+ if config_kwargs["zero_optimization"]["stage"] != 3:
1606
+ stage = 0
1607
+ else:
1608
+ stage = 3
1609
+ ds_config = get_eval_ds_config(deepspeed_states, offload=True, stage=stage)
1610
+ logger.info("####ds_config####")
1611
+ logger.info(ds_config)
1612
+ logger.info("####config_kwargs####")
1613
+ logger.info(config_kwargs)
1614
+ model, *_ = deepspeed.initialize(model=model, config=ds_config)
1615
+ model.eval()
1616
+ return model
1617
+
1618
+ def _prepare_deepspeed(self, model: PreTrainedModelWrapper):
1619
+ # Adapted from accelerate: https://github.com/huggingface/accelerate/blob/739b135f8367becb67ffaada12fe76e3aa60fefd/src/accelerate/accelerator.py#L1473
1620
+ deepspeed_plugin = self.accelerator.state.deepspeed_plugin
1621
+ config_kwargs = deepspeed_plugin.deepspeed_config
1622
+ if model is not None:
1623
+ if hasattr(model, "config"):
1624
+ hidden_size = (
1625
+ max(model.config.hidden_sizes)
1626
+ if getattr(model.config, "hidden_sizes", None)
1627
+ else getattr(model.config, "hidden_size", None)
1628
+ )
1629
+ if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3:
1630
+ # Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like: `Invalidate trace cache @ step 0: expected module 1, but got module 0`
1631
+ # This is expected and is not an error, see: https://github.com/microsoft/DeepSpeed/discussions/4081
1632
+ config_kwargs.update(
1633
+ {
1634
+ "zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
1635
+ "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
1636
+ "zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
1637
+ }
1638
+ )
1639
+
1640
+ # If ZeRO-3 is used, we shard both the active and reference model.
1641
+ # Otherwise, we assume the reference model fits in memory and is initialized on each device with ZeRO disabled (stage 0)
1642
+ if config_kwargs["zero_optimization"]["stage"] != 3:
1643
+ config_kwargs["zero_optimization"]["stage"] = 0
1644
+ model, *_ = deepspeed.initialize(model=model, config=config_kwargs)
1645
+ model.eval()
1646
+ return model
step_dpo_ppo.py ADDED
@@ -0,0 +1,344 @@
+ # Copyright 2024 HuggingFace Inc. and the LlamaFactory team.
+ #
+ # This code is inspired by the HuggingFace's TRL library.
+ # https://github.com/huggingface/trl/blob/v0.8.0/examples/scripts/ppo.py
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import random
+ random.seed(42)
+
+ from typing import TYPE_CHECKING, List, Optional
+ from dataclasses import dataclass, field
+ from typing import Optional, Dict, Sequence
+ import json
+ from dataclasses import asdict, dataclass, field
+ from typing import Literal, Optional
+ import warnings
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+ from llamafactory.extras.ploting import plot_loss  # needed by run_ppo when finetuning_args.plot_loss is set
+ from llamafactory.train.trainer_utils import create_ref_model, create_reward_model
+ from llamafactory.model import load_model, load_tokenizer
+ from llamafactory.hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments
+ from ppo_save_utils import FixValueHeadModelCallback, my_fix_valuehead_checkpoint
+
+ # from my_ppo_trainer import CustomPPOTrainer
+ from my_ppo_trainer_v1 import CustomPPOTrainer
+
+ from transformers import Seq2SeqTrainingArguments, TrainerCallback
+ from transformers import DataCollatorWithPadding
+ import transformers
+
+ from torch.utils.data import Dataset
+ import torch
+ import transformers
+ from transformers import HfArgumentParser, Seq2SeqTrainingArguments
+ from transformers.integrations import is_deepspeed_zero3_enabled
+ from transformers.trainer_utils import get_last_checkpoint
+ from transformers.training_args import ParallelMode
+ from transformers.utils import is_torch_bf16_gpu_available
+ from transformers.utils.versions import require_version
+ from transformers.trainer_callback import (
+     DefaultFlowCallback
+ )
+ import os
+ import torch.distributed as dist
+
+ import logging
+
+ logging.basicConfig()
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.INFO)
+
+ @dataclass
+ class Seq2SeqTrainingArguments(transformers.Seq2SeqTrainingArguments):
+     optim: str = field(default="adamw_torch")
+     model_max_length: int = field(
+         default=512,
+         metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."},
+     )
+     ppo_whiten_advantages: bool = field(
+         default=True,
+         metadata={"help": "Whether to whiten (normalize) the advantages before the PPO update."},
+     )
+     init_kl_coef: float = field(
+         default=0.01,
+         metadata={"help": "Initial coefficient of the KL penalty against the reference model."},
+     )
+     adap_kl_ctrl: bool = field(
+         default=False,
+         metadata={"help": "Whether to use adaptive KL control; otherwise the KL coefficient stays fixed."},
+     )
+     kl_penalty: str = field(
+         default="kl",
+         metadata={"help": "KL penalty type: 'kl', 'abs', 'mse' or 'full'."},
+     )
+     lam: float = field(
+         default=1.0,
+         metadata={"help": "Lambda parameter for GAE advantage estimation."},
+     )
+
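+ # These extra fields add PPO hyperparameters (KL control, GAE lambda, advantage whitening) on top of
+ # the standard Seq2SeqTrainingArguments; they are presumably consumed by CustomPPOTrainer when it
+ # builds its PPO configuration.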
+
+ PROMPT_DICT = {
+     'alpaca_without_response': (
+         "Below is an instruction that describes a task. "
+         "Write a response that appropriately completes the request.\n\n"
+         "### Instruction:\n{prompt}\n\n### Response: Let's think step by step. Step 1:"
+     ),
+     'alpaca_with_response': (
+         "Below is an instruction that describes a task. "
+         "Write a response that appropriately completes the request.\n\n"
+         "### Instruction:\n{prompt}\n\n### Response: {initial_reason_steps}"
+     )
+ }
+
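+ # 'alpaca_without_response' prompts the model from scratch and seeds the reply with "Step 1:",
+ # while 'alpaca_with_response' appends a partial chain of reasoning steps so that generation can
+ # resume mid-solution; the reverse-curriculum branch below relies on the second template.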
+ def my_filter(example, tokenizer):
+     query = PROMPT_DICT['alpaca_without_response'].format_map(example)
+     if len(tokenizer(query)['input_ids']) > 1024:
+         return False
+     return True
+
+ Reverse_Curriculum_RL = os.environ.get('Reverse_Curriculum_RL', '')
+ import re
+
+ pattern = r"Step \d+:.*?(?=Step \d+:|$)"
+ print(Reverse_Curriculum_RL, '==Reverse_Curriculum_RL==')
+
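+ # When the Reverse_Curriculum_RL environment variable is set to 'apply', `pattern` splits a
+ # reference solution into its "Step N:" chunks; PPODataset below then emits one sample per
+ # solution prefix in addition to the bare question, so some rollouts only have to complete
+ # the tail of a solution rather than solve the problem from scratch.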
+ class PPODataset(Dataset):
+     """Dataset for supervised fine-tuning."""
+
+     def __init__(self, data_path: str,
+                  tokenizer: transformers.PreTrainedTokenizer, training_args):
+         super(PPODataset, self).__init__()
+         self.training_args = training_args
+         from datasets import load_dataset
+         print('==data_path==', data_path)
+         df = load_dataset('json',
+                           data_files=data_path)['train']
+         self.df_ = df.filter(lambda example: my_filter(example, tokenizer),
+                              num_proc=8)
+
+         if Reverse_Curriculum_RL == 'apply':
+             data_dict = {}
+             self.df = []
+             for d in self.df_:
+                 if d['initial_reason_steps']:
+                     steps = re.findall(pattern, d['initial_reason_steps'], re.DOTALL)
+                 else:
+                     steps = []
+                 for step_idx, step in enumerate(steps):
+                     tmp = {}
+                     for key in d:
+                         if key in ['initial_reason_steps']:
+                             continue
+                         tmp[key] = d[key]
+                     prefix = ''.join(steps[:step_idx+1])
+                     tmp['initial_reason_steps'] = "Let's think step by step. " + ''.join(prefix)
+                     if tmp['prompt']+tmp['initial_reason_steps'] in data_dict:
+                         continue
+                     self.df.append(tmp)
+                     data_dict[tmp['prompt']+tmp['initial_reason_steps']] = 0
+                 tmp = {}
+                 for key in d:
+                     if key in ['initial_reason_steps']:
+                         continue
+                     tmp[key] = d[key]
+                 if tmp['prompt'] in data_dict:
+                     continue
+                 self.df.append(tmp)
+                 data_dict[tmp['prompt']] = 0
+         else:
+             self.df = []
+             for d in self.df_:
+                 self.df.append(d)
+
+         self.df += self.df
+         self.idx = list(range(len(self.df)))
+         random.shuffle(self.idx)
+
+         print(len(self.df), '===dataset===')
+         self.tokenizer = tokenizer
+
+     def __len__(self):
+         return len(self.df)
+
+     def __getitem__(self, i):
+         tmp = self.df[self.idx[i]]
+         if Reverse_Curriculum_RL == 'apply':
+             if tmp.get('initial_reason_steps', ''):
+                 query = PROMPT_DICT['alpaca_with_response'].format_map(tmp)
+             else:
+                 query = PROMPT_DICT['alpaca_without_response'].format_map(tmp)
+         else:
+             query = PROMPT_DICT['alpaca_without_response'].format_map(tmp)
+
+         tokenized_question = self.tokenizer(
+             query,
+             padding="longest",
+             max_length=self.training_args.model_max_length,
+             truncation=True,
+             add_special_tokens=True
+         )
+
+         tokenizer_answer = self.tokenizer(
+             tmp['answer'],
+             padding="longest",
+             max_length=self.training_args.model_max_length,
+             truncation=True,
+             add_special_tokens=True
+         )
+
+         return {
+             'input_ids': tokenized_question['input_ids'],
+             'answer_ids': tokenizer_answer['input_ids']
+         }
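+     # Each item pairs the tokenized query (the prompt, optionally extended with a reasoning prefix)
+     # with the tokenized reference answer; the answer ids travel through the collator as
+     # 'answer_ids', presumably so the rule-based reward can score generations against the ground truth.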
+
+ def padding(inputs, padding_token, cutoff=None):
+     num_elems = len(inputs)
+     if cutoff is None:
+         cutoff = max([len(item) for item in inputs])
+     else:
+         cutoff = min(max([len(item) for item in inputs]), cutoff)
+
+     tokens = torch.ones(num_elems, cutoff).long().to(inputs[0].device) * padding_token
+     for i in range(num_elems):
+         toks = inputs[i]
+         length = min(cutoff, len(toks))
+         tokens[i, -length:] = toks[-length:]
+     return tokens
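+ # `padding` left-pads: every sequence keeps its last `length` tokens and is placed flush against the
+ # right edge of a (num_elems, cutoff) tensor filled with `padding_token`, matching the left-padded
+ # layout used for generation.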
+
+ @dataclass
+ class PPODataCollatorWithPadding(DataCollatorWithPadding):
+     def __call__(self, features: Sequence[Dict[str, Any]]) -> Dict[str, Any]:
+         concatenated_features = {
+             'input_ids': []
+         }
+         answer_ids = []
+         for feature in features:
+             concatenated_features['input_ids'].append(feature['input_ids'])
+             answer_ids.append(torch.tensor(feature['answer_ids']))
+
+         batch = self.tokenizer.pad(
+             concatenated_features,
+             padding=self.padding,
+             max_length=self.max_length,
+             pad_to_multiple_of=self.pad_to_multiple_of,
+             return_tensors=self.return_tensors,
+         )
+         patched_answer_ids = padding(answer_ids,
+                                      padding_token=self.tokenizer.pad_token_id)
+         batch['answer_ids'] = patched_answer_ids
+         return batch
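+     # The queries are padded through `tokenizer.pad` (left-padded, since padding_side is set to "left"
+     # before training below), while the reference answers are padded separately with the helper above
+     # and attached to the batch as 'answer_ids'.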
+
+ def run_ppo(
+     model_args: "ModelArguments",
+     data_args: "DataArguments",
+     training_args: "Seq2SeqTrainingArguments",
+     finetuning_args: "FinetuningArguments",
+     generating_args: "GeneratingArguments",
+     callbacks: Optional[List["TrainerCallback"]] = None,
+ ):
+     tokenizer_module = load_tokenizer(model_args)
+     tokenizer = tokenizer_module["tokenizer"]
+
+     dataset = PPODataset(data_args.dataset, tokenizer, training_args)
+
+     dist.barrier()
+
+     model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train, add_valuehead=True)
+
+     logger.info('###model-config###')
+     logger.info(model.config)
+
+     dist.barrier()
+
+     tokenizer.padding_side = "left"  # use left-padding in generation while using right-padding in training
+
+     # data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
+     data_collator = PPODataCollatorWithPadding(tokenizer=tokenizer, max_length=4096)
+     from torch.utils.data import DataLoader
+
+     test_loader = DataLoader(dataset,
+                              collate_fn=data_collator,
+                              sampler=None,
+                              batch_size=1)
+     for idx, d in enumerate(test_loader):
+         input_ids = d['input_ids']
+         print(tokenizer.batch_decode(input_ids), '==input==')
+         print(input_ids, '==input_ids==')
+         if idx >= 0:
+             break
+
+     # Create reference model and reward model
+     ref_model = create_ref_model(model_args, finetuning_args, add_valuehead=True)
+     reward_model = 'api'
+
+     dist.barrier()
+
+     # Initialize our Trainer
+     ppo_trainer = CustomPPOTrainer(
+         model_args=model_args,
+         training_args=training_args,
+         finetuning_args=finetuning_args,
+         generating_args=generating_args,
+         callbacks=callbacks + [DefaultFlowCallback()],
+         model=model,
+         reward_model=reward_model,
+         ref_model=ref_model,
+         dataset=dataset,
+         data_collator=data_collator,
+         **tokenizer_module,
+     )
+
+     dist.barrier()
+
+     # Training
+
+     if training_args.do_train:
+         if training_args.resume_from_checkpoint:
+             ppo_trainer.ppo_train(resume_from_checkpoint=training_args.resume_from_checkpoint)
+         else:
+             ppo_trainer.ppo_train()
+         ppo_trainer.save_model()
+         if training_args.should_save:
+             my_fix_valuehead_checkpoint(model, training_args.output_dir,
+                                         training_args.save_safetensors)
+
+         ppo_trainer.save_state()  # must be called after save_model to have a folder
+         if ppo_trainer.is_world_process_zero() and finetuning_args.plot_loss:
+             plot_loss(training_args.output_dir, keys=["loss", "reward"])
+
+ if __name__ == "__main__":
+     from llamafactory.hparams import get_infer_args, get_train_args
+     from transformers import TrainerCallback
+     from llamafactory.train.callbacks import LogCallback
+
+     from llamafactory.hparams.data_args import DataArguments
+     from llamafactory.hparams.finetuning_args import FinetuningArguments
+     from llamafactory.hparams.model_args import ModelArguments
+     from llamafactory.hparams.generating_args import GeneratingArguments
+
+     parser = transformers.HfArgumentParser((ModelArguments,
+                                             DataArguments,
+                                             Seq2SeqTrainingArguments,
+                                             FinetuningArguments,
+                                             GeneratingArguments
+                                             ))
+
+     (model_args, data_args,
+      training_args, finetuning_args,
+      generating_args) = parser.parse_args_into_dataclasses()
+
+     callbacks = [LogCallback()]
+
+     run_ppo(model_args, data_args, training_args,
+             finetuning_args, generating_args, callbacks)
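+
+ # A hypothetical launch command (flag names follow the dataclasses parsed above; paths are placeholders):
+ #   deepspeed --num_gpus 8 step_dpo_ppo.py \
+ #       --model_name_or_path <path_to_sft_model> --dataset <path_to_step_dpo_jsonl> \
+ #       --output_dir <output_dir> --do_train --init_kl_coef 0.01 --kl_penalty kl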
step_dpo_reward.py ADDED
@@ -0,0 +1,140 @@
+
+
+ from evaluation.eval.eval_script import eval_math
+ from evaluation.data_processing.answer_extraction import extract_math_answer
+ import re, json
+ import argparse
+ import json
+ import pdb
+ import jsonlines
+ import os
+ import torch
+
+ from datasets import load_dataset
+ import transformers
+
+ import logging
+
+ logging.basicConfig()
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.INFO)
+
+ df = load_dataset('json',
+                   data_files=['/cpfs/user/chenhao/hf_datasets/Math-Step-DPO-10K/step_dpo.jsonl'])['train']
+
+ template = (
+     "Below is an instruction that describes a task. "
+     "Write a response that appropriately completes the request.\n\n"
+     "### Instruction:\n{prompt}\n\n### Response: Let's think step by step. Step 1:"
+ )
+
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+     '/cpfs/user/chenhao/pretrained_models/DeepSeekMath-Base-SFT/',
+     model_max_length=4096,
+     use_fast=True
+ )
+
+ DATA_DICT = {}
+ for d in df:
+     prompt = template.format_map(d).strip()
+     prompt_decoded = tokenizer.decode(tokenizer(prompt)['input_ids'], skip_special_tokens=True)
+     prompt_key = prompt_decoded.split('### Response:')[0].strip()
+     if prompt_key not in DATA_DICT:
+         DATA_DICT[prompt_key] = d
+
+ print(len(DATA_DICT), '==DATA_DICT==')
+
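+ # `remove_text` is referenced below but not defined in this file; a minimal stand-in is sketched here
+ # under the assumption that it only strips a surrounding LaTeX \text{...} wrapper from the reference
+ # answer. Replace it with the project's own helper if one exists elsewhere.
+ def remove_text(answer: str) -> str:
+     # "\\text{foo}" -> "foo"; anything else is returned unchanged.
+     if answer.startswith("\\text{") and answer.endswith("}"):
+         return answer[len("\\text{"):-1].strip()
+     return answer
+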
+ def reward_rule_model(prompt, completion, **kwargs):
+
+     # if kwargs.get('answer', None):
+     #     prompt_answer = kwargs['answer']
+     # else:
+     prompt_key = prompt
+     prompt_answer = DATA_DICT[prompt_key]['answer']
+
+     if isinstance(prompt_answer, str) and prompt_answer.startswith("\\text{"):
+         prompt_answer = remove_text(prompt_answer)
+
+     if "The answer is:" in completion and (isinstance(prompt_answer, list) and len(prompt_answer) == 1 and "\\begin{pmatrix}" in prompt_answer[0]):
+         prompt_answer[0] = prompt_answer[0].replace("\\\\", "\\")
+         completion = completion.replace("\\\\", "\\")
+
+     item = {
+         'question': prompt,
+         'model_output': completion,
+         'prediction': extract_math_answer(prompt, completion, task='cot'),
+         'answer': prompt_answer if isinstance(prompt_answer, list) else [prompt_answer],
+     }
+
+     # if "The answer is:" in completion and (isinstance(prompt_answer, list) and len(prompt_answer) == 1 and "\\begin{pmatrix}" in prompt_answer[0]):
+     #     prompt_answer[0] = prompt_answer[0].replace("\\\\", "\\")
+     #     completion = completion.replace("\\\\", "\\")
+
+     # item = {
+     #     'question': prompt,
+     #     'model_output': completion,
+     #     'prediction': extract_math_answer(prompt, completion, task='cot'),
+     #     'answer': prompt_answer if isinstance(prompt_answer, list) else [prompt_answer],
+     # }
+
+     if len(item['prediction']) == 0:
+         return 0
+     else:
+         extract_ans = item['prediction']
+         res = eval_math(item)
+         ans_tmp = {
+             'extract_ans': extract_ans,
+             'prompt_answer': prompt_answer,
+             'res': res
+         }
+         # logger.info("########ans_tmp########")
+         # logger.info(json.dumps(ans_tmp, ensure_ascii=False))
+         # print(extract_ans, '====', prompt_answer, '====', res)
+         if res:
+             return 1
+         else:
+             return 0
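+
+ # The rule-based reward is binary: 1 if the answer extracted from the completion matches the reference
+ # answer according to eval_math, and 0 if extraction fails or the answers differ.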
+
+ def get_rewards_from_rule(prompt_list, completion_list, **kwargs):
+     rewards = []
+     if 'answers' in kwargs:
+         answer_list = kwargs['answers']
+     for idx in range(len(prompt_list)):
+         prompt = prompt_list[idx].split('### Response:')[0].strip()
+         completion = completion_list[idx]
+         if 'answers' in kwargs:
+             answer = answer_list[idx].replace('<|begin▁of▁sentence|>', '')
+             reward = reward_rule_model(prompt.strip(),
+                                        completion.strip(),
+                                        answer=answer)
+         else:
+             reward = reward_rule_model(prompt.strip(),
+                                        completion.strip())
+         rewards.append(reward)
+     reward_1 = sum([reward for reward in rewards if reward == 1])
+     # print(reward_1/(len(rewards)+1e-10), '====pass-rate====')
+     return torch.Tensor(rewards)
+
+ import requests, os, time
+ url = os.environ.get('RM_URL', 'http://22.5.6.221:8000/get_rm')
+ s = requests.Session()
+
+ def get_rewards_from_server(query, response, **kwargs):
+     r"""
+     Gets reward scores from the API server.
+     """
+     headers = {"Content-Type": "application/json"}
+     data = {
+         'prompt': query,
+         'response': response
+     }
+     for _ in range(100):
+         try:
+             resp = s.post(url, json=data, headers=headers)
+             time.sleep(1.0)
+             break
+         except Exception:
+             time.sleep(1.0)
+             continue
+     rewards = json.loads(resp.text)["score"]
+     return torch.Tensor(rewards)
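+
+ # The reward server is expected to accept a JSON body {"prompt": [...], "response": [...]} and to
+ # return {"score": [...]} with one scalar per (prompt, response) pair; RM_URL overrides the default
+ # endpoint. A minimal sanity check, assuming such a server is running:
+ #   get_rewards_from_server(["1+1=?"], ["The answer is: 2"])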