Commit 4ea4da5 · Parent(s): 5cfa3a6
update

Files changed:
- examples/tutorials/by_deepspeed/ds_config/deepspeed_stage_3_config.json (+58, -0)
- examples/tutorials/by_deepspeed/requirements.txt (+4, -0)
- examples/tutorials/by_deepspeed/step_2_train_model.py (+170, -0)
- examples/tutorials/lora_transformers/step_2_train_model.py (+2, -2)
- examples/tutorials/lora_transformers/step_3_inter_model.py (+1, -1)
examples/tutorials/by_deepspeed/ds_config/deepspeed_stage_3_config.json — ADDED
@@ -0,0 +1,58 @@
{
    "gradient_accumulation_steps": "auto",
    "gradient_clipping": "auto",
    "steps_per_print": 200,
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "wall_clock_breakdown": false,
    "optimizer": {
        "type": "Adam",
        "params": {
            "lr": "auto",
            "betas": "auto",
            "eps": "auto",
            "weight_decay": "auto"
        }
    },
    "fp16": {
        "enabled": "auto",
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "initial_scale_power": 16,
        "hysteresis": 2,
        "min_loss_scale": 1
    },
    "zero_optimization": {
        "stage": 3,
        "offload_optimizer": {
            "device": "cpu",
            "pin_memory": true
        },
        "offload_param": {
            "device": "cpu",
            "pin_memory": true
        },
        "overlap_comm": true,
        "contiguous_gradients": true,
        "sub_group_size": 1e9,
        "reduce_bucket_size": "auto",
        "stage3_prefetch_bucket_size": "auto",
        "stage3_param_persistence_threshold": "auto",
        "stage3_max_live_parameters": 1e9,
        "stage3_max_reuse_distance": 1e9,
        "stage3_gather_16bit_weights_on_model_save": true
    },
    "scheduler": {
        "type": "WarmupLR",
        "params": {
            "warmup_min_lr": "auto",
            "warmup_max_lr": "auto",
            "warmup_num_steps": "auto"
        }
    },
    "activation_checkpointing": {
        "enabled": true,
        "partition_activations": true,
        "contiguous_memory_optimization": true
    }
}
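Each "auto" value in this file is resolved at launch by the transformers/DeepSpeed integration from the matching TrainingArguments/SFTConfig fields (batch size, learning rate, bucket sizes), so those numbers live in one place. One hedged caveat: because the file also defines its own Adam optimizer and WarmupLR scheduler, those blocks take precedence over (and can silently disagree with) the optim="adamw_8bit" and lr_scheduler_type="constant_with_warmup" values set in the training script below. A minimal load-and-check sketch, assuming the config path used by step_2_train_model.py:

import json

# Sanity-check the ZeRO-3 settings before launching a multi-GPU run.
with open("ds_config/deepspeed_stage_3_config.json") as f:
    ds_config = json.load(f)

zero = ds_config["zero_optimization"]
assert zero["stage"] == 3                            # shard params, grads, and optimizer states
assert zero["offload_optimizer"]["device"] == "cpu"  # optimizer states in pinned host memory
assert zero["offload_param"]["device"] == "cpu"      # idle parameters paged out to CPU
assert ds_config["train_batch_size"] == "auto"       # filled in from SFTConfig at runtime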
examples/tutorials/by_deepspeed/requirements.txt — ADDED
@@ -0,0 +1,4 @@
datasets
unsloth
modelscope
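This list is thinner than what step_2_train_model.py actually imports: peft, trl, and torch are typically pulled in transitively by unsloth, but deepspeed itself is not listed and must be present for the ZeRO-3 config to take effect. A small environment check, assuming the package names used by the script:

import importlib.util

# Verify that the training script's imports (and the deepspeed backend)
# resolve in the current environment before launching a multi-GPU run.
for pkg in ("datasets", "modelscope", "peft", "trl", "torch", "deepspeed"):
    if importlib.util.find_spec(pkg) is None:
        raise ImportError(f"missing dependency: {pkg}")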
examples/tutorials/by_deepspeed/step_2_train_model.py — ADDED
@@ -0,0 +1,170 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import os
from pathlib import Path
import platform

# os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

if platform.system() in ("Windows", "Darwin"):
    from project_settings import project_path
else:
    project_path = os.path.abspath("../../../")
    project_path = Path(project_path)

from peft import LoraConfig
# from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from modelscope import AutoConfig, AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from trl import SFTTrainer, SFTConfig
from datasets import load_dataset
import torch


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="unsloth/Qwen3-8B",
        type=str
    )
    parser.add_argument(
        "--dataset_path",
        default="miyuki2026/tutorials",
        type=str
    )
    parser.add_argument("--dataset_name", default=None, type=str)
    parser.add_argument("--dataset_split", default=None, type=str)
    parser.add_argument(
        "--dataset_cache_dir",
        default=(project_path / "hub_datasets").as_posix(),
        type=str
    )
    parser.add_argument("--dataset_streaming", default=None, type=str)
    # integer arguments take type=int (type=str here would break
    # train_test_split and shuffle when the values come from the CLI)
    parser.add_argument("--valid_dataset_size", default=100, type=int)
    parser.add_argument("--shuffle_buffer_size", default=5000, type=int)

    parser.add_argument(
        "--num_workers",
        default=None if platform.system() == "Windows" else os.cpu_count() // 2,
        type=int
    )
    args = parser.parse_args()
    return args


def main():
    args = get_args()

    model = AutoModelForCausalLM.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        quantization_config=None,
        # device_map="auto",
        trust_remote_code=True
    )
    tokenizer = AutoTokenizer.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        trust_remote_code=True
    )
    print(model)

    def format_func(example):
        formated_text = tokenizer.apply_chat_template(
            example["conversation"],
            tokenize=False,  # keep raw text for training; True would return token IDs
            add_generation_prompt=False,  # must stay off for training; set True for inference
        )
        return {"formated_text": formated_text}

    dataset_dict = load_dataset(
        path=args.dataset_path,
        name=args.dataset_name,
        data_dir="keywords",
        # data_dir="psychology",
        split=args.dataset_split,
        cache_dir=args.dataset_cache_dir,
        # num_proc=args.num_workers if not args.dataset_streaming else None,
        streaming=args.dataset_streaming,
    )
    dataset = dataset_dict["train"]
    print(dataset)

    if args.dataset_streaming:
        valid_dataset = dataset.take(args.valid_dataset_size)
        train_dataset = dataset.skip(args.valid_dataset_size)
        train_dataset = train_dataset.shuffle(buffer_size=args.shuffle_buffer_size, seed=None)
    else:
        dataset = dataset.train_test_split(test_size=args.valid_dataset_size, seed=None)
        train_dataset = dataset["train"]
        valid_dataset = dataset["test"]

    # train_dataset = valid_dataset  # debug leftover (trains on the small validation
    # split); disabled here, matching the lora_transformers fix later in this commit
    train_dataset = train_dataset.map(
        format_func,
        batched=False,
        remove_columns=train_dataset.column_names,
    )

    trainer = SFTTrainer(
        model=model,
        processing_class=tokenizer,  # new-style name for the old tokenizer= argument
        train_dataset=train_dataset,
        eval_dataset=None,  # Can set up evaluation!
        args=SFTConfig(
            dataset_text_field="formated_text",
            deepspeed="./ds_config/deepspeed_stage_3_config.json",  # attach the DeepSpeed config file
            per_device_train_batch_size=1,
            gradient_accumulation_steps=64,  # Use GA to mimic batch size!
            warmup_steps=100,
            num_train_epochs=1,  # Set this for 1 full training run.
            # max_steps=30,
            learning_rate=3e-5,  # Reduce to 2e-5 for long training runs
            logging_steps=1,
            optim="adamw_8bit",
            weight_decay=0,
            lr_scheduler_type="constant_with_warmup",
            seed=3407,
            report_to="none",  # Use this for WandB etc
        ),
    )

    # Show current memory stats
    gpu_stats = torch.cuda.get_device_properties(0)
    start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
    max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
    print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
    print(f"{start_gpu_memory} GB of memory reserved.")

    trainer_stats = trainer.train()

    # Show final memory and time stats
    used_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
    used_memory_for_lora = round(used_memory - start_gpu_memory, 3)
    used_percentage = round(used_memory / max_memory * 100, 3)
    lora_percentage = round(used_memory_for_lora / max_memory * 100, 3)
    print(f"{trainer_stats.metrics['train_runtime']} seconds used for training.")
    print(
        f"{round(trainer_stats.metrics['train_runtime'] / 60, 2)} minutes used for training."
    )
    print(f"Peak reserved memory = {used_memory} GB.")
    print(f"Peak reserved memory for training = {used_memory_for_lora} GB.")
    print(f"Peak reserved memory % of max memory = {used_percentage} %.")
    print(f"Peak reserved memory for training % of max memory = {lora_percentage} %.")

    # Save only the LoRA adapter weights
    trained_models_dir = project_path / "trained_models" / "Qwen3-8B-sft-lora-adapter-transformers"
    trained_models_dir.mkdir(parents=True, exist_ok=True)
    trainer.model.save_pretrained(trained_models_dir.as_posix())
    tokenizer.save_pretrained(trained_models_dir.as_posix())

    # trained_models_dir = project_path / "trained_models" / "Qwen3-8B-sft-fp16"
    # trained_models_dir.mkdir(parents=True, exist_ok=True)
    # trainer.model.save_pretrained_merged(trained_models_dir.as_posix(), tokenizer, save_method="merged_16bit")
    # trained_models_dir = project_path / "trained_models" / "Qwen3-8B-sft-int4"
    # trained_models_dir.mkdir(parents=True, exist_ok=True)
    # trainer.model.save_pretrained_merged(trained_models_dir.as_posix(), tokenizer, save_method="merged_4bit")
    return


if __name__ == "__main__":
    main()
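As written, the script imports LoraConfig but never attaches an adapter, so trainer.model.save_pretrained(...) writes full model weights even though the output directory is named Qwen3-8B-sft-lora-adapter-transformers. A hedged sketch of how an adapter could be wired in through SFTTrainer's peft_config argument; the rank, alpha, and target-module names below are illustrative assumptions, not values taken from this commit:

from peft import LoraConfig

# Assumed LoRA hyperparameters; target_modules names the attention
# projections typical of Qwen-style decoders (not verified against this repo).
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.0,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    task_type="CAUSAL_LM",
)

# Passing it to the trainer wraps the base model in a PeftModel, so the
# save_pretrained call at the end really does emit only adapter weights:
# trainer = SFTTrainer(model=model, processing_class=tokenizer,
#                      train_dataset=train_dataset, peft_config=lora_config,
#                      args=SFTConfig(...))

Also note that with deepspeed=... set in SFTConfig, the script should be started through a distributed launcher (for example, deepspeed step_2_train_model.py) rather than plain python.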
examples/tutorials/lora_transformers/step_2_train_model.py — CHANGED

@@ -41,7 +41,7 @@ def get_args():
         type=str
     ),
     parser.add_argument("--dataset_streaming", default=None, type=str),
-    parser.add_argument("--valid_dataset_size", default=100, type=str),
+    parser.add_argument("--valid_dataset_size", default=1000, type=str),
     parser.add_argument("--shuffle_buffer_size", default=5000, type=str),

     parser.add_argument(

@@ -115,7 +115,7 @@ def main():
     train_dataset = dataset["train"]
     valid_dataset = dataset["test"]

-    train_dataset = valid_dataset
+    # train_dataset = valid_dataset
     train_dataset = train_dataset.map(
         format_func,
         batched=False,
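Both hunks undo debug leftovers: the held-out split default becomes 1000 examples, and the train_dataset = valid_dataset override, which made training run on the small validation split, is commented out. A toy sketch of the intended split behavior (hypothetical data standing in for the tutorial dataset):

from datasets import Dataset

# Hypothetical toy data; the real script loads miyuki2026/tutorials.
ds = Dataset.from_dict({"conversation": [f"example {i}" for i in range(5000)]})

# --valid_dataset_size feeds train_test_split's test_size directly.
split = ds.train_test_split(test_size=1000, seed=3407)
train_dataset = split["train"]  # 4000 examples: what SFT should see
valid_dataset = split["test"]   # 1000 held-out examples
assert len(train_dataset) == 4000 and len(valid_dataset) == 1000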
examples/tutorials/lora_transformers/step_3_inter_model.py — CHANGED

@@ -69,7 +69,7 @@ def main():

     # inject the LoRA adapter
     model = PeftModel.from_pretrained(model, args.lora_adapter_path)
-    # model.merge_and_unload()
+    # model.merge_and_unload()  # this step actually folds LoRA's A/B matrices into the base weights
     model.eval()
     # print(model)
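The commented-out merge_and_unload() call deserves one caveat: it returns the merged base model, so the result must be assigned. A minimal sketch of merged-weight inference, assuming the same model and args.lora_adapter_path as in the script:

from peft import PeftModel

# Fold the adapter into the base weights: W <- W + (alpha / r) * B @ A.
# merge_and_unload() returns the plain base model, so keep the return value;
# calling it and discarding the result leaves you with the PeftModel wrapper.
model = PeftModel.from_pretrained(model, args.lora_adapter_path)
model = model.merge_and_unload()
model.eval()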