Commit 352b475: update
Parent(s): abe8bbe
examples/tutorials/lora_unsloth/requirements.txt
CHANGED
@@ -1,3 +1,4 @@
 datasets
 unsloth
+modelscope
 
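Note: the only change here is the new modelscope dependency, presumably so the tutorial can fetch the base model or dataset from ModelScope rather than the Hugging Face Hub. A minimal sketch of that usage with the standard snapshot_download helper; the model id is illustrative, not taken from this commit:

# Hedged sketch: download a base model from ModelScope into the local cache.
from modelscope import snapshot_download

# "Qwen/Qwen3-8B" is an assumed id for illustration; use whatever the
# tutorial actually trains on.
model_dir = snapshot_download("Qwen/Qwen3-8B")
print(model_dir)  # local path that transformers/unsloth can load from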
examples/tutorials/lora_unsloth/step_2_train_model.py
CHANGED
@@ -50,20 +50,6 @@ def get_args():
     return args
 
 
-def convert_to_qwen_format(example):
-    """
-    :param example: {"conversation_id": 612, "category": "", "conversation": [{"human": "", "assistant": ""}], "dataset": ""}
-    """
-    conversation_ = []
-    for conversation in example["conversation"]:
-        conversation_.append([
-            {"role": "user", "content": conversation["human"].strip()},
-            {"role": "assistant", "content": conversation["assistant"].strip()},
-        ])
-    result = {"conversation": conversation_}
-    return result
-
-
 def main():
     args = get_args()
 
@@ -99,7 +85,7 @@ def main():
             tokenize=False,  # don't tokenize at training time; True returns tensors
             add_generation_prompt=False,  # keep off during training; set to True for inference
         )
-        return {"
+        return {"formated_text": formated_text}
 
     dataset_dict = load_dataset(
         path=args.dataset_path,
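Note: the hunk above shows only the tail of the new formatting function. A minimal sketch of what format_func plausibly looks like, reconstructed from the removed convert_to_qwen_format and the tokenize/add_generation_prompt context lines; the tokenizer closure and exact field names are assumptions, not the commit's confirmed code:

    # Hedged sketch of format_func (nested inside main), not the exact code.
    def format_func(example):
        # Flatten the human/assistant turns into chat-format messages.
        messages = []
        for turn in example["conversation"]:
            messages.append({"role": "user", "content": turn["human"].strip()})
            messages.append({"role": "assistant", "content": turn["assistant"].strip()})
        # Render the turns with the model's chat template as one training string.
        formated_text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,  # don't tokenize at training time
            add_generation_prompt=False,  # off for training; True for inference
        )
        return {"formated_text": formated_text}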
@@ -115,19 +101,70 @@ def main():
     train_dataset = dataset_dict["train"]
 
     train_dataset = train_dataset.map(
-        convert_to_qwen_format,
+        format_func,
         batched=False,
-        remove_columns=["conversation_id", "category", "dataset"]
     )
     print(train_dataset)
 
-
-
-
-
+    trainer = SFTTrainer(
+        model=model,
+        # processing_class=tokenizer,
+        # tokenizer=tokenizer,
+        train_dataset=train_dataset,
+        eval_dataset=None,  # Can set up evaluation!
+        args=SFTConfig(
+            dataset_text_field="formated_text",
+            per_device_train_batch_size=8,
+            gradient_accumulation_steps=4,  # Use GA to mimic batch size!
+            warmup_steps=5,
+            num_train_epochs=1,  # Set this for 1 full training run.
+            # max_steps=30,
+            learning_rate=2e-4,  # Reduce to 2e-5 for long training runs
+            logging_steps=1,
+            optim="adamw_8bit",
+            weight_decay=0.01,
+            lr_scheduler_type="linear",
+            seed=3407,
+            report_to="none",  # Use this for WandB etc
+        ),
     )
-    print(train_dataset)
 
+    # Show current memory stats
+    gpu_stats = torch.cuda.get_device_properties(0)
+    start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
+    max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
+    print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
+    print(f"{start_gpu_memory} GB of memory reserved.")
+
+    trainer_stats = trainer.train()
+
+    # Show final memory and time stats
+    used_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
+    used_memory_for_lora = round(used_memory - start_gpu_memory, 3)
+    used_percentage = round(used_memory / max_memory * 100, 3)
+    lora_percentage = round(used_memory_for_lora / max_memory * 100, 3)
+    print(f"{trainer_stats.metrics['train_runtime']} seconds used for training.")
+    print(
+        f"{round(trainer_stats.metrics['train_runtime'] / 60, 2)} minutes used for training."
+    )
+    print(f"Peak reserved memory = {used_memory} GB.")
+    print(f"Peak reserved memory for training = {used_memory_for_lora} GB.")
+    print(f"Peak reserved memory % of max memory = {used_percentage} %.")
+    print(f"Peak reserved memory for training % of max memory = {lora_percentage} %.")
+
+    # ==================== 4. Save training results ====================================
+    # Save only the LoRA adapter parameters
+    trained_models_dir = project_path / "trained_models" / "Qwen3-8B-sft-lora-adapter-unsloth"
+    trained_models_dir.mkdir(parents=True, exist_ok=True)
+    model.save_pretrained(trained_models_dir.as_posix())
+    tokenizer.save_pretrained(trained_models_dir.as_posix())
+
+    # trained_models_dir = project_path / "trained_models" / "Qwen3-8B-sft-fp16"
+    # trained_models_dir.mkdir(parents=True, exist_ok=True)
+    # model.save_pretrained_merged(trained_models_dir.as_posix(), tokenizer, save_method="merged_16bit",)
+    # trained_models_dir = project_path / "trained_models" / "Qwen3-8B-sft-int4"
+    # trained_models_dir.mkdir(parents=True, exist_ok=True)
+    # model.save_pretrained_merged(trained_models_dir.as_posix(), tokenizer, save_method="merged_4bit",)
     return
 
 
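Note: the commit saves only the LoRA adapter plus tokenizer, so inference needs a reload step. A minimal sketch using unsloth's FastLanguageModel, assuming the adapter directory created above; max_seq_length and the 4-bit flag are illustrative, not taken from this commit:

# Hedged sketch: reload the saved adapter for inference with unsloth.
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="trained_models/Qwen3-8B-sft-lora-adapter-unsloth",
    max_seq_length=2048,  # illustrative; match the training configuration
    load_in_4bit=True,    # illustrative
)
FastLanguageModel.for_inference(model)  # switch unsloth into inference mode

messages = [{"role": "user", "content": "Hello!"}]
input_ids = tokenizer.apply_chat_template(
    messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(input_ids=input_ids, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))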