# Medical-ChatBot-CPT LoRA Model

## Model Overview

A LoRA adapter produced by continual pre-training (CPT) of LLaMA-3.1-8B for a medical chatbot.

- **Base model**: meta-llama/Llama-3.1-8B
- **Languages**: Chinese (zh), English (en)
- **Training stage**: Continual Pre-Training (CPT)
- **Adapter size**: ~26.5 MB
## 1. Dataset

**Dataset**: [bootscoder/Medical-ChatBot-CPT](https://huggingface.co/datasets/bootscoder/Medical-ChatBot-CPT)

See the dataset card linked above for details.

## 2. Training Pipeline

### Tech Stack

- **DeepSpeed**: ZeRO Stage 1 distributed training
- **PEFT**: parameter-efficient fine-tuning with LoRA
- **BitsAndBytes**: 4-bit NF4 quantization
- **Flash Attention 2**: faster attention computation
- **TRL**: SFTTrainer training interface

### Training Stages

1. **Model initialization**: load LLaMA-3.1-8B and apply 4-bit quantization
2. **LoRA configuration**: initialize the low-rank adapters (r=32, alpha=8)
3. **Distributed training**: 8-GPU DeepSpeed training for 1 epoch
4. **Model saving**: save the LoRA adapter weights

## 3. Configuration

### Hardware

```
GPU: 8 × NVIDIA A5000 (24 GB VRAM)
Distributed: DeepSpeed ZeRO Stage 1
```
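
ZeRO Stage 1 shards only the optimizer states across the 8 ranks. The actual DeepSpeed JSON used for this run is not included in the repo; as a minimal sketch, the Hugging Face Trainer integration accepts a config dict like the following, with `"auto"` values resolved from the training arguments:

```python
# Illustrative DeepSpeed ZeRO Stage 1 config (not the repo's actual file);
# passed to the HF Trainer via the `deepspeed` training argument.
ds_config = {
    "zero_optimization": {"stage": 1},         # shard optimizer states only
    "bf16": {"enabled": True},                 # matches bf16: true below
    "train_micro_batch_size_per_gpu": "auto",  # resolved from the Trainer args
    "gradient_accumulation_steps": "auto",
}
```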

### Training Hyperparameters

```yaml
seq_length: 2048                 # sequence length
batch_size: 2                    # per-GPU batch size
gradient_accumulation_steps: 16  # gradient accumulation
effective_batch_size: 256        # 2 × 8 × 16
num_train_epochs: 1              # training epochs
learning_rate: 1e-5              # learning rate
lr_scheduler_type: cosine        # cosine schedule
warmup_ratio: 0.1                # warmup proportion
bf16: true                       # BF16 mixed precision
gradient_checkpointing: true     # gradient checkpointing
packing: true                    # sequence packing
```
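
These map directly onto TRL training arguments. A minimal sketch, assuming a recent `trl` release that provides `SFTConfig` (the `output_dir` is illustrative, and the sequence-length field is named `max_seq_length` in older releases and `max_length` in newer ones):

```python
from trl import SFTConfig

training_args = SFTConfig(
    output_dir="pretrained-lora",    # illustrative output path
    per_device_train_batch_size=2,
    gradient_accumulation_steps=16,  # effective batch: 2 × 8 GPUs × 16 = 256
    num_train_epochs=1,
    learning_rate=1e-5,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    bf16=True,
    gradient_checkpointing=True,
    max_seq_length=2048,
    packing=True,                    # sequence packing
    deepspeed=ds_config,             # ZeRO Stage 1 dict sketched above
)
```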

### QLoRA Configuration

**Quantization config**:

```python
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # 4-bit quantization
    bnb_4bit_quant_type="nf4",              # NF4 quant type
    bnb_4bit_compute_dtype=torch.bfloat16,  # compute in BF16
)
```

**LoRA config**:

```python
from peft import LoraConfig

lora_config = LoraConfig(
    r=32,                                 # LoRA rank
    lora_alpha=8,                         # scaling factor (alpha/r = 0.25)
    target_modules=["q_proj", "k_proj"],  # Q and K projection layers
    bias="none",                          # biases are not trained
    task_type="CAUSAL_LM",
)
# Trainable parameters: ~26.5 MB, roughly 0.2% of the model
```
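
Putting the pieces together, a hedged end-to-end sketch of the CPT run. It assumes the dataset exposes a plain `text` column and reuses `bnb_config`, `lora_config`, and `training_args` from above; launch it across the 8 GPUs with `deepspeed` or `accelerate launch`:

```python
import torch
from datasets import load_dataset
from peft import prepare_model_for_kbit_training
from transformers import AutoModelForCausalLM
from trl import SFTTrainer

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.1-8B",
    quantization_config=bnb_config,           # 4-bit NF4 quantization
    attn_implementation="flash_attention_2",  # Flash Attention 2
    torch_dtype=torch.bfloat16,
)
model = prepare_model_for_kbit_training(model)  # k-bit training housekeeping

dataset = load_dataset("bootscoder/Medical-ChatBot-CPT", split="train")

trainer = SFTTrainer(
    model=model,
    args=training_args,       # SFTConfig sketched above
    peft_config=lora_config,  # LoRA adapters defined above
    train_dataset=dataset,
)
trainer.train()
trainer.save_model("pretrained-lora")  # writes the adapter weights
```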

**Memory savings**:
- Full-parameter training (FP16): ~72 GB per GPU
- With QLoRA: ~7-8 GB per GPU
- **Memory saved: ~90%**

## 4. Peak VRAM Usage

**Per-GPU peak**: ____________ GB
**8-GPU total**: ____________ GB
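
The blanks above are meant to be filled in from an actual run. PyTorch's allocator statistics give the per-GPU figure (each rank reports its own peak; sum the 8 ranks for the total):

```python
import torch

# After training: peak memory allocated on this GPU, in GB.
peak_gb = torch.cuda.max_memory_allocated() / 1024**3
print(f"peak allocated: {peak_gb:.1f} GB")
```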

## 5. Expected Model Behavior

### Improvements over the Base LLaMA-3.1-8B

**Improvements**:
- Better understanding of medical terminology and concepts
- Output that better matches medical-domain language style
- A stronger initialization for subsequent SFT training

**Limitations**:
- Not instruction-tuned, so it does not understand instruction formats
- Output is weakly structured
- Not recommended for direct deployment

## Usage

### Loading the Model

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the base model
base_model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.1-8B",
    torch_dtype=torch.bfloat16,
    device_map="auto"
)

# Load the LoRA adapter
model = PeftModel.from_pretrained(base_model, "/path/to/pretrained-lora")
tokenizer = AutoTokenizer.from_pretrained("/path/to/pretrained-lora")

# Merge the adapter into the base weights (optional)
model = model.merge_and_unload()
```
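
After `merge_and_unload()`, the result is a plain Llama-3.1 model that no longer needs PEFT at load time. To persist it for adapter-free loading (the output path is illustrative):

```python
# Save the merged full model and its tokenizer (illustrative path).
model.save_pretrained("llama-3.1-8b-medical-cpt-merged")
tokenizer.save_pretrained("llama-3.1-8b-medical-cpt-merged")
```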

### Generating Text

```python
# A CPT-only model: prompt with a text continuation, not an instruction.
inputs = tokenizer("高血压是一种", return_tensors="pt").to(model.device)  # "Hypertension is a ..."
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0]))
```

## Model Files

```
pretrained-lora/
├── adapter_config.json        # LoRA configuration
├── adapter_model.safetensors  # LoRA weights (~26.5 MB)
├── special_tokens_map.json    # special-token map
├── tokenizer.json             # tokenizer
└── tokenizer_config.json      # tokenizer configuration
```

## License

Released under the [Llama 3.1 Community License](https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE).