# llm_from_scratch / convert_checkpoint.py
# Uploaded by tranquilk via huggingface_hub (commit 8c546f3, verified)
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import os


def main():
    """Convert a raw training checkpoint (.pth) into a standard Hugging Face
    model directory that lm-evaluation-harness can load.

    Steps: load the base model architecture + tokenizer from the Hub, load the
    checkpoint's state_dict, splice the weights into the model, and save both
    model and tokenizer with ``save_pretrained``.
    """
    # --- 1. Configure your paths ---
    # Hub ID of the *base* model you fine-tuned from.
    # This is critical: it MUST be the exact model you started training with.
    base_model_id = "Qwen/Qwen1.5-7B"
    # Path to your saved training checkpoint (.pth).
    checkpoint_path = "/path/to/your/checkpoint.pth"  # <-- edit: your .pth path
    # Where to save the converted, evaluation-ready model.
    # lm-evaluation-harness will load the model from this new directory.
    output_dir = "../my_pretrain_for_eval"  # <-- edit: your output directory

    # --- 2. Load the base model and tokenizer ---
    # This provides the model "skeleton" (architecture) and the tokenizer.
    print(f"Loading base model architecture from: {base_model_id}")
    # trust_remote_code=True is required for models such as Qwen.
    model = AutoModelForCausalLM.from_pretrained(base_model_id, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True)

    # --- 3. Load your checkpoint and extract the model weights ---
    print(f"Loading training checkpoint from: {checkpoint_path}")
    # map_location='cpu' avoids GPU memory pressure during conversion.
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from a trusted source (weights_only=True would reject the
    # optimizer state typically stored alongside the weights).
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    # Weights were saved under the 'model' key by the training code; fall back
    # to treating the whole file as a bare state_dict for checkpoints saved
    # without that wrapper dict.
    if isinstance(checkpoint, dict) and 'model' in checkpoint:
        model_weights = checkpoint['model']
    else:
        model_weights = checkpoint
    # Strip the 'module.' prefix that DataParallel/DDP saves leave behind;
    # this is a no-op for checkpoints saved from an unwrapped model.
    model_weights = {
        (key[len('module.'):] if key.startswith('module.') else key): value
        for key, value in model_weights.items()
    }

    # --- 4. Load your weights into the model "skeleton" ---
    print("Loading fine-tuned weights into the model...")
    model.load_state_dict(model_weights)
    print("Weights loaded successfully.")

    # --- 5. Save model and tokenizer in standard Hugging Face format ---
    print(f"Saving model and tokenizer to: {output_dir}")
    os.makedirs(output_dir, exist_ok=True)
    # save_pretrained() creates config.json, the weight files, etc.
    model.save_pretrained(output_dir)
    # Creates tokenizer.json and the other tokenizer files.
    tokenizer.save_pretrained(output_dir)
    print("Conversion complete!")
    print(f"Your model is now ready for evaluation at: {output_dir}")


if __name__ == "__main__":
    main()