import torch
from safetensors.torch import load_file, save_file
import os
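
# The original LoRA file whose weights will be converted (expected to be fp32).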
OLD_LORA_FILE = "./Turbo_Booster_v1.safetensors"
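
# Where the converted fp16 file will be written.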
NEW_LORA_FILE = "Turbo_Booster_v1-fp16.safetensors"
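
# The dtype every weight tensor will be cast to.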
TARGET_DTYPE = torch.float16


def fix_lora_precision():
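    """Load OLD_LORA_FILE, cast every tensor to TARGET_DTYPE, and save
    the result to NEW_LORA_FILE."""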
    if not os.path.exists(OLD_LORA_FILE):
        print(f"❌ Error: could not find the old LoRA file: {OLD_LORA_FILE}")
        return

    print(f"✅ Loading weights from '{OLD_LORA_FILE}'...")
    try:
        state_dict = load_file(OLD_LORA_FILE)
        print(f" - Successfully loaded {len(state_dict)} weight tensors.")
    except Exception as e:
        print(f"❌ Error while loading weights: {e}")
        return
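
    # Sanity check on the first tensor only; this assumes all tensors in the
    # file share one dtype, which is normally true for a LoRA checkpoint.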
    first_key = next(iter(state_dict))
    if state_dict[first_key].dtype != torch.float32:
        print(f"⚠️ Warning: the weights do not appear to be fp32 (detected {state_dict[first_key].dtype}). The script will convert them anyway.")

    new_state_dict = {}
    print(f"\n🔄 Converting all weights to {TARGET_DTYPE}...")
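
    # tensor.to(TARGET_DTYPE) returns a cast copy (or the same tensor if the
    # dtype already matches); the originals in state_dict are left untouched.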
    for key, tensor in state_dict.items():
        new_state_dict[key] = tensor.to(TARGET_DTYPE)
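
    # Note: load_file() returns only the tensors, not the file's metadata
    # header, so any metadata on the original file is not carried over.
    # A minimal sketch of preserving it, assuming safetensors' safe_open API:
    #     from safetensors import safe_open
    #     with safe_open(OLD_LORA_FILE, framework="pt") as f:
    #         metadata = f.metadata()  # may be None
    #     save_file(new_state_dict, NEW_LORA_FILE, metadata=metadata or {})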
| print(f"\n💾 正在将转换后的权重保存到 '{NEW_LORA_FILE}'...") |
| try: |
| save_file(new_state_dict, NEW_LORA_FILE) |
| print("\n✨ 修复完成!") |
| print(f" - 新的、与 fp8 兼容的 LoRA 文件已保存在: {NEW_LORA_FILE}") |
| print(" - 你现在可以把这个新文件重命名为 'pytorch_lora_weights.safetensors' 并上传更新了。") |
| except Exception as e: |
| print(f"❌ 保存新文件时出错: {e}") |


if __name__ == "__main__":
    fix_lora_precision()