import os
import traceback

import torch
from datasets import Dataset
from llmcompressor import oneshot
from llmcompressor.modifiers.awq import AWQMapping, AWQModifier

# ============================================================
# Configuration (batch quantization: edit only these globals;
# no per-model changes needed)
# ============================================================
ROOT_MODEL_DIR = "./"
QUANT_SUFFIX = "_awq_w4a16"

# Calibration data (a single short sample; a larger, more
# representative set generally gives better quantization quality)
CALIB_DATA = [
    """You are a helpful assistant.
User: Please draft an outline for a report on global climate change.
Assistant: Certainly, here is a suggested outline for a report on global climate change:
I. Introduction
   A. What is global climate change
   B. Purpose and scope of the report
II. Causes of climate change
   A. Natural causes (solar activity, volcanic eruptions)
   B. Human causes (greenhouse-gas emissions, land-use change)
"""
]

# AWQ mappings (Llama-style; Qwen-series models share the same
# projection and layernorm module names, so these apply directly)
LLAMA_MAPPINGS = [
    AWQMapping(
        "re:.*input_layernorm",
        ["re:.*q_proj", "re:.*k_proj", "re:.*v_proj"],
    ),
    AWQMapping("re:.*v_proj", ["re:.*o_proj"]),
    AWQMapping(
        "re:.*post_attention_layernorm",
        ["re:.*gate_proj", "re:.*up_proj"],
    ),
    AWQMapping(
        "re:.*up_proj",
        ["re:.*down_proj"],
    ),
]


# ============================================================
# Helper: collect all sft_qwen* model directories to quantize
# ============================================================
def get_target_model_dirs():
    """
    Scan ROOT_MODEL_DIR for directories starting with "sft_qwen"
    (models awaiting quantization), skipping any that are already
    quantized (directory name contains "_awq_").
    """
    target_dirs = []
    skipped_dirs = []

    # sorted() gives a deterministic processing order across runs
    for item in sorted(os.listdir(ROOT_MODEL_DIR)):
        item_path = os.path.abspath(os.path.join(ROOT_MODEL_DIR, item))
        # Keep only directories whose names start with "sft_qwen"
        if os.path.isdir(item_path) and item.startswith("sft_qwen"):
            # Fix: skip already-quantized models ("_awq_" in the name)
            if "_awq_" in item:
                skipped_dirs.append(item)
                print(f"[skipping quantized model] {item}")
            else:
                target_dirs.append(item)
                print(f"[found model to quantize] {item}")

    if skipped_dirs:
        print(f"\n⏭️ Skipped {len(skipped_dirs)} already-quantized model(s)")
    if not target_dirs:
        print("⚠️ No sft_qwen model directories awaiting quantization were found")
    else:
        print(f"\n✅ Found {len(target_dirs)} model(s) to quantize\n")

    return target_dirs


# ============================================================
# Core: quantize a single model
# ============================================================
def quantize_single_model(model_name):
    """
    Quantize one model.
    :param model_name: model directory name (e.g. sft_qwen3_4b)
    """
    MODEL_PATH = os.path.join(ROOT_MODEL_DIR, model_name)
    QUANT_PATH = os.path.join(ROOT_MODEL_DIR, f"{model_name}{QUANT_SUFFIX}")

    print("\n" + "=" * 100)
    print(f"Starting quantization for model: {model_name}")
    print(f"Model input path:      {MODEL_PATH}")
    print(f"Quantized output path: {QUANT_PATH}")
    print("=" * 100 + "\n")

    if not torch.cuda.is_available():
        print("❌ Error: this step requires a GPU; aborting quantization")
        return False

    try:
        calib_dataset = Dataset.from_dict({"text": CALIB_DATA})
    except Exception as e:
        print(f"❌ Failed to build the calibration dataset: {e}")
        return False

    # Create a brand-new AWQModifier instance for every quantization run
    recipe = [
        AWQModifier(
            scheme="W4A16_ASYM",
            mappings=LLAMA_MAPPINGS,
            ignore=["lm_head"],
            targets=["Linear"],
        ),
    ]

    try:
        print("[Step 1/2] Running AWQ W4A16 oneshot quantization...")
        print("  This performs weight scaling and low-bit quantization; it can take a while, please be patient...")
        # Fix: the unsupported tokenizer_kwargs argument has been removed
        oneshot(
            model=MODEL_PATH,
            dataset=calib_dataset,
            recipe=recipe,
            output_dir=QUANT_PATH,
            num_calibration_samples=len(CALIB_DATA),
            max_seq_length=4096,
        )
        print("\n[Step 2/2] oneshot quantization complete!")
    except Exception as e:
        print(f"\n❌ Error while quantizing model {model_name}")
        print(f"Error details: {e}")
        traceback.print_exc()
        return False
    finally:
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.synchronize()

    print("\n" + "=" * 100)
    print(f"🎉 Model {model_name} quantized successfully!")
    print(f"4-bit AWQ model saved to: {QUANT_PATH}")
    print("=" * 100 + "\n")
    return True


# ============================================================
# Main: run quantization over all discovered models
# ============================================================
def run_batch_quantization():
    print("🚀 Starting batch W4A16 quantization for Qwen-series models")
    print(f"Working directory: {os.path.abspath(ROOT_MODEL_DIR)}\n")

    target_models = get_target_model_dirs()
    if not target_models:
        return

    success_count = 0
    fail_count = 0

    for idx, model_name in enumerate(target_models, 1):
        print(f"\n========== Batch quantization {idx}/{len(target_models)} ==========")
        if quantize_single_model(model_name):
            success_count += 1
        else:
            fail_count += 1

    print("\n" + "=" * 100)
    print("📊 Batch quantization finished")
    print(f"✅ Quantized successfully: {success_count} model(s)")
    print(f"❌ Failed to quantize:     {fail_count} model(s)")
    print("=" * 100)


if __name__ == "__main__":
    run_batch_quantization()
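

# ============================================================
# Optional smoke test (a minimal sketch, not part of the batch
# pipeline above and not called automatically). Assumes
# `transformers` and `compressed-tensors` are installed so the
# W4A16 checkpoint can be deserialized; the default path below
# is hypothetical, point it at a directory this script produced.
# ============================================================
def smoke_test_quantized_model(quant_path="./sft_qwen3_4b_awq_w4a16"):
    """Reload a quantized checkpoint and generate a few tokens."""
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(quant_path)
    model = AutoModelForCausalLM.from_pretrained(
        quant_path,
        torch_dtype="auto",  # quantization scheme is read from the checkpoint config
        device_map="auto",
    )
    inputs = tokenizer("Hello, how are you?", return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=32)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))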