Pull request #10 by HandH1998 (opened): Update inference/bf16_cast_channel_int8.py

File changed: inference/bf16_cast_channel_int8.py
@@ -35,8 +35,21 @@ def main(bf16_path, int8_path, model_name="deepseek-ai/DeepSeek-R1"):
 
     # modify config.json and save it
     config = json.load(open(config_file))
-
-
+    if "quantization_config" in config:
+        quant_config = config["quantization_config"]
+        quant_config.pop("fmt", None)
+        quant_config.pop("weight_block_size", None)
+        quant_config["quant_method"] = "w8a8_int8"
+        quant_config["group_size"] = -1
+        quant_config["activation_scheme"] = "dynamic"
+        quant_config["bits"] = 8
+    else:
+        config["quantization_config"] = {
+            "activation_scheme": "dynamic",
+            "quant_method": "w8a8_int8",
+            "group_size": -1,
+            "bits": 8
+        }
     with open(config_file, "w", encoding="utf-8") as f:
         json.dump(config, f, indent=2, ensure_ascii=False, sort_keys=True)
     print(f"config.json modified and saved to {config_file}")