import random

import pandas as pd
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

model_path = 'stockmark/Stockmark-2-100B-Instruct-beta'
quant_path = 'Stockmark-2-100B-Instruct-beta-AWQ'

# 4-bit AWQ with group size 128, using the GEMM kernels
quant_config = {"zero_point": True, "q_group_size": 128, "w_bit": 4, "version": "GEMM"}

# Load model and tokenizer
model = AutoAWQForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
| ds = pd.read_json("caliblation.jsonl", lines=True).to_dict("records") | |
| ds = [ tokenizer.apply_chat_template(doc["messages"], tokenize=False) for doc in ds ] | |
| random.shuffle(ds) | |
# Quantize
model.quantize(
    tokenizer,
    quant_config=quant_config,
    calib_data=ds,
    n_parallel_calib_samples=64,
    max_calib_samples=128,
    max_calib_seq_len=1024,
)
# Save quantized model and tokenizer
model.save_quantized(quant_path)
tokenizer.save_pretrained(quant_path)
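As a quick sanity check, the saved artifact can be loaded back with AutoAWQ's from_quantized and prompted once. This is a minimal sketch, not part of the original script; the prompt text and generation settings are illustrative assumptions.

from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

quant_path = 'Stockmark-2-100B-Instruct-beta-AWQ'

# Load the AWQ checkpoint produced above (fuse_layers is optional and speeds up decoding)
model = AutoAWQForCausalLM.from_quantized(quant_path, fuse_layers=True)
tokenizer = AutoTokenizer.from_pretrained(quant_path, trust_remote_code=True)

# Illustrative prompt; any chat-formatted input works
messages = [{"role": "user", "content": "Give a one-sentence summary of AWQ quantization."}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()

output = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))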