"""
Streaming weight-only INT8 quantizer for large ONNX models.

Implements the same transformation as:
    quantize_dynamic(..., MatMulConstBOnly=True, per_channel=False, weight_type=QInt8)

Fully streaming: reads and writes one tensor at a time.
Peak RAM: ~1.5 GB (for the largest single tensor, the ~1.2 GB embedding table).

Usage: python stream_int8.py
"""
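
# The rewrite produces, for every MatMul whose B input is a constant weight:
#   B_q (INT8 initializer) -> DequantizeLinear -> B_dq (FP32) -> MatMul(A, B_dq)
# Activations stay FP32; only the weight storage shrinks ("weight-only").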

import gc
from pathlib import Path

import numpy as np
import onnx
from onnx import TensorProto, numpy_helper, helper

FP32_ONNX = Path("/Volumes/backups/ai/zerank_fp32_tmp/model_fp32.onnx")
FP32_DATA = Path("/Volumes/backups/ai/zerank_fp32_tmp/model_fp32.onnx_data")
INT8_OUT = Path("/Volumes/backups/ai/zerank_onnx_int8/model_int8.onnx")
INT8_DATA = Path("/Volumes/backups/ai/zerank_onnx_int8/model_int8.onnx_data")
MODEL_ID = "zeroentropy/zerank-1-small"

INT8_OUT.parent.mkdir(parents=True, exist_ok=True)


def quantize_tensor_per_tensor(arr: np.ndarray):
    """Symmetric per-tensor INT8 quantization (zero_point = 0)."""
    arr = arr.astype(np.float32)
    abs_max = np.max(np.abs(arr))
    if abs_max == 0:
        scale = np.float32(1.0)
        quantized = np.zeros_like(arr, dtype=np.int8)
    else:
        scale = np.float32(abs_max / 127.0)
        quantized = np.clip(np.round(arr / scale), -127, 127).astype(np.int8)
    return quantized, scale
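
# Worked example (hypothetical values): w = [-0.5, 0.25]
#   scale = 0.5 / 127 ≈ 0.003937
#   q = clip(round(w / scale), -127, 127) = [-127, 64]
#   q * scale ≈ [-0.500, 0.252]; round-trip error is bounded by scale / 2.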


def add_external_data(init: onnx.TensorProto, offset: int, length: int, data_file_name: str):
    """Update an initializer proto to point to external data."""
    init.data_location = TensorProto.EXTERNAL
    init.ClearField("external_data")
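    # "location", "offset", "length" (stored as strings) are the standard keys
    # of ONNX's external-data convention; loaders use them to find the tensor
    # bytes in the side file.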
    for k, v in [("location", data_file_name), ("offset", str(offset)), ("length", str(length))]:
        e = init.external_data.add()
        e.key, e.value = k, v


def quantize_model():
    print("Loading proto skeleton (no external data)...")
    m = onnx.load(str(FP32_ONNX), load_external_data=False)
    print(f" Nodes: {len(m.graph.node)}, Initializers: {len(m.graph.initializer)}")

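    # Index initializers: those stored in the external data file vs. those
    # embedded inline in the proto.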
    ext_index = {}
    inline_index = {}
    for init in m.graph.initializer:
        if init.data_location == TensorProto.EXTERNAL:
            info = {e.key: e.value for e in init.external_data}
            ext_index[init.name] = {
                "offset": int(info.get("offset", 0)),
                "length": int(info.get("length", 0)),
                "dtype": init.data_type,
                "dims": list(init.dims),
            }
        else:
            inline_index[init.name] = init

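    # Collect the constant B inputs of MatMul nodes (the MatMulConstBOnly set).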
    matmul_b_names = set()
    for node in m.graph.node:
        if node.op_type == "MatMul" and len(node.input) >= 2:
            b_name = node.input[1]
            if b_name in ext_index or b_name in inline_index:
                matmul_b_names.add(b_name)

    print(f" MatMul B weights to quantize: {len(matmul_b_names)}")
    non_matmul = [name for name in ext_index if name not in matmul_b_names]
    print(f" Non-MatMul external tensors (kept as FP32): {len(non_matmul)}")

    print(f"\nPhase 1: Writing tensor data to {INT8_DATA.name}")
    data_file_name = INT8_DATA.name

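    # out_positions: tensor name -> (offset, length) in the new data file;
    # scale_values: weight name -> per-tensor scale, both consumed in Phase 2.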
    out_positions = {}
    scale_values = {}

    try:
        from tqdm import tqdm
    except ImportError:
        tqdm = None

    offset = 0
    with open(str(FP32_DATA), "rb") as fp32_f, open(str(INT8_DATA), "wb") as int8_f:
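        # Sorted order makes data-file offsets deterministic across runs.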
        matmul_list = sorted(matmul_b_names)
        if tqdm:
            it = tqdm(matmul_list, desc=" Quantizing MatMul weights")
        else:
            it = matmul_list

        for w_name in it:
            if w_name in ext_index:
                meta = ext_index[w_name]
                fp32_f.seek(meta["offset"])
                raw = fp32_f.read(meta["length"])
                arr = np.frombuffer(raw, dtype=np.float32).reshape(meta["dims"])
            else:
                arr = numpy_helper.to_array(inline_index[w_name]).astype(np.float32)

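            # Quantize, then free the FP32 buffer right away so at most one
            # full-precision tensor is in memory at a time.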
            q_arr, scale_val = quantize_tensor_per_tensor(arr)
            del arr
            scale_values[w_name] = scale_val

            raw_int8 = q_arr.tobytes()
            int8_f.write(raw_int8)
            out_positions[w_name + "_quantized"] = (offset, len(raw_int8))
            offset += len(raw_int8)
            del q_arr

        print(f" Copying {len(non_matmul)} non-MatMul tensors...")
        for name in non_matmul:
            meta = ext_index[name]
            fp32_f.seek(meta["offset"])
            raw = fp32_f.read(meta["length"])
            int8_f.write(raw)
            out_positions[name] = (offset, len(raw))
            offset += len(raw)

    print(f" Data file written: {INT8_DATA.stat().st_size / 1e9:.2f} GB")

    print("\nPhase 2: Rebuilding ONNX proto...")

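    # Rewrite MatMul(A, B) as MatMul(A, DequantizeLinear(B_q, scale, zp)).
    # A weight shared by several MatMuls gets a single DequantizeLinear node.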
    new_nodes = []
    dql_inserted = set()
    for node in m.graph.node:
        if node.op_type == "MatMul" and len(node.input) >= 2 and node.input[1] in matmul_b_names:
            b_name = node.input[1]
            dql_out_name = b_name + "_dequant"
            if b_name not in dql_inserted:
                dql_node = helper.make_node(
                    "DequantizeLinear",
                    inputs=[b_name + "_quantized", b_name + "_scale", b_name + "_zero_point"],
                    outputs=[dql_out_name],
                )
                new_nodes.append(dql_node)
                dql_inserted.add(b_name)
            new_node = helper.make_node(
                "MatMul",
                inputs=[node.input[0], dql_out_name],
                outputs=list(node.output),
                name=node.name,
            )
            new_nodes.append(new_node)
        else:
            new_nodes.append(node)

    del m.graph.node[:]
    m.graph.node.extend(new_nodes)

    new_initializers = []

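    # INT8 weight initializers point into the freshly written data file;
    # scale and zero-point are tiny and stay inline in the proto.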
    for w_name in sorted(matmul_b_names):
        meta = ext_index.get(w_name) or {
            "dims": list(numpy_helper.to_array(inline_index[w_name]).shape)
        }
        dims = meta["dims"]

        q_init = TensorProto()
        q_init.name = w_name + "_quantized"
        q_init.data_type = TensorProto.INT8
        q_init.dims.extend(dims)
        off, length = out_positions[w_name + "_quantized"]
        add_external_data(q_init, off, length, data_file_name)

        # Scalar (0-d) scale and zero-point signal per-tensor quantization per
        # the DequantizeLinear spec, matching what quantize_dynamic emits.
        scale_init = numpy_helper.from_array(
            np.array(scale_values[w_name], dtype=np.float32), name=w_name + "_scale"
        )
        zp_init = numpy_helper.from_array(
            np.array(0, dtype=np.int8), name=w_name + "_zero_point"
        )

        new_initializers.extend([q_init, scale_init, zp_init])

    for name in non_matmul:
        meta = ext_index[name]
        init = TensorProto()
        init.name = name
        init.data_type = meta["dtype"]
        init.dims.extend(meta["dims"])
        off, length = out_positions[name]
        add_external_data(init, off, length, data_file_name)
        new_initializers.append(init)

    # Keep remaining inline initializers, but drop FP32 copies of any inline
    # weights that were quantized above.
    for init in m.graph.initializer:
        if init.name not in ext_index and init.name not in matmul_b_names:
            new_initializers.append(init)

    del m.graph.initializer[:]
    m.graph.initializer.extend(new_initializers)
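    # Drop stale value_info: shape annotations recorded for the FP32 graph no
    # longer match the rewritten nodes; onnxruntime can re-infer them on load.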
    del m.graph.value_info[:]

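    # onnx.save writes only the proto here: the big initializers already point
    # at the external data file written in Phase 1.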
    print(f" Saving proto → {INT8_OUT}")
    onnx.save(m, str(INT8_OUT))
    print(f" Proto size: {INT8_OUT.stat().st_size / 1e6:.1f} MB")
    total_gb = (INT8_OUT.stat().st_size + INT8_DATA.stat().st_size) / 1e9
    print(f" Total INT8 size: {total_gb:.2f} GB")


def verify():
    import onnxruntime as ort
    from transformers import AutoTokenizer

    print(f"\nVerifying {INT8_OUT.name}...")
    sess_opts = ort.SessionOptions()
    sess = ort.InferenceSession(
        str(INT8_OUT), sess_opts, providers=["CPUExecutionProvider"]
    )
    for inp in sess.get_inputs():
        print(f" in: {inp.name} {inp.shape}")
    for out in sess.get_outputs():
        print(f" out: {out.name} {out.shape}")

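    # Sanity check: a cross-encoder reranker should score the relevant passage
    # above the unrelated one if quantization preserved ranking behavior.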
    tok = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
    pairs = [
        ("what is a panda?", "A panda is a large black-and-white bear native to China."),
        ("what is a panda?", "The sky is blue and the grass is green."),
    ]
    scores = []
    for q, d in pairs:
        enc = tok(q, d, return_tensors="np", truncation=True, max_length=256)
        logit = sess.run(["logits"], {
            "input_ids": enc["input_ids"].astype(np.int64),
            "attention_mask": enc["attention_mask"].astype(np.int64),
        })[0]
        scores.append(float(logit[0][0]))

    print(f" logits: {[f'{s:.3f}' for s in scores]}")
    assert scores[0] > scores[1], \
        f"Relevant doc should score higher: {scores[0]:.3f} vs {scores[1]:.3f}"
    print(" OK: relevant doc ranked higher")


if __name__ == "__main__":
    for p in [INT8_OUT, INT8_DATA]:
        if p.exists():
            p.unlink()
            print(f"Deleted {p.name}")

    quantize_model()
    gc.collect()
    verify()

    print("\nAll done. Upload commands:")
    print(" huggingface-cli upload cstr/zerank-1-small-ONNX /private/tmp/zerank_export/zerank_onnx . --repo-type model")
    print(f" huggingface-cli upload cstr/zerank-1-small-ONNX {INT8_OUT.parent}/ . --commit-message 'add INT8' --repo-type model --include '*.onnx*'")
    print(" huggingface-cli upload cstr/zerank-1-small-ONNX /Volumes/backups/ai/zerank_onnx_int4/model_int4_full.onnx model_int4_full.onnx --repo-type model")