{
  "quantization_method": "hybrid_int8_fp16",
  "linear_layers": "0/0 (INT8)",
  "embedding_layers": "1/1 (FP16)",
  "total_quantized": "1/1",
  "original_model": "luca-deandrea/MNLP_M3_mcqa_model",
  "quantization_timestamp": "2025-06-10 22:10:59",
  "pytorch_version": "2.6.0+cu118",
  "estimated_compression_ratio": "1.3x",
  "estimated_size_mb": 1704.5302734375,
  "original_size_mb": 2272.70703125,
  "formats_included": [
    "pytorch_bin_only"
  ],
  "lighteval_compatible": true,
  "notes": "Linear layers: INT8 quantization, Embedding layers: FP16 conversion"
}