|
|
import torch |
|
|
import numpy as np |
|
|
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig |
|
|
from datasets import load_dataset |
|
|
import gc |
|
|
import os |
|
|
from datetime import datetime |
|
|
|
|
|
|
|
|
# Hard requirement: the 4-bit quantized model below only runs on CUDA, so
# fail fast with a clear message instead of crashing deep inside model load.
if torch.cuda.is_available():
    # All inputs are moved to the first CUDA device via .to(device).
    device = "cuda"
else:
    raise RuntimeError("This script requires a CUDA-enabled GPU.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Run configuration -------------------------------------------------
# MODE selects what the script does:
#   "extract" - run a forward pass and save attention/routing matrices
#   "load"    - re-open a previous run's saved .npz files and summarize them
MODE = "extract"

# Directory of a previous run to inspect when MODE == "load".
LOAD_PATH = "outputs/qwen-moe-a2.7b_20251009_084529"

# Intended on-disk dtype for attention matrices (half precision saves space).
# NOTE(review): the extraction code below hardcodes torch.float16/.half() and
# does not read this constant — keep the two in sync.
ATTENTION_DTYPE = np.float16

# NOTE(review): COMPRESSION_LEVEL and SAVE_SPARSE are not referenced anywhere
# below (np.savez_compressed exposes no compression-level parameter); they
# look like aspirational knobs — confirm before relying on them.
COMPRESSION_LEVEL = 9

SAVE_SPARSE = True
|
|
|
|
|
# Registry of checkpoints this script knows about, keyed by a short name that
# is also used for the output directory.  Most are MoE models; a few entries
# (e.g. dolly-v2-7b) appear to be dense models — presumably baselines whose
# forward pass will simply yield no router_logits.
AVAILABLE_MODELS = {
    "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1",
    "deepseek-v2": "deepseek-ai/DeepSeek-V2",
    "deepseek-v2.5": "deepseek-ai/DeepSeek-V2.5",
    "qwen-moe-a2.7b": "Qwen/Qwen1.5-MoE-A2.7B",
    "qwen-moe-a2.7b-chat": "Qwen/Qwen1.5-MoE-A2.7B-Chat",
    "dbrx": "databricks/dbrx-instruct",
    "arctic": "Snowflake/snowflake-arctic-instruct",
    # Switch Transformers are encoder-decoder models and take a different
    # code path below (decoder_input_ids, encoder/decoder attentions).
    "switch-base-8": "google/switch-base-8",
    "switch-base-16": "google/switch-base-16",
    "switch-base-32": "google/switch-base-32",
    "switch-base-64": "google/switch-base-64",
    "switch-base-128": "google/switch-base-128",
    "dolly-v2-7b": "databricks/dolly-v2-7b",
    "olmoe-7b": "allenai/OLMoE-1B-7B-0924"
}

# Short name of the checkpoint to analyze (must be a key of AVAILABLE_MODELS).
selected_model = "olmoe-7b"
model_id = AVAILABLE_MODELS[selected_model]

# Maximum token count fed to the model; the tokenizer truncates to this.
sequence_length = 5000
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- LOAD MODE ----------------------------------------------------------
# Re-opens a previous extraction run's files (metadata, tokenized input,
# attention and routing matrices), prints a summary of each, shows a usage
# cheat-sheet, and exits without touching the GPU-heavy extraction path.
if MODE == "load":
    print("=" * 70)
    print(f"LOAD MODE: Loading saved data (Multi-Head Version)")
    print("=" * 70)
    print(f"Load path: {LOAD_PATH}\n")

    if not os.path.exists(LOAD_PATH):
        raise FileNotFoundError(f"Directory not found: {LOAD_PATH}")

    # Metadata is a plain key: value text file; just echo it.
    metadata_file = os.path.join(LOAD_PATH, 'metadata.txt')
    if os.path.exists(metadata_file):
        print("--- Metadata ---")
        with open(metadata_file, 'r') as f:
            print(f.read())

    # Tokenized input: the exact token ids the model saw.
    input_file = os.path.join(LOAD_PATH, 'tokenized_input.npz')
    if os.path.exists(input_file):
        print("\n--- Loading Tokenized Input ---")
        input_data = np.load(input_file)
        print(f"✅ Loaded from: {input_file}")
        print(f"File size: {os.path.getsize(input_file) / (1024**2):.2f} MB")

        input_ids = input_data['input_ids']
        print(f"\nTokenized Input:")
        print(f" Shape: {input_ids.shape}")
        print(f" Dtype: {input_ids.dtype}")
        print(f" First 20 token IDs: {input_ids[:20]}")
    else:
        print(f"⚠️ Input file not found: {input_file}")

    # Attention matrices: one array per layer, shape (heads, seq, seq).
    attention_file = os.path.join(LOAD_PATH, 'attention_matrices_multihead.npz')
    if os.path.exists(attention_file):
        print("\n--- Loading Multi-Head Attention Matrices ---")
        # SECURITY: allow_pickle=True can execute arbitrary code if the .npz
        # is untrusted — only load archives produced by this script.
        attn_data = np.load(attention_file, allow_pickle=True)
        print(f"✅ Loaded from: {attention_file}")
        print(f"File size: {os.path.getsize(attention_file) / (1024**2):.2f} MB")
        print(f"Available layers: {len(attn_data.files)}")

        # NOTE(review): hardcoded 'layer_0' — Switch runs save keys like
        # 'encoder_layer_0' instead, which would raise KeyError here.
        layer_0 = attn_data['layer_0']
        print(f"\nExample - Layer 0:")
        print(f" Shape: {layer_0.shape} [num_heads, seq_len, seq_len]")
        print(f" Dtype: {layer_0.dtype}")
        print(f" Number of heads: {layer_0.shape[0]}")
        print(f" Min value: {layer_0.min():.6f}")
        print(f" Max value: {layer_0.max():.6f}")
        print(f" Mean value: {layer_0.mean():.6f}")

        print(f"\n Head 0 attention sample (first 5x5 tokens):")
        print(layer_0[0, :5, :5])

        # Compare in-memory footprint to the compressed file on disk.
        total_size_mb = sum(attn_data[key].nbytes for key in attn_data.files) / (1024**2)
        print(f"\nTotal uncompressed size in memory: {total_size_mb:.2f} MB")

        compressed_size_mb = os.path.getsize(attention_file) / (1024**2)
        compression_ratio = total_size_mb / compressed_size_mb if compressed_size_mb > 0 else 0
        print(f"Compression ratio: {compression_ratio:.2f}x")
    else:
        print(f"⚠️ Multi-head attention file not found: {attention_file}")

    # Routing matrices: per-layer router logits (tokens x experts).
    routing_file = os.path.join(LOAD_PATH, 'routing_matrices.npz')
    if os.path.exists(routing_file):
        print("\n--- Loading Routing Matrices ---")
        routing_data = np.load(routing_file)
        print(f"✅ Loaded from: {routing_file}")
        print(f"File size: {os.path.getsize(routing_file) / (1024**2):.2f} MB")
        print(f"Available layers: {len(routing_data.files)}")

        layer_0 = routing_data['layer_0']
        print(f"\nExample - Layer 0:")
        print(f" Shape: {layer_0.shape}")
        print(f" Dtype: {layer_0.dtype}")
    else:
        print(f"⚠️ Routing file not found: {routing_file}")

    # Usage cheat-sheet for downstream analysis scripts.
    print("\n" + "=" * 70)
    print("HOW TO USE THE LOADED DATA")
    print("=" * 70)
    print("# Access tokenized input:")
    print("input_ids = input_data['input_ids']")
    print("\n# Access specific layers (all heads):")
    print("layer_5_all_heads = attn_data['layer_5'] # Shape: (num_heads, seq_len, seq_len)")
    print("\n# Access specific head in a layer:")
    print("layer_5_head_3 = attn_data['layer_5'][3] # Shape: (seq_len, seq_len)")
    print("\n# Iterate through all layers:")
    print("for layer_name in attn_data.files:")
    print(" layer_matrix = attn_data[layer_name]")
    print(" num_heads = layer_matrix.shape[0]")
    print(" for head_idx in range(num_heads):")
    print(" head_attention = layer_matrix[head_idx]")
    print(" # Your analysis here...")
    print("=" * 70)

    # BUGFIX: the original used exit(0), which is injected by the `site`
    # module and is not guaranteed to exist (e.g. under `python -S` or in
    # frozen apps); raising SystemExit is the built-in equivalent.
    raise SystemExit(0)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Per-run output directory stamped with launch time so runs never collide.
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
output_dir = f"outputs/{selected_model}_{timestamp}"
os.makedirs(output_dir, exist_ok=True)

# Echo the effective configuration before any heavy work starts.
_rule = "=" * 70
print(_rule)
print(f"MoE Model Analysis Tool (Multi-Head Attention)")
print(_rule)
for _banner_line in (
    f"Selected Model: {selected_model}",
    f"Model ID: {model_id}",
    f"Output Directory: {output_dir}",
    f"Sequence Length: {sequence_length}",
    f"Attention dtype: {ATTENTION_DTYPE}",
    f"Compression level: {COMPRESSION_LEVEL}",
    f"Sparse matrix optimization: {SAVE_SPARSE}",
):
    print(_banner_line)
print(_rule)
|
|
|
|
|
print("\n--- 1. Setting up model and tokenizer ---")

# 4-bit NF4 weight quantization with nested (double) quantization; matmuls
# are computed in bfloat16.  This is what lets multi-billion-parameter MoE
# checkpoints fit on a single GPU.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4"
)
|
|
|
|
|
|
|
|
# trust_remote_code is needed for checkpoints whose modeling/tokenizer code
# lives in the model repo rather than in the transformers library itself.
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

# Some checkpoints ship without a pad token; reuse EOS so any tokenizer call
# that pads does not fail.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
|
|
|
|
|
|
|
|
# Load the causal LM with the 4-bit config above.  attn_implementation="eager"
# is deliberate: output_attentions=True needs the attention probabilities to
# be materialized, which fused/flash attention kernels typically skip.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=quantization_config,
    device_map="auto",          # let accelerate place shards across devices
    attn_implementation="eager",
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
)
print(f"✅ Model loaded on device: {model.device}")

# Return transient loading allocations before the big forward pass.
torch.cuda.empty_cache()
gc.collect()
|
|
|
|
|
print("\n--- 2. Preparing input data from wikitext ---")

# Stitch every non-blank wikitext-2 training row into one long document;
# the tokenizer below truncates it to at most sequence_length tokens.
wiki_dataset = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
non_empty_rows = (row['text'] for row in wiki_dataset if row['text'].strip())
long_text = " ".join(non_empty_rows)

encoded = tokenizer(
    long_text,
    return_tensors="pt",
    max_length=sequence_length,
    truncation=True
)
inputs = encoded.to(device)
input_ids = inputs.input_ids
print(f"✅ Input tensor shape: {input_ids.shape}")
|
|
|
|
|
|
|
|
print("\n--- 2.5. Saving tokenized input ---")

input_file = os.path.join(output_dir, 'tokenized_input.npz')
# Drop the batch dimension and move the ids to host memory for saving.
input_ids_np = input_ids.cpu().numpy().squeeze()
# Keep a generous prefix of the raw text alongside the ids so a later
# "load" run can eyeball what was tokenized.
text_preview = long_text[:sequence_length * 10]

print(f"Saving tokenized input to '{input_file}'...")
np.savez_compressed(
    input_file,
    input_ids=input_ids_np,
    input_text=text_preview
)

file_size_mb = os.path.getsize(input_file) / (1024**2)
print(f"✅ Tokenized input saved! File size: {file_size_mb:.2f} MB")
|
|
|
|
|
print("\n--- 3. Performing forward pass to extract matrices ---")
print(f"GPU memory before forward pass: {torch.cuda.memory_allocated() / 1e9:.2f} GB")

# Switch Transformers are encoder-decoder models and require decoder inputs;
# every other registered checkpoint is treated as decoder-only.
is_switch_transformer = "switch" in selected_model.lower()

# Single forward pass, gradients off.  inference_mode() already implies
# no-grad (the original nested torch.no_grad() inside it was redundant) and
# additionally skips autograd bookkeeping on the created tensors.
with torch.inference_mode():
    if is_switch_transformer:
        # Reuse the (truncated) input as decoder input, capped at 512 tokens
        # to keep decoder attention matrices manageable.
        decoder_input_ids = input_ids[:, :min(512, input_ids.shape[1])]
        outputs = model(
            input_ids,
            decoder_input_ids=decoder_input_ids,
            output_attentions=True,     # materialize per-layer attention
            output_router_logits=True,  # materialize per-layer MoE routing
            use_cache=False             # no KV cache needed for one pass
        )
    else:
        outputs = model(
            input_ids,
            output_attentions=True,
            output_router_logits=True,
            use_cache=False
        )
print("✅ Forward pass completed.")
print(f"GPU memory after forward pass: {torch.cuda.memory_allocated() / 1e9:.2f} GB")
|
|
|
|
|
print("\n--- 4. Extracting and saving MULTI-HEAD attention matrices ---")


def _collect_attention_layers(per_layer_attentions, key_prefix, label, dest):
    """Convert a tuple of per-layer attention tensors to float16 numpy arrays.

    Each tensor is assumed to be (batch=1, num_heads, seq_len, seq_len) —
    the batch dimension is squeezed away.  Arrays are stored in ``dest``
    under ``f"{key_prefix}{idx}"`` and one progress line is printed per layer.

    Returns:
        (total_heads, total_uncompressed_mb) accumulated over the tuple.
    """
    heads = 0
    size_mb = 0.0
    for layer_idx, layer_attention in enumerate(per_layer_attentions):
        # [1, num_heads, seq, seq] -> [num_heads, seq, seq]
        layer_attention = layer_attention.squeeze(0)
        num_heads = layer_attention.shape[0]
        heads += num_heads

        # Move to host and downcast to float16 to halve memory/disk usage.
        layer_attention_np = layer_attention.cpu().to(torch.float16).numpy()
        uncompressed_size = layer_attention_np.nbytes / (1024**2)
        size_mb += uncompressed_size

        dest[f"{key_prefix}{layer_idx}"] = layer_attention_np
        print(f" ✅ {label} {layer_idx}: shape {layer_attention_np.shape} ({num_heads} heads), uncompressed: {uncompressed_size:.2f} MB")

        # Drop references promptly and periodically return memory; full
        # multi-head attention at seq_len=5000 is very large.
        del layer_attention_np, layer_attention
        if layer_idx % 4 == 0:
            torch.cuda.empty_cache()
            gc.collect()
    return heads, size_mb


attention_matrices = {}
total_uncompressed_size = 0
total_heads = 0

# The original code repeated the identical conversion loop three times
# (encoder / decoder / decoder-only); it is now factored into the helper
# above — keys, prints, and cleanup cadence are unchanged.
if is_switch_transformer:
    # Encoder-decoder: save encoder and decoder self-attentions under
    # distinct key prefixes (cross-attentions are not extracted).
    if hasattr(outputs, 'encoder_attentions') and outputs.encoder_attentions is not None:
        num_encoder_layers = len(outputs.encoder_attentions)
        print(f"Number of encoder attention layers: {num_encoder_layers}")
        n_heads, n_mb = _collect_attention_layers(
            outputs.encoder_attentions, "encoder_layer_", "Encoder Layer", attention_matrices)
        total_heads += n_heads
        total_uncompressed_size += n_mb

    if hasattr(outputs, 'decoder_attentions') and outputs.decoder_attentions is not None:
        num_decoder_layers = len(outputs.decoder_attentions)
        print(f"Number of decoder attention layers: {num_decoder_layers}")
        n_heads, n_mb = _collect_attention_layers(
            outputs.decoder_attentions, "decoder_layer_", "Decoder Layer", attention_matrices)
        total_heads += n_heads
        total_uncompressed_size += n_mb

    num_layers = len(attention_matrices)
else:
    # Decoder-only models expose a single tuple of self-attentions.
    num_layers = len(outputs.attentions)
    print(f"Number of attention layers: {num_layers}")
    n_heads, n_mb = _collect_attention_layers(
        outputs.attentions, "layer_", "Layer", attention_matrices)
    total_heads += n_heads
    total_uncompressed_size += n_mb
|
|
|
|
|
|
|
|
# Write all layers into one compressed .npz archive.
attention_file = os.path.join(output_dir, 'attention_matrices_multihead.npz')
print(f"\n🔄 Saving multi-head attention matrices with maximum compression...")
print(f" Total uncompressed size: {total_uncompressed_size:.2f} MB")
print(f" Total heads across all layers: {total_heads}")

# zlib-compressed; attention matrices are highly redundant so this usually
# compresses well below the raw float16 size.
np.savez_compressed(attention_file, **attention_matrices)

compressed_size_mb = os.path.getsize(attention_file) / (1024**2)
compression_ratio = total_uncompressed_size / compressed_size_mb if compressed_size_mb > 0 else 0
# BUGFIX: guard the percentage against total_uncompressed_size == 0 (no
# attention layers extracted), which previously raised ZeroDivisionError.
space_saved_pct = (1 - compressed_size_mb / total_uncompressed_size) * 100 if total_uncompressed_size > 0 else 0.0

print(f"✅ Multi-head attention matrices saved!")
print(f" Compressed file size: {compressed_size_mb:.2f} MB")
print(f" Compression ratio: {compression_ratio:.2f}x")
print(f" Space saved: {total_uncompressed_size - compressed_size_mb:.2f} MB ({space_saved_pct:.1f}%)")

# Keep the key ordering and a representative first layer for the metadata
# section before the big dict is released.
attention_layer_names = list(attention_matrices.keys())
first_attention_layer = attention_matrices[attention_layer_names[0]] if attention_layer_names else None

# Release the host-side copies.  BUGFIX: Switch (encoder-decoder) outputs
# have no `attentions` attribute, so the original unconditional
# `del outputs.attentions` raised AttributeError on that path.
del attention_matrices
if hasattr(outputs, 'attentions'):
    del outputs.attentions
torch.cuda.empty_cache()
gc.collect()
|
|
|
|
|
|
|
|
def _collect_router_logits(per_layer_logits, key_prefix, label, dest):
    """Move per-layer router logits to float16 numpy arrays stored in dest.

    Each entry is assumed to carry a leading batch dim of 1, which is
    squeezed away.  Prints one progress line per layer.
    """
    for layer_idx, router_logits in enumerate(per_layer_logits):
        router_logits_np = router_logits.squeeze(0).cpu().half().numpy()
        dest[f"{key_prefix}{layer_idx}"] = router_logits_np
        print(f" ✅ {label} {layer_idx}: shape {router_logits_np.shape}")
        del router_logits_np
        if layer_idx % 4 == 0:
            gc.collect()


routing_matrices = {}
num_moe_layers = 0

# The original code repeated the identical conversion loop three times
# (encoder / decoder / decoder-only); it now uses the helper above.
if hasattr(outputs, 'router_logits') and outputs.router_logits is not None:
    if is_switch_transformer and isinstance(outputs.router_logits, tuple):
        # Switch outputs router logits as an (encoder, decoder) pair, each a
        # per-layer sequence (or None when the side has no MoE layers).
        if outputs.router_logits[0] is not None:
            encoder_router_logits = outputs.router_logits[0]
            print(f"\nNumber of encoder MoE routing layers: {len(encoder_router_logits)}")
            _collect_router_logits(
                encoder_router_logits, "encoder_layer_", "Encoder Layer", routing_matrices)

        if outputs.router_logits[1] is not None:
            decoder_router_logits = outputs.router_logits[1]
            print(f"\nNumber of decoder MoE routing layers: {len(decoder_router_logits)}")
            _collect_router_logits(
                decoder_router_logits, "decoder_layer_", "Decoder Layer", routing_matrices)

        num_moe_layers = len(routing_matrices)
    else:
        # Decoder-only MoE models: one router-logit tensor per MoE layer.
        num_moe_layers = len(outputs.router_logits)
        print(f"\nNumber of MoE routing layers: {num_moe_layers}")
        _collect_router_logits(
            outputs.router_logits, "layer_", "Layer", routing_matrices)

    if routing_matrices:
        routing_file = os.path.join(output_dir, 'routing_matrices.npz')
        print(f"\nSaving routing matrices to '{routing_file}'...")
        np.savez_compressed(routing_file, **routing_matrices)
        file_size_mb = os.path.getsize(routing_file) / (1024**2)
        print(f"✅ Routing matrices saved! File size: {file_size_mb:.2f} MB")
    else:
        routing_file = None
else:
    # Dense models (or MoE implementations that don't surface router logits)
    # land here; extraction continues without routing data.
    print(f"\n⚠️ Warning: Model '{selected_model}' does not expose router_logits.")
    print("This model may use a different MoE implementation or may not be a standard MoE model.")
    print("Skipping routing matrix extraction.")
    routing_file = None
|
|
|
|
|
|
|
|
|
|
|
# Describe the routing-logit shape(s) for the metadata file.
if num_moe_layers > 0 and hasattr(outputs, 'router_logits') and outputs.router_logits is not None:
    if is_switch_transformer and isinstance(outputs.router_logits, tuple):
        # BUGFIX: the original used bare truthiness (`if outputs.router_logits[0]`),
        # which is inconsistent with the `is not None` checks used during
        # extraction and raises for multi-element tensors; test None and
        # emptiness explicitly instead.
        enc_logits = outputs.router_logits[0]
        dec_logits = outputs.router_logits[1]
        encoder_routing_shape = str(enc_logits[0].squeeze(0).shape) if enc_logits is not None and len(enc_logits) > 0 else "N/A"
        decoder_routing_shape = str(dec_logits[0].squeeze(0).shape) if dec_logits is not None and len(dec_logits) > 0 else "N/A"
        routing_shape = f"encoder: {encoder_routing_shape}, decoder: {decoder_routing_shape}"
    else:
        # Shape of the first layer's logits is representative of all layers.
        routing_shape = str(outputs.router_logits[0].squeeze(0).shape)
else:
    routing_shape = "N/A"
|
|
|
|
|
|
|
|
# Summarize per-layer head counts for the metadata file.  Head counts can
# only be read from first_attention_layer (the only array kept alive after
# the big dict was freed), so several branches fall back to "N/A" when the
# first saved layer does not belong to the section being summarized.
attention_heads_info = {}
if is_switch_transformer:
    # Partition the saved layer keys by section prefix.
    encoder_layers = [k for k in attention_layer_names if k.startswith('encoder_')]
    decoder_layers = [k for k in attention_layer_names if k.startswith('decoder_')]

    if encoder_layers and first_attention_layer is not None:
        # Head count is known only if the first saved layer IS an encoder
        # layer; otherwise report "N/A".
        encoder_heads = first_attention_layer.shape[0] if encoder_layers[0] == attention_layer_names[0] else "N/A"
        attention_heads_info['encoder_heads_per_layer'] = encoder_heads
        attention_heads_info['encoder_layers'] = len(encoder_layers)

    if decoder_layers:
        # Same caveat for the decoder section.  In practice the encoder is
        # saved first, so this usually resolves to "N/A".
        if decoder_layers[0] == attention_layer_names[0] and first_attention_layer is not None:
            decoder_heads = first_attention_layer.shape[0]
        else:
            decoder_heads = "N/A"
        attention_heads_info['decoder_heads_per_layer'] = decoder_heads
        attention_heads_info['decoder_layers'] = len(decoder_layers)

    # NOTE(review): this string assumes encoder seq_len == sequence_length,
    # while the decoder was capped at <=512 tokens above — hence the symbolic
    # "seq_len" on the decoder side.  Confirm against actual tensor shapes.
    attention_shape_str = f"encoder: ({attention_heads_info.get('encoder_heads_per_layer', 'N/A')}, {sequence_length}, {sequence_length}), decoder: ({attention_heads_info.get('decoder_heads_per_layer', 'N/A')}, seq_len, seq_len)"
else:
    # Decoder-only: all layers share a head count; read it off the first one.
    num_heads_per_layer = first_attention_layer.shape[0] if first_attention_layer is not None else "N/A"
    attention_heads_info['heads_per_layer'] = num_heads_per_layer
    attention_shape_str = f"({num_heads_per_layer}, {sequence_length}, {sequence_length})"
|
|
|
|
|
# Human-readable run manifest written next to the .npz files.
# BUGFIX: guard the percentage against total_uncompressed_size == 0 (no
# attention layers extracted), which previously raised ZeroDivisionError.
space_saved_percent = f"{(1 - compressed_size_mb / total_uncompressed_size) * 100:.1f}%" if total_uncompressed_size > 0 else "N/A"

metadata = {
    "model_name": selected_model,
    "model_id": model_id,
    "model_architecture": "encoder-decoder (Switch Transformer)" if is_switch_transformer else "decoder-only",
    "sequence_length": sequence_length,
    "actual_token_count": input_ids_np.shape[0],
    "num_attention_layers": num_layers,
    # Flatten the per-section head-count summary into num_* keys.
    **{f"num_{k}": v for k, v in attention_heads_info.items()},
    "total_attention_heads": total_heads,
    "num_routing_layers": num_moe_layers if num_moe_layers > 0 else "N/A",
    "timestamp": timestamp,
    "attention_shape_per_layer": attention_shape_str,
    "routing_shape_per_layer": routing_shape,
    "has_routing_matrices": "Yes" if num_moe_layers > 0 else "No",
    "attention_storage_format": "multi-head (all heads preserved)",
    "attention_dtype": str(ATTENTION_DTYPE.__name__),
    "compression_method": "numpy.savez_compressed",
    "uncompressed_attention_size_mb": f"{total_uncompressed_size:.2f}",
    "compressed_attention_size_mb": f"{compressed_size_mb:.2f}",
    "compression_ratio": f"{compression_ratio:.2f}x",
    "space_saved_percent": space_saved_percent,
}

# One "key: value" line per entry; load mode simply echoes this file.
metadata_file = os.path.join(output_dir, 'metadata.txt')
with open(metadata_file, 'w') as f:
    for key, value in metadata.items():
        f.write(f"{key}: {value}\n")
print(f"\n✅ Metadata saved to '{metadata_file}'")
|
|
|
|
|
# Final console summary of everything produced by this run.
print("\n" + "=" * 70)
print("SUMMARY")
print("=" * 70)
print(f"Model: {selected_model}")
print(f"Input tokens: {input_ids_np.shape[0]}")
print(f"Attention layers saved: {num_layers}")
print(f"Total attention heads: {total_heads}")
print(f"Routing layers saved: {num_moe_layers if num_moe_layers > 0 else 'N/A (not available)'}")
print(f"Output directory: {output_dir}")
print(f"\nCompression Statistics:")
print(f" Uncompressed size: {total_uncompressed_size:.2f} MB")
print(f" Compressed size: {compressed_size_mb:.2f} MB")
print(f" Compression ratio: {compression_ratio:.2f}x")
# BUGFIX: guard against total_uncompressed_size == 0 (no attention layers
# extracted), which previously raised ZeroDivisionError on this line.
if total_uncompressed_size > 0:
    print(f" Space saved: {(1 - compressed_size_mb/total_uncompressed_size)*100:.1f}%")
else:
    print(" Space saved: N/A")
print(f"\nFinal GPU memory: {torch.cuda.memory_allocated() / 1e9:.2f} GB")
print("\nFiles created:")
print(f" - {input_file}")
print(f" - {attention_file}")
if routing_file:
    print(f" - {routing_file}")
print(f" - {metadata_file}")
print("=" * 70)