# llm-attention / usage.py
# Uploaded by sg-nta ("Upload 4 files", commit 8709e9b, verified)
import torch
import numpy as np
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from datasets import load_dataset
import gc
import os
# MODEL SELECTION
selected_model = "qwen-moe-a2.7b" # Change this
LOAD_PATH = "outputs/qwen-moe-a2.7b_20251009_084529" # Change this

def _artifact_path(filename):
    """Resolve *filename* inside the run output directory LOAD_PATH."""
    return os.path.join(LOAD_PATH, filename)

# Artifacts written by the capture run, all located under LOAD_PATH.
input_file = _artifact_path('tokenized_input.npz')
metadata_file = _artifact_path('metadata.txt')
attention_file = _artifact_path('attention_matrices_multihead.npz')
routing_file = _artifact_path('routing_matrices.npz')
# METADATA
# Echo the run's metadata.txt verbatim so the console log records which
# model/configuration produced the saved matrices.
print("--- Metadata ---")
# Fix: specify the encoding explicitly — the default is platform-dependent
# and can mis-decode the metadata file on non-UTF-8 locales (e.g. Windows).
with open(metadata_file, 'r', encoding='utf-8') as f:
    print(f.read())
# INPUT DATA
# Summarize the tokenized input saved by the capture run: token-id array
# shape, a sample of ids, and a preview of the original text.
print("\n--- Input data ---")
# Fix: np.load on an .npz returns an NpzFile holding an open zip handle;
# use it as a context manager so the descriptor is closed deterministically
# instead of leaking until interpreter exit.
with np.load(input_file) as input_data:
    print(f"File size: {os.path.getsize(input_file) / (1024**2):.2f} MB")
    input_ids = input_data['input_ids']
    print("\nTokenized Input:")
    print(f"Shape: {input_ids.shape}")
    print(f"First 20 token IDs: {input_ids[:20]}")
    # presumably 'input_text' is stored as a 0-d string array; str() unwraps
    # it back to a plain Python string — TODO confirm against the save script.
    input_text = str(input_data['input_text'])
print(f"\nOriginal text length: {len(input_text)} characters")
print(f"First 200 characters: {input_text[:200]}...")
# ATTENTION MATRICES
# Report the saved per-layer attention archives and print summary statistics
# plus a 5x5 sample for the first layer.
print("\n--- Attention matrices ---")
# NOTE(review): allow_pickle=True deserializes pickle data on load — only
# open attention files produced by a trusted capture run.
# Fix: close the NpzFile's underlying zip handle deterministically via the
# context-manager protocol instead of leaking it.
with np.load(attention_file, allow_pickle=True) as attn_data:
    print(f"File size: {os.path.getsize(attention_file) / (1024**2):.2f} MB")
    print(f"Available layers: {len(attn_data.files)}")
    print(f"Layer names: {attn_data.files}")
    # Print info for first layer
    layer_0 = attn_data['layer_0']
    print("\nLayer 0 attention matrix:")
    print(f"Shape: {layer_0.shape} [num_heads, seq_len, seq_len]")
    print(f"Number of heads: {layer_0.shape[0]}")
    print(f"Min value: {layer_0.min():.6f}")
    print(f"Max value: {layer_0.max():.6f}")
    print(f"Mean value: {layer_0.mean():.6f}")
    print(f"\n Head 0 attention sample (first 5x5 tokens):")
    print(layer_0[0, :5, :5])
# ROUTING MATRICES
# Report the saved per-layer MoE routing logits and print a sample of the
# first layer's first 5 token rows across all experts.
print("\n--- Routing matrices ---")
# Fix: close the NpzFile's underlying zip handle deterministically via the
# context-manager protocol instead of leaking it.
with np.load(routing_file) as routing_data:
    print(f"File size: {os.path.getsize(routing_file) / (1024**2):.2f} MB")
    print(f"Available layers: {len(routing_data.files)}")
    print(f"Layer names: {routing_data.files}")
    layer_0 = routing_data['layer_0']
    # assumes layer_0 is (num_tokens, num_experts) — TODO confirm with the
    # capture script; len of the first row then counts experts.
    print(f"Number of experts: {len(layer_0[0])}")
    print(f"\nRouting logits sample (first 5 tokens, all experts):")
    print(layer_0[:5, :])