Create parameters.py
parameters.py · ADDED · +143 -0
@@ -0,0 +1,143 @@
import torch
import safetensors.torch
import concurrent.futures
import zlib
import logging
from typing import Dict, Tuple
from pathlib import Path

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler()]
)

class AdvancedModelParameters:
    def __init__(self, num_shards=2089, base_filename="charm15", hidden_size=16384, layers_per_shard=100):
        """Initialize model parameters for a massive transformer model."""
        self.num_shards = num_shards
        self.base_filename = base_filename
        self.hidden_size = hidden_size
        self.layers_per_shard = layers_per_shard
        self.ffn_multiplier = 4
        self.shape = (hidden_size, hidden_size)
        self.dtype = torch.float16
        self.base_path = Path("model_shards")
        self.base_path.mkdir(parents=True, exist_ok=True)

    def generate_layer_parameters(self, layer_idx: int) -> Dict[str, torch.Tensor]:
        """Generate parameters for a single transformer layer."""
        params = {}
        prefix = f"layer_{layer_idx}"

        # Attention weights (Q, K, V, O)
        for name in ["query_weight", "key_weight", "value_weight", "output_weight"]:
            params[f"{prefix}.attention.{name}"] = torch.randn(
                self.shape, dtype=self.dtype
            ) * (1.0 / self.hidden_size ** 0.5)

        # FFN weights
        intermediate_size = self.hidden_size * self.ffn_multiplier
        params[f"{prefix}.ffn.intermediate_weight"] = torch.randn(
            self.hidden_size, intermediate_size, dtype=self.dtype
        ) * (1.0 / self.hidden_size ** 0.5)
        params[f"{prefix}.ffn.output_weight"] = torch.randn(
            intermediate_size, self.hidden_size, dtype=self.dtype
        ) * (1.0 / intermediate_size ** 0.5)

        return params

    def generate_shard_parameters(self, shard_index: int) -> Dict[str, torch.Tensor]:
        """Generate parameters for a single shard (shard indices are 1-based)."""
        params = {}
        start_layer = (shard_index - 1) * self.layers_per_shard
        end_layer = start_layer + self.layers_per_shard

        # Generate layers for this shard
        for layer_idx in range(start_layer, end_layer):
            params.update(self.generate_layer_parameters(layer_idx))

        # Add embeddings and output layer to the first shard
        if shard_index == 1:
            params["embedding.word_embeddings"] = torch.randn(
                50000, self.hidden_size, dtype=self.dtype
            ) * (1.0 / self.hidden_size ** 0.5)
            params["embedding.position_embeddings"] = torch.randn(
                4096, self.hidden_size, dtype=self.dtype
            ) * (1.0 / self.hidden_size ** 0.5)
            params["output_layer"] = torch.randn(
                self.hidden_size, 50000, dtype=self.dtype
            ) * (1.0 / self.hidden_size ** 0.5)

        return params

    def compress_tensor(self, tensor: torch.Tensor) -> bytes:
        """Apply zlib compression to raw tensor bytes.

        Standalone helper for byte-oriented storage: safetensors' save_file
        accepts only torch.Tensor values, so the save path below stores
        tensors directly rather than pre-compressed bytes.
        """
        tensor_bytes = tensor.detach().cpu().numpy().tobytes()
        return zlib.compress(tensor_bytes, level=9)

    def save_single_shard(self, shard_index: int) -> None:
        """Save a single model shard in safetensors format."""
        params = self.generate_shard_parameters(shard_index)
        filename = self.base_path / f"{self.base_filename}_{shard_index}_of_{self.num_shards}.safetensors"

        # Save with metadata (safetensors requires string-valued metadata)
        metadata = {
            "shard_index": str(shard_index),
            "total_shards": str(self.num_shards),
            "layers": str(self.layers_per_shard),
            "hidden_size": str(self.hidden_size)
        }
        safetensors.torch.save_file(params, str(filename), metadata=metadata)
        logging.info(f"[✔] Shard {shard_index}/{self.num_shards} saved: {filename}")

    def save_sharded_parameters(self) -> None:
        """Save all shards in parallel."""
        logging.info(f"Starting to save {self.num_shards} shards...")
        with concurrent.futures.ThreadPoolExecutor() as executor:
            # Consume the iterator so any worker exception is re-raised here
            list(executor.map(self.save_single_shard, range(1, self.num_shards + 1)))
        logging.info("All shards saved successfully.")

    def estimate_parameters(self) -> Tuple[int, float]:
        """Estimate total parameters and memory usage."""
        params_per_layer = (
            4 * (self.hidden_size * self.hidden_size) +  # Attention weights
            self.hidden_size * (self.hidden_size * self.ffn_multiplier) +  # FFN intermediate
            (self.hidden_size * self.ffn_multiplier) * self.hidden_size  # FFN output
        )
        params_per_shard = params_per_layer * self.layers_per_shard
        total_params = params_per_shard * self.num_shards

        # Add embedding and output layer from first shard
        total_params += (
            50000 * self.hidden_size +  # word_embeddings
            4096 * self.hidden_size +  # position_embeddings
            self.hidden_size * 50000  # output_layer
        )

        memory_gb = (total_params * 2) / 1024**3  # 2 bytes per float16
        return total_params, memory_gb

def main():
    """Main execution flow."""
    model_storage = AdvancedModelParameters(
        num_shards=2089,
        base_filename="charm15",
        hidden_size=16384,
        layers_per_shard=100
    )

    # Estimate parameters
    total_params, memory_gb = model_storage.estimate_parameters()
    logging.info(f"Estimated total parameters: {total_params:,}")
    logging.info(f"Estimated memory usage: {memory_gb:.2f} GB")

    # Save shards
    model_storage.save_sharded_parameters()

if __name__ == "__main__":
    main()
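
A note on scale: with the defaults above, each layer holds 4·16384² attention weights plus 2·(16384·65536) FFN weights, about 3.22 billion parameters, so one 100-layer shard is roughly 322 billion parameters (~644 GB in float16) and the full 2089-shard model comes to about 6.7×10¹⁴ parameters, well over a petabyte. Since each worker thread materializes a whole shard in memory before saving, the defaults are best treated as illustrative; far smaller hidden_size and layers_per_shard values are needed for an actual run.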
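
To verify a written shard, the file can be read back with the public safetensors API. A minimal sketch, assuming the default filenames produced above; the shard path and tensor name are illustrative:

from pathlib import Path

import safetensors.torch
from safetensors import safe_open

# Hypothetical path: the first shard written by save_single_shard
shard_path = Path("model_shards") / "charm15_1_of_2089.safetensors"

# Read the string-valued metadata without loading any tensor data
with safe_open(str(shard_path), framework="pt") as f:
    print(f.metadata())  # e.g. {'shard_index': '1', 'total_shards': '2089', ...}

# Load every tensor in the shard into a {name: tensor} dict
tensors = safetensors.torch.load_file(str(shard_path))
print(len(tensors), tensors["layer_0.attention.query_weight"].shape)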