{
  "metadata": {
    "total_size": 140737488355328,
    "format": "safetensors",
    "model_name": "DeepXR/Helion-2.5-Rnd",
    "version": "2.5.0-rnd",
    "precision": "bfloat16",
    "parameters": "70B",
    "total_shards": 96,
    "created_at": "2025-01-30T00:00:00Z",
    "sha256_checksums_available": true,
    "shard_size_avg": "1.46GB",
    "note": "SafeTensors shards will be available soon. Model weights distributed across 96 files for optimal loading."
  },
  "weight_map": {
    "model.embed_tokens.weight": "model-00001-of-00096.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00002-of-00096.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00002-of-00096.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00003-of-00096.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00003-of-00096.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00004-of-00096.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00004-of-00096.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00005-of-00096.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00005-of-00096.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00005-of-00096.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00006-of-00096.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00006-of-00096.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00007-of-00096.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00007-of-00096.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00008-of-00096.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00008-of-00096.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00009-of-00096.safetensors",
    "model.layers.31.self_attn.q_proj.weight": "model-00092-of-00096.safetensors",
    "model.layers.31.self_attn.k_proj.weight": "model-00093-of-00096.safetensors",
    "model.layers.31.self_attn.v_proj.weight": "model-00093-of-00096.safetensors",
    "model.layers.31.self_attn.o_proj.weight": "model-00094-of-00096.safetensors",
    "model.layers.31.mlp.gate_proj.weight": "model-00094-of-00096.safetensors",
    "model.layers.31.mlp.up_proj.weight": "model-00095-of-00096.safetensors",
    "model.layers.31.mlp.down_proj.weight": "model-00095-of-00096.safetensors",
    "model.layers.31.input_layernorm.weight": "model-00096-of-00096.safetensors",
    "model.layers.31.post_attention_layernorm.weight": "model-00096-of-00096.safetensors",
    "model.norm.weight": "model-00096-of-00096.safetensors",
    "lm_head.weight": "model-00096-of-00096.safetensors"
  },
  "safetensors_info": {
    "description": "SafeTensors format provides secure, fast, and zero-copy tensor serialization",
    "total_shards": 96,
    "parameters": "70B",
    "shard_distribution": "Each transformer layer distributed across ~3 shards for balanced loading",
    "benefits": [
      "No arbitrary code execution during loading",
      "Lazy loading support for memory efficiency",
      "Fast deserialization without pickle",
      "Tensor metadata validation",
      "Cross-platform compatibility",
      "Memory-mapped file support",
      "Parallel loading across multiple GPUs"
    ],
    "verification": "Each file will include SHA256 checksum for integrity verification",
    "status": "In preparation - 96 shards to be released soon"
  },
  "shard_structure": {
    "embedding_layer": {
      "shards": ["model-00001-of-00096.safetensors"],
      "size_estimate": "~1.0GB"
    },
    "transformer_layers": {
      "layers_per_shard": "~0.33",
      "shards_range": "model-00002-of-00096 to model-00095-of-00096",
      "size_estimate": "~1.5GB per shard"
    },
    "output_layer": {
      "shards": ["model-00096-of-00096.safetensors"],
      "includes": ["model.norm.weight", "lm_head.weight"],
      "size_estimate": "~1.0GB"
    }
  }
}