BICORP committed
Commit 5854bd8 · verified · 1 parent: af2099c

Upload directory

Files changed (1)
main.py +128 -0
main.py ADDED
@@ -0,0 +1,128 @@
+ import torch
+ from safetensors.torch import save_file, load_file
+ import sys
+ import json
+
+ def create_gemma_safetensors(output_file="model.safetensors", dtype=torch.bfloat16, num_heads=64, num_layers=48, image_feature_dim=2048):
+     """
+     Creates a safetensors file with randomly initialized Gemma-style model weights,
+     including vision components for a vision-text-to-text model.
+
+     Args:
+         output_file (str): The name of the output safetensors file.
+         dtype (torch.dtype): The data type of the model weights.
+         num_heads (int): The number of attention heads.
+         num_layers (int): The number of transformer layers.
+         image_feature_dim (int): The dimension of the image features.
+     """
+
+     def randn(*shape):
+         # Sample in float32 and cast afterwards: torch.randn does not
+         # support low-precision dtypes such as float8_e4m3fn directly.
+         return torch.randn(*shape, dtype=torch.float32).to(dtype)
+
+     tensors = {}
+     embedding_dim = 5376 // 4  # reducing embedding_dim to reduce model size
+     intermediate_size = 21504 // 4  # reducing intermediate_size to reduce model size
+     head_dim = embedding_dim // num_heads
+
+     # Text embedding (vocab size also reduced 4x)
+     tensors["language_model.model.embed_tokens.weight"] = randn(262208 // 4, embedding_dim) * 0.02
+
+     # Text layers
+     for layer_idx in range(num_layers):
+         layer_prefix = f"language_model.model.layers.{layer_idx}"
+         tensors[f"{layer_prefix}.input_layernorm.weight"] = randn(embedding_dim)
+         tensors[f"{layer_prefix}.mlp.down_proj.weight"] = randn(embedding_dim, intermediate_size) * (intermediate_size ** -0.5)
+         tensors[f"{layer_prefix}.mlp.gate_proj.weight"] = randn(intermediate_size, embedding_dim) * (embedding_dim ** -0.5)
+         tensors[f"{layer_prefix}.mlp.up_proj.weight"] = randn(intermediate_size, embedding_dim) * (embedding_dim ** -0.5)
+         tensors[f"{layer_prefix}.post_attention_layernorm.weight"] = randn(embedding_dim)
+         tensors[f"{layer_prefix}.post_feedforward_layernorm.weight"] = randn(embedding_dim)
+         tensors[f"{layer_prefix}.pre_feedforward_layernorm.weight"] = randn(embedding_dim)
+         # Attention projections: q_proj has twice the rows of k_proj/v_proj
+         # (a grouped-query-style layout); o_proj maps back to embedding_dim.
+         tensors[f"{layer_prefix}.self_attn.k_norm.weight"] = randn(head_dim)
+         tensors[f"{layer_prefix}.self_attn.k_proj.weight"] = randn(head_dim * num_heads // 4, embedding_dim) * ((embedding_dim * head_dim // 4) ** -0.5)
+         tensors[f"{layer_prefix}.self_attn.o_proj.weight"] = randn(embedding_dim, head_dim * num_heads // 4 * 2) * ((embedding_dim * head_dim // 4 * 2) ** -0.5)
+         tensors[f"{layer_prefix}.self_attn.q_norm.weight"] = randn(head_dim)
+         tensors[f"{layer_prefix}.self_attn.q_proj.weight"] = randn(head_dim * num_heads // 4 * 2, embedding_dim) * ((embedding_dim * head_dim // 4 * 2) ** -0.5)
+         tensors[f"{layer_prefix}.self_attn.v_proj.weight"] = randn(head_dim * num_heads // 4, embedding_dim) * ((embedding_dim * head_dim // 4) ** -0.5)
+
+     # Vision encoder (simplified example)
+     tensors["vision_encoder.projection.weight"] = randn(image_feature_dim, embedding_dim) * (image_feature_dim ** -0.5)
+     tensors["vision_encoder.projection.bias"] = torch.zeros(embedding_dim, dtype=dtype)
+
+     # numel() * element_size() counts bytes, so this is the serialized size
+     total_bytes = sum(t.numel() * t.element_size() for t in tensors.values())
+     total_size_gb = total_bytes / (1024 ** 3)
+
+     print(f"Estimated total weight size: {total_size_gb:.2f} GB ({dtype}).")
+     proceed = input("Do you want to proceed and create the safetensors file? (y/n): ").lower()
+
+     if proceed == 'y':
+         save_file(tensors, output_file)
+         print(f"Gemma safetensors file created: {output_file}")
+     else:
+         print("Safetensors file creation cancelled.")
+         sys.exit()
+
+ def analyze_model_file(model_file):
+     """
+     Analyzes a safetensors model file, extracts relevant information,
+     prints it to the console, and saves it to a config.json file.
+
+     Args:
+         model_file (str): The path to the safetensors model file.
+     """
+     try:
+         tensors = load_file(model_file, device="cpu")  # Load safetensors
+
+         config = {}
+
+         # Extract basic information (example; adjust for your model)
+         config["architectures"] = ["Gemma3ForConditionalGeneration"]
+         config["model_type"] = "gemma3"
+         # str(torch.bfloat16) yields "torch.bfloat16"; config.json
+         # conventionally stores just "bfloat16", so strip the prefix.
+         dtype_str = str(tensors["language_model.model.embed_tokens.weight"].dtype)
+         config["torch_dtype"] = dtype_str.removeprefix("torch.")
+
+         # Extract dimensions (adjust based on your model's tensor names)
+         config["hidden_size"] = tensors["language_model.model.embed_tokens.weight"].shape[1]
+         # Count distinct layer indices rather than dividing the tensor count
+         # by a hard-coded tensors-per-layer figure (this file stores 13
+         # tensors per layer, so the old `// 12` over-counted).
+         config["num_hidden_layers"] = len({
+             key.split(".")[3]
+             for key in tensors
+             if key.startswith("language_model.model.layers.")
+         })
+         # q_norm is applied per head, so its length is head_dim; the number
+         # of query heads then follows from the q_proj row count.
+         head_dim = tensors["language_model.model.layers.0.self_attn.q_norm.weight"].shape[0]
+         q_rows = tensors["language_model.model.layers.0.self_attn.q_proj.weight"].shape[0]
+         config["head_dim"] = head_dim
+         config["num_attention_heads"] = q_rows // head_dim
+         config["intermediate_size"] = tensors["language_model.model.layers.0.mlp.gate_proj.weight"].shape[0]
+
+         # Add other relevant information as needed. For example:
+         config["vocab_size"] = tensors["language_model.model.embed_tokens.weight"].shape[0]
+         config["image_feature_dim"] = tensors["vision_encoder.projection.weight"].shape[0]
+
+         # Print to console
+         print(json.dumps(config, indent=2))
+
+         # Save to config.json
+         with open("config.json", "w") as f:
+             json.dump(config, f, indent=2)
+
+         print("Model analysis completed and saved to config.json")
+
+     except FileNotFoundError:
+         print(f"Error: Model file '{model_file}' not found.")
+     except Exception as e:
+         print(f"An error occurred during model analysis: {e}")
+
+ if __name__ == "__main__":
+     dtype_map = {
+         "bfloat16": torch.bfloat16,
+         "float16": torch.float16,
+         "float32": torch.float32,
+         "float8": torch.float8_e4m3fn,
+     }
+     dtype_input = input("Enter the desired data type (bfloat16, float16, float32, float8): ").lower()
+     dtype = dtype_map.get(dtype_input)
+     if dtype is None:
+         print("Invalid data type. Using bfloat16 as default.")
+         dtype = torch.bfloat16
+
+     create_gemma_safetensors(dtype=dtype)
+
+     # Analyze the created model file
+     analyze_model_file("model.safetensors")
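
A minimal sanity check for the generated file, assuming the defaults above (model.safetensors written to the working directory, bfloat16 weights):

    from safetensors.torch import load_file

    tensors = load_file("model.safetensors", device="cpu")
    embed = tensors["language_model.model.embed_tokens.weight"]
    # With the defaults, expect (vocab_size, hidden_size) = (65552, 1344)
    # and the dtype chosen at the prompt.
    print(embed.shape, embed.dtype)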