import glob
import re
import shutil
import sys

import accelerate
import torch
from safetensors import safe_open
from transformers.models.glm4_moe_lite.configuration_glm4_moe_lite import Glm4MoeLiteConfig

from configuration_glm4_moe_lite_scm import Glm4MoeLiteSCMConfig
from modeling_glm4_moe_lite_scm import Glm4MoeLiteSCMForCausalLM

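# Command-line arguments: the source (standard MoE) checkpoint directory and the
# directory to write the converted SCM checkpoint to.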
input_model = sys.argv[1]
output_model_path = sys.argv[2]

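# auto_map points transformers at the custom SCM classes, so the converted checkpoint
# can be loaded via AutoConfig/AutoModel/AutoModelForCausalLM with trust_remote_code=True.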
auto_map = {
    "AutoConfig": "configuration_glm4_moe_lite_scm.Glm4MoeLiteSCMConfig",
    "AutoModel": "modeling_glm4_moe_lite_scm.Glm4MoeLiteSCMModel",
    "AutoModelForCausalLM": "modeling_glm4_moe_lite_scm.Glm4MoeLiteSCMForCausalLM",
}

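# Mirror the source configuration into the SCM configuration. The fields are copied
# one-to-one, except num_nextn_predict_layers, which is set to 0 because the extra
# prediction layer is dropped (its weights are skipped during the remapping below).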
cfg_standard_moe = Glm4MoeLiteConfig.from_pretrained(input_model)
cfg_shared_moe = Glm4MoeLiteSCMConfig(
    auto_map=auto_map,
    n_group=cfg_standard_moe.n_group,
    topk_group=cfg_standard_moe.topk_group,
    n_shared_experts=cfg_standard_moe.n_shared_experts,
    n_routed_experts=cfg_standard_moe.n_routed_experts,
    num_experts_per_tok=cfg_standard_moe.num_experts_per_tok,
    first_k_dense_replace=cfg_standard_moe.first_k_dense_replace,
    vocab_size=cfg_standard_moe.vocab_size,
    hidden_size=cfg_standard_moe.hidden_size,
    intermediate_size=cfg_standard_moe.intermediate_size,
    num_hidden_layers=cfg_standard_moe.num_hidden_layers,
    num_attention_heads=cfg_standard_moe.num_attention_heads,
    num_key_value_heads=cfg_standard_moe.num_key_value_heads,
    hidden_act=cfg_standard_moe.hidden_act,
    max_position_embeddings=cfg_standard_moe.max_position_embeddings,
    initializer_range=cfg_standard_moe.initializer_range,
    rms_norm_eps=cfg_standard_moe.rms_norm_eps,
    tie_word_embeddings=cfg_standard_moe.tie_word_embeddings,
    rope_parameters=cfg_standard_moe.rope_parameters,
    rope_scaling=cfg_standard_moe.rope_scaling,
    attention_dropout=cfg_standard_moe.attention_dropout,
    moe_intermediate_size=cfg_standard_moe.moe_intermediate_size,
    qk_nope_head_dim=cfg_standard_moe.qk_nope_head_dim,
    qk_rope_head_dim=cfg_standard_moe.qk_rope_head_dim,
    v_head_dim=cfg_standard_moe.v_head_dim,
    partial_rotary_factor=cfg_standard_moe.partial_rotary_factor,
    num_nextn_predict_layers=0,
    routed_scaling_factor=cfg_standard_moe.routed_scaling_factor,
    topk_method=cfg_standard_moe.topk_method,
    norm_topk_prob=cfg_standard_moe.norm_topk_prob,
    attention_bias=cfg_standard_moe.attention_bias,
    q_lora_rank=cfg_standard_moe.q_lora_rank,
    kv_lora_rank=cfg_standard_moe.kv_lora_rank,
    eos_token_id=cfg_standard_moe.eos_token_id,
    pad_token_id=cfg_standard_moe.pad_token_id,
    torch_dtype=cfg_standard_moe.torch_dtype,
)

num_experts = cfg_standard_moe.n_routed_experts
num_hidden_layers = cfg_standard_moe.num_hidden_layers

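# Instantiate the SCM model with empty (meta) weights so no memory is allocated up
# front; real tensors are attached below via load_state_dict(..., assign=True).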
with accelerate.init_empty_weights():
    model_shared_moe = Glm4MoeLiteSCMForCausalLM(cfg_shared_moe)

model_shared_moe = model_shared_moe.to(torch.bfloat16)
new_state_dict = {}
pattern = f"{input_model}/model-*-of-*.safetensors"
files = sorted(glob.glob(pattern))

if len(files) == 0:
    raise FileNotFoundError(f"no safetensors shards found matching {pattern}")
tensors = {}

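# Load every shard of the source checkpoint into a single in-memory dict of tensors.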
for file_path in files:
    print(f"processing {file_path}")
    with safe_open(file_path, framework="pt", device="cpu") as f:
        for key in f.keys():
            tensor = f.get_tensor(key)
            tensors[key] = tensor

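# Remap the source weights to the SCM layout:
#   * tensors outside the routed experts (attention, norms, router gate, shared experts,
#     embeddings) keep their original names;
#   * for each MoE layer, the per-expert down_proj weights are stacked into
#     moe_mlp.output_experts.weight, and the concatenated up_proj/gate_proj weights are
#     stacked into moe_mlp.experts.weight.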
for key in tensors:
    # Skip the extra prediction layer (index == num_hidden_layers); it is not kept in the SCM model.
    if f"layers.{num_hidden_layers}" in key:
        continue
    if "experts" not in key or "shared_experts" in key:
        new_state_dict[key] = tensors[key]
    elif "experts.0" in key:
        # A key from expert 0 of this layer: build the fused tensors for all experts of the
        # layer at once. The first number in the key is the layer index.
        layer_num = int(re.search(r"\d+", key).group())
        new_state_dict[
            f"model.layers.{layer_num}.mlp.moe_mlp.output_experts.weight"
        ] = torch.stack(
            [
                tensors[f"model.layers.{layer_num}.mlp.experts.{i}.down_proj.weight"]
                for i in range(num_experts)
            ]
        )
        new_state_dict[f"model.layers.{layer_num}.mlp.moe_mlp.experts.weight"] = (
            torch.stack(
                [
                    torch.cat(
                        [
                            tensors[
                                f"model.layers.{layer_num}.mlp.experts.{i}.up_proj.weight"
                            ],
                            tensors[
                                f"model.layers.{layer_num}.mlp.experts.{i}.gate_proj.weight"
                            ],
                        ],
                        dim=0,
                    )
                    for i in range(num_experts)
                ]
            )
        )

model_shared_moe.load_state_dict(new_state_dict, strict=True, assign=True)
model_shared_moe.save_pretrained(output_model_path)
cfg_shared_moe.save_pretrained(output_model_path)

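# Copy the custom modeling/configuration code and the tokenizer files next to the
# converted weights so the output directory is a self-contained checkpoint.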
shutil.copy(
    "modeling_glm4_moe_lite_scm.py",
    output_model_path + "/" + "modeling_glm4_moe_lite_scm.py",
)
shutil.copy(
    "configuration_glm4_moe_lite_scm.py",
    output_model_path + "/" + "configuration_glm4_moe_lite_scm.py",
)
for i in ["tokenizer_config.json", "tokenizer.json", "chat_template.jinja"]:
    shutil.copy(input_model + "/" + i, output_model_path + "/" + i)
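
# Illustrative sketch (not run by this script): the converted checkpoint can be loaded
# back through the auto_map entries written above, e.g.
#
#   from transformers import AutoModelForCausalLM
#   model = AutoModelForCausalLM.from_pretrained(output_model_path, trust_remote_code=True)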