---
library_name: transformers
base_model:
- zai-org/GLM-4.7-Flash
---

This tiny model is intended for debugging. It is randomly initialized using the configuration adapted from [zai-org/GLM-4.7-Flash](https://huggingface.co/zai-org/GLM-4.7-Flash).

### Example usage:

- vLLM

```bash
# Multi-token prediction is supported
model_id=tiny-random/glm-4.7-flash
vllm serve $model_id \
  --tensor-parallel-size 2 \
  --speculative-config.method mtp \
  --speculative-config.num_speculative_tokens 1 \
  --tool-call-parser glm47 \
  --reasoning-parser glm45 \
  --enable-auto-tool-choice
```

- SGLang

```bash
# Multi-token prediction is supported
model_id=tiny-random/glm-4.7-flash
python3 -m sglang.launch_server --model-path $model_id --tp-size 2 \
  --tool-call-parser glm47 \
  --reasoning-parser glm45 \
  --speculative-algorithm EAGLE \
  --speculative-num-steps 3 \
  --speculative-eagle-topk 1 \
  --speculative-num-draft-tokens 4
```

- Transformers

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load model and tokenizer
model_id = "tiny-random/glm-4.7-flash"
messages = [{"role": "user", "content": "hello"}]
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_dict=True,
    return_tensors="pt",
)
model = AutoModelForCausalLM.from_pretrained(
    pretrained_model_name_or_path=model_id,
    torch_dtype=torch.bfloat16,
    device_map="cuda",
)
inputs = inputs.to(model.device)
generated_ids = model.generate(**inputs, max_new_tokens=32, do_sample=False)
output_text = tokenizer.decode(generated_ids[0][inputs.input_ids.shape[1]:])
print(output_text)
```

### Code to create this repo:

```python
import json
from copy import deepcopy

import torch
import torch.nn as nn
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoProcessor,
    GenerationConfig,
    set_seed,
)

source_model_id = "zai-org/GLM-4.7-Flash"
save_folder = "/tmp/tiny-random/glm-4.7-flash"

processor = AutoProcessor.from_pretrained(source_model_id, trust_remote_code=True)
processor.save_pretrained(save_folder)

with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json = json.load(f)
# Shrink the MLA attention dimensions
config_json.update({
    'kv_lora_rank': 384,
    'q_lora_rank': 32,
    'qk_nope_head_dim': 64,
    'qk_rope_head_dim': 192,
    'v_head_dim': 64,
    'num_key_value_heads': 4,
    'num_attention_heads': 4,
})
config_json['hidden_size'] = 8
config_json['intermediate_size'] = 32
config_json['moe_intermediate_size'] = 32
config_json['num_hidden_layers'] = 2
config_json['tie_word_embeddings'] = False
config_json['use_cache'] = True
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)

torch.set_default_dtype(torch.bfloat16)
model = AutoModelForCausalLM.from_config(config)
torch.set_default_dtype(torch.float32)
if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )
model.generation_config.do_sample = True
print(model.generation_config)

model = model.cpu()
set_seed(42)
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape)

# MTP
set_seed(42)
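# Append a DeepSeek-V3-style multi-token-prediction (MTP) draft layer as one
# extra decoder layer (index 2 == num_hidden_layers). It carries its own token
# embedding, a shared output head, and an eh_proj that fuses the normalized
# token embedding with the normalized hidden state; serving engines such as
# vLLM and SGLang consume these weights for speculative decoding.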
model.model.layers.append(nn.ModuleDict(dict(
    embed_tokens=deepcopy(model.model.embed_tokens),
    shared_head=nn.ModuleDict(dict(
        norm=nn.RMSNorm(config.hidden_size),
        head=deepcopy(model.model.embed_tokens),
    )),
    eh_proj=nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False),
    enorm=nn.RMSNorm(config.hidden_size),
    hnorm=nn.RMSNorm(config.hidden_size),
    input_layernorm=nn.RMSNorm(config.hidden_size),
    post_attention_layernorm=nn.RMSNorm(config.hidden_size),
    self_attn=deepcopy(model.model.layers[1].self_attn),
    mlp=deepcopy(model.model.layers[1].mlp),
)))
for i in range(1, len(model.model.layers)):
    model.model.layers[i].mlp.gate.e_score_correction_bias = torch.rand_like(
        model.model.layers[i].mlp.gate.e_score_correction_bias).float()

model.save_pretrained(save_folder)
print(model)
```
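As an optional sanity check, you can confirm the MTP weights were actually serialized. A minimal sketch, assuming the checkpoint lands in a single `model.safetensors` shard (it does at this size; `save_folder` comes from the script above):

```python
from safetensors import safe_open

# List the serialized tensors and pick out the MTP draft layer, which is
# stored as layer index 2 (== num_hidden_layers).
with safe_open(f"{save_folder}/model.safetensors", framework="pt") as f:
    keys = list(f.keys())
mtp_keys = [k for k in keys if k.startswith("model.layers.2.")]
print(len(mtp_keys), sorted(mtp_keys)[:3])
```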
### Printing the model:

```text
Glm4MoeLiteForCausalLM(
  (model): Glm4MoeLiteModel(
    (embed_tokens): Embedding(154880, 8, padding_idx=154820)
    (layers): ModuleList(
      (0): Glm4MoeLiteDecoderLayer(
        (self_attn): Glm4MoeLiteAttention(
          (q_a_proj): Linear(in_features=8, out_features=32, bias=False)
          (q_a_layernorm): Glm4MoeLiteRMSNorm((32,), eps=1e-06)
          (q_b_proj): Linear(in_features=32, out_features=1024, bias=False)
          (kv_a_proj_with_mqa): Linear(in_features=8, out_features=576, bias=False)
          (kv_a_layernorm): Glm4MoeLiteRMSNorm((384,), eps=1e-06)
          (kv_b_proj): Linear(in_features=384, out_features=512, bias=False)
          (o_proj): Linear(in_features=256, out_features=8, bias=False)
        )
        (mlp): Glm4MoeLiteMLP(
          (gate_proj): Linear(in_features=8, out_features=32, bias=False)
          (up_proj): Linear(in_features=8, out_features=32, bias=False)
          (down_proj): Linear(in_features=32, out_features=8, bias=False)
          (act_fn): SiLUActivation()
        )
        (input_layernorm): Glm4MoeLiteRMSNorm((8,), eps=1e-05)
        (post_attention_layernorm): Glm4MoeLiteRMSNorm((8,), eps=1e-05)
      )
      (1): Glm4MoeLiteDecoderLayer(
        (self_attn): Glm4MoeLiteAttention(
          (q_a_proj): Linear(in_features=8, out_features=32, bias=False)
          (q_a_layernorm): Glm4MoeLiteRMSNorm((32,), eps=1e-06)
          (q_b_proj): Linear(in_features=32, out_features=1024, bias=False)
          (kv_a_proj_with_mqa): Linear(in_features=8, out_features=576, bias=False)
          (kv_a_layernorm): Glm4MoeLiteRMSNorm((384,), eps=1e-06)
          (kv_b_proj): Linear(in_features=384, out_features=512, bias=False)
          (o_proj): Linear(in_features=256, out_features=8, bias=False)
        )
        (mlp): Glm4MoeLiteMoE(
          (experts): Glm4MoeLiteNaiveMoe(
            (act_fn): SiLUActivation()
          )
          (gate): Glm4MoeLiteTopkRouter()
          (shared_experts): Glm4MoeLiteMLP(
            (gate_proj): Linear(in_features=8, out_features=32, bias=False)
            (up_proj): Linear(in_features=8, out_features=32, bias=False)
            (down_proj): Linear(in_features=32, out_features=8, bias=False)
            (act_fn): SiLUActivation()
          )
        )
        (input_layernorm): Glm4MoeLiteRMSNorm((8,), eps=1e-05)
        (post_attention_layernorm): Glm4MoeLiteRMSNorm((8,), eps=1e-05)
      )
      (2): ModuleDict(
        (embed_tokens): Embedding(154880, 8, padding_idx=154820)
        (shared_head): ModuleDict(
          (norm): RMSNorm((8,), eps=None, elementwise_affine=True)
          (head): Embedding(154880, 8, padding_idx=154820)
        )
        (eh_proj): Linear(in_features=16, out_features=8, bias=False)
        (enorm): RMSNorm((8,), eps=None, elementwise_affine=True)
        (hnorm): RMSNorm((8,), eps=None, elementwise_affine=True)
        (input_layernorm): RMSNorm((8,), eps=None, elementwise_affine=True)
        (post_attention_layernorm): RMSNorm((8,), eps=None, elementwise_affine=True)
        (self_attn): Glm4MoeLiteAttention(
          (q_a_proj): Linear(in_features=8, out_features=32, bias=False)
          (q_a_layernorm): Glm4MoeLiteRMSNorm((32,), eps=1e-06)
          (q_b_proj): Linear(in_features=32, out_features=1024, bias=False)
          (kv_a_proj_with_mqa): Linear(in_features=8, out_features=576, bias=False)
          (kv_a_layernorm): Glm4MoeLiteRMSNorm((384,), eps=1e-06)
          (kv_b_proj): Linear(in_features=384, out_features=512, bias=False)
          (o_proj): Linear(in_features=256, out_features=8, bias=False)
        )
        (mlp): Glm4MoeLiteMoE(
          (experts): Glm4MoeLiteNaiveMoe(
            (act_fn): SiLUActivation()
          )
          (gate): Glm4MoeLiteTopkRouter()
          (shared_experts): Glm4MoeLiteMLP(
            (gate_proj): Linear(in_features=8, out_features=32, bias=False)
            (up_proj): Linear(in_features=8, out_features=32, bias=False)
            (down_proj): Linear(in_features=32, out_features=8, bias=False)
            (act_fn): SiLUActivation()
          )
        )
      )
    )
    (norm): Glm4MoeLiteRMSNorm((8,), eps=1e-05)
    (rotary_emb): Glm4MoeLiteRotaryEmbedding()
  )
  (lm_head): Linear(in_features=8, out_features=154880, bias=False)
)
```
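The attention projection shapes in this printout follow directly from the MLA fields set in `config.json` above. A quick check, where the variable names mirror the config keys:

```python
# Derive the printed MLA projection shapes from the tiny config values.
num_attention_heads = 4
q_lora_rank = 32          # q_a_proj out_features / q_b_proj in_features
kv_lora_rank = 384
qk_nope_head_dim = 64
qk_rope_head_dim = 192
v_head_dim = 64

q_head_dim = qk_nope_head_dim + qk_rope_head_dim                      # 256 per query head
assert num_attention_heads * q_head_dim == 1024                       # q_b_proj out_features
assert kv_lora_rank + qk_rope_head_dim == 576                         # kv_a_proj_with_mqa out_features
assert num_attention_heads * (qk_nope_head_dim + v_head_dim) == 512   # kv_b_proj out_features
assert num_attention_heads * v_head_dim == 256                        # o_proj in_features
```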