---
library_name: transformers
pipeline_tag: text-generation
inference: true
widget:
- text: Hello!
  example_title: Hello world
  group: Python
base_model:
- meituan-longcat/LongCat-Flash-Chat
---

This tiny model is for debugging. It is randomly initialized, using a config adapted from [meituan-longcat/LongCat-Flash-Chat](https://huggingface.co/meituan-longcat/LongCat-Flash-Chat).
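
Because the weights are random, the outputs are meaningless; the checkpoint is only useful for exercising code paths. As a quick check of its size, here is a minimal sketch that loads the checkpoint and counts parameters (the exact count is not stated in this card):

```python
from transformers import AutoModelForCausalLM

# trust_remote_code is required because the LongCat architecture ships as custom code.
model = AutoModelForCausalLM.from_pretrained(
    "yujiepan/longcat-flash-tiny-random",
    trust_remote_code=True,
)
# Total parameter count, to confirm this is a debug-sized checkpoint.
print(sum(p.numel() for p in model.parameters()))
```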

### Example usage:

- vLLM

```bash
vllm serve yujiepan/longcat-flash-tiny-random \
    --trust-remote-code \
    --enable-expert-parallel \
    --tensor-parallel-size 1 \
    --speculative_config '{"model": "yujiepan/longcat-flash-tiny-random", "num_speculative_tokens": 1, "method": "longcat_flash_mtp"}'
```
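
Once the server is up, vLLM exposes an OpenAI-compatible API. A minimal sketch of querying it with the `openai` Python client, assuming the default host and port (`localhost:8000`); the completion text is gibberish since the weights are random:

```python
from openai import OpenAI

# Point the client at the local vLLM server; the API key is unused but must be set.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

completion = client.completions.create(
    model="yujiepan/longcat-flash-tiny-random",
    prompt="Hello, world!",
    max_tokens=16,
)
print(completion.choices[0].text)
```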

- SGLang

```bash
python3 -m sglang.launch_server \
    --model yujiepan/longcat-flash-tiny-random \
    --trust-remote-code \
    --attention-backend flashinfer \
    --enable-ep-moe \
    --tp 1 \
    --speculative-draft-model-path yujiepan/longcat-flash-tiny-random \
    --speculative-algorithm NEXTN \
    --speculative-num-draft-tokens 2 \
    --speculative-num-steps 1 \
    --speculative-eagle-topk 1
```

- Transformers

```python
import torch
import transformers

model_id = "yujiepan/longcat-flash-tiny-random"
pipe = transformers.pipelines.pipeline(
    'text-generation',
    model=model_id,
    trust_remote_code=True,
    device_map='cuda',
    torch_dtype=torch.bfloat16,
)
past_key_values = transformers.DynamicCache(config=None)  # set config to None
r = pipe('Hello, world!', past_key_values=past_key_values, max_new_tokens=32)
print(r)
```
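
For an even lighter smoke test that skips generation and the KV cache entirely, a minimal sketch of a single forward pass (only shapes and dtypes are meaningful, since the weights are random):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "yujiepan/longcat-flash-tiny-random"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
inputs = tokenizer("Hello, world!", return_tensors="pt")
with torch.no_grad():
    # use_cache=False avoids building a KV cache for this quick shape check.
    logits = model(**inputs, use_cache=False).logits
print(logits.shape)  # (batch_size, sequence_length, vocab_size)
```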

### Code to create this repo:

```python
import json
from copy import deepcopy
from pathlib import Path

import torch
import torch.nn as nn
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoProcessor,
    AutoTokenizer,
    GenerationConfig,
    set_seed,
)
from transformers.models.glm4_moe.modeling_glm4_moe import Glm4MoeRMSNorm

source_model_id = "meituan-longcat/LongCat-Flash-Chat"
save_folder = "/tmp/yujiepan/longcat-flash-tiny-random"

Path(save_folder).mkdir(parents=True, exist_ok=True)
tokenizer = AutoTokenizer.from_pretrained(source_model_id, trust_remote_code=True)
tokenizer.save_pretrained(save_folder)

with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json = json.load(f)
for k, v in config_json['auto_map'].items():
    config_json['auto_map'][k] = f'{source_model_id}--{v}'
config_json.update({
    'num_layers': 2,
    'hidden_size': 8,
    'ffn_hidden_size': 64,
    'expert_ffn_hidden_size': 64,
    'num_attention_heads': 4,
    'kv_lora_rank': 384,
    'n_routed_experts': 32,
    'q_lora_rank': 32,
    'qk_nope_head_dim': 64,
    'qk_rope_head_dim': 192,  # vllm mla kernel supports 576 only, FA supports head dim <= 256
    'v_head_dim': 64,
    'moe_topk': 12,
    'zero_expert_num': 16,
})
# del config_json['quantization_config']
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)
torch.set_default_dtype(torch.bfloat16)
model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)
if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )
model = model.cpu()

# MTP
model.model.mtp = nn.ModuleDict({
    "layers": nn.ModuleList([nn.ModuleDict(dict(
        eh_proj=nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False),
        enorm=nn.ModuleDict({"m": nn.RMSNorm(config.hidden_size)}),
        hnorm=nn.ModuleDict({"m": nn.RMSNorm(config.hidden_size)}),
        input_layernorm=nn.RMSNorm(config.hidden_size),
        post_attention_layernorm=nn.RMSNorm(config.hidden_size),
        self_attn=deepcopy(model.model.layers[0].self_attn[0]),
        transformer_layer=nn.ModuleDict({"mlp": deepcopy(model.model.layers[0].mlps[0])}),
    ))]),
    "norm": nn.RMSNorm(config.hidden_size),
})
for i in range(config.num_layers):
    model.model.layers[i].mlp.router = model.model.layers[i].mlp.router.float()
    # model.model.layers[i].mlp.router.e_score_correction_bias = torch.zeros((config.n_routed_experts + config.zero_expert_num)).float()

set_seed(42)
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape, p.dtype)
model.model.mtp.embed_tokens = deepcopy(model.model.embed_tokens)

model.save_pretrained(save_folder)
torch.set_default_dtype(torch.float32)

for n, m in model.named_modules():
    if 'LongcatFlashMLA' in str(type(m)):
        print(n, m.layer_idx)

with open(f"{save_folder}/config.json", "r", encoding='utf-8') as f:
    config_json = json.load(f)
config_json['auto_map'] = {k: v.split('--')[-1] for k, v in config_json['auto_map'].items()}
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)
```
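
The script above only writes the model files to `save_folder`; uploading them to the Hub is a separate step. A minimal sketch using `huggingface_hub` (the upload step is not shown in the original script; authentication via `huggingface-cli login` is assumed):

```python
from huggingface_hub import HfApi

api = HfApi()
# Create the target repo if it does not exist yet, then upload the saved folder.
api.create_repo("yujiepan/longcat-flash-tiny-random", repo_type="model", exist_ok=True)
api.upload_folder(
    folder_path="/tmp/yujiepan/longcat-flash-tiny-random",
    repo_id="yujiepan/longcat-flash-tiny-random",
    repo_type="model",
)
```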

### Printing the model:

```text
LongcatFlashForCausalLM(
  (model): LongcatFlashModel(
    (embed_tokens): Embedding(131072, 8)
    (layers): ModuleList(
      (0-1): 2 x LongcatFlashDecoderLayer(
        (mlp): LongcatFlashMoE(
          (experts): ModuleList(
            (0-31): 32 x LongcatFlashMLP(
              (gate_proj): Linear(in_features=8, out_features=64, bias=False)
              (up_proj): Linear(in_features=8, out_features=64, bias=False)
              (down_proj): Linear(in_features=64, out_features=8, bias=False)
              (act_fn): SiLU()
            )
          )
          (router): LongcatFlashTopkRouter(
            (classifier): Linear(in_features=8, out_features=48, bias=False)
          )
        )
        (self_attn): ModuleList(
          (0-1): 2 x LongcatFlashMLA(
            (q_a_proj): Linear(in_features=8, out_features=32, bias=False)
            (q_a_layernorm): LongcatFlashRMSNorm((32,), eps=1e-06)
            (q_b_proj): Linear(in_features=32, out_features=1024, bias=False)
            (kv_a_proj_with_mqa): Linear(in_features=8, out_features=576, bias=False)
            (kv_a_layernorm): LongcatFlashRMSNorm((384,), eps=1e-06)
            (kv_b_proj): Linear(in_features=384, out_features=512, bias=False)
            (o_proj): Linear(in_features=256, out_features=8, bias=False)
          )
        )
        (mlps): ModuleList(
          (0-1): 2 x LongcatFlashMLP(
            (gate_proj): Linear(in_features=8, out_features=64, bias=False)
            (up_proj): Linear(in_features=8, out_features=64, bias=False)
            (down_proj): Linear(in_features=64, out_features=8, bias=False)
            (act_fn): SiLU()
          )
        )
        (input_layernorm): ModuleList(
          (0-1): 2 x LongcatFlashRMSNorm((8,), eps=1e-05)
        )
        (post_attention_layernorm): ModuleList(
          (0-1): 2 x LongcatFlashRMSNorm((8,), eps=1e-05)
        )
      )
    )
    (norm): LongcatFlashRMSNorm((8,), eps=1e-05)
    (rotary_emb): LongcatFlashRotaryEmbedding()
    (mtp): ModuleDict(
      (layers): ModuleList(
        (0): ModuleDict(
          (eh_proj): Linear(in_features=16, out_features=8, bias=False)
          (enorm): ModuleDict(
            (m): RMSNorm((8,), eps=None, elementwise_affine=True)
          )
          (hnorm): ModuleDict(
            (m): RMSNorm((8,), eps=None, elementwise_affine=True)
          )
          (input_layernorm): RMSNorm((8,), eps=None, elementwise_affine=True)
          (post_attention_layernorm): RMSNorm((8,), eps=None, elementwise_affine=True)
          (self_attn): LongcatFlashMLA(
            (q_a_proj): Linear(in_features=8, out_features=32, bias=False)
            (q_a_layernorm): LongcatFlashRMSNorm((32,), eps=1e-06)
            (q_b_proj): Linear(in_features=32, out_features=1024, bias=False)
            (kv_a_proj_with_mqa): Linear(in_features=8, out_features=576, bias=False)
            (kv_a_layernorm): LongcatFlashRMSNorm((384,), eps=1e-06)
            (kv_b_proj): Linear(in_features=384, out_features=512, bias=False)
            (o_proj): Linear(in_features=256, out_features=8, bias=False)
          )
          (transformer_layer): ModuleDict(
            (mlp): LongcatFlashMLP(
              (gate_proj): Linear(in_features=8, out_features=64, bias=False)
              (up_proj): Linear(in_features=8, out_features=64, bias=False)
              (down_proj): Linear(in_features=64, out_features=8, bias=False)
              (act_fn): SiLU()
            )
          )
        )
      )
      (norm): RMSNorm((8,), eps=None, elementwise_affine=True)
      (embed_tokens): Embedding(131072, 8)
    )
  )
  (lm_head): Linear(in_features=8, out_features=131072, bias=False)
)
```