leonMW committed on
Commit
39843d7
·
verified ·
1 Parent(s): a0736a7

Merged base model Qwen2ForCausalLM(

Browse files

(model): Qwen2Model(
(embed_tokens): Embedding(152064, 3584)
(layers): ModuleList(
(0-27): 28 x Qwen2DecoderLayer(
(self_attn): Qwen2Attention(
(q_proj): Linear(in_features=3584, out_features=3584, bias=True)
(k_proj): Linear(in_features=3584, out_features=512, bias=True)
(v_proj): Linear(in_features=3584, out_features=512, bias=True)
(o_proj): Linear(in_features=3584, out_features=3584, bias=False)
)
(mlp): Qwen2MLP(
(gate_proj): Linear(in_features=3584, out_features=18944, bias=False)
(up_proj): Linear(in_features=3584, out_features=18944, bias=False)
(down_proj): Linear(in_features=18944, out_features=3584, bias=False)
(act_fn): SiLU()
)
(input_layernorm): Qwen2RMSNorm((3584,), eps=1e-06)
(post_attention_layernorm): Qwen2RMSNorm((3584,), eps=1e-06)
)
)
(norm): Qwen2RMSNorm((3584,), eps=1e-06)
(rotary_emb): Qwen2RotaryEmbedding()
)
(lm_head): Linear(in_features=3584, out_features=152064, bias=False)
) with LoRA adapter leonMW/DeepSeek-R1-Distill-Qwen-7B-LORA-GSPO-Basic using revision main

config.json CHANGED
@@ -4,6 +4,7 @@
4
  ],
5
  "attention_dropout": 0.0,
6
  "bos_token_id": 151643,
 
7
  "eos_token_id": 151643,
8
  "hidden_act": "silu",
9
  "hidden_size": 3584,
@@ -50,8 +51,7 @@
50
  "rope_theta": 10000,
51
  "sliding_window": null,
52
  "tie_word_embeddings": false,
53
- "torch_dtype": "bfloat16",
54
- "transformers_version": "4.55.4",
55
  "use_cache": true,
56
  "use_mrope": false,
57
  "use_sliding_window": false,
 
4
  ],
5
  "attention_dropout": 0.0,
6
  "bos_token_id": 151643,
7
+ "dtype": "bfloat16",
8
  "eos_token_id": 151643,
9
  "hidden_act": "silu",
10
  "hidden_size": 3584,
 
51
  "rope_theta": 10000,
52
  "sliding_window": null,
53
  "tie_word_embeddings": false,
54
+ "transformers_version": "4.56.0",
 
55
  "use_cache": true,
56
  "use_mrope": false,
57
  "use_sliding_window": false,
generation_config.json CHANGED
@@ -5,5 +5,5 @@
5
  "eos_token_id": 151643,
6
  "temperature": 0.6,
7
  "top_p": 0.95,
8
- "transformers_version": "4.55.4"
9
  }
 
5
  "eos_token_id": 151643,
6
  "temperature": 0.6,
7
  "top_p": 0.95,
8
+ "transformers_version": "4.56.0"
9
  }
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:804ca83c274abb422e0f9da5b34b43c79674a7efceceed080a5030fe2ab6f01b
3
  size 4877660776
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:746eb7bb9e6c52913108c513e8f0bc6b60677a3676f028be5a45b888f35c4768
3
  size 4877660776
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:6f98e8bcfb146bba58592e1d3251d5b10991a256d77a5d256d4ef423e8aae99e
3
  size 4932751008
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc044b2fc471bbcab21394b4d6c47e0db845c7f9855e061852a7761d5dd82803
3
  size 4932751008
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c1b9d2cd56088c00c9088b45358a47143bb5473667b4aa9cdf4e63cbd803f1c6
3
  size 4330865200
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:06172024ed14497c4c853a040fd8f16f4fd176dc38669095fcf5567c9b52a7d0
3
  size 4330865200