Mathlesage committed on
Commit
981f57b
·
verified ·
1 Parent(s): b16873c

Upload model

Browse files
Files changed (2) hide show
  1. config.json +49 -34
  2. model.safetensors +2 -2
config.json CHANGED
@@ -1,45 +1,60 @@
1
  {
2
  "architectures": [
3
- "EuroBertModel"
4
  ],
5
  "attention_bias": false,
6
  "attention_dropout": 0.0,
7
- "auto_map": {
8
- "AutoConfig": "configuration_eurobert.EuroBertConfig",
9
- "AutoModel": "modeling_eurobert.EuroBertModel",
10
- "AutoModelForMaskedLM": "modeling_eurobert.EuroBertForMaskedLM",
11
- "AutoModelForPreTraining": "modeling_eurobert.EuroBertPreTrainedModel",
12
- "AutoModelForSequenceClassification": "modeling_eurobert.EuroBertForSequenceClassification",
13
- "AutoModelForTokenClassification": "modeling_eurobert.EuroBertForTokenClassification"
14
- },
15
- "bos_token": "<|begin_of_text|>",
16
- "bos_token_id": 128000,
17
- "clf_pooling": "late",
18
- "eos_token": "<|end_of_text|>",
19
- "eos_token_id": 128001,
20
- "head_dim": 64,
21
  "hidden_act": "silu",
22
- "hidden_dropout": 0.0,
23
- "hidden_size": 768,
24
  "initializer_range": 0.02,
25
  "intermediate_size": 3072,
26
- "mask_token": "<|mask|>",
27
- "mask_token_id": 128002,
28
- "max_position_embeddings": 8192,
29
- "mlp_bias": false,
30
- "model_type": "eurobert",
31
- "num_attention_heads": 12,
32
- "num_hidden_layers": 12,
33
- "num_key_value_heads": 12,
34
- "pad_token": "<|end_of_text|>",
35
- "pad_token_id": 128001,
36
- "pretraining_tp": 1,
37
- "rms_norm_eps": 1e-05,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  "rope_scaling": null,
39
- "rope_theta": 250000,
40
- "tie_word_embeddings": false,
 
41
  "torch_dtype": "float32",
42
- "transformers_version": "4.54.1",
43
- "use_cache": false,
44
- "vocab_size": 128256
 
45
  }
 
1
  {
2
  "architectures": [
3
+ "Qwen3Model"
4
  ],
5
  "attention_bias": false,
6
  "attention_dropout": 0.0,
7
+ "bos_token_id": 151643,
8
+ "eos_token_id": 151645,
9
+ "head_dim": 128,
 
 
 
 
 
 
 
 
 
 
 
10
  "hidden_act": "silu",
11
+ "hidden_size": 1024,
 
12
  "initializer_range": 0.02,
13
  "intermediate_size": 3072,
14
+ "layer_types": [
15
+ "full_attention",
16
+ "full_attention",
17
+ "full_attention",
18
+ "full_attention",
19
+ "full_attention",
20
+ "full_attention",
21
+ "full_attention",
22
+ "full_attention",
23
+ "full_attention",
24
+ "full_attention",
25
+ "full_attention",
26
+ "full_attention",
27
+ "full_attention",
28
+ "full_attention",
29
+ "full_attention",
30
+ "full_attention",
31
+ "full_attention",
32
+ "full_attention",
33
+ "full_attention",
34
+ "full_attention",
35
+ "full_attention",
36
+ "full_attention",
37
+ "full_attention",
38
+ "full_attention",
39
+ "full_attention",
40
+ "full_attention",
41
+ "full_attention",
42
+ "full_attention"
43
+ ],
44
+ "max_position_embeddings": 40960,
45
+ "max_window_layers": 28,
46
+ "model_type": "qwen3",
47
+ "num_attention_heads": 16,
48
+ "num_hidden_layers": 28,
49
+ "num_key_value_heads": 8,
50
+ "rms_norm_eps": 1e-06,
51
  "rope_scaling": null,
52
+ "rope_theta": 1000000,
53
+ "sliding_window": null,
54
+ "tie_word_embeddings": true,
55
  "torch_dtype": "float32",
56
+ "transformers_version": "4.55.0",
57
+ "use_cache": true,
58
+ "use_sliding_window": false,
59
+ "vocab_size": 151936
60
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:44c57bf6552b38fc6ec6b763c14a5330797f5fdd5559a232198f24af86d1fb1f
3
- size 847075632
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cc14e6298569d551491abd34f7d96d47d7255b7a87fce9fd4501e619534cc001
3
+ size 2384233112