Upload tiny models for SwitchTransformersForConditionalGeneration

- config.json +7 -8
- generation_config.json +7 -0
- pytorch_model.bin +2 -2
- spiece.model +3 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -1
config.json CHANGED

@@ -7,20 +7,19 @@
   "d_ff": 37,
   "d_kv": 8,
   "d_model": 32,
-  "decoder_sparse_step":
-  "decoder_start_token_id": 0,
+  "decoder_sparse_step": 4,
   "dense_act_fn": "relu",
   "dropout_rate": 0.1,
   "encoder_sparse_step": 1,
   "eos_token_id": 1,
-  "expert_capacity":
+  "expert_capacity": 64,
   "feed_forward_proj": "relu",
   "initializer_factor": 0.002,
-  "is_encoder_decoder":
+  "is_encoder_decoder": false,
   "is_gated_act": false,
   "layer_norm_epsilon": 1e-06,
   "model_type": "switch_transformers",
-  "num_decoder_layers":
+  "num_decoder_layers": 12,
   "num_experts": 8,
   "num_heads": 4,
   "num_layers": 5,
@@ -33,11 +32,11 @@
   "router_bias": false,
   "router_dtype": "float32",
   "router_ignore_padding_tokens": false,
-  "router_jitter_noise": 0.
+  "router_jitter_noise": 0.01,
   "router_type": "tokens_masked",
   "router_z_loss_coef": 0.001,
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.28.0.dev0",
   "use_cache": true,
-  "vocab_size":
+  "vocab_size": 32100
 }
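For reference, the tiny dimensions above can be reproduced in code. The following is a minimal sketch (not part of this commit; the tiny-model repository id is not shown here, so the configuration is rebuilt locally from the values in the diff, with everything else left at library defaults):

from transformers import (
    SwitchTransformersConfig,
    SwitchTransformersForConditionalGeneration,
)

# Rebuild the tiny configuration from the values in config.json above.
config = SwitchTransformersConfig(
    d_ff=37,
    d_kv=8,
    d_model=32,
    num_layers=5,
    num_decoder_layers=12,
    num_heads=4,
    num_experts=8,
    expert_capacity=64,
    router_jitter_noise=0.01,
    vocab_size=32100,
)

# Randomly initialized model; small enough for CI-style smoke tests.
model = SwitchTransformersForConditionalGeneration(config)
print(f"parameters: {model.num_parameters():,}")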
generation_config.json ADDED

@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 0,
+  "eos_token_id": 1,
+  "pad_token_id": 0,
+  "transformers_version": "4.28.0.dev0"
+}
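This is the standard GenerationConfig format; "_from_model_config" is bookkeeping that transformers records when the generation defaults are derived from the model config. A small sketch of how an equivalent file could be written out programmatically (the save path below is a placeholder, not part of this commit):

from transformers import GenerationConfig

# Same generation defaults as the file added above.
gen_config = GenerationConfig(bos_token_id=0, eos_token_id=1, pad_token_id=0)
gen_config.save_pretrained("path/to/tiny-model")  # writes generation_config.json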
pytorch_model.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f1e83a4241d1fa0ee40455d9f2d1907cffc41930c10b09c398f82486450d2522
+size 5033893
spiece.model ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
+size 791656
tokenizer.json CHANGED

The diff for this file is too large to render. See the raw diff.
tokenizer_config.json CHANGED

@@ -104,8 +104,8 @@
   "eos_token": "</s>",
   "extra_ids": 100,
   "model_max_length": 512,
-  "name_or_path": "google/switch-base-8",
   "pad_token": "<pad>",
+  "sp_model_kwargs": {},
   "special_tokens_map_file": null,
   "tokenizer_class": "T5Tokenizer",
   "unk_token": "<unk>"
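The new "sp_model_kwargs" entry is the (here empty) dictionary of options forwarded to the underlying SentencePiece processor. A small loading sketch, using google/switch-base-8 (the checkpoint referenced by the removed "name_or_path" entry) as a stand-in since the tiny-model repository id is not shown in this diff:

from transformers import T5Tokenizer

# Stand-in checkpoint; swap in the tiny-model repository once it is published.
tokenizer = T5Tokenizer.from_pretrained("google/switch-base-8", sp_model_kwargs={})
print(tokenizer("a tiny switch transformer", return_tensors="pt").input_ids)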