Mauricio-100 committed
Commit 8ea250a · verified · 1 Parent(s): 0102b67

🚀 Improved Gopu Agent - Optimized tokenizers

config.json CHANGED
@@ -1,29 +1,54 @@
 {
+  "activation_function": "gelu_new",
   "architectures": [
-    "LlamaForCausalLM"
+    "GPTNeoForCausalLM"
   ],
-  "attention_bias": false,
-  "attention_dropout": 0.0,
-  "bos_token_id": 1,
-  "dtype": "float16",
-  "eos_token_id": 2,
-  "head_dim": 64,
-  "hidden_act": "silu",
-  "hidden_size": 2048,
+  "attention_dropout": 0,
+  "attention_layers": [
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local",
+    "global",
+    "local"
+  ],
+  "attention_types": [
+    [
+      [
+        "global",
+        "local"
+      ],
+      6
+    ]
+  ],
+  "bos_token_id": 50256,
+  "classifier_dropout": 0.1,
+  "dtype": "float32",
+  "embed_dropout": 0,
+  "eos_token_id": 50256,
+  "gradient_checkpointing": false,
+  "hidden_size": 768,
   "initializer_range": 0.02,
-  "intermediate_size": 5632,
+  "intermediate_size": null,
+  "layer_norm_epsilon": 1e-05,
   "max_position_embeddings": 2048,
-  "mlp_bias": false,
-  "model_type": "llama",
-  "num_attention_heads": 32,
-  "num_hidden_layers": 22,
-  "num_key_value_heads": 4,
-  "pretraining_tp": 1,
-  "rms_norm_eps": 1e-05,
-  "rope_scaling": null,
-  "rope_theta": 10000.0,
-  "tie_word_embeddings": false,
+  "model_type": "gpt_neo",
+  "num_heads": 12,
+  "num_layers": 12,
+  "resid_dropout": 0,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
   "transformers_version": "4.57.1",
   "use_cache": true,
-  "vocab_size": 32000
+  "vocab_size": 50257,
+  "window_size": 256
 }
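
This change swaps the checkpoint from a Llama-architecture config (22 layers, hidden size 2048, 32 attention heads) to a GPT-Neo config (12 layers, hidden size 768, alternating global/local attention with a 256-token window). As a rough sanity check, the new config can be loaded with transformers; a minimal sketch, assuming a local checkout of this repo at "." (the path is an assumption):

# Sketch: verify the new GPT-Neo config loads as expected.
# The local path "." is an assumption; substitute the actual repo path or hub id.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(".")
assert config.model_type == "gpt_neo"
assert config.num_layers == 12 and config.hidden_size == 768
# attention_types [["global", "local"], 6] expands to the 12-entry
# attention_layers list shown in the diff above.
print(config.attention_layers)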
generation_config.json CHANGED
@@ -1,7 +1,6 @@
 {
-  "bos_token_id": 1,
-  "eos_token_id": 2,
-  "max_length": 2048,
-  "pad_token_id": 0,
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
   "transformers_version": "4.57.1"
 }
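
The trimmed generation config drops the explicit max_length and pad_token_id and keeps only the GPT-2-style BOS/EOS id 50256; "_from_model_config" marks it as derived from config.json. A minimal load check, again assuming a local checkout at ".":

# Sketch: confirm the regenerated generation config.
from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained(".")  # local repo path is an assumption
assert gen.bos_token_id == 50256 and gen.eos_token_id == 50256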
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1b98ca50916dda5ad745ba32ff50bfad659c5529fbc2b6e00b7911d8bdb7e180
-size 2200119664
+oid sha256:d5e72e03d8fbd6f5bad5b0428dbba0f5232fe17402d2f92f3c787508b65957b6
+size 500811336
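
The weight file shrinks from roughly 2.2 GB (float16 Llama weights) to roughly 0.5 GB (float32 GPT-Neo weights). Only the Git LFS pointer changed here, so a downloaded copy can be checked against it; a sketch assuming the file sits in the working directory (Python 3.11+ for hashlib.file_digest):

# Sketch: verify a downloaded model.safetensors against the new LFS pointer.
import hashlib, os

EXPECTED_OID = "d5e72e03d8fbd6f5bad5b0428dbba0f5232fe17402d2f92f3c787508b65957b6"
EXPECTED_SIZE = 500811336

path = "model.safetensors"  # local path is an assumption
assert os.path.getsize(path) == EXPECTED_SIZE
with open(path, "rb") as f:
    assert hashlib.file_digest(f, "sha256").hexdigest() == EXPECTED_OID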
special_tokens_map.json CHANGED
@@ -1,29 +1,23 @@
 {
   "bos_token": {
-    "content": "<s>",
+    "content": "<|endoftext|>",
     "lstrip": false,
-    "normalized": false,
+    "normalized": true,
     "rstrip": false,
     "single_word": false
   },
   "eos_token": {
-    "content": "</s>",
+    "content": "<|endoftext|>",
     "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": false,
+    "normalized": true,
     "rstrip": false,
     "single_word": false
   },
+  "pad_token": "<|endoftext|>",
   "unk_token": {
-    "content": "<unk>",
+    "content": "<|endoftext|>",
     "lstrip": false,
-    "normalized": false,
+    "normalized": true,
     "rstrip": false,
     "single_word": false
   }
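
All four special-token roles (bos, eos, pad, unk) now point at the single GPT-2-style <|endoftext|> token with "normalized": true, replacing the SentencePiece-style <s>/</s>/<unk> set. A quick check, assuming a local checkout at ".":

# Sketch: every special-token role should resolve to <|endoftext|> (id 50256).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # local repo path is an assumption
assert tok.bos_token == tok.eos_token == tok.pad_token == tok.unk_token == "<|endoftext|>"
assert tok.eos_token_id == 50256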
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,43 +1,23 @@
 {
-  "add_bos_token": true,
-  "add_eos_token": false,
-  "add_prefix_space": null,
+  "add_bos_token": false,
+  "add_prefix_space": false,
   "added_tokens_decoder": {
-    "0": {
-      "content": "<unk>",
+    "50256": {
+      "content": "<|endoftext|>",
       "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "1": {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "2": {
-      "content": "</s>",
-      "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
     }
   },
-  "bos_token": "<s>",
-  "clean_up_tokenization_spaces": false,
-  "eos_token": "</s>",
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|endoftext|>",
+  "errors": "replace",
   "extra_special_tokens": {},
-  "legacy": false,
   "model_max_length": 2048,
-  "pad_token": "</s>",
-  "padding_side": "right",
-  "sp_model_kwargs": {},
-  "tokenizer_class": "LlamaTokenizer",
-  "unk_token": "<unk>",
-  "use_default_system_prompt": false
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
 }
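
The tokenizer class switches from LlamaTokenizer (SentencePiece) to GPT2Tokenizer (byte-level BPE), which is what the newly added merges.txt and vocab.json serve. A round-trip sketch, again assuming a local checkout at ".":

# Sketch: byte-level BPE round-trip with the new tokenizer files.
from transformers import GPT2Tokenizer

tok = GPT2Tokenizer.from_pretrained(".")  # local repo path is an assumption
ids = tok("Agent Gopu")["input_ids"]
assert tok.decode(ids) == "Agent Gopu"
print(len(tok))  # expected 50257, matching vocab_size in config.json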
vocab.json ADDED
The diff for this file is too large to render. See raw diff