Text Generation · PEFT · English
Commit 7cfb7eb · 1 Parent(s): afb9450
nmitchko committed

Uploaded Checkpoint 15%

adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "/media/nmitchko/NVME/text-generation-webui/models/codellama_CodeLlama-34b-hf",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16.0,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 64,
+  "revision": null,
+  "target_modules": [
+    "o_proj",
+    "v_proj",
+    "q_proj",
+    "up_proj",
+    "down_proj",
+    "k_proj",
+    "gate_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
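With r = 64 and lora_alpha = 16.0, the LoRA deltas are scaled by alpha/r = 0.25, and the adapter targets all seven attention and MLP projections of the Llama architecture. Because base_model_name_or_path records a local text-generation-webui path, anyone else has to point PEFT at the public base checkpoint instead. A minimal loading sketch, assuming the public codellama/CodeLlama-34b-hf weights and a hypothetical local directory ./checkpoint-200 holding this commit's files:

```python
import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Assumptions: the public Hub mirror of the base model, and a local
# directory containing adapter_config.json + adapter_model.bin from this commit.
BASE = "codellama/CodeLlama-34b-hf"
ADAPTER_DIR = "./checkpoint-200"  # hypothetical path

base = AutoModelForCausalLM.from_pretrained(
    BASE,
    torch_dtype=torch.float16,  # 34B in fp16 needs ~68 GB; consider load_in_4bit=True
    device_map="auto",
)
model = PeftModel.from_pretrained(base, ADAPTER_DIR)  # reads the config shown above
model.eval()
```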
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f6a83c7c5c83fb7e565d31714b16be28109ba1802657401bd3e4e457738924d
+size 871609738
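adapter_model.bin is committed as a Git LFS pointer: the 871,609,738-byte weight file itself lives in LFS storage, and the sha256 oid lets clients verify it. huggingface_hub resolves such pointers transparently when downloading; a short sketch, with the repo id as a placeholder:

```python
from huggingface_hub import hf_hub_download

# repo_id is a placeholder; substitute the repo this commit belongs to.
path = hf_hub_download(repo_id="nmitchko/ADAPTER_REPO", filename="adapter_model.bin")
print(path)  # local cache path to the ~872 MB adapter weights
```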
special_tokens_map.json ADDED
@@ -0,0 +1,12 @@
+{
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "<unk>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
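Aliasing pad_token to "<unk>" gives the tokenizer a padding id without growing the embedding matrix, which fits a LoRA run that saves no extra modules (modules_to_save is null in the adapter config). A small sketch of the effect, assuming the same hypothetical checkpoint directory:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint-200")  # hypothetical path
tok.pad_token = tok.unk_token  # mirror special_tokens_map.json explicitly

# Padded positions reuse the existing <unk> id; attention_mask zeros them out.
batch = tok(["def add(a, b):", "return a + b"], padding=True, return_tensors="pt")
print(tok.pad_token_id, batch["attention_mask"].tolist())
```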
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,37 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "legacy": false,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": null,
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "use_default_system_prompt": true
+}
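Two details worth noting: model_max_length holds int(1e30), the sentinel transformers uses when no maximum length is recorded, so callers must truncate explicitly; and add_bos_token true with add_eos_token false is the usual causal-LM inference setup, where prompts get "<s>" prepended but no "</s>" appended. A minimal sketch, again assuming the hypothetical checkpoint directory:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint-200")  # hypothetical path

ids = tok("def fib(n):").input_ids
# add_bos_token=true, add_eos_token=false: <s> prepended, </s> not appended.
assert ids[0] == tok.bos_token_id and ids[-1] != tok.eos_token_id

# model_max_length is int(1e30), i.e. "no limit recorded"; truncate explicitly.
long_prompt = "# comment\n" * 10000
ids = tok(long_prompt, truncation=True, max_length=4096).input_ids  # 4096 is an assumption
```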
trainer_state.json ADDED
@@ -0,0 +1,79 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.12690355329949238,
+  "eval_steps": 1000,
+  "global_step": 200,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.01,
+      "learning_rate": 0.0001,
+      "loss": 0.9676,
+      "step": 20
+    },
+    {
+      "epoch": 0.03,
+      "learning_rate": 0.0001,
+      "loss": 0.7339,
+      "step": 40
+    },
+    {
+      "epoch": 0.04,
+      "learning_rate": 0.0001,
+      "loss": 0.6708,
+      "step": 60
+    },
+    {
+      "epoch": 0.05,
+      "learning_rate": 0.0001,
+      "loss": 0.6334,
+      "step": 80
+    },
+    {
+      "epoch": 0.06,
+      "learning_rate": 0.0001,
+      "loss": 0.6169,
+      "step": 100
+    },
+    {
+      "epoch": 0.08,
+      "learning_rate": 0.0001,
+      "loss": 0.5886,
+      "step": 120
+    },
+    {
+      "epoch": 0.09,
+      "learning_rate": 0.0001,
+      "loss": 0.5877,
+      "step": 140
+    },
+    {
+      "epoch": 0.1,
+      "learning_rate": 0.0001,
+      "loss": 0.5867,
+      "step": 160
+    },
+    {
+      "epoch": 0.11,
+      "learning_rate": 0.0001,
+      "loss": 0.5791,
+      "step": 180
+    },
+    {
+      "epoch": 0.13,
+      "learning_rate": 0.0001,
+      "loss": 0.5764,
+      "step": 200
+    }
+  ],
+  "logging_steps": 20,
+  "max_steps": 45000,
+  "num_train_epochs": 29,
+  "save_steps": 200,
+  "total_flos": 4.706262881912488e+18,
+  "trial_name": null,
+  "trial_params": null
+}
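trainer_state.json doubles as the training log for this snapshot: ten loss readings at 20-step intervals, falling from 0.9676 to 0.5764 with the learning rate held at 1e-4. The snapshot also implies the schedule: global_step / epoch = 200 / 0.1269 = 1576 steps per epoch, so max_steps = 45000 spans about 28.6 epochs, matching num_train_epochs = 29. A small sketch that reads the file and checks that arithmetic:

```python
import json

with open("trainer_state.json") as f:  # the file shown above
    state = json.load(f)

for rec in state["log_history"]:
    print(f"step {rec['step']:>4}  epoch {rec['epoch']:.2f}  loss {rec['loss']:.4f}")

# Implied schedule: ~1576 steps per epoch, so 45000 max_steps ~ 28.6 epochs.
steps_per_epoch = state["global_step"] / state["epoch"]
print(round(steps_per_epoch), round(state["max_steps"] / steps_per_epoch, 1))
```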