heldJan committed
Commit b16e2b5 · verified · 1 Parent(s): 61dfdb6

Training in progress, step 1

added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<vid_end>": 32002,
+   "<vid_patch>": 32000,
+   "<vid_start>": 32001
+ }
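The three video tokens above extend the base LLaMA vocabulary (32000 entries, IDs 0-31999), which is why they receive IDs 32000-32002. A minimal sketch of how such a mapping is produced with the transformers tokenizer API; the base model name is an assumption, not taken from this commit:

    # Sketch: adding the video special tokens appends them after the base
    # 32000-token LLaMA vocabulary, yielding the IDs seen in added_tokens.json.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")  # assumed base model
    tokenizer.add_special_tokens(
        {"additional_special_tokens": ["<vid_patch>", "<vid_start>", "<vid_end>"]}
    )
    print(tokenizer.convert_tokens_to_ids("<vid_patch>"))  # 32000
    print(tokenizer.convert_tokens_to_ids("<vid_start>"))  # 32001
    print(tokenizer.convert_tokens_to_ids("<vid_end>"))    # 32002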
special_tokens_map.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "additional_special_tokens": [
+     {
+       "content": "<vid_patch>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<vid_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<vid_end>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     }
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
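Note that the pad token is mapped to "<unk>" rather than a dedicated padding token, so padding reuses token ID 0. A quick check, using a hypothetical local checkpoint path:

    # Sketch: verify that padding reuses the <unk> token under this special_tokens_map.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("path/to/this/checkpoint")  # hypothetical path
    assert tokenizer.pad_token == "<unk>"
    assert tokenizer.pad_token_id == tokenizer.unk_token_id == 0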
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,72 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32000": {
+       "content": "<vid_patch>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32001": {
+       "content": "<vid_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32002": {
+       "content": "<vid_end>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<vid_patch>",
+     "<vid_start>",
+     "<vid_end>"
+   ],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "max_length": 64,
+   "model_max_length": 1048,
+   "pad_token": "<unk>",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
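These settings govern batch encoding when the tokenizer is reloaded: right-padding with the "<unk>" pad token and a model_max_length of 1048. A minimal sketch, again with a hypothetical checkpoint path:

    # Sketch: batch-encode two prompts under the tokenizer_config.json settings above.
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("path/to/this/checkpoint")  # hypothetical path
    batch = tok(
        ["<vid_start><vid_patch><vid_end> describe the clip", "short prompt"],
        padding=True,       # right-padding, per padding_side="right"
        truncation=True,    # capped at model_max_length (1048) tokens
        return_tensors="pt",
    )
    print(batch["input_ids"].shape)
    print(tok.model_max_length)  # 1048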
trainer_state.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "best_metric": 5.335667610168457,
+   "best_model_checkpoint": "results/checkpoint-3",
+   "epoch": 1.5,
+   "eval_steps": 3,
+   "global_step": 3,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.5,
+       "learning_rate": 0.00013333333333333334,
+       "loss": 12.207,
+       "step": 1
+     },
+     {
+       "epoch": 1.0,
+       "learning_rate": 6.666666666666667e-05,
+       "loss": 9.0941,
+       "step": 2
+     },
+     {
+       "epoch": 1.5,
+       "learning_rate": 0.0,
+       "loss": 6.9347,
+       "step": 3
+     },
+     {
+       "epoch": 1.5,
+       "eval_loss": 5.335667610168457,
+       "eval_runtime": 17.8301,
+       "eval_samples_per_second": 1.122,
+       "eval_steps_per_second": 0.168,
+       "step": 3
+     },
+     {
+       "epoch": 1.5,
+       "step": 3,
+       "total_flos": 1506581575649280.0,
+       "train_loss": 9.411954879760742,
+       "train_runtime": 116.9289,
+       "train_samples_per_second": 0.411,
+       "train_steps_per_second": 0.026
+     }
+   ],
+   "logging_steps": 1,
+   "max_steps": 3,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 2,
+   "save_steps": 3,
+   "total_flos": 1506581575649280.0,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
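The learning rates logged in log_history (1.33e-4, 6.67e-5, 0.0) are consistent with a linear decay over max_steps=3 with no warmup from a base rate of 2e-4; that base rate is inferred from the logged values, not stated anywhere in this commit. A quick reproduction:

    # Sketch: linear LR decay (no warmup) that reproduces the logged learning rates.
    base_lr = 2e-4      # inferred from log_history, not confirmed by the commit
    max_steps = 3

    for step in range(1, max_steps + 1):
        lr = base_lr * (max_steps - step) / max_steps  # LR after each optimizer step
        print(step, lr)
    # 1 0.00013333333333333334
    # 2 6.666666666666667e-05
    # 3 0.0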
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b80f0c7e6b2c750298c59b5b7f120e9eb4e01aa60ed82c27bbc5d6b710486cf
+ size 4664
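Both tokenizer.model and training_args.bin are stored via Git LFS, so the diff shows only pointer files (spec version, SHA-256 oid, size); the real binaries must be fetched first, e.g. with "git lfs pull". training_args.bin is typically the pickled TrainingArguments object saved by the transformers Trainer, so it can be inspected as in this sketch:

    # Sketch: inspect the pickled TrainingArguments after pulling the LFS object.
    import torch

    # weights_only=False is needed on recent PyTorch to unpickle a non-tensor object;
    # only do this for checkpoints you trust.
    args = torch.load("training_args.bin", weights_only=False)
    print(args.learning_rate, args.per_device_train_batch_size, args.max_steps)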