finalgp3 committed
Commit 0eb5641 · verified · 1 Parent(s): c979fc1

Upload 11 files

generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.38.2"
+}
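This file sets the decoding defaults that transformers picks up automatically when the model is loaded; both sequence boundaries map to GPT-2's single <|endoftext|> token (id 50256). A minimal sketch of reading it back, assuming a hypothetical local checkout at ./finetuned_qa11 (the directory name that appears in trainer_state.json below):

```python
from transformers import GenerationConfig

# Load the decoding defaults shipped with the checkpoint. The path is
# illustrative; substitute the real repo id or local directory.
gen_config = GenerationConfig.from_pretrained("./finetuned_qa11")
print(gen_config.bos_token_id)  # 50256, i.e. <|endoftext|>
print(gen_config.eos_token_id)  # 50256; GPT-2 reuses one token for both roles
```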
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c888c10456369529f0782ddff007e9730f63ceefffb57c20b08b3898ca9d117a
+size 497774208
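The three lines above are a Git LFS pointer, not the tensor data itself: the actual ~498 MB weights file (consistent with a 124M-parameter GPT-2 in fp32) is stored out of band and addressed by its sha256 oid. The same pointer format covers optimizer.pt, rng_state.pth, scheduler.pt, and training_args.bin below. A minimal sketch of parsing such a pointer:

```python
def parse_lfs_pointer(text: str) -> dict[str, str]:
    """Split a Git LFS pointer file into its space-separated key/value fields."""
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:c888c10456369529f0782ddff007e9730f63ceefffb57c20b08b3898ca9d117a\n"
    "size 497774208\n"
)
assert pointer["oid"].startswith("sha256:")
assert int(pointer["size"]) == 497774208  # roughly 498 MB of fp32 weights
```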
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64b82aef63dd81a31957b75b23a43abdf2d2f1ee3b7466a413f916c036506222
+size 995642298
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87c5befeb7a8ede1a63f570b2a9ca46ee90311fd0aee8739e268282bfa64b45d
+size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b09fc2bd9358da628a3e465c6a7759a95b495fd186af7df11a948e186a372391
+size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<|endoftext|>",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
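All four special-token roles (bos, eos, pad, unk) resolve to the same GPT-2 token, <|endoftext|>, which is why the pad token can simply alias it as a plain string. A quick check, assuming the same hypothetical ./finetuned_qa11 path:

```python
from transformers import AutoTokenizer

# Illustrative path; substitute the real repo id or local directory.
tok = AutoTokenizer.from_pretrained("./finetuned_qa11")
assert tok.bos_token == tok.eos_token == tok.pad_token == tok.unk_token == "<|endoftext|>"
assert tok.convert_tokens_to_ids("<|endoftext|>") == 50256
```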
tokenizer_config.json ADDED
@@ -0,0 +1,22 @@
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|endoftext|>",
+  "errors": "replace",
+  "model_max_length": 1024,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
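The config declares a plain GPT2Tokenizer with a 1024-token context window, and because pad_token is set, batched encoding with padding works out of the box. A small usage sketch under the same path assumption:

```python
from transformers import GPT2Tokenizer

tok = GPT2Tokenizer.from_pretrained("./finetuned_qa11")  # illustrative path
print(tok.model_max_length)  # 1024, GPT-2's context window

# pad_token is <|endoftext|> (id 50256), so shorter sequences in a batch
# are simply padded with that id.
batch = tok(["Hello world", "A longer example sentence."],
            padding=True, return_tensors="pt")
print(batch["input_ids"].shape)
```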
trainer_state.json ADDED
@@ -0,0 +1,37 @@
+{
+  "best_metric": 832.680419921875,
+  "best_model_checkpoint": "./finetuned_qa11\\checkpoint-61",
+  "epoch": 0.9908629441624366,
+  "eval_steps": 500,
+  "global_step": 61,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.81,
+      "grad_norm": 7.192448616027832,
+      "learning_rate": 2.4221311475409835e-06,
+      "loss": 3.5889,
+      "step": 50
+    },
+    {
+      "epoch": 0.99,
+      "eval_loss": 2.146763801574707,
+      "eval_perplexity": 832.680419921875,
+      "eval_runtime": 349.8385,
+      "eval_samples_per_second": 1.103,
+      "eval_steps_per_second": 0.071,
+      "step": 61
+    }
+  ],
+  "logging_steps": 50,
+  "max_steps": 244,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 4,
+  "save_steps": 500,
+  "total_flos": 1028902699008000.0,
+  "train_batch_size": 16,
+  "trial_name": null,
+  "trial_params": null
+}
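This state records a checkpoint taken at step 61 of 244, roughly the end of the first of four epochs, with eval perplexity 832.68 as the best metric so far. Note the Windows-style backslash in best_model_checkpoint, which suggests the run was saved on Windows. A minimal sketch of reading the state back to locate that checkpoint:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Normalize the Windows-style path separator before reusing the path.
best = state["best_model_checkpoint"].replace("\\", "/")
print(best)  # ./finetuned_qa11/checkpoint-61
print(f'{state["global_step"]}/{state["max_steps"]} steps,'
      f' epoch {state["epoch"]:.2f} of {state["num_train_epochs"]}')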
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:623d4c49986c6792bfabeec9f72f5d176df1f1880e204db2a8234e1adcd71d8d
+size 4856
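Unlike the weight files, training_args.bin is a pickled transformers.TrainingArguments object rather than tensors, hence its tiny 4856-byte size. A hedged sketch of inspecting it, assuming a recent torch (an arbitrary pickled object needs weights_only=False, which implies trusting the file):

```python
import torch

# Not a plain tensor file: weights_only=False is required on newer torch
# releases, and should only be used on checkpoints you trust.
args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)               # expected: TrainingArguments
print(args.per_device_train_batch_size)  # should be consistent with
                                         # train_batch_size=16 in trainer_state.json
```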
vocab.json ADDED
The diff for this file is too large to render. See raw diff