MahiH committed
Commit 0cec8ed · verified · Parent: 0cc404e

Upload folder using huggingface_hub
checkpoint-2/config.json CHANGED
@@ -31,7 +31,7 @@
       "max_length": 1000
     }
   },
-  "torch_dtype": "float16",
+  "torch_dtype": "float32",
   "transformers_version": "4.52.4",
   "use_cache": true,
   "vocab_size": 50257
checkpoint-2/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:75daecf07ed91720ddc6b67797ffb531d15449a24ef4b9b7ca4be461e3217d63
-size 248926387
+oid sha256:d2245c47fd6ede01d43d2b61a9e51a60aa5f2b62ed7c4b3e5c29a87fa74ae47a
+size 497806003
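The LFS pointer records only an oid and a byte count; the weight payload itself lives in LFS storage. The size jump follows directly from the torch_dtype change above, as this quick check (plain Python, nothing repo-specific) shows:

```python
# float32 stores 4 bytes per parameter versus 2 for float16, so the
# serialized checkpoint should roughly double when the dtype changes.
old_size = 248926387   # float16 pytorch_model.bin, bytes
new_size = 497806003   # float32 pytorch_model.bin, bytes
print(new_size / old_size)  # ~2.0, as expected
```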
checkpoint-2/trainer_state.json CHANGED
@@ -2,17 +2,17 @@
   "best_global_step": null,
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 2.0,
+  "epoch": 1.0,
   "eval_steps": 500,
   "global_step": 2,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [],
-  "logging_steps": 10,
-  "max_steps": 3,
+  "logging_steps": 5,
+  "max_steps": 4,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 3,
+  "num_train_epochs": 2,
   "save_steps": 500,
   "stateful_callbacks": {
     "TrainerControl": {
@@ -26,8 +26,8 @@
       "attributes": {}
     }
   },
-  "total_flos": 2612920320000.0,
-  "train_batch_size": 4,
+  "total_flos": 5225840640000.0,
+  "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
 }
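The updated state is internally consistent: global_step 2 at epoch 1.0 means two optimizer steps per epoch, which matches max_steps 4 over num_train_epochs 2. A back-of-envelope sketch of the implied dataset size, assuming no gradient accumulation (the state file does not record it):

```python
# Hypothetical check from the trainer state alone; assumes
# gradient_accumulation_steps == 1, which trainer_state.json omits.
global_step, epoch, train_batch_size = 2, 1.0, 2
steps_per_epoch = global_step / epoch                    # 2.0
samples_per_epoch = steps_per_epoch * train_batch_size   # 4.0
print(steps_per_epoch, samples_per_epoch)
```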
checkpoint-2/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6eabaaa0bdaccba1b86a81d17cd1c9aabd7f51bd63331449c0fc287de407316c
+oid sha256:c2d85571f50b6ee5d8da4626ce6d3a308d432ed12f04a5cdbf71aed5f844a9f7
 size 5713
checkpoint-4/chat_template.jinja ADDED
@@ -0,0 +1 @@
+{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}
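This template ignores roles and simply concatenates each message's content followed by the EOS token. A minimal usage sketch, assuming the checkpoint has been downloaded to a local checkpoint-4/ directory:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-4")  # local path, assumed
messages = [
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi there."},
]
# apply_chat_template renders the Jinja template above; eos_token is
# <|endoftext|> for GPT-2.
print(tok.apply_chat_template(messages, tokenize=False))
# -> Hello!<|endoftext|>Hi there.<|endoftext|>
```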
checkpoint-4/config.json ADDED
@@ -0,0 +1,38 @@
+{
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "pad_token_id": 50256,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "conversational": {
+      "max_length": 1000
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.52.4",
+  "use_cache": true,
+  "vocab_size": 50257
+}
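This is the stock GPT-2 "small" configuration: 12 layers, 12 heads, 768-dim embeddings, 1024-token context, 50257-token vocabulary. A sketch confirming the parameter count implied by the config (local checkpoint path assumed):

```python
from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config.from_pretrained("checkpoint-4")  # local path, assumed
model = GPT2LMHeadModel(config)  # randomly initialized, config only
n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.1f}M")  # ~124.4M; at 4 bytes each (float32)
# that is ~498 MB, matching the pytorch_model.bin size below.
```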
checkpoint-4/generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.52.4"
+}
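The generation config only pins the bos/eos ids. A minimal end-to-end generation sketch from this checkpoint (local directory assumed; GPT-2 has no dedicated pad token, so EOS is reused):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "checkpoint-4"  # local checkpoint directory, assumed
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt)

inputs = tok("Hello!", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=20,
                     pad_token_id=tok.eos_token_id)  # reuse EOS as pad
print(tok.decode(out[0], skip_special_tokens=True))
```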
checkpoint-4/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b191a3eea545f4c9475e8ce9c409f1b3a0280a4d7ee1b2c8a73100dfc382013b
+size 497806003
checkpoint-4/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<|endoftext|>",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
checkpoint-4/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4/tokenizer_config.json ADDED
@@ -0,0 +1,23 @@
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|endoftext|>",
+  "errors": "replace",
+  "extra_special_tokens": {},
+  "model_max_length": 1024,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
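Together with special_tokens_map.json above, every special role (bos/eos/pad/unk) maps onto <|endoftext|> (id 50256) rather than growing the vocabulary. A quick check, again assuming the local checkpoint directory:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-4")  # local path, assumed
# GPT-2 defines a single special token; this checkpoint reuses it for
# all four roles, keeping vocab_size at 50257.
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
print(tok.pad_token_id)  # 50256
```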
checkpoint-4/trainer_state.json ADDED
@@ -0,0 +1,33 @@
+{
+  "best_global_step": null,
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.0,
+  "eval_steps": 500,
+  "global_step": 4,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [],
+  "logging_steps": 5,
+  "max_steps": 4,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 2,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 10451681280000.0,
+  "train_batch_size": 2,
+  "trial_name": null,
+  "trial_params": null
+}
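This state marks the end of the run (should_training_stop is true at global_step == max_steps == 4), and its total_flos is exactly double checkpoint-2's, consistent with twice as many identical steps:

```python
# total_flos accumulates linearly with optimizer steps of equal cost.
flos_at_step_2 = 5225840640000.0   # from checkpoint-2/trainer_state.json
flos_at_step_4 = 10451681280000.0  # from checkpoint-4/trainer_state.json
print(flos_at_step_4 / flos_at_step_2)  # exactly 2.0
```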
checkpoint-4/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2d85571f50b6ee5d8da4626ce6d3a308d432ed12f04a5cdbf71aed5f844a9f7
+size 5713
checkpoint-4/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
config.json CHANGED
@@ -31,7 +31,7 @@
       "max_length": 1000
     }
   },
-  "torch_dtype": "float16",
+  "torch_dtype": "float32",
   "transformers_version": "4.52.4",
   "use_cache": true,
   "vocab_size": 50257
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:75daecf07ed91720ddc6b67797ffb531d15449a24ef4b9b7ca4be461e3217d63
-size 248926387
+oid sha256:b191a3eea545f4c9475e8ce9c409f1b3a0280a4d7ee1b2c8a73100dfc382013b
+size 497806003
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6eabaaa0bdaccba1b86a81d17cd1c9aabd7f51bd63331449c0fc287de407316c
+oid sha256:c2d85571f50b6ee5d8da4626ce6d3a308d432ed12f04a5cdbf71aed5f844a9f7
 size 5713