khulnasoft committed (verified)
Commit f608ef3 · 1 Parent(s): 184e0cd

Add files using upload-large-folder tool

Files changed (2):
  1. config.json +31 -0
  2. training_args.json +22 -0
config.json ADDED
@@ -0,0 +1,31 @@
+{
+  "model_type": "gpt2",
+  "architectures": ["GPT2LMHeadModel"],
+  "vocab_size": 32000,
+  "n_positions": 1024,
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_layer": 12,
+  "n_head": 12,
+  "activation_function": "gelu_new",
+  "resid_pdrop": 0.1,
+  "embd_pdrop": 0.1,
+  "attn_pdrop": 0.1,
+  "layer_norm_epsilon": 1e-5,
+  "initializer_range": 0.02,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "summary_activation": null,
+  "summary_proj_to_labels": true,
+  "summary_first_dropout": 0.1,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 512,
+      "temperature": 0.8,
+      "top_p": 0.9
+    }
+  }
+}
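
For reference, a minimal sketch of building a model from this config with the transformers library. The local path "config.json" is an assumption, and constructing the model this way yields freshly initialized weights sized by the config, not this repository's trained checkpoint (use from_pretrained on the repo for that).

```python
from transformers import GPT2Config, GPT2LMHeadModel

# Load the committed hyperparameters; "config.json" is assumed to be a local copy.
config = GPT2Config.from_json_file("config.json")

# Randomly initialized GPT-2 sized by the config: 12 layers, 768 hidden, 32k vocab.
model = GPT2LMHeadModel(config)
print(f"{model.num_parameters():,} parameters")
```

The task_specific_params block should be picked up by the text-generation pipeline, so the sampling defaults (do_sample, temperature 0.8, top-p 0.9, max_length 512) apply without being passed explicitly at generation time.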
training_args.json ADDED
@@ -0,0 +1,22 @@
+{
+  "output_dir": "./models/literary-lm",
+  "overwrite_output_dir": true,
+  "num_train_epochs": 10,
+  "per_device_train_batch_size": 8,
+  "per_device_eval_batch_size": 8,
+  "gradient_accumulation_steps": 4,
+  "learning_rate": 5e-5,
+  "weight_decay": 0.01,
+  "warmup_steps": 500,
+  "logging_steps": 100,
+  "save_steps": 1000,
+  "eval_steps": 500,
+  "save_total_limit": 3,
+  "fp16": true,
+  "dataloader_num_workers": 4,
+  "load_best_model_at_end": true,
+  "metric_for_best_model": "perplexity",
+  "greater_is_better": false,
+  "evaluation_strategy": "steps",
+  "save_strategy": "steps"
+}
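
Likewise, a sketch of rehydrating these arguments for the Trainer; it assumes transformers is installed and the JSON sits in the working directory. Every key here maps onto a TrainingArguments parameter, though on newer transformers releases evaluation_strategy may need to be renamed eval_strategy.

```python
import json
from transformers import TrainingArguments

# Keys in training_args.json mirror TrainingArguments parameters,
# so the file can be splatted straight into the constructor.
with open("training_args.json") as f:
    args = TrainingArguments(**json.load(f))

# Effective per-device batch: 8 micro-batch x 4 accumulation steps = 32.
print(args.per_device_train_batch_size * args.gradient_accumulation_steps)
```

Note that load_best_model_at_end with metric_for_best_model set to "perplexity" presumes the evaluation loop actually reports a perplexity metric (e.g. from a compute_metrics callback); greater_is_better: false correctly marks lower perplexity as better.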