ogoshi2000 committed on
Commit fa7e6f6
1 Parent(s): 4be2ac0

Upload 10 files

config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "_name_or_path": "LennartKeller/longformer-gottbert-base-8192-aw512",
+   "architectures": [
+     "LongformerForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "attention_window": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "gradient_checkpointing": true,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 8194,
+   "model_type": "longformer",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "sep_token_id": 2,
+   "torch_dtype": "float32",
+   "transformers_version": "4.6.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 52009
+ }
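The config above describes a gottbert-based (German RoBERTa) Longformer classification model with four labels and a 512-token attention window in each of its 12 layers. A minimal loading sketch, assuming the uploaded files have been fetched into a local directory; the repository id is not shown on this page, so the path below is a placeholder:

```python
# Load the uploaded checkpoint; "path/to/checkpoint" is a placeholder for the
# directory containing config.json, pytorch_model.bin and the tokenizer files.
from transformers import LongformerConfig, LongformerForSequenceClassification

checkpoint = "path/to/checkpoint"  # placeholder, not the actual repo id
config = LongformerConfig.from_pretrained(checkpoint)
model = LongformerForSequenceClassification.from_pretrained(checkpoint)

# Four-way single-label classification with 8192 usable positions
# (max_position_embeddings = 8194 includes the RoBERTa-style offset positions).
print(config.num_labels, config.id2label)   # 4, {0: 'LABEL_0', ..., 3: 'LABEL_3'}
print(config.max_position_embeddings)       # 8194
```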
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ab7b8e22f71f71cc81317d4bc29a4bc3b78462606ed52064a639bf488deff2a
+ size 612773004
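pytorch_model.bin is stored through Git LFS, so the diff only shows the pointer file (SHA-256 and size) rather than the roughly 613 MB weight file itself. Once the real object has been pulled (for example via `git lfs pull` or a hub download), it can be checked against the pointer; a small sketch:

```python
# Verify a locally resolved pytorch_model.bin against the LFS pointer above.
import hashlib
import os

path = "pytorch_model.bin"  # assumes the LFS object has already been fetched
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == 612773004
assert digest.hexdigest() == "3ab7b8e22f71f71cc81317d4bc29a4bc3b78462606ed52064a639bf488deff2a"
```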
run_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_CHeeSEArguments": "CHeeSEArguments",
+   "model_name_or_path": "/netscratch/schnitzler/longformer",
+   "dataset_path": "./dataset.py",
+   "metric_path": "./metric.py",
+   "task": "stance_detection",
+   "first_sentence_inputs": [
+     "question"
+   ],
+   "second_sentence_inputs": [
+     "title",
+     "snippet",
+     "paragraphs"
+   ],
+   "labels_to_predict": [
+     "stance"
+   ],
+   "_CHeeSETrainingArguments": "CHeeSETrainingArguments",
+   "do_train": false,
+   "do_eval": true,
+   "do_predict": true,
+   "do_cross_validation": false,
+   "output_dir": "/netscratch/schnitzler/longformer",
+   "logging_dir": "/netscratch/schnitzler/longformer",
+   "save_total_limit": 3,
+   "log_to_file": true,
+   "logging_strategy": "steps",
+   "logging_steps": 50,
+   "cross_validation_folds": 5,
+   "save_steps": 1000,
+   "evaluation_strategy": "steps",
+   "eval_steps": 500,
+   "num_train_epochs": 12,
+   "per_device_train_batch_size": 8,
+   "per_device_eval_batch_size": 8,
+   "learning_rate": 3e-05,
+   "prediction_columns_to_include": [
+     "title",
+     "snippet",
+     "paragraphs",
+     "question",
+     "labels",
+     "stance"
+   ],
+   "overwrite_output_dir": true
+ }
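run_config.json records the CHeeSE run configuration stored with this checkpoint: a stance-detection setup that pairs the question against title, snippet and paragraphs (note that do_train is false here, so this particular config appears to describe an evaluation/prediction run). The keys after "_CHeeSETrainingArguments" largely mirror standard Hugging Face TrainingArguments fields, plus project-specific extras such as cross-validation settings. A small sketch for inspecting it:

```python
# Inspect the run configuration that accompanies the checkpoint.
import json

with open("run_config.json") as f:
    run_cfg = json.load(f)

print(run_cfg["task"], run_cfg["labels_to_predict"])              # stance_detection ['stance']
print(run_cfg["first_sentence_inputs"], run_cfg["second_sentence_inputs"])
print(run_cfg["learning_rate"], run_cfg["per_device_train_batch_size"])
```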
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "errors": "replace", "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "trim_offsets": true, "special_tokens_map_file": null, "name_or_path": "LennartKeller/longformer-gottbert-base-8192-aw512", "model_max_length": 8192, "tokenizer_class": "LongformerTokenizer"}
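tokenizer_config.json declares a LongformerTokenizer with RoBERTa-style special tokens (<s>/</s> doubling as CLS/SEP) and a model_max_length of 8192, matching the model config above. A usage sketch under the same placeholder checkpoint path; the question-vs-document pairing follows the layout from run_config.json and the texts are purely illustrative:

```python
# Tokenize a (question, document) pair the way the run configuration suggests.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint")  # placeholder path

question = "Beispielfrage?"                   # hypothetical input
document = "Titel. Snippet. Absatztext ..."   # hypothetical title + snippet + paragraphs
enc = tokenizer(question, document, truncation=True, max_length=8192, return_tensors="pt")
print(enc["input_ids"].shape)                 # at most (1, 8192)
```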
trainer_state.json ADDED
@@ -0,0 +1,191 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 4.0,
+   "global_step": 1196,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.17,
+       "learning_rate": 2.8770903010033445e-05,
+       "loss": 1.3149,
+       "step": 50
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 2.7516722408026756e-05,
+       "loss": 1.297,
+       "step": 100
+     },
+     {
+       "epoch": 0.5,
+       "learning_rate": 2.626254180602007e-05,
+       "loss": 1.2396,
+       "step": 150
+     },
+     {
+       "epoch": 0.67,
+       "learning_rate": 2.508361204013378e-05,
+       "loss": 1.1525,
+       "step": 200
+     },
+     {
+       "epoch": 0.84,
+       "learning_rate": 2.382943143812709e-05,
+       "loss": 1.1361,
+       "step": 250
+     },
+     {
+       "epoch": 1.0,
+       "learning_rate": 2.2575250836120402e-05,
+       "loss": 1.0224,
+       "step": 300
+     },
+     {
+       "epoch": 1.17,
+       "learning_rate": 2.1321070234113713e-05,
+       "loss": 0.9715,
+       "step": 350
+     },
+     {
+       "epoch": 1.34,
+       "learning_rate": 2.0066889632107023e-05,
+       "loss": 1.0531,
+       "step": 400
+     },
+     {
+       "epoch": 1.51,
+       "learning_rate": 1.8812709030100337e-05,
+       "loss": 0.981,
+       "step": 450
+     },
+     {
+       "epoch": 1.67,
+       "learning_rate": 1.7558528428093648e-05,
+       "loss": 0.97,
+       "step": 500
+     },
+     {
+       "epoch": 1.67,
+       "eval_accuracy": 0.61875,
+       "eval_f1_macro": 0.4388243335611756,
+       "eval_f1_micro": 0.61875,
+       "eval_f1_unweighted_diskutierend": 0.47619047619047616,
+       "eval_f1_unweighted_ja-daf\u00fcr": 0.4912280701754386,
+       "eval_f1_unweighted_kein-bezug": 0.7878787878787878,
+       "eval_f1_unweighted_nein-dagegen": 0.0,
+       "eval_f1_weighted": 0.5780730234677603,
+       "eval_loss": 0.9460641145706177,
+       "eval_runtime": 61.5083,
+       "eval_samples_per_second": 2.601,
+       "step": 500
+     },
+     {
+       "epoch": 1.84,
+       "learning_rate": 1.6304347826086955e-05,
+       "loss": 0.9701,
+       "step": 550
+     },
+     {
+       "epoch": 2.01,
+       "learning_rate": 1.5050167224080267e-05,
+       "loss": 0.9235,
+       "step": 600
+     },
+     {
+       "epoch": 2.17,
+       "learning_rate": 1.379598662207358e-05,
+       "loss": 0.7441,
+       "step": 650
+     },
+     {
+       "epoch": 2.34,
+       "learning_rate": 1.254180602006689e-05,
+       "loss": 0.8782,
+       "step": 700
+     },
+     {
+       "epoch": 2.51,
+       "learning_rate": 1.1287625418060201e-05,
+       "loss": 0.8644,
+       "step": 750
+     },
+     {
+       "epoch": 2.68,
+       "learning_rate": 1.0033444816053512e-05,
+       "loss": 0.7321,
+       "step": 800
+     },
+     {
+       "epoch": 2.84,
+       "learning_rate": 8.779264214046824e-06,
+       "loss": 0.8181,
+       "step": 850
+     },
+     {
+       "epoch": 3.01,
+       "learning_rate": 7.525083612040134e-06,
+       "loss": 0.7836,
+       "step": 900
+     },
+     {
+       "epoch": 3.18,
+       "learning_rate": 6.270903010033445e-06,
+       "loss": 0.653,
+       "step": 950
+     },
+     {
+       "epoch": 3.34,
+       "learning_rate": 5.016722408026756e-06,
+       "loss": 0.6051,
+       "step": 1000
+     },
+     {
+       "epoch": 3.34,
+       "eval_accuracy": 0.6625,
+       "eval_f1_macro": 0.5549999999999999,
+       "eval_f1_micro": 0.6625,
+       "eval_f1_unweighted_diskutierend": 0.5333333333333333,
+       "eval_f1_unweighted_ja-daf\u00fcr": 0.56,
+       "eval_f1_unweighted_kein-bezug": 0.8266666666666667,
+       "eval_f1_unweighted_nein-dagegen": 0.3,
+       "eval_f1_weighted": 0.6507499999999999,
+       "eval_loss": 0.9401437044143677,
+       "eval_runtime": 61.357,
+       "eval_samples_per_second": 2.608,
+       "step": 1000
+     },
+     {
+       "epoch": 3.51,
+       "learning_rate": 3.762541806020067e-06,
+       "loss": 0.6435,
+       "step": 1050
+     },
+     {
+       "epoch": 3.68,
+       "learning_rate": 2.508361204013378e-06,
+       "loss": 0.6506,
+       "step": 1100
+     },
+     {
+       "epoch": 3.85,
+       "learning_rate": 1.254180602006689e-06,
+       "loss": 0.5699,
+       "step": 1150
+     },
+     {
+       "epoch": 4.0,
+       "step": 1196,
+       "total_flos": 1.180320018726912e+16,
+       "train_runtime": 10088.4778,
+       "train_samples_per_second": 0.119
+     }
+   ],
+   "max_steps": 1196,
+   "num_train_epochs": 4,
+   "total_flos": 1.180320018726912e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
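trainer_state.json captures the training trajectory: four epochs (1196 steps), training loss falling from about 1.31 to 0.57, and two evaluation snapshots at steps 500 and 1000 with accuracy rising from roughly 0.62 to 0.66. Since the log is plain JSON, the curves can be pulled out directly; a short sketch:

```python
# Extract the loss curve and evaluation snapshots from trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

train_loss = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [e for e in state["log_history"] if "eval_loss" in e]

print(train_loss[0], train_loss[-1])   # (50, 1.3149) ... (1150, 0.5699)
for e in evals:
    print(e["step"], e["eval_accuracy"], e["eval_f1_macro"])
```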
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2231346afadb7f7d79206983c052e3ab24246d9a4124d0f683c9b71a1c1cc308
+ size 2543
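training_args.bin is the serialized TrainingArguments object that the Hugging Face Trainer writes next to a checkpoint (saved with torch.save, hence the small LFS object). It can usually be inspected as below, though unpickling may fail across very different transformers versions; a sketch:

```python
# Load the pickled TrainingArguments; newer torch versions may need weights_only=False.
import torch

args = torch.load("training_args.bin")
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```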
vocab.json ADDED
The diff for this file is too large to render. See raw diff