Upload folder using huggingface_hub

- config.json +28 -0
- eval_results.txt +20 -0
- model_args.json +1 -0
- optimizer.pt +3 -0
- pytorch_model.bin +3 -0
- scheduler.pt +3 -0
- sentencepiece.bpe.model +3 -0
- special_tokens_map.json +1 -0
- test_eval.txt +43 -0
- tokenizer_config.json +1 -0
- training_args.bin +3 -0
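All files below were pushed in a single commit. As a rough sketch only (not the exact command used), a commit like this can be reproduced with huggingface_hub's upload_folder; the repo id and local folder path are placeholders:

# Sketch: reproducing this kind of commit with huggingface_hub.
# "username/model-repo" and "best_model/fr_fr" are placeholder values.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    repo_id="username/model-repo",
    folder_path="best_model/fr_fr",
    commit_message="Upload folder using huggingface_hub",  # matches the commit title above
)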
config.json
ADDED
@@ -0,0 +1,28 @@
+{
+  "_name_or_path": "xlm-roberta-large",
+  "architectures": [
+    "XLMRobertaForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "xlm-roberta",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.16.2",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 250002
+}
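config.json declares an XLMRobertaForSequenceClassification head on top of xlm-roberta-large (24 layers, hidden size 1024, vocabulary of 250002). A minimal, hedged sketch of loading the uploaded checkpoint with the transformers library, assuming the repository has been cloned locally; the local path "." and the example sentence pair are placeholders, and the <e>...</e> markers simply mirror the begin_tag/end_tag fields in model_args.json below:

# Sketch (not part of this commit): loading the uploaded checkpoint with transformers.
from transformers import XLMRobertaForSequenceClassification, XLMRobertaTokenizer

tokenizer = XLMRobertaTokenizer.from_pretrained(".")              # sentencepiece.bpe.model, tokenizer_config.json
model = XLMRobertaForSequenceClassification.from_pretrained(".")  # config.json, pytorch_model.bin

# Placeholder sentence pair; real inputs come from the fr_fr WiC-style data.
inputs = tokenizer(
    "première phrase avec le mot <e> cible </e> .",
    "seconde phrase avec le mot <e> cible </e> .",
    return_tensors="pt",
)
logits = model(**inputs).logits  # shape (1, 2): binary labels per labels_list [0, 1]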
eval_results.txt
ADDED
@@ -0,0 +1,20 @@
+accuracy = 0.52
+cls_report =               precision    recall  f1-score   support
+
+         0.0     0.5385    0.1429    0.2258        49
+         1.0     0.5172    0.8824    0.6522        51
+
+    accuracy                         0.5200       100
+   macro avg     0.5279    0.5126    0.4390       100
+weighted avg     0.5276    0.5200    0.4433       100
+
+eval_loss = 0.6898809304604163
+fn = 6
+fp = 42
+macro_f1 = 0.43899018232819076
+mcc = 0.03747366058909428
+tn = 7
+tp = 45
+weighted_f1 = 0.44325385694249647
+weighted_p = 0.5278514588859416
+weighted_r = 0.5126050420168067
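The headline figures above follow directly from the reported confusion counts; as a short check, using only the tp/fp/tn/fn values listed in eval_results.txt:

# Recomputing accuracy and MCC from the confusion counts reported above
# (tp = 45, fp = 42, tn = 7, fn = 6 on the 100-example eval set).
import math

tp, fp, tn, fn = 45, 42, 7, 6
accuracy = (tp + tn) / (tp + fp + tn + fn)            # 0.52, as reported
mcc = (tp * tn - fp * fn) / math.sqrt(
    (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
)                                                     # ~0.0375, as reported
print(f"accuracy={accuracy:.2f} mcc={mcc:.4f}")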
model_args.json
ADDED
@@ -0,0 +1 @@
+{"adam_epsilon": 1e-08, "begin_tag": "<e>", "best_model_dir": "best_model/fr_fr", "cache_dir": "temp/cache_dir/", "config": {}, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 70, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 10, "encoding": null, "end_tag": "</e>", "eval_batch_size": 8, "evaluate_during_training": true, "evaluate_during_training_silent": false, "evaluate_during_training_steps": 20, "evaluate_during_training_verbose": true, "evaluate_each_epoch": true, "fp16": false, "gradient_accumulation_steps": 1, "learning_rate": 1e-05, "local_rank": -1, "logging_steps": 20, "manual_seed": 777, "max_grad_norm": 1.0, "max_seq_length": 120, "model_name": "xlm-roberta-large", "model_type": "xlmroberta", "multiprocessing_chunksize": 500, "n_gpu": 1, "no_cache": false, "no_save": false, "num_train_epochs": 5, "output_dir": "temp/outputs/", "overwrite_output_dir": true, "process_count": 70, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": false, "save_model_every_epoch": false, "save_optimizer_and_scheduler": true, "save_steps": 20, "save_recent_only": true, "silent": false, "tensorboard_dir": null, "thread_count": null, "train_batch_size": 8, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": true, "use_multiprocessing": false, "wandb_kwargs": {"group": "fr_fr_xlm-roberta-large_CLS_concat", "job_type": "2"}, "wandb_project": "TransWiC-groups", "warmup_ratio": 0.1, "warmup_steps": 57, "weight_decay": 0, "skip_special_tokens": true, "model_class": "ClassificationModel", "labels_list": [0, 1], "labels_map": {}, "lazy_delimiter": "\t", "lazy_labels_column": 1, "lazy_loading": false, "lazy_loading_start_line": 1, "lazy_text_a_column": null, "lazy_text_b_column": null, "lazy_text_column": 0, "onnx": false, "regression": false, "sliding_window": false, "stride": 0.8, "tie_value": 1, "tagging": false, "strategy": "CLS", "special_tags": null, "merge_n": 1, "merge_type": "concat"}
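model_args.json follows the simpletransformers argument format (note "model_class": "ClassificationModel"). As a hedged sketch only, since the original training code is not part of this commit, the arguments could be fed back into a ClassificationModel roughly like this; fields such as begin_tag, strategy, and merge_type appear specific to the original (TransWiC) training code and may be ignored by stock simpletransformers, and the training DataFrames are hypothetical placeholders:

# Hedged sketch: re-instantiating a simpletransformers ClassificationModel
# from the saved model_args.json.
import json
from simpletransformers.classification import ClassificationModel

with open("model_args.json") as f:
    model_args = json.load(f)

model = ClassificationModel(
    model_args["model_type"],   # "xlmroberta"
    model_args["model_name"],   # "xlm-roberta-large"
    args=model_args,
    use_cuda=True,
)
# model.train_model(train_df, eval_df=eval_df)  # train_df / eval_df: hypothetical DataFrames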
optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d6e45a59bfca3560e53ec68c5ee75c5ba614fb979d636c2712aea77db880308
+size 4487768873
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c7dd121c86406650da3cc309927de35bdd7faf8def6b4d72bc356bc37577410
+size 2243936061
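The three lines above are a Git LFS pointer: only the SHA-256 and size (~2.2 GB) are stored in the Git history, while the weights themselves live on LFS. A hedged sketch of fetching the actual file with huggingface_hub, where the repo id is a placeholder:

# Sketch: downloading the LFS-backed weights referenced by the pointer above.
# "username/model-repo" is a placeholder for the actual repository id.
from huggingface_hub import hf_hub_download

weights_path = hf_hub_download(
    repo_id="username/model-repo",
    filename="pytorch_model.bin",
)
print(weights_path)  # local cache path of the downloaded weights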
scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:358c5a102b266abe1e26d94430d927dc1a151702bec6ddede1eb11706168acbe
+size 627
sentencepiece.bpe.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
test_eval.txt
ADDED
@@ -0,0 +1,43 @@
+Default classification report:
+              precision    recall  f1-score   support
+
+           F     0.5036    0.1380    0.2166       500
+           T     0.5006    0.8640    0.6339       500
+
+    accuracy                         0.5010      1000
+   macro avg     0.5021    0.5010    0.4253      1000
+weighted avg     0.5021    0.5010    0.4253      1000
+
+
+ADJ
+Accuracy = 0.5489130434782609
+Weighted Recall = 0.5489130434782609
+Weighted Precision = 0.5308763586956522
+Weighted F1 = 0.47553287219318685
+Macro Recall = 0.5128832160324466
+Macro Precision = 0.528125
+Macro F1 = 0.45314570129265586
+ADV
+Accuracy = 0.6333333333333333
+Weighted Recall = 0.6333333333333333
+Weighted Precision = 0.5596153846153846
+Weighted F1 = 0.5823240589198037
+Macro Recall = 0.4841269841269841
+Macro Precision = 0.47115384615384615
+Macro F1 = 0.4599018003273323
+NOUN
+Accuracy = 0.4980544747081712
+Weighted Recall = 0.4980544747081712
+Weighted Precision = 0.4848703726466867
+Weighted F1 = 0.423832547104849
+Macro Recall = 0.4924810322111671
+Macro Precision = 0.48457739260087596
+Macro F1 = 0.4205337947669236
+VERB
+Accuracy = 0.45955882352941174
+Weighted Recall = 0.45955882352941174
+Weighted Precision = 0.5359771380303533
+Weighted F1 = 0.3822956863210408
+Macro Recall = 0.511138682106424
+Macro Precision = 0.5237758945386064
+Macro F1 = 0.40696794862287355
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sp_model_kwargs": {}, "do_lower_case": false, "model_max_length": 512, "special_tokens_map_file": null, "tokenizer_file": "/home/hh2/.cache/huggingface/transformers/7766c86e10505ed9b39af34e456480399bf06e35b36b8f2b917460a2dbe94e59.a984cf52fc87644bd4a2165f1e07e0ac880272c1e82d648b4674907056912bd7", "name_or_path": "xlm-roberta-large", "tokenizer_class": "XLMRobertaTokenizer"}
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e44e7277c7f66328447e9d910bc1272d0d5c4bff2f7bf81bdd0ed7f9ffa94f5c
+size 2811