rubentito committed
Commit cba5e4d · 1 Parent(s): b9097fb

Upload 10 files
README.txt ADDED
@@ -0,0 +1,8 @@
+ This is a BERT model fine-tuned for extractive question answering (QA).
+ We use the pretrained model bert-large-uncased-whole-word-masking-finetuned-squad from https://huggingface.co/transformers/pretrained_models.html and fine-tune it further on the train split of DocVQA.
+
+ This is the best-performing BERT model in our experiments, reported in our paper https://arxiv.org/abs/2007.00398,
+ i.e. the model listed last in Table 3 of the paper, which yields an ANLS score of 0.655 on val and 0.665 on test.
+
+
+ For the predictions on the val and test splits using this model, see the docvqa_eval_results folder.
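
As a usage note, the checkpoint can be loaded with the Hugging Face transformers question-answering pipeline. A minimal sketch, assuming Python with transformers installed and this repository checked out locally at ./docvqa-bert (a hypothetical path; substitute the actual hub id or directory):

# Load the fine-tuned extractive QA model and ask a question about a context.
from transformers import pipeline

qa = pipeline("question-answering", model="./docvqa-bert", tokenizer="./docvqa-bert")

result = qa(
    question="What is the date of the letter?",
    context="Dear Mr. Smith, thank you for your letter of 14 July 1987 ...",
)
print(result["answer"], result["score"])  # best answer span and its confidence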
config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "architectures": [
+     "BertForQuestionAnswering"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "type_vocab_size": 2,
+   "vocab_size": 30522
+ }
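
The values above identify the BERT-large architecture (24 layers, hidden size 1024, 16 attention heads). A minimal sketch of instantiating that architecture from this file alone, assuming transformers is installed and config.json sits in the current directory; note this builds randomly initialised weights, not the fine-tuned ones:

from transformers import BertConfig, BertForQuestionAnswering

# Build the model skeleton described by config.json (no trained weights loaded).
config = BertConfig.from_json_file("config.json")
model = BertForQuestionAnswering(config)
print(config.num_hidden_layers, config.hidden_size, config.num_attention_heads)  # 24 1024 16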
model_args.json ADDED
@@ -0,0 +1 @@
+ {"doc_stride": 128, "max_query_length": 64, "n_best_size": 20, "max_answer_length": 50, "null_score_diff_threshold": 0.0, "adam_epsilon": 1e-08, "best_model_dir": "./models/", "cache_dir": "cache_dir/", "config": {}, "do_lower_case": true, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "correct", "early_stopping_metric_minimize": false, "early_stopping_patience": 3, "encoding": null, "eval_batch_size": 64, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 2000, "evaluate_during_training_verbose": false, "fp16": false, "fp16_opt_level": "O1", "gradient_accumulation_steps": 1, "learning_rate": 2e-05, "local_rank": -1, "logging_steps": 50, "manual_seed": null, "max_grad_norm": 1.0, "max_seq_length": 384, "multiprocessing_chunksize": 500, "n_gpu": 4, "no_cache": false, "no_save": false, "num_train_epochs": 6, "output_dir": "./output/", "overwrite_output_dir": false, "process_count": 38, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": false, "save_model_every_epoch": false, "save_steps": 2000, "save_optimizer_and_scheduler": true, "silent": false, "tensorboard_dir": null, "train_batch_size": 8, "use_cached_eval_features": true, "use_early_stopping": false, "use_multiprocessing": true, "wandb_kwargs": {}, "wandb_project": null, "warmup_ratio": 0.06, "warmup_steps": 2232, "weight_decay": 0, "model_name": "bert-large-uncased-whole-word-masking-finetuned-squad", "model_type": "bert"}
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f79453a3bd6ed95b538970d050dd9933c82eda656af61edee5c1629696f57fd
+ size 2681269699
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19c251308a6df535a81326cbb5bef4bd9a28902bef73458f9cdbe66e959b8990
+ size 1340675298
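
The .pt/.bin entries in this commit are Git LFS pointer files; the real binaries are fetched separately (e.g. with git lfs pull), after which the sha256 oid recorded in each pointer can serve as an integrity check. A small sketch, using the digest from the pytorch_model.bin pointer above:

import hashlib

EXPECTED = "19c251308a6df535a81326cbb5bef4bd9a28902bef73458f9cdbe66e959b8990"

def sha256_of(path, chunk_size=1 << 20):
    # Stream in 1 MiB chunks so the ~1.3 GB checkpoint never sits in memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

assert sha256_of("pytorch_model.bin") == EXPECTED, "corrupt download or unresolved LFS pointer"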
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b1f40bd2002c7bc5b6d1552582ddbf9c57bd3d3772be3fa501c2305ae44cdb41
+ size 326
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "model_max_length": 512, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7663f99c453fca38d6cddb50fcac5f6974475b926ed40662fa5f5a37e9a18539
+ size 1950
vocab.txt ADDED
The diff for this file is too large to render. See raw diff