poojadamavarapu123 committed on
Commit b8df96a · verified · 1 Parent(s): 6104e4a

End of training

README.md ADDED
@@ -0,0 +1,80 @@
+ ---
+ library_name: transformers
+ license: mit
+ base_model: microsoft/layoutlm-base-uncased
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: layoutlm-funsd
+   results: []
+ ---
+
+ # layoutlm-funsd
+
+ This model is a fine-tuned version of [microsoft/layoutlm-base-uncased](https://huggingface.co/microsoft/layoutlm-base-uncased) on the FUNSD form-understanding dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.6945
+ - Answer: precision 0.7029, recall 0.7985, F1 0.7477 (support: 809)
+ - Header: precision 0.3657, recall 0.4118, F1 0.3874 (support: 119)
+ - Question: precision 0.7919, recall 0.8216, F1 0.8065 (support: 1065)
+ - Overall Precision: 0.7275
+ - Overall Recall: 0.7878
+ - Overall F1: 0.7564
+ - Overall Accuracy: 0.8052
+
+ ## Model description
+
+ LayoutLM base (12 layers, 768 hidden units, uncased) fine-tuned for token classification on scanned forms, predicting BIO tags for HEADER, QUESTION, and ANSWER fields (see `config.json`).
+
+ ## Intended uses & limitations
+
+ Intended for key-value extraction from scanned form documents. Header fields remain hard to detect (F1 ≈ 0.39 on the evaluation set), so downstream use should not rely on header predictions.
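+
+ The snippet below is a minimal inference sketch, not a recorded recipe. It assumes the model is published as `poojadamavarapu123/layoutlm-funsd` (hub id inferred from the committer and model names) and that Tesseract OCR is installed, since the bundled `LayoutLMv2Processor` ships with `apply_ocr` enabled:
+
+ ```python
+ import torch
+ from PIL import Image
+ from transformers import LayoutLMForTokenClassification, LayoutLMv2Processor
+
+ repo = "poojadamavarapu123/layoutlm-funsd"  # assumed hub id
+ processor = LayoutLMv2Processor.from_pretrained(repo)
+ model = LayoutLMForTokenClassification.from_pretrained(repo)
+
+ # Any scanned form image; the processor OCRs it into words and boxes.
+ image = Image.open("form.png").convert("RGB")
+ encoding = processor(image, return_tensors="pt", truncation=True)
+ encoding.pop("image", None)  # LayoutLM (v1) consumes text + bbox only
+
+ with torch.no_grad():
+     logits = model(**encoding).logits
+
+ # One BIO tag per subword token.
+ predictions = logits.argmax(-1).squeeze().tolist()
+ print([model.config.id2label[p] for p in predictions])
+ ```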
+
+ ## Training and evaluation data
+
+ The Trainer did not record the dataset; the model name and the HEADER/QUESTION/ANSWER label set point to FUNSD, a 199-document benchmark for form understanding.
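+
+ A hedged loading sketch; `nielsr/funsd-layoutlmv3` is an assumed community Parquet copy of FUNSD, not a path recorded in this repository:
+
+ ```python
+ from datasets import load_dataset
+
+ # Assumption: a community-hosted copy of FUNSD with word-level
+ # tokens, bounding boxes, and BIO ner_tags; swap in your own copy.
+ funsd = load_dataset("nielsr/funsd-layoutlmv3")
+ example = funsd["train"][0]
+ print(example["tokens"][:5], example["ner_tags"][:5])
+ ```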
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a `TrainingArguments` sketch reproducing them follows the list):
+ - learning_rate: 3e-05
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: fused AdamW (`adamw_torch_fused`) with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - num_epochs: 15
+ - mixed_precision_training: Native AMP
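+
+ A minimal sketch of how these values map onto `transformers.TrainingArguments`; `output_dir` and `logging_steps` are illustrative assumptions, not recorded settings:
+
+ ```python
+ from transformers import TrainingArguments
+
+ training_args = TrainingArguments(
+     output_dir="layoutlm-funsd",       # assumed, not recorded
+     learning_rate=3e-5,
+     per_device_train_batch_size=16,
+     per_device_eval_batch_size=8,
+     seed=42,
+     optim="adamw_torch_fused",         # betas/epsilon already default to (0.9, 0.999) and 1e-8
+     lr_scheduler_type="linear",
+     num_train_epochs=15,
+     fp16=True,                         # Native AMP mixed precision
+     eval_strategy="epoch",             # the results table shows one eval per epoch
+     logging_steps=10,                  # assumed
+ )
+ ```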
+
+ ### Training results
+
+ Per-class cells report precision / recall / F1, rounded to four decimals; supports are constant across epochs (Answer: 809, Header: 119, Question: 1065).
+
+ | Training Loss | Epoch | Step | Validation Loss | Answer (P / R / F1) | Header (P / R / F1) | Question (P / R / F1) | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:------------------------:|:------------------------:|:------------------------:|:-----------------:|:--------------:|:----------:|:----------------:|
+ | 1.8328 | 1.0 | 10 | 1.6269 | 0.0121 / 0.0124 / 0.0122 | 0.0000 / 0.0000 / 0.0000 | 0.2000 / 0.1549 / 0.1746 | 0.1060 | 0.0878 | 0.0960 | 0.3555 |
+ | 1.4745 | 2.0 | 20 | 1.2810 | 0.1425 / 0.1557 / 0.1488 | 0.0000 / 0.0000 / 0.0000 | 0.4480 / 0.6761 / 0.5389 | 0.3394 | 0.4245 | 0.3772 | 0.5679 |
+ | 1.0973 | 3.0 | 30 | 0.9149 | 0.4819 / 0.6267 / 0.5449 | 0.0652 / 0.0252 / 0.0364 | 0.5712 / 0.7531 / 0.6497 | 0.5244 | 0.6583 | 0.5838 | 0.7085 |
+ | 0.8318 | 4.0 | 40 | 0.7626 | 0.5760 / 0.7540 / 0.6531 | 0.2162 / 0.1345 / 0.1658 | 0.6803 / 0.7474 / 0.7123 | 0.6175 | 0.7135 | 0.6620 | 0.7648 |
+ | 0.6741 | 5.0 | 50 | 0.7031 | 0.6305 / 0.7318 / 0.6773 | 0.2947 / 0.2353 / 0.2617 | 0.6975 / 0.8141 / 0.7513 | 0.6531 | 0.7461 | 0.6965 | 0.7841 |
+ | 0.5677 | 6.0 | 60 | 0.6814 | 0.6349 / 0.7738 / 0.6975 | 0.3125 / 0.2101 / 0.2513 | 0.7495 / 0.7784 / 0.7637 | 0.6814 | 0.7426 | 0.7107 | 0.7808 |
+ | 0.4939 | 7.0 | 70 | 0.6524 | 0.6796 / 0.7812 / 0.7269 | 0.3148 / 0.2857 / 0.2996 | 0.7666 / 0.8141 / 0.7896 | 0.7068 | 0.7692 | 0.7367 | 0.7968 |
+ | 0.4355 | 8.0 | 80 | 0.6496 | 0.6660 / 0.7886 / 0.7221 | 0.3056 / 0.2773 / 0.2907 | 0.7634 / 0.8300 / 0.7953 | 0.6992 | 0.7802 | 0.7375 | 0.8021 |
+ | 0.3922 | 9.0 | 90 | 0.6662 | 0.6965 / 0.7973 / 0.7435 | 0.3171 / 0.3277 / 0.3223 | 0.7817 / 0.8103 / 0.7958 | 0.7185 | 0.7762 | 0.7463 | 0.8034 |
+ | 0.3788 | 10.0 | 100 | 0.6630 | 0.7004 / 0.7948 / 0.7446 | 0.3628 / 0.3445 / 0.3534 | 0.7728 / 0.8207 / 0.7960 | 0.7206 | 0.7817 | 0.7499 | 0.8053 |
+ | 0.3263 | 11.0 | 110 | 0.6684 | 0.6941 / 0.7936 / 0.7405 | 0.3333 / 0.3529 / 0.3429 | 0.7745 / 0.8319 / 0.8022 | 0.7153 | 0.7878 | 0.7498 | 0.8053 |
+ | 0.3098 | 12.0 | 120 | 0.6795 | 0.7034 / 0.7973 / 0.7474 | 0.3594 / 0.3866 / 0.3725 | 0.7888 / 0.8169 / 0.8026 | 0.7267 | 0.7832 | 0.7539 | 0.8073 |
+ | 0.2976 | 13.0 | 130 | 0.6857 | 0.6913 / 0.7973 / 0.7405 | 0.3525 / 0.3613 / 0.3568 | 0.7832 / 0.8310 / 0.8064 | 0.7199 | 0.7893 | 0.7530 | 0.8042 |
+ | 0.277 | 14.0 | 140 | 0.6918 | 0.7023 / 0.7960 / 0.7462 | 0.3636 / 0.4034 / 0.3825 | 0.7848 / 0.8254 / 0.8046 | 0.7243 | 0.7883 | 0.7549 | 0.8038 |
+ | 0.2706 | 15.0 | 150 | 0.6945 | 0.7029 / 0.7985 / 0.7477 | 0.3657 / 0.4118 / 0.3874 | 0.7919 / 0.8216 / 0.8065 | 0.7275 | 0.7878 | 0.7564 | 0.8052 |
+
+ ### Framework versions
+
+ - Transformers 4.57.1
+ - PyTorch 2.9.0+cu126
+ - Datasets 4.0.0
+ - Tokenizers 0.22.1
config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "architectures": [
+     "LayoutLMForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "dtype": "float32",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "O",
+     "1": "B-HEADER",
+     "2": "I-HEADER",
+     "3": "B-QUESTION",
+     "4": "I-QUESTION",
+     "5": "B-ANSWER",
+     "6": "I-ANSWER"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-ANSWER": 5,
+     "B-HEADER": 1,
+     "B-QUESTION": 3,
+     "I-ANSWER": 6,
+     "I-HEADER": 2,
+     "I-QUESTION": 4,
+     "O": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_2d_position_embeddings": 1024,
+   "max_position_embeddings": 512,
+   "model_type": "layoutlm",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "transformers_version": "4.57.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
logs/events.out.tfevents.1763995492.14188b08a788.688.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e629bce1e3aa7b9789fc0cbdd237f66b7933f9bb481a4e14d111f4820f26c491
+ size 16178
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:557f1b7cc326bb8971d8b2ac726cfc7a9bc70b54219342a9780a709064fff7f8
+ size 450558212
preprocessor_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "apply_ocr": true,
+   "do_resize": true,
+   "image_processor_type": "LayoutLMv2ImageProcessor",
+   "ocr_lang": null,
+   "processor_class": "LayoutLMv2Processor",
+   "resample": 2,
+   "size": {
+     "height": 224,
+     "width": 224
+   },
+   "tesseract_config": ""
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,81 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "apply_ocr": false,
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "[CLS]",
+   "cls_token_box": [
+     0,
+     0,
+     0,
+     0
+   ],
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "never_split": null,
+   "only_label_first_subword": true,
+   "pad_token": "[PAD]",
+   "pad_token_box": [
+     0,
+     0,
+     0,
+     0
+   ],
+   "pad_token_label": -100,
+   "processor_class": "LayoutLMv2Processor",
+   "sep_token": "[SEP]",
+   "sep_token_box": [
+     1000,
+     1000,
+     1000,
+     1000
+   ],
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "LayoutLMv2Tokenizer",
+   "unk_token": "[UNK]"
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1d6db32240d5edecae3158a4a8bdf61c83d67933c7630d9aae343a5859c17eb
+ size 5841
vocab.txt ADDED
The diff for this file is too large to render. See raw diff