DanSarm committed on
Commit
d2f3d85
·
verified ·
1 Parent(s): 8e0bc3c

Fine-tuned Construction Receipt Model

Browse files
Files changed (4) hide show
  1. README.md +12 -12
  2. config.json +5 -5
  3. model.safetensors +2 -2
  4. tokenizer_config.json +2 -2
README.md CHANGED
@@ -16,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
16
 
17
  This model is a fine-tuned version of [DanSarm/receipt-core-model](https://huggingface.co/DanSarm/receipt-core-model) on an unknown dataset.
18
  It achieves the following results on the evaluation set:
19
- - Loss: 1.2568
20
 
21
  ## Model description
22
 
@@ -50,21 +50,21 @@ The following hyperparameters were used during training:
50
 
51
  | Training Loss | Epoch | Step | Validation Loss |
52
  |:-------------:|:-----:|:----:|:---------------:|
53
- | 2.911 | 1.0 | 2 | 2.5178 |
54
- | 2.0298 | 2.0 | 4 | 2.0128 |
55
- | 1.7239 | 3.0 | 6 | 1.7704 |
56
- | 1.5708 | 4.0 | 8 | 1.6107 |
57
- | 1.4319 | 5.0 | 10 | 1.5010 |
58
- | 1.3463 | 6.0 | 12 | 1.4165 |
59
- | 1.262 | 7.0 | 14 | 1.3495 |
60
- | 1.2137 | 8.0 | 16 | 1.3015 |
61
- | 1.1761 | 9.0 | 18 | 1.2705 |
62
- | 1.1369 | 10.0 | 20 | 1.2568 |
63
 
64
 
65
  ### Framework versions
66
 
67
  - Transformers 4.48.1
68
- - Pytorch 2.5.1+cu124
69
  - Datasets 3.2.0
70
  - Tokenizers 0.21.0
 
16
 
17
  This model is a fine-tuned version of [DanSarm/receipt-core-model](https://huggingface.co/DanSarm/receipt-core-model) on an unknown dataset.
18
  It achieves the following results on the evaluation set:
19
+ - Loss: 0.7435
20
 
21
  ## Model description
22
 
 
50
 
51
  | Training Loss | Epoch | Step | Validation Loss |
52
  |:-------------:|:-----:|:----:|:---------------:|
53
+ | 4.0171 | 1.0 | 2 | 2.5465 |
54
+ | 2.1463 | 2.0 | 4 | 1.9613 |
55
+ | 1.6495 | 3.0 | 6 | 1.5790 |
56
+ | 1.317 | 4.0 | 8 | 1.2996 |
57
+ | 1.0697 | 5.0 | 10 | 1.0985 |
58
+ | 0.9027 | 6.0 | 12 | 0.9597 |
59
+ | 0.7894 | 7.0 | 14 | 0.8566 |
60
+ | 0.7064 | 8.0 | 16 | 0.7940 |
61
+ | 0.6629 | 9.0 | 18 | 0.7589 |
62
+ | 0.629 | 10.0 | 20 | 0.7435 |
63
 
64
 
65
  ### Framework versions
66
 
67
  - Transformers 4.48.1
68
+ - Pytorch 2.6.0+cu124
69
  - Datasets 3.2.0
70
  - Tokenizers 0.21.0
config.json CHANGED
@@ -4,9 +4,9 @@
4
  "T5ForConditionalGeneration"
5
  ],
6
  "classifier_dropout": 0.0,
7
- "d_ff": 2048,
8
  "d_kv": 64,
9
- "d_model": 512,
10
  "decoder_start_token_id": 0,
11
  "dense_act_fn": "relu",
12
  "dropout_rate": 0.1,
@@ -18,9 +18,9 @@
18
  "layer_norm_epsilon": 1e-06,
19
  "model_type": "t5",
20
  "n_positions": 512,
21
- "num_decoder_layers": 6,
22
- "num_heads": 8,
23
- "num_layers": 6,
24
  "output_past": true,
25
  "pad_token_id": 0,
26
  "relative_attention_max_distance": 128,
 
4
  "T5ForConditionalGeneration"
5
  ],
6
  "classifier_dropout": 0.0,
7
+ "d_ff": 3072,
8
  "d_kv": 64,
9
+ "d_model": 768,
10
  "decoder_start_token_id": 0,
11
  "dense_act_fn": "relu",
12
  "dropout_rate": 0.1,
 
18
  "layer_norm_epsilon": 1e-06,
19
  "model_type": "t5",
20
  "n_positions": 512,
21
+ "num_decoder_layers": 12,
22
+ "num_heads": 12,
23
+ "num_layers": 12,
24
  "output_past": true,
25
  "pad_token_id": 0,
26
  "relative_attention_max_distance": 128,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:2a419ec4234254612766ea7d422cebfc2c63ebe3c27896ee858ecacfe8ab7aca
3
- size 242041896
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd619dc300dd887b3caffd241d334862f7c2da82b80bda3a02660d6877fac7d5
3
+ size 891644712
tokenizer_config.json CHANGED
@@ -928,12 +928,12 @@
928
  "<extra_id_98>",
929
  "<extra_id_99>"
930
  ],
931
- "clean_up_tokenization_spaces": true,
932
  "eos_token": "</s>",
933
  "extra_ids": 100,
934
  "extra_special_tokens": {},
935
  "max_length": 128,
936
- "model_max_length": 512,
937
  "pad_to_multiple_of": null,
938
  "pad_token": "<pad>",
939
  "pad_token_type_id": 0,
 
928
  "<extra_id_98>",
929
  "<extra_id_99>"
930
  ],
931
+ "clean_up_tokenization_spaces": false,
932
  "eos_token": "</s>",
933
  "extra_ids": 100,
934
  "extra_special_tokens": {},
935
  "max_length": 128,
936
+ "model_max_length": 1000000000000000019884624838656,
937
  "pad_to_multiple_of": null,
938
  "pad_token": "<pad>",
939
  "pad_token_type_id": 0,