asahi417 committed
Commit a606e16 · 1 Parent(s): 42fdabb

commit files to HF hub

README.md CHANGED

@@ -31,25 +31,25 @@ model-index:
  metrics:
  - name: BLEU4 (Question Answering)
    type: bleu4_question_answering
- value: 10.81
+ value: 19.59
  - name: ROUGE-L (Question Answering)
    type: rouge_l_question_answering
- value: 25.75
+ value: 32.35
  - name: METEOR (Question Answering)
    type: meteor_question_answering
- value: 20.96
+ value: 26.26
  - name: BERTScore (Question Answering)
    type: bertscore_question_answering
- value: 87.27
+ value: 90.06
  - name: MoverScore (Question Answering)
    type: moverscore_question_answering
- value: 67.79
+ value: 73.13
  - name: AnswerF1Score (Question Answering)
    type: answer_f1_score__question_answering
- value: 39.24
+ value: 49.99
  - name: AnswerExactMatch (Question Answering)
    type: answer_exact_match_question_answering
- value: 22.43
+ value: 30.87
 ---

 # Model Card of `lmqg/mt5-small-frquad-qa`
@@ -93,16 +93,16 @@ output = pipe("question: En quelle année a-t-on trouvé trace d'un haut fournea

 | | Score | Type | Dataset |
 |:-----------------|--------:|:--------|:-----------------------------------------------------------------|
- | AnswerExactMatch | 22.43 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
- | AnswerF1Score | 39.24 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
- | BERTScore | 87.27 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
- | Bleu_1 | 18.63 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
- | Bleu_2 | 15.15 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
- | Bleu_3 | 12.76 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
- | Bleu_4 | 10.81 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
- | METEOR | 20.96 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
- | MoverScore | 67.79 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
- | ROUGE_L | 25.75 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
+ | AnswerExactMatch | 30.87 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
+ | AnswerF1Score | 49.99 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
+ | BERTScore | 90.06 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
+ | Bleu_1 | 29.59 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
+ | Bleu_2 | 25.27 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
+ | Bleu_3 | 22.2 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
+ | Bleu_4 | 19.59 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
+ | METEOR | 26.26 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
+ | MoverScore | 73.13 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |
+ | ROUGE_L | 32.35 | default | [lmqg/qg_frquad](https://huggingface.co/datasets/lmqg/qg_frquad) |


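The hunk header above shows the card's usage snippet (`output = pipe("question: En quelle année a-t-on trouvé trace d'un haut fournea…`) truncated mid-string. A minimal sketch of that usage pattern, assuming the standard `transformers` text2text-generation pipeline and the `"question: ..., context: ..."` input format that lmqg QA models use; the question and context below are illustrative placeholders, not the card's original (truncated) example:

```python
# Minimal usage sketch; input format "question: ..., context: ..." is
# assumed from the truncated snippet in the hunk header above.
from transformers import pipeline

pipe = pipeline("text2text-generation", model="lmqg/mt5-small-frquad-qa")

# Placeholder question/context, not the card's original example.
output = pipe(
    "question: Quand la tour Eiffel a-t-elle été construite?, "
    "context: La tour Eiffel a été construite en deux ans, de 1887 à 1889."
)
print(output[0]["generated_text"])
```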
@@ -117,9 +117,9 @@ The following hyperparameters were used during fine-tuning:
  - model: google/mt5-small
  - max_length: 512
  - max_length_output: 32
- - epoch: 20
+ - epoch: 23
  - batch: 16
- - lr: 0.0005
+ - lr: 0.0004
  - fp16: False
  - random_seed: 1
  - gradient_accumulation_steps: 4
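This hunk changes two fine-tuning hyperparameters (`epoch`: 20 → 23, `lr`: 0.0005 → 0.0004), which is what produces the updated scores earlier in the diff. The card lists these as lmqg toolkit settings; purely as an illustration, here is how the updated values would map onto vanilla `transformers` `Seq2SeqTrainingArguments`. The `output_dir` is hypothetical, and `max_length`/`max_length_output` belong to tokenization and generation rather than to the trainer:

```python
# Illustrative mapping of the updated config onto Seq2SeqTrainingArguments;
# the actual training used the lmqg toolkit, not this exact call.
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="mt5-small-frquad-qa",  # hypothetical output path
    num_train_epochs=23,               # epoch: 23
    learning_rate=4e-4,                # lr: 0.0004
    per_device_train_batch_size=16,    # batch: 16
    gradient_accumulation_steps=4,     # gradient_accumulation_steps: 4
    fp16=False,                        # fp16: False
    seed=1,                            # random_seed: 1
)
# max_length: 512 / max_length_output: 32 apply at tokenization and
# generation time (e.g. tokenizer(..., max_length=512) and
# model.generate(max_length=32)), not in the trainer arguments.
```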
 
eval/metric.first.answer.paragraph_question.answer.lmqg_qg_frquad.default.json CHANGED
@@ -1 +1 @@
- {"validation": {"Bleu_1": 0.19846395531505504, "Bleu_2": 0.16438125648808355, "Bleu_3": 0.14011480904266574, "Bleu_4": 0.12095514167776761, "AnswerF1Score": 37.86047708898501, "AnswerExactMatch": 16.499372647427855, "METEOR": 0.2064317193398918, "ROUGE_L": 0.25360764971725297, "BERTScore": 0.8696774432956799, "MoverScore": 0.6634372689334539}, "test": {"Bleu_1": 0.18625190128059535, "Bleu_2": 0.15147379408861977, "Bleu_3": 0.12758082981388957, "Bleu_4": 0.10805168198813628, "AnswerF1Score": 39.237140683394436, "AnswerExactMatch": 22.427854454203263, "METEOR": 0.20958433174855287, "ROUGE_L": 0.2575338552201397, "BERTScore": 0.8727350258729988, "MoverScore": 0.6778583344478704}}
+ {"validation": {"Bleu_1": 0.3024003959415799, "Bleu_2": 0.2611753109951979, "Bleu_3": 0.23023829738600474, "Bleu_4": 0.20343876542656023, "AnswerF1Score": 46.800761536794, "AnswerExactMatch": 24.466750313676286, "METEOR": 0.2418757178999092, "ROUGE_L": 0.3115869277992877, "BERTScore": 0.8965151410741519, "MoverScore": 0.7109847704900778}, "test": {"Bleu_1": 0.29585025118709685, "Bleu_2": 0.2527355868028149, "Bleu_3": 0.22196077120887636, "Bleu_4": 0.195860575399812, "AnswerF1Score": 49.98649484387896, "AnswerExactMatch": 30.865746549560853, "METEOR": 0.2626112128036875, "ROUGE_L": 0.32345976744028926, "BERTScore": 0.9006157361129895, "MoverScore": 0.7313411013774388}}
eval/samples.test.hyp.paragraph_question.answer.lmqg_qg_frquad.default.txt CHANGED
The diff for this file is too large to render. See raw diff
 
eval/samples.validation.hyp.paragraph_question.answer.lmqg_qg_frquad.default.txt CHANGED
The diff for this file is too large to render. See raw diff