All models were evaluated as `float16` (`model_dtype`) at the `main` revision (`model_sha`). Each cell reports accuracy (`acc`) on the corresponding task.

| model_name | tweetsent | oab_mcq | enem_mcq | bluex_mcq | hatebr | assin_rte_ptbr | faquad_rte_parq |
|---|---|---|---|---|---|---|---|
| 22h__cabrita_7b_pt_850000 | 0.451 | 0.230 | 0.194 | 0.183 | 0.500 | 0.226 | 0.488 |
| 22h__open-cabrita3b | 0.450 | 0.229 | 0.194 | 0.181 | 0.500 | 0.232 | 0.488 |
| PORTULAN__gervasio-7b-portuguese-ptbr-decoder | 0.456 | 0.233 | 0.188 | 0.198 | 0.507 | 0.232 | 0.488 |
| PORTULAN__gervasio-7b-portuguese-ptpt-decoder | 0.454 | 0.232 | 0.198 | 0.187 | 0.524 | 0.232 | 0.488 |
| PORTULAN__gervasio-8b-portuguese-ptpt-decoder | 0.729 | 0.482 | 0.700 | 0.576 | 0.801 | 0.240 | 0.524 |
| TucanoBR__Tucano-160m | 0.464 | 0.229 | 0.193 | 0.185 | 0.503 | 0.455 | 0.488 |
| TucanoBR__Tucano-1b1-Instruct | 0.450 | 0.269 | 0.199 | 0.227 | 0.500 | 0.520 | 0.479 |
| TucanoBR__Tucano-1b1 | 0.450 | 0.215 | 0.194 | 0.228 | 0.501 | 0.232 | 0.488 |
| TucanoBR__Tucano-2b4-Instruct | 0.450 | 0.255 | 0.184 | 0.231 | 0.500 | 0.232 | 0.488 |
| TucanoBR__Tucano-2b4 | 0.436 | 0.229 | 0.179 | 0.190 | 0.521 | 0.232 | 0.488 |
| maritaca-ai__sabia-7b | 0.455 | 0.396 | 0.635 | 0.522 | 0.621 | 0.232 | 0.506 |
| prosodia__ProsodiaT1.7B | 0.747 | 0.566 | 0.714 | 0.593 | 0.785 | 0.833 | 0.665 |
| recogna-nlp__Bode-3.1-8B-Instruct-full | 0.720 | 0.478 | 0.698 | 0.565 | 0.790 | 0.236 | 0.538 |
| recogna-nlp__Phi-Bode | 0.491 | 0.235 | 0.202 | 0.182 | 0.527 | 0.232 | 0.447 |
| recogna-nlp__phi-bode-2-ultraalpaca | 0.482 | 0.232 | 0.199 | 0.183 | 0.506 | 0.232 | 0.291 |
These results accompany the first release of "Atlas", our Portuguese-only language model benchmark, intended to serve as a guide to the SLM/LLM landscape across all Portuguese-speaking countries. The results here are not meant to cover every model or every possible benchmark task; rather, the goal is to provide a broad panorama of lusophone models.
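Since each leaderboard entry pairs a config dict with a results dict (task name mapped to `{"acc": ...}`), the sketch below shows how one might post-process a row, e.g. to compute an unweighted macro-average accuracy. This is a minimal, illustrative example: the `entry` layout and the `macro_avg_acc` helper are assumptions of ours, not part of any official Atlas tooling.

```python
# Hypothetical post-processing of one leaderboard row; values are copied
# from the sabia-7b entry in the table above.
entry = {
    "config": {
        "model_name": "maritaca-ai__sabia-7b",
        "model_dtype": "float16",
        "model_sha": "main",
    },
    "results": {
        "tweetsent": {"acc": 0.455},
        "oab_mcq": {"acc": 0.396},
        "enem_mcq": {"acc": 0.635},
        "bluex_mcq": {"acc": 0.522},
        "hatebr": {"acc": 0.621},
        "assin_rte_ptbr": {"acc": 0.232},
        "faquad_rte_parq": {"acc": 0.506},
    },
}

def macro_avg_acc(results: dict) -> float:
    """Unweighted mean of the per-task accuracies."""
    accs = [task["acc"] for task in results.values()]
    return sum(accs) / len(accs)

print(f'{entry["config"]["model_name"]}: {macro_avg_acc(entry["results"]):.3f}')
# maritaca-ai__sabia-7b: 0.481
```

Note that a macro average weights every task equally regardless of dataset size; any ranking derived this way is a convenience, not an official Atlas score.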