Column types: Model_name (string), Train_size (int64), Test_size (int64), arg (dict), lora (list), Parameters (int64), Trainable_parameters (int64), r (int64), Memory Allocation (string), Training Time (string), accuracy (float64), f1_macro (float64), f1_weighted (float64), precision (float64), recall (float64).

| Model_name | Train_size | Test_size | arg | lora | Parameters | Trainable_parameters | r | Memory Allocation | Training Time | accuracy | f1_macro | f1_weighted | precision | recall |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Qwen/Qwen3-1.7B | 50,775 | 12,652 | {"adafactor": false, "adam_beta1": 0.9, "adam_beta2": 0.999, "adam_epsilon": 1e-8, "bf16": false, "fp16": false, "fp16_opt_level": "O1", "gradient_accumulation_steps": 4, "half_precision_backend": "auto", "label_smoothing_factor": 0, "learning_rate": 0.00002, "lr_scheduler_type": "linear", "max_grad_norm": 1, "max_steps": -1, "n_gpu": 1, "num_train_epochs": 1, "optim": "adamw_8bit", "optim_args": "Not have", "per_device_eval_batch_size": 8, "per_device_train_batch_size": 8, "warmup_ratio": 0, "warmup_steps": 5, "weight_decay": 0.01} | ["Not used"] | 1,720,054,784 | 1,720,054,784 | -1 | 14664.81 | 1740.0 | 0.90792 | 0.90417 | 0.908134 | 0.905682 | 0.902969 |
| facebook/opt-1.3b | 50,775 | 12,652 | {"adafactor": false, "adam_beta1": 0.9, "adam_beta2": 0.999, "adam_epsilon": 1e-8, "bf16": false, "fp16": false, "fp16_opt_level": "O1", "gradient_accumulation_steps": 4, "half_precision_backend": "auto", "label_smoothing_factor": 0, "learning_rate": 0.00002, "lr_scheduler_type": "linear", "max_grad_norm": 1, "max_steps": -1, "n_gpu": 1, "num_train_epochs": 1, "optim": "adamw_8bit", "optim_args": "Not have", "per_device_eval_batch_size": 8, "per_device_train_batch_size": 8, "warmup_ratio": 0, "warmup_steps": 5, "weight_decay": 0.01} | ["Not used"] | 1,315,770,368 | 1,315,770,368 | -1 | 9698.53 | 1412.91 | 0.906576 | 0.902522 | 0.906749 | 0.903631 | 0.901667 |
| tiiuae/Falcon3-1B-Instruct | 50,775 | 12,652 | {"adafactor": false, "adam_beta1": 0.9, "adam_beta2": 0.999, "adam_epsilon": 1e-8, "bf16": false, "fp16": false, "fp16_opt_level": "O1", "gradient_accumulation_steps": 4, "half_precision_backend": "auto", "label_smoothing_factor": 0, "learning_rate": 0.00002, "lr_scheduler_type": "linear", "max_grad_norm": 1, "max_steps": -1, "n_gpu": 1, "num_train_epochs": 1, "optim": "adamw_8bit", "optim_args": "Not have", "per_device_eval_batch_size": 8, "per_device_train_batch_size": 8, "warmup_ratio": 0, "warmup_steps": 5, "weight_decay": 0.01} | ["Not used"] | 1,400,999,936 | 1,400,999,936 | -1 | 11671.36 | 1322.97 | 0.905074 | 0.901303 | 0.905248 | 0.902047 | 0.900759 |
| deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B | 50,775 | 12,652 | {"adafactor": false, "adam_beta1": 0.9, "adam_beta2": 0.999, "adam_epsilon": 1e-8, "bf16": false, "fp16": false, "fp16_opt_level": "O1", "gradient_accumulation_steps": 4, "half_precision_backend": "auto", "label_smoothing_factor": 0, "learning_rate": 0.00002, "lr_scheduler_type": "linear", "max_grad_norm": 1, "max_steps": -1, "n_gpu": 1, "num_train_epochs": 1, "optim": "adamw_8bit", "optim_args": "Not have", "per_device_eval_batch_size": 8, "per_device_train_batch_size": 8, "warmup_ratio": 0, "warmup_steps": 5, "weight_decay": 0.01} | ["Not used"] | 1,543,318,016 | 1,543,318,016 | -1 | 13176.17 | 1538.58 | 0.902387 | 0.89851 | 0.902619 | 0.899403 | 0.897965 |
| Alibaba-NLP/E2Rank-0.6B | 50,775 | 12,652 | {"adafactor": false, "adam_beta1": 0.9, "adam_beta2": 0.999, "adam_epsilon": 1e-8, "bf16": false, "fp16": false, "fp16_opt_level": "O1", "gradient_accumulation_steps": 4, "half_precision_backend": "auto", "label_smoothing_factor": 0, "learning_rate": 0.00002, "lr_scheduler_type": "linear", "max_grad_norm": 1, "max_steps": -1, "n_gpu": 1, "num_train_epochs": 1, "optim": "adamw_8bit", "optim_args": "Not have", "per_device_eval_batch_size": 8, "per_device_train_batch_size": 8, "warmup_ratio": 0, "warmup_steps": 5, "weight_decay": 0.01} | ["Not used"] | 595,789,824 | 595,789,824 | -1 | 6143.03 | 1061.17 | 0.900727 | 0.896198 | 0.900992 | 0.897435 | 0.895297 |
| google/mt5-large | 50,775 | 12,652 | {"adafactor": false, "adam_beta1": 0.9, "adam_beta2": 0.999, "adam_epsilon": 1e-8, "bf16": false, "fp16": false, "fp16_opt_level": "O1", "gradient_accumulation_steps": 4, "half_precision_backend": "auto", "label_smoothing_factor": 0, "learning_rate": 0.00002, "lr_scheduler_type": "linear", "max_grad_norm": 1, "max_steps": -1, "n_gpu": 1, "num_train_epochs": 1, "optim": "adamw_8bit", "optim_args": "Not have", "per_device_eval_batch_size": 8, "per_device_train_batch_size": 8, "warmup_ratio": 0, "warmup_steps": 5, "weight_decay": 0.01} | ["Not used"] | 974,517,261 | 974,517,261 | -1 | 10495.81 | 1844.35 | 0.096111 | 0.03691 | 0.044619 | 0.055458 | 0.079446 |
| EleutherAI/gpt-neo-1.3B | 50,775 | 12,652 | {"adafactor": false, "adam_beta1": 0.9, "adam_beta2": 0.999, "adam_epsilon": 1e-8, "bf16": false, "fp16": false, "fp16_opt_level": "O1", "gradient_accumulation_steps": 4, "half_precision_backend": "auto", "label_smoothing_factor": 0, "learning_rate": 0.00002, "lr_scheduler_type": "linear", "max_grad_norm": 1, "max_steps": -1, "n_gpu": 1, "num_train_epochs": 1, "optim": "adamw_8bit", "optim_args": "Not have", "per_device_eval_batch_size": 8, "per_device_train_batch_size": 8, "warmup_ratio": 0, "warmup_steps": 5, "weight_decay": 0.01} | ["Not used"] | 1,315,602,432 | 1,315,602,432 | -1 | 10908.61 | 1515.02 | 0.897408 | 0.893729 | 0.897698 | 0.894574 | 0.893203 |
README.md exists but its content is empty.

Downloads last month: 3