| model dict | lora dict | training dict | data dict |
|---|---|---|---|
{
"base_model": "meta-llama/Llama-3.2-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 64,
"lora_alpha": 128,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.0001,
"num_train_epochs": 2,
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 4,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1E.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "meta-llama/Llama-3.2-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 64,
"lora_alpha": 128,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.00001,
"num_train_epochs": 2,
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 4,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1E.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "meta-llama/Llama-3.2-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 64,
"lora_alpha": 128,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.0002,
"num_train_epochs": 2,
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 4,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1E.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "meta-llama/Llama-3.2-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 64,
"lora_alpha": 128,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.0003,
"num_train_epochs": 2,
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 4,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1E.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "meta-llama/Llama-3.2-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 64,
"lora_alpha": 128,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.00009,
"num_train_epochs": 2,
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 4,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1E.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "Qwen/Qwen2.5-Coder-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 16,
"lora_alpha": 32,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.0001,
"num_train_epochs": 2,
"per_device_train_batch_size": 1,
"gradient_accumulation_steps": 2,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1E.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "Qwen/Qwen2.5-Coder-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 16,
"lora_alpha": 32,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.0002,
"num_train_epochs": 2,
"per_device_train_batch_size": 1,
"gradient_accumulation_steps": 2,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1E.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "Qwen/Qwen2.5-Coder-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 16,
"lora_alpha": 32,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.0003,
"num_train_epochs": 2,
"per_device_train_batch_size": 1,
"gradient_accumulation_steps": 2,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1E.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "Qwen/Qwen2.5-Coder-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 32,
"lora_alpha": 64,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.0001,
"num_train_epochs": 2,
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 4,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1E.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "Qwen/Qwen2.5-Coder-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 32,
"lora_alpha": 64,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.0002,
"num_train_epochs": 2,
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 4,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1E.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "Qwen/Qwen2.5-Coder-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 32,
"lora_alpha": 64,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.0003,
"num_train_epochs": 2,
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 4,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1E.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "Qwen/Qwen2.5-Coder-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 32,
"lora_alpha": 64,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.00009,
"num_train_epochs": 2,
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 4,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1E.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "Qwen/Qwen2.5-Coder-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 64,
"lora_alpha": 128,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.0001,
"num_train_epochs": 2,
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 4,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1E.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "Qwen/Qwen2.5-Coder-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 64,
"lora_alpha": 128,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.0002,
"num_train_epochs": 2,
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 4,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1E.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "Qwen/Qwen2.5-Coder-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 64,
"lora_alpha": 128,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.0003,
"num_train_epochs": 2,
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 4,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1E.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "Qwen/Qwen2.5-Coder-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 64,
"lora_alpha": 128,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.00009,
"num_train_epochs": 2,
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 4,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1E.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "Qwen/Qwen2.5-Coder-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 64,
"lora_alpha": 128,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.0001,
"num_train_epochs": 2,
"per_device_train_batch_size": 1,
"gradient_accumulation_steps": 2,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1I.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "Qwen/Qwen2.5-Coder-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 64,
"lora_alpha": 128,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.0002,
"num_train_epochs": 2,
"per_device_train_batch_size": 1,
"gradient_accumulation_steps": 2,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1I.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "Qwen/Qwen2.5-Coder-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 64,
"lora_alpha": 128,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.0003,
"num_train_epochs": 2,
"per_device_train_batch_size": 1,
"gradient_accumulation_steps": 2,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1I.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
{
"base_model": "Qwen/Qwen2.5-Coder-3B",
"max_seq_length": 7168,
"packing": false
}
|
{
"use_lora": true,
"lora_r": 64,
"lora_alpha": 128,
"lora_dropout": 0.05,
"lora_target_modules": "all-linear"
}
|
{
"learning_rate": 0.0004,
"num_train_epochs": 2,
"per_device_train_batch_size": 1,
"gradient_accumulation_steps": 2,
"warmup_ratio": 0.03,
"lr_scheduler_type": "SchedulerType.COSINE",
"weight_decay": 0
}
|
{
"data_file": "data_index_03/grammar_from_index_03_threshold_7000_training_D1I.json",
"max_samples": 20000,
"num_samples_used": 20000
}
|
No dataset card yet.

Downloads last month: 1