Upload 2 files
- dataset_info.json +22 -0
- llama3_guardrail_lora_sft.yaml +37 -0
dataset_info.json
ADDED
@@ -0,0 +1,22 @@
+{
+  "train_sample": {
+    "file_name": "train_sample.json",
+    "columns": {
+      "prompt": "instruction",
+      "query": "input",
+      "response": "output",
+      "system": "system",
+      "history": "history"
+    }
+  },
+  "test_sample": {
+    "file_name": "train_sample.json",
+    "columns": {
+      "prompt": "instruction",
+      "query": "input",
+      "response": "output",
+      "system": "system",
+      "history": "history"
+    }
+  }
+}
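Both entries register data in LLaMA-Factory's alpaca format: the keys on the left (prompt, query, response, system, history) are the framework's internal column names, and the values on the right are the matching keys inside the data file. Note that test_sample points at the same train_sample.json, so evaluation here runs over the training data rather than a held-out file. A record satisfying this mapping would look roughly like the sketch below; the field values are illustrative, not taken from the actual dataset:

{
  "instruction": "Decide whether the user message violates the safety policy.",
  "input": "How can I bypass a login screen?",
  "output": "unsafe",
  "system": "You are a guardrail classifier.",
  "history": []
}

In LLaMA-Factory's alpaca format, history holds prior conversation turns as [instruction, response] pairs and may be left empty.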
llama3_guardrail_lora_sft.yaml
ADDED
@@ -0,0 +1,37 @@
+### examples/train_lora/llama3_lora_sft.yaml
+model_name_or_path: "meta-llama/Llama-Guard-3-1B"
+
+stage: sft
+do_train: true
+do_eval: true
+finetuning_type: lora
+lora_target: all
+
+dataset: train_sample
+eval_dataset: test_sample
+dataset_dir: ./data
+template: llama3
+cutoff_len: 4096
+max_samples: 1000
+overwrite_cache: true
+preprocessing_num_workers: 64
+
+output_dir: ./saves/llama3-1b/lora/sft
+logging_steps: 1
+save_steps: 10
+plot_loss: true
+overwrite_output_dir: true
+
+per_device_train_batch_size: 4
+gradient_accumulation_steps: 8
+learning_rate: 1.0e-4
+num_train_epochs: 3.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.1
+bf16: true
+ddp_timeout: 180000000
+
+# val_size: 0.1
+per_device_eval_batch_size: 16
+eval_strategy: steps
+eval_steps: 1
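With dataset_info.json and train_sample.json placed under dataset_dir (./data), this run can be launched with LLaMA-Factory's CLI, e.g. `llamafactory-cli train llama3_guardrail_lora_sft.yaml`. The effective training batch size is per_device_train_batch_size × gradient_accumulation_steps = 4 × 8 = 32 per device, and with eval_strategy: steps and eval_steps: 1 the test_sample split is evaluated after every optimizer step, which is costly and presumably intended for debugging on a small sample rather than a full training run.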