{
  "model_name_or_path": "huggyllama/llama-7b",
  "recompute_baseline": false,
  "cache_dir": "/home/panda/pda-llm/cache/sft-2000",
  "max_length": 1024,
  "trust_remote_code": true,
  "train_datasets": [
    [
      "SAFE-ALPACA/2000",
      {
        "proportion": 1.0
      }
    ]
  ],
  "eval_datasets": null,
  "safety_ratio_tol": 50.0,
  "safe_sft": true,
  "resilient_coeff": 1.0,
  "epochs": 6,
  "per_device_train_batch_size": 4,
  "per_device_eval_batch_size": 4,
  "gradient_accumulation_steps": 32,
  "gradient_checkpointing": true,
  "lr": 0.0001,
  "lr_scheduler_type": "cosine",
  "lr_warmup_ratio": 0.03,
  "weight_decay": 0.0,
  "seed": 42,
  "fp16": true,
  "bf16": false,
  "tf32": true,
  "lora_r": 4,
  "lora_alpha": 16,
  "lora_dropout": 0.05,
  "eval_strategy": "epoch",
  "eval_interval": 1000000,
  "need_eval": true,
  "eval_split_ratio": null,
  "output_dir": "/home/panda/pda-llm/output/sft-safe/2000/run-true-2000-1-50",
  "log_type": "wandb",
  "log_dir": "/home/panda/pda-llm/output/sft-safe/2000/run-true-2000-1-50",
  "log_project": "SAFE-SFT-v3",
  "log_run_name": "safe-sft-2025-05-01-23-41-21",
  "save_16bit": false,
  "save_interval": 1000000,
  "local_rank": 0,
  "zero_stage": 0,
  "offload": "none",
  "deepspeed": false,
  "deepspeed_config": null,
  "deepscale": false,
  "deepscale_config": null,
  "global_rank": 0,
  "device": {
    "type": "torch.device",
    "repr": "device(type='cuda', index=0)"
  },
  "num_update_steps_per_epoch": 86,
  "total_training_steps": 516
}