#!/bin/bash
# Fine-tune CodeLlama-7B-Instruct with LoRA adapters on the project's processed training split.

# Abort on the first failing command (e.g. a bad cd or a failed training run).
set -e

cd /workspace/ftt/codellama-migration

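# Paths to the local base-model snapshot, the processed training split, and this run's output directory.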
BASE_MODEL="/workspace/ftt/codellama-migration/models/base-models/CodeLlama-7B-Instruct"
DATASET="datasets/processed/split/train.jsonl"
OUTPUT_DIR="training-outputs/codellama-fifo-v1"

echo "======================================================================"
echo "🚀 Starting CodeLlama Fine-Tuning"
echo "======================================================================"
echo "Base Model: $BASE_MODEL"
echo "Dataset: $DATASET"
echo "Output: $OUTPUT_DIR"
echo "======================================================================"

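# Make sure the output directory exists before any checkpoints or logs are written to it.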
mkdir -p "$OUTPUT_DIR"

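# Use the project virtualenv when it is available; otherwise fall back to the system Python.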
if [ -f /venv/main/bin/activate ]; then
    source /venv/main/bin/activate
fi

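# Launch LoRA fine-tuning. The notes below assume finetune_codellama.py follows the usual
# Hugging Face Trainer / PEFT conventions for these flags:
#   - batch size 2 with gradient accumulation 4 -> effective batch of 8 examples per optimizer step
#   - LoRA rank 48 with alpha 96 -> adapter scaling factor alpha/r = 2
#   - evaluation and checkpointing every 25 steps, early stopping after 5 evaluations without improvement
#   - --resume-from-checkpoint auto presumably resumes from the most recent checkpoint in $OUTPUT_DIR when one exists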
python3 scripts/training/finetune_codellama.py \
    --base-model "$BASE_MODEL" \
    --dataset "$DATASET" \
    --output-dir "$OUTPUT_DIR" \
    --resume-from-checkpoint auto \
    --max-length 1536 \
    --num-epochs 5 \
    --batch-size 2 \
    --gradient-accumulation 4 \
    --learning-rate 2e-5 \
    --lora-r 48 \
    --lora-alpha 96 \
    --lora-dropout 0.15 \
    --warmup-ratio 0.1 \
    --eval-steps 25 \
    --save-steps 25 \
    --early-stopping-patience 5 \
    --logging-steps 5

| echo "" |
| echo "======================================================================" |
| echo "✅ Training Complete!" |
| echo "======================================================================" |
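
# Example launch for a long-running job (hypothetical filename; rename to match
# whatever this script is saved as in the repo):
#   nohup bash train_codellama_fifo.sh > train.log 2>&1 &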