#!/bin/bash
#
# Launch a full SFT training run for the "Elizabeth" Qwen3-8B model with
# MLflow and TensorBoard tracking, then archive run metadata alongside the
# model artifacts.
#
# -e: exit on error; -u: error on unset variables; -o pipefail: a pipeline
# fails if any stage fails (plain `set -e` would let a failing training
# process hidden behind `tee` go unnoticed).
set -euo pipefail

# Fixed locations for model artifacts, logs, and experiment tracking data.
readonly MODEL_DIR="/home/x/adaptai/experiments/qwen3-8b-elizabeth-sft"
readonly LOG_DIR="/home/x/adaptai/training_logs"
readonly TENSORBOARD_DIR="/home/x/adaptai/tensorboard"
readonly MLFLOW_DIR="/home/x/adaptai/mlflow"
readonly EXPERIMENT_DIR="/home/x/adaptai/experiments"

# Ensure all working directories exist before any service writes to them.
mkdir -p "$MODEL_DIR" "$LOG_DIR" "$TENSORBOARD_DIR" "$MLFLOW_DIR" "$EXPERIMENT_DIR"
|
|
| |
# Start the MLflow tracking server in the background, logging to its own
# file, and remember the PID so it can be shut down after training.
# (Original echo prefix was a mojibake-corrupted emoji; replaced with text.)
echo "Starting MLflow server..."
nohup mlflow server \
  --backend-store-uri "file://$MLFLOW_DIR" \
  --default-artifact-root "$EXPERIMENT_DIR" \
  --host 0.0.0.0 \
  --port 5000 \
  > "$LOG_DIR/mlflow_server.log" 2>&1 &
MLFLOW_PID=$!
echo "MLflow server started with PID: $MLFLOW_PID"
|
|
| |
# Start TensorBoard in the background on all interfaces; capture its PID
# for later shutdown. Output goes to its own log file.
echo "Starting TensorBoard..."
nohup tensorboard --logdir="$TENSORBOARD_DIR" --port=6006 --bind_all > "$LOG_DIR/tensorboard.log" 2>&1 &
TENSORBOARD_PID=$!
echo "TensorBoard started with PID: $TENSORBOARD_PID"
|
|
| |
# Announce the run and where the tracking UIs can be reached.
# `hostname -I` prints all host addresses; awk selects the first one.
# Hoisted into a variable so the lookup runs once instead of per-echo.
HOST_IP=$(hostname -I | awk '{print $1}')
echo "Starting Elizabeth training with full tracking..."
echo "Start time: $(date -Iseconds)"
echo "MLflow UI: http://${HOST_IP}:5000"
echo "TensorBoard: http://${HOST_IP}:6006"
|
|
| |
# Run training, mirroring all output (stdout+stderr) to a timestamped log.
# The pipeline's exit status is tee's, not python3's, so without `pipefail`
# a failed training run would be silently ignored and the script would go
# on to report success. Check PIPESTATUS[0] explicitly (capture it
# immediately — any subsequent command overwrites the array).
python3 /home/x/adaptai/aiml/train_elizabeth_mlflow.py \
  --output_dir "$MODEL_DIR" \
  --logging_dir "$TENSORBOARD_DIR" \
  2>&1 | tee "$LOG_DIR/training_$(date +%Y%m%d_%H%M%S).log"
train_status=${PIPESTATUS[0]}
if (( train_status != 0 )); then
  echo "Training failed with exit status $train_status" >&2
  # Best-effort cleanup of the tracking services before bailing out.
  kill "$MLFLOW_PID" "$TENSORBOARD_PID" 2>/dev/null || true
  exit "$train_status"
fi
|
|
| |
# Report completion. The original echo string was split across two lines by
# a corrupted multi-byte (emoji) character; rejoined onto a single line.
echo "Training completed!"
echo "End time: $(date -Iseconds)"
|
|
| |
# Stop the background tracking services (SIGTERM). PIDs are quoted, and a
# kill of an already-exited process is tolerated — under `set -e` it would
# otherwise abort the script before the training-info file is written.
kill "$MLFLOW_PID" 2>/dev/null || true
kill "$TENSORBOARD_PID" 2>/dev/null || true
echo "Services stopped"
|
|
| |
# Write a markdown summary of the run next to the model artifacts.
# The heredoc delimiter is intentionally unquoted so $(date ...), $(hostname ...)
# and the $*_DIR variables expand when the file is written.
# NOTE(review): both timestamps below are evaluated at write time, so
# "Start Time" actually records when this summary was written, not when
# training began — capture the start time before training to fix. TODO confirm.
echo "Saving comprehensive training information..."
cat > "$MODEL_DIR/training_info.md" << EOF
# Elizabeth Nova Training - Complete Run

## Experiment Details
- **Start Time**: $(date -Iseconds)
- **End Time**: $(date -Iseconds)
- **Model**: Qwen3-8B with YaRN 128K
- **GPUs**: 2x H200
- **Training Examples**: 2879
- **Validation Examples**: 319
- **Context Length**: 131072
- **Batch Size**: 32 (effective)
- **Learning Rate**: 1.2e-5
- **Epochs**: 2

## Tracking & Monitoring
- **MLflow Server**: http://$(hostname -I | awk '{print $1}'):5000
- **TensorBoard**: http://$(hostname -I | awk '{print $1}'):6006
- **Log Directory**: $LOG_DIR
- **Model Directory**: $MODEL_DIR
- **MLflow Storage**: $MLFLOW_DIR

## Dataset Composition
- Tool Use Examples: 198
- Synthetic Corpus: 2681
- Total Training: 2879
- Validation: 319 (10%)

## Architecture Features
- Pure weight drift (no LoRA/adapters)
- YaRN RoPE scaling for 128K context
- BFloat16 precision
- Gradient checkpointing
- Tensor parallelism on 2x H200

## Versioning & Lineage
- All logs preserved permanently
- MLflow experiment tracking
- Model registry with versioning
- Complete dataset lineage

## Next Steps
1. Evaluate tool call accuracy
2. Deploy to vLLM serving
3. Set up continuous evaluation
4. Begin autonomous training cycle
EOF
|
|
# Final summary of where everything landed. (Mojibake emoji prefixes in the
# original echoes replaced with plain text.)
echo "Training finished with complete tracking!"
echo "MLflow experiments: $MLFLOW_DIR"
echo "TensorBoard logs: $TENSORBOARD_DIR"
echo "Model artifacts: $MODEL_DIR"
echo "Training logs: $LOG_DIR"
echo "MLflow UI: http://$(hostname -I | awk '{print $1}'):5000"