#!/bin/bash
# Elizabeth Training Launcher with MLflow
# Complete experiment tracking and versioning
# Exit on errors, unset variables, and failures anywhere in a pipeline
# (pipefail matters for the `python ... | tee` training pipeline below)
set -euo pipefail
# Configuration
MODEL_DIR="/home/x/adaptai/experiments/qwen3-8b-elizabeth-sft"
LOG_DIR="/home/x/adaptai/training_logs"
TENSORBOARD_DIR="/home/x/adaptai/tensorboard"
MLFLOW_DIR="/home/x/adaptai/mlflow"
EXPERIMENT_DIR="/home/x/adaptai/experiments"
HOST_IP="$(hostname -I | awk '{print $1}')"  # first local IP, used for the UI URLs below
# Create directories
mkdir -p "$MODEL_DIR" "$LOG_DIR" "$TENSORBOARD_DIR" "$MLFLOW_DIR" "$EXPERIMENT_DIR"
# Start MLflow server in background
echo "πŸ”¬ Starting MLflow server..."
nohup mlflow server \
    --backend-store-uri "file://$MLFLOW_DIR" \
    --default-artifact-root "$EXPERIMENT_DIR" \
    --host 0.0.0.0 \
    --port 5000 \
    > "$LOG_DIR/mlflow_server.log" 2>&1 &
MLFLOW_PID=$!
echo "MLflow server started with PID: $MLFLOW_PID"
# Start TensorBoard in background
echo "πŸ“Š Starting TensorBoard..."
nohup tensorboard --logdir="$TENSORBOARD_DIR" --port=6006 --bind_all > "$LOG_DIR/tensorboard.log" 2>&1 &
TENSORBOARD_PID=$!
echo "TensorBoard started with PID: $TENSORBOARD_PID"

# Make sure the background services are stopped even if training exits early
trap 'kill "$MLFLOW_PID" "$TENSORBOARD_PID" 2>/dev/null || true' EXIT
# Training command
echo "πŸš€ Starting Elizabeth training with full tracking..."
echo "⏰ Start time: $(date -Iseconds)"
echo "πŸ“ˆ MLflow UI: http://$(hostname -I | awk '{print $1}'):5000"
echo "πŸ“Š TensorBoard: http://$(hostname -I | awk '{print $1}'):6006"
# Run training with MLflow tracking
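# MLflow clients honor the MLFLOW_TRACKING_URI environment variable, so point
# the training script at the local server started above (this assumes
# train_elizabeth_mlflow.py does not hardcode its own tracking URI).
export MLFLOW_TRACKING_URI="http://127.0.0.1:5000"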
python3 /home/x/adaptai/aiml/train_elizabeth_mlflow.py \
    --output_dir "$MODEL_DIR" \
    --logging_dir "$TENSORBOARD_DIR" \
    2>&1 | tee "$LOG_DIR/training_$(date +%Y%m%d_%H%M%S).log"
# Record training completion time
END_TIME="$(date -Iseconds)"
echo "✅ Training completed!"
echo "⏰ End time: $END_TIME"
# Stop services (the EXIT trap also covers early-exit paths)
kill "$MLFLOW_PID" "$TENSORBOARD_PID" 2>/dev/null || true
echo "Services stopped"
# Save comprehensive training info
echo "πŸ’Ύ Saving comprehensive training information..."
cat > "$MODEL_DIR/training_info.md" << EOF
# Elizabeth Nova Training - Complete Run
## Experiment Details
- **Start Time**: $START_TIME
- **End Time**: $END_TIME
- **Model**: Qwen3-8B with YaRN 128K
- **GPUs**: 2x H200
- **Training Examples**: 2879
- **Validation Examples**: 319
- **Context Length**: 131072
- **Batch Size**: 32 (effective)
- **Learning Rate**: 1.2e-5
- **Epochs**: 2
## Tracking & Monitoring
- **MLflow Server**: http://$HOST_IP:5000
- **TensorBoard**: http://$HOST_IP:6006
- **Log Directory**: $LOG_DIR
- **Model Directory**: $MODEL_DIR
- **MLflow Storage**: $MLFLOW_DIR
## Dataset Composition
- Tool Use Examples: 198
- Synthetic Corpus: 2681
- Total Training: 2879
- Validation: 319 (10%)
## Architecture Features
- Full-parameter fine-tuning (pure weight updates; no LoRA/adapters)
- YaRN RoPE scaling for 128K context
- BFloat16 precision
- Gradient checkpointing
- Tensor parallelism on 2x H200
## Versioning & Lineage
- All logs preserved permanently
- MLflow experiment tracking
- Model registry with versioning
- Complete dataset lineage
## Next Steps
1. Evaluate tool call accuracy
2. Deploy to vLLM serving
3. Set up continuous evaluation
4. Begin autonomous training cycle
EOF
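
# A possible follow-up for step 2 of the "Next Steps" above: serve the tuned
# checkpoint with vLLM's OpenAI-compatible server. Left commented out as a
# sketch; the flags assume 2 GPUs and the 128K context configured for this run.
# vllm serve "$MODEL_DIR" --tensor-parallel-size 2 --max-model-len 131072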
echo "πŸŽ‰ Training finished with complete tracking!"
echo "πŸ“ˆ MLflow experiments: $MLFLOW_DIR"
echo "πŸ“Š TensorBoard logs: $TENSORBOARD_DIR"
echo "πŸ’Ύ Model artifacts: $MODEL_DIR"
echo "πŸ“ Training logs: $LOG_DIR"
echo "πŸ”— MLflow UI: http://$(hostname -I | awk '{print $1}'):5000"