#!/bin/bash
#
# BitTransformerLM OPTIMIZED Massive Scale Training Launcher
# ==========================================================
# 
# Launches 680M parameter BitTransformerLM with ALL optimizations enabled!
# Uses DataParallel for reliable multi-GPU training.
#

set -e  # Exit on any error

echo "πŸš€ BITTRANSFORMERLM OPTIMIZED MASSIVE SCALE TRAINING"
echo "====================================================="
echo "Target: 680 MILLION parameters (CONFIRMED!)"
echo "Hardware: Multi-GPU with DataParallel"
echo "Dataset: WikiText-103 with bit-level encoding"
echo "Optimizations: ALL ENABLED!"
echo ""

# Set environment variables for optimal performance
export CUDA_VISIBLE_DEVICES=0,1,2,3
export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
export OMP_NUM_THREADS=12

# Set HuggingFace token
export HF_TOKEN="${HF_TOKEN:-your-token-here}"
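# Optional sanity check: warn early if HF_TOKEN was left at its placeholder;
# gated HuggingFace downloads would otherwise fail later with a less obvious error.
if [ "$HF_TOKEN" = "your-token-here" ]; then
    echo "⚠️  HF_TOKEN appears unset; gated HuggingFace downloads may fail."
fi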

# Change to BitTransformerLM directory
cd /data/BitTransformerLM/BitTransformerLM

# Create checkpoint directory
mkdir -p /data/checkpoints

echo "πŸ” Hardware Check:"
python -c "
import torch
print(f'CUDA Available: {torch.cuda.is_available()}')
print(f'GPU Count: {torch.cuda.device_count()}')
for i in range(torch.cuda.device_count()):
    props = torch.cuda.get_device_properties(i)
    print(f'  GPU {i}: {props.name} ({props.total_memory / 1024**3:.1f}GB)')
"

echo ""
echo "βš™οΈ OPTIMIZATIONS ENABLED:"
echo "  βœ… Reversible Layers (50% memory savings)"
echo "  βœ… Gradient Checkpointing" 
echo "  βœ… Mixed Precision (FP16)"
echo "  βœ… Memory-Mapped Dataset Loading"
echo "  βœ… Safety Telemetry (K, C, S metrics)"
echo "  βœ… Bit-Native Processing"
echo "  βœ… DataParallel Multi-GPU"
echo ""

echo "πŸ“Š Training Configuration:"
echo "  β€’ Parameters: 679,962,626 (680M)"
echo "  β€’ Architecture: d_model=1536, layers=24, heads=24"
echo "  β€’ Batch Size: 2 per GPU"
echo "  β€’ Gradient Accumulation: 16 steps"
echo "  β€’ Effective Batch Size: 128"
echo "  β€’ Learning Rate: 3e-4 with OneCycle"
echo "  β€’ Dataset: WikiText-103 (2000 training samples)"
echo ""

echo "🎯 Starting optimized training..."
echo "   This version should train successfully!"
echo ""

# Launch optimized training
python massive_scale_simple.py

echo ""
echo "🏁 Training completed successfully!"
echo "Check /data/checkpoints/ for saved models"