File size: 1,995 Bytes
9b87049
341a160
9b87049
341a160
 
 
 
 
 
 
 
 
 
 
9b87049
 
 
 
 
 
 
341a160
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e9dd801
 
 
341a160
 
 
 
 
 
 
 
 
 
 
 
 
 
e9dd801
 
341a160
e9dd801
341a160
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
#!/bin/zsh
# Cluster environment setup for GR00T N1.7 training
# Usage: source setup_cluster.sh

# --- Redirect caches to scratch ---
# Point every cache-hungry tool at node-local scratch so the quota-limited
# network home is never written to.
# NOTE(review): overriding HOME also redirects ssh/git/shell config lookups,
# not just caches — confirm that side effect is intended.
export HOME=/var/tmp/szang
export HF_HOME=/var/tmp/szang
export TORCH_HOME=/var/tmp/szang
# TRANSFORMERS_CACHE is deprecated in recent transformers releases (HF_HOME
# supersedes it); kept for compatibility with older installs.
export TRANSFORMERS_CACHE=/var/tmp/szang
export UV_CACHE_DIR=/var/tmp/szang/.cache/uv
# Pre-create cache directories in one call; quote the expansion.
mkdir -p /var/tmp/szang/.triton/autotune \
         /var/tmp/szang/.cache/torch/kernels \
         "$UV_CACHE_DIR"

# --- Load modules (compatible with zsh) ---
# The environment-modules "module" command is a shell function; a fresh zsh
# may not have it yet, so bootstrap it from the system profile script first.
if ! typeset -f module > /dev/null 2>&1 && [[ -f /etc/profile.d/modules.sh ]]; then
  source /etc/profile.d/modules.sh
fi
# Only attempt the load once 'module' is actually defined — the original
# form ran 'module load' even when sourcing modules.sh failed to define it,
# producing a "command not found" error.
if typeset -f module > /dev/null 2>&1; then
  module load gcc/10.5.0 cuda/11.8.0
else
  echo "WARNING: 'module' command unavailable; gcc/cuda modules not loaded" >&2
fi

# --- Library paths ---
# Prepend the project env's libs. ${VAR:+:$VAR} avoids appending a trailing
# ':' when LD_LIBRARY_PATH was unset — an empty entry makes the dynamic
# linker search the current working directory.
export LD_LIBRARY_PATH=/global/scratch/users/ghr/Projects/szang/envs/video_act/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}

# --- NCCL settings ---
export NCCL_P2P_DISABLE=1   # disable direct GPU peer-to-peer transfers
export NCCL_IB_DISABLE=1    # disable InfiniBand transport
export NCCL_DEBUG=INFO      # verbose NCCL logging (noisy; useful while debugging)
# NOTE(review): NVLS / P2P_LEVEL=NVL look contradictory with P2P_DISABLE=1
# above — NCCL ignores the P2P level when P2P is disabled; confirm which
# configuration is actually intended.
export NCCL_NVLS_ENABLE=1
export NCCL_P2P_LEVEL=NVL
export NCCL_TIMEOUT=1200    # presumably seconds — TODO confirm consumer (torch launcher?)

# --- Training settings ---
export OMP_NUM_THREADS=4              # cap OpenMP threads per process
export TOKENIZERS_PARALLELISM=false   # silence HF tokenizers fork warning

# --- tmux ---
# Best effort: fails harmlessly when not running inside a tmux session.
tmux set mouse on 2>/dev/null || true

# --- Activate venv ---
# This script is sourced (see usage header), so use 'return' — not 'exit' —
# on failure to avoid killing the caller's interactive shell. The original
# left 'cd' unchecked, so a missing project dir would silently source
# .venv/bin/activate relative to whatever directory the caller was in.
cd /global/scratch/users/ghr/Projects/szang/GR00TN1.7 || {
  echo "ERROR: project directory missing; venv not activated" >&2
  return 1
}
if [[ -f .venv/bin/activate ]]; then
  source .venv/bin/activate
else
  echo "ERROR: .venv/bin/activate not found in $PWD" >&2
  return 1
fi

# --- Verify ---
# Sanity-check the toolchain after setup. Failures here only print
# diagnostics; nothing aborts (the script is sourced and does not set -e).
# Confirm the CUDA compiler is on PATH and show what CUDA_HOME resolved to.
nvcc --version && echo "CUDA_HOME: $CUDA_HOME"
# Report the PyTorch build, CUDA/cuDNN versions, and every visible GPU with
# its total memory.
python -c "
import torch
print('PyTorch version:', torch.__version__)
print('CUDA available:', torch.cuda.is_available())
print('CUDA version:', torch.version.cuda)
print('cuDNN version:', torch.backends.cudnn.version())
print('GPU count:', torch.cuda.device_count())
for i in range(torch.cuda.device_count()):
    print(f'  GPU {i}: {torch.cuda.get_device_name(i)} ({torch.cuda.get_device_properties(i).total_memory / 1024**3:.1f} GB)')
print('Current device:', torch.cuda.current_device() if torch.cuda.is_available() else 'N/A')
"
# Confirm the two key training dependencies import cleanly; an ImportError
# traceback here means the venv is incomplete.
python -c "import transformers; print('Transformers version:', transformers.__version__)"
python -c "import flash_attn; print('Flash Attention version:', flash_attn.__version__)"