#!/bin/bash
set -euo pipefail
export PATH="/venv/main/bin:$PATH"
export PYTHONDONTWRITEBYTECODE=1
export CUDA_VISIBLE_DEVICES=0
# Scratch workspace on tmpfs; everything below is cloned/built under here.
export WORK=/dev/shm/eval

# Timestamped logger. Uses printf rather than echo so messages beginning
# with '-n'/'-e' or containing backslashes are printed verbatim.
log() { printf '[%s] %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$*"; }
# Opening banner, then create and enter the scratch workspace.
banner='============================================'
log "$banner"
log " Setup: Polish LLM Eval for QuIP# Bielik"
log "$banner"
mkdir -p -- "$WORK"
cd -- "$WORK"
# 1. HF login first (needed for private model download).
# SECURITY FIX: the access token used to be hardcoded in this script. It is
# now taken from the HF_TOKEN environment variable so it never lands in
# version control or process listings; abort early with a clear message if
# the variable is missing.
log "HuggingFace login..."
: "${HF_TOKEN:?HF_TOKEN must be set to a HuggingFace access token}"
python -c "
import os
from huggingface_hub import login
login(token=os.environ['HF_TOKEN'])
print('Login OK')
"
# 2. Clone the speakleash lm-eval fork that ships the Polish task suite,
# then install it in editable mode. Idempotent: the clone is skipped when
# the checkout already exists (e.g. on a re-run).
log "Cloning speakleash/lm-evaluation-harness (polish3 branch)..."
[[ -d "$WORK/lm-evaluation-harness" ]] || \
  git clone https://github.com/speakleash/lm-evaluation-harness.git -b polish3
cd "$WORK/lm-evaluation-harness"
pip install -e . 2>&1 | tail -5
log "lm-eval installed"
# 3. Fetch Cornell-RelaxML/quip-sharp, which provides the QuIP# model-loading
# code; skipped when a previous run already cloned it.
cd "$WORK"
[[ -d "$WORK/quip-sharp" ]] || git clone https://github.com/Cornell-RelaxML/quip-sharp.git
cd "$WORK/quip-sharp"
# 4. Compile the quiptools CUDA extension required for QuIP# inference.
# The build runs in a subshell so the working directory is restored even
# though we never cd back explicitly.
log "Building quiptools CUDA kernels..."
(
  cd quiptools
  python setup.py install 2>&1 | tail -5
)
log "quiptools built"
# 5. Remaining Python runtime dependencies for quip-sharp.
pip install glog primefac protobuf sentencepiece 2>&1 | tail -3
# 6. Apply patches
log "Applying quip-sharp patches..."
# torch.load compat: recent PyTorch releases flipped torch.load's default to
# weights_only=True, which rejects checkpoints containing pickled non-tensor
# objects (as QuIP# checkpoints do). Wrap torch.load so weights_only defaults
# back to False in the two quip-sharp files that call it.
# NOTE: this program is inside double quotes, so the shell expands $WORK
# before Python ever runs.
python -c "
for f in ['lib/utils/unsafe_import.py', 'eval/eval_zeroshot.py']:
    try:
        path = '$WORK/quip-sharp/' + f
        with open(path, 'r') as fh:
            c = fh.read()
        # Idempotence guard: only patch files that use torch.load and were
        # not already patched (a patched file contains 'weights_only').
        if 'weights_only' not in c and 'torch.load' in c:
            # Splice the shim right after the first 'import torch' line.
            c = c.replace('import torch\n', 'import torch\n_orig_load = torch.load\ndef _compat_load(*a, **kw):\n kw.setdefault(\"weights_only\", False)\n return _orig_load(*a, **kw)\ntorch.load = _compat_load\n', 1)
            with open(path, 'w') as fh:
                fh.write(c)
            print(f'Patched {f}')
    except FileNotFoundError:
        # Best-effort: file layout may differ between quip-sharp revisions.
        pass
"
# fast_hadamard_transform fallback: make the import of the optional CUDA
# extension non-fatal by rewriting matmul_had.py's first line into a
# try/except that records availability in HAS_FAST_HAD. The follow-up patch
# below consumes that flag. $WORK is expanded by the shell before Python runs.
python -c "
path = '$WORK/quip-sharp/lib/utils/matmul_had.py'
with open(path, 'r') as f:
    content = f.read()
# Only rewrite when the file still starts with the bare import (idempotent).
if content.startswith('import fast_hadamard_transform'):
    content = 'try:\n import fast_hadamard_transform\n HAS_FAST_HAD = True\nexcept ImportError:\n HAS_FAST_HAD = False\n' + content.split('\n', 1)[1]
    with open(path, 'w') as f:
        f.write(content)
    print('matmul_had.py patched')
"
# hadamard fallback implementation: replace the single call into the
# fast_hadamard_transform CUDA kernel with a branch that falls back to a
# pure-PyTorch iterative Walsh-Hadamard butterfly when the extension is
# unavailable (HAS_FAST_HAD is set by the patch above; this block depends
# on that patch having run first).
# NOTE(review): the indentation inside the triple-quoted replacement must
# match the enclosing function body in matmul_had.py exactly, since the
# substitution is purely textual — confirm against the pinned quip-sharp
# revision.
python -c "
path = '$WORK/quip-sharp/lib/utils/matmul_had.py'
with open(path, 'r') as f:
    c = f.read()
# Guards: only patch after the import patch ran, and only once
# ('Walsh-Hadamard' marks an already-patched file).
if 'HAS_FAST_HAD' in c and 'Walsh-Hadamard' not in c:
    old = 'return fast_hadamard_transform.hadamard_transform(x, scale)'
    new = '''if HAS_FAST_HAD:
        return fast_hadamard_transform.hadamard_transform(x, scale)
    else:
        # Pure PyTorch Walsh-Hadamard fallback
        n = x.shape[-1]
        orig_shape = x.shape
        x = x.contiguous().view(-1, n)
        h = 1
        while h < n:
            x = x.view(-1, n // (2 * h), 2, h)
            a = x[:, :, 0, :] + x[:, :, 1, :]
            b = x[:, :, 0, :] - x[:, :, 1, :]
            x = torch.stack([a, b], dim=2).view(-1, n)
            h *= 2
        return (x * scale).view(orig_shape)'''
    if old in c:
        c = c.replace(old, new)
        with open(path, 'w') as f:
            f.write(c)
        print('hadamard fallback added')
"
# 7. Pull the pre-quantized QuIP# checkpoint from the HuggingFace Hub into
# the local workspace (relies on the earlier HF login for access).
log "Downloading QuIP# model..."
mkdir -p -- "$WORK/model"
# Unquoted heredoc delimiter so the shell expands $WORK inside the program.
python - <<PY
from huggingface_hub import snapshot_download
path = snapshot_download('Jakubrd4/bielik-q2-variant-a', local_dir='$WORK/model')
print(f'Model downloaded to: {path}')
PY
log "Model downloaded"
# 8. Warm the local cache with the base model's tokenizer so the eval run
# doesn't stall on a download later. Quoted delimiter: no shell expansion.
log "Pre-downloading tokenizer..."
python - <<'PY'
from transformers import AutoTokenizer
tok = AutoTokenizer.from_pretrained('speakleash/Bielik-11B-v2.3-Instruct')
print(f'Tokenizer loaded: {tok.name_or_path}, vocab_size={tok.vocab_size}')
PY
# 9. Sanity check: enumerate the Polish eval tasks the fork provides.
# Best-effort only — a failure here must not abort setup, hence the
# stderr suppression and the || fallback message.
log "Available Polish tasks:"
cd "$WORK/lm-evaluation-harness"
python - 2>/dev/null <<'PY' || echo "Task listing skipped"
from lm_eval import tasks
mgr = tasks.TaskManager()
polish = [t for t in mgr.all_tasks if 'polish' in t.lower() or 'polemo' in t.lower()]
for t in sorted(polish):
    print(f' {t}')
print(f'Total: {len(polish)} Polish tasks')
PY
# Closing banner.
for msg in \
  "============================================" \
  " Setup complete! Ready for eval." \
  "============================================"; do
  log "$msg"
done