#!/bin/bash
# Setup: Polish LLM evaluation environment for a QuIP#-quantized Bielik model.
# Installs lm-evaluation-harness and quip-sharp, applies compatibility patches,
# and pre-downloads the model + tokenizer.
set -euo pipefail

# Prefer tools from the project virtualenv.
export PATH="/venv/main/bin:$PATH"
# Keep checkouts free of .pyc clutter.
export PYTHONDONTWRITEBYTECODE=1
# Pin all CUDA work to the first GPU.
export CUDA_VISIBLE_DEVICES=0
# RAM-backed scratch space for clones, builds, and downloads.
export WORK=/dev/shm/eval
| |
|
# log MESSAGE... — print the arguments to stdout prefixed with a timestamp.
log() {
  local stamp
  stamp=$(date '+%Y-%m-%d %H:%M:%S')
  echo "[$stamp] $*"
}
| |
|
# Announce the run, then create and enter the scratch workspace.
banner="============================================"
log "$banner"
log " Setup: Polish LLM Eval for QuIP# Bielik"
log "$banner"

mkdir -p "$WORK"
cd "$WORK"
| |
|
| | |
log "HuggingFace login..."
# SECURITY: never hard-code the HF token in the script. Require it from the
# environment and hand it to Python via the environment as well, so it never
# appears in the source, in argv, or in `ps` output.
: "${HF_TOKEN:?HF_TOKEN environment variable must be set}"
export HF_TOKEN
python - <<'PY'
import os
from huggingface_hub import login
login(token=os.environ['HF_TOKEN'])
print('Login OK')
PY
| |
|
| | |
# Fetch SpeakLeash's lm-evaluation-harness fork (polish3 branch) and install
# it in editable mode. Clone is skipped when the checkout already exists.
log "Cloning speakleash/lm-evaluation-harness (polish3 branch)..."
harness_dir="$WORK/lm-evaluation-harness"
if [[ ! -d "$harness_dir" ]]; then
  git clone https://github.com/speakleash/lm-evaluation-harness.git -b polish3
fi
cd "$harness_dir"
pip install -e . 2>&1 | tail -5
log "lm-eval installed"
| |
|
| | |
# Fetch the QuIP# reference implementation (Cornell-RelaxML); skip the clone
# if a previous run already checked it out.
cd "$WORK"
[[ -d "$WORK/quip-sharp" ]] || git clone https://github.com/Cornell-RelaxML/quip-sharp.git
cd "$WORK/quip-sharp"
| |
|
| | |
log "Building quiptools CUDA kernels..."
# Build inside a subshell so the working directory is restored no matter how
# the build exits — the original `cd quiptools && … && cd ..` chain skipped
# the trailing `cd ..` on failure, leaving cwd dependent on success. With
# `set -eo pipefail`, a failed build still aborts the script.
(
  cd quiptools
  python setup.py install 2>&1 | tail -5
)
log "quiptools built"
| |
|
| | |
# Runtime dependencies of quip-sharp that the harness install does not pull in.
pip install glog primefac protobuf sentencepiece 2>&1 | tail -3



log "Applying quip-sharp patches..."
| |
|
| | |
# Patch 1: newer PyTorch versions default torch.load(weights_only=True), which
# rejects quip-sharp's pickled checkpoints. Wrap torch.load so weights_only
# defaults back to False. Idempotent: skipped when 'weights_only' already
# appears in the target file; files absent from the checkout are ignored.
# NOTE(review): the \" and \n escapes below are resolved by the shell's
# double-quoting BEFORE Python parses the code — do not reformat casually.
python -c "
for f in ['lib/utils/unsafe_import.py', 'eval/eval_zeroshot.py']:
    try:
        path = '$WORK/quip-sharp/' + f
        with open(path, 'r') as fh:
            c = fh.read()
        if 'weights_only' not in c and 'torch.load' in c:
            c = c.replace('import torch\n', 'import torch\n_orig_load = torch.load\ndef _compat_load(*a, **kw):\n    kw.setdefault(\"weights_only\", False)\n    return _orig_load(*a, **kw)\ntorch.load = _compat_load\n', 1)
            with open(path, 'w') as fh:
                fh.write(c)
            print(f'Patched {f}')
    except FileNotFoundError:
        pass
"
| |
|
| | |
# Patch 2: make the fast_hadamard_transform import optional so matmul_had.py
# loads on hosts where the CUDA extension is not built; records availability
# in a HAS_FAST_HAD flag. Only applies when the import is the file's first
# line, so re-running is a no-op.
python -c "
path = '$WORK/quip-sharp/lib/utils/matmul_had.py'
with open(path, 'r') as f:
    content = f.read()
if content.startswith('import fast_hadamard_transform'):
    content = 'try:\n    import fast_hadamard_transform\n    HAS_FAST_HAD = True\nexcept ImportError:\n    HAS_FAST_HAD = False\n' + content.split('\n', 1)[1]
    with open(path, 'w') as f:
        f.write(content)
    print('matmul_had.py patched')
"
| |
|
| | |
# Patch 3: add a pure-PyTorch Walsh-Hadamard transform as a fallback for when
# HAS_FAST_HAD is False (flag introduced by the patch above). Guarded by
# 'Walsh-Hadamard' not being present yet, so re-running is a no-op.
# NOTE(review): the inserted block's indentation assumes the original
# 'return fast_hadamard_transform...' line sits at exactly one indent level
# inside a function body — confirm against the checked-out matmul_had.py.
# NOTE(review): the fallback references `torch`; presumably matmul_had.py
# already imports it — verify.
python -c "
path = '$WORK/quip-sharp/lib/utils/matmul_had.py'
with open(path, 'r') as f:
    c = f.read()
if 'HAS_FAST_HAD' in c and 'Walsh-Hadamard' not in c:
    old = 'return fast_hadamard_transform.hadamard_transform(x, scale)'
    new = '''if HAS_FAST_HAD:
        return fast_hadamard_transform.hadamard_transform(x, scale)
    else:
        # Pure PyTorch Walsh-Hadamard fallback
        n = x.shape[-1]
        orig_shape = x.shape
        x = x.contiguous().view(-1, n)
        h = 1
        while h < n:
            x = x.view(-1, n // (2 * h), 2, h)
            a = x[:, :, 0, :] + x[:, :, 1, :]
            b = x[:, :, 0, :] - x[:, :, 1, :]
            x = torch.stack([a, b], dim=2).view(-1, n)
            h *= 2
        return (x * scale).view(orig_shape)'''
    if old in c:
        c = c.replace(old, new)
        with open(path, 'w') as f:
            f.write(c)
        print('hadamard fallback added')
"
| |
|
| | |
# Download the quantized model snapshot into the scratch workspace.
# $WORK is expanded by the shell before Python ever runs.
log "Downloading QuIP# model..."
mkdir -p "$WORK/model"
python -c "
from huggingface_hub import snapshot_download
path = snapshot_download('Jakubrd4/bielik-q2-variant-a', local_dir='$WORK/model')
print(f'Model downloaded to: {path}')
"
log "Model downloaded"
| |
|
| | |
# Warm the Hugging Face cache with the base model's tokenizer so the eval run
# does not need the network for it later.
log "Pre-downloading tokenizer..."
python -c "
from transformers import AutoTokenizer
tok = AutoTokenizer.from_pretrained('speakleash/Bielik-11B-v2.3-Instruct')
print(f'Tokenizer loaded: {tok.name_or_path}, vocab_size={tok.vocab_size}')
"
| |
|
| | |
# Sanity check: enumerate the Polish-language tasks the harness registered.
# Deliberately best-effort — stderr is discarded and any failure falls through
# to the echo so setup still completes.
log "Available Polish tasks:"
cd "$WORK/lm-evaluation-harness"
python -c "
from lm_eval import tasks
mgr = tasks.TaskManager()
polish = [t for t in mgr.all_tasks if 'polish' in t.lower() or 'polemo' in t.lower()]
for t in sorted(polish):
    print(f'  {t}')
print(f'Total: {len(polish)} Polish tasks')
" 2>/dev/null || echo "Task listing skipped"
| |
|
# Closing banner.
rule="============================================"
log "$rule"
log " Setup complete! Ready for eval."
log "$rule"
| |
|