| """Download and test Qwen2.5-3B-Instruct on MPS.""" |
|
|
| import time |
| import torch |
| from transformers import AutoModelForCausalLM, AutoTokenizer |
|
|
| MODEL_ID = "Qwen/Qwen2.5-3B-Instruct" |
|
|
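
# Fail fast before downloading ~6GB of weights: this script assumes an
# Apple Silicon Mac running a PyTorch build with MPS support.
assert torch.backends.mps.is_available(), "MPS backend not available"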
| print(f"Downloading {MODEL_ID} (~6GB, one-time)...") |
| t0 = time.time() |
|
|

tok = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    trust_remote_code=True,
    torch_dtype=torch.float16,  # half precision keeps the ~3B weights near 6GB
).to("mps")
model.eval()
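
# Optional memory sanity check, a sketch assuming PyTorch 2.x where torch.mps
# exposes allocator stats; fp16 weights for a ~3B model should sit near 6 GB.
if hasattr(torch, "mps") and hasattr(torch.mps, "current_allocated_memory"):
    print(f"MPS memory allocated: {torch.mps.current_allocated_memory() / 1e9:.1f} GB")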

n_params = sum(p.numel() for p in model.parameters()) / 1e6
print(f"Loaded: {n_params:.0f}M params on MPS (float16) in {time.time() - t0:.0f}s")

print("\nTesting generation...")
# Qwen2.5-3B-Instruct is a chat model, so wrap the question in its chat
# template instead of tokenizing the raw string.
messages = [{"role": "user", "content": "What is quantum computing?"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tok(prompt, return_tensors="pt").to("mps")

with torch.no_grad():
    t1 = time.time()
    out = model.generate(
        **inputs,
        max_new_tokens=150,
        temperature=0.7,
        do_sample=True,
        pad_token_id=tok.eos_token_id,
    )
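# MPS runs kernels asynchronously, so synchronize before reading the clock;
# assumes PyTorch 2.x, where torch.mps.synchronize() is available.
if hasattr(torch, "mps"):
    torch.mps.synchronize()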
elapsed = time.time() - t1

# Keep only the newly generated tokens by slicing off the prompt.
gen_ids = out[0][inputs["input_ids"].shape[1]:]
gen_text = tok.decode(gen_ids, skip_special_tokens=True)
n_tokens = len(gen_ids)
tps = n_tokens / max(elapsed, 0.001)

print(f"Speed: {tps:.1f} tokens/sec ({n_tokens} tokens in {elapsed:.1f}s)")
print(f"Response:\n{gen_text[:500]}")
print(f"\nModel ready: {MODEL_ID} runs comfortably on an M4 Max with 36GB via MPS.")