CODE:

```python
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("MiniMaxAI/MiniMax-M2")
model = AutoModelForCausalLM.from_pretrained("MiniMaxAI/MiniMax-M2")

messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```
|
|
ERROR:

```
Traceback (most recent call last):
  File "/tmp/MiniMaxAI_MiniMax-M2_1H18ifN.py", line 20, in <module>
    model = AutoModelForCausalLM.from_pretrained("MiniMaxAI/MiniMax-M2")
  File "/tmp/.cache/uv/environments-v2/e4552f1c3ca9047f/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
    return model_class.from_pretrained(
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/e4552f1c3ca9047f/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/e4552f1c3ca9047f/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
    hf_quantizer, config, dtype, device_map = get_hf_quantizer(
                                              ~~~~~~~~~~~~~~~~^
        config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/e4552f1c3ca9047f/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
    hf_quantizer.validate_environment(
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        dtype=dtype,
        ^^^^^^^^^^^^
    ...<3 lines>...
        weights_only=weights_only,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/e4552f1c3ca9047f/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
    raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
```