CODE:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="MiniMaxAI/MiniMax-M2")
messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)
```

ERROR:
```text
Traceback (most recent call last):
  File "/tmp/MiniMaxAI_MiniMax-M2_00mS19U.py", line 19, in <module>
    pipe = pipeline("text-generation", model="MiniMaxAI/MiniMax-M2")
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1027, in pipeline
    framework, model = infer_framework_load_model(
                       ~~~~~~~~~~~~~~~~~~~~~~~~~~^
        adapter_path if adapter_path is not None else model,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    ...<5 lines>...
        **model_kwargs,
        ^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/pipelines/base.py", line 333, in infer_framework_load_model
    raise ValueError(
        f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
    )
ValueError: Could not load model MiniMaxAI/MiniMax-M2 with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForCausalLM'>,). See the original errors:

while loading with AutoModelForCausalLM, an error is thrown:
Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
    model = model_class.from_pretrained(model, **kwargs)
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
    return model_class.from_pretrained(
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
    hf_quantizer, config, dtype, device_map = get_hf_quantizer(
                                              ~~~~~~~~~~~~~~~~^
        config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
    hf_quantizer.validate_environment(
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        dtype=dtype,
        ^^^^^^^^^^^^
    ...<3 lines>...
        weights_only=weights_only,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
    raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
    model = model_class.from_pretrained(model, **fp32_kwargs)
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
    return model_class.from_pretrained(
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
    hf_quantizer, config, dtype, device_map = get_hf_quantizer(
                                              ~~~~~~~~~~~~~~~~^
        config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
    hf_quantizer.validate_environment(
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        dtype=dtype,
        ^^^^^^^^^^^^
    ...<3 lines>...
        weights_only=weights_only,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
    raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
```
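
The snippet itself is not at fault; the failure is environmental. MiniMax-M2 is published with fine-grained FP8-quantized weights, and the FP8 quantizer's `validate_environment` check aborts when neither a GPU nor an XPU is visible, so the model cannot load on a CPU-only host. A minimal sketch of a workaround, assuming a CUDA-capable machine with enough VRAM for the checkpoint (and `accelerate` installed for device placement):

```python
# A minimal sketch, assuming a CUDA-capable host with sufficient VRAM;
# the FP8 checkpoint only passes validation where a GPU or XPU is visible.
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="MiniMaxAI/MiniMax-M2",
    device_map="auto",  # requires `accelerate`; shards weights across visible GPUs
)
messages = [
    {"role": "user", "content": "Who are you?"},
]
print(pipe(messages))
```

On CPU-only machines there is no supported path for this checkpoint as published: the GPU/XPU check in `quantizer_finegrained_fp8.py` is unconditional, so the options are a GPU or XPU host, or a non-FP8 (e.g. dequantized) variant of the weights, if one is available.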