CODE:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline
pipe = pipeline("text-generation", model="deepseek-ai/DeepSeek-R1", trust_remote_code=True)
messages = [
{"role": "user", "content": "Who are you?"},
]
pipe(messages)
```
ERROR:
```
Traceback (most recent call last):
File "/tmp/deepseek-ai_DeepSeek-R1_0sp29T3.py", line 19, in <module>
pipe = pipeline("text-generation", model="deepseek-ai/DeepSeek-R1", trust_remote_code=True)
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1027, in pipeline
framework, model = infer_framework_load_model(
~~~~~~~~~~~~~~~~~~~~~~~~~~^
adapter_path if adapter_path is not None else model,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
...<5 lines>...
**model_kwargs,
^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/pipelines/base.py", line 333, in infer_framework_load_model
raise ValueError(
f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
)
ValueError: Could not load model deepseek-ai/DeepSeek-R1 with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForCausalLM'>, <class 'transformers.models.deepseek_v3.modeling_deepseek_v3.DeepseekV3ForCausalLM'>). See the original errors:
while loading with AutoModelForCausalLM, an error is thrown:
Traceback (most recent call last):
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
model = model_class.from_pretrained(model, **kwargs)
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 597, in from_pretrained
return model_class.from_pretrained(
~~~~~~~~~~~~~~~~~~~~~~~~~~~^
pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
return func(*args, **kwargs)
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
hf_quantizer, config, dtype, device_map = get_hf_quantizer(
~~~~~~~~~~~~~~~~^
config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
hf_quantizer.validate_environment(
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
dtype=dtype,
^^^^^^^^^^^^
...<3 lines>...
weights_only=weights_only,
^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
model = model_class.from_pretrained(model, **fp32_kwargs)
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 597, in from_pretrained
return model_class.from_pretrained(
~~~~~~~~~~~~~~~~~~~~~~~~~~~^
pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
return func(*args, **kwargs)
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
hf_quantizer, config, dtype, device_map = get_hf_quantizer(
~~~~~~~~~~~~~~~~^
config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
hf_quantizer.validate_environment(
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
dtype=dtype,
^^^^^^^^^^^^
...<3 lines>...
weights_only=weights_only,
^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
while loading with DeepseekV3ForCausalLM, an error is thrown:
Traceback (most recent call last):
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
model = model_class.from_pretrained(model, **kwargs)
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
return func(*args, **kwargs)
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
hf_quantizer, config, dtype, device_map = get_hf_quantizer(
~~~~~~~~~~~~~~~~^
config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
hf_quantizer.validate_environment(
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
dtype=dtype,
^^^^^^^^^^^^
...<3 lines>...
weights_only=weights_only,
^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
model = model_class.from_pretrained(model, **fp32_kwargs)
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
return func(*args, **kwargs)
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
hf_quantizer, config, dtype, device_map = get_hf_quantizer(
~~~~~~~~~~~~~~~~^
config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
hf_quantizer.validate_environment(
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
dtype=dtype,
^^^^^^^^^^^^
...<3 lines>...
weights_only=weights_only,
^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
```
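
The snippet itself is fine; the failure is environmental. The DeepSeek-R1 checkpoint ships with a fine-grained FP8 `quantization_config`, and Transformers' FP8 quantizer validates the environment before loading any weights, raising as soon as it finds neither a CUDA GPU nor an Intel XPU. Below is a minimal sketch of the same call gated on an accelerator being present; it assumes a CUDA machine with enough aggregate VRAM for the sharded FP8 weights, which for the full model realistically means a multi-GPU node:

```python
import torch
from transformers import pipeline

# Fail fast with a clearer message than the deep traceback above.
if not torch.cuda.is_available():
    raise SystemExit("FP8-quantized DeepSeek-R1 needs a GPU or XPU.")

pipe = pipeline(
    "text-generation",
    model="deepseek-ai/DeepSeek-R1",
    trust_remote_code=True,
    device_map="auto",  # shard the checkpoint across all visible GPUs
)
messages = [
    {"role": "user", "content": "Who are you?"},
]
print(pipe(messages))
```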
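For CPU-only environments there is no supported way to load the FP8 checkpoint directly. One pragmatic alternative, sketched here as an assumption rather than an official recommendation, is a distilled DeepSeek-R1 checkpoint such as `deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B`, which is published in bf16 without an FP8 `quantization_config`, so the quantizer check above is never triggered and the model is small enough to run (slowly) on CPU:

```python
from transformers import pipeline

# Distilled R1 variant without FP8 quantization; loads on CPU.
pipe = pipeline(
    "text-generation",
    model="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
)
messages = [
    {"role": "user", "content": "Who are you?"},
]
print(pipe(messages, max_new_tokens=64))
```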