ariG23498 HF Staff committed on
Commit
d5ad80c
·
verified ·
1 Parent(s): 1ffd517

Upload cyankiwi_GLM-4.7-Flash-AWQ-4bit_1.txt with huggingface_hub

Browse files
cyankiwi_GLM-4.7-Flash-AWQ-4bit_1.txt CHANGED
@@ -21,10 +21,41 @@ print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
21
 
22
  ERROR:
23
  Traceback (most recent call last):
24
- File "/tmp/cyankiwi_GLM-4.7-Flash-AWQ-4bit_19kQ86B.py", line 26, in <module>
25
- tokenizer = AutoTokenizer.from_pretrained("cyankiwi/GLM-4.7-Flash-AWQ-4bit")
26
- File "/tmp/.cache/uv/environments-v2/9cbdd13f85033a9e/lib/python3.13/site-packages/transformers/models/auto/tokenization_auto.py", line 1153, in from_pretrained
27
- raise ValueError(
28
- f"Tokenizer class {tokenizer_class_candidate} does not exist or is not currently imported."
 
 
29
  )
30
- ValueError: Tokenizer class TokenizersBackend does not exist or is not currently imported.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  ERROR:
23
  Traceback (most recent call last):
24
+ File "/tmp/cyankiwi_GLM-4.7-Flash-AWQ-4bit_1UfsAtF.py", line 27, in <module>
25
+ model = AutoModelForCausalLM.from_pretrained("cyankiwi/GLM-4.7-Flash-AWQ-4bit")
26
+ File "/tmp/.cache/uv/environments-v2/9cbdd13f85033a9e/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 372, in from_pretrained
27
+ return model_class.from_pretrained(
28
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
29
+ pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
30
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
31
  )
32
+ ^
33
+ File "/tmp/.cache/uv/environments-v2/9cbdd13f85033a9e/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4015, in from_pretrained
34
+ hf_quantizer, config, device_map = get_hf_quantizer(
35
+ ~~~~~~~~~~~~~~~~^
36
+ config, quantization_config, device_map, weights_only, user_agent
37
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
38
+ )
39
+ ^
40
+ File "/tmp/.cache/uv/environments-v2/9cbdd13f85033a9e/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 312, in get_hf_quantizer
41
+ config.quantization_config = AutoHfQuantizer.merge_quantization_configs(
42
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
43
+ config.quantization_config, quantization_config
44
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
45
+ )
46
+ ^
47
+ File "/tmp/.cache/uv/environments-v2/9cbdd13f85033a9e/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 213, in merge_quantization_configs
48
+ quantization_config = AutoQuantizationConfig.from_dict(quantization_config)
49
+ File "/tmp/.cache/uv/environments-v2/9cbdd13f85033a9e/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 139, in from_dict
50
+ return target_cls.from_dict(quantization_config_dict)
51
+ ~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^
52
+ File "/tmp/.cache/uv/environments-v2/9cbdd13f85033a9e/lib/python3.13/site-packages/transformers/utils/quantization_config.py", line 1203, in from_dict
53
+ return super().from_dict(config_dict, return_unused_kwargs=return_unused_kwargs, **kwargs)
54
+ ~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
55
+ File "/tmp/.cache/uv/environments-v2/9cbdd13f85033a9e/lib/python3.13/site-packages/transformers/utils/quantization_config.py", line 115, in from_dict
56
+ config = cls(**config_dict)
57
+ File "/tmp/.cache/uv/environments-v2/9cbdd13f85033a9e/lib/python3.13/site-packages/transformers/utils/quantization_config.py", line 1133, in __init__
58
+ raise ImportError(
59
+ "compressed_tensors is not installed and is required for compressed-tensors quantization. Please install it with `pip install compressed-tensors`."
60
+ )
61
+ ImportError: compressed_tensors is not installed and is required for compressed-tensors quantization. Please install it with `pip install compressed-tensors`.