```python
# Load model directly.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained(
    "ByteDance-Seed/Stable-DiffCoder-8B-Instruct", trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
    "ByteDance-Seed/Stable-DiffCoder-8B-Instruct", trust_remote_code=True
)

messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

# This model's custom `generate` (loaded via trust_remote_code) forwards any
# extra keyword arguments to `generate_block`, which has no `attention_mask`
# parameter — passing `**inputs` directly raises
# `TypeError: generate_block() got an unexpected keyword argument 'attention_mask'`.
# Drop `attention_mask` and pass only the token ids (which the custom
# `generate` accepts, per the original traceback).
generate_inputs = {k: v for k, v in inputs.items() if k != "attention_mask"}
outputs = model.generate(**generate_inputs, max_new_tokens=40)

# Decode only the newly generated tokens (everything past the prompt length).
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```
ERROR:
Traceback (most recent call last):
  File "/tmp/ByteDance-Seed_Stable-DiffCoder-8B-Instruct_1H3wK31.py", line 39, in <module>
    outputs = model.generate(**inputs, max_new_tokens=40)
  File "/tmp/.cache/uv/environments-v2/d3eb16abd23c0167/lib/python3.13/site-packages/torch/utils/_contextlib.py", line 124, in decorate_context
    return func(*args, **kwargs)
  File "/tmp/.cache/huggingface/modules/transformers_modules/ByteDance_hyphen_Seed/Stable_hyphen_DiffCoder_hyphen_8B_hyphen_Instruct/d0808fa34fa03de9163a29cabfc7107294a535a8/modeling_seed_diffcoder.py", line 24, in generate
    output_ids, nfe = generate_block(
        model=self,
        prompt=prompt,
        **kwargs,
    )
  File "/tmp/.cache/uv/environments-v2/d3eb16abd23c0167/lib/python3.13/site-packages/torch/utils/_contextlib.py", line 124, in decorate_context
    return func(*args, **kwargs)
TypeError: generate_block() got an unexpected keyword argument 'attention_mask'