alanbuxton committed on
Commit
d3f4410
·
verified ·
1 Parent(s): d613c78

Added return_token_type_ids=False

Browse files

The existing sample code gives this error:

```
>>> outputs = model.generate(**inputs, max_new_tokens=20)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/xxxx/path/to/xxx/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
File "/xxxx/path/to/xxx/lib/python3.10/site-packages/transformers/generation/utils.py", line 1686, in generate
self._validate_model_kwargs(model_kwargs.copy())
File "/xxxx/path/to/xxx/lib/python3.10/site-packages/transformers/generation/utils.py", line 1242, in _validate_model_kwargs
raise ValueError(
ValueError: The following `model_kwargs` are not used by the model: ['token_type_ids'] (note: typos in the generate arguments will also show up in this list)
```

Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -126,7 +126,7 @@ model_id = "mistralai/Mistral-Nemo-Base-2407"
126
  tokenizer = AutoTokenizer.from_pretrained(model_id)
127
 
128
  model = AutoModelForCausalLM.from_pretrained(model_id)
129
- inputs = tokenizer("Hello my name is", return_tensors="pt")
130
 
131
  outputs = model.generate(**inputs, max_new_tokens=20)
132
  print(tokenizer.decode(outputs[0], skip_special_tokens=True))
 
126
  tokenizer = AutoTokenizer.from_pretrained(model_id)
127
 
128
  model = AutoModelForCausalLM.from_pretrained(model_id)
129
+ inputs = tokenizer("Hello my name is", return_tensors="pt", return_token_type_ids=False)
130
 
131
  outputs = model.generate(**inputs, max_new_tokens=20)
132
  print(tokenizer.decode(outputs[0], skip_special_tokens=True))