Update example
Browse files
README.md
CHANGED
|
@@ -93,8 +93,10 @@ prompt = tokenizer.apply_chat_template(
|
|
| 93 |
|
| 94 |
inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
|
| 95 |
outputs = model.generate(input_ids=inputs.to(model.device), max_new_tokens=200)
|
| 96 |
-
|
| 97 |
-
|
|
|
|
|
|
|
| 98 |
|
| 99 |
```
|
| 100 |
|
|
|
|
| 93 |
|
| 94 |
inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
|
| 95 |
outputs = model.generate(input_ids=inputs.to(model.device), max_new_tokens=200)
|
| 96 |
+
generated_tokens = outputs[0][len(inputs[0]):]
|
| 97 |
+
response = tokenizer.decode(generated_tokens, skip_special_tokens=False).strip()
|
| 98 |
+
response = response.split("<|reserved_token_1|>")[0].strip()
|
| 99 |
+
print(response)
|
| 100 |
|
| 101 |
```
|
| 102 |
|