pablo-rf commited on
Commit
8a2a444
verified
1 Parent(s): 528cac8

Update example

Browse files
Files changed (1) hide show
  1. README.md +4 -2
README.md CHANGED
@@ -93,8 +93,10 @@ prompt = tokenizer.apply_chat_template(
93
 
94
  inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
95
  outputs = model.generate(input_ids=inputs.to(model.device), max_new_tokens=200)
96
-
97
- print(tokenizer.decode(outputs[0], skip_special_tokens=True))
 
 
98
 
99
  ```
100
 
 
93
 
94
  inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
95
  outputs = model.generate(input_ids=inputs.to(model.device), max_new_tokens=200)
96
+ generated_tokens = outputs[0][len(inputs[0]):]
97
+ response = tokenizer.decode(generated_tokens, skip_special_tokens=False).strip()
98
+ response = response.split("<|reserved_token_1|>")[0].strip()
99
+ print(response)
100
 
101
  ```
102