CreatorPhan committed on
Commit
7b8ee67
·
1 Parent(s): e4ae303

Update README.md

Browse files
Files changed (1)
  1. README.md +1 -1
README.md CHANGED
@@ -28,5 +28,5 @@ tokens = tokenizer(f"Tóm tắt văn bản sau: {context}", return_tensors='pt')
28
  output = model.generate(tokens.to(device), max_new_tokens=170)[0]
29
  predict = tokenizer.decode(output, skip_special_tokens=True)
30
  print(len(predict.split()))
31
- predict
32
  ```
 
28
  output = model.generate(tokens.to(device), max_new_tokens=170)[0]
29
  predict = tokenizer.decode(output, skip_special_tokens=True)
30
  print(len(predict.split()))
31
+ print(predict)
32
  ```