# Demo: subword tokenization of multilingual (Traditional Chinese + Latin) text
# with the intfloat/multilingual-e5-large tokenizer.
from transformers import AutoTokenizer

# Round-trip a mixed-script string (Traditional Chinese + Latin "ig") through
# the e5 tokenizer to inspect the subword ids and verify decoding restores it.
text = "這張我以前在一個日本妹子ig上看過餒"

# Downloads tokenizer files from the Hugging Face Hub on first use
# (network access required; cached locally afterwards).
tokenizer = AutoTokenizer.from_pretrained("intfloat/multilingual-e5-large")

# encode() adds the model's special tokens (e.g. <s> ... </s>) by default.
tokens = tokenizer.encode(text)
print(tokens)

# Decode once with special tokens kept, once with them stripped,
# to show exactly what the special-token markers contribute.
print(tokenizer.decode(tokens))
print(tokenizer.decode(tokens, skip_special_tokens=True))