from transformers import AutoTokenizer
# Encode a Traditional Chinese test sentence and round-trip it through the tokenizer.
text = "這張我以前在一個日本妹子ig上看過餒"  # "I've seen this one before on a Japanese girl's IG"
tokenizer = AutoTokenizer.from_pretrained("intfloat/multilingual-e5-large")

tokens = tokenizer.encode(text)
print(tokens)  # token IDs; encode() adds the model's special tokens by default
print(tokenizer.decode(tokens))  # decoded text, special tokens included
print(tokenizer.decode(tokens, skip_special_tokens=True))  # decoded text only
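
# Optional extension (not in the original script): a minimal sketch of how to
# inspect the individual subword pieces and count content tokens, e.g. for a
# token-counting check on the same tokenizer.
pieces = tokenizer.tokenize(text)
print(pieces)       # SentencePiece subword strings, no special tokens
print(len(pieces))  # content-token count (excludes special tokens)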