"""Count BGE tokens across the micropajama training split."""
from datasets import load_dataset
from transformers import AutoTokenizer


def main():
    ds = load_dataset("takara-ai/micropajama", split="train")
    tok = AutoTokenizer.from_pretrained("BAAI/bge-large-en-v1.5")
    # Tokenize in batches, keeping only per-example token counts (no special tokens).
    lens = ds.map(
        lambda b: {"len": [len(x) for x in tok(b["text"], add_special_tokens=False).input_ids]},
        batched=True,
        remove_columns=ds.column_names,
    )
    # Total token count across the whole split.
    print(sum(lens["len"]))


if __name__ == "__main__":
    main()
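
# Follow-up sketch (not part of the original script): `lens["len"]` holds the
# per-example token counts, so summary statistics beyond the split total come
# cheaply, e.g.:
#   print(max(lens["len"]), sum(lens["len"]) / len(lens["len"]))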