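The following Gradio app compares tokenizer efficiency across model families: it loads one tokenizer per family and reports how many tokens each produces for the same input text. GPT-J reuses GPT-2's tokenizer, so a single entry covers both.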
```python
from transformers import AutoTokenizer
import gradio as gr

# Load one tokenizer per model family.
gpt2_tokenizer = AutoTokenizer.from_pretrained("gpt2")
gpt_neox_tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
llama_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")
phi2_tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
falcon_tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b")

def tokenize(input_text):
    # Tokenize the same text with each tokenizer, including special tokens.
    gpt2_tokens = gpt2_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    gpt_neox_tokens = gpt_neox_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    llama_tokens = llama_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    phi2_tokens = phi2_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    falcon_tokens = falcon_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    # Report one token count per model family.
    return (
        f"GPT-2/GPT-J: {len(gpt2_tokens)}\n"
        f"GPT-NeoX: {len(gpt_neox_tokens)}\n"
        f"LLaMA: {len(llama_tokens)}\n"
        f"Phi-2: {len(phi2_tokens)}\n"
        f"Falcon: {len(falcon_tokens)}"
    )

iface = gr.Interface(fn=tokenize, inputs=gr.Textbox(lines=7), outputs="text")
iface.launch()
```
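For a quick sanity check without launching the web UI, `tokenize` can be called directly. The sample input below is arbitrary, and the exact counts depend on the installed tokenizer versions:

```python
# Hypothetical direct invocation, bypassing the Gradio interface.
print(tokenize("The quick brown fox jumps over the lazy dog."))
# Prints one "<family>: <count>" line per tokenizer. Note that the
# LLaMA tokenizer prepends a BOS token by default, so its count
# includes one token beyond the text itself.
```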