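A minimal Gradio Space that tokenizes the same input text with the GPT-2, GPT-J, GPT-NeoX, and LLaMA tokenizers and reports how many tokens each one produces: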
```python
from transformers import AutoTokenizer
import gradio as gr

# Load a tokenizer for each model family to compare.
gpt2_tokenizer = AutoTokenizer.from_pretrained("gpt2")
gptj_tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6b")
gpt_neox_tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
llama_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")

def tokenize(input_text):
    # Encode the same text with each tokenizer and compare token counts.
    gpt2_tokens = gpt2_tokenizer(input_text)["input_ids"]
    gptj_tokens = gptj_tokenizer(input_text)["input_ids"]
    gpt_neox_tokens = gpt_neox_tokenizer(input_text)["input_ids"]
    llama_tokens = llama_tokenizer(input_text)["input_ids"]
    return f"""Number of tokens.
GPT-2: {len(gpt2_tokens)}
GPT-J: {len(gptj_tokens)}
GPT-NeoX: {len(gpt_neox_tokens)}
LLaMA: {len(llama_tokens)}
"""

iface = gr.Interface(fn=tokenize, inputs=gr.Textbox(lines=7), outputs="text")
iface.launch()
```
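To run this outside of Spaces, install the dependencies first, e.g. `pip install gradio transformers`; depending on which tokenizer files the hubs provide, `sentencepiece` may also be needed for the LLaMA tokenizer. The first call downloads the four tokenizer configurations from the Hugging Face Hub, after which the app launches a local web UI.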