Create 11_Tokenizer_Detokenizer.py
pages/11_Tokenizer_Detokenizer.py
ADDED
@@ -0,0 +1,54 @@
+import streamlit as st
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+# Load the tokenizer
+tokenizer = AutoTokenizer.from_pretrained('gpt2')
+
+# Streamlit app title
+st.title("Tokenizer and Detokenizer using GPT-2 for 2D Canvas")
+st.write("Example: cr8 lg cnvs html js hlds 9 wbs becomes 060980002300300000700026900077142592771144804002890033500082008600026601443")
+
+# Tokenization section
+st.header("Tokenization")
+sentence = st.text_input("Enter a sentence to tokenize:", "cr8 lg cnvs html js hlds 9 wbs")
+
+def format_token_ids(token_ids):
+    formatted_ids = [str(token_id).zfill(5) for token_id in token_ids]  # pad to 5 digits; GPT-2's largest ID is 50256
+    return ''.join(formatted_ids)
+
+if st.button("Tokenize"):
+    input_ids = tokenizer(sentence, return_tensors='pt').input_ids
+    token_ids_list = input_ids[0].tolist()
+    formatted_token_ids = format_token_ids(token_ids_list)
+    st.write("Tokenized input IDs (formatted):")
+    st.write(formatted_token_ids)
+
+# Detokenization section
+st.header("Detokenization")
+token_ids = st.text_input("Enter token IDs (concatenated without spaces):", "619710116000284001536")
+
+def split_token_ids(concatenated_ids, length=5):
+    return [concatenated_ids[i:i+length] for i in range(0, len(concatenated_ids), length)]
+
+def remove_leading_zeros(grouped_ids):
+    return [group.lstrip('0') or '0' for group in grouped_ids]  # 'or' keeps token ID 0 from collapsing to ''
+
+if st.button("Detokenize"):
+    split_ids = split_token_ids(token_ids)
+    cleaned_ids = remove_leading_zeros(split_ids)
+    cleaned_token_ids_str = ' '.join(cleaned_ids)
+    token_id_list = [int(group) for group in cleaned_ids if group.isdigit()]
+
+    detokenized_sentence = tokenizer.decode(token_id_list)
+
+    st.write("Grouped and cleaned token IDs:")
+    st.write(cleaned_token_ids_str)
+    st.write("Detokenized sentence:")
+    st.write(detokenized_sentence)
+
+# Load the model
+gpt2 = AutoModelForCausalLM.from_pretrained('gpt2')
+
+# Render GPT-2 model documentation (help() prints to stdout and returns None, so st.write(help(gpt2)) would show "None")
+st.write("Help GPT2")
+st.help(gpt2)
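
For reference, the fixed-width encoding this page implements can be sanity-checked outside Streamlit. The sketch below is illustrative rather than part of the Space (the helper names encode_fixed_width and decode_fixed_width are invented here); it assumes the same 'gpt2' checkpoint and a pad width of 5, which covers GPT-2's largest token ID, 50256:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('gpt2')

def encode_fixed_width(text, pad_width=5):
    # Tokenize, then zero-pad each ID so the concatenation splits back unambiguously
    ids = tokenizer(text).input_ids
    return ''.join(str(i).zfill(pad_width) for i in ids)

def decode_fixed_width(blob, pad_width=5):
    # Cut the string into pad_width-sized chunks and decode the integer IDs
    chunks = [blob[i:i + pad_width] for i in range(0, len(blob), pad_width)]
    return tokenizer.decode([int(c) for c in chunks])

text = "cr8 lg cnvs html js hlds 9 wbs"
blob = encode_fixed_width(text)
assert decode_fixed_width(blob) == text  # GPT-2's byte-level BPE makes the round trip lossless

Because every ID occupies exactly five characters, the concatenated string needs no separators, at the cost of five digits per token. In the Space itself, wrapping the two from_pretrained calls in functions decorated with st.cache_resource would keep Streamlit from reloading the tokenizer and model on every rerun.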