"""Streamlit app for a Marathi BPE-style tokenizer."""
import streamlit as st
import regex as re  # 'regex' (not the stdlib 're') supports the Unicode-aware patterns used below
import json
import ast
import pandas as pd
def clean_text(text, valid_chars_set, replaced_char=None):
    """Replace characters outside valid_chars_set, then split the text into token chunks."""
    text = replace_unwanted_chars(text, valid_chars_set, replaced_char)
    # GPT-2 style split pattern, adapted for Devanagari. Original GPT-2 pattern:
    # r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
    gpt2pat = re.compile(r"""
        's|'t|'re|'ve|'m|'ll|'d|                                            # English contractions
        \s*[\u0900-\u097F]+(?:[\u093E-\u094D\u0950-\u0954\u0962-\u0963]+)*| # Devanagari letters and diacritics, with leading spaces
        \s*\d+|                                                             # digits, with leading spaces
        [^\s\w\u0900-\u097F]+|                                              # punctuation and symbols
        \s+                                                                 # whitespace
        """, re.VERBOSE)
    return re.findall(gpt2pat, text)
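# For example, clean_text("नमस्ते जग", valid_char_set) should yield roughly ["नमस्ते", " जग"]
# (assuming every character is in valid_chars_set): each Devanagari word, together with its
# leading space, becomes one chunk that is later BPE-encoded separately.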
def replace_unwanted_chars(text, valid_chars_set, replaced_char=None):
    """Keep characters that are in valid_chars_set; replace everything else."""
    if replaced_char is None:
        replaced_char = ''
    return ''.join([char if char in valid_chars_set else replaced_char for char in text])
def get_stats(ids, counts=None):
    """Count how often each consecutive pair of token ids occurs."""
    counts = {} if counts is None else counts
    for pair in zip(ids, ids[1:]):
        counts[pair] = counts.get(pair, 0) + 1
    return counts
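# For example, get_stats([1, 2, 3, 1, 2]) returns {(1, 2): 2, (2, 3): 1, (3, 1): 1}:
# the pair (1, 2) occurs twice, every other consecutive pair once.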
def merge(ids, pair, idx):
    """Replace every occurrence of `pair` in `ids` with the new token id `idx`."""
    newids = []
    i = 0
    while i < len(ids):
        if i < len(ids) - 1 and ids[i] == pair[0] and ids[i + 1] == pair[1]:
            newids.append(idx)
            i += 2
        else:
            newids.append(ids[i])
            i += 1
    return newids
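# For example, merge([5, 6, 6, 7, 9, 1], (6, 7), 99) returns [5, 6, 99, 9, 1]:
# the single (6, 7) occurrence is collapsed into the new token id 99.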
def get_vocab(merges, univ_vocab):
    """Extend the initial vocab: each merged id maps to the concatenation of its two parts."""
    for (p0, p1), idx in merges.items():
        univ_vocab[idx] = univ_vocab[p0] + univ_vocab[p1]
    return univ_vocab
def get_init_vocab(u_ids):
    """Build an initial id -> character vocab where each id is its own Unicode code point."""
    return {idx: chr(idx) for idx in u_ids}
def decode(ids, univ_vocab):
    """Given a list of token ids, return the decoded Python string."""
    return "".join(univ_vocab[idx] for idx in ids)
def encode(text, merges):
    """Given a string, return its list of token ids by greedily applying learned merges."""
    global char_to_int
    tokens = [char_to_int[char] for char in text]
    while len(tokens) >= 2:
        stats = get_stats(tokens)
        # pick the pair that was learned earliest (lowest merge index)
        pair = min(stats, key=lambda p: merges.get(p, float("inf")))
        if pair not in merges:
            break  # nothing else can be merged
        idx = merges[pair]
        tokens = merge(tokens, pair, idx)
    return tokens
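# The min() over merges.get(pair, inf) picks, among the pairs present in the current token
# sequence, the one learned earliest during training, so merges are replayed in training order;
# pairs that were never learned rank as infinity and terminate the loop.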
def encode_ordinary(text, merges, valid_char_set):
    """Encoding that ignores any special tokens."""
    # unrecognized characters are replaced with the inverted question mark
    replace_char = chr(191)  # U+00BF
    # split text into chunks by the categories defined in the regex pattern
    text_chunks = clean_text(text, valid_char_set, replace_char)
    # each chunk is encoded separately, then the results are concatenated
    ids = []
    for chunk in text_chunks:
        ids.extend(encode(chunk, merges))
    return ids
def unicode_chars_range(start, end):
    """List the characters in an inclusive Unicode code-point range."""
    return [chr(i) for i in range(start, end + 1)]

def list_unicode_chars(sp_list):
    """List the characters for an explicit list of code points."""
    return [chr(i) for i in sp_list]
def prepare_init_vocab():
    """Build the initial character set and the id mappings used by the tokenizer."""
    # Basic Latin: space through '@', plus two punctuation sub-ranges
    special_chars = unicode_chars_range(0x0020, 0x0040)
    punctuation1_chars = unicode_chars_range(0x005B, 0x0060)
    punctuation2_chars = unicode_chars_range(0x007B, 0x007E)
    # Devanagari
    devanagari_chars = unicode_chars_range(0x0900, 0x097F)
    # Devanagari Extended
    devanagari_extended_chars = unicode_chars_range(0xA8E0, 0xA8FF)
    # General Punctuation (from the Wikipedia list):
    # (–,—,―,‗,‘,’,‚,‛,“,”,„,†,‡,•,…,‰,′,″,‹,›,‼,‾,⁄,⁊)
    pun_list = [0x2013, 0x2014, 0x2015, 0x2017, 0x2018, 0x2019, 0x201A, 0x201B, 0x201C, 0x201D,
                0x201E, 0x2020, 0x2021, 0x2022, 0x2026, 0x2030, 0x2032, 0x2033, 0x2039, 0x203A,
                0x203C, 0x203E, 0x2044, 0x204A]
    # append the inverted question mark (used as the replacement character) and newline
    pun_list.append(0x00BF)
    pun_list.append(10)
    punctuation_chars = list_unicode_chars(pun_list)
    # Combine all characters
    all_chars_list = (devanagari_chars + devanagari_extended_chars + special_chars +
                      punctuation1_chars + punctuation2_chars + punctuation_chars)
    # ids are sequential positions in the combined list, not Unicode code points
    init_vocab = {ii: ch1 for ii, ch1 in enumerate(all_chars_list)}
    char_to_int = {ch1: ii for ii, ch1 in enumerate(all_chars_list)}
    return set(all_chars_list), init_vocab, char_to_int
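# Note: because ids are positions in all_chars_list, the base vocabulary is dense: id 0 is
# U+0900 (the first Devanagari character) and ids run contiguously up to len(all_chars_list) - 1.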
valid_char_set, univ_vocab, char_to_int = prepare_init_vocab()
n_vocab_init = len(univ_vocab)
# Read a JSON file and convert its string keys back to Python objects (ints or tuples)
def read_json_file(filename):
    with open(filename, 'r', encoding='utf-8') as file:
        data = json.load(file)
    converted_data = {ast.literal_eval(k): v for k, v in data.items()}
    return converted_data
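# JSON object keys are always strings, so ast.literal_eval restores the key types this app
# expects: e.g. "104" -> 104 for the vocab file and "(104, 32)" -> (104, 32) for the merges
# file (assuming the training script saved tuple keys via their str() form).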
# Pre-trained tokenizer files
vocab_filename = 'vocab_15000.json'
merges_filename = 'merges_15000.json'

# Read the vocabulary JSON file (token id -> token string)
univ_vocab = read_json_file(vocab_filename)

# Read the merges JSON file ((id, id) pair -> merged token id)
merges = read_json_file(merges_filename)
def tokenize(text):
    """Tokenize `text` and return (token ids, token strings)."""
    global merges, valid_char_set, univ_vocab
    ids_tokens = encode_ordinary(text, merges, valid_char_set)
    txt_tokens = [univ_vocab[tok1] for tok1 in ids_tokens]
    return ids_tokens, txt_tokens
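# Illustrative usage (the exact ids depend on the loaded merges/vocab files):
#   ids, toks = tokenize("क्रिकेट")
#   ids is a list of token ids, toks the matching substrings, with len(ids) == len(toks).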
# CLI test (for running outside Streamlit):
# in_text = input("provide some text : ")
# tokenize(in_text)
# Streamlit app
st.title("Marathi Language Tokenizer")

# Input text
input_text = st.text_area("Enter text to tokenize:")
st.write("""
This app tokenizes Marathi input text. It recognizes Devanagari and common special characters;
unrecognized input characters appear as an inverted question mark (¿).
Enter any text in the box above and click "Tokenize" to see the tokens and their corresponding IDs,
e.g. "क्रिकेट हा जगभरातला आणि त्यातही भारतात विशेष लोकप्रिय असलेला खेळ आहे. त्यात यंदा क्रिकेट
वर्ल्ड कप भारतात होणार असल्याने क्रिकेटरसिकांच्या उत्साहाला उधाण आलं आहे."
""")
if st.button("Tokenize"):
    if input_text:
        # Tokenize the input text (tokenize returns ids first, token strings second)
        token_ids, tokens = tokenize(input_text)
        st.write(f"Stats | Number of input characters : {len(input_text)} | "
                 f"Number of tokens : {len(tokens)} | "
                 f"Compression : {len(input_text) / len(tokens):.2f} |")
        # Create a DataFrame of tokens and their IDs for better readability
        df = pd.DataFrame(list(zip(tokens, token_ids)), columns=["Token", "Token ID"])
        # Display the tokens and their IDs in a table
        st.write("Tokens and Token IDs:")
        st.dataframe(df)
    else:
        st.write("Please enter some text to tokenize.")