|
|
|
|
|
"""Corpus for tokenisation.ipynb |
|
|
|
|
|
Automatically generated by Colab. |
|
|
|
|
|
Original file is located at |
|
|
https://colab.research.google.com/drive/1_maItYuOpWMe8YAaz4nzcKT32Rvye144 |
|
|
""" |
|
|
|
|
|
# Session flag set at the top of the notebook.
# NOTE(review): nothing visible in this file reads it — presumably consumed by
# the Colab runtime / a companion notebook; confirm before removing.
continue_seamless = True

"""# Tokenisation Whole (Experimenation) - 0.5b"""
|
|
|
|
|
# --- Environment setup (Colab shell cells) ---
!pip install zstandard

!pip install datasets

from huggingface_hub import hf_hub_download
from huggingface_hub import list_repo_files
from datasets import load_dataset

# Target directories for the sparse checkout below.
!mkdir SlimPajama-627B

!mkdir validation

# Sparse-checkout ONLY the validation split of SlimPajama-627B from the
# Hugging Face hub (the full dataset is far too large for a Colab disk).
!git init
!git remote add -f origin https://huggingface.co/datasets/cerebras/SlimPajama-627B
!git config core.sparseCheckout true
!echo "validation/*" >> .git/info/sparse-checkout
!git pull origin main

!pwd

!pip install chardet
|
|
|
|
|
import os
import json
import chardet
from tqdm.auto import tqdm
from collections import Counter

# Sanity-check the checked-out layout, sizes, and file counts.
!ls -lh /content/SlimPajama-627B/validation/validation

!ls -lh /content/SlimPajama-627B/validation/validation/chunk1

# Count regular files (grep -v '^d' drops directories) in chunk1.
!ls -l /content/SlimPajama-627B/validation/validation/chunk1 | grep -v '^d' | wc -l
|
|
|
|
|
import os
import json
import zstandard as zstd
from tqdm import tqdm  # NOTE(review): shadows the earlier `tqdm.auto` import

# Root of the sparse-checked-out validation split.
dataset_dir = "/content/SlimPajama-627B/validation/validation"

# Accumulates every "text" field extracted from the .jsonl.zst chunks.
corpus = []
|
|
|
|
|
|
|
|
def read_jsonl_zst(file_path):
    """Yield the "text" field of every JSON object in a zstd-compressed JSONL file.

    Streams the decompressed bytes line by line instead of materialising the
    whole decompressed chunk in memory at once (the original `reader.read()`
    held the entire chunk — hundreds of MB — before splitting). Lines that are
    not valid JSON are reported and skipped; JSON values without a "text" key
    are silently ignored.

    Args:
        file_path (str): path to a .jsonl.zst file.

    Yields:
        str: the "text" field of each well-formed JSON-object line.
    """
    from io import TextIOWrapper  # local import: keeps this notebook cell self-contained

    with open(file_path, "rb") as f:
        dctx = zstd.ZstdDecompressor()
        with dctx.stream_reader(f) as reader:
            # TextIOWrapper decodes UTF-8 and splits lines incrementally, so
            # memory stays bounded by one line at a time.
            for line in TextIOWrapper(reader, encoding="utf-8"):
                try:
                    data = json.loads(line)
                    if isinstance(data, dict) and "text" in data:
                        yield data["text"]
                except json.JSONDecodeError:
                    print(f"Skipping malformed JSON in {file_path}")
|
|
|
|
|
|
|
|
# Walk the dataset tree and pull the "text" field out of every compressed chunk.
for root, _, files in tqdm(os.walk(dataset_dir)):
    for file in tqdm(files):
        file_path = os.path.join(root, file)

        if file.endswith(".jsonl.zst"):
            corpus.extend(read_jsonl_zst(file_path))

print(f"Extracted {len(corpus)} text entries.")
|
|
|
|
|
# Spot-check a few entries from the head and middle of the corpus.
corpus[0]

corpus[1][-50:]

corpus[2]

corpus[len(corpus)//2][-50:]

corpus[len(corpus)//2+1]

corpus[len(corpus)//2+2]

len(corpus)

# Join the documents into single space-separated strings for char-level stats
# and for the half-corpus dumps written below.
first_half_corpus = " ".join(corpus[:len(corpus)//2])
second_half_corpus = " ".join(corpus[len(corpus)//2:])

net_corpus = " ".join(corpus)

net_corpus[:500]

len(net_corpus)

!pwd
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Persist the joined corpora and the per-document lists to disk.
# Explicit UTF-8 so the dumps do not depend on the platform's default locale
# encoding (writes of non-ASCII text would fail under e.g. a C/ASCII locale).
with open("whole_corpus_0.5b_val.txt", "w", encoding="utf-8") as file:
    file.write(net_corpus)

with open("first_half_corpus_0.5b_val.txt", "w", encoding="utf-8") as file:
    file.write(first_half_corpus)

with open("second_half_corpus_0.5b_val.txt", "w", encoding="utf-8") as file:
    file.write(second_half_corpus)

with open("whole_corpus_ind_texts_0.5b_val.json", "w", encoding="utf-8") as file:
    json.dump(corpus, file, indent=4)

with open("first_half_corpus_ind_texts_0.5b_val.json", "w", encoding="utf-8") as file:
    json.dump(corpus[:len(corpus)//2], file, indent=4)

with open("second_half_corpus_ind_texts_0.5b_val.json", "w", encoding="utf-8") as file:
    json.dump(corpus[len(corpus)//2:], file, indent=4)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Character-frequency statistics over the whole corpus.
char_counts = Counter(net_corpus)

len(char_counts)

sorted_char_counts = dict(sorted(char_counts.items(), key=lambda item: item[1], reverse=True))

# Total character occurrences — should equal len(net_corpus).
total_char_occ = 0
for i in sorted_char_counts:
    total_char_occ += sorted_char_counts[i]
total_char_occ

sorted_char_counts[' ']

sorted_char_counts

len(net_corpus)
|
|
|
|
|
import regex |
|
|
import re |
|
|
|
|
|
def split_text_into_words(text):
    """Return the maximal runs of word characters (Unicode letters, digits, '_')
    found in *text*, in order of appearance.

    Equivalent to splitting on runs of non-word characters and discarding the
    empty pieces — collecting the word runs directly skips the filter step.
    """
    return regex.findall(r'[\p{L}\p{N}_]+', text)
|
|
|
|
|
# Word-level tokens and their frequencies.
sp_ch_sep_toks = split_text_into_words(net_corpus)
len(sp_ch_sep_toks)

word_counts = Counter(sp_ch_sep_toks)

len(word_counts)

# Merge single-character counts into the word vocabulary.
# NOTE(review): Counter | plain dict falls back to the dict merge operator,
# where the RIGHT operand's value wins on key clashes (so a single-char word's
# count is replaced by its raw character count) — confirm this is intended.
word_counts = word_counts | sorted_char_counts

len(word_counts)

word_counts

sorted_word_counts = dict(sorted(word_counts.items(), key=lambda x: x[1], reverse=True))
sorted_word_counts

# Longest token in the vocabulary.
max_len = 0
for word in word_counts:
    if len(word) > max_len:
        max_len = len(word)
print(max_len)

len(sorted_word_counts)
|
|
|
|
|
# Total occurrences bucketed by token length.
word_len_count_map = {}
for word in word_counts:
    if len(word) not in word_len_count_map:
        word_len_count_map[len(word)] = word_counts[word]
    else:
        word_len_count_map[len(word)] += word_counts[word]

sorted_word_len_count_map = dict(sorted(word_len_count_map.items(), key=lambda x: x[1], reverse=True))
sorted_word_len_count_map

len(sorted_word_len_count_map)

# NOTE(review): 'sum' shadows the builtin for the rest of the notebook.
sum = 0
for i in sorted_word_len_count_map:
    sum += sorted_word_len_count_map[i]
sum
|
|
|
|
|
# Per-length stats over the 131072 most frequent tokens:
# lengths[L] = [total count, max single-token count, number of tokens].
lengths = {}
for word in list(sorted_word_counts.keys())[:131_072]:
    if len(word) not in lengths:
        lengths[len(word)] = [sorted_word_counts[word],sorted_word_counts[word],1]
    else:
        lengths[len(word)][0] += sorted_word_counts[word]
        lengths[len(word)][2] += 1
        if sorted_word_counts[word] > lengths[len(word)][1]:
            lengths[len(word)][1] = sorted_word_counts[word]
lengths

len(lengths)

# Lengths ranked by their best (most frequent) single token.
sorted_lengths_max_ind_tok_counts = dict(sorted(lengths.items(), key=lambda item: item[1][1], reverse=True))
sorted_lengths_max_ind_tok_counts
|
|
|
|
|
from collections import defaultdict |
|
|
|
|
|
def count_char_ngrams_in_words(word_list, max_n=14):
    """
    Count character-level n-grams across a list of words.

    Only n-grams with 2 <= n <= max_n that are strictly shorter than their
    word are counted — a whole word is never counted as its own n-gram.

    Args:
        word_list (list): words to analyse.
        max_n (int): maximum n-gram size to count (default: 14).

    Returns:
        dict: {n: {ngram: count}} for every n that produced at least one n-gram.
    """
    ngram_counts = defaultdict(lambda: defaultdict(int))

    for word in tqdm(word_list):
        length = len(word)
        # Upper bound deliberately stops one short of the full word length.
        top = min(max_n, length - 1)
        for n in range(2, top + 1):
            for start in range(length - n + 1):
                ngram_counts[n][word[start:start + n]] += 1

    return {n: dict(counts) for n, counts in ngram_counts.items()}
|
|
|
|
|
# Character n-grams (up to length 15) across all word tokens.
n_gram = count_char_ngrams_in_words(sp_ch_sep_toks,max_n=15)

# How many n-grams clear a given frequency threshold.
threshold = 56_515
count_above_threshold = 0
for n in n_gram:
    for i in n_gram[n]:
        if n_gram[n][i] > threshold:
            count_above_threshold += 1
count_above_threshold

# Same check at a higher cut-off.
threshold = 141_993
count_above_threshold = 0
for n in n_gram:
    for i in n_gram[n]:
        if n_gram[n][i] > threshold:
            count_above_threshold += 1
count_above_threshold

# Number of top-vocab tokens whose length bucket's best count is below the threshold.
t_sum_below = 0
for i in sorted_lengths_max_ind_tok_counts:
    if sorted_lengths_max_ind_tok_counts[i][1] < threshold:
        t_sum_below += sorted_lengths_max_ind_tok_counts[i][2]
t_sum_below

t_sum_below
|
|
|
|
|
# Flatten {n: {ngram: count}} into a single ngram -> count map.
n_gram_tok = {}
for n in tqdm(n_gram):
    for i in n_gram[n]:
        n_gram_tok[i] = n_gram[n][i]
n_gram_tok

sorted_n_gram_tok_counts = dict(sorted(n_gram_tok.items(), key=lambda item: item[1], reverse=True))
sorted_n_gram_tok_counts

sorted_word_counts

# Per-length stats for the top-131072 n-grams (same layout as `lengths`:
# [total count, max single count, n-gram count] per length).
n_gram_lengths = {}
for i in list(sorted_n_gram_tok_counts.keys())[:131_072]:
    if len(i) not in n_gram_lengths:
        n_gram_lengths[len(i)] = [sorted_n_gram_tok_counts[i],sorted_n_gram_tok_counts[i],1]
    else:
        n_gram_lengths[len(i)][0] += sorted_n_gram_tok_counts[i]
        n_gram_lengths[len(i)][2] += 1
        if sorted_n_gram_tok_counts[i] > n_gram_lengths[len(i)][1]:
            n_gram_lengths[len(i)][1] = sorted_n_gram_tok_counts[i]
n_gram_lengths

sorted_n_gram_lengths_tok_counts = dict(sorted(n_gram_lengths.items(), key=lambda item: item[1][1], reverse=True))
sorted_n_gram_lengths_tok_counts

sorted_lengths_max_ind_tok_counts
|
|
|
|
|
import regex |
|
|
from collections import Counter |
|
|
|
|
|
def count_special_char_ngrams(text, max_n):
    """
    Count character-level n-grams (n >= 2) composed purely of special
    characters — anything that is not a Unicode letter, digit, underscore,
    or whitespace.

    Parameters:
        text (str): The input text.
        max_n (int): The highest n-gram length to consider.

    Returns:
        dict: A mapping of special character ngrams to their counts.
    """
    counts = Counter()

    # Scan maximal runs of consecutive special characters; n-grams never
    # span across word characters or whitespace.
    for match in tqdm(regex.finditer(r'[^\p{L}\p{N}_\s]+', text)):
        run = match.group()
        run_len = len(run)

        for n in range(2, min(run_len, max_n) + 1):
            counts.update(run[j:j + n] for j in range(run_len - n + 1))

    return dict(counts)
|
|
|
|
|
# Special-character n-grams over the whole corpus.
sp_tok_n_grams = count_special_char_ngrams(net_corpus, 1000)
sp_tok_n_grams

sorted_sp_tok_n_grams = dict(sorted(sp_tok_n_grams.items(), key=lambda x: x[1], reverse=True))
sorted_sp_tok_n_grams

len(sorted_sp_tok_n_grams)

# Total occurrences bucketed by n-gram length.
sorted_sp_tok_n_grams_len_count_map = {}
for sp_tok in sorted_sp_tok_n_grams:
    if len(sp_tok) not in sorted_sp_tok_n_grams_len_count_map:
        sorted_sp_tok_n_grams_len_count_map[len(sp_tok)] = sorted_sp_tok_n_grams[sp_tok]
    else:
        sorted_sp_tok_n_grams_len_count_map[len(sp_tok)] += sorted_sp_tok_n_grams[sp_tok]

sorted_sorted_sp_tok_n_grams_len_count_map = dict(sorted(sorted_sp_tok_n_grams_len_count_map.items(), key=lambda x: x[1], reverse=True))
sorted_sorted_sp_tok_n_grams_len_count_map

len(sorted_sp_tok_n_grams_len_count_map)

# NOTE(review): 'sum' shadows the builtin.
sum = 0
for i in sorted_sp_tok_n_grams_len_count_map:
    sum += sorted_sp_tok_n_grams_len_count_map[i]
sum

# Per-length stats for the top-131072 special-char n-grams
# ([total count, max single count, n-gram count] per length).
sp_token_ngram_lengths = {}
for sp_tok in list(sorted_sp_tok_n_grams.keys())[:131_072]:
    if len(sp_tok) not in sp_token_ngram_lengths:
        sp_token_ngram_lengths[len(sp_tok)] = [sorted_sp_tok_n_grams[sp_tok],sorted_sp_tok_n_grams[sp_tok],1]
    else:
        sp_token_ngram_lengths[len(sp_tok)][0] += sorted_sp_tok_n_grams[sp_tok]
        sp_token_ngram_lengths[len(sp_tok)][2] += 1
        if sorted_sp_tok_n_grams[sp_tok] > sp_token_ngram_lengths[len(sp_tok)][1]:
            sp_token_ngram_lengths[len(sp_tok)][1] = sorted_sp_tok_n_grams[sp_tok]
sp_token_ngram_lengths
|
|
|
|
|
def max_contiguous_occurrences(corpus):
    """Map each character in *corpus* to the length of its longest contiguous run.

    Args:
        corpus (str): text to scan; may be empty.

    Returns:
        dict: {char: longest run length}; empty dict for empty input.
    """
    max_runs = {}
    # Guard: the original indexed corpus[0] unconditionally and raised
    # IndexError on an empty string.
    if not corpus:
        return max_runs

    current_char = corpus[0]
    current_run = 1

    for ch in tqdm(corpus[1:]):
        if ch == current_char:
            current_run += 1
        else:
            # Close out the finished run before starting a new one.
            if current_run > max_runs.get(current_char, 0):
                max_runs[current_char] = current_run
            current_char = ch
            current_run = 1

    # Flush the final run.
    if current_run > max_runs.get(current_char, 0):
        max_runs[current_char] = current_run

    return max_runs
|
|
|
|
|
unique_char_list = sorted_char_counts.keys()

# Longest contiguous run per character over the whole corpus.
max_occ_map = max_contiguous_occurrences(net_corpus)

sorted_max_occ_map = dict(sorted(max_occ_map.items(), key=lambda x: x[1], reverse=True))
sorted_max_occ_map
|
|
|
|
|
sorted_word_counts |
|
|
|
|
|
sorted_n_gram_tok_counts |
|
|
|
|
|
sorted_sp_tok_n_grams |
|
|
|
|
|
len(sorted_word_counts) |
|
|
|
|
|
len(sorted_n_gram_tok_counts) |
|
|
|
|
|
3809463 + 49495136 |
|
|
|
|
|
len(sorted_sp_tok_n_grams) |
|
|
|
|
|
for i in tqdm(sorted_word_counts): |
|
|
if i in sorted_sp_tok_n_grams: |
|
|
print(i) |
|
|
|
|
|
for i in tqdm(sorted_n_gram_tok_counts): |
|
|
if i in sorted_sp_tok_n_grams: |
|
|
print(i) |
|
|
|
|
|
for i in tqdm(sorted_sp_tok_n_grams): |
|
|
if i in sorted_word_counts: |
|
|
print(i) |
|
|
|
|
|
if i in sorted_n_gram_tok_counts: |
|
|
print(i) |
|
|
|
|
|
for i in tqdm(sorted_word_counts): |
|
|
if i not in sorted_n_gram_tok_counts: |
|
|
sorted_n_gram_tok_counts[i] = sorted_word_counts[i] |
|
|
continue |
|
|
|
|
|
sorted_n_gram_tok_counts[i] += sorted_word_counts[i] |
|
|
|
|
|
sorted_n_gram_tok_counts = dict(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1], reverse=True)) |
|
|
|
|
|
sorted_n_gram_tok_counts |
|
|
|
|
|
len(sorted_n_gram_tok_counts) |
|
|
|
|
|
for i in tqdm(sorted_n_gram_tok_counts): |
|
|
if i in sorted_sp_tok_n_grams: |
|
|
print(i) |
|
|
|
|
|
for i in tqdm(sorted_sp_tok_n_grams): |
|
|
if i in sorted_n_gram_tok_counts: |
|
|
print(i) |
|
|
|
|
|
for i in list(sorted_n_gram_tok_counts.keys())[:131_072]: |
|
|
if len(set(list(i))) == 1 and len(i) > 3: |
|
|
print(i, len(i)) |
|
|
|
|
|
n_gram_lengths = {} |
|
|
for i in list(sorted_n_gram_tok_counts.keys())[:131_072]: |
|
|
if len(i) not in n_gram_lengths: |
|
|
n_gram_lengths[len(i)] = [sorted_n_gram_tok_counts[i],sorted_n_gram_tok_counts[i],1] |
|
|
else: |
|
|
n_gram_lengths[len(i)][0] += sorted_n_gram_tok_counts[i] |
|
|
n_gram_lengths[len(i)][2] += 1 |
|
|
if sorted_n_gram_tok_counts[i] > n_gram_lengths[len(i)][1]: |
|
|
n_gram_lengths[len(i)][1] = sorted_n_gram_tok_counts[i] |
|
|
n_gram_lengths |
|
|
|
|
|
# Report numeric n-grams (after stripping spaces/newlines) above 56k occurrences.
for word in tqdm(sorted_n_gram_tok_counts):
    if word.strip(' \n').isnumeric() and sorted_n_gram_tok_counts[word] > 56_000:
        # Bug fix: look the count up in the dict being iterated — n-gram keys
        # need not exist in sorted_word_counts, which raised KeyError.
        print(f"{word}:{sorted_n_gram_tok_counts[word]}")
|
|
|
|
|
len(sorted_n_gram_tok_counts) |
|
|
|
|
|
len(sorted_sp_tok_n_grams) |
|
|
|
|
|
52307513 + 696425 |
|
|
|
|
|
sorted_n_gram_tok_counts = dict(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1], reverse=True)) |
|
|
|
|
|
sorted_n_gram_tok_counts = sorted_n_gram_tok_counts | sorted_sp_tok_n_grams |
|
|
len(sorted_n_gram_tok_counts) |
|
|
|
|
|
for i in list(sorted_sp_tok_n_grams.keys())[:131_072]: |
|
|
if len(set(list(i))) == 1 and len(i) > 100: |
|
|
print(i[:5], len(i), sorted_sp_tok_n_grams[i], len(i)*sorted_sp_tok_n_grams[i]) |
|
|
|
|
|
top_tokens = {} |
|
|
|
|
|
count = 0 |
|
|
c = 0 |
|
|
s = [0,0,0] |
|
|
for i in list(sorted_n_gram_tok_counts.keys()): |
|
|
if count >= 131_072: |
|
|
break |
|
|
if i in sorted_sp_tok_n_grams: |
|
|
c += 1 |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
if len(set(list(i))) > 1: |
|
|
print(i, sorted_n_gram_tok_counts[i]) |
|
|
s[0] += 1 |
|
|
if len(i) > 15: |
|
|
s[1] += 1 |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
else: |
|
|
count += 1 |
|
|
c, count, s |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length = dict(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(len(x[0])), reverse=True)) |
|
|
|
|
|
max_length = 0 |
|
|
char_count = 0 |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
|
|
|
char_count += 1 |
|
|
char_count, max_length |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_sq = dict(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(len(x[0])**2), reverse=True)) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
max_length = 0 |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_cube = dict(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(len(x[0])**3), reverse=True)) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
max_length = 0 |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_quad = dict(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(len(x[0])**4), reverse=True)) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_quad.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
max_length = 0 |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_quad.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_cube_smart = dict(tqdm(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(((((((len(x[0])**2)* list(Counter(list(x[0])).items())[0][1]))))/(len(set(list(x[0])))))), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube_smart.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
max_length = 0 |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube_smart.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_sq_smart = dict(tqdm(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(((((((len(x[0])**2)* list(Counter(list(x[0])).items())[0][1]))))/(len(set(list(x[0]))))))**(2/3), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq_smart.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
max_length = 0 |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq_smart.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
def longest_same_char_sequence(s: str) -> int:
    """Return the length of the longest run of one repeated character in *s*.

    An empty string has no runs, so the result is 0; otherwise at least 1.
    """
    if not s:
        return 0

    best = 1
    run = 1
    prev = s[0]

    for ch in s[1:]:
        if ch == prev:
            run += 1
        else:
            # Run ended: record it and start counting the new character.
            if run > best:
                best = run
            prev = ch
            run = 1

    # The final run is never closed inside the loop.
    return best if best > run else run
|
|
|
|
|
longest_same_char_sequence('\\\\\\\\\\\\\\\\\\\\\\') |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_cube_smart_2 = dict(tqdm(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(((((((len(x[0])**2)*longest_same_char_sequence(x[0])*list(Counter(list(x[0])).items())[0][1]))))/(len(set(list(x[0]))))))**(3/4), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube_smart_2.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
max_length = 0 |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube_smart_2.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_sq_smart_2 = dict(tqdm(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(((((((len(x[0])**2)*longest_same_char_sequence(x[0])*list(Counter(list(x[0])).items())[0][1]))))/(len(set(list(x[0]))))))**(2/4), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq_smart_2.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
max_length = 0 |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq_smart_2.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
class SuffixAutomaton:
    """Suffix automaton of a string.

    After construction, states[i]['occ'] holds the number of occurrences in
    the source string of the substrings represented by state i.
    State fields: 'len' (length of the longest substring for the state),
    'link' (suffix link index, -1 at the root), 'next' (char -> state
    transitions), 'occ' (occurrence count).
    """

    def __init__(self, s):
        # Root state represents the empty string and has no suffix link.
        self.states = [{'len': 0, 'link': -1, 'next': {}, 'occ': 0}]
        self.last = 0
        for ch in s:
            self.add_char(ch)
        self.propagate_occurrences()

    def add_char(self, ch):
        # Standard online suffix-automaton extension by one character.
        p = self.last
        curr = len(self.states)

        # New state for the extended prefix; occ=1 marks a primary state.
        self.states.append({'len': self.states[p]['len'] + 1, 'link': 0, 'next': {}, 'occ': 1})
        # Walk suffix links adding the missing ch-transitions.
        while p != -1 and ch not in self.states[p]['next']:
            self.states[p]['next'][ch] = curr
            p = self.states[p]['link']
        if p == -1:
            self.states[curr]['link'] = 0
        else:
            q = self.states[p]['next'][ch]
            if self.states[p]['len'] + 1 == self.states[q]['len']:
                self.states[curr]['link'] = q
            else:
                # Split: clone q so link lengths stay consistent; the clone
                # gets occ=0 (occurrences are propagated later).
                clone = len(self.states)
                self.states.append({
                    'len': self.states[p]['len'] + 1,
                    'next': self.states[q]['next'].copy(),
                    'link': self.states[q]['link'],
                    'occ': 0
                })
                # Redirect transitions that pointed at q to the clone.
                while p != -1 and self.states[p]['next'].get(ch) == q:
                    self.states[p]['next'][ch] = clone
                    p = self.states[p]['link']
                self.states[q]['link'] = self.states[curr]['link'] = clone
        self.last = curr

    def propagate_occurrences(self):
        # Push occurrence counts up the suffix-link tree; processing states in
        # decreasing 'len' order guarantees children are summed before parents.
        order = sorted(range(len(self.states)), key=lambda i: self.states[i]['len'], reverse=True)
        for i in order:
            link = self.states[i]['link']
            if link != -1:
                self.states[link]['occ'] += self.states[i]['occ']
|
|
|
|
|
def max_substring_value(s):
    """Return the best (occurrences x capped length) score over substrings of *s*.

    Only substrings of length >= 2 are scored; their length contribution is
    capped at len(s) - 1. Strings shorter than 2 characters, or with no
    positive score, yield 1.
    """
    n = len(s)
    if n < 2:
        return 1

    automaton = SuffixAutomaton(s)
    cap = n - 1
    best = 0

    for state in automaton.states:
        if state['len'] < 2:
            continue
        score = state['occ'] * min(state['len'], cap)
        if score > best:
            best = score

    return best if best else 1
|
|
|
|
|
# Precompute, per n-gram, [raw count, repetitiveness penalty]. The penalty
# grows with squared length, the longest same-char run, the best repeated-
# substring mass, and the count of the n-gram's FIRST distinct character
# (NOTE(review): list(Counter(...).items())[0][1] is insertion order, not the
# most common character — perhaps .most_common(1) was intended; confirm), and
# shrinks with the number of distinct characters.
sorted_n_gram_tok_counts_anti_length_net_smart_3 = {}
for i in tqdm(sorted_n_gram_tok_counts):
    sorted_n_gram_tok_counts_anti_length_net_smart_3[i] = [sorted_n_gram_tok_counts[i], (((((((len(i)**2)*longest_same_char_sequence(i)*max_substring_value(i)*list(Counter(list(i)).items())[0][1]))))/(len(set(list(i))))))]

# Rank n-grams by count / penalty^(3/5).
sorted_n_gram_tok_counts_anti_length_cube_smart_3 = dict(tqdm(sorted(sorted_n_gram_tok_counts_anti_length_net_smart_3.items(), key=lambda x: x[1][0]/(x[1][1]**(3/5)), reverse=True)))
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube_smart_3.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube_smart_3.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube_smart_2.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_sq_smart_3 = dict(tqdm(sorted(sorted_n_gram_tok_counts_anti_length_net_smart_3.items(), key=lambda x: x[1][0]/(x[1][1]**(2/5)), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq_smart_3.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq_smart_3.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq_smart_2.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_ac_cube_smart_3 = dict(tqdm(sorted(sorted_n_gram_tok_counts_anti_length_net_smart_3.items(), key=lambda x: x[1][0]/(x[1][1]**(3/6)), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_ac_cube_smart_3.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_ac_cube_smart_3.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_ac_sq_smart_3 = dict(tqdm(sorted(sorted_n_gram_tok_counts_anti_length_net_smart_3.items(), key=lambda x: x[1][0]/(x[1][1]**(2/6)), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_ac_sq_smart_3.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_ac_sq_smart_3.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_smart_3 = dict(tqdm(sorted(sorted_n_gram_tok_counts_anti_length_net_smart_3.items(), key=lambda x: x[1][0]/(x[1][1]**(1/5)), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_smart_3.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_smart_3.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_ac_smart_3 = dict(tqdm(sorted(sorted_n_gram_tok_counts_anti_length_net_smart_3.items(), key=lambda x: x[1][0]/(x[1][1]**(1/6)), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_ac_smart_3.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_ac_smart_3.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_smart_2 = dict(tqdm(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(((((((len(x[0])**2)*longest_same_char_sequence(x[0])*list(Counter(list(x[0])).items())[0][1]))))/(len(set(list(x[0]))))))**(1/4), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_smart_2.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_smart_2.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_sup_linear_smart_2 = dict(tqdm(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(((((((len(x[0])**2)*longest_same_char_sequence(x[0])*list(Counter(list(x[0])).items())[0][1]))))/(len(set(list(x[0]))))))**(1.5/4), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sup_linear_smart_2.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sup_linear_smart_2.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2 = dict(tqdm(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(((((((len(x[0])**2)*longest_same_char_sequence(x[0])*list(Counter(list(x[0])).items())[0][1]))))/(len(set(list(x[0]))))))**(2.5/4), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
import numpy as np |
|
|
|
|
|
counts = [] |
|
|
for i in tqdm(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2): |
|
|
counts.append(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2[i]) |
|
|
counts = np.array(counts) |
|
|
|
|
|
counts2 = [] |
|
|
for i in tqdm(sorted_n_gram_tok_counts_anti_length_ac_cube_smart_3): |
|
|
counts2.append(sorted_n_gram_tok_counts_anti_length_ac_cube_smart_3[i]) |
|
|
counts2 = np.array(counts2) |
|
|
|
|
|
counts2 |
|
|
|
|
|
counts2_ = counts2[:,0] / (counts2[:,1] ** (3/6)) |
|
|
counts2_ |
|
|
|
|
|
counts = counts[:131_072] |
|
|
counts2_ = counts2_[:131_072] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
counts = counts - counts.mean() |
|
|
|
|
|
counts2_ = counts2_ - counts2_.mean() |
|
|
|
|
|
counts.std() |
|
|
|
|
|
counts.mean() |
|
|
|
|
|
counts2_.mean() |
|
|
|
|
|
counts2_.std() |
|
|
|
|
|
scale = counts.std() / counts2_.std() |
|
|
scale |
|
|
|
|
|
scale = 1 |
|
|
|
|
|
# Blend the two rankings: scores from the "sup_sq_smart_2" ordering plus
# slightly down-weighted scores from the "ac_cube_smart_3" ordering.
#
# BUGFIX: the original loops never advanced `ind`, so every token received
# counts[0] / counts2_[0]; the merge test also compared the integer index
# against the string keys (`if ind in dict`), which is always False, so the
# `+=` branch never ran. Both fixed by enumerating and testing the token.
sorted_n_gram_tok_counts_anti_length_hybrid_smart = {}
for ind, i in tqdm(enumerate(list(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2.keys())[:131_072])):
    sorted_n_gram_tok_counts_anti_length_hybrid_smart[i] = counts[ind]


for ind, i in tqdm(enumerate(list(sorted_n_gram_tok_counts_anti_length_ac_cube_smart_3.keys())[:131_072])):
    if i in sorted_n_gram_tok_counts_anti_length_hybrid_smart:
        sorted_n_gram_tok_counts_anti_length_hybrid_smart[i] += scale * 0.99994575 * counts2_[ind]
    else:
        sorted_n_gram_tok_counts_anti_length_hybrid_smart[i] = scale * 0.99994575 * counts2_[ind]


# Rank the blended scores high-to-low.
sorted_n_gram_tok_counts_anti_length_hybrid_smart_sorted = dict(tqdm(sorted(sorted_n_gram_tok_counts_anti_length_hybrid_smart.items(), key=lambda x: x[1], reverse=True)))
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_hybrid_smart_sorted |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_hybrid_smart_sorted.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_hybrid_smart_sorted.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
!wget https://huggingface.co/Qwen/QwQ-32B/resolve/main/tokenizer.json |
|
|
|
|
|
import json |
|
|
with open('tokenizer.json', 'r') as file: |
|
|
data = json.load(file) |
|
|
|
|
|
data.keys() |
|
|
|
|
|
len(data['model']['vocab']) |
|
|
|
|
|
sim = 0 |
|
|
for i in tqdm(list(sorted_n_gram_tok_counts_anti_length_cube_smart_2.keys())): |
|
|
if i in data['model']['vocab'].keys(): |
|
|
sim += 1 |
|
|
sim |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(data['model']['vocab'].keys()): |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(data['model']['vocab'].keys()): |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
from collections import Counter |
|
|
import re |
|
|
|
|
|
def count_overlapping_space_ngrams(text):
    """Count overlapping runs of spaces as n-grams.

    For every maximal run of k consecutive spaces in *text*, the n-gram
    ``' ' * n`` (1 <= n <= k) occurs ``k - n + 1`` times (overlapping
    positions).

    Improvement over the original: runs are first bucketed by length with a
    Counter, so the quadratic per-run tallying is done once per *distinct*
    run length instead of once per run occurrence — a large speedup on a
    corpus with millions of short runs. Output is identical.

    Args:
        text: the string to scan.

    Returns:
        list[tuple[str, int]]: (ngram, count) pairs sorted by n-gram length.
    """
    # Bucket maximal space runs by their length.
    run_lengths = Counter(len(run) for run in re.findall(r' +', text))

    ngram_counter = Counter()
    for k, occurrences in run_lengths.items():
        # Each run of length k contributes (k - n + 1) overlapping
        # occurrences of the n-space n-gram, for every n up to k.
        for n in range(1, k + 1):
            ngram_counter[' ' * n] += (k - n + 1) * occurrences

    return sorted(ngram_counter.items(), key=lambda item: len(item[0]))
|
|
|
|
|
space_counts = count_overlapping_space_ngrams(net_corpus) |
|
|
space_counts |
|
|
|
|
|
# Fold the (ngram, count) pairs into a dict, leaving out the single space
# (single characters are tracked separately in the char counts).
#
# BUGFIX: the original guard was `if i != ' '` where `i` is a
# (ngram, count) tuple, so the comparison was always True and the single
# space was never actually skipped. Unpack the pair and test the n-gram.
space_counts_dict = {}
for ngram, count in space_counts:
    if ngram != ' ':
        space_counts_dict[ngram] = count
space_counts_dict
|
|
|
|
|
sorted_space_counts_dict = dict(tqdm(sorted(space_counts_dict.items(), key=lambda x: x[1], reverse=True))) |
|
|
|
|
|
sorted_space_counts_dict |
|
|
|
|
|
len(space_counts_dict) |
|
|
|
|
|
len(sorted_n_gram_tok_counts) |
|
|
|
|
|
for i in space_counts_dict: |
|
|
if i in sorted_n_gram_tok_counts: |
|
|
print(i) |
|
|
|
|
|
for i in space_counts_dict: |
|
|
sorted_n_gram_tok_counts[i] = space_counts_dict[i] |
|
|
|
|
|
len(sorted_n_gram_tok_counts) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2_with_sp = dict(tqdm(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(((((((len(x[0])**2)*longest_same_char_sequence(x[0])*list(Counter(list(x[0])).items())[0][1]))))/(len(set(list(x[0]))))))**(2.5/4), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2_with_sp.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2_with_sp.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_char_counts |
|
|
|
|
|
len(sorted_char_counts) |
|
|
|
|
|
top_131k_vals_final_app_with_sp_no_ind = {} |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2_with_sp.keys())[:131_072]: |
|
|
if i not in sorted_char_counts: |
|
|
top_131k_vals_final_app_with_sp_no_ind[i] = sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2_with_sp[i] |
|
|
|
|
|
len(top_131k_vals_final_app_with_sp_no_ind) |
|
|
|
|
|
top_131k_vals_final_app_with_sp_and_ind = sorted_char_counts.copy() |
|
|
j = len(top_131k_vals_final_app_with_sp_and_ind) |
|
|
for i in top_131k_vals_final_app_with_sp_no_ind: |
|
|
if j>=131_072: |
|
|
break |
|
|
top_131k_vals_final_app_with_sp_and_ind[i] = top_131k_vals_final_app_with_sp_no_ind[i] |
|
|
j += 1 |
|
|
|
|
|
len(top_131k_vals_final_app_with_sp_and_ind) |
|
|
|
|
|
sorted_top_131k_vals_final_app_with_sp_and_ind = dict(tqdm(sorted(top_131k_vals_final_app_with_sp_and_ind.items(), key=lambda x: x[1], reverse=True))) |
|
|
sorted_top_131k_vals_final_app_with_sp_and_ind |
|
|
|
|
|
def save_dict_to_json(data, filename):
    """Write *data* to *filename* as pretty-printed (indent=4) UTF-8 JSON,
    keeping non-ASCII characters literal."""
    serialized = json.dumps(data, ensure_ascii=False, indent=4)
    with open(filename, 'w', encoding='utf-8') as out_file:
        out_file.write(serialized)
|
|
|
|
|
!pwd |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Build two token->id maps over the final vocabulary: one in ranked order,
# one over an in-place shuffle of the same key list.
keys = list(sorted_top_131k_vals_final_app_with_sp_and_ind.keys())

ordered_tokeniser = {token: token_id for token_id, token in enumerate(keys)}

import random

random.shuffle(keys)

unordered_tokeniser = {token: token_id for token_id, token in enumerate(keys)}

unordered_tokeniser

ordered_tokeniser
|
|
|
|
|
save_dict_to_json(sorted_top_131k_vals_final_app_with_sp_and_ind, "count_tokenizer_0.5b_val_data_raw.json") |
|
|
|
|
|
save_dict_to_json(ordered_tokeniser, "ordered_tokenizer_0.5b_val_data_raw.json") |
|
|
|
|
|
save_dict_to_json(unordered_tokeniser, "unordered_tokenizer_0.5b_val_data_raw.json") |
|
|
|
|
|
sorted_n_gram_tok_counts['#'] |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_top_131k_vals_final_app_with_sp_and_ind.keys())[:131_072]: |
|
|
if i in sorted_n_gram_tok_counts: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
else: |
|
|
if sorted_char_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_char_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2_with_sp.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
try: |
|
|
print(list(sorted_n_gram_tok_counts.keys()).index(last_i)) |
|
|
except: |
|
|
pass |
|
|
|
|
|
top_all_vals_final_app_with_sp_no_ind = {} |
|
|
for i in tqdm(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2_with_sp.keys()): |
|
|
if i not in sorted_char_counts: |
|
|
top_all_vals_final_app_with_sp_no_ind[i] = sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2_with_sp[i] |
|
|
|
|
|
len(top_all_vals_final_app_with_sp_no_ind) |
|
|
|
|
|
for i in tqdm(top_all_vals_final_app_with_sp_no_ind): |
|
|
if i in sorted_char_counts: |
|
|
print(i) |
|
|
|
|
|
print(list(sorted_char_counts.keys())[-1]) |
|
|
|
|
|
len(sorted_char_counts) |
|
|
|
|
|
net_corpus_keys_order = list(sorted_char_counts.keys()) |
|
|
net_corpus_keys_order.extend(top_all_vals_final_app_with_sp_no_ind) |
|
|
|
|
|
net_corpus_keys_order |
|
|
|
|
|
net_corpus = sorted_char_counts | top_all_vals_final_app_with_sp_no_ind |
|
|
net_corpus |
|
|
|
|
|
len(net_corpus) |
|
|
|
|
|
save_dict_to_json(net_corpus, "net_corpus_0.5b_val.json") |
|
|
|
|
|
save_dict_to_json(sorted_char_counts, "sorted_char_counts_0.5b_val.json") |
|
|
|
|
|
save_dict_to_json(top_all_vals_final_app_with_sp_no_ind, "top_all_vals_final_app_with_sp_no_ind_0.5b_val.json") |
|
|
|
|
|
with open("net_corpus_keys_order_0.5b_val.txt", 'w') as file: |
|
|
for item in tqdm(net_corpus_keys_order): |
|
|
file.write(str(item) + '\n') |
|
|
|
|
|
!git clone https://github.com/Tasmay-Tibrewal/tokenizer.git |
|
|
|
|
|
import json |
|
|
|
|
|
if not continue_seamless: |
|
|
with open("tokenizer/net_corpus_0.5b_val.json", "r", encoding="utf-8") as file: |
|
|
net_corpus = json.load(file) |
|
|
|
|
|
len(net_corpus) |
|
|
|
|
|
net_corpus |
|
|
|
|
|
from tqdm.auto import tqdm |
|
|
|
|
|
net_corpus_keys_pos = {} |
|
|
for id,i in tqdm(enumerate(net_corpus)): |
|
|
net_corpus_keys_pos[i] = id |
|
|
net_corpus_keys_pos |
|
|
|
|
|
len(net_corpus_keys_pos) |
|
|
|
|
|
mean_len_term = 0 |
|
|
for i in net_corpus: |
|
|
mean_len_term += len(i) * (len(i) + 1) / 2 |
|
|
mean_len_term /= len(net_corpus) |
|
|
mean_len_term |
|
|
|
|
|
total_its = mean_len_term*len(net_corpus) |
|
|
round(total_its) |
|
|
|
|
|
parent_strings_dict = {} |
|
|
for id, i in tqdm(enumerate(net_corpus)): |
|
|
if len(i) == 1: |
|
|
if i not in parent_strings_dict: |
|
|
parent_strings_dict[i] = [id] |
|
|
else: |
|
|
parent_strings_dict[i].append(id) |
|
|
continue |
|
|
elif len(i)>50 or net_corpus[i] < 10: |
|
|
continue |
|
|
for j in range(len(i)): |
|
|
for k in range(j + 1, len(i) + 1): |
|
|
sub_word = i[j:k] |
|
|
if sub_word not in parent_strings_dict: |
|
|
parent_strings_dict[sub_word] = [id] |
|
|
else: |
|
|
parent_strings_dict[sub_word].append(id) |
|
|
|
|
|
parent_strings_dict |
|
|
|
|
|
len(parent_strings_dict) |
|
|
|
|
|
keys = list(net_corpus.keys()) |
|
|
keys |
|
|
|
|
|
def test_max_num(max_parent_id):
    """Dry-run the vocabulary pruning pass for a candidate id threshold.

    Walks net_corpus in its (ranking) order, keeping tokens until 131_072
    have been accepted. A multi-char token is *dropped* when some "parent"
    entry (a corpus entry listed for it in parent_strings_dict) has a count
    greater than 0.7x the token's own count AND that parent's id is below
    `max_parent_id`. Parents above the threshold only record statistics.

    Returns:
        (last_occ, max_parent_id_occ, min_parent_id_occ_outside):
        index of the last accepted token, the largest parent id that caused
        a removal, and the smallest parent id seen at/above the threshold
        (-1 if none was seen). The caller binary-searches max_parent_id
        until last_occ falls strictly between the latter two.

    NOTE(review): relies on module globals net_corpus, parent_strings_dict,
    keys, and tqdm; `net_corpus_small_adj` built here is local and discarded.
    """
    rem_counts = 0              # tokens removed because of a dominant parent
    single_max_occ = 0          # max self/parent hits observed for one token
    total_added = 0             # tokens accepted so far (capped at 131_072)
    last_occ = 0                # corpus index of the most recently accepted token
    max_parent_id_occ = 0       # largest parent id that triggered a removal
    min_parent_id_occ_outside = -1  # smallest dominant parent id >= threshold
    net_corpus_small_adj = {}
    # NOTE: the loop variable `id` shadows the builtin of the same name.
    for id,i in tqdm(enumerate(net_corpus)):
        if total_added >= 131_072:
            break
        elif total_added % 10_000 == 0:
            # lightweight progress marker every 10K accepted tokens
            print(f"{total_added/1000:.0f}K ", end="")
        if i not in parent_strings_dict:
            # token has no substring index entry (too long / too rare); skip
            print(f"'{i[:50]}'", len(i), net_corpus[i])
            continue
        single_occ = 0
        add = 1
        if len(i) > 1:
            for parent_id in parent_strings_dict[i]:
                parent = keys[parent_id]
                if len(i) > len(parent):
                    # index inconsistency: a parent must contain the token
                    print("Some issue, less or same length",i, parent)
                    continue
                elif len(i) == len(parent) and i == parent:
                    # the token itself appears in its own parent list
                    single_occ += 1
                    continue
                elif len(i) == len(parent):
                    print("Child parent length same but child != parent", i, parent)
                    continue
                # parent dominates the token if it occurs > 70% as often
                if net_corpus[parent] > net_corpus[i] * 0.7 :
                    if parent_id < max_parent_id:
                        if parent_id > max_parent_id_occ:
                            max_parent_id_occ = parent_id
                        single_occ += 1
                        rem_counts += 1
                        add = 0
                        break
                    else:
                        # dominant parent outside the threshold: track the
                        # smallest such id for the caller's binary search
                        if min_parent_id_occ_outside == -1:
                            min_parent_id_occ_outside = parent_id
                        if parent_id < min_parent_id_occ_outside:
                            min_parent_id_occ_outside = parent_id
        if add == 1:
            last_occ = id
            total_added += 1
            net_corpus_small_adj[i] = net_corpus[i]
        if single_occ > single_max_occ:
            single_max_occ = single_occ
    return last_occ, max_parent_id_occ, min_parent_id_occ_outside
|
|
|
|
|
# Binary-search a parent-id threshold such that the pruning pass in
# test_max_num stops exactly between the largest parent id it accepted and
# the smallest it rejected.
#
# Fixes: the search bounds were named `min`/`max`, shadowing the builtins
# for the rest of the session; the iteration counter `it` was never
# incremented, so every log line reported "It: 0".
max_parent_id = len(net_corpus)
lo = 131_072
hi = max_parent_id
it = 0
while True:
    last_occ, max_parent_id_occ, min_parent_id_occ_outside = test_max_num(max_parent_id)
    print(f"\nIt: {it}, Threshold: {max_parent_id}, Looped till: {last_occ}")
    print(f"Inner max id: {max_parent_id_occ}, Outer min id: {min_parent_id_occ_outside}")
    print(f"Id b/w inner max and outer min: {(last_occ < min_parent_id_occ_outside and last_occ > max_parent_id_occ)}")
    if last_occ < min_parent_id_occ_outside and last_occ > max_parent_id_occ:
        break
    # Narrow the bracket around the stopping index and bisect.
    if last_occ < max_parent_id:
        hi = last_occ
    elif last_occ > max_parent_id:
        lo = last_occ
    max_parent_id = (lo + hi) / 2
    it += 1
    print(f"New overall min: {lo}, max: {hi}\nNew Max parent id: {max_parent_id}\n{'='*40}\n\n")
|
|
|
|
|
max_parent_id |
|
|
|
|
|
import math |
|
|
max_parent_id = math.ceil(max_parent_id) |
|
|
max_parent_id |
|
|
|
|
|
rem_counts = 0 |
|
|
single_max_occ = 0 |
|
|
total_added = 0 |
|
|
last_occ = 0 |
|
|
net_corpus_small_adj = {} |
|
|
for id,i in tqdm(enumerate(net_corpus)): |
|
|
if total_added >= 131_072: |
|
|
break |
|
|
elif total_added % 10_000 == 0: |
|
|
print(f"{total_added/1000:.0f}K ", end="") |
|
|
if i not in parent_strings_dict: |
|
|
print(f"'{i[:50]}'", len(i), net_corpus[i]) |
|
|
continue |
|
|
single_occ = 0 |
|
|
add = 1 |
|
|
if len(i) > 1: |
|
|
for parent_id in parent_strings_dict[i]: |
|
|
parent = keys[parent_id] |
|
|
if len(i) > len(parent): |
|
|
print("Some issue, less or same length",i, parent) |
|
|
continue |
|
|
elif len(i) == len(parent) and i == parent: |
|
|
single_occ += 1 |
|
|
continue |
|
|
elif len(i) == len(parent): |
|
|
print("Child parent length same but child != parent", i, parent) |
|
|
continue |
|
|
if net_corpus[parent] > net_corpus[i] * 0.7 : |
|
|
if parent_id < max_parent_id: |
|
|
|
|
|
|
|
|
single_occ += 1 |
|
|
rem_counts += 1 |
|
|
add = 0 |
|
|
break |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if add == 1: |
|
|
last_occ = id |
|
|
total_added += 1 |
|
|
net_corpus_small_adj[i] = net_corpus[i] |
|
|
if single_occ > single_max_occ: |
|
|
single_max_occ = single_occ |
|
|
|
|
|
single_max_occ |
|
|
|
|
|
last_occ |
|
|
|
|
|
total_added |
|
|
|
|
|
rem_counts |
|
|
|
|
|
rem_counts + total_added, rem_counts + total_added == last_occ + 1 |
|
|
|
|
|
len(net_corpus_small_adj) |
|
|
|
|
|
net_corpus_small_adj |
|
|
|
|
|
first_index = -1 |
|
|
for id, i in tqdm(enumerate(net_corpus)): |
|
|
if id > last_occ: |
|
|
break |
|
|
if len(i) == 1: |
|
|
if i not in parent_strings_dict: |
|
|
pass |
|
|
else: |
|
|
pass |
|
|
continue |
|
|
elif len(i)>50 or net_corpus[i] < 10: |
|
|
if first_index == -1: |
|
|
first_index = id |
|
|
print(id, len(i), net_corpus[i], i) |
|
|
continue |
|
|
else: |
|
|
pass |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = net_corpus[' '] |
|
|
last_i = None |
|
|
for i in list(net_corpus_small_adj.keys())[:131_072]: |
|
|
if net_corpus[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = net_corpus[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(net_corpus_small_adj.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
net_corpus['ља'] |
|
|
|
|
|
net_corpus[last_i] |
|
|
|
|
|
sorted_net_corpus_small_adj = dict(tqdm(sorted(net_corpus_small_adj.items(), key=lambda x: x[1], reverse=True))) |
|
|
sorted_net_corpus_small_adj |
|
|
|
|
|
# Token->id maps for the pruned vocabulary: one in ranked order, one over a
# shuffled copy of the key order.
keys = list(net_corpus_small_adj.keys())

ordered_tokeniser = {tok: pos for pos, tok in enumerate(keys)}

import random

random.shuffle(keys)

unordered_tokeniser = {tok: pos for pos, tok in enumerate(keys)}

ordered_tokeniser

unordered_tokeniser
|
|
|
|
|
def save_dict_to_json(data, filename):
    """Serialize *data* to *filename* as indented UTF-8 JSON with non-ASCII
    characters preserved verbatim."""
    text = json.dumps(data, ensure_ascii=False, indent=4)
    with open(filename, 'w', encoding='utf-8') as handle:
        handle.write(text)
|
|
|
|
|
!pwd |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
save_dict_to_json(sorted_net_corpus_small_adj, "count_tokenizer_0.5b_val_data.json") |
|
|
|
|
|
save_dict_to_json(ordered_tokeniser, "ordered_tokenizer_0.5b_val_data.json") |
|
|
|
|
|
save_dict_to_json(unordered_tokeniser, "unordered_tokenizer_0.5b_val_data.json") |
|
|
|
|
|
"""# Tokenisation Whole (Experimentation) - 1b"""
|
|
|
|
|
!pip install zstandard |
|
|
|
|
|
!pip install datasets |
|
|
|
|
|
from huggingface_hub import hf_hub_download |
|
|
from huggingface_hub import list_repo_files |
|
|
from datasets import load_dataset |
|
|
|
|
|
|
|
|
!mkdir SlimPajama-627B |
|
|
|
|
|
!mkdir test |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
!git init |
|
|
!git remote add -f origin https://huggingface.co/datasets/cerebras/SlimPajama-627B |
|
|
!git config core.sparseCheckout true |
|
|
!echo "test/*" >> .git/info/sparse-checkout |
|
|
!git pull origin main |
|
|
|
|
|
|
|
|
|
|
|
!mkdir validation |
|
|
|
|
|
!git init |
|
|
!git remote add -f origin https://huggingface.co/datasets/cerebras/SlimPajama-627B |
|
|
!git config core.sparseCheckout true |
|
|
!echo "validation/*" >> .git/info/sparse-checkout |
|
|
!git pull origin main |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
!pwd |
|
|
|
|
|
!pip install chardet |
|
|
|
|
|
import os |
|
|
import json |
|
|
import chardet |
|
|
from tqdm.auto import tqdm |
|
|
from collections import Counter |
|
|
|
|
|
!ls -lh /content/SlimPajama-627B/test/test |
|
|
|
|
|
!ls -lh /content/SlimPajama-627B/test/test/chunk1 |
|
|
|
|
|
!ls -l /content/SlimPajama-627B/test/test/chunk1 | grep -v '^d' | wc -l |
|
|
|
|
|
import os |
|
|
import json |
|
|
import zstandard as zstd |
|
|
from tqdm import tqdm |
|
|
|
|
|
|
|
|
dataset_dir = "/content/SlimPajama-627B/test/test" |
|
|
|
|
|
|
|
|
corpus = [] |
|
|
|
|
|
|
|
|
def read_jsonl_zst(file_path):
    """Yield the "text" field of each JSON object in a .jsonl.zst file.

    Improvement: the decompressed stream is wrapped in a TextIOWrapper and
    iterated line by line, instead of materialising the entire decompressed
    file in memory via reader.read() — SlimPajama chunks decompress to
    hundreds of MB each.

    Args:
        file_path: path to a zstd-compressed JSON-lines file.

    Yields:
        str: the "text" value of each well-formed JSON object line.
    """
    import io  # local import keeps this notebook cell self-contained

    with open(file_path, "rb") as f:
        dctx = zstd.ZstdDecompressor()
        with dctx.stream_reader(f) as reader:
            text_stream = io.TextIOWrapper(reader, encoding="utf-8")
            for line in text_stream:
                try:
                    data = json.loads(line)
                    if isinstance(data, dict) and "text" in data:
                        yield data["text"]
                except json.JSONDecodeError:
                    print(f"Skipping malformed JSON in {file_path}")
|
|
|
|
|
|
|
|
for root, _, files in tqdm(os.walk(dataset_dir)): |
|
|
for file in tqdm(files): |
|
|
file_path = os.path.join(root, file) |
|
|
|
|
|
|
|
|
if file.endswith(".jsonl.zst"): |
|
|
corpus.extend(read_jsonl_zst(file_path)) |
|
|
|
|
|
|
|
|
print(f"Extracted {len(corpus)} text entries.") |
|
|
|
|
|
len(corpus) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
!pwd |
|
|
|
|
|
!ls -lh /content/SlimPajama-627B/validation/validation |
|
|
|
|
|
!ls -lh /content/SlimPajama-627B/validation/validation/chunk1 |
|
|
|
|
|
!ls -l /content/SlimPajama-627B/validation/validation/chunk1 | grep -v '^d' | wc -l |
|
|
|
|
|
|
|
|
dataset_dir = "/content/SlimPajama-627B/validation/validation" |
|
|
|
|
|
|
|
|
def read_jsonl_zst(file_path):
    """Yield the "text" field of each JSON object in a .jsonl.zst file.

    Improvement: decompressed bytes are consumed line by line through a
    TextIOWrapper rather than loading the whole decompressed file into
    memory with reader.read().

    Args:
        file_path: path to a zstd-compressed JSON-lines file.

    Yields:
        str: the "text" value of each well-formed JSON object line.
    """
    import io  # local import keeps this notebook cell self-contained

    with open(file_path, "rb") as f:
        dctx = zstd.ZstdDecompressor()
        with dctx.stream_reader(f) as reader:
            text_stream = io.TextIOWrapper(reader, encoding="utf-8")
            for line in text_stream:
                try:
                    data = json.loads(line)
                    if isinstance(data, dict) and "text" in data:
                        yield data["text"]
                except json.JSONDecodeError:
                    print(f"Skipping malformed JSON in {file_path}")
|
|
|
|
|
|
|
|
for root, _, files in tqdm(os.walk(dataset_dir)): |
|
|
for file in tqdm(files): |
|
|
file_path = os.path.join(root, file) |
|
|
|
|
|
|
|
|
if file.endswith(".jsonl.zst"): |
|
|
corpus.extend(read_jsonl_zst(file_path)) |
|
|
|
|
|
|
|
|
print(f"Extracted {len(corpus)} text entries.") |
|
|
|
|
|
len(corpus) |
|
|
|
|
|
corpus[0] |
|
|
|
|
|
corpus[1][-50:] |
|
|
|
|
|
corpus[2] |
|
|
|
|
|
corpus[len(corpus)//2][-50:] |
|
|
|
|
|
corpus[len(corpus)//2+1] |
|
|
|
|
|
corpus[len(corpus)//2+2] |
|
|
|
|
|
len(corpus) |
|
|
|
|
|
# Split the corpus into four roughly equal slices, and join everything into
# one big space-separated string. (Note: joining the quarters separately
# drops the separator at the three boundaries relative to net_corpus.)
quarter_len = len(corpus) // 4

first_quarter_corpus = " ".join(corpus[:quarter_len])

second_quarter_corpus = " ".join(corpus[quarter_len:2 * quarter_len])

third_quarter_corpus = " ".join(corpus[2 * quarter_len:3 * quarter_len])

fourth_quarter_corpus = " ".join(corpus[3 * quarter_len:])

net_corpus = " ".join(corpus)
|
|
|
|
|
|
|
|
net_corpus[:500] |
|
|
|
|
|
len(net_corpus) |
|
|
|
|
|
!pwd |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Persist the corpus splits. Text files are written with an explicit UTF-8
# encoding: the corpus contains arbitrary Unicode, and relying on the
# platform/locale default encoding can raise UnicodeEncodeError (or vary
# silently between machines).
with open("whole_corpus_1b_val_test.txt", "w", encoding="utf-8") as file:
    file.write(net_corpus)


with open("first_quarter_corpus_1b_val_test.txt", "w", encoding="utf-8") as file:
    file.write(first_quarter_corpus)


with open("second_quarter_corpus_1b_val_test.txt", "w", encoding="utf-8") as file:
    file.write(second_quarter_corpus)


with open("third_quarter_corpus_1b_val_test.txt", "w", encoding="utf-8") as file:
    file.write(third_quarter_corpus)


with open("fourth_quarter_corpus_1b_val_test.txt", "w", encoding="utf-8") as file:
    file.write(fourth_quarter_corpus)


# json.dump defaults to ensure_ascii=True (pure-ASCII output), so these were
# never at risk; the explicit encoding just keeps every open() consistent.
with open("whole_corpus_ind_texts_1b_val_test.json", "w", encoding="utf-8") as file:
    json.dump(corpus, file, indent=4)


with open("first_quarter_corpus_ind_texts_1b_val_test.json", "w", encoding="utf-8") as file:
    json.dump(corpus[:len(corpus)//4], file, indent=4)


with open("second_quarter_corpus_ind_texts_1b_val_test.json", "w", encoding="utf-8") as file:
    json.dump(corpus[len(corpus)//4:2*(len(corpus)//4)], file, indent=4)


with open("third_quarter_corpus_ind_texts_1b_val_test.json", "w", encoding="utf-8") as file:
    json.dump(corpus[2*(len(corpus)//4):3*(len(corpus)//4)], file, indent=4)


with open("fourth_quarter_corpus_ind_texts_1b_val_test.json", "w", encoding="utf-8") as file:
    json.dump(corpus[3*(len(corpus)//4):], file, indent=4)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
char_counts = Counter(net_corpus) |
|
|
|
|
|
len(char_counts) |
|
|
|
|
|
sorted_char_counts = dict(sorted(char_counts.items(), key=lambda item: item[1], reverse=True)) |
|
|
|
|
|
# Sanity total: summing per-character counts should reproduce len(net_corpus).
# (Builtin sum() is deliberately avoided — a later cell shadows the name.)
total_char_occ = 0
for occurrences in sorted_char_counts.values():
    total_char_occ += occurrences
total_char_occ
|
|
|
|
|
sorted_char_counts[' '] |
|
|
|
|
|
sorted_char_counts |
|
|
|
|
|
len(net_corpus) |
|
|
|
|
|
import regex |
|
|
import re |
|
|
|
|
|
def split_text_into_words(text):
    """Tokenise *text* into "words": maximal runs of Unicode letters, digits
    or underscores. Everything else acts as a separator and is discarded,
    as are the empty strings regex.split produces at the boundaries."""
    pieces = regex.split(r'[^\p{L}\p{N}_]+', text)
    return list(filter(None, pieces))
|
|
|
|
|
sp_ch_sep_toks = split_text_into_words(net_corpus) |
|
|
len(sp_ch_sep_toks) |
|
|
|
|
|
word_counts = Counter(sp_ch_sep_toks) |
|
|
|
|
|
len(word_counts) |
|
|
|
|
|
word_counts = word_counts | sorted_char_counts |
|
|
|
|
|
len(word_counts) |
|
|
|
|
|
word_counts |
|
|
|
|
|
sorted_word_counts = dict(sorted(word_counts.items(), key=lambda x: x[1], reverse=True)) |
|
|
sorted_word_counts |
|
|
|
|
|
max_len = 0 |
|
|
for word in word_counts: |
|
|
if len(word) > max_len: |
|
|
max_len = len(word) |
|
|
print(max_len) |
|
|
|
|
|
len(sorted_word_counts) |
|
|
|
|
|
# Aggregate total word occurrences by word length.
word_len_count_map = {}
for word, occ in word_counts.items():
    length = len(word)
    word_len_count_map[length] = word_len_count_map.get(length, 0) + occ


# Rank lengths by their total occurrence mass, highest first.
sorted_word_len_count_map = dict(sorted(word_len_count_map.items(), key=lambda kv: kv[1], reverse=True))
sorted_word_len_count_map
|
|
|
|
|
len(sorted_word_len_count_map) |
|
|
|
|
|
# Total word occurrences across all lengths.
# BUGFIX/idiom: the accumulator was named `sum`, shadowing the builtin
# sum() for the remainder of the notebook session.
total_word_occ = 0
for length in sorted_word_len_count_map:
    total_word_occ += sorted_word_len_count_map[length]
total_word_occ
|
|
|
|
|
lengths = {} |
|
|
for word in list(sorted_word_counts.keys())[:131_072]: |
|
|
if len(word) not in lengths: |
|
|
lengths[len(word)] = [sorted_word_counts[word],sorted_word_counts[word],1] |
|
|
else: |
|
|
lengths[len(word)][0] += sorted_word_counts[word] |
|
|
lengths[len(word)][2] += 1 |
|
|
if sorted_word_counts[word] > lengths[len(word)][1]: |
|
|
lengths[len(word)][1] = sorted_word_counts[word] |
|
|
lengths |
|
|
|
|
|
len(lengths) |
|
|
|
|
|
sorted_lengths_max_ind_tok_counts = dict(sorted(lengths.items(), key=lambda item: item[1][1], reverse=True)) |
|
|
sorted_lengths_max_ind_tok_counts |
|
|
|
|
|
from collections import defaultdict |
|
|
|
|
|
def count_char_ngrams_in_words(word_list, max_n=14):
    """
    Count character-level n-grams for each word in a list.

    Args:
        word_list (list): List of words to analyze
        max_n (int): Maximum size of n-grams to count (default: 14)

    Returns:
        dict: Nested dictionary with n as outer key and
              inner dictionaries containing n-grams and their counts

    Note: n only runs up to len(word) - 1, so an n-gram spanning the whole
    word is never counted here (whole words are tallied separately).
    """
    per_length = defaultdict(lambda: defaultdict(int))

    for token in tqdm(word_list):
        # Largest window that is strictly shorter than the token itself.
        limit = min(max_n, len(token) - 1)
        for size in range(2, limit + 1):
            last_start = len(token) - size
            for start in range(last_start + 1):
                per_length[size][token[start:start + size]] += 1

    return {size: dict(table) for size, table in per_length.items()}
|
|
|
|
|
n_gram = count_char_ngrams_in_words(sp_ch_sep_toks,max_n=15) |
|
|
|
|
|
threshold = 56_515 |
|
|
count_above_threshold = 0 |
|
|
for n in n_gram: |
|
|
for i in n_gram[n]: |
|
|
if n_gram[n][i] > threshold: |
|
|
count_above_threshold += 1 |
|
|
count_above_threshold |
|
|
|
|
|
threshold = 141_993 |
|
|
count_above_threshold = 0 |
|
|
for n in n_gram: |
|
|
for i in n_gram[n]: |
|
|
if n_gram[n][i] > threshold: |
|
|
count_above_threshold += 1 |
|
|
count_above_threshold |
|
|
|
|
|
t_sum_below = 0 |
|
|
for i in sorted_lengths_max_ind_tok_counts: |
|
|
if sorted_lengths_max_ind_tok_counts[i][1] < threshold: |
|
|
t_sum_below += sorted_lengths_max_ind_tok_counts[i][2] |
|
|
t_sum_below |
|
|
|
|
|
t_sum_below |
|
|
|
|
|
n_gram_tok = {} |
|
|
for n in tqdm(n_gram): |
|
|
for i in n_gram[n]: |
|
|
n_gram_tok[i] = n_gram[n][i] |
|
|
n_gram_tok |
|
|
|
|
|
sorted_n_gram_tok_counts = dict(sorted(n_gram_tok.items(), key=lambda item: item[1], reverse=True)) |
|
|
sorted_n_gram_tok_counts |
|
|
|
|
|
sorted_word_counts |
|
|
|
|
|
n_gram_lengths = {} |
|
|
for i in list(sorted_n_gram_tok_counts.keys())[:131_072]: |
|
|
if len(i) not in n_gram_lengths: |
|
|
n_gram_lengths[len(i)] = [sorted_n_gram_tok_counts[i],sorted_n_gram_tok_counts[i],1] |
|
|
else: |
|
|
n_gram_lengths[len(i)][0] += sorted_n_gram_tok_counts[i] |
|
|
n_gram_lengths[len(i)][2] += 1 |
|
|
if sorted_n_gram_tok_counts[i] > n_gram_lengths[len(i)][1]: |
|
|
n_gram_lengths[len(i)][1] = sorted_n_gram_tok_counts[i] |
|
|
n_gram_lengths |
|
|
|
|
|
sorted_n_gram_lengths_tok_counts = dict(sorted(n_gram_lengths.items(), key=lambda item: item[1][1], reverse=True)) |
|
|
sorted_n_gram_lengths_tok_counts |
|
|
|
|
|
sorted_lengths_max_ind_tok_counts |
|
|
|
|
|
import regex |
|
|
from collections import Counter |
|
|
|
|
|
def count_special_char_ngrams(text, max_n):
    """
    Count occurrences of character-level ngrams (only special characters,
    excluding whitespace) with n > 1.

    Scans each maximal run of characters that are neither word characters
    nor whitespace, and tallies every substring of length 2..max_n inside it.

    Parameters:
        text (str): The input text.
        max_n (int): The highest n-gram length to consider.

    Returns:
        dict: A mapping of special character ngrams to their counts.
    """
    special_pattern = r'[^\p{L}\p{N}_\s]+'

    tally = Counter()

    for hit in tqdm(regex.finditer(special_pattern, text)):
        run = hit.group()
        run_len = len(run)
        # Window sizes are capped both by the run itself and by max_n.
        top = min(run_len, max_n)
        for size in range(2, top + 1):
            for start in range(run_len - size + 1):
                tally[run[start:start + size]] += 1

    return dict(tally)
|
|
|
|
|
sp_tok_n_grams = count_special_char_ngrams(net_corpus, 1000) |
|
|
sp_tok_n_grams |
|
|
|
|
|
sorted_sp_tok_n_grams = dict(sorted(sp_tok_n_grams.items(), key=lambda x: x[1], reverse=True)) |
|
|
sorted_sp_tok_n_grams |
|
|
|
|
|
len(sorted_sp_tok_n_grams) |
|
|
|
|
|
sorted_sp_tok_n_grams_len_count_map = {} |
|
|
for sp_tok in sorted_sp_tok_n_grams: |
|
|
if len(sp_tok) not in sorted_sp_tok_n_grams_len_count_map: |
|
|
sorted_sp_tok_n_grams_len_count_map[len(sp_tok)] = sorted_sp_tok_n_grams[sp_tok] |
|
|
else: |
|
|
sorted_sp_tok_n_grams_len_count_map[len(sp_tok)] += sorted_sp_tok_n_grams[sp_tok] |
|
|
|
|
|
|
|
|
|
|
|
sorted_sorted_sp_tok_n_grams_len_count_map = dict(sorted(sorted_sp_tok_n_grams_len_count_map.items(), key=lambda x: x[1], reverse=True)) |
|
|
sorted_sorted_sp_tok_n_grams_len_count_map |
|
|
|
|
|
len(sorted_sp_tok_n_grams_len_count_map) |
|
|
|
|
|
# Total special-character n-gram occurrences across all lengths.
# Renamed from `sum`, which shadowed the builtin of the same name.
total_sp_ngram_count = 0
for length in sorted_sp_tok_n_grams_len_count_map:
    total_sp_ngram_count += sorted_sp_tok_n_grams_len_count_map[length]

total_sp_ngram_count
|
|
|
|
|
sp_token_ngram_lengths = {} |
|
|
for sp_tok in list(sorted_sp_tok_n_grams.keys())[:131_072]: |
|
|
if len(sp_tok) not in sp_token_ngram_lengths: |
|
|
sp_token_ngram_lengths[len(sp_tok)] = [sorted_sp_tok_n_grams[sp_tok],sorted_sp_tok_n_grams[sp_tok],1] |
|
|
else: |
|
|
sp_token_ngram_lengths[len(sp_tok)][0] += sorted_sp_tok_n_grams[sp_tok] |
|
|
sp_token_ngram_lengths[len(sp_tok)][2] += 1 |
|
|
if sorted_sp_tok_n_grams[sp_tok] > sp_token_ngram_lengths[len(sp_tok)][1]: |
|
|
sp_token_ngram_lengths[len(sp_tok)][1] = sorted_sp_tok_n_grams[sp_tok] |
|
|
sp_token_ngram_lengths |
|
|
|
|
|
def max_contiguous_occurrences(corpus):
    """
    Return, for every character in *corpus*, the length of its longest
    contiguous run.

    Args:
        corpus (str): Text to scan.

    Returns:
        dict: {char: longest run length}.  Empty input returns {}.
    """
    max_runs = {}

    # Guard: the original indexed corpus[0] unconditionally and raised
    # IndexError on an empty corpus.
    if not corpus:
        return max_runs

    current_char = corpus[0]
    current_run = 1

    for ch in tqdm(corpus[1:]):
        if ch == current_char:
            current_run += 1
        else:
            # Close out the finished run before starting a new one.
            max_runs[current_char] = max(max_runs.get(current_char, 0), current_run)
            current_char = ch
            current_run = 1

    # Flush the final run.
    max_runs[current_char] = max(max_runs.get(current_char, 0), current_run)

    return max_runs
|
|
|
|
|
unique_char_list = sorted_char_counts.keys() |
|
|
|
|
|
max_occ_map = max_contiguous_occurrences(net_corpus) |
|
|
|
|
|
|
|
|
sorted_max_occ_map = dict(sorted(max_occ_map.items(), key=lambda x: x[1], reverse=True)) |
|
|
sorted_max_occ_map |
|
|
|
|
|
sorted_word_counts |
|
|
|
|
|
sorted_n_gram_tok_counts |
|
|
|
|
|
sorted_sp_tok_n_grams |
|
|
|
|
|
len(sorted_word_counts) |
|
|
|
|
|
len(sorted_n_gram_tok_counts) |
|
|
|
|
|
3809463 + 49495136 |
|
|
|
|
|
len(sorted_sp_tok_n_grams) |
|
|
|
|
|
for i in tqdm(sorted_word_counts): |
|
|
if i in sorted_sp_tok_n_grams: |
|
|
print(i) |
|
|
|
|
|
for i in tqdm(sorted_n_gram_tok_counts): |
|
|
if i in sorted_sp_tok_n_grams: |
|
|
print(i) |
|
|
|
|
|
for i in tqdm(sorted_sp_tok_n_grams): |
|
|
if i in sorted_word_counts: |
|
|
print(i) |
|
|
|
|
|
if i in sorted_n_gram_tok_counts: |
|
|
print(i) |
|
|
|
|
|
for i in tqdm(sorted_word_counts): |
|
|
if i not in sorted_n_gram_tok_counts: |
|
|
sorted_n_gram_tok_counts[i] = sorted_word_counts[i] |
|
|
continue |
|
|
|
|
|
sorted_n_gram_tok_counts[i] += sorted_word_counts[i] |
|
|
|
|
|
sorted_n_gram_tok_counts = dict(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1], reverse=True)) |
|
|
|
|
|
sorted_n_gram_tok_counts |
|
|
|
|
|
len(sorted_n_gram_tok_counts) |
|
|
|
|
|
for i in tqdm(sorted_n_gram_tok_counts): |
|
|
if i in sorted_sp_tok_n_grams: |
|
|
print(i) |
|
|
|
|
|
for i in tqdm(sorted_sp_tok_n_grams): |
|
|
if i in sorted_n_gram_tok_counts: |
|
|
print(i) |
|
|
|
|
|
for i in list(sorted_n_gram_tok_counts.keys())[:131_072]: |
|
|
if len(set(list(i))) == 1 and len(i) > 3: |
|
|
print(i, len(i)) |
|
|
|
|
|
n_gram_lengths = {} |
|
|
for i in list(sorted_n_gram_tok_counts.keys())[:131_072]: |
|
|
if len(i) not in n_gram_lengths: |
|
|
n_gram_lengths[len(i)] = [sorted_n_gram_tok_counts[i],sorted_n_gram_tok_counts[i],1] |
|
|
else: |
|
|
n_gram_lengths[len(i)][0] += sorted_n_gram_tok_counts[i] |
|
|
n_gram_lengths[len(i)][2] += 1 |
|
|
if sorted_n_gram_tok_counts[i] > n_gram_lengths[len(i)][1]: |
|
|
n_gram_lengths[len(i)][1] = sorted_n_gram_tok_counts[i] |
|
|
n_gram_lengths |
|
|
|
|
|
for word in tqdm(sorted_n_gram_tok_counts): |
|
|
if word.strip(' \n').isnumeric() and sorted_n_gram_tok_counts[word] > 56_000: |
|
|
print(f"{word}:{sorted_word_counts[word]}") |
|
|
|
|
|
len(sorted_n_gram_tok_counts) |
|
|
|
|
|
len(sorted_sp_tok_n_grams) |
|
|
|
|
|
52307513 + 696425 |
|
|
|
|
|
sorted_n_gram_tok_counts = dict(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1], reverse=True)) |
|
|
|
|
|
sorted_n_gram_tok_counts = sorted_n_gram_tok_counts | sorted_sp_tok_n_grams |
|
|
len(sorted_n_gram_tok_counts) |
|
|
|
|
|
for i in list(sorted_sp_tok_n_grams.keys())[:131_072]: |
|
|
if len(set(list(i))) == 1 and len(i) > 100: |
|
|
print(i[:5], len(i), sorted_sp_tok_n_grams[i], len(i)*sorted_sp_tok_n_grams[i]) |
|
|
|
|
|
top_tokens = {} |
|
|
|
|
|
count = 0 |
|
|
c = 0 |
|
|
s = [0,0,0] |
|
|
for i in list(sorted_n_gram_tok_counts.keys()): |
|
|
if count >= 131_072: |
|
|
break |
|
|
if i in sorted_sp_tok_n_grams: |
|
|
c += 1 |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
if len(set(list(i))) > 1: |
|
|
print(i, sorted_n_gram_tok_counts[i]) |
|
|
s[0] += 1 |
|
|
if len(i) > 15: |
|
|
s[1] += 1 |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
else: |
|
|
count += 1 |
|
|
c, count, s |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length = dict(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(len(x[0])), reverse=True)) |
|
|
|
|
|
max_length = 0 |
|
|
char_count = 0 |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
|
|
|
char_count += 1 |
|
|
char_count, max_length |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_sq = dict(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(len(x[0])**2), reverse=True)) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
max_length = 0 |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_cube = dict(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(len(x[0])**3), reverse=True)) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
max_length = 0 |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_quad = dict(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(len(x[0])**4), reverse=True)) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_quad.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
max_length = 0 |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_quad.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_cube_smart = dict(tqdm(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(((((((len(x[0])**2)* list(Counter(list(x[0])).items())[0][1]))))/(len(set(list(x[0])))))), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube_smart.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
max_length = 0 |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube_smart.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_sq_smart = dict(tqdm(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(((((((len(x[0])**2)* list(Counter(list(x[0])).items())[0][1]))))/(len(set(list(x[0]))))))**(2/3), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq_smart.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
max_length = 0 |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq_smart.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
def longest_same_char_sequence(s: str) -> int:
    """Return the length of the longest run of identical adjacent characters.

    An empty string has no runs and yields 0.
    """
    if not s:
        return 0

    best = 1
    run = 1
    previous = s[0]

    for ch in s[1:]:
        if ch == previous:
            run += 1
        else:
            # Run ended: remember it if it is the longest seen so far.
            if run > best:
                best = run
            run = 1
            previous = ch

    # The final run never hits the else-branch above, so fold it in here.
    return best if best >= run else run
|
|
|
|
|
longest_same_char_sequence('\\\\\\\\\\\\\\\\\\\\\\') |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_cube_smart_2 = dict(tqdm(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(((((((len(x[0])**2)*longest_same_char_sequence(x[0])*list(Counter(list(x[0])).items())[0][1]))))/(len(set(list(x[0]))))))**(3/4), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube_smart_2.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
max_length = 0 |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube_smart_2.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_sq_smart_2 = dict(tqdm(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(((((((len(x[0])**2)*longest_same_char_sequence(x[0])*list(Counter(list(x[0])).items())[0][1]))))/(len(set(list(x[0]))))))**(2/4), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq_smart_2.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
max_length = 0 |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq_smart_2.keys())[:131_072]: |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
class SuffixAutomaton:
    # Suffix automaton of a string: a DAG whose states correspond to
    # endpos-equivalence classes of substrings, built online one character
    # at a time.  Each state is a dict with:
    #   'len'  : length of the longest substring in the state's class
    #   'link' : suffix-link (parent state index; -1 only on the root)
    #   'next' : outgoing transitions {character: target state index}
    #   'occ'  : occurrence count (endpos size) -- only valid after
    #            propagate_occurrences() has run
    def __init__(self, s):
        # State 0 is the initial state, representing the empty string.
        self.states = [{'len': 0, 'link': -1, 'next': {}, 'occ': 0}]
        self.last = 0  # state of the whole prefix processed so far
        for ch in s:
            self.add_char(ch)
        self.propagate_occurrences()

    def add_char(self, ch):
        # Standard online suffix-automaton extension by one character.
        p = self.last
        curr = len(self.states)

        # New state for the extended prefix; 'occ' = 1 marks that this
        # substring ends here once (as a prefix end position).
        self.states.append({'len': self.states[p]['len'] + 1, 'link': 0, 'next': {}, 'occ': 1})
        # Walk up the suffix links, adding a transition on `ch` wherever
        # one is missing.
        while p != -1 and ch not in self.states[p]['next']:
            self.states[p]['next'][ch] = curr
            p = self.states[p]['link']
        if p == -1:
            # Ran off the root: the new state links directly to it.
            self.states[curr]['link'] = 0
        else:
            q = self.states[p]['next'][ch]
            if self.states[p]['len'] + 1 == self.states[q]['len']:
                # q is already a valid suffix-link target.
                self.states[curr]['link'] = q
            else:
                # Split case: clone q with the shorter length so suffix-link
                # length invariants stay consistent.
                clone = len(self.states)
                self.states.append({
                    'len': self.states[p]['len'] + 1,
                    'next': self.states[q]['next'].copy(),
                    'link': self.states[q]['link'],
                    'occ': 0  # clones are not prefix end states
                })
                # Redirect the transitions that pointed at q to the clone.
                while p != -1 and self.states[p]['next'].get(ch) == q:
                    self.states[p]['next'][ch] = clone
                    p = self.states[p]['link']
                self.states[q]['link'] = self.states[curr]['link'] = clone
        self.last = curr

    def propagate_occurrences(self):
        # Sum the prefix-end marks up the suffix-link tree, processing
        # states from longest to shortest, so each state's 'occ' becomes
        # its true number of occurrences in the string.
        order = sorted(range(len(self.states)), key=lambda i: self.states[i]['len'], reverse=True)
        for i in order:
            link = self.states[i]['link']
            if link != -1:
                self.states[link]['occ'] += self.states[i]['occ']
|
|
|
|
|
def max_substring_value(s):
    """Return max over proper substrings (length >= 2) of count * length.

    Builds a suffix automaton of *s* and, for each state, multiplies its
    occurrence count by its effective length (capped at len(s) - 1 so the
    whole string itself never dominates).  Strings shorter than 2, or with
    no qualifying substring, score 1.
    """
    total_len = len(s)
    if total_len < 2:
        return 1

    automaton = SuffixAutomaton(s)

    best = max(
        (
            state['occ'] * min(state['len'], total_len - 1)
            for state in automaton.states
            if state['len'] >= 2
        ),
        default=0,
    )

    # Fall back to 1 when nothing qualified (mirrors the short-string case).
    return best if best > 0 else 1
|
|
|
|
|
# Pre-compute, for every token, [raw count, complexity penalty].
# The penalty grows with squared length, the longest same-char run, the best
# repeated-substring coverage, and one character's frequency, and shrinks with
# character diversity -- it is used below to down-rank long/degenerate n-grams.
# NOTE(review): Counter.items() is insertion-ordered, so [0][1] is the count
# of the FIRST distinct character, not necessarily the most frequent one --
# Counter.most_common(1) may have been intended; confirm before relying on it.
sorted_n_gram_tok_counts_anti_length_net_smart_3 = {}
for i in tqdm(sorted_n_gram_tok_counts):
    sorted_n_gram_tok_counts_anti_length_net_smart_3[i] = [sorted_n_gram_tok_counts[i], (((((((len(i)**2)*longest_same_char_sequence(i)*max_substring_value(i)*list(Counter(list(i)).items())[0][1]))))/(len(set(list(i))))))]

# Rank tokens by raw count discounted by penalty**(3/5).
sorted_n_gram_tok_counts_anti_length_cube_smart_3 = dict(tqdm(sorted(sorted_n_gram_tok_counts_anti_length_net_smart_3.items(), key=lambda x: x[1][0]/(x[1][1]**(3/5)), reverse=True)))
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube_smart_3.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube_smart_3.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_cube_smart_2.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_sq_smart_3 = dict(tqdm(sorted(sorted_n_gram_tok_counts_anti_length_net_smart_3.items(), key=lambda x: x[1][0]/(x[1][1]**(2/5)), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq_smart_3.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq_smart_3.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sq_smart_2.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_ac_cube_smart_3 = dict(tqdm(sorted(sorted_n_gram_tok_counts_anti_length_net_smart_3.items(), key=lambda x: x[1][0]/(x[1][1]**(3/6)), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_ac_cube_smart_3.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_ac_cube_smart_3.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_ac_sq_smart_3 = dict(tqdm(sorted(sorted_n_gram_tok_counts_anti_length_net_smart_3.items(), key=lambda x: x[1][0]/(x[1][1]**(2/6)), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_ac_sq_smart_3.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_ac_sq_smart_3.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_smart_3 = dict(tqdm(sorted(sorted_n_gram_tok_counts_anti_length_net_smart_3.items(), key=lambda x: x[1][0]/(x[1][1]**(1/5)), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_smart_3.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_smart_3.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_ac_smart_3 = dict(tqdm(sorted(sorted_n_gram_tok_counts_anti_length_net_smart_3.items(), key=lambda x: x[1][0]/(x[1][1]**(1/6)), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_ac_smart_3.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_ac_smart_3.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_smart_2 = dict(tqdm(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(((((((len(x[0])**2)*longest_same_char_sequence(x[0])*list(Counter(list(x[0])).items())[0][1]))))/(len(set(list(x[0]))))))**(1/4), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_smart_2.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_smart_2.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_sup_linear_smart_2 = dict(tqdm(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(((((((len(x[0])**2)*longest_same_char_sequence(x[0])*list(Counter(list(x[0])).items())[0][1]))))/(len(set(list(x[0]))))))**(1.5/4), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sup_linear_smart_2.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sup_linear_smart_2.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2 = dict(tqdm(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(((((((len(x[0])**2)*longest_same_char_sequence(x[0])*list(Counter(list(x[0])).items())[0][1]))))/(len(set(list(x[0]))))))**(2.5/4), reverse=True))) |
|
|
|
|
|
max_length = 0 |
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
last_i_val = sorted_n_gram_tok_counts[' '] |
|
|
last_i = None |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2.keys())[:131_072]: |
|
|
if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1: |
|
|
last_i_val = sorted_n_gram_tok_counts[i] |
|
|
last_i = i |
|
|
if len(i) > max_length: |
|
|
max_length = len(i) |
|
|
if len(set(list(i))) * 3 < len(i): |
|
|
print(i) |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
s = [0,0,0] |
|
|
s2 = [0,0] |
|
|
for i in list(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2.keys())[:131_072]: |
|
|
if len(i) > 5: |
|
|
if len(i) > 10: |
|
|
s2[1] += 1 |
|
|
s2[0] += 1 |
|
|
if len(i) > 15: |
|
|
if len(i) > 50: |
|
|
s[2] += 1 |
|
|
s[1] += 1 |
|
|
s[0] += 1 |
|
|
s, max_length, s2 |
|
|
|
|
|
last_i |
|
|
|
|
|
list(sorted_n_gram_tok_counts.keys()).index(last_i) |
|
|
|
|
|
import numpy as np |
|
|
|
|
|
# Pull the raw scores out of each ranking, in ranking order, as numpy arrays.
counts = np.array([sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2[k]
                   for k in tqdm(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2)])

counts2 = np.array([sorted_n_gram_tok_counts_anti_length_ac_cube_smart_3[k]
                    for k in tqdm(sorted_n_gram_tok_counts_anti_length_ac_cube_smart_3)])

counts2

# NOTE(review): the ac-cube entries appear to be pairs — column 0 is divided
# by the square root of column 1; confirm the pair semantics upstream.
counts2_ = counts2[:, 0] / (counts2[:, 1] ** (3/6))
counts2_

# Keep only the vocabulary-sized head of each series.
counts = counts[:131_072]
counts2_ = counts2_[:131_072]

# Centre both series around zero before blending.
counts = counts - counts.mean()

counts2_ = counts2_ - counts2_.mean()

counts.std()

counts.mean()

counts2_.mean()

counts2_.std()

# Candidate rescaling factor (std ratio) — inspected, then overridden to 1.
scale = counts.std() / counts2_.std()
scale

scale = 1
|
|
|
|
|
# Blend the two centred rankings into one hybrid score per token.
# BUG FIX: the original loops initialised `ind = 0` but never advanced it,
# so every token received counts[0] / counts2_[0]; and the merge condition
# was `if ind in ...` — an int tested against string keys, always False —
# instead of `if i in ...`.  Both loops now use enumerate and test the token.
sorted_n_gram_tok_counts_anti_length_hybrid_smart = {}

# counts is aligned with the key order of ..._sup_sq_smart_2 (built above).
for ind, i in tqdm(enumerate(list(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2.keys())[:131_072])):
    sorted_n_gram_tok_counts_anti_length_hybrid_smart[i] = counts[ind]

# counts2_ is aligned with ..._ac_cube_smart_3; add its scaled contribution.
for ind, i in tqdm(enumerate(list(sorted_n_gram_tok_counts_anti_length_ac_cube_smart_3.keys())[:131_072])):
    contrib = scale * 0.99994575 * counts2_[ind]
    if i in sorted_n_gram_tok_counts_anti_length_hybrid_smart:
        sorted_n_gram_tok_counts_anti_length_hybrid_smart[i] += contrib
    else:
        sorted_n_gram_tok_counts_anti_length_hybrid_smart[i] = contrib

# Rank tokens by the blended score, highest first.
sorted_n_gram_tok_counts_anti_length_hybrid_smart_sorted = dict(tqdm(sorted(sorted_n_gram_tok_counts_anti_length_hybrid_smart.items(), key=lambda x: x[1], reverse=True)))

sorted_n_gram_tok_counts_anti_length_hybrid_smart_sorted
|
|
|
|
|
# Diagnostics over the top 131_072 entries of the hybrid ranking.
# s = [total, len > 15, len > 50]; s2 = [len > 5, len > 10];
# last_i tracks the rarest multi-char token (by raw count); low-diversity
# tokens (< 1/3 distinct characters) are printed.
max_length = 0
s = [0, 0, 0]
s2 = [0, 0]
last_i_val = sorted_n_gram_tok_counts[' ']
last_i = None
for i in list(sorted_n_gram_tok_counts_anti_length_hybrid_smart_sorted.keys())[:131_072]:
    n = len(i)
    if n > 1 and sorted_n_gram_tok_counts[i] < last_i_val:
        last_i_val = sorted_n_gram_tok_counts[i]
        last_i = i
    max_length = max(max_length, n)
    if len(set(i)) * 3 < n:
        print(i)
    if n > 5:
        s2[0] += 1
        if n > 10:
            s2[1] += 1
    if n > 15:
        s[1] += 1
        if n > 50:
            s[2] += 1
    s[0] += 1

s, max_length, s2

# Plain length-bucket recount over the same slice.
lens = [len(i) for i in list(sorted_n_gram_tok_counts_anti_length_hybrid_smart_sorted.keys())[:131_072]]
s = [len(lens), sum(l > 15 for l in lens), sum(l > 50 for l in lens)]
s2 = [sum(l > 5 for l in lens), sum(l > 10 for l in lens)]

s, max_length, s2

last_i

# Rank of the rarest kept multi-char token in the raw count ordering.
list(sorted_n_gram_tok_counts.keys()).index(last_i)
|
|
|
|
|
# Download the QwQ-32B tokenizer spec to compare against our vocabulary.
!wget https://huggingface.co/Qwen/QwQ-32B/resolve/main/tokenizer.json

import json

with open('tokenizer.json', 'r') as file:
    data = json.load(file)

# Bare expressions below are notebook-cell outputs (inspection only).
data.keys()

len(data['model']['vocab'])

# Count how many of our ranked candidate tokens also appear in the
# reference vocabulary.
sim = 0
for i in tqdm(list(sorted_n_gram_tok_counts_anti_length_cube_smart_2.keys())):
    if i in data['model']['vocab'].keys():
        sim += 1

sim
|
|
|
|
|
# The same length diagnostics, applied to the reference (QwQ-32B) vocabulary.
# s = [total, len > 15, len > 50]; s2 = [len > 5, len > 10].
max_length = 0
s = [0, 0, 0]
s2 = [0, 0]
for i in list(data['model']['vocab'].keys()):
    n = len(i)
    max_length = max(max_length, n)
    if len(set(i)) * 3 < n:
        print(i)
    if n > 5:
        s2[0] += 1
        if n > 10:
            s2[1] += 1
    if n > 15:
        s[1] += 1
        if n > 50:
            s[2] += 1
    s[0] += 1

s, max_length, s2

# Plain length-bucket recount.
lens = [len(i) for i in list(data['model']['vocab'].keys())]
s = [len(lens), sum(l > 15 for l in lens), sum(l > 50 for l in lens)]
s2 = [sum(l > 5 for l in lens), sum(l > 10 for l in lens)]

s, max_length, s2
|
|
|
|
|
from collections import Counter |
|
|
import re |
|
|
|
|
|
def count_overlapping_space_ngrams(text):
    """Count every overlapping space n-gram inside runs of spaces in *text*.

    A run of k consecutive spaces contains (k - n + 1) occurrences of the
    n-space string for each n <= k.  Returns a list of
    (' ' * n, total_count) tuples sorted by n-gram length, ascending —
    the same output shape as the original implementation.

    Improvement: instead of walking every position of every run (O(total
    spaces) counter updates), the runs are first aggregated into a Counter
    of run lengths, so the work is O(distinct_lengths * max_length); the
    tqdm progress wrapper is no longer needed.
    """
    # Histogram of space-run lengths, e.g. "a  b   c" -> {2: 1, 3: 1}.
    run_lengths = Counter(len(seq) for seq in re.findall(r' +', text))
    if not run_lengths:
        return []
    longest = max(run_lengths)
    # For each n-gram size n, every run of length k >= n contributes
    # (k - n + 1) overlapping occurrences.
    return [
        (' ' * n,
         sum((k - n + 1) * c for k, c in run_lengths.items() if k >= n))
        for n in range(1, longest + 1)
    ]
|
|
|
|
|
# Tally overlapping space-run n-grams over the raw corpus text and fold
# them into the main n-gram count table.
space_counts = count_overlapping_space_ngrams(net_corpus)

space_counts

space_counts_dict = {}
for i in space_counts:
    # BUG FIX: the original condition was `if i != ' '`, comparing an
    # (ngram, count) TUPLE against a string — always True, so the single
    # space was never actually excluded.  Compare the n-gram itself so the
    # one-space entry is skipped as apparently intended.
    if i[0] != ' ':
        space_counts_dict[i[0]] = i[1]
space_counts_dict

# Inspection view: space runs ranked by frequency.
sorted_space_counts_dict = dict(tqdm(sorted(space_counts_dict.items(), key=lambda x: x[1], reverse=True)))

sorted_space_counts_dict

len(space_counts_dict)

len(sorted_n_gram_tok_counts)

# Report any space runs already present in the n-gram table (these get
# overwritten by the merge below).
for i in space_counts_dict:
    if i in sorted_n_gram_tok_counts:
        print(i)

# Merge the space-run counts into the main table.
for i in space_counts_dict:
    sorted_n_gram_tok_counts[i] = space_counts_dict[i]

len(sorted_n_gram_tok_counts)
|
|
|
|
|
# Same anti-length re-ranking as ..._sup_sq_smart_2 above, but computed over
# the count table that now also contains the merged space-run n-grams.
# NOTE(review): list(Counter(...).items())[0][1] is the count of the FIRST
# distinct character (insertion order), not the most common one — confirm.
sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2_with_sp = dict(tqdm(sorted(sorted_n_gram_tok_counts.items(), key=lambda x: x[1]/(((((((len(x[0])**2)*longest_same_char_sequence(x[0])*list(Counter(list(x[0])).items())[0][1]))))/(len(set(list(x[0]))))))**(2.5/4), reverse=True)))
|
|
|
|
|
# Diagnostics over the top 131_072 entries of the with-spaces ranking.
# s = [total, len > 15, len > 50]; s2 = [len > 5, len > 10];
# last_i tracks the rarest multi-char token (by raw count); low-diversity
# tokens (< 1/3 distinct characters) are printed.
max_length = 0
s = [0, 0, 0]
s2 = [0, 0]
last_i_val = sorted_n_gram_tok_counts[' ']
last_i = None
for i in list(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2_with_sp.keys())[:131_072]:
    n = len(i)
    if n > 1 and sorted_n_gram_tok_counts[i] < last_i_val:
        last_i_val = sorted_n_gram_tok_counts[i]
        last_i = i
    max_length = max(max_length, n)
    if len(set(i)) * 3 < n:
        print(i)
    if n > 5:
        s2[0] += 1
        if n > 10:
            s2[1] += 1
    if n > 15:
        s[1] += 1
        if n > 50:
            s[2] += 1
    s[0] += 1

s, max_length, s2

# Plain length-bucket recount over the same slice.
lens = [len(i) for i in list(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2_with_sp.keys())[:131_072]]
s = [len(lens), sum(l > 15 for l in lens), sum(l > 50 for l in lens)]
s2 = [sum(l > 5 for l in lens), sum(l > 10 for l in lens)]

s, max_length, s2

last_i

# Rank of the rarest kept multi-char token in the raw count ordering.
list(sorted_n_gram_tok_counts.keys()).index(last_i)

sorted_char_counts

len(sorted_char_counts)
|
|
|
|
|
# Keep the highest-ranked n-grams that are not already single characters.
top_131k_vals_final_app_with_sp_no_ind = {
    tok: sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2_with_sp[tok]
    for tok in list(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2_with_sp.keys())[:131_072]
    if tok not in sorted_char_counts
}

len(top_131k_vals_final_app_with_sp_no_ind)

# Start from every individual character, then top up with ranked n-grams
# until the vocabulary reaches 131_072 entries.
top_131k_vals_final_app_with_sp_and_ind = sorted_char_counts.copy()
j = len(top_131k_vals_final_app_with_sp_and_ind)
for tok, cnt in top_131k_vals_final_app_with_sp_no_ind.items():
    if j >= 131_072:
        break
    top_131k_vals_final_app_with_sp_and_ind[tok] = cnt
    j += 1

len(top_131k_vals_final_app_with_sp_and_ind)

# Final count-ordered view of the combined vocabulary.
sorted_top_131k_vals_final_app_with_sp_and_ind = dict(tqdm(sorted(top_131k_vals_final_app_with_sp_and_ind.items(), key=lambda x: x[1], reverse=True)))

sorted_top_131k_vals_final_app_with_sp_and_ind
|
|
|
|
|
def save_dict_to_json(data, filename):
    """Serialize *data* to *filename* as pretty-printed UTF-8 JSON.

    Non-ASCII keys and values are written verbatim (ensure_ascii=False),
    with a 4-space indent.
    """
    payload = json.dumps(data, ensure_ascii=False, indent=4)
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(payload)
|
|
|
|
|
!pwd |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
keys = list(sorted_top_131k_vals_final_app_with_sp_and_ind.keys()) |
|
|
|
|
|
ordered_tokeniser = {} |
|
|
for id, i in enumerate(keys): |
|
|
ordered_tokeniser[i] = id |
|
|
|
|
|
import random |
|
|
random.shuffle(keys) |
|
|
|
|
|
unordered_tokeniser = {} |
|
|
for id, i in enumerate(keys): |
|
|
unordered_tokeniser[i] = id |
|
|
|
|
|
unordered_tokeniser |
|
|
|
|
|
ordered_tokeniser |
|
|
|
|
|
save_dict_to_json(sorted_top_131k_vals_final_app_with_sp_and_ind, "count_tokenizer_1b_val_test_data_raw.json") |
|
|
|
|
|
save_dict_to_json(ordered_tokeniser, "ordered_tokenizer_1b_val_test_data_raw.json") |
|
|
|
|
|
save_dict_to_json(unordered_tokeniser, "unordered_tokenizer_1b_val_test_data_raw.json") |
|
|
|
|
|
sorted_n_gram_tok_counts['#'] |
|
|
|
|
|
# Diagnostics over the final 131_072-entry vocabulary (chars + n-grams).
# Tokens that came only from the character tally fall back to
# sorted_char_counts for their raw count.
# s = [total, len > 15, len > 50]; s2 = [len > 5, len > 10];
# last_i tracks the rarest multi-char token kept.
max_length = 0
s = [0, 0, 0]
s2 = [0, 0]
last_i_val = sorted_n_gram_tok_counts[' ']
last_i = None
for i in list(sorted_top_131k_vals_final_app_with_sp_and_ind.keys())[:131_072]:
    if i in sorted_n_gram_tok_counts:
        if sorted_n_gram_tok_counts[i] < last_i_val and len(i) > 1:
            last_i_val = sorted_n_gram_tok_counts[i]
            last_i = i
    else:
        if sorted_char_counts[i] < last_i_val and len(i) > 1:
            last_i_val = sorted_char_counts[i]
            last_i = i
    if len(i) > max_length:
        max_length = len(i)
    # Flag low-diversity tokens (< 1/3 distinct characters).
    if len(set(list(i))) * 3 < len(i):
        print(i)
    if len(i) > 5:
        if len(i) > 10:
            s2[1] += 1
        s2[0] += 1
    if len(i) > 15:
        if len(i) > 50:
            s[2] += 1
        s[1] += 1
    s[0] += 1

s, max_length, s2

# Plain length-bucket recount for the with-space ranking.
s = [0, 0, 0]
s2 = [0, 0]
for i in list(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2_with_sp.keys())[:131_072]:
    if len(i) > 5:
        if len(i) > 10:
            s2[1] += 1
        s2[0] += 1
    if len(i) > 15:
        if len(i) > 50:
            s[2] += 1
        s[1] += 1
    s[0] += 1

s, max_length, s2

last_i

# BUG FIX: the original used a bare `except: pass`, which also swallows
# KeyboardInterrupt/SystemExit.  list.index() raises ValueError when
# last_i is absent (or still None), so catch exactly that.
try:
    print(list(sorted_n_gram_tok_counts.keys()).index(last_i))
except ValueError:
    pass
|
|
|
|
|
# Collect EVERY ranked n-gram that is not a bare character (no 131k cap).
top_all_vals_final_app_with_sp_no_ind = {
    tok: sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2_with_sp[tok]
    for tok in tqdm(sorted_n_gram_tok_counts_anti_length_sup_sq_smart_2_with_sp.keys())
    if tok not in sorted_char_counts
}

len(top_all_vals_final_app_with_sp_no_ind)

# Sanity check: the two key sets must be disjoint (nothing should print).
for tok in tqdm(top_all_vals_final_app_with_sp_no_ind):
    if tok in sorted_char_counts:
        print(tok)

print(list(sorted_char_counts.keys())[-1])

len(sorted_char_counts)

# One ordered key list: every character first, then all multi-char tokens.
net_corpus_keys_order = list(sorted_char_counts.keys())
net_corpus_keys_order.extend(top_all_vals_final_app_with_sp_no_ind)

net_corpus_keys_order

# Merged count table (chars + n-grams).  NOTE: this rebinds net_corpus —
# previously the raw corpus string — to a dict; dict-union preserves the
# same key order as net_corpus_keys_order.
net_corpus = sorted_char_counts | top_all_vals_final_app_with_sp_no_ind

net_corpus

len(net_corpus)

save_dict_to_json(net_corpus, "net_corpus_1b_val_test.json")

save_dict_to_json(sorted_char_counts, "sorted_char_counts_1b_val_test.json")

save_dict_to_json(top_all_vals_final_app_with_sp_no_ind, "top_all_vals_final_app_with_sp_no_ind_1b_val_test.json")

# Plain-text export of the key order, one token per line.
with open("net_corpus_keys_order_1b_val_test.txt", 'w') as out:
    out.writelines(str(item) + '\n' for item in tqdm(net_corpus_keys_order))
|
|
|
|
|
# Pull previously exported artefacts from the companion GitHub repo.
!git clone https://github.com/Tasmay-Tibrewal/tokenizer.git

import json

# When not continuing from the in-memory state built above (e.g. a fresh
# Colab runtime), reload the merged char+n-gram count table from the repo.
if not continue_seamless:
    with open("tokenizer/net_corpus_1b_val_test.json", "r", encoding="utf-8") as file:
        net_corpus = json.load(file)

len(net_corpus)

net_corpus
|
|
|
|
|
from tqdm.auto import tqdm

# Map each vocabulary entry to its position in net_corpus.
net_corpus_keys_pos = {tok: pos for pos, tok in tqdm(enumerate(net_corpus))}
net_corpus_keys_pos

len(net_corpus_keys_pos)

# Mean number of contiguous substrings per entry: a length-L string has
# L*(L+1)/2 of them.
mean_len_term = sum(len(tok) * (len(tok) + 1) / 2 for tok in net_corpus) / len(net_corpus)
mean_len_term

# Estimated total iterations for the substring-index build below.
total_its = mean_len_term * len(net_corpus)
round(total_its)
|
|
|
|
|
# Substring index: maps every contiguous substring of a vocabulary entry to
# the list of positions ("parent" ids) of entries containing it.  Single
# characters index only themselves; entries longer than 50 chars or with a
# count below 10 are skipped to bound the work.
parent_strings_dict = {}
for pos, term in tqdm(enumerate(net_corpus)):
    if len(term) == 1:
        parent_strings_dict.setdefault(term, []).append(pos)
        continue
    if len(term) > 50 or net_corpus[term] < 10:
        continue
    for start in range(len(term)):
        for stop in range(start + 1, len(term) + 1):
            parent_strings_dict.setdefault(term[start:stop], []).append(pos)

parent_strings_dict

len(parent_strings_dict)

# Position -> token lookup used when resolving parent ids.
keys = list(net_corpus.keys())
keys
|
|
|
|
|
def test_max_num(max_parent_id):
    """Simulate the vocabulary-trimming pass for one parent-rank threshold.

    A token is dropped when some "parent" entry containing it (via
    parent_strings_dict) has a count greater than 70% of the token's own
    count AND that parent's position is below `max_parent_id`.
    Returns (last_occ, max_parent_id_occ, min_parent_id_occ_outside):
      last_occ                  -- corpus position of the last token kept,
      max_parent_id_occ         -- highest parent position that caused a drop,
      min_parent_id_occ_outside -- lowest parent position that passed the
                                   count test but sat at/above the threshold
                                   (-1 if none seen).
    Used by the binary search in the next cell to find a self-consistent
    threshold.  Reads globals net_corpus, parent_strings_dict, keys.
    """
    rem_counts = 0        # tokens removed in favour of a parent
    single_max_occ = 0    # max exact self-matches observed for one token
    total_added = 0       # tokens kept so far (target: 131_072)
    last_occ = 0
    max_parent_id_occ = 0
    min_parent_id_occ_outside = -1   # -1 == not seen yet
    net_corpus_small_adj = {}
    for id,i in tqdm(enumerate(net_corpus)):
        if total_added >= 131_072:
            break
        elif total_added % 10_000 == 0:
            # Lightweight progress marker every 10k kept tokens.
            print(f"{total_added/1000:.0f}K ", end="")
        # Entries skipped by the index build (len>50 or count<10) are
        # reported and excluded entirely.
        if i not in parent_strings_dict:
            print(f"'{i[:50]}'", len(i), net_corpus[i])
            continue
        single_occ = 0
        add = 1
        if len(i) > 1:
            for parent_id in parent_strings_dict[i]:
                parent = keys[parent_id]
                # A parent must be at least as long as the token.
                if len(i) > len(parent):
                    print("Some issue, less or same length",i, parent)
                    continue
                elif len(i) == len(parent) and i == parent:
                    # The token indexes itself; count but don't drop.
                    single_occ += 1
                    continue
                elif len(i) == len(parent):
                    print("Child parent length same but child != parent", i, parent)
                    continue
                # Drop the token if a sufficiently frequent parent ranks
                # under the threshold; otherwise remember the best
                # over-threshold candidate.
                if net_corpus[parent] > net_corpus[i] * 0.7 :
                    if parent_id < max_parent_id:
                        if parent_id > max_parent_id_occ:
                            max_parent_id_occ = parent_id
                        single_occ += 1
                        rem_counts += 1
                        add = 0
                        break
                    else:
                        if min_parent_id_occ_outside == -1:
                            min_parent_id_occ_outside = parent_id
                        if parent_id < min_parent_id_occ_outside:
                            min_parent_id_occ_outside = parent_id
        if add == 1:
            last_occ = id
            total_added += 1
            net_corpus_small_adj[i] = net_corpus[i]
        if single_occ > single_max_occ:
            single_max_occ = single_occ
    return last_occ, max_parent_id_occ, min_parent_id_occ_outside
|
|
|
|
|
# Binary-search the parent-rank threshold until the position of the last
# kept token lies strictly between the largest parent id that caused a
# removal and the smallest over-threshold parent id that did not.
# BUG FIXES vs the original cell:
#   * the search bounds were named `min`/`max`, shadowing the builtins —
#     renamed to lo/hi;
#   * the iteration counter `it` was never incremented, so every report
#     printed "It: 0".
max_parent_id = len(net_corpus)
lo = 131_072
hi = max_parent_id
it = 0
while True:
    last_occ, max_parent_id_occ, min_parent_id_occ_outside = test_max_num(max_parent_id)
    print(f"\nIt: {it}, Threshold: {max_parent_id}, Looped till: {last_occ}")
    print(f"Inner max id: {max_parent_id_occ}, Outer min id: {min_parent_id_occ_outside}")
    print(f"Id b/w inner max and outer min: {(last_occ < min_parent_id_occ_outside and last_occ > max_parent_id_occ)}")
    if last_occ < min_parent_id_occ_outside and last_occ > max_parent_id_occ:
        break
    if last_occ < max_parent_id:
        hi = last_occ
    elif last_occ > max_parent_id:
        lo = last_occ
    max_parent_id = (lo + hi) / 2
    it += 1
    print(f"New overall min: {lo}, max: {hi}\nNew Max parent id: {max_parent_id}\n{'='*40}\n\n")

max_parent_id

import math

# The bisection midpoint is a float; downstream comparisons treat the
# threshold as an integer rank cut-off.
max_parent_id = math.ceil(max_parent_id)

max_parent_id
|
|
|
|
|
# Final trimming pass: inlined variant of test_max_num, run once with the
# tuned max_parent_id (the inner/outer id tracking is no longer needed).
rem_counts = 0        # tokens dropped in favour of a frequent-enough parent
single_max_occ = 0    # largest per-token count of exact self-matches seen
total_added = 0       # tokens kept (target: 131_072)
last_occ = 0          # position of the last token kept
net_corpus_small_adj = {}

for pos, tok in tqdm(enumerate(net_corpus)):
    if total_added >= 131_072:
        break
    elif total_added % 10_000 == 0:
        print(f"{total_added/1000:.0f}K ", end="")
    if tok not in parent_strings_dict:
        print(f"'{tok[:50]}'", len(tok), net_corpus[tok])
        continue
    single_occ = 0
    keep = 1
    if len(tok) > 1:
        for parent_pos in parent_strings_dict[tok]:
            parent = keys[parent_pos]
            if len(tok) > len(parent):
                print("Some issue, less or same length", tok, parent)
                continue
            elif len(tok) == len(parent) and tok == parent:
                single_occ += 1
                continue
            elif len(tok) == len(parent):
                print("Child parent length same but child != parent", tok, parent)
                continue
            if net_corpus[parent] > net_corpus[tok] * 0.7:
                if parent_pos < max_parent_id:
                    single_occ += 1
                    rem_counts += 1
                    keep = 0
                    break
    if keep == 1:
        last_occ = pos
        total_added += 1
        net_corpus_small_adj[tok] = net_corpus[tok]
    if single_occ > single_max_occ:
        single_max_occ = single_occ

single_max_occ

last_occ

total_added

rem_counts

# Consistency check: every position up to last_occ was either kept or removed.
rem_counts + total_added, rem_counts + total_added == last_occ + 1

len(net_corpus_small_adj)

net_corpus_small_adj
|
|
|
|
|
# Scan the processed range for entries that were excluded from the
# substring index (len > 50 or count < 10); record and report the first one.
first_index = -1
for pos, tok in tqdm(enumerate(net_corpus)):
    if pos > last_occ:
        break
    if len(tok) == 1:
        continue
    if len(tok) > 50 or net_corpus[tok] < 10:
        if first_index == -1:
            first_index = pos
        print(pos, len(tok), net_corpus[tok], tok)
|
|
|
|
|
# Diagnostics over the trimmed vocabulary, using raw net_corpus counts.
# s = [total, len > 15, len > 50]; s2 = [len > 5, len > 10];
# last_i tracks the rarest multi-char token kept; low-diversity tokens
# (< 1/3 distinct characters) are printed.
max_length = 0
s = [0, 0, 0]
s2 = [0, 0]
last_i_val = net_corpus[' ']
last_i = None
for i in list(net_corpus_small_adj.keys())[:131_072]:
    n = len(i)
    if n > 1 and net_corpus[i] < last_i_val:
        last_i_val = net_corpus[i]
        last_i = i
    max_length = max(max_length, n)
    if len(set(i)) * 3 < n:
        print(i)
    if n > 5:
        s2[0] += 1
        if n > 10:
            s2[1] += 1
    if n > 15:
        s[1] += 1
        if n > 50:
            s[2] += 1
    s[0] += 1

s, max_length, s2

# Plain length-bucket recount over the same slice.
lens = [len(i) for i in list(net_corpus_small_adj.keys())[:131_072]]
s = [len(lens), sum(l > 15 for l in lens), sum(l > 50 for l in lens)]
s2 = [sum(l > 5 for l in lens), sum(l > 10 for l in lens)]

s, max_length, s2

last_i

net_corpus['ља']

net_corpus[last_i]
|
|
|
|
|
# Count-ordered view of the trimmed vocabulary.
sorted_net_corpus_small_adj = dict(tqdm(sorted(net_corpus_small_adj.items(), key=lambda x: x[1], reverse=True)))
sorted_net_corpus_small_adj

# Token ids in insertion (rank) order, then a shuffled-id variant.
keys = list(net_corpus_small_adj.keys())

ordered_tokeniser = {tok: rank for rank, tok in enumerate(keys)}

import random

random.shuffle(keys)

unordered_tokeniser = {tok: rank for rank, tok in enumerate(keys)}

ordered_tokeniser

unordered_tokeniser
|
|
|
|
|
def save_dict_to_json(data, filename):
    """Write *data* to *filename* as UTF-8 JSON (indent=4, non-ASCII kept)."""
    with open(filename, 'w', encoding='utf-8') as out:
        json.dump(data, out, ensure_ascii=False, indent=4)
|
|
|
|
|
!pwd

# Persist the final trimmed tokeniser: the count map plus the rank-ordered
# and shuffled token->id maps.
save_dict_to_json(sorted_net_corpus_small_adj, "count_tokenizer_1b_val_test_data.json")

save_dict_to_json(ordered_tokeniser, "ordered_tokenizer_1b_val_test_data.json")

save_dict_to_json(unordered_tokeniser, "unordered_tokenizer_1b_val_test_data.json")