Upload streamlit_app.py
src/streamlit_app.py  CHANGED  (+365 −34)
@@ -1,40 +1,371 @@
[old default-template body removed; `import numpy as np` and `import streamlit as st` are kept as context. The only legible removed text: "In the meantime, below is an example of what you can do with just a few lines of code:" followed by a closing """.]
"""
Run with: streamlit run streamlit_app.py
"""
import json
import tempfile
from pathlib import Path
from typing import List, Dict, Tuple, Optional

import numpy as np
import streamlit as st
import pandas as pd
import plotly.express as px

from tokenizers import Tokenizer
from tokenizers.models import BPE, WordPiece, Unigram
from tokenizers.trainers import BpeTrainer, WordPieceTrainer, UnigramTrainer
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.normalizers import Sequence, NFKC
from rapidfuzz.distance import Levenshtein as RFLevenshtein

def normalized_distance(a: str, b: str) -> float:
    if not a and not b:
        return 0.0
    return float(RFLevenshtein.normalized_distance(a, b))
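
# Sanity check (rapidfuzz normalizes by the longer string's length):
#   normalized_distance("kitten", "sitting")  # 3 edits / 7 chars ≈ 0.4286
#   normalized_distance("same", "same")       # 0.0
# Note: this helper is not referenced by the UI below yet.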


# -------------------------
# Corpus loading
# -------------------------
def load_jsonl_texts(file_bytes: bytes, text_field: str = "text", max_docs: Optional[int] = None) -> List[str]:
    """
    Extract the training texts from a JSONL file.
    """
    texts: List[str] = []
    try:
        s = file_bytes.decode("utf-8")
    except Exception:
        s = file_bytes.decode("utf-8", errors="replace")
    for line in s.splitlines():
        line = line.strip()
        if not line:
            continue
        try:
            obj = json.loads(line)
            t = obj.get(text_field, "")
            if t is None:
                continue
            t = str(t).strip()
            if t:
                texts.append(t)
        except Exception:
            # skip malformed lines
            continue
        if max_docs and len(texts) >= max_docs:
            break
    return texts
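
# Expected corpus format, one JSON object per line, e.g.:
#   {"text": "first document ..."}
#   {"text": "second document ..."}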


def jsonl_bytes_to_textfile(file_bytes: bytes, out_path: str, text_field: str = "text"):
    texts = load_jsonl_texts(file_bytes, text_field=text_field)
    with open(out_path, "w", encoding="utf-8") as f:
        for t in texts:
            f.write(t.replace("\n", " ") + "\n")
    return texts


# -------------------------
# Tokenizer training
# -------------------------
def train_bpe(filepaths: List[str], vocab_size: int = 16000, min_freq: int = 2, unk_token: str = "[UNK]") -> Tokenizer:
    tok = Tokenizer(BPE(unk_token=unk_token))
    tok.normalizer = Sequence([NFKC()])
    tok.pre_tokenizer = Whitespace()
    trainer = BpeTrainer(vocab_size=vocab_size, min_frequency=min_freq, special_tokens=[unk_token])
    tok.train(filepaths, trainer)
    return tok


def train_wordpiece(filepaths: List[str], vocab_size: int = 16000, min_freq: int = 2, unk_token: str = "[UNK]") -> Tokenizer:
    tok = Tokenizer(WordPiece(unk_token=unk_token))
    tok.normalizer = Sequence([NFKC()])
    tok.pre_tokenizer = Whitespace()
    trainer = WordPieceTrainer(vocab_size=vocab_size, min_frequency=min_freq, special_tokens=[unk_token])
    tok.train(filepaths, trainer)
    return tok


def train_unigram(filepaths: List[str], vocab_size: int = 16000) -> Tokenizer:
    tok = Tokenizer(Unigram())
    tok.normalizer = Sequence([NFKC()])
    tok.pre_tokenizer = Whitespace()
    trainer = UnigramTrainer(
        vocab_size=vocab_size,
        unk_token="[UNK]"
    )
    tok.train(filepaths, trainer)
    return tok
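
# All three trainers share the same preprocessing (NFKC normalization plus
# whitespace pre-tokenization), so differences in the resulting vocabularies
# come from the subword algorithm alone.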


# -------------------------
# Tokenization and metrics
# -------------------------
def tokenize_texts(tok: Tokenizer, texts: List[str]) -> Tuple[List[List[str]], List[List[int]]]:
    """
    For each input line, returns:
    - the list of tokens of each text
    - the list of token ids of each text
    """
    tokens_per_line = []
    ids_per_line = []
    for line in texts:
        enc = tok.encode(line)
        tokens_per_line.append(enc.tokens)
        ids_per_line.append(enc.ids)
    return tokens_per_line, ids_per_line
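
# Convenience wrapper, currently not called by the UI below. For example,
#   tokens, ids = tokenize_texts(tok, ["hello world"])
# returns the token strings and their vocabulary ids per line; the exact
# split depends on the trained model.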


def compute_token_statistics(tok: Tokenizer, texts: List[str], top_n: int = 30) -> Dict:
    """
    Computes:
    - token frequency (global)
    - tokens-per-word distribution
    - token-length distribution
    - OOV ratio (count of tokens equal to unk)
    """
    total_tokens = 0
    unk_count = 0
    token_freq = {}

    tokens_per_word_counts = []
    token_lengths = []

    for line in texts:
        enc_line = tok.encode(line)
        toks = enc_line.tokens
        ids = enc_line.ids
        total_tokens += len(ids)
        for t in toks:
            token_freq[t] = token_freq.get(t, 0) + 1
            token_lengths.append(len(t))
            if t == "[UNK]" or t == "[unk]":
                unk_count += 1

        words = line.split()
        for w in words:
            enc_w = tok.encode(w)
            toks_w = enc_w.tokens
            tokens_per_word_counts.append(len(toks_w))

    # guard against division by zero
    oov_ratio = (unk_count / total_tokens) if total_tokens > 0 else 0.0

    freq_items = sorted(token_freq.items(), key=lambda x: x[1], reverse=True)
    top_tokens = freq_items[:top_n]

    stats = {
        "total_tokens": total_tokens,
        "unk_count": unk_count,
        "oov_ratio": oov_ratio,
        "top_tokens": top_tokens,
        "tokens_per_word_counts": tokens_per_word_counts,
        "token_lengths": token_lengths,
        "token_freq_series": pd.Series(token_freq),
        "tokens_per_word_mean": float(np.mean(tokens_per_word_counts)) if tokens_per_word_counts else 0.0,
        "tokens_per_word_median": float(np.median(tokens_per_word_counts)) if tokens_per_word_counts else 0.0,
    }
    return stats
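
# Note: the tokens-per-word metric re-encodes each whitespace-split word on
# its own, so every call effectively encodes the corpus twice; acceptable for
# a prototype, but worth caching for large corpora.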


# -------------------------
# Export statistics (to HTML)
# -------------------------
def build_html_report(texts: List[str], tok: Tokenizer, stats: Dict, title: str = "Tokenization report") -> str:
    # DataFrame of the most frequent tokens
    top_tokens = stats.get("top_tokens", [])
    df_top = pd.DataFrame(top_tokens, columns=["token", "count"])

    # build the Plotly figures
    fig_len = px.histogram(stats.get("token_lengths", []), nbins=40, labels={"value": "Token length (chars)"},
                           title="Token length distribution")
    fig_tpw = px.histogram(stats.get("tokens_per_word_counts", []), nbins=20, labels={"value": "Subwords per word"},
                           title="Tokens-per-word distribution")
    fig_top = px.bar(df_top.head(50), x="token", y="count", title="Most frequent tokens (top)")

    html_parts = []
    html_parts.append(f"<h1>{title}</h1>")
    html_parts.append(f"<p>Total texts: {len(texts)}; total tokens: {stats.get('total_tokens', 0)}; OOV: {stats.get('oov_ratio', 0.0):.4f}</p>")
    html_parts.append("<h2>Most frequent tokens</h2>")
    html_parts.append(df_top.to_html(index=False))
    html_parts.append("<h2>Charts</h2>")
    # include plotly.js from the CDN with the first figure only; embed the rest without re-including it
    html_parts.append(fig_len.to_html(full_html=False, include_plotlyjs="cdn"))
    html_parts.append(fig_tpw.to_html(full_html=False, include_plotlyjs=False))
    html_parts.append(fig_top.to_html(full_html=False, include_plotlyjs=False))

    return "\n".join(html_parts)
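
# The returned string is an HTML fragment rather than a full page; plotly.js
# is included from the CDN once (with the first figure), so viewing the
# report requires network access to fetch that script.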


# -------------------------
# Streamlit UI
# -------------------------
st.set_page_config(page_title="Tokenizer", layout="wide")
st.title("Tokenizer - interactive analysis (JSONL)")

st.markdown(
    """
    A prototype web UI for interactive analysis of tokenizers (BPE / WordPiece / Unigram) built on tokenizers.
    Corpus format: JSONL (each line is a JSON object with a text field holding the training data).
    """
)

# sidebar
with st.sidebar:
    st.header("Corpus & model")
    uploaded = st.file_uploader("Upload corpus.jsonl (JSONL, 'text' field)", type=["jsonl", "json"], accept_multiple_files=False)

    text_field = st.text_input("Name of the JSON text field", value="text")
    max_docs = st.number_input("Maximum documents to load (0 = all)", min_value=0, step=1, value=0)
    st.markdown("---")
    st.subheader("Tokenizer")
    model_choice = st.selectbox("Choose a model", ["BPE", "WordPiece", "Unigram"])
    vocab_size = st.selectbox("Vocabulary size", [8000, 16000, 32000], index=2)
    min_freq = st.selectbox("min_frequency (BPE/WordPiece)", [2, 3, 4, 5], index=1)
    st.markdown("Unigram: min_frequency is ignored")
    st.markdown("---")
    st.write("You can also upload a pre-trained tokenizer JSON (tokenizers .json)")
    uploaded_tokenizer = st.file_uploader("Upload tokenizer .json (optional)", type=["json"], accept_multiple_files=False)
    st.markdown("---")
    st.write("Export")
    export_html_name = st.text_input("HTML report file name", value="tokenizer_report.html")

# --- session_state initialization ---
if "tokenizer_obj" not in st.session_state:
    st.session_state.tokenizer_obj = None
if "stats" not in st.session_state:
    st.session_state.stats = None
if "texts" not in st.session_state:
    st.session_state.texts = []

# load the corpus once a file is selected
if uploaded is not None:
    try:
        raw = uploaded.getvalue()
        # convert to a list of texts and store in st.session_state
        st.session_state.texts = load_jsonl_texts(raw, text_field=text_field, max_docs=(None if max_docs == 0 else max_docs))
        # also write a plain-text file for the trainer (in case training is requested)
        tmp_dir = tempfile.mkdtemp()
        corpus_txt = Path(tmp_dir) / "corpus_for_training.txt"
        with open(corpus_txt, "w", encoding="utf-8") as f:
            for t in st.session_state.texts:
                f.write(t.replace("\n", " ") + "\n")
        st.success(f"Documents loaded: {len(st.session_state.texts)} (field '{text_field}')")
    except Exception as e:
        st.error("Failed to read the JSONL file: " + str(e))
else:
    st.info("Upload corpus.jsonl in the sidebar")
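
# Streamlit reruns this script top-to-bottom on every interaction, so the
# tokenizer, the stats and the loaded texts are kept in st.session_state to
# survive reruns.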

# load an external tokenizer
if uploaded_tokenizer is not None:
    try:
        tmp = uploaded_tokenizer.getvalue()
        tmp_path = Path(tempfile.mkdtemp()) / "uploaded_tok.json"
        with open(tmp_path, "wb") as f:
            f.write(tmp)
        st.session_state.tokenizer_obj = Tokenizer.from_file(str(tmp_path))
        st.success("External tokenizer loaded.")
    except Exception as e:
        st.error(f"Failed to load the tokenizer: {e}")
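
# Note: an uploaded tokenizer takes precedence; the training branch below
# only runs while st.session_state.tokenizer_obj is None.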

# Train / apply button
if st.button("Train / apply tokenizer"):
    if not st.session_state.texts:
        st.error("No corpus loaded for training/evaluation.")
    else:
        with st.spinner("Training / applying the tokenizer..."):
            # if no external tokenizer was uploaded, train one
            if st.session_state.tokenizer_obj is None:
                tmp_dir = tempfile.mkdtemp()
                corpus_txt_path = str(Path(tmp_dir) / "corpus_for_training.txt")
                # write the training file
                with open(corpus_txt_path, "w", encoding="utf-8") as f:
                    for t in st.session_state.texts:
                        f.write(t.replace("\n", " ") + "\n")
                try:
                    if model_choice == "BPE":
                        st.session_state.tokenizer_obj = train_bpe([corpus_txt_path], vocab_size=vocab_size, min_freq=min_freq)
                    elif model_choice == "WordPiece":
                        st.session_state.tokenizer_obj = train_wordpiece([corpus_txt_path], vocab_size=vocab_size, min_freq=min_freq)
                    else:  # Unigram
                        st.session_state.tokenizer_obj = train_unigram([corpus_txt_path], vocab_size=vocab_size)
                    st.success(f"Model {model_choice} trained.")
                except Exception as e:
                    st.error(f"Error while training the model: {e}")
                    st.session_state.tokenizer_obj = None

# if a tokenizer is available, show statistics
if st.session_state.tokenizer_obj is not None:
    st.subheader("Tokenizer summary")
    try:
        st.write("Model:", st.session_state.tokenizer_obj.model.__class__.__name__)
    except Exception:
        st.write("Model loaded (type unknown).")

    # compute the statistics
    st.session_state.stats = compute_token_statistics(st.session_state.tokenizer_obj, st.session_state.texts, top_n=50)

# render the results once the statistics are ready
if st.session_state.stats is not None:
    st.metric("Total tokens", st.session_state.stats["total_tokens"])
    st.metric("OOV ratio (unknown tokens / total tokens)", f"{st.session_state.stats['oov_ratio']:.4f}")
    st.metric("Mean tokens per word", f"{st.session_state.stats['tokens_per_word_mean']:.3f}")

    col1, col2 = st.columns(2)
    with col1:
        fig_len = px.histogram(st.session_state.stats["token_lengths"], nbins=40, title="Token length distribution")
        st.plotly_chart(fig_len, use_container_width=True)
    with col2:
        fig_tpw = px.histogram(st.session_state.stats["tokens_per_word_counts"], nbins=20, title="Tokens-per-word distribution")
        st.plotly_chart(fig_tpw, use_container_width=True)

    st.subheader("Most frequent tokens")
    df_top = pd.DataFrame(st.session_state.stats["top_tokens"], columns=["Token", "Count"])
    st.dataframe(df_top.head(50))

    fig_top = px.bar(df_top.head(30), x="Token", y="Count", title="Top 30 tokens")
    st.plotly_chart(fig_top, use_container_width=True)

    st.write(f"Median tokens per word: {st.session_state.stats['tokens_per_word_median']:.3f}")

    save_col1, save_col2 = st.columns(2)
    with save_col1:
        # tokenizer download: serialize to a JSON string and offer a download button (when a tokenizer exists)
        try:
            tok_json_bytes = st.session_state.tokenizer_obj.to_str().encode("utf-8")
            st.download_button(
                label="Download tokenizer .json",
                data=tok_json_bytes,
                file_name=f"{model_choice.lower()}_v{vocab_size}.json",
                mime="application/json",
                key="download_tokenizer"
            )
        except Exception as e:
            st.error(f"Failed to prepare the tokenizer for download: {e}")

    with save_col2:
        # top-tokens CSV download: build the CSV and show the button
        try:
            tmpdf_bytes = df_top.to_csv(index=False).encode("utf-8")
            st.download_button(
                label="Download top tokens CSV",
                data=tmpdf_bytes,
                file_name="top_tokens.csv",
                mime="text/csv",
                key="download_top_tokens"
            )
        except Exception as e:
            st.error(f"Failed to prepare the CSV for download: {e}")

    # build and download the HTML report: generate the HTML and offer a download_button right away
    try:
        html_report = build_html_report(st.session_state.texts, st.session_state.tokenizer_obj, st.session_state.stats, title=f"Report: {model_choice} vocab={vocab_size}")
        st.download_button(
            label="Build and download HTML report",
            data=html_report.encode("utf-8"),
            file_name=export_html_name,
            mime="text/html",
            key="download_html"
        )
    except Exception as e:
        st.error(f"Error while building the HTML report: {e}")
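
# Dependencies inferred from the imports above (a plausible, unpinned
# requirements.txt): streamlit, numpy, pandas, plotly, tokenizers, rapidfuzz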