```python
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

MODEL_PATH = "rubert-finetuned"

# Load the fine-tuned model and tokenizer once at startup and switch to inference mode.
model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH)
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model.eval()

st.set_page_config(page_title="Оценка токсичности", layout="centered")
st.title("💬 Оценка токсичности текста")

# Text input with a sample message and a button that triggers scoring.
text = st.text_area("Введите сообщение", "Ты ужасный человек!")
submit = st.button("Проверить токсичность")

if submit and text.strip():
    inputs = tokenizer(text, return_tensors="pt", truncation=True)

    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        # Assumes a single-logit head; for a two-label head, take softmax over the last dim instead.
        score = torch.sigmoid(logits).item()

    st.subheader("Результат:")
    st.write(f"**Степень токсичности:** `{score:.3f}`")

    # Simple thresholds for the verdict shown to the user.
    if score > 0.8:
        st.error("⚠️ Высокая токсичность!")
    elif score > 0.4:
        st.warning("⚠️ Средняя токсичность")
    else:
        st.success("✅ Низкая токсичность")
```
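Streamlit reruns the entire script on every widget interaction, so loading the model at module level repeats the `from_pretrained` calls on each rerun. A minimal sketch of one common mitigation, assuming a recent Streamlit release with `st.cache_resource` (the `load_model` helper name is illustrative, not part of the original app):

```python
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSequenceClassification

MODEL_PATH = "rubert-finetuned"

@st.cache_resource  # cached across reruns: the model is loaded only once per process
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
    model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH)
    model.eval()
    return tokenizer, model

tokenizer, model = load_model()
```

With the script saved as, say, `app.py`, the page is served locally via `streamlit run app.py`.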