import streamlit as st
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
MODEL_PATH = "rubert2"


@st.cache_resource
def _load_model(model_path: str = MODEL_PATH):
    """Load and cache the toxicity classifier and its tokenizer.

    Decorated with ``st.cache_resource`` so the model is loaded once per
    server process instead of on every Streamlit script rerun (each
    widget interaction reruns the whole script).

    Returns:
        tuple: ``(model, tokenizer)`` with the model in eval mode.
    """
    model = AutoModelForSequenceClassification.from_pretrained(model_path)
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model.eval()
    return model, tokenizer


def run():
    """Render the toxicity-check page and score the submitted text."""
    model, tokenizer = _load_model()

    text = st.text_area("Введите сообщение", "Ты ужасный человек!")
    submit = st.button("Проверить токсичность")

    # Only score when the button was pressed and the input is non-blank.
    if submit and text.strip():
        inputs = tokenizer(text, return_tensors="pt", truncation=True)

        # Inference only — no gradients needed.
        with torch.no_grad():
            logits = model(**inputs).logits

        # NOTE(review): sigmoid + .item() assumes a single-logit
        # (num_labels=1) regression/binary head — confirm against the
        # "rubert2" model config; a 2-label head would make .item() raise.
        score = torch.sigmoid(logits).item()

        st.subheader("Результат:")
        st.write(f"**Степень токсичности:** `{score:.3f}`")

        # Thresholds 0.4 / 0.8 split the score into low / medium / high.
        if score > 0.8:
            st.error("⚠️ Высокая токсичность!")
        elif score > 0.4:
            st.warning("⚠️ Средняя токсичность")
        else:
            st.success("✅ Низкая токсичность")