import gradio as gr
from transformers import AutoTokenizer, T5ForConditionalGeneration
import torch

# Use the correct original tokenizer: t5-small
tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = T5ForConditionalGeneration.from_pretrained("naot97/vietnamese-toxicity-detection_3")

def detect_toxic(text):
    # T5 is text-to-text, so the task is phrased as a prompt prefix.
    prompt = f"toxic classification: {text}"
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
    with torch.no_grad():
        outputs = model.generate(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_length=10,  # the label is short, so a few tokens suffice
        )
    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return f"Toxicity: {decoded}"

gr.Interface(
    fn=detect_toxic,
    inputs="text",
    outputs="text",
    title="Vietnamese Toxicity Detector",
    description="Vietnamese toxicity detection based on a T5 model",
).launch()
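
For a quick sanity check without starting the web UI, detect_toxic can be called directly. A minimal sketch, assuming the checkpoint downloads successfully; the input sentence is illustrative and the decoded label depends entirely on the fine-tuned checkpoint:

# Hypothetical smoke test: run this in place of the launch() call above.
print(detect_toxic("Hôm nay trời đẹp quá!"))  # "The weather is so nice today!"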