Spaces:
Sleeping
Sleeping
import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Swap in your own fine-tuned checkpoint here if desired.
MODEL_NAME = "GroNLP/hateBERT"

# Loaded once at startup; from_pretrained returns the model in eval mode.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
def predict(text):
    """Classify *text* and return per-class probabilities.

    Returns a dict mapping each class name to its softmax score,
    in the shape that ``gr.Label`` expects for rendering.
    """
    encoded = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    # Inference only — no gradients needed.
    with torch.no_grad():
        logits = model(**encoded).logits
    # Softmax over the label dimension; batch size is 1, take the first row.
    scores = torch.softmax(logits, dim=1)[0].tolist()
    return {
        "Not Hate": float(scores[0]),
        "Hate Speech": float(scores[1]),
    }
# Wire the classifier into a minimal web UI: one textbox in,
# a probability bar chart (gr.Label) out.
demo = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(lines=3, placeholder="Digite o texto aqui..."),
    outputs=gr.Label(num_top_classes=2),
    title="HateBERT Classifier",
)

demo.launch()