import torch
from transformers import (
AutoTokenizer,
AutoModelForSequenceClassification
)
import gradio as gr
# ---------------------------------------------------------
# 1. Load DeBERTa text model
# ---------------------------------------------------------
# Local directory containing the fine-tuned DeBERTa checkpoint and tokenizer
# files (config, weights, vocab). NOTE(review): path is relative — assumes the
# app is launched from the directory that contains ./DeBERTa; confirm.
text_model_path = "./DeBERTa"
tokenizer = AutoTokenizer.from_pretrained(text_model_path)
text_model = AutoModelForSequenceClassification.from_pretrained(text_model_path)
# Inference-only app: disable dropout/batch-norm training behavior.
text_model.eval()
# ---------------------------------------------------------
# 2. Prediction function (text only)
# ---------------------------------------------------------
def predict_text(text):
    """Score *text* with the DeBERTa classifier.

    Tokenizes the input, runs a no-grad forward pass, and softmaxes the
    logits over the class dimension.

    Returns:
        dict: ``{"Real News": p0, "Fake News": p1}`` — probabilities for
        the two classes, in the shape expected by ``gr.Label``.
        Assumes index 0 = real, index 1 = fake — matches the checkpoint's
        label order (TODO confirm against the model config).
    """
    encoded = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        logits = text_model(**encoded).logits
    # Single input -> squeeze the batch dim down to a flat probability list.
    scores = torch.softmax(logits, dim=1).squeeze().tolist()
    return {
        "Real News": float(scores[0]),
        "Fake News": float(scores[1]),
    }
# ---------------------------------------------------------
# 3. Gradio UI (single tab)
# ---------------------------------------------------------
# Single-tab Gradio interface: free-text input in, two-class label card out.
app = gr.Interface(
    fn=predict_text,
    inputs=gr.Textbox(lines=4, placeholder="Enter news article or headline..."),
    # num_top_classes=2 shows both class probabilities, not just the argmax.
    outputs=gr.Label(num_top_classes=2),
    title="Text Fake News Detector",
)
# ---------------------------------------------------------
# 4. Launch
# ---------------------------------------------------------
# Start the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    app.launch()