import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import gradio as gr

# ---------------------------------------------------------
# 1. Load DeBERTa text model
# ---------------------------------------------------------
text_model_path = "./DeBERTa"
tokenizer = AutoTokenizer.from_pretrained(text_model_path)
text_model = AutoModelForSequenceClassification.from_pretrained(text_model_path)
text_model.eval()  # inference only: disables dropout / training-mode layers

# Human-readable class names, in logit order. Index 0 = real, index 1 = fake;
# NOTE(review): this must match the label order the checkpoint was trained
# with — confirm against the model's config.id2label.
_LABELS = ("Real News", "Fake News")


# ---------------------------------------------------------
# 2. Prediction function (text only)
# ---------------------------------------------------------
def predict_text(text):
    """Classify ``text`` as real or fake news.

    Args:
        text: Raw news headline or article body.

    Returns:
        dict mapping class name -> probability, the shape ``gr.Label`` expects.
    """
    # Empty/whitespace input would still tokenize (special tokens only) and
    # produce a spurious confident prediction; an even split is more honest.
    if not text or not text.strip():
        return {label: 0.5 for label in _LABELS}

    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        logits = text_model(**inputs).logits
    # squeeze(0), not squeeze(): drop only the batch dim so a single-class
    # model could never collapse the tensor to 0-d and break iteration.
    probs = torch.softmax(logits, dim=-1).squeeze(0).tolist()
    return {label: float(p) for label, p in zip(_LABELS, probs)}


# ---------------------------------------------------------
# 3. Gradio UI (single tab)
# ---------------------------------------------------------
app = gr.Interface(
    fn=predict_text,
    inputs=gr.Textbox(lines=4, placeholder="Enter news article or headline..."),
    outputs=gr.Label(num_top_classes=2),
    title="Text Fake News Detector",
)

# ---------------------------------------------------------
# 4. Launch
# ---------------------------------------------------------
if __name__ == "__main__":
    app.launch()