# NOTE: Hugging Face Spaces page header captured by the scrape (not program code):
# Spaces: runtime error; file size: 3,581 bytes; commit 2d5ea4f.
import html
import random

import numpy as np
import torch
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    pipeline
)
from transformers_interpret import SequenceClassificationExplainer
import gradio as gr
# ---------------------------
# Configuration
# ---------------------------
SEED = 42
MODEL_ID = "sosohrabian/my-fine-tuned-bert"  # your model on the Hugging Face Model Hub
# ---------------------------
# Setup Reproducibility
# ---------------------------
# Seed every RNG the stack touches so repeated runs give identical results.
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(SEED)
USE_MPS = torch.backends.mps.is_available()
# Fix: pick the best available backend in priority order CUDA > MPS > CPU.
# The original only checked MPS, so a CUDA machine (whose RNG is seeded
# above) would silently fall back to CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
elif USE_MPS:
    device = torch.device("mps")
else:
    device = torch.device("cpu")
print("Using device:", device)
# ---------------------------
# Load model and tokenizer from Model Hub
# ---------------------------
print("Loading model from:", MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
# ---------------------------
# Prepare pipeline and explainer
# ---------------------------
# AG News topic names, keyed by the model's LABEL_<i> class indices.
label_names = {0: "World", 1: "Sports", 2: "Business", 3: "Sci/Tech"}
# Fix: the original used `0 if cuda else -1`, which ignored MPS entirely and
# ran the pipeline on CPU even after detecting (and printing) an MPS device.
# `pipeline(device=...)` accepts an int GPU index, a device string, or -1.
if torch.cuda.is_available():
    device_index = 0        # first CUDA GPU
elif torch.backends.mps.is_available():
    device_index = "mps"    # Apple-Silicon acceleration
else:
    device_index = -1       # CPU
clf = pipeline("text-classification", model=model, tokenizer=tokenizer, top_k=None, device=device_index)
explainer = SequenceClassificationExplainer(model=model, tokenizer=tokenizer)
# ---------------------------
# Prediction and explanation functions
# ---------------------------
def predict(text: str):
    """Return ``{label_name: probability}`` for *text*, highest score first.

    Returns an empty dict for blank/whitespace-only input.  Accepts both raw
    ``LABEL_<i>`` pipeline labels and models whose config already maps ids to
    human-readable class names.
    """
    text = (text or "").strip()
    if not text:
        return {}
    out = clf(text, truncation=True)
    # With top_k=None the pipeline returns one list per input; unwrap it.
    if isinstance(out, list) and isinstance(out[0], list):
        out = out[0]
    results = {}
    for o in sorted(out, key=lambda x: -x["score"]):
        label = o["label"]
        suffix = label.rsplit("_", 1)[-1]
        if suffix.isdigit():
            # Fix: original did `label.split("_")[1]` unconditionally, which
            # crashed on models that emit real class names instead of LABEL_<i>.
            # Unknown indices fall back to the raw label rather than KeyError.
            name = label_names.get(int(suffix), label)
        else:
            name = label
        results[name] = float(o["score"])
    return results
def explain_html(text: str) -> str:
    """Render per-token attributions as an HTML heat-map of red highlights.

    Opacity encodes the min-max-normalized attribution magnitude, so the most
    influential tokens are the most saturated.
    """
    text = (text or "").strip()
    if not text:
        return "<i>Enter text to see highlighted words.</i>"
    atts = explainer(text)
    if not atts:
        # Defensive: nothing to highlight for this input.
        return "<i>No tokens to highlight.</i>"
    toks = [t for t, _ in atts]
    # Magnitude measures importance; the sign (for/against the class) is dropped.
    scores = np.abs([s for _, s in atts])
    smin, smax = float(np.min(scores)), float(np.max(scores))
    # Min-max normalize into [0, 1]; the epsilon guards the all-equal case.
    scores = (scores - smin) / (smax - smin + 1e-8)
    spans = [
        # Fix: escape each token so markup-like text ("<s>", "&", quotes)
        # cannot inject HTML or break the rendered output.
        f"<span style='background: rgba(255,0,0,{0.15+0.85*s:.2f});"
        f"padding:2px 3px; margin:1px; border-radius:4px; display:inline-block'>{html.escape(tok)}</span>"
        for tok, s in zip(toks, scores)
    ]
    return "<div style='line-height:2'>" + " ".join(spans) + "</div>"
def predict_and_explain(text: str):
    """Return the (label-probability dict, attribution HTML) pair for *text*."""
    probabilities = predict(text)
    highlights = explain_html(text)
    return probabilities, highlights
# ---------------------------
# Gradio App
# ---------------------------
# Ready-made headlines users can click instead of typing their own.
example_headlines = [
    ["Apple unveils new iPhone during annual event"],
    ["The stock market saw major gains today"],
    ["Scientists discover new exoplanet"],
    ["The local team wins the championship"],
]
# Two outputs: the class-probability label widget and the token heat-map HTML.
output_widgets = [
    gr.Label(num_top_classes=4, label="Predicted topic"),
    gr.HTML(label="Important-word highlights"),
]
demo = gr.Interface(
    fn=predict_and_explain,
    inputs=gr.Textbox(lines=3, label="Enter news headline"),
    outputs=output_widgets,
    title="AG News Topic Classifier (Fine-tuned BERT)",
    description="Classifies news headlines and highlights words that influenced the prediction.",
    theme="default",
    examples=example_headlines,
)
# Launch the web UI only when run as a script, not when imported.
if __name__ == "__main__":
    demo.launch()
# (end of scraped file)