import os

# Redirect the Hugging Face caches to a writable directory. Hosted platforms
# (e.g. Hugging Face Spaces) may mount the default cache location read-only,
# which makes the model download fail. These variables must be set before
# transformers is imported, since the library reads them at import time.
os.environ["HF_HOME"] = "/tmp/huggingface"
# Legacy cache variables, kept for older versions of transformers/datasets;
# newer releases derive these paths from HF_HOME.
os.environ["TRANSFORMERS_CACHE"] = "/tmp/huggingface"
os.environ["HF_DATASETS_CACHE"] = "/tmp/huggingface"
os.environ["HF_METRICS_CACHE"] = "/tmp/huggingface"

# Create the cache directory up front so the first download cannot fail on a
# missing path.
os.makedirs("/tmp/huggingface", exist_ok=True)

import streamlit as st
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch


# Hugging Face Hub repo that hosts the fine-tuned classifier
model_name = "laiBatool/laiba-spam-classifier-bert"  # replace with your own repo name if different


@st.cache_resource
def load_model():
    # Download (or load from the local cache) the tokenizer and fine-tuned
    # model once; st.cache_resource keeps them alive across Streamlit reruns.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    return tokenizer, model

tokenizer, model = load_model()

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():  # inference only, no gradient graph needed
        outputs = model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=1)
    pred = torch.argmax(probs, dim=1).item()
    # Label convention assumed from training: 1 = spam, 0 = ham
    return "Spam" if pred == 1 else "Not Spam"

# Streamlit UI
st.title("📧 Spam Detector - BERT")
st.write("Paste an email message and check if it's spam.")

user_input = st.text_area("Email content", height=200)

if st.button("Classify"):
    if not user_input.strip():
        st.warning("Please enter some text.")
    else:
        result = predict(user_input)
        st.success(f"Prediction: {result}")