# spam-detector / src/streamlit_app.py
# Author: laiBatool — "Update src/streamlit_app.py" (commit d65cc8b, verified)
import os

# Point every Hugging Face cache at a writable directory. These env vars are
# set *before* importing transformers so they take effect for that library.
_HF_CACHE = "/tmp/huggingface"
os.makedirs(_HF_CACHE, exist_ok=True)  # harmless if it already exists
for _var in ("HF_HOME", "TRANSFORMERS_CACHE", "HF_DATASETS_CACHE", "HF_METRICS_CACHE"):
    os.environ[_var] = _HF_CACHE

import streamlit as st
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
# Hugging Face Hub repo hosting the fine-tuned classifier.
model_name = "laiBatool/laiba-spam-classifier-bert"  # replace with your actual model repo name


@st.cache_resource
def load_model():
    """Download the tokenizer/model pair from the Hub (cached per session)."""
    tok = AutoTokenizer.from_pretrained(model_name)
    clf = AutoModelForSequenceClassification.from_pretrained(model_name)
    return tok, clf


tokenizer, model = load_model()
def predict(text):
    """Classify a message as spam or not spam.

    Args:
        text: Raw email/message body to classify.

    Returns:
        "Spam" if the model predicts class index 1, otherwise "Not Spam".
        (Assumes label 1 == spam in the fine-tuned model — matches the
        original author's mapping.)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    # Fix: inference only — disable autograd so no computation graph is
    # built, saving memory and time on every prediction.
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=1)
    pred = torch.argmax(probs, dim=1).item()
    return "Spam" if pred == 1 else "Not Spam"
# --- Streamlit UI ---
st.title("📧 Spam Detector - BERT")
st.write("Paste an email message and check if it's spam.")

user_input = st.text_area("Email content", height=200)

if st.button("Classify"):
    # Reject whitespace-only input before running the model.
    if user_input.strip():
        st.success(f"Prediction: {predict(user_input)}")
    else:
        st.warning("Please enter some text.")