File size: 2,100 Bytes
c395f90
 
60e3188
c395f90
 
 
 
 
 
cb5aeb3
682ebcc
cb5aeb3
60e3188
2b2e0e0
 
 
 
 
 
60e3188
 
2b2e0e0
cb5aeb3
2b2e0e0
 
682ebcc
 
cb5aeb3
682ebcc
cb5aeb3
682ebcc
 
 
60e3188
 
 
 
 
 
 
 
 
 
 
 
 
 
cb5aeb3
682ebcc
60e3188
 
 
 
 
 
 
 
 
 
682ebcc
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
import os

# ✅ Fix: Use writable directories for cache and metrics.
# Hosted containers (e.g. HF Spaces) often mount the home directory
# read-only, so Hugging Face and Streamlit state is redirected to /tmp.
_HF_DIR = "/tmp/hf"
_STREAMLIT_DIR = "/tmp/.streamlit"

for _name, _target in (
    ("TRANSFORMERS_CACHE", _HF_DIR),
    ("HF_HOME", _HF_DIR),
    ("STREAMLIT_HOME", _STREAMLIT_DIR),
):
    os.environ[_name] = _target

# Create the directories up front so the libraries never have to.
os.makedirs(_HF_DIR, exist_ok=True)
os.makedirs(_STREAMLIT_DIR, exist_ok=True)

import streamlit as st
from transformers import pipeline

# 🔐 Load Hugging Face token from Space secrets (None if the secret is unset).
HF_TOKEN = os.getenv("HF_TOKEN")

# 🔁 Load the private model using the token.
# NOTE(review): return_all_scores=True is deprecated upstream in favor of
# top_k=None, but the two produce differently nested outputs — the classify
# handler indexes the result as classifier(text)[0], so the flag is kept as-is.
_PIPELINE_CONFIG = {
    "model": "azratuni/isl-classifier",
    "token": HF_TOKEN,
    "return_all_scores": True,  # ✅ Return all class scores
}
classifier = pipeline("text-classification", **_PIPELINE_CONFIG)

# 🧠 Streamlit UI — set_page_config must run before any other st.* call.
_PAGE_TITLE = "Islamophobia Classifier"
_PAGE_ICON = "🛡️"
st.set_page_config(page_title=_PAGE_TITLE, page_icon=_PAGE_ICON)

st.title("🛡️ Islamophobia Detection")
st.write("Enter a sentence below to check if it's Islamophobic or not.")

# Free-text input; Streamlit yields "" until the user types something.
text_input = st.text_area(
    "Input text",
    placeholder="e.g. Muslims are terrorists.",
)

if st.button("Classify"):
    # Guard clause: reject whitespace-only input before running the model.
    if not text_input.strip():
        st.warning("Please enter some text.")
    else:
        with st.spinner("Classifying..."):
            # The pipeline returns one list of {label, score} dicts per input;
            # take the single input's scores, highest confidence first.
            ranked = sorted(
                classifier(text_input)[0],
                key=lambda entry: entry["score"],
                reverse=True,
            )
            best, runner_up = ranked[0], ranked[1]
            margin = best["score"] - runner_up["score"]

            # Map raw model labels to human-readable names; unknown labels
            # fall through unchanged.
            label_map = {
                "LABEL_0": "Not Islamophobic",
                "LABEL_1": "Islamophobic",
            }
            label = label_map.get(best["label"], best["label"])

            st.success(f"**Prediction:** {label}")
            st.write(f"**Confidence:** {best['score']:.2%}")

            # ⚠️ Uncertainty warning if margin is low
            if margin < 0.15:
                st.warning("⚠️ The model is uncertain. Both classes received similar confidence scores.")

            # 🧾 Optional: show both scores
            st.markdown("**Score breakdown:**")
            for entry in ranked:
                st.write(f"- {label_map.get(entry['label'], entry['label'])}: {entry['score']:.2%}")