import os # โœ… Fix: Use writable directories for cache and metrics os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf" os.environ["HF_HOME"] = "/tmp/hf" os.environ["STREAMLIT_HOME"] = "/tmp/.streamlit" os.makedirs("/tmp/hf", exist_ok=True) os.makedirs("/tmp/.streamlit", exist_ok=True) import streamlit as st from transformers import pipeline # ๐Ÿ” Load Hugging Face token from Space secrets HF_TOKEN = os.getenv("HF_TOKEN") # ๐Ÿ” Load the private model using the token classifier = pipeline( "text-classification", model="azratuni/isl-classifier", token=HF_TOKEN, return_all_scores=True # โœ… Return all class scores ) # ๐Ÿง  Streamlit UI st.set_page_config(page_title="Islamophobia Classifier", page_icon="๐Ÿ›ก๏ธ") st.title("๐Ÿ›ก๏ธ Islamophobia Detection") st.write("Enter a sentence below to check if it's Islamophobic or not.") text_input = st.text_area("Input text", placeholder="e.g. Muslims are terrorists.") if st.button("Classify"): if text_input.strip(): with st.spinner("Classifying..."): scores = classifier(text_input)[0] scores = sorted(scores, key=lambda x: x["score"], reverse=True) top_label = scores[0]["label"] top_score = scores[0]["score"] second_score = scores[1]["score"] margin = top_score - second_score label_map = { "LABEL_0": "Not Islamophobic", "LABEL_1": "Islamophobic" } label = label_map.get(top_label, top_label) st.success(f"**Prediction:** {label}") st.write(f"**Confidence:** {top_score:.2%}") # โš ๏ธ Uncertainty warning if margin is low if margin < 0.15: st.warning("โš ๏ธ The model is uncertain. Both classes received similar confidence scores.") # ๐Ÿงพ Optional: show both scores st.markdown("**Score breakdown:**") for s in scores: st.write(f"- {label_map.get(s['label'], s['label'])}: {s['score']:.2%}") else: st.warning("Please enter some text.")