"""Gradio app for Egyptian Arabic text classification.

Loads a text-classification pipeline from the Hugging Face Hub together with
its ``labels.json`` mapping, and exposes a two-output Gradio interface:
a True/False verdict plus the full JSON details. Only the 'ads' and
'neutral' labels are treated as True; every other label maps to False.
"""

import json

import gradio as gr
import requests
from transformers import pipeline

# Model name on the Hugging Face Hub.
model_name = "Woolv7007/egyptian-text-classification"

# Load labels.json from the Hub: a list (or id-keyed dict) mapping the
# model's LABEL_<n> ids to human-readable label names.
labels_url = f"https://huggingface.co/{model_name}/resolve/main/labels.json"
try:
    # fix: a timeout prevents app startup from hanging forever on a stalled
    # connection (requests has no default timeout).
    response = requests.get(labels_url, timeout=10)
    response.raise_for_status()
    labels = response.json()
    if isinstance(labels, dict):
        # fix: order by integer key so list position matches the label id,
        # instead of trusting the JSON object's insertion order.
        try:
            labels = [labels[key] for key in sorted(labels, key=int)]
        except (ValueError, TypeError):
            # Keys are not all numeric — fall back to insertion order.
            labels = list(labels.values())
    print("Labels loaded:", labels)
except requests.exceptions.RequestException as e:
    # Best-effort: without labels we fall back to raw LABEL_<n> ids below.
    print("Failed to load labels.json:", e)
    labels = None

# Load the model pipeline (downloads weights on first run).
pipe = pipeline("text-classification", model=model_name)
print("Model loaded.")

# Labels whose presence counts as a True prediction.
TRUE_LABELS = frozenset(["ads", "neutral"])


def predict(text):
    """Classify `text` and return a (verdict, details) pair of strings.

    Args:
        text: Egyptian Arabic input text.

    Returns:
        Tuple of two strings: "True"/"False" (or "Error"), and a pretty-printed
        JSON document with the boolean prediction, the mapped label name, and
        the model's confidence score.
    """
    print("Input:", text)
    try:
        result = pipe(text)[0]
        print("Raw result:", result)

        # Map the pipeline's LABEL_<n> id to a human-readable name when the
        # labels list is available and the id is in range.
        label_id = int(result['label'].replace("LABEL_", ""))
        if labels and 0 <= label_id < len(labels):
            label_text = labels[label_id]
        else:
            label_text = result['label']
        print("Mapped label:", label_text)

        prediction_bool = label_text.lower() in TRUE_LABELS
        confidence = round(result['score'], 3)

        json_output = {
            "prediction": prediction_bool,
            "original_label": label_text,
            "confidence": confidence
        }
        return str(prediction_bool), json.dumps(json_output, indent=4, ensure_ascii=False)
    except Exception as e:
        # Top-level UI boundary: report the error instead of crashing the app.
        error_msg = str(e)
        print("Prediction error:", error_msg)
        return "Error", json.dumps({"error": error_msg}, indent=4, ensure_ascii=False)


# Gradio interface: one text input, two text outputs.
demo = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(lines=3, placeholder="Enter Egyptian Arabic text..."),
    outputs=[
        gr.Textbox(label="Prediction (True/False)"),
        gr.Textbox(label="Full JSON Output")
    ],
    title="Egyptian Text Classification",
    description="This model classifies Egyptian Arabic text. Only 'ads' and 'neutral' are considered True; all other labels are considered False."
)

# fix: guard the launch so the module can be imported (e.g. by `gradio`'s
# reload mode, which picks up `demo`) without starting a server as a side
# effect; running the script directly behaves exactly as before.
if __name__ == "__main__":
    demo.launch()