# NOTE(review): the three lines below were Hugging Face Spaces page residue
# ("Spaces: / Sleeping / Sleeping") captured during export — not program code.
# Kept here as a comment so the file remains valid Python.
# Standard library
import json
import logging
import os

# Third-party
import gradio as gr

# Local
from inference import ContentClassifierInference

# Configure root logging once at import time and grab a module-level logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Load the classifier once at startup. The UI and both handlers check
# `model_initialized` and degrade gracefully if loading failed.
try:
    model = ContentClassifierInference()
except Exception as e:  # broad by design: any startup failure just disables the app
    logger.error(f"Error initializing model: {e}")
    model_initialized = False
else:
    model_initialized = True
    logger.info("Model initialized successfully")
def classify_text(text):
    """Classify a single text input.

    Args:
        text: Raw user text to classify.

    Returns:
        A JSON-formatted string containing either the model's prediction
        dict or an ``{"error": ...}`` payload. Returned as a string so
        ``predict_api`` can round-trip it with ``json.loads``; gr.JSON
        parses it for display.
    """
    if not model_initialized:
        return json.dumps({"error": "Model initialization failed"}, indent=2)
    if not text or not text.strip():
        return json.dumps({"error": "Please provide valid text input"}, indent=2)
    try:
        result = model.predict(text.strip())
        # Lazy %-style args: the message is only formatted if INFO is enabled.
        logger.info("Processed text classification: %s", result["threat_prediction"])
        return json.dumps(result, indent=2)
    except Exception as e:
        # logger.exception records the full traceback, not just the message.
        logger.exception("Classification error")
        return json.dumps({"error": str(e)}, indent=2)
def classify_batch(text, *, max_batch=10):
    """Classify a batch of texts supplied as one newline-separated string.

    Args:
        text: Newline-separated texts; blank lines are ignored.
        max_batch: Maximum number of texts accepted per call (keyword-only;
            defaults to 10, matching the UI's documented limit).

    Returns:
        A JSON-formatted string with the list of prediction results, or an
        ``{"error": ...}`` payload.
    """
    if not model_initialized:
        return json.dumps({"error": "Model initialization failed"}, indent=2)
    if not text or not text.strip():
        return json.dumps({"error": "Please provide valid text input"}, indent=2)
    try:
        # One candidate text per line; drop empty/whitespace-only lines.
        texts = [t.strip() for t in text.split("\n") if t.strip()]
        if not texts:
            return json.dumps({"error": "No valid texts provided"}, indent=2)
        if len(texts) > max_batch:  # guard against oversized batches
            return json.dumps(
                {"error": f"Batch size limited to {max_batch} texts"}, indent=2
            )
        results = model.predict_batch(texts)
        # Lazy %-style args: formatted only when INFO logging is enabled.
        logger.info("Processed batch of %d texts", len(texts))
        return json.dumps(results, indent=2)
    except Exception as e:
        # logger.exception records the full traceback, not just the message.
        logger.exception("Batch classification error")
        return json.dumps({"error": str(e)}, indent=2)
# API function for external use
def predict_api(text):
    """Programmatic entry point: classify one string or a list of strings.

    Round-trips through ``classify_text``'s JSON output, so callers get
    parsed dicts (or a list of dicts) rather than JSON strings.
    """
    if isinstance(text, list):
        return [json.loads(classify_text(item)) for item in text]
    return json.loads(classify_text(text))
# ---------------------------------------------------------------------------
# Gradio UI: two interactive tabs (single + batch) and a static API-docs tab.
# ---------------------------------------------------------------------------
with gr.Blocks(title="Content Classifier", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# π Content Classifier")
    gr.Markdown("""
    This tool classifies content as either **safe** or **unsafe** using an ONNX model.
    Perfect for content moderation, safety checks, and automated text analysis.
    """)

    # Status banner reflects whether startup model loading succeeded.
    banner = (
        "β **Model Status**: Ready"
        if model_initialized
        else "β **Model Status**: Failed to initialize"
    )
    gr.Markdown(banner)

    with gr.Tab("Single Text Classification"):
        with gr.Row():
            with gr.Column():
                text_input = gr.Textbox(
                    label="Enter text to classify",
                    lines=5,
                    placeholder="Type or paste your text here...",
                    max_lines=10,
                )
                classify_btn = gr.Button("π Classify", variant="primary", size="lg")
                # Clickable sample inputs under the textbox.
                gr.Examples(
                    examples=[
                        ["This is a normal, safe piece of content."],
                        ["Hello, how are you doing today?"],
                        ["Example text for content classification"],
                    ],
                    inputs=text_input,
                    label="Try these examples:",
                )
            with gr.Column():
                result_output = gr.JSON(label="Classification Result", show_label=True)
        classify_btn.click(fn=classify_text, inputs=text_input, outputs=result_output)

    with gr.Tab("Batch Processing"):
        with gr.Row():
            with gr.Column():
                batch_input = gr.Textbox(
                    label="Enter multiple texts (one per line)",
                    lines=10,
                    placeholder="Text 1\nText 2\nText 3\n...(max 10 texts)",
                    max_lines=15,
                )
                batch_btn = gr.Button("π Process Batch", variant="primary", size="lg")
                gr.Markdown("**Note**: Maximum 10 texts per batch")
            with gr.Column():
                batch_output = gr.JSON(label="Batch Classification Results", show_label=True)
        batch_btn.click(fn=classify_batch, inputs=batch_input, outputs=batch_output)

    with gr.Tab("API Documentation"):
        gr.Markdown("""
        ## π API Usage
        This Space can be used as an API endpoint for programmatic access.
        ### Single Text Classification
        ```python
        import requests
        url = "https://your-space-name.hf.space/predict"
        response = requests.post(url, json={"text": "Your content to classify"})
        result = response.json()
        ```
        ### Batch Processing
        ```python
        import requests
        url = "https://your-space-name.hf.space/predict"
        texts = ["Text 1", "Text 2", "Text 3"]
        response = requests.post(url, json={"text": texts})
        results = response.json()
        ```
        ### Response Format
        ```json
        {
            "is_threat": false,
            "final_confidence": 0.85,
            "threat_prediction": "safe",
            "onnx_prediction": {
                "safe": 0.85,
                "unsafe": 0.15
            },
            "models_used": ["onnx"],
            "raw_predictions": {...}
        }
        ```
        ### Using with curl
        ```bash
        curl -X POST https://your-space-name.hf.space/predict \\
          -H "Content-Type: application/json" \\
          -d '{"text": "Your content to classify"}'
        ```
        """)
# Script entry point.
if __name__ == "__main__":
    # 0.0.0.0:7860 is the standard Hugging Face Spaces binding; no public
    # share link is needed since the Space itself is the public endpoint.
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
    )