# Hugging Face Space: AI content classifier (the Space page currently reports "Runtime error").
| import gradio as gr | |
| from transformers import AutoModelForSequenceClassification, AutoTokenizer | |
| import torch | |
| import torch.nn.functional as F | |
# Hugging Face checkpoint used for human/AI/paraphrase text classification.
model_name = "vai0511/ai-content-classifier"

# Load the tokenizer and classification model once at startup so每 request
# only pays for inference, not for model initialization.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def classify_text(text: str):
    """Classify *text* as Human-Written, AI-Generated, or Paraphrased.

    Runs the module-level `model`/`tokenizer` on the input (truncated to
    512 tokens) and returns a tuple of:
      - the predicted label string, and
      - a dict mapping each label to its softmax probability in percent,
        rounded to 2 decimal places.
    """
    # Class-index -> human-readable label mapping used by this checkpoint.
    labels = {0: "Human-Written", 1: "AI-Generated", 2: "Paraphrased"}

    encoded = tokenizer(
        text, return_tensors="pt", truncation=True, padding=True, max_length=512
    )
    # Inference only — no gradients needed.
    with torch.no_grad():
        logits = model(**encoded).logits

    # Softmax turns raw logits into a probability distribution over classes.
    probs = F.softmax(logits, dim=1)[0].tolist()
    best = torch.argmax(logits, dim=1).item()

    scores = {labels[idx]: round(p * 100, 2) for idx, p in enumerate(probs)}
    return labels[best], scores
# Build the Gradio UI: one free-text input, a label output plus the full
# per-class percentage breakdown. live=True re-runs classification as the
# user types.
text_input = gr.Textbox(label="Enter Text to Classify")
result_box = gr.Textbox(label="Classification Result")
score_view = gr.JSON(label="Classification Percentages")

iface = gr.Interface(
    fn=classify_text,
    inputs=text_input,
    outputs=[result_box, score_view],
    live=True,
)

# Start the web app.
iface.launch()