|
|
import gradio as gr
|
|
|
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
|
|
import torch
|
|
|
import torch.nn.functional as F
|
|
|
|
|
|
|
|
|
# Hugging Face Hub repo that hosts the fine-tuned AI-content classifier.
model_name = "vai0511/ai-content-classifier"

# Download (or load from cache) the tokenizer and the classification model.
# Both come from the same repo so their vocabularies/configs stay in sync.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
|
|
|
|
|
|
|
|
|
def classify_text(text: str):
    """Classify *text* as human-written, AI-generated, or paraphrased.

    The text is tokenized (truncated/padded to 512 tokens), run through the
    sequence-classification model, and the logits are softmaxed into class
    probabilities.

    Returns:
        A tuple ``(label, breakdown)`` where ``label`` is the name of the
        highest-scoring class and ``breakdown`` maps every class name to its
        probability as a percentage rounded to two decimals.
    """
    # Index -> human-readable class name, matching the model's output order.
    label_names = {0: "Human-Written", 1: "AI-Generated", 2: "Paraphrased"}

    encoded = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)

    # Inference only — no gradients needed.
    with torch.no_grad():
        logits = model(**encoded).logits

    # Per-class probabilities for the single input in the batch.
    probs = F.softmax(logits, dim=1)[0].tolist()

    predicted = torch.argmax(logits, dim=1).item()
    breakdown = {label_names[idx]: round(p * 100, 2) for idx, p in enumerate(probs)}

    return label_names[predicted], breakdown
|
|
|
|
|
|
|
|
|
# Output widgets: the winning label plus the full per-class percentage map.
result_box = gr.Textbox(label="Classification Result")
percentages_view = gr.JSON(label="Classification Percentages")

# live=True re-runs the classifier on every edit instead of waiting for Submit.
iface = gr.Interface(
    fn=classify_text,
    inputs=gr.Textbox(label="Enter Text to Classify"),
    outputs=[result_box, percentages_view],
    live=True,
)

iface.launch()
|
|
|
|
|
|
|
|
|
# Live demo: https://huggingface.co/spaces/MUSKAN17/AI_Content_Source_Identifier