# detectAI / backend / debug_model.py
# NOTE: the lines below are scraped Hugging Face file-viewer chrome
# (author: vivek1192, commit: "Setup CI/CD for Hugging Face", rev 171eb01),
# kept here as a comment so the file remains valid Python.
"""Debug script: run the RoBERTa OpenAI-output detector on a sample text.

Loads the detector from the Hugging Face hub (network access required on
first run), feeds it one hand-written sentence, and prints the raw logits,
softmax probabilities, and the model config's label mapping so the label
order (which index means "Fake"/"Real") can be confirmed by inspection.
"""
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

MODEL_NAME = "openai-community/roberta-base-openai-detector"

print(f"Loading {MODEL_NAME}...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)

# Sample inputs; only human_text is actually fed to the model below.
human_text = "I went to the grocery store today to buy some apples and bananas for my lunch."
ai_text = "The quick brown fox jumps over the lazy dog."  # actually typical test text, but let's assume human-like

inputs = tokenizer(human_text, return_tensors="pt")

# Inference only — disable gradient tracking.
# BUG FIX: the forward pass must be inside the no_grad context; the original
# left it at module level, which made the empty `with` body a SyntaxError.
with torch.no_grad():
    outputs = model(**inputs)

logits = outputs.logits
probs = torch.softmax(logits, dim=1)  # shape (1, 2): one input, two classes

print(f"Text: {human_text}")
print(f"Logits: {logits}")
print(f"Probs: {probs}")
# The "?" in the labels is deliberate: index-to-label order is exactly what
# this script is probing — the id2label printout below is the ground truth.
print(f"Label 0 (Fake/AI?): {probs[0][0].item():.4f}")
print(f"Label 1 (Real/Human?): {probs[0][1].item():.4f}")

id2label = model.config.id2label
print(f"Config Labels: {id2label}")