Spaces:
Sleeping
Sleeping
File size: 1,758 Bytes
9cf13c3 a03c764 9b17a72 e0b229b 9b17a72 54cad9d 9b17a72 a03c764 9b17a72 a03c764 54cad9d 9b17a72 e0b229b a03c764 9b17a72 29cd84d 9b17a72 9cf13c3 9b17a72 9cf13c3 9b17a72 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 |
from fastapi import FastAPI, Request
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import uvicorn
app = FastAPI(title="AI Detector API")
# Load model once at startup
# roberta-base-openai-detector: RoBERTa fine-tuned by OpenAI to separate
# GPT-2 output from human text. Loading at import time means every request
# reuses the same in-memory weights (no per-request download/reload).
MODEL_NAME = "roberta-base-openai-detector"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
# Inference only: switch off dropout etc. for deterministic scoring.
model.eval()
def get_ai_probability(text: str) -> float:
    """Return the AI probability (0–100%) for a given text.

    Tokenizes with the module-level tokenizer (truncated to the model's
    512-token limit) and runs one forward pass with gradients disabled.

    Args:
        text: Input text; anything past 512 tokens is truncated.

    Returns:
        Probability that the text is AI-generated, as a percentage
        rounded to 2 decimal places.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.softmax(logits, dim=1)
    # BUG FIX: for roberta-base-openai-detector the label mapping is
    # index 0 = "Fake" (AI-generated), index 1 = "Real" (human) — the
    # reference OpenAI detector unpacks `fake, real = probs`. The
    # original read probs[0][1], i.e. the HUMAN probability, so every
    # score was inverted.
    ai_score = probs[0][0].item() * 100
    return round(ai_score, 2)
@app.post("/analyze")
async def analyze_text(request: Request):
    """
    Score each paragraph of the posted text for AI-likeness.

    Example body:
    {
        "text": "Your long article text here"
    }

    Returns overall plus per-paragraph ai/human scores. Error payloads
    keep the existing {"error": ...} shape for backward compatibility.
    """
    # Robustness fix: a malformed or non-object JSON body previously
    # escaped as an unhandled exception (HTTP 500); report it in the
    # endpoint's established error shape instead.
    try:
        data = await request.json()
    except Exception:
        return {"error": "Invalid JSON body"}
    if not isinstance(data, dict):
        return {"error": "Invalid JSON body"}
    text = data.get("text", "").strip()
    if not text:
        return {"error": "No text provided"}
    # Split on newlines and drop blanks; at least one paragraph survives
    # because text is non-empty after strip(), so len(results) > 0 below.
    paragraphs = [p.strip() for p in text.split("\n") if p.strip()]
    results = []
    for i, para in enumerate(paragraphs, start=1):
        ai_score = get_ai_probability(para)
        results.append({
            "paragraph": i,
            "ai_score": ai_score,
            "human_score": round(100 - ai_score, 2),
            # Preview only: cap echoed content at 200 chars.
            "content": para[:200] + ("..." if len(para) > 200 else ""),
        })
    # Generator instead of a throwaway list inside sum().
    overall = sum(r["ai_score"] for r in results) / len(results)
    return {
        "overall_ai_score": round(overall, 2),
        "overall_human_score": round(100 - overall, 2),
        "paragraphs": results,
    }
if __name__ == "__main__":
    # Run the ASGI server directly; 0.0.0.0 binds all interfaces for
    # container use, and port 7860 is the Hugging Face Spaces convention.
    uvicorn.run(app, host="0.0.0.0", port=7860)
|