# NOTE: removed web-scrape page artifacts (status lines, commit hashes, line-number gutter).
import io

import numpy as np
import tensorflow as tf
from fastapi import Body, FastAPI, File, HTTPException, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import RedirectResponse
from PIL import Image

from imagePreprocess import CNNPredict, ResnetPredict, clip_predict, profile_image_for_cnn_predict
from structure import ImagePredictionResponse, PredictionEntry, TextPredictionRequest, TextPredictionResponse
from textPreprocess import predict_text
# Browser origins permitted to call this API (local dev servers + deployed front-end).
origins = [
    "http://localhost:5173",
    "http://localhost",
    "https://authentica-ai.vercel.app",
]
# Application instance; docs UI advertises the two prediction endpoints.
app = FastAPI(
    title="Authentica API",
    version="0.1.0",
    description=(
        "Simple demo API for image and text prediction. "
        "Upload an image to `/predict/image` or POST text to `/predict/text`."
    ),
)

# CORS: allow the whitelisted front-end origins full method/header access,
# including credentialed requests.
_cors_options = {
    "allow_origins": origins,
    "allow_credentials": True,
    "allow_methods": ["*"],
    "allow_headers": ["*"],
}
app.add_middleware(CORSMiddleware, **_cors_options)
@app.get("/", include_in_schema=False)
async def root():
    """Send visitors of the bare root straight to the interactive Swagger UI."""
    return RedirectResponse(url="/docs")
@app.post(
    "/predict/image",
    response_model=ImagePredictionResponse,
    summary="Predict image using all available models",
    description="Upload an image file (jpg/png). It is evaluated on all 3 models and class index/confidence is returned.",
)
async def predict(image: UploadFile = File(...)):
    """Run an uploaded image through the CNN, ResNet and CLIP models.

    Parameters
    ----------
    image : UploadFile
        Uploaded image file (jpg/png); decoded with Pillow and converted to RGB.

    Returns
    -------
    ImagePredictionResponse
        One PredictionEntry per model. Predicted class 1 means "Real", 0 means "AI".

    Raises
    ------
    HTTPException
        400 if preprocessing fails (the preprocessor reports failure by
        returning an error string).
    """
    image_data = await image.read()
    pil_img = Image.open(io.BytesIO(image_data)).convert("RGB")
    profile_img = profile_image_for_cnn_predict(pil_img, crop_size=512)
    # BUG FIX: the original returned the raw error string here, which fails
    # validation against response_model=ImagePredictionResponse and reaches the
    # client as a 500. Surface it as an explicit 400 instead.
    if isinstance(profile_img, str):
        raise HTTPException(status_code=400, detail=f"Error processing image: {profile_img}")
    print(f"Profile image shape: {profile_img.shape}")
    # CNN/ResNet scores appear to be P(real) while CLIP's score is P(AI),
    # hence the flipped class mapping below — TODO confirm against the models.
    cnnPred = CNNPredict(profile_img)
    resnetPred = ResnetPredict(profile_img)
    clipPred = clip_predict(pil_img, crop_size=512)
    # Predicted classes: 1 is Real, 0 is AI.
    resnet_class = 1 if resnetPred >= 0.5 else 0
    cnn_class = 1 if cnnPred >= 0.5 else 0
    clip_class = 0 if clipPred > 0.5 else 1
    # Confidence is the probability assigned to the chosen class (always >= 0.5).
    resnet_conf = resnetPred if resnetPred >= 0.5 else 1 - resnetPred
    cnn_conf = cnnPred if cnnPred >= 0.5 else 1 - cnnPred
    clip_conf = clipPred if clipPred > 0.5 else 1 - clipPred
    predictions = [
        PredictionEntry(model="CNN", predicted_class=cnn_class, confidence=round(float(cnn_conf), 4)),
        PredictionEntry(model="ResNet", predicted_class=resnet_class, confidence=round(float(resnet_conf), 4)),
        PredictionEntry(model="CLIP", predicted_class=clip_class, confidence=round(float(clip_conf), 4)),
    ]
    return ImagePredictionResponse(predictions=predictions)
@app.post(
    "/predict/text",
    response_model=TextPredictionResponse,
    summary="Predict text",
    description="POST a JSON body with `text` to get a predicted label and confidence.",
)
async def predict_text_endpoint(payload: TextPredictionRequest = Body(...)):
    """Classify the submitted text as human- or AI-written.

    Delegates to textPreprocess.predict_text. On any failure the error is
    logged and a neutral "Human"/0.5 fallback is returned instead of a 500,
    keeping the endpoint responsive.
    """
    try:
        outcome = predict_text(payload.text)
        return TextPredictionResponse(
            predicted_class=outcome["predicted_class"],
            confidence=outcome["confidence"],
        )
    except Exception as e:
        # Deliberate best-effort fallback: never let a model error crash the endpoint.
        print(f"Error in text prediction: {e}")
        return TextPredictionResponse(predicted_class="Human", confidence=0.5)