Upload 4 files
- app.py +98 -0
- imagePreprocess.py +223 -0
- structure.py +18 -0
- textPreprocess.py +123 -0
app.py
ADDED
@@ -0,0 +1,98 @@
from fastapi import FastAPI, File, UploadFile, Body, HTTPException
from fastapi.responses import RedirectResponse
from fastapi.middleware.cors import CORSMiddleware
from PIL import Image
import io
from Apps.structure import ImagePredictionResponse, TextPredictionRequest, TextPredictionResponse, PredictionEntry
from Apps.imagePreprocess import profile_image_for_cnn_predict, CNNPredict, ResnetPredict, clip_predict
from Apps.textPreprocess import predict_text

origins = [
    "http://localhost:5173",
    "http://localhost",
    "https://authentica-ai.vercel.app",
]

app = FastAPI(
    title="Authentica API",
    description=(
        "Simple demo API for image and text prediction. "
        "Upload an image to `/predict/image` or POST text to `/predict/text`."
    ),
    version="0.1.0",
)
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

@app.get("/", include_in_schema=False)
async def root():
    # Redirect to the automatic Swagger UI provided by FastAPI
    return RedirectResponse(url="/docs")

@app.post(
    "/predict/image",
    response_model=ImagePredictionResponse,
    summary="Predict image using all available models",
    description="Upload an image file (jpg/png). It is evaluated by all three models, and each model's class index and confidence are returned.",
)
async def predict(image: UploadFile = File(...)):
    """Accept an image upload and return predictions from the loaded models."""
    image_data = await image.read()
    pil_img = Image.open(io.BytesIO(image_data)).convert("RGB")
    profile_img = profile_image_for_cnn_predict(pil_img, crop_size=512)
    if isinstance(profile_img, str):
        # Preprocessing failed; raise an HTTP error rather than returning a bare
        # string, which would fail response_model validation.
        raise HTTPException(status_code=422, detail=f"Error processing image: {profile_img}")
    print(f"Profile image shape: {profile_img.shape}")

    cnnPred = CNNPredict(profile_img)
    resnetPred = ResnetPredict(profile_img)
    clipPred = clip_predict(pil_img, crop_size=512)

    # Predicted classes: 1 is Real, 0 is AI.
    # CNN and ResNet output P(Real); CLIP outputs P(AI), so its threshold is inverted.
    resnet_class = 1 if resnetPred >= 0.5 else 0
    cnn_class = 1 if cnnPred >= 0.5 else 0
    clip_class = 0 if clipPred > 0.5 else 1
    resnet_conf = resnetPred if resnetPred >= 0.5 else 1 - resnetPred
    cnn_conf = cnnPred if cnnPred >= 0.5 else 1 - cnnPred
    clip_conf = clipPred if clipPred > 0.5 else 1 - clipPred

    predictions = [
        PredictionEntry(model="CNN", predicted_class=cnn_class, confidence=round(float(cnn_conf), 4)),
        PredictionEntry(model="ResNet", predicted_class=resnet_class, confidence=round(float(resnet_conf), 4)),
        PredictionEntry(model="CLIP", predicted_class=clip_class, confidence=round(float(clip_conf), 4)),
    ]
    return ImagePredictionResponse(predictions=predictions)

@app.post(
    "/predict/text",
    response_model=TextPredictionResponse,
    summary="Predict text",
    description="POST a JSON body with `text` to get a predicted label and confidence.",
)
async def predict_text_endpoint(payload: TextPredictionRequest = Body(...)):
    """Accept a text string and return a prediction of whether it's human- or AI-generated."""
    try:
        result = predict_text(payload.text)
        return TextPredictionResponse(
            predicted_class=result["predicted_class"],
            confidence=result["confidence"],
        )
    except Exception as e:
        # Return a fallback response in case of error
        print(f"Error in text prediction: {e}")
        return TextPredictionResponse(predicted_class="Human", confidence=0.5)

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="127.0.0.1", port=8000)
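For reference, a minimal client sketch for the two endpoints above (not part of the commit): it assumes the server is running locally on port 8000 and that the `requests` package is installed; "sample.jpg" is a placeholder file name and the printed values are illustrative.

import requests

# Multipart upload; the field name "image" matches the UploadFile parameter.
with open("sample.jpg", "rb") as f:
    r = requests.post("http://127.0.0.1:8000/predict/image", files={"image": f})
print(r.json())  # e.g. {"predictions": [{"model": "CNN", "predicted_class": 1, "confidence": 0.93}, ...]}

# JSON body matching TextPredictionRequest.
r = requests.post("http://127.0.0.1:8000/predict/text",
                  json={"text": "This is a sample text to classify"})
print(r.json())  # e.g. {"predicted_class": "Human", "confidence": 0.87}
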
imagePreprocess.py
ADDED
@@ -0,0 +1,223 @@
import os
from PIL import Image, ImageOps
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import preprocess_input
import torch
import clip

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MODELS_DIR = os.path.join(BASE_DIR, "Lib/Models/Image")

# Load models and preprocessing once at module level
clip_mod, clip_pre = clip.load("ViT-B/32", jit=False)
clip_mod.eval()
for p in clip_mod.parameters():
    p.requires_grad = False
mlp_model = tf.keras.models.load_model(os.path.join(MODELS_DIR, "clip_model.keras"))
cnn_model = tf.keras.models.load_model(os.path.join(MODELS_DIR, "cnn_model.keras"))
resnet_model = tf.keras.models.load_model(os.path.join(MODELS_DIR, "resnet_model.keras"))


def center_crop(image: Image.Image, crop_size=512) -> Image.Image | str:
    try:
        image = ImageOps.exif_transpose(image)
        w, h = image.size
        if w < crop_size or h < crop_size:
            # skip small images
            return f"skipped image (too small) ({w}x{h})"
        left = (w - crop_size) // 2
        top = (h - crop_size) // 2
        right = left + crop_size
        bottom = top + crop_size
        return image.crop((left, top, right, bottom))
    except Exception as e:
        return f"Error when cropping center: {e}"


def denoise(src_image: Image.Image) -> Image.Image | str:
    """Denoise the image (GPU if available) and return the denoised PIL image."""
    if src_image is None:
        # Check before converting (the original checked after np.array(), which
        # would already have raised) and return a string so callers'
        # isinstance(..., str) error checks work.
        return "WARNING: No source image, skipping."
    img = np.array(src_image)                  # RGB uint8 numpy array
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    # Denoising parameters
    H = 10        # filter strength for luminance component (recommended 3-15)
    H_COLOR = 10  # same for color components
    TEMPLATE_WINDOW_SIZE = 7
    SEARCH_WINDOW_SIZE = 21
    # Use CUDA if available, otherwise CPU fallback
    try:
        use_cuda = hasattr(cv2, 'cuda') and cv2.cuda.getCudaEnabledDeviceCount() > 0
    except Exception:
        use_cuda = False
    if use_cuda:
        # Create a GpuMat and upload the numpy image to GPU
        gpu_img = cv2.cuda_GpuMat()
        gpu_img.upload(img)  # numpy -> GpuMat on device
        den_gpu = cv2.cuda.fastNlMeansDenoisingColored(
            gpu_img, H, H_COLOR, None, SEARCH_WINDOW_SIZE, TEMPLATE_WINDOW_SIZE
        )
        # Download result back to CPU
        den = den_gpu.download()
    else:
        # Fallback to CPU implementation
        print("NOTICE: CUDA not available; using CPU denoiser.")
        den = cv2.fastNlMeansDenoisingColored(
            img, None,
            H, H_COLOR,
            TEMPLATE_WINDOW_SIZE,
            SEARCH_WINDOW_SIZE
        )
    # cv2.imwrite("denoised.png", den)  # for debugging
    den = cv2.cvtColor(den, cv2.COLOR_BGR2RGB)
    return Image.fromarray(den)


def compute_profile(raw_image: Image.Image, den_image: Image.Image, normalize=False, verbose=True) -> np.ndarray | str:
    # Validate inputs; return error strings (not None) so callers'
    # isinstance(..., str) checks catch failures.
    if raw_image is None:
        return "WARNING: couldn't read raw image"
    if den_image is None:
        return "WARNING: couldn't read denoised image"

    raw = np.array(raw_image)  # RGB uint8 numpy array
    raw = cv2.cvtColor(raw, cv2.COLOR_RGB2BGR)
    den = np.array(den_image)  # RGB uint8 numpy array
    den = cv2.cvtColor(den, cv2.COLOR_RGB2BGR)
    # if shapes differ, resize den to raw's size (keeps alignment); warn
    if den.shape != raw.shape:
        if verbose:
            print(f"NOTE: shape mismatch, resizing denoised from {den.shape[:2]} to {raw.shape[:2]}")
        den = cv2.resize(den, (raw.shape[1], raw.shape[0]), interpolation=cv2.INTER_LINEAR)

    # absolute difference per channel
    diff = cv2.absdiff(raw, den)                   # BGR, uint8
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)  # single-channel uint8

    # optionally normalize to the full 0-255 range (per image)
    if normalize:
        # cv2.normalize maps min->0 and max->255, but a flat image (min == max)
        # would be zeroed out, so handle that case separately
        minv = int(gray.min())
        maxv = int(gray.max())
        if maxv > minv:
            out = cv2.normalize(gray, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
        else:
            # nothing to normalize (flat image); keep as-is (all zeros)
            out = gray
    else:
        # keep raw diff values; dtype is already uint8 with values in 0..255
        out = gray
    # cv2.imwrite("profile.png", out)  # for debugging
    return out


def profile_image_for_cnn_predict(pil_img: Image.Image, crop_size=512):
    """Preprocess the input image and return a numpy array ready for model prediction."""
    # Step 1: Center crop the image
    cropped_img = center_crop(pil_img, crop_size=crop_size)
    if isinstance(cropped_img, str):
        return cropped_img  # return error message if cropping failed
    # Step 2: Denoise the cropped image
    denoised_img = denoise(cropped_img)
    if isinstance(denoised_img, str):
        return denoised_img  # return error message if denoising failed
    # Step 3: Compute the noise-residual profile image
    profile_img = compute_profile(cropped_img, denoised_img, normalize=False)
    if isinstance(profile_img, str):
        return profile_img  # return error message if profile computation failed
    return profile_img


def prepare_cv2_image_for_resnet(cv2_gray_img, target_size=(512, 512)):
    img_rgb = cv2.cvtColor(cv2_gray_img, cv2.COLOR_GRAY2RGB)
    img_rgb = cv2.resize(img_rgb, (target_size[1], target_size[0]), interpolation=cv2.INTER_AREA)
    img_rgb = img_rgb.astype('float32')
    # add batch dim and apply ResNet50 preprocessing
    x = np.expand_dims(img_rgb, axis=0)  # shape (1, H, W, 3)
    x = preprocess_input(x)
    return x


def predict_image_prob_clip(image: Image.Image, threshold=0.5,
                            clip_model=None, clip_preprocess=None,
                            keras_mlp=None):
    """
    Predicts the probability that an image is AI-generated (AI=1) using CLIP + a Keras MLP.

    Args:
        image: PIL.Image.Image to classify
        threshold: float threshold for a binary label (unused here; kept for API compatibility)
        clip_model, clip_preprocess: optionally pass existing CLIP objects
        keras_mlp: optionally pass an existing loaded Keras model
    Returns:
        float: probability that the image is AI-generated, or None if no Keras model is available
    """
    # --- try to reuse provided CLIP objects, otherwise load ViT-B/32 ---
    if clip_model is None or clip_preprocess is None:
        print("Loading default CLIP model...")
        clip_model, clip_preprocess = clip.load("ViT-B/32", device="cpu", jit=False)
        clip_model.eval()
        for p in clip_model.parameters():
            p.requires_grad = False

    # --- a Keras MLP head must be provided ---
    if keras_mlp is None:
        print("No Keras model provided...")
        return None

    # --- normalize image ---
    img = image.convert('RGB')

    # --- preprocess for CLIP and get embedding ---
    input_tensor = clip_preprocess(img).unsqueeze(0).to("cpu")  # shape (1, C, H, W)
    with torch.no_grad():
        emb = clip_model.encode_image(input_tensor)  # (1, D)
        emb = emb / emb.norm(dim=-1, keepdim=True)   # L2 normalize

    emb_np = emb.cpu().numpy().astype('float32')     # shape (1, D)

    # --- predict with the Keras MLP ---
    probs = keras_mlp.predict(emb_np, verbose=0).reshape(-1,)
    return float(probs[0])


def clip_predict(pil_img: Image.Image, crop_size=512):
    # pass model objects explicitly (faster if you call this repeatedly)
    pil_img = center_crop(pil_img, crop_size=crop_size)
    if isinstance(pil_img, str):
        return pil_img  # return error message
    return predict_image_prob_clip(pil_img,
                                   clip_model=clip_mod,
                                   clip_preprocess=clip_pre,
                                   keras_mlp=mlp_model)


def CNNPredict(predict_img: np.ndarray):
    # 1 is Real, 0 is AI
    # normalize the image, then add channel and batch axes
    predict_img = predict_img.astype('float32') / 255.0  # shape (H, W)
    predict_img = np.expand_dims(predict_img, axis=-1)   # shape (H, W, 1)
    predict_img = np.expand_dims(predict_img, axis=0)    # shape (1, H, W, 1)
    prediction = cnn_model.predict(predict_img)
    return prediction[0][0]


def ResnetPredict(predict_img):
    # 1 is Real, 0 is AI
    predict_img = prepare_cv2_image_for_resnet(predict_img)
    prediction = resnet_model.predict(predict_img)
    return prediction[0][0]
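As a rough guide to how these pieces compose, here is a sketch (not part of the commit) of running the full pipeline on one file; "example.png" is a placeholder path, and it assumes the module-level models loaded successfully.

from PIL import Image

img = Image.open("example.png").convert("RGB")
profile = profile_image_for_cnn_predict(img, crop_size=512)  # crop -> denoise -> |raw - denoised|
if isinstance(profile, str):
    print(profile)                # preprocessing error message
else:
    print(CNNPredict(profile))    # P(Real) from the CNN on the noise residual
    print(ResnetPredict(profile)) # P(Real) from ResNet on the noise residual
    print(clip_predict(img))      # P(AI) from the CLIP embedding + MLP head
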
structure.py
ADDED
@@ -0,0 +1,18 @@
from pydantic import BaseModel, Field
from typing import List

class PredictionEntry(BaseModel):
    model: str = Field(..., description="Name of the model used for prediction")
    predicted_class: int = Field(..., description="Predicted class index")
    confidence: float = Field(..., ge=0.0, le=1.0, description="Prediction confidence (0-1)")

class ImagePredictionResponse(BaseModel):
    predictions: List[PredictionEntry] = Field(..., description="List of predictions with model, class, and confidence")

class TextPredictionRequest(BaseModel):
    text: str = Field(..., example="This is a sample text to classify")

class TextPredictionResponse(BaseModel):
    predicted_class: str = Field(..., description="Predicted label for the input text")
    confidence: float = Field(..., ge=0.0, le=1.0)
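For illustration, a quick sketch (not part of the commit) of how these models serialize; the field values are made up, and `.model_dump()` assumes Pydantic v2 (use `.dict()` on v1).

entry = PredictionEntry(model="CNN", predicted_class=1, confidence=0.93)
resp = ImagePredictionResponse(predictions=[entry])
print(resp.model_dump())
# {'predictions': [{'model': 'CNN', 'predicted_class': 1, 'confidence': 0.93}]}
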
textPreprocess.py
ADDED
@@ -0,0 +1,123 @@
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoConfig
import os

# ── 1) Configuration ───────────────────────────────────────────────────────────
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.join(BASE_DIR, "Lib/Models/Text")  # update this path to your model location
MAX_LEN = 512

# ── 2) Load model & tokenizer ──────────────────────────────────────────────────
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Text prediction device: {device}")

# Global variables for model and tokenizer
tokenizer = None
model = None
ID2LABEL = {0: "human", 1: "ai"}

try:
    # Config carries id2label/label2id if you saved them
    config = AutoConfig.from_pretrained(MODEL_DIR)

    # Loads tokenizer.json + special_tokens_map.json automatically
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, use_fast=True)

    # Loads model.safetensors automatically (no extra flags needed)
    model = AutoModelForSequenceClassification.from_pretrained(MODEL_DIR, config=config)
    model.eval().to(device)

    # Update label mapping from config if available
    ID2LABEL = model.config.id2label if getattr(model.config, "id2label", None) else {0: "human", 1: "ai"}

    print("Text classification model loaded successfully")
    print("Labels:", ID2LABEL)
except Exception as e:
    print(f"Error loading text model: {e}")
    print("Text prediction will return fallback responses")

# ── 3) Inference function ──────────────────────────────────────────────────────
@torch.inference_mode()
def predict_text(text: str, max_length: int | None = None):
    """
    Predict whether the given text is human-written or AI-generated.

    Args:
        text (str): The text to classify
        max_length (int): Maximum sequence length for tokenization (defaults to MAX_LEN)

    Returns:
        dict: Contains predicted_class and confidence
    """
    if model is None or tokenizer is None:
        return {"predicted_class": "Human", "confidence": 0}

    if max_length is None:
        max_length = MAX_LEN

    try:
        # Tokenize input
        enc = tokenizer(
            text,
            return_tensors="pt",
            truncation=True,
            max_length=max_length,
        )
        enc = {k: v.to(device) for k, v in enc.items()}

        # Get predictions
        logits = model(**enc).logits
        probs = torch.softmax(logits, dim=-1).squeeze(0).detach().cpu().numpy()
        pred_id = int(probs.argmax(-1))

        # Map to a display label: "human" -> "Human", "ai" -> "AI"
        # (plain .capitalize() would produce "Ai")
        label = ID2LABEL.get(pred_id, str(pred_id))
        label = "AI" if label.lower() == "ai" else label.capitalize()

        return {
            "predicted_class": label,
            "confidence": float(probs[pred_id])
        }
    except Exception as e:
        print(f"Error during text prediction: {e}")
        return {"predicted_class": "Human", "confidence": 0}

# ── 4) Batch prediction (optional, for future use) ─────────────────────────────
@torch.inference_mode()
def predict_batch(texts, batch_size=16):
    """
    Predict multiple texts in batches.

    Args:
        texts (list): List of text strings to classify
        batch_size (int): Batch size for processing

    Returns:
        list: List of prediction dictionaries
    """
    if model is None or tokenizer is None:
        return [{"predicted_class": "Human", "confidence": 0} for _ in texts]

    results = []
    for i in range(0, len(texts), batch_size):
        chunk = texts[i:i + batch_size]
        enc = tokenizer(
            chunk,
            return_tensors="pt",
            truncation=True,
            max_length=MAX_LEN,
            padding=True,
        )
        enc = {k: v.to(device) for k, v in enc.items()}
        logits = model(**enc).logits
        probs = torch.softmax(logits, dim=-1).detach().cpu().numpy()
        ids = probs.argmax(-1)

        for t, pid, p in zip(chunk, ids, probs):
            label = ID2LABEL.get(int(pid), str(int(pid)))
            label = "AI" if label.lower() == "ai" else label.capitalize()
            results.append({
                "text": t,
                "predicted_class": label,
                "confidence": float(p[int(pid)])
            })
    return results
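A short sketch (not part of the commit) of using the inference helpers directly; the inputs and printed values are made up, and real output depends on the model in Lib/Models/Text.

print(predict_text("The quick brown fox jumps over the lazy dog."))
# e.g. {'predicted_class': 'Human', 'confidence': 0.91}

print(predict_batch([
    "First sample text.",
    "Second sample text.",
], batch_size=2))
# e.g. [{'text': 'First sample text.', 'predicted_class': 'AI', 'confidence': 0.88}, ...]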