MAS-AI-0000 committed on
Commit
0a278f1
·
verified ·
1 Parent(s): 5efd7a3

Update imagePreprocess.py

Browse files
Files changed (1) hide show
  1. imagePreprocess.py +34 -94
imagePreprocess.py CHANGED
@@ -17,21 +17,19 @@ MODELS_DIR = os.path.join(BASE_DIR, "Lib/Models/Image")
17
  REPO_ID = "MAS-AI-0000/Authentica"
18
  CLIP_MODEL_FILENAME = "Lib/Models/Image/clip_model.keras"
19
  CNN_MODEL_FILENAME = "Lib/Models/Image/cnn_model.keras"
20
- RESNET_MODEL_FILENAME = "Lib/Models/Image/resnet_model.keras"
21
 
22
  # ==== Load assets ====
23
  clip_model_path = hf_hub_download(repo_id=REPO_ID, filename=CLIP_MODEL_FILENAME)
24
  cnn_model_path = hf_hub_download(repo_id=REPO_ID, filename=CNN_MODEL_FILENAME)
25
- resnet_model_path = hf_hub_download(repo_id=REPO_ID, filename=RESNET_MODEL_FILENAME)
26
 
27
  # Load models and preprocessing once at module level
28
  clip_mod, clip_pre = clip.load("ViT-B/32", jit=False)
29
  clip_mod.eval()
30
  for p in clip_mod.parameters():
31
  p.requires_grad = False
32
- mlp_model= tf.keras.models.load_model(clip_model_path)
33
- cnn_model = tf.keras.models.load_model(cnn_model_path)
34
- resnet_model = tf.keras.models.load_model(resnet_model_path)
35
 
36
 
37
  def center_crop(image: Image.Image, crop_size=512) -> Image.Image | str:
@@ -40,7 +38,7 @@ def center_crop(image: Image.Image, crop_size=512) -> Image.Image | str:
40
  w, h = image.size
41
  if w < crop_size or h < crop_size:
42
  # skip small images
43
- return f"skipped image (too small) ({w}x{h})"
44
  left = (w - crop_size) // 2
45
  top = (h - crop_size) // 2
46
  right = left + crop_size
@@ -48,10 +46,10 @@ def center_crop(image: Image.Image, crop_size=512) -> Image.Image | str:
48
  cropped = image.crop((left, top, right, bottom))
49
  return cropped
50
  except Exception as e:
51
- return f"Error when cropping center: {e}"
52
 
53
 
54
- def denoise(src_image: Image) -> np.ndarray | str:
55
  """Read image, denoise (GPU if available) and return denoised image."""
56
  img = np.array(src_image) # BGR uint8 numpy array
57
  img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
@@ -59,8 +57,8 @@ def denoise(src_image: Image) -> np.ndarray | str:
59
  print(f"WARNING: No source image, skipping.")
60
  return False
61
  # Denoising parameters
62
- H = 10 # filter strength for luminance component (recommended 3-15)
63
- H_COLOR = 10 # same for color components
64
  TEMPLATE_WINDOW_SIZE = 7
65
  SEARCH_WINDOW_SIZE = 21
66
  # Use CUDA if available, otherwise CPU fallback
@@ -88,79 +86,37 @@ def denoise(src_image: Image) -> np.ndarray | str:
88
  TEMPLATE_WINDOW_SIZE,
89
  SEARCH_WINDOW_SIZE
90
  )
91
- #cv2.imwrite("denoised.png", den) # for debugging
92
- den = cv2.cvtColor(den, cv2.COLOR_BGR2RGB)
93
- den = Image.fromarray(den)
94
- return den
95
-
96
- def compute_profile(raw_image: Image, den_image: Image, normalize=False ,verbose= True) -> np.ndarray | str:
97
- # read images
98
- if raw_image is None:
99
- return print(f"WARNING: couldn't read raw image")
100
- if den_image is None:
101
- return print(f"WARNING: couldn't read denoised image")
102
-
103
- raw = np.array(raw_image) # RGB uint8 numpy array
104
- raw = cv2.cvtColor(raw, cv2.COLOR_RGB2BGR)
105
- den = np.array(den_image) # RGB uint8 numpy array
106
- den = cv2.cvtColor(den, cv2.COLOR_RGB2BGR)
107
- # if shapes differ, resize den to raw's size (keeps alignment); warn
108
- if den.shape != raw.shape:
109
- if verbose:
110
- print(f"NOTE: shape mismatch, resizing denoised from {den.shape[:2]} to {raw.shape[:2]}")
111
- den = cv2.resize(den, (raw.shape[1], raw.shape[0]), interpolation=cv2.INTER_LINEAR)
112
 
113
  # absolute difference per-channel
114
- diff = cv2.absdiff(raw, den) # BGR, uint8
115
  gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY) # single-channel uint8
116
 
117
- # optionally normalize to full 0-255 (per-image)
118
- if normalize:
119
- # cv2.normalize will map min->0 and max->255
120
- # but if the image is flat (min==max) normalize will set to 0; handle that
121
- minv = int(gray.min())
122
- maxv = int(gray.max())
123
- if maxv > minv:
124
  norm = cv2.normalize(gray, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
125
  out = norm
126
- else:
127
- # nothing to normalize (flat), keep as-is (all zeros)
128
- out = gray
129
  else:
130
- # keep raw diff values but ensure dtype uint8 (already uint8) and values are 0..255
131
  out = gray
132
- #cv2.imwrite("profile.png", out) # for debugging
133
  return out
134
 
135
- def profile_image_for_cnn_predict(pil_img: Image, crop_size=512):
136
  """Preprocess the input image and return a numpy array ready for model prediction."""
137
  # Step 1: Center crop the image
138
- cropped_img = center_crop(pil_img, crop_size=crop_size)
139
  if isinstance(cropped_img, str):
140
  return cropped_img # return error message if cropping failed
141
- # Step 2: Denoise the cropped image
142
- denoised_img = denoise(cropped_img)
143
- if isinstance(denoised_img, str):
144
- return denoised_img # return error message if denoising failed
145
  # Step 3: Compute the profile image
146
- profile_img = compute_profile(cropped_img, denoised_img, normalize=False)
147
  if isinstance(profile_img, str):
148
  return profile_img # return error message if profile computation failed
149
  return profile_img
150
 
151
 
152
- def prepare_cv2_image_for_resnet(cv2_gray_img, target_size=(512,512)):
153
- img_rgb = cv2.cvtColor(cv2_gray_img, cv2.COLOR_GRAY2RGB)
154
- img_rgb = cv2.resize(img_rgb, (target_size[1], target_size[0]), interpolation=cv2.INTER_AREA)
155
- img_rgb = img_rgb.astype('float32')
156
- # 5) add batch dim
157
- x = np.expand_dims(img_rgb, axis=0) # shape (1, H, W, 3)
158
- x = preprocess_input(x)
159
- return x
160
-
161
- def predict_image_prob_clip(image: Image.Image, threshold=0.5,
162
- clip_model=None, clip_preprocess=None,
163
- keras_mlp=None):
164
  """
165
  Predicts probability that image is AI-generated (AI=1) using CLIP + Keras MLP.
166
 
@@ -172,8 +128,7 @@ def predict_image_prob_clip(image: Image.Image, threshold=0.5,
172
  Returns:
173
  dict: {'prob': float_prob_AI, 'label': 'AI' or 'Real'}
174
  """
175
-
176
-
177
  # --- try to reuse provided CLIP objects, otherwise load ---
178
  if clip_model is None or clip_preprocess is None:
179
  print("Loading Default CLIP model...")
@@ -186,10 +141,12 @@ def predict_image_prob_clip(image: Image.Image, threshold=0.5,
186
 
187
  # --- try to reuse provided keras model, otherwise load from disk ---
188
  if keras_mlp is None:
189
- print("No keras model provided...")
190
- return None
191
  # --- load/normalize image ---
192
  # assume PIL image
 
 
 
193
  img = image.convert('RGB')
194
 
195
  # --- preprocess for CLIP and get embedding ---
@@ -205,32 +162,15 @@ def predict_image_prob_clip(image: Image.Image, threshold=0.5,
205
  prob = float(probs[0])
206
  return prob
207
 
208
- def clip_predict(pil_img: Image, crop_size=512):
209
- # pass model objects explicitly (faster if you call this repeatedly)
210
- pil_img = center_crop(pil_img, crop_size=crop_size)
211
-
212
- if isinstance(pil_img, str):
213
- return pil_img # return error message
214
-
215
- return predict_image_prob_clip(pil_img,
216
- clip_model=clip_mod,
217
- clip_preprocess=clip_pre,
218
- keras_mlp=mlp_model)
219
-
220
-
221
- def CNNPredict(predict_img: np.ndarray):
222
- #1 Real 0 AI
223
- #normalize image
224
- # expand dims to add channel axis
225
- predict_img = predict_img.astype('float32') / 255.0 # shape (H, W)
226
- predict_img = np.expand_dims(predict_img, axis=-1) # shape (H, W, 1)
227
  # expand dims to add batch axis
228
- predict_img = np.expand_dims(predict_img, axis=0) # shape (1, H, W, 1)
229
- prediction = cnn_model.predict(predict_img)
230
- return prediction[0][0]
231
-
232
- def ResnetPredict(predict_img):
233
- #1 Real 0 AI
234
- predict_img = prepare_cv2_image_for_resnet(predict_img)
235
- prediction = resnet_model.predict(predict_img)
236
  return prediction[0][0]
 
 
17
  REPO_ID = "MAS-AI-0000/Authentica"
18
  CLIP_MODEL_FILENAME = "Lib/Models/Image/clip_model.keras"
19
  CNN_MODEL_FILENAME = "Lib/Models/Image/cnn_model.keras"
 
20
 
21
  # ==== Load assets ====
22
  clip_model_path = hf_hub_download(repo_id=REPO_ID, filename=CLIP_MODEL_FILENAME)
23
  cnn_model_path = hf_hub_download(repo_id=REPO_ID, filename=CNN_MODEL_FILENAME)
 
24
 
25
  # Load models and preprocessing once at module level
26
  clip_mod, clip_pre = clip.load("ViT-B/32", jit=False)
27
  clip_mod.eval()
28
  for p in clip_mod.parameters():
29
  p.requires_grad = False
30
+
31
+ mlp_model= tf.keras.models.load_model(os.path.join(MODELS_DIR, "clip_model.keras"))
32
+ cnn_model = tf.keras.models.load_model(os.path.join(MODELS_DIR, "cnn_model.keras"))
33
 
34
 
35
  def center_crop(image: Image.Image, crop_size=512) -> Image.Image | str:
 
38
  w, h = image.size
39
  if w < crop_size or h < crop_size:
40
  # skip small images
41
+ return f"Image is too small: ({w}x{h}), Minimum size is {crop_size}x{crop_size}"
42
  left = (w - crop_size) // 2
43
  top = (h - crop_size) // 2
44
  right = left + crop_size
 
46
  cropped = image.crop((left, top, right, bottom))
47
  return cropped
48
  except Exception as e:
49
+ return f"Error when cropping image: {e}"
50
 
51
 
52
+ def compute_profile(src_image: Image) -> np.ndarray | str:
53
  """Read image, denoise (GPU if available) and return denoised image."""
54
  img = np.array(src_image) # BGR uint8 numpy array
55
  img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
 
57
  print(f"WARNING: No source image, skipping.")
58
  return False
59
  # Denoising parameters
60
+ H = 5 # filter strength for luminance component (recommended 3-15)
61
+ H_COLOR = 5 # same for color components
62
  TEMPLATE_WINDOW_SIZE = 7
63
  SEARCH_WINDOW_SIZE = 21
64
  # Use CUDA if available, otherwise CPU fallback
 
86
  TEMPLATE_WINDOW_SIZE,
87
  SEARCH_WINDOW_SIZE
88
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
 
90
  # absolute difference per-channel
91
+ diff = cv2.absdiff(img, den) # BGR, uint8
92
  gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY) # single-channel uint8
93
 
94
+ minv = int(gray.min())
95
+ maxv = int(gray.max())
96
+ if maxv > minv:
 
 
 
 
97
  norm = cv2.normalize(gray, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
98
  out = norm
 
 
 
99
  else:
100
+ # nothing to normalize (flat), keep as-is (all zeros)
101
  out = gray
 
102
  return out
103
 
104
+ def preprocess_cnn(pil_img: Image):
105
  """Preprocess the input image and return a numpy array ready for model prediction."""
106
  # Step 1: Center crop the image
107
+ cropped_img = center_crop(pil_img)
108
  if isinstance(cropped_img, str):
109
  return cropped_img # return error message if cropping failed
 
 
 
 
110
  # Step 3: Compute the profile image
111
+ profile_img = compute_profile(cropped_img)
112
  if isinstance(profile_img, str):
113
  return profile_img # return error message if profile computation failed
114
  return profile_img
115
 
116
 
117
+ def CLIPPredict(image: Image.Image,
118
+ clip_model=clip_mod, clip_preprocess=clip_pre,
119
+ keras_mlp=mlp_model) -> float | str:
 
 
 
 
 
 
 
 
 
120
  """
121
  Predicts probability that image is AI-generated (AI=1) using CLIP + Keras MLP.
122
 
 
128
  Returns:
129
  dict: {'prob': float_prob_AI, 'label': 'AI' or 'Real'}
130
  """
131
+ #0 Real 1 AI
 
132
  # --- try to reuse provided CLIP objects, otherwise load ---
133
  if clip_model is None or clip_preprocess is None:
134
  print("Loading Default CLIP model...")
 
141
 
142
  # --- try to reuse provided keras model, otherwise load from disk ---
143
  if keras_mlp is None:
144
+ return "No keras model provided..."
 
145
  # --- load/normalize image ---
146
  # assume PIL image
147
+ image = center_crop(image, crop_size=512)
148
+ if isinstance(image, str):
149
+ return image # return error message if cropping failed
150
  img = image.convert('RGB')
151
 
152
  # --- preprocess for CLIP and get embedding ---
 
162
  prob = float(probs[0])
163
  return prob
164
 
165
+
166
+ def CNNPredict(img: Image.Image) -> float | str:
167
+ predict_img = preprocess_cnn(img)
168
+ if isinstance(predict_img, str):
169
+ return predict_img # return error message if preprocessing failed
170
+ predict_img = predict_img.astype('float32') / 255.0 # shape (H, W)
171
+ predict_img = np.expand_dims(predict_img, axis=-1) # shape (H, W, 1)
 
 
 
 
 
 
 
 
 
 
 
 
172
  # expand dims to add batch axis
173
+ predict_img = np.expand_dims(predict_img, axis=0) # shape (1, H, W, 1)
174
+ prediction = cnn_model.predict(predict_img)
 
 
 
 
 
 
175
  return prediction[0][0]
176
+