muhammadhamza-stack commited on
Commit
9f6bfc6
·
1 Parent(s): cb35992

refine the gradio app

Browse files
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.jpg filter=lfs diff=lfs merge=lfs -text
37
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ venv
app.py CHANGED
@@ -1,118 +1,628 @@
1
- import os
2
- os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
 
5
  import os
6
- import tempfile
7
  import numpy as np
8
  import cv2
9
  import gradio as gr
10
  from tensorflow.keras.applications import ResNet50
11
  from tensorflow.keras.applications.resnet50 import preprocess_input
12
- from tensorflow.keras.preprocessing import image
13
  from skimage.metrics import structural_similarity as ssim
14
  from PIL import Image
 
15
 
16
  # Disable GPU for TensorFlow
17
  os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
18
  os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  class ImageCharacterClassifier:
21
  def __init__(self, similarity_threshold=0.5):
22
  self.model = ResNet50(weights='imagenet', include_top=False, pooling='avg')
23
  self.similarity_threshold = similarity_threshold
24
 
25
  def load_and_preprocess_image(self, img):
26
- # Convert image to array and preprocess it
27
  img = img.convert('RGB')
28
  img_array = np.array(img)
29
- img_array = cv2.resize(img_array, (224, 224)) # Ensure correct size
30
  img_array = np.expand_dims(img_array, axis=0)
31
  img_array = preprocess_input(img_array)
32
  return img_array
33
 
34
  def extract_features(self, img):
35
  preprocessed_img = self.load_and_preprocess_image(img)
36
- features = self.model.predict(preprocessed_img)
37
  return features
38
 
39
  def calculate_ssim(self, img1, img2):
40
  img1_gray = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
41
  img2_gray = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
42
  img2_gray = cv2.resize(img2_gray, (img1_gray.shape[1], img1_gray.shape[0]))
43
- return ssim(img1_gray, img2_gray)
44
 
45
- def process_images(reference_image, comparison_images, similarity_threshold):
46
  try:
47
- if reference_image is None:
48
- return "Please upload a reference image.", []
49
- if not comparison_images:
50
- return "Please upload comparison images.", []
51
 
52
  classifier = ImageCharacterClassifier(similarity_threshold)
53
 
54
- # Convert reference image to NumPy array
55
- ref_image = Image.fromarray(reference_image)
56
- ref_features = classifier.extract_features(ref_image)
57
 
58
  results = []
59
  html_output = "<h3>Comparison Results:</h3>"
60
 
61
- for comp_image in comparison_images:
62
  try:
63
- # Read image file as PIL Image
64
- comp_pil = Image.open(comp_image)
65
- comp_pil = comp_pil.convert("RGB")
 
 
 
 
 
 
 
66
 
67
- # Convert to NumPy format for SSIM
68
- comp_array = np.array(comp_pil)
69
-
70
- # Calculate SSIM score
71
- ssim_score = classifier.calculate_ssim(reference_image, comp_array)
72
 
73
- # Extract features
 
 
 
 
 
74
  comp_features = classifier.extract_features(comp_pil)
75
  max_feature_diff = np.max(np.abs(ref_features - comp_features))
76
- is_similar = max_feature_diff < 6.0
77
-
78
- status_text = "SIMILAR" if is_similar else "NOT SIMILAR"
 
 
79
  status_color = "green" if is_similar else "red"
80
 
81
- html_output += f"<p style='color:{status_color};'>{comp_image.name}: {status_text}</p>"
82
  results.append(comp_array)
83
 
84
  except Exception as e:
85
- html_output += f"<p style='color:red;'>Error processing {comp_image.name}: {str(e)}</p>"
 
 
86
 
87
- return html_output, results
88
 
89
  except Exception as e:
90
- return f"<p style='color:red;'>Error: {str(e)}</p>", []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
 
92
  def create_interface():
93
- with gr.Blocks() as interface:
94
- gr.Markdown("# Image Similarity Classifier")
95
- gr.Markdown("Upload a reference image and multiple comparison images.")
 
 
 
 
 
 
 
 
 
 
 
96
 
 
97
  with gr.Row():
98
  with gr.Column():
99
- reference_input = gr.Image(label="Reference Image", type="numpy")
100
- comparison_input = gr.Files(label="Comparison Images", type="filepath")
101
- threshold_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, step=0.05, label="Similarity Threshold")
102
- submit_button = gr.Button("Compare Images")
 
 
 
 
 
 
 
 
 
 
103
 
104
- with gr.Column():
105
- output_html = gr.HTML(label="Results")
106
- output_gallery = gr.Gallery(label="Processed Images", columns=3)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
 
 
 
 
 
 
108
  submit_button.click(
109
  fn=process_images,
110
  inputs=[reference_input, comparison_input, threshold_slider],
111
  outputs=[output_html, output_gallery]
112
  )
113
-
 
 
 
 
 
 
 
 
 
 
 
 
 
114
  return interface
115
 
116
  if __name__ == "__main__":
 
 
 
117
  interface = create_interface()
118
- interface.launch(share=True)
 
 
1
+ # import os
2
+ # os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
3
+
4
+
5
+ # import os
6
+ # import tempfile
7
+ # import numpy as np
8
+ # import cv2
9
+ # import gradio as gr
10
+ # from tensorflow.keras.applications import ResNet50
11
+ # from tensorflow.keras.applications.resnet50 import preprocess_input
12
+ # from tensorflow.keras.preprocessing import image
13
+ # from skimage.metrics import structural_similarity as ssim
14
+ # from PIL import Image
15
+ # from io import BytesIO
16
+
17
+
18
+ # # Disable GPU for TensorFlow
19
+ # os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
20
+ # os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
21
+
22
+ # class ImageCharacterClassifier:
23
+ # def __init__(self, similarity_threshold=0.5):
24
+ # self.model = ResNet50(weights='imagenet', include_top=False, pooling='avg')
25
+ # self.similarity_threshold = similarity_threshold
26
+
27
+ # def load_and_preprocess_image(self, img):
28
+ # # Convert image to array and preprocess it
29
+ # img = img.convert('RGB')
30
+ # img_array = np.array(img)
31
+ # img_array = cv2.resize(img_array, (224, 224)) # Ensure correct size
32
+ # img_array = np.expand_dims(img_array, axis=0)
33
+ # img_array = preprocess_input(img_array)
34
+ # return img_array
35
+
36
+ # def extract_features(self, img):
37
+ # preprocessed_img = self.load_and_preprocess_image(img)
38
+ # features = self.model.predict(preprocessed_img)
39
+ # return features
40
+
41
+ # def calculate_ssim(self, img1, img2):
42
+ # img1_gray = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
43
+ # img2_gray = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
44
+ # img2_gray = cv2.resize(img2_gray, (img1_gray.shape[1], img1_gray.shape[0]))
45
+ # return ssim(img1_gray, img2_gray)
46
+
47
+ # def process_images(reference_image, comparison_images, similarity_threshold):
48
+ # try:
49
+ # if reference_image is None:
50
+ # return "Please upload a reference image.", []
51
+ # if not comparison_images:
52
+ # return "Please upload comparison images.", []
53
+
54
+ # classifier = ImageCharacterClassifier(similarity_threshold)
55
+
56
+ # # Convert reference image to NumPy array
57
+ # ref_image = Image.fromarray(reference_image)
58
+ # ref_features = classifier.extract_features(ref_image)
59
+
60
+ # results = []
61
+ # html_output = "<h3>Comparison Results:</h3>"
62
+
63
+ # # for comp_image in comparison_images:
64
+ # # try:
65
+ # # # Read image file as PIL Image
66
+ # # comp_pil = Image.open(comp_image)
67
+ # # comp_pil = comp_pil.convert("RGB")
68
+
69
+ # # # Convert to NumPy format for SSIM
70
+ # # comp_array = np.array(comp_pil)
71
+
72
+
73
+ # for comp_image in comparison_images:
74
+ # try:
75
+ # with open(comp_image.name, "rb") as f:
76
+ # comp_pil = Image.open(BytesIO(f.read()))
77
+ # comp_pil = comp_pil.convert("RGB")
78
+
79
+ # comp_array = np.array(comp_pil)
80
+
81
+
82
+ # # Calculate SSIM score
83
+ # ssim_score = classifier.calculate_ssim(reference_image, comp_array)
84
+
85
+ # # Extract features
86
+ # comp_features = classifier.extract_features(comp_pil)
87
+ # max_feature_diff = np.max(np.abs(ref_features - comp_features))
88
+ # is_similar = max_feature_diff < 6.0
89
+
90
+ # status_text = "SIMILAR" if is_similar else "NOT SIMILAR"
91
+ # status_color = "green" if is_similar else "red"
92
+
93
+ # html_output += f"<p style='color:{status_color};'>{comp_image.name}: {status_text}</p>"
94
+ # results.append(comp_array)
95
+
96
+ # except Exception as e:
97
+ # html_output += f"<p style='color:red;'>Error processing {comp_image.name}: {str(e)}</p>"
98
+
99
+ # return html_output, results
100
+
101
+ # except Exception as e:
102
+ # return f"<p style='color:red;'>Error: {str(e)}</p>", []
103
+
104
+ # def create_interface():
105
+ # with gr.Blocks() as interface:
106
+ # gr.Markdown("# Image Similarity Classifier")
107
+ # gr.Markdown("Upload a reference image and multiple comparison images.")
108
+
109
+ # with gr.Row():
110
+ # with gr.Column():
111
+ # reference_input = gr.Image(label="Reference Image", type="numpy")
112
+ # comparison_input = gr.Files(label="Comparison Images", type="file")
113
+ # threshold_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, step=0.05, label="Similarity Threshold")
114
+ # submit_button = gr.Button("Compare Images")
115
+
116
+ # with gr.Column():
117
+ # output_html = gr.HTML(label="Results")
118
+ # output_gallery = gr.Gallery(label="Processed Images", columns=3)
119
+
120
+ # submit_button.click(
121
+ # fn=process_images,
122
+ # inputs=[reference_input, comparison_input, threshold_slider],
123
+ # outputs=[output_html, output_gallery]
124
+ # )
125
+
126
+ # return interface
127
+
128
+ # if __name__ == "__main__":
129
+ # interface = create_interface()
130
+ # interface.launch(share=True)
131
+
132
+
133
+
134
+
135
+
136
+
137
+
138
+
139
+
140
+ # import os
141
+ # os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
142
+
143
+
144
+ # import os
145
+ # import tempfile
146
+ # import numpy as np
147
+ # import cv2
148
+ # import gradio as gr
149
+ # from tensorflow.keras.applications import ResNet50
150
+ # from tensorflow.keras.applications.resnet50 import preprocess_input
151
+ # from tensorflow.keras.preprocessing import image
152
+ # from skimage.metrics import structural_similarity as ssim
153
+ # from PIL import Image
154
+ # from io import BytesIO
155
+
156
+ # # Disable GPU for TensorFlow
157
+ # os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
158
+ # os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
159
+
160
+ # # --- DOCUMENTATION STRINGS (English Only) ---
161
+
162
+ # GUIDELINE_SETUP = """
163
+ # ## 1. Quick Start Guide: Setup and Run Instructions
164
+
165
+ # This application uses a combination of advanced feature extraction (ResNet50) and structural analysis (SSIM) to determine if comparison images are structurally and semantically similar to a reference image.
166
+
167
+ # 1. **Upload Reference:** Upload the main image you want to compare against in the 'Reference Image' box.
168
+ # 2. **Upload Comparisons:** Upload one or more images you want to test for similarity in the 'Comparison Images' file upload area.
169
+ # 3. **Set Threshold:** Adjust the 'Similarity Threshold' slider. This primarily affects the structural (SSIM) component, but the feature comparison also plays a role (currently fixed).
170
+ # 4. **Run:** Click the **"Compare Images"** button.
171
+ # 5. **Review:** Results will appear in the 'Results' panel, indicating if each comparison image is "SIMILAR" or "NOT SIMILAR".
172
+ # """
173
+
174
+ # GUIDELINE_INPUT = """
175
+ # ## 2. Expected Inputs
176
+
177
+ # | Input Field | Purpose | Requirement |
178
+ # | :--- | :--- | :--- |
179
+ # | **Reference Image** | The baseline image against which all others will be compared. | Must be a single image file (JPG, PNG). |
180
+ # | **Comparison Images** | One or more images to be tested for similarity. | Must be multiple image files. Upload them using the file selector. |
181
+ # | **Similarity Threshold** | A slider controlling the sensitivity (0.0 to 1.0) for structural similarity (SSIM). | Higher values (closer to 1.0) mean stricter similarity requirements. Default is 0.5. |
182
+
183
+ # **Image Preprocessing:** All uploaded images are automatically resized to 224x224 pixels and standardized according to the requirements of the ResNet model before feature extraction.
184
+ # """
185
+
186
+ # GUIDELINE_OUTPUT = """
187
+ # ## 3. Expected Outputs (Similarity Results)
188
+
189
+ # The application provides two main outputs:
190
+
191
+ # 1. **Results (HTML Panel):**
192
+ # * A list detailing the outcome for each comparison image.
193
+ # * Status: **SIMILAR** (Green) or **NOT SIMILAR** (Red).
194
+ # * Similarity is determined by a combined metric: Structural Similarity (SSIM) AND feature vector distance (ResNet features).
195
+
196
+ # 2. **Processed Images (Gallery):**
197
+ # * A gallery displaying the input comparison images after they have been processed.
198
+
199
+ # ### How Similarity is Determined:
200
+ # The classification relies on two checks:
201
+ # 1. **Feature Distance:** The distance between the deep features extracted by the ResNet50 model (checking semantic content).
202
+ # 2. **Structural Similarity (SSIM):** A metric comparing the structural fidelity between the reference and comparison images (checking visual layout and quality).
203
+ # An image is typically marked "SIMILAR" only if both checks suggest a close match.
204
+ # """
205
+
206
+ # # --- CLASSIFIER CLASS ---
207
+ # class ImageCharacterClassifier:
208
+ # def __init__(self, similarity_threshold=0.5):
209
+ # # Setting include_top=False loads the ResNet50 convolutional layers
210
+ # self.model = ResNet50(weights='imagenet', include_top=False, pooling='avg')
211
+ # self.similarity_threshold = similarity_threshold
212
+
213
+ # def load_and_preprocess_image(self, img):
214
+ # # Convert image to array and preprocess it
215
+ # img = img.convert('RGB')
216
+ # img_array = np.array(img)
217
+ # img_array = cv2.resize(img_array, (224, 224)) # Ensure correct size
218
+ # img_array = np.expand_dims(img_array, axis=0)
219
+ # img_array = preprocess_input(img_array)
220
+ # return img_array
221
+
222
+ # def extract_features(self, img):
223
+ # preprocessed_img = self.load_and_preprocess_image(img)
224
+ # # Use predict_on_batch for potentially better memory usage
225
+ # features = self.model.predict(preprocessed_img, verbose=0)
226
+ # return features
227
+
228
+ # def calculate_ssim(self, img1, img2):
229
+ # # Ensure images are in numpy array format for cv2 and SSIM
230
+ # img1_gray = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
231
+ # img2_gray = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
232
+
233
+ # # Resize comparison image to match reference image size for SSIM calculation
234
+ # img2_gray = cv2.resize(img2_gray, (img1_gray.shape[1], img1_gray.shape[0]))
235
+
236
+ # # Ensure data types are consistent (usually float/uint8 works)
237
+ # # SSIM calculation
238
+ # return ssim(img1_gray, img2_gray, data_range=img1_gray.max() - img1_gray.min())
239
+
240
+ # def process_images(reference_image_array, comparison_images, similarity_threshold):
241
+ # try:
242
+ # if reference_image_array is None:
243
+ # return "<p style='color:red;'>Please upload a reference image.</p>", []
244
+ # if not comparison_images:
245
+ # return "<p style='color:red;'>Please upload comparison images.</p>", []
246
+
247
+ # classifier = ImageCharacterClassifier(similarity_threshold)
248
+
249
+ # # 1. Process Reference Image
250
+ # ref_image_pil = Image.fromarray(reference_image_array).convert("RGB")
251
+ # ref_features = classifier.extract_features(ref_image_pil)
252
+
253
+ # # Convert array back to RGB for SSIM comparison later
254
+ # ref_image_for_ssim = cv2.cvtColor(reference_image_array, cv2.COLOR_BGR2RGB)
255
+
256
+
257
+ # results = []
258
+ # html_output = "<h3>Comparison Results:</h3>"
259
+
260
+ # # 2. Process Comparison Images
261
+ # for comp_file in comparison_images:
262
+ # try:
263
+ # # Open image file using PIL
264
+ # with open(comp_file.name, "rb") as f:
265
+ # comp_pil = Image.open(BytesIO(f.read())).convert("RGB")
266
+
267
+ # comp_array = np.array(comp_pil)
268
+
269
+ # # --- Similarity Checks ---
270
+
271
+ # # A. SSIM Check (Structural Similarity)
272
+ # ssim_score = classifier.calculate_ssim(ref_image_for_ssim, comp_array)
273
+ # ssim_match = ssim_score >= similarity_threshold
274
+
275
+ # # B. Feature Check (Semantic Similarity using ResNet features)
276
+ # comp_features = classifier.extract_features(comp_pil)
277
+
278
+ # # Using a hardcoded feature difference threshold (6.0 in original code)
279
+ # max_feature_diff = np.max(np.abs(ref_features - comp_features))
280
+ # feature_match = max_feature_diff < 6.0
281
+
282
+ # # Combined Result
283
+ # is_similar = feature_match # The original logic primarily used the feature match
284
+
285
+ # # If you want to require both SSIM and Feature Match:
286
+ # # is_similar = ssim_match and feature_match
287
+
288
+ # status_text = f"SIMILAR (SSIM: {ssim_score:.3f})" if is_similar else f"NOT SIMILAR (SSIM: {ssim_score:.3f})"
289
+ # status_color = "green" if is_similar else "red"
290
+
291
+ # html_output += f"<p style='color:{status_color};'>{os.path.basename(comp_file.name)}: {status_text}</p>"
292
+ # results.append(comp_array) # Add the numpy array of the comparison image
293
+
294
+ # except Exception as e:
295
+ # html_output += f"<p style='color:red;'>Error processing {os.path.basename(comp_file.name)}: {str(e)}</p>"
296
+ # results.append(None) # Add None to keep list consistent
297
+
298
+ # return html_output, [r for r in results if r is not None]
299
+
300
+ # except Exception as e:
301
+ # return f"<p style='color:red;'>Critical Error: {str(e)}</p>", []
302
+
303
+ # def create_interface():
304
+ # with gr.Blocks(title="Image Similarity Classifier") as interface:
305
+
306
+ # gr.Markdown("# Image Similarity Classifier (ResNet + SSIM)")
307
+ # gr.Markdown("Tool to compare a reference image against multiple comparison images based on structural and deep feature similarity.")
308
+
309
+ # # 1. Guidelines Section
310
+ # with gr.Accordion("Tips & Guidelines ", open=False):
311
+ # gr.Markdown(GUIDELINE_SETUP)
312
+ # gr.Markdown("---")
313
+ # gr.Markdown(GUIDELINE_INPUT)
314
+ # gr.Markdown("---")
315
+ # gr.Markdown(GUIDELINE_OUTPUT)
316
+
317
+ # gr.Markdown("---")
318
+
319
+ # # 2. Application Interface
320
+ # with gr.Row():
321
+ # with gr.Column():
322
+ # gr.Markdown("## Step 1: Upload a Reference Image ")
323
+ # reference_input = gr.Image(label="Reference Image", type="numpy", height=300)
324
+ # gr.Markdown("## Step 2: Upload Multiple Images to Compair with Reference Image ")
325
+ # comparison_input = gr.Files(label="Comparison Images", type="file")
326
+ # gr.Markdown("## Step 3: Set the Confidence Score (Optional) ")
327
+ # threshold_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, step=0.05, label="Similarity Threshold (SSIM)")
328
+ # gr.Markdown("## Step 4: Click Compare Images ")
329
+ # submit_button = gr.Button("Compare Images", variant="primary")
330
+ # gr.Markdown("# Results ")
331
+ # gr.Markdown("## Comparison Result ")
332
+ # output_html = gr.HTML(label="Comparison Results")
333
+ # gr.Markdown("## Processed Comparison Images")
334
+ # output_gallery = gr.Gallery(label="Processed Comparison Images", columns=3)
335
+
336
+ # # 3. Event Handling
337
+ # submit_button.click(
338
+ # fn=process_images,
339
+ # inputs=[reference_input, comparison_input, threshold_slider],
340
+ # outputs=[output_html, output_gallery]
341
+ # )
342
+
343
+ # # Example data setup (Requires placeholder images to exist)
344
+ # gr.Markdown("---")
345
+ # gr.Markdown("## Sample Data for Testing")
346
+
347
+ # # Note: You would need to provide actual file paths for reference and comparison samples
348
+ # # Example setup demonstrating how to structure inputs for gr.Examples:
349
+ # example_data = [
350
+ # [np.zeros((100, 100, 3), dtype=np.uint8), [gr.File("sample_data/license3.jpg"), gr.File("sample_data/licence.jpeg")], 0.6], # Placeholder example
351
+ # ]
352
+
353
+ # # Since examples for Files/Gallery can be complex to set up without actual files,
354
+ # # we will use a simple explanation here instead of a runnable Example block.
355
+ # gr.Markdown("Due to the multi-file input requirement, please manually upload a reference image and several comparison images to test.")
356
+
357
+
358
+ # return interface
359
+
360
+ # if __name__ == "__main__":
361
+ # interface = create_interface()
362
+ # # Note: Using share=True might expose the app publicly if run without authorization.
363
+ # interface.launch()
364
+
365
+
366
+
367
+
368
+
369
+
370
+
371
+
372
+
373
+
374
+
375
+
376
+
377
+
378
+
379
+
380
 
381
 
382
  import os
 
383
  import numpy as np
384
  import cv2
385
  import gradio as gr
386
  from tensorflow.keras.applications import ResNet50
387
  from tensorflow.keras.applications.resnet50 import preprocess_input
 
388
  from skimage.metrics import structural_similarity as ssim
389
  from PIL import Image
390
+ from io import BytesIO
391
 
392
  # Disable GPU for TensorFlow
393
  os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
394
  os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
395
 
396
# --- DOCUMENTATION STRINGS (English Only) ---
# These markdown constants are rendered verbatim in the Gradio UI accordion
# (see create_interface); they are user-facing text, not code.

GUIDELINE_SETUP = """
## 1. Quick Start Guide: Setup and Run Instructions

This application uses a combination of advanced feature extraction (ResNet50) and structural analysis (SSIM) to determine if comparison images are structurally and semantically similar to a reference image.

1. **Upload Reference:** Upload the main image you want to compare against in the 'Reference Image' box.
2. **Upload Comparisons:** Upload one or more images you want to test for similarity in the 'Comparison Images' file upload area.
3. **Set Threshold:** Adjust the 'Similarity Threshold' slider. This controls the sensitivity for structural similarity (SSIM).
4. **Run:** Click the **"Compare Images"** button.
5. **Review:** Results will appear in the 'Results' panel, indicating if each comparison image is "SIMILAR" or "NOT SIMILAR".
"""

GUIDELINE_INPUT = """
## 2. Expected Inputs and Preprocessing

| Input Field | Purpose | Requirement |
| :--- | :--- | :--- |
| **Reference Image** | The baseline image against which all others will be compared. | Must be a single image file (JPG, PNG). |
| **Comparison Images** | One or more images to be tested for similarity. | Must be multiple image files. Upload them using the file selector. |
| **Similarity Threshold** | A slider controlling the sensitivity (0.0 to 1.0) for structural similarity (SSIM). | Higher values (closer to 1.0) mean stricter similarity requirements. Default is 0.5. |

**Image Preprocessing:** All uploaded images are automatically resized to 224x224 pixels and standardized according to the requirements of the ResNet model before feature extraction.
"""

GUIDELINE_OUTPUT = """
## 3. Expected Outputs (Similarity Results)

The application provides two main outputs:

1. **Results (HTML Panel):**
  * A list detailing the outcome for each comparison image.
  * Status: **SIMILAR** (Green) or **NOT SIMILAR** (Red).

2. **Processed Images (Gallery):**
  * A gallery displaying the input comparison images after they have been processed.

### How Similarity is Determined:
The classification relies on two checks: Structural Similarity (SSIM) and Deep Feature Distance (ResNet). An image is marked "SIMILAR" if both structural and semantic properties suggest a close match.
"""
439
# --- CLASSIFIER CLASS ---
class ImageCharacterClassifier:
    """Wraps a ResNet50 feature extractor plus an SSIM structural check."""

    def __init__(self, similarity_threshold=0.5):
        # include_top=False with pooling='avg' yields one pooled feature
        # vector per image instead of classification logits.
        self.model = ResNet50(weights='imagenet', include_top=False, pooling='avg')
        self.similarity_threshold = similarity_threshold

    def load_and_preprocess_image(self, img):
        """Convert a PIL image into a (1, 224, 224, 3) ResNet-ready batch."""
        img = img.convert('RGB')
        img_array = np.array(img)
        img_array = cv2.resize(img_array, (224, 224))  # ResNet50 input size
        img_array = np.expand_dims(img_array, axis=0)  # add batch dimension
        img_array = preprocess_input(img_array)
        return img_array

    def extract_features(self, img):
        """Return the pooled ResNet50 feature vector for a PIL image."""
        preprocessed_img = self.load_and_preprocess_image(img)
        features = self.model.predict(preprocessed_img, verbose=0)
        return features

    def calculate_ssim(self, img1, img2):
        """Structural similarity between two RGB numpy images.

        img2 is resized to img1's dimensions before comparison.
        """
        img1_gray = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
        img2_gray = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
        img2_gray = cv2.resize(img2_gray, (img1_gray.shape[1], img1_gray.shape[0]))
        # FIX: use the dtype's full dynamic range for uint8 images. The
        # previous max()-min() both inflated SSIM on low-contrast images and
        # produced a zero data_range (division by zero) on constant images.
        if img1_gray.dtype == np.uint8:
            data_range = 255
        else:
            data_range = float(img1_gray.max() - img1_gray.min()) or 1.0
        return ssim(img1_gray, img2_gray, data_range=data_range)
463
def process_images(reference_image_array, comparison_files, similarity_threshold):
    """Compare a reference image against each uploaded comparison file.

    Args:
        reference_image_array: RGB numpy array from gr.Image(type="numpy"),
            or None if nothing was uploaded.
        comparison_files: list of path strings, file-like objects with a
            .name attribute, or Gradio dict payloads; may be None/empty.
        similarity_threshold: SSIM slider value (currently reported only;
            the decisive criterion is the feature distance below).

    Returns:
        (html_report, gallery_images) — an HTML results string and a list
        of RGB numpy arrays for every successfully processed comparison.
    """
    try:
        if reference_image_array is None:
            return "<p style='color:red;'>Please upload a reference image.</p>", []
        if not comparison_files:
            return "<p style='color:red;'>Please upload comparison images.</p>", []

        classifier = ImageCharacterClassifier(similarity_threshold)

        ref_image_pil = Image.fromarray(reference_image_array).convert("RGB")
        ref_features = classifier.extract_features(ref_image_pil)
        # FIX: gr.Image(type="numpy") already delivers RGB. The previous
        # cv2.COLOR_BGR2RGB conversion silently swapped the R and B channels
        # before the SSIM comparison.
        ref_image_for_ssim = reference_image_array

        results = []
        html_output = "<h3>Comparison Results:</h3>"

        for comp_file_item in comparison_files:
            # FIX: reset per iteration so a failed path extraction cannot
            # report the previous file's name in the error message.
            file_path = None
            try:
                # Accept a plain path (gr.Files type="filepath" / sample
                # sets), a file-like object with .name, or Gradio's
                # internal dict payload.
                if isinstance(comp_file_item, str):
                    file_path = comp_file_item
                elif hasattr(comp_file_item, 'name'):
                    file_path = comp_file_item.name
                elif isinstance(comp_file_item, dict) and 'name' in comp_file_item:
                    file_path = comp_file_item['name']
                else:
                    raise ValueError("Invalid file object structure.")

                with open(file_path, "rb") as f:
                    comp_pil = Image.open(BytesIO(f.read())).convert("RGB")

                comp_array = np.array(comp_pil)

                # A. SSIM check (structural similarity — reported in the
                # status text, not decisive).
                ssim_score = classifier.calculate_ssim(ref_image_for_ssim, comp_array)

                # B. Deep-feature distance is the decisive criterion; the
                # 6.0 threshold is inherited from the original code.
                comp_features = classifier.extract_features(comp_pil)
                max_feature_diff = np.max(np.abs(ref_features - comp_features))
                is_similar = max_feature_diff < 6.0

                status_text = (f"SIMILAR (SSIM: {ssim_score:.3f})" if is_similar
                               else f"NOT SIMILAR (SSIM: {ssim_score:.3f})")
                status_color = "green" if is_similar else "red"

                html_output += f"<p style='color:{status_color};'>{os.path.basename(file_path)}: {status_text}</p>"
                results.append(comp_array)

            except Exception as e:
                error_name = os.path.basename(file_path) if file_path else 'Unknown File'
                html_output += f"<p style='color:red;'>Error processing {error_name}: {str(e)}</p>"

        return html_output, results

    except Exception as e:
        return f"<p style='color:red;'>Critical Error: {str(e)}</p>", []
522
+
523
# --- SAMPLE DATA DEFINITION ---

# Sample sets consumed by the "Load & Run" buttons in the UI.
# The listed files must exist under sample_data/ relative to the working
# directory (they are committed via Git LFS alongside this app).
SAMPLE_FILES_SET1 = {
    "reference": "sample_data/license3.jpg",
    "comparisons": ["sample_data/license3.jpg", "sample_data/license3.jpg", "sample_data/licence.jpeg"]
}

SAMPLE_FILES_SET2 = {
    "reference": "sample_data/licence.jpeg",
    "comparisons": ["sample_data/licence.jpeg", "sample_data/license3.jpg", "sample_data/licence.jpeg", "sample_data/licence.jpeg"]
}


# --- GRADIO UI SETUP ---
540
def create_interface():
    """Build the Gradio Blocks UI and wire its inputs to process_images.

    Returns:
        The constructed gr.Blocks interface (not yet launched).
    """
    with gr.Blocks(title="Image Similarity Classifier") as interface:

        gr.Markdown("# Image Similarity Classifier (ResNet + SSIM)")
        gr.Markdown("Tool to compare a reference image against multiple comparison images based on structural and deep feature similarity.")

        # 1. Guidelines Section
        with gr.Accordion("User Guidelines and Documentation", open=False):
            gr.Markdown(GUIDELINE_SETUP)
            gr.Markdown("---")
            gr.Markdown(GUIDELINE_INPUT)
            gr.Markdown("---")
            gr.Markdown(GUIDELINE_OUTPUT)

        gr.Markdown("---")

        # 2. Application Interface
        with gr.Row():
            with gr.Column():
                gr.Markdown("## Step 1: Upload a Reference Image ")
                reference_input = gr.Image(label="Reference Image", type="numpy", height=300)
                # FIX: corrected user-facing typo "Compair" -> "Compare".
                gr.Markdown("## Step 2: Upload Multiple Images to Compare with Reference Image ")
                # FIX: type="file" was removed in Gradio 4 and raises a
                # ValueError; "filepath" hands plain path strings to
                # process_images, whose isinstance(str) branch handles them.
                comparison_input = gr.Files(label="Comparison Images", type="filepath")
                gr.Markdown("## Step 3: Set the Confidence Score (Optional) ")
                threshold_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, step=0.05, label="Similarity Threshold (SSIM)")
                gr.Markdown("## Step 4: Click Compare Images ")
                submit_button = gr.Button("Compare Images", variant="primary")
                gr.Markdown("---")
                gr.Markdown("# Results ")
                gr.Markdown("## Comparison Result ")
                output_html = gr.HTML(label="Comparison Results")
                gr.Markdown("## Processed Comparison Images")
                output_gallery = gr.Gallery(label="Processed Comparison Images", columns=3)

        # 3. Example Loading Setup
        gr.Markdown("---")
        gr.Markdown("## Sample Data for Testing")
        # FIX: corrected user-facing grammar ("any of these two set").
        gr.Markdown("### Click either of these two sets to run the test set ")

        def load_and_run_set(reference_path, comparison_paths, threshold_value=0.5):
            """Load a sample set into the inputs and run the comparison.

            Returns values for: reference image, comparison files, slider,
            results HTML, and gallery (matching the outputs= lists below).
            """
            ref_img_array = np.array(Image.open(reference_path).convert("RGB"))
            # gr.Files accepts a list of path strings directly, and
            # process_images handles path strings as well.
            html, gallery = process_images(ref_img_array, comparison_paths, threshold_value)
            return ref_img_array, comparison_paths, threshold_value, html, gallery

        with gr.Row():
            btn_set1 = gr.Button("Load & Run Sample Set 1 (Similar Docs)", size="sm")
            btn_set2 = gr.Button("Load & Run Sample Set 2 (Dissimilar Docs)", size="sm")

        # 4. Event Handling
        submit_button.click(
            fn=process_images,
            inputs=[reference_input, comparison_input, threshold_slider],
            outputs=[output_html, output_gallery]
        )

        # Example buttons populate the inputs AND show the computed results.
        btn_set1.click(
            fn=lambda: load_and_run_set(SAMPLE_FILES_SET1['reference'], SAMPLE_FILES_SET1['comparisons'], 0.6),
            inputs=[],
            outputs=[reference_input, comparison_input, threshold_slider, output_html, output_gallery]
        )

        btn_set2.click(
            fn=lambda: load_and_run_set(SAMPLE_FILES_SET2['reference'], SAMPLE_FILES_SET2['comparisons'], 0.4),
            inputs=[],
            outputs=[reference_input, comparison_input, threshold_slider, output_html, output_gallery]
        )

    return interface
 
622
if __name__ == "__main__":
    # The 'sample_data/' directory must contain 'license3.jpg' and
    # 'licence.jpeg' for the sample-set buttons to work.
    interface = create_interface()
    interface.queue()   # enable request queuing before launching
    interface.launch()  # no share=True: avoid exposing the app publicly
requirements.txt CHANGED
@@ -1,5 +1,4 @@
1
  tensorflow==2.10.0
2
- tensorflow-gpu==2.10.0
3
  keras==2.10.0
4
  numpy==1.23.5
5
  opencv-python==4.7.0.72
 
1
  tensorflow==2.10.0
 
2
  keras==2.10.0
3
  numpy==1.23.5
4
  opencv-python==4.7.0.72
sample_data/licence.jpeg ADDED

Git LFS Details

  • SHA256: 4df78ddf6bdda816514c709964727e1b04576a6d36cc6afd218420a4d22cd040
  • Pointer size: 129 Bytes
  • Size of remote file: 7.8 kB
sample_data/license3.jpg ADDED

Git LFS Details

  • SHA256: d91e7df267f0ebb5e3df50aa4ff261e3c800a62ad166d65a17622c5fe1ee05bd
  • Pointer size: 130 Bytes
  • Size of remote file: 34.7 kB