HunzalaRasheed1 committed on
Commit
9cb64db
·
verified ·
1 Parent(s): cbab20b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +487 -55
app.py CHANGED
@@ -1,90 +1,522 @@
1
  import gradio as gr
2
- import requests
3
- import json
 
4
  import io
 
 
 
 
 
 
5
  import base64
6
- from PIL import Image
 
 
 
 
 
7
 
8
- # FastAPI backend URL
9
- API_URL = "http://localhost:8000"
 
 
 
 
10
 
11
  #############################
12
  # HELPER FUNCTIONS
13
  #############################
14
 
 
 
 
 
 
 
 
 
 
 
 
15
  def base64_to_pil(base64_str):
16
  """Convert base64 string to PIL image"""
17
  img_data = base64.b64decode(base64_str)
18
  return Image.open(io.BytesIO(img_data))
19
 
20
- def upload_image(image, endpoint):
21
- """Upload an image to the specified API endpoint"""
22
- # Save image to bytes
23
- img_byte_arr = io.BytesIO()
24
- image.save(img_byte_arr, format='JPEG')
25
- img_byte_arr = img_byte_arr.getvalue()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
- # Send to API
28
- files = {'file': ('image.jpg', img_byte_arr, 'image/jpeg')}
29
- response = requests.post(f"{API_URL}{endpoint}", files=files)
30
 
31
- # Return the JSON response
32
- return response.json()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
- #############################
35
- # API INTERFACE FUNCTIONS
36
- #############################
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
 
38
- def analyze_image(image):
39
- """Main function that sends the image to the API for analysis"""
40
- if image is None:
41
- return {
42
- original_image: None,
43
- ela_image: None,
44
- noise_image: None,
45
- heatmap_image: None,
46
- clone_image: None,
47
- exif_data: "{}",
48
- analysis_results: "Please upload an image first.",
49
- probability_slider: 0
 
 
 
 
 
 
 
 
50
  }
51
 
52
- # Send to API for full analysis
53
- try:
54
- response = upload_image(image, "/api/analyze_image")
 
 
55
 
56
- # Process results
57
- return {
58
- original_image: image,
59
- ela_image: base64_to_pil(response["ela_image"]),
60
- noise_image: base64_to_pil(response["noise_image"]),
61
- heatmap_image: base64_to_pil(response["heatmap_image"]),
62
- clone_image: base64_to_pil(response["clone_image"]),
63
- exif_data: json.dumps(response["exif_data"], indent=2),
64
- analysis_results: response["analysis_text"],
65
- probability_slider: response["manipulation_probability"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  }
 
 
 
67
  except Exception as e:
68
  return {
69
- original_image: image,
70
- ela_image: None,
71
- noise_image: None,
72
- heatmap_image: None,
73
- clone_image: None,
74
- exif_data: f"Error: {str(e)}",
75
- analysis_results: f"Error occurred during analysis: {str(e)}",
76
- probability_slider: 0
77
  }
78
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
 
80
  #############################
81
  # GRADIO INTERFACE
82
  #############################
83
 
84
- with gr.Blocks(title="Image Forensic & Fraud Detection Tool - MVP Demo") as demo:
85
  gr.Markdown("""
86
  # Image Forensic & Fraud Detection Tool
87
-
88
 
89
  Upload an image to analyze it for potential manipulation using various forensic techniques.
90
  """)
@@ -157,4 +589,4 @@ with gr.Blocks(title="Image Forensic & Fraud Detection Tool - MVP Demo") as demo
157
 
158
  # Launch the app
159
  if __name__ == "__main__":
160
- demo.launch()
 
1
  import gradio as gr
2
+ import numpy as np
3
+ import cv2
4
+ from PIL import Image, ImageChops, ImageEnhance
5
  import io
6
+ import os
7
+ import random
8
+ import matplotlib.pyplot as plt
9
+ from matplotlib.colors import LinearSegmentedColormap
10
+ import tempfile
11
+ import json
12
  import base64
13
+ from sklearn.metrics.pairwise import cosine_similarity
14
+ import shutil
15
+ from typing import Dict, Any
16
+ from scipy.spatial import cKDTree
17
+ from multiprocessing import Pool, cpu_count
18
+ import nest_asyncio
19
 
20
+ # Apply nest_asyncio to allow async operations
21
+ nest_asyncio.apply()
22
+
23
+ # Create temporary directory for saving files
24
+ TEMP_DIR = tempfile.mkdtemp()
25
+ print(f"Using temporary directory: {TEMP_DIR}")
26
 
27
  #############################
28
  # HELPER FUNCTIONS
29
  #############################
30
 
31
def save_pil_image(img, path):
    """Write *img* to *path* and hand the path back for call chaining."""
    img.save(path)
    return path
35
+
36
def pil_to_base64(img):
    """Encode a PIL image as a base64 PNG string suitable for a JSON payload."""
    png_buffer = io.BytesIO()
    img.save(png_buffer, format="PNG")
    encoded = base64.b64encode(png_buffer.getvalue())
    return encoded.decode('utf-8')
41
+
42
def base64_to_pil(base64_str):
    """Decode a base64-encoded image payload back into a PIL Image."""
    raw_bytes = base64.b64decode(base64_str)
    return Image.open(io.BytesIO(raw_bytes))
46
 
47
+ #############################
48
+ # FORENSIC ANALYSIS FUNCTIONS
49
+ #############################
50
+
51
+ # Define find_matches as a global function instead of nested
52
def find_matches(args):
    """
    Find pairs of near-duplicate feature blocks within a chunk of indices.

    Defined at module level (not nested) so it can be pickled and shipped to
    multiprocessing workers.

    Args:
        args: Tuple of (block_indices, blocks, tree, similarity_threshold)
            where blocks is the feature array and tree a cKDTree built on it.

    Returns:
        Set of (i, j) index tuples with i < j, one per matching block pair.
    """
    block_indices, blocks, tree, similarity_threshold = args
    n_blocks = len(blocks)
    pairs = set()
    for idx in block_indices:
        # Query up to 10 nearest neighbours within the threshold; absent
        # neighbours are padded with index == n_blocks and distance == inf.
        distances, neighbours = tree.query(
            blocks[idx], k=10, distance_upper_bound=similarity_threshold
        )
        for neighbour, distance in zip(neighbours, distances):
            # Drop self-matches and the padded (invalid) entries.
            if neighbour == idx or neighbour >= n_blocks:
                continue
            if distance <= similarity_threshold:
                # Order the pair so (a, b) and (b, a) collapse to one entry.
                pairs.add((idx, neighbour) if idx < neighbour else (neighbour, idx))
    return pairs
73
+
74
+
75
def detect_clones(image_path, max_dimension=2000):
    """
    Detects cloned/copy-pasted regions in the image with optimized performance.

    Blocks of the (grayscale) image are reduced to low-frequency DCT features,
    and near-identical feature vectors are located with a KD-tree and flagged
    as potential copy-paste pairs.

    Args:
        image_path: Path to the image file
        max_dimension: Maximum dimension to resize large images to

    Returns:
        Tuple of (PIL Image with matched blocks outlined, count of matched pairs)

    Raises:
        ValueError: If the image cannot be read from image_path
    """
    # Read image in grayscale; cv2.imread returns None on failure
    img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    if img is None:
        raise ValueError(f"Could not read image at {image_path}")

    height, width = img.shape

    # Handle large images by resizing if needed (keeps block count bounded)
    scale = 1.0
    if height > max_dimension or width > max_dimension:
        scale = max_dimension / max(height, width)
        new_height, new_width = int(height * scale), int(width * scale)
        img = cv2.resize(img, (new_width, new_height))
        height, width = img.shape

    # Create output image (BGR so we can draw colored rectangles)
    clone_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

    # Define parameters
    block_size = 16
    stride = 8

    # For very large images, increase stride (fewer, sparser blocks)
    if (height * width) > 4000000:
        stride = 16

    # Extract block features
    blocks = []
    positions = []

    # Apply DCT to each block for feature extraction (faster than raw pixels)
    for y in range(0, height - block_size, stride):
        for x in range(0, width - block_size, stride):
            block = img[y:y+block_size, x:x+block_size].astype(np.float32)
            # Apply DCT and keep only the 4x4 low-frequency corner
            # (16 coefficients) — robust to noise, much smaller feature
            dct = cv2.dct(block)
            feature = dct[:4, :4].flatten()  # Use only low-frequency components
            blocks.append(feature)
            positions.append((x, y))

    # Convert to numpy array for faster processing
    # NOTE(review): if the image is smaller than block_size, `blocks` is empty
    # and np.linalg.norm(..., axis=1) below will raise — confirm callers only
    # pass images at least 16px on each side.
    blocks = np.array(blocks, dtype=np.float32)

    # Normalize features so Euclidean distance approximates cosine distance
    norms = np.linalg.norm(blocks, axis=1)
    norms[norms == 0] = 1  # Avoid division by zero
    blocks = blocks / norms[:, np.newaxis]

    # Use KD-Tree for efficient nearest neighbor search (much faster than
    # computing a full pairwise cosine_similarity matrix)
    tree = cKDTree(blocks)

    # Find similar blocks using radius search (equivalent to high cosine similarity)
    similarity_threshold = 0.04  # Equivalent to ~0.95 cosine similarity
    matches = set()

    # Use multiple processes to speed up the search; find_matches is a
    # module-level function so it (and its cKDTree argument) can be pickled
    num_processes = min(8, cpu_count())

    # Split work among processes as contiguous index ranges
    chunk_size = len(blocks) // num_processes + 1
    block_chunks = [range(i, min(i + chunk_size, len(blocks))) for i in range(0, len(blocks), chunk_size)]

    # Prepare arguments for the find_matches function
    args_list = [(chunk, blocks, tree, similarity_threshold) for chunk in block_chunks]

    with Pool(num_processes) as pool:
        results = pool.map(find_matches, args_list)

    # Combine per-process result sets
    for result in results:
        matches.update(result)

    # Draw rectangles for matches (red = first block, blue = its twin)
    for i, j in matches:
        x1, y1 = positions[i]
        x2, y2 = positions[j]
        cv2.rectangle(clone_img, (x1, y1), (x1+block_size, y1+block_size), (0, 0, 255), 1)
        cv2.rectangle(clone_img, (x2, y2), (x2+block_size, y2+block_size), (255, 0, 0), 1)

    # Convert OpenCV image (BGR) to PIL format (RGB)
    clone_result = Image.fromarray(cv2.cvtColor(clone_img, cv2.COLOR_BGR2RGB))

    # Restore original scale if the image was resized
    if scale != 1.0:
        orig_size = (int(clone_img.shape[1]/scale), int(clone_img.shape[0]/scale))
        clone_result = clone_result.resize(orig_size, Image.LANCZOS)

    return clone_result, len(matches)
175
 
176
def error_level_analysis(image_path, quality=90, scale=10):
    """
    Performs Error Level Analysis (ELA) on the image.

    ELA recompresses the image as JPEG and inspects the per-pixel difference:
    regions edited after the last save tend to show a different error level
    than the rest of the image.

    Args:
        image_path: Path to the image file
        quality: JPEG quality level for recompression
        scale: Amplification factor for differences

    Returns:
        PIL Image containing the color-mapped ELA result
    """
    # Open the original image (force RGB so the pixel difference is well-defined)
    original = Image.open(image_path).convert('RGB')

    # Save a JPEG version at the specified quality into the shared temp dir
    temp_filename = os.path.join(TEMP_DIR, "temp_ela_process.jpg")
    original.save(temp_filename, 'JPEG', quality=quality)

    # Fix: the previous bare Image.open left the file handle open, which can
    # block overwriting the shared temp file on Windows; the context manager
    # closes it once the difference has been computed.
    with Image.open(temp_filename) as recompressed:
        # Calculate the difference between original and recompressed versions
        diff = ImageChops.difference(original, recompressed)

    # Amplify the difference for better visualization
    diff = ImageEnhance.Brightness(diff).enhance(scale)

    # Create a colored version of the diff for visualization
    diff_array = np.array(diff)

    # Collapse to a single grayscale channel
    if diff_array.ndim == 3:
        diff_gray = diff_array.mean(axis=2)
    else:
        diff_gray = diff_array

    # Apply the 'jet' colormap for better visualization
    colormap = plt.get_cmap('jet')
    colored_diff = (colormap(diff_gray / 255.0) * 255).astype(np.uint8)

    # Create PIL image from the array (drop the alpha channel the colormap adds)
    return Image.fromarray(colored_diff[:, :, :3])
219
 
220
def extract_exif_metadata(image_path):
    """
    Extracts EXIF metadata from the image and identifies potential
    manipulation indicators (editing software, timestamp mismatches).

    Args:
        image_path: Path to the image file

    Returns:
        Dictionary with keys "metadata", "indicators", "summary" and
        "analysis_count". Any failure (unreadable file, format without
        EXIF, ...) yields an "Analysis failed" result instead of raising.
    """
    try:
        img = Image.open(image_path)
        # NOTE: _getexif() is a private PIL API; formats without EXIF support
        # raise AttributeError here, which the except block below absorbs.
        raw_exif = img._getexif() or {}

        # Readable names for the EXIF tag ids we care about
        tag_names = {
            271: 'Make', 272: 'Model', 306: 'DateTime',
            36867: 'DateTimeOriginal', 36868: 'DateTimeDigitized',
            37510: 'UserComment', 40964: 'RelatedSoundFile',
            305: 'Software', 315: 'Artist', 33432: 'Copyright'
        }

        # Stringify values; unknown tag ids keep their numeric id as the key
        metadata = {
            tag_names.get(tag_id, str(tag_id)): str(value)
            for tag_id, value in raw_exif.items()
        }

        indicators = []

        # Editing-software fingerprints in the Software tag
        software = metadata.get('Software', '').lower()
        if any(editor in software for editor in
               ('photoshop', 'lightroom', 'gimp', 'paint', 'editor', 'filter')):
            indicators.append(f"Image edited with {metadata['Software']}")

        # Timestamp consistency checks
        original_time = metadata.get('DateTimeOriginal')
        modified_time = metadata.get('DateTime')
        if original_time is not None and modified_time is not None \
                and original_time != modified_time:
            indicators.append("Capture time differs from modification time")
        if modified_time is not None and original_time is None:
            indicators.append("Original capture time missing")

        return {
            "metadata": metadata,
            "indicators": indicators,
            "summary": ("Potential manipulation detected"
                        if indicators else "No obvious manipulation indicators"),
            "analysis_count": len(metadata),
        }

    except Exception as e:
        # Best-effort: never raise out of metadata extraction
        return {
            "metadata": {"Error": str(e)},
            "indicators": ["Error extracting metadata"],
            "summary": "Analysis failed",
            "analysis_count": 0,
        }
286
 
287
def noise_analysis(image_path, amplification=15):
    """
    Extracts and analyzes noise patterns in the image to detect inconsistencies.

    Regions whose noise signature differs from the rest of the image often
    indicate splicing or retouching.

    Args:
        image_path: Path to the image file
        amplification: Factor to amplify noise for visualization

    Returns:
        PIL Image containing the color-mapped noise analysis result

    Raises:
        ValueError: If the image cannot be read from image_path
    """
    # Read the image
    img = cv2.imread(image_path)
    # Fix: cv2.imread returns None on failure, which previously surfaced as an
    # opaque AttributeError inside cvtColor; fail fast with a clear message
    # instead (consistent with detect_clones).
    if img is None:
        raise ValueError(f"Could not read image at {image_path}")

    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Gaussian blur approximates the noise-free base image
    blur = cv2.GaussianBlur(gray, (5, 5), 0)

    # The residual (original minus base) is the noise component
    noise = cv2.subtract(gray, blur)

    # Amplify the noise (saturating at 255) so faint patterns become visible
    noise = cv2.multiply(noise, amplification)

    # Apply a colormap for easier visual inspection
    noise_colored = cv2.applyColorMap(noise, cv2.COLORMAP_JET)

    # Convert back to PIL (RGB) format for the UI
    return Image.fromarray(cv2.cvtColor(noise_colored, cv2.COLOR_BGR2RGB))
320
+
321
def manipulation_likelihood(image_path):
    """
    Simulates a pre-trained model that evaluates the likelihood of image manipulation.
    For demo purposes, this generates a random score with some biasing based on image properties.

    Args:
        image_path: Path to the image file

    Returns:
        Dictionary with keys "probability" (float), "heatmap_image" (PIL Image
        overlay), "explanation" (str) and "confidence" (str)
    """
    # Open the image
    img = np.array(Image.open(image_path).convert('RGB'))

    # Get image dimensions
    height, width = img.shape[:2]

    # In a real implementation, you would use your pre-trained model here
    # For demo purposes, we'll simulate a model output based on image characteristics

    # Create a heatmap of "suspicious" areas (for demo purposes)
    heatmap = np.zeros((height, width), dtype=np.float32)

    # Add some "suspicious" regions for demonstration
    # This would be replaced by actual model output in a real implementation

    # 1. Add some random regions of interest
    num_regions = random.randint(1, 4)
    for _ in range(num_regions):
        x = random.randint(0, width - 1)
        y = random.randint(0, height - 1)
        # NOTE(review): for images narrower than 10px, width//10 == width//5 == 0
        # so radius can be 0, making the (radius/2)**2 divisor below zero —
        # confirm a minimum input size is enforced upstream.
        radius = random.randint(width//10, width//5)

        # Create a circular region of interest
        y_indices, x_indices = np.ogrid[:height, :width]
        dist_from_center = ((y_indices - y)**2 + (x_indices - x)**2)
        mask = dist_from_center <= radius**2

        # Add to heatmap with random intensity, falling off from the center
        intensity = random.uniform(0.5, 1.0)
        heatmap[mask] = np.maximum(heatmap[mask], intensity * np.exp(-dist_from_center[mask] / (2 * (radius/2)**2)))

    # Normalize the heatmap to [0, 1]
    if np.max(heatmap) > 0:
        heatmap = heatmap / np.max(heatmap)

    # Convert to RGBA for visualization; alpha ramps 0 -> 0.7 with suspicion,
    # so channel 3 of heatmap_rgb marks "suspicious" pixels
    cmap = LinearSegmentedColormap.from_list('custom', [(0, 0, 0, 0), (1, 0, 0, 0.7)])
    heatmap_rgb = (cmap(heatmap) * 255).astype(np.uint8)

    # Overlay heatmap on the original image
    orig_img = np.array(Image.open(image_path).convert('RGB'))
    overlay = orig_img.copy()

    # Only add red channel where heatmap has values
    for c in range(3):
        if c == 0:  # Red channel: blend 50/50 with the heatmap's red
            overlay[:, :, c] = np.where(heatmap_rgb[:, :, 3] > 0,
                                        (overlay[:, :, c] * 0.5 + heatmap_rgb[:, :, 0] * 0.5).astype(np.uint8),
                                        overlay[:, :, c])
        else:  # Green and blue channels - reduce them in highlighted areas
            overlay[:, :, c] = np.where(heatmap_rgb[:, :, 3] > 0,
                                        (overlay[:, :, c] * 0.5).astype(np.uint8),
                                        overlay[:, :, c])

    # Generate a "manipulation probability" for demo purposes
    # In a real implementation, this would come from your model
    exif_result = extract_exif_metadata(image_path)
    exif_factor = 0.3 if exif_result["indicators"] else 0.0

    # Slightly bias probability based on file characteristics for the demo
    img_factor = 0.1 if ".jpg" in image_path.lower() else 0.0

    # Combine factors with a random component for the demo, capped at 0.95
    base_probability = random.uniform(0.2, 0.8)
    manipulation_probability = min(0.95, base_probability + exif_factor + img_factor)

    # Create a more realistic result for the demo
    overlay_image = Image.fromarray(overlay)

    # Return results
    return {
        "probability": manipulation_probability,
        "heatmap_image": overlay_image,
        "explanation": get_probability_explanation(manipulation_probability),
        "confidence": "medium" if 0.3 < manipulation_probability < 0.7 else "high"
    }
408
+
409
def get_probability_explanation(prob):
    """Map a manipulation probability to a human-readable verdict."""
    banded_messages = (
        (0.3, "The image appears to be authentic with no significant signs of manipulation."),
        (0.6, "Some inconsistencies detected that might indicate limited manipulation."),
    )
    for upper_bound, message in banded_messages:
        if prob < upper_bound:
            return message
    return "Strong indicators of digital manipulation detected in this image."
417
+
418
def get_clone_explanation(count):
    """Map the number of clone-block matches to a human-readable explanation."""
    if count == 0:
        return "No copy-paste manipulations detected in the image."
    if count < 10:
        return "Few potential copy-paste regions detected, might be false positives."
    return "Significant number of copy-paste regions detected, suggesting manipulation."
426
+
427
def save_uploaded_image(image):
    """
    Persist an uploaded PIL image to the shared temp directory as a JPEG.

    Args:
        image: PIL Image from the Gradio upload widget

    Returns:
        Filesystem path of the saved JPEG
    """
    temp_path = os.path.join(TEMP_DIR, "temp_analyze.jpg")
    # Fix: PNG uploads arrive as RGBA/P mode, which PIL's JPEG encoder rejects
    # ("cannot write mode RGBA as JPEG"); normalize to RGB before saving.
    if image.mode != "RGB":
        image = image.convert("RGB")
    image.save(temp_path)
    return temp_path
432
+
433
def analyze_complete_image(image_path):
    """
    Comprehensive analysis of an image, running all forensic tests.

    Runs EXIF inspection, the (simulated) manipulation model, clone
    detection, ELA and noise analysis sequentially, then compiles a
    Markdown summary.

    Args:
        image_path: Path to the saved image file

    Returns:
        Dict with the probability, summary text, EXIF metadata, clone count
        and all result images (original, ELA, noise, heatmap, clones)
    """
    # Read the image as PIL
    image = Image.open(image_path)

    # Run all analyses. NOTE(review): manipulation_likelihood consumes the
    # shared `random` stream, so reordering these calls changes its output —
    # keep this order if reproducibility under a fixed seed matters.
    exif_result = extract_exif_metadata(image_path)
    manipulation_result = manipulation_likelihood(image_path)
    clone_result, clone_count = detect_clones(image_path)
    ela_result = error_level_analysis(image_path)
    noise_result = noise_analysis(image_path)

    # Compile combined analysis text (Markdown, rendered by the Gradio UI)
    analysis_text = f"""
    ## Manipulation Analysis Results

    **Overall Assessment: {manipulation_result['probability']*100:.1f}% likelihood of manipulation**

    {manipulation_result['explanation']}

    ### Clone Detection Analysis:
    Found {clone_count} potential cloned regions in the image.
    {get_clone_explanation(clone_count)}

    ### EXIF Metadata Analysis:
    {exif_result['summary']}

    Indicators found: {len(exif_result['indicators'])}
    """

    # Append the detailed EXIF indicators, if any
    if exif_result['indicators']:
        analysis_text += "\nDetailed indicators:\n"
        for indicator in exif_result['indicators']:
            analysis_text += f"- {indicator}\n"

    # Return complete result object
    return {
        "manipulation_probability": manipulation_result["probability"],
        "analysis_text": analysis_text,
        "exif_data": exif_result["metadata"],
        "clone_count": clone_count,
        "original_image": image,
        "ela_image": ela_result,
        "noise_image": noise_result,
        "heatmap_image": manipulation_result["heatmap_image"],
        "clone_image": clone_result
    }
480
+
481
+ #############################
482
+ # GRADIO INTERFACE FUNCTIONS
483
+ #############################
484
+
485
def analyze_image(image):
    """
    Gradio entry point: run the full forensic pipeline on an uploaded image.

    Args:
        image: PIL Image from the upload widget, or None

    Returns:
        8-tuple matching the UI outputs: (original, ELA, noise, heatmap,
        clone overlay, EXIF JSON string, analysis text, probability)
    """
    # Guard clause: nothing uploaded yet
    if image is None:
        return None, None, None, None, None, "{}", "Please upload an image first.", 0

    # Persist the upload so the file-based analysis functions can read it
    temp_path = save_uploaded_image(image)

    try:
        analysis = analyze_complete_image(temp_path)
        return (
            image,                                         # original_image
            analysis["ela_image"],                         # ela_image
            analysis["noise_image"],                       # noise_image
            analysis["heatmap_image"],                     # heatmap_image
            analysis["clone_image"],                       # clone_image
            json.dumps(analysis["exif_data"], indent=2),   # exif_data
            analysis["analysis_text"],                     # analysis_results
            analysis["manipulation_probability"],          # probability_slider
        )
    except Exception as e:
        error_message = f"Error occurred during analysis: {str(e)}"
        print(error_message)  # Log the error
        return image, None, None, None, None, f"Error: {str(e)}", error_message, 0
512
 
513
  #############################
514
  # GRADIO INTERFACE
515
  #############################
516
 
517
+ with gr.Blocks(title="Image Forensic & Fraud Detection Tool") as demo:
518
  gr.Markdown("""
519
  # Image Forensic & Fraud Detection Tool
 
520
 
521
  Upload an image to analyze it for potential manipulation using various forensic techniques.
522
  """)
 
589
 
590
  # Launch the app
591
  if __name__ == "__main__":
592
+ demo.launch(server_name="0.0.0.0", server_port=7860)