HunzalaRasheed1 commited on
Commit
08d02ba
·
verified ·
1 Parent(s): 8156235

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +637 -0
app.py ADDED
@@ -0,0 +1,637 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, UploadFile, File
2
+ from fastapi.responses import JSONResponse, FileResponse
3
+ import numpy as np
4
+ import cv2
5
+ from PIL import Image, ImageChops, ImageEnhance
6
+ import io
7
+ import os
8
+ import random
9
+ import matplotlib.pyplot as plt
10
+ from matplotlib.colors import LinearSegmentedColormap
11
+ import tempfile
12
+ import json
13
+ import base64
14
+ from sklearn.metrics.pairwise import cosine_similarity
15
+ from fastapi.middleware.cors import CORSMiddleware
16
+ import shutil
17
+ from typing import Dict, Any
18
+
19
# Create temporary directory for saving files
# Use environment variable for Docker compatibility (e.g. mount a writable
# volume and point TEMP_DIR at it); falls back to a fresh mkdtemp directory.
TEMP_DIR = os.environ.get('TEMP_DIR', tempfile.mkdtemp())
os.makedirs(TEMP_DIR, exist_ok=True)
print(f"Using temporary directory: {TEMP_DIR}")

app = FastAPI(title="Image Forensic & Fraud Detection API")

# Add CORS middleware to allow requests from any origin.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# permissive — browsers reject that combination for credentialed requests;
# confirm whether credentials are actually needed.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
35
+
36
+ #############################
37
+ # HELPER FUNCTIONS
38
+ #############################
39
+
40
def save_pil_image(img, path):
    """Persist a PIL image to disk.

    Returns the destination path so the call can be chained inline.
    """
    destination = path
    img.save(destination)
    return destination
44
+
45
def pil_to_base64(img):
    """Encode a PIL image as a base64 PNG string for embedding in JSON."""
    raw = io.BytesIO()
    img.save(raw, format="PNG")
    encoded = base64.b64encode(raw.getvalue())
    return encoded.decode('utf-8')
50
+
51
+ #############################
52
+ # FORENSIC ANALYSIS FUNCTIONS
53
+ #############################
54
+
55
+ @app.post("/api/detect_clones")
56
+ async def api_detect_clones(file: UploadFile = File(...)):
57
+ """API endpoint for clone detection"""
58
+ # Save uploaded file
59
+ temp_path = os.path.join(TEMP_DIR, f"temp_clone_{random.randint(1000, 9999)}.jpg")
60
+ with open(temp_path, "wb") as f:
61
+ shutil.copyfileobj(file.file, f)
62
+
63
+ try:
64
+ # Run detection
65
+ clone_img, clone_count = detect_clones(temp_path)
66
+
67
+ # Return results
68
+ result = {
69
+ "image": pil_to_base64(clone_img),
70
+ "clone_count": clone_count,
71
+ "explanation": get_clone_explanation(clone_count)
72
+ }
73
+ finally:
74
+ # Clean up the temporary file
75
+ if os.path.exists(temp_path):
76
+ os.remove(temp_path)
77
+
78
+ return result
79
+
80
# Module-level (not nested) so multiprocessing can pickle it for Pool.map.
def find_matches(args):
    """Locate near-duplicate feature blocks for one chunk of block indices.

    Args:
        args: A tuple containing (block_indices, blocks, tree, similarity_threshold)

    Returns:
        A set of (low_index, high_index) pairs of matching blocks.
    """
    indices_to_scan, blocks, tree, similarity_threshold = args
    n_blocks = len(blocks)
    pairs = set()
    for i in indices_to_scan:
        # Query the KD-tree for up to 10 neighbours within the threshold radius.
        dists, neighbours = tree.query(blocks[i], k=10, distance_upper_bound=similarity_threshold)
        # Keep only real neighbours (cKDTree pads misses with index == n_blocks),
        # skip self-matches, and store each pair sorted to deduplicate.
        pairs.update(
            (min(i, j), max(i, j))
            for j, dist in zip(neighbours, dists)
            if j != i and j < n_blocks and dist <= similarity_threshold
        )
    return pairs
102
+
103
+
104
def detect_clones(image_path, max_dimension=2000):
    """
    Detects cloned/copy-pasted regions in the image with optimized performance.

    Works on overlapping grayscale blocks, reduces each block to its
    low-frequency DCT coefficients, and finds near-duplicate blocks with a
    KD-tree radius search parallelized across processes.

    Args:
        image_path: Path to the image file
        max_dimension: Maximum dimension to resize large images to

    Returns:
        Tuple of (PIL Image with matched regions outlined, count of matched pairs)

    Raises:
        ValueError: If the image cannot be read.
    """
    # Local imports keep the heavy dependencies out of module import time.
    import cv2
    import numpy as np
    from PIL import Image
    from scipy.spatial import cKDTree
    from multiprocessing import Pool, cpu_count

    # Read image (cv2.imread returns None rather than raising on failure)
    img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    if img is None:
        raise ValueError(f"Could not read image at {image_path}")

    height, width = img.shape

    # Handle large images by resizing if needed; `scale` is remembered so the
    # annotated result can be restored to the original size at the end.
    scale = 1.0
    if height > max_dimension or width > max_dimension:
        scale = max_dimension / max(height, width)
        new_height, new_width = int(height * scale), int(width * scale)
        img = cv2.resize(img, (new_width, new_height))
        height, width = img.shape

    # Create output image (BGR so rectangles can be drawn in color)
    clone_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

    # Define parameters: 16x16 blocks sampled every 8 pixels (overlapping)
    block_size = 16
    stride = 8

    # For very large images, increase stride to cap the number of blocks
    if (height * width) > 4000000:
        stride = 16

    # Extract block features
    blocks = []
    positions = []

    # Apply DCT to each block for feature extraction (faster than raw pixels)
    for y in range(0, height - block_size, stride):
        for x in range(0, width - block_size, stride):
            block = img[y:y+block_size, x:x+block_size].astype(np.float32)
            # Apply DCT and keep only top 16 coefficients (reduces dimensionality)
            dct = cv2.dct(block)
            feature = dct[:4, :4].flatten()  # Use only low-frequency components
            blocks.append(feature)
            positions.append((x, y))

    # Convert to numpy array for faster processing
    blocks = np.array(blocks, dtype=np.float32)

    # Normalize features so Euclidean distance approximates cosine distance
    norms = np.linalg.norm(blocks, axis=1)
    norms[norms == 0] = 1  # Avoid division by zero on flat (all-zero DCT) blocks
    blocks = blocks / norms[:, np.newaxis]

    # Use KD-Tree for efficient nearest neighbor search (much faster than cosine_similarity)
    tree = cKDTree(blocks)

    # Find similar blocks using radius search (equivalent to high cosine similarity)
    # This is much more efficient than computing the full similarity matrix
    similarity_threshold = 0.04  # Equivalent to ~0.95 cosine similarity
    matches = set()

    # Use multiple processes to speed up the search
    num_processes = min(8, cpu_count())

    # Split work among processes in contiguous index chunks
    chunk_size = len(blocks) // num_processes + 1
    block_chunks = [range(i, min(i + chunk_size, len(blocks))) for i in range(0, len(blocks), chunk_size)]

    # Prepare arguments for the module-level find_matches function
    # (it must be module-level so multiprocessing can pickle it)
    args_list = [(chunk, blocks, tree, similarity_threshold) for chunk in block_chunks]

    # NOTE(review): a new Pool is spawned per call — expensive if this runs
    # per HTTP request; consider a shared executor under heavy traffic.
    with Pool(num_processes) as pool:
        results = pool.map(find_matches, args_list)

    # Combine per-chunk match sets (pairs are already deduplicated as sorted tuples)
    for result in results:
        matches.update(result)

    # Draw rectangles for matches: red for the first block, blue for its twin
    for i, j in matches:
        x1, y1 = positions[i]
        x2, y2 = positions[j]
        cv2.rectangle(clone_img, (x1, y1), (x1+block_size, y1+block_size), (0, 0, 255), 1)
        cv2.rectangle(clone_img, (x2, y2), (x2+block_size, y2+block_size), (255, 0, 0), 1)

    # Convert OpenCV image (BGR) to PIL format (RGB)
    clone_result = Image.fromarray(cv2.cvtColor(clone_img, cv2.COLOR_BGR2RGB))

    # Restore original scale if the image was resized for processing
    if scale != 1.0:
        orig_size = (int(clone_img.shape[1]/scale), int(clone_img.shape[0]/scale))
        clone_result = clone_result.resize(orig_size, Image.LANCZOS)

    return clone_result, len(matches)
210
+
211
+ @app.post("/api/error_level_analysis")
212
+ async def api_error_level_analysis(file: UploadFile = File(...), quality: int = 90, scale: int = 10):
213
+ """API endpoint for error level analysis"""
214
+ # Save uploaded file with random suffix to avoid conflicts
215
+ temp_path = os.path.join(TEMP_DIR, f"temp_ela_{random.randint(1000, 9999)}.jpg")
216
+ with open(temp_path, "wb") as f:
217
+ shutil.copyfileobj(file.file, f)
218
+
219
+ try:
220
+ # Run analysis
221
+ ela_img = error_level_analysis(temp_path, quality, scale)
222
+
223
+ # Return results
224
+ result = {
225
+ "image": pil_to_base64(ela_img)
226
+ }
227
+ finally:
228
+ # Clean up the temporary file
229
+ if os.path.exists(temp_path):
230
+ os.remove(temp_path)
231
+
232
+ return result
233
+
234
def error_level_analysis(image_path, quality=90, scale=10):
    """
    Performs Error Level Analysis (ELA) on the image.

    Regions edited after the last JPEG save recompress differently, so the
    amplified difference between the original and a recompressed copy
    highlights potential edits.

    Args:
        image_path: Path to the image file
        quality: JPEG quality level for recompression
        scale: Amplification factor for differences

    Returns:
        PIL Image containing the colorized ELA result
    """
    # Open the original image
    original = Image.open(image_path).convert('RGB')

    # mkstemp yields a unique, race-free path; the previous 4-digit random
    # suffix could collide under concurrent requests and clobber another
    # request's scratch file.
    fd, temp_filename = tempfile.mkstemp(suffix=".jpg", dir=TEMP_DIR)
    os.close(fd)

    try:
        # Save a JPEG version at the specified quality, then diff against it.
        original.save(temp_filename, 'JPEG', quality=quality)

        # Close the recompressed file handle promptly — required on Windows
        # before the scratch file can be deleted in the finally block.
        with Image.open(temp_filename) as recompressed:
            diff = ImageChops.difference(original, recompressed)

        # Amplify the difference for better visualization
        diff = ImageEnhance.Brightness(diff).enhance(scale)
        diff_array = np.array(diff)

        # Collapse to a single channel before applying the colormap
        if diff_array.ndim == 3:
            diff_gray = diff_array.mean(axis=2)
        else:
            diff_gray = diff_array

        # 'jet' colormap returns RGBA floats in [0, 1]; scale to uint8 and
        # drop the alpha channel for the returned RGB image.
        colormap = plt.get_cmap('jet')
        colored_diff = (colormap(diff_gray / 255.0) * 255).astype(np.uint8)
        return Image.fromarray(colored_diff[:, :, :3])
    finally:
        # Clean up the scratch JPEG
        if os.path.exists(temp_filename):
            os.remove(temp_filename)
283
+
284
+ @app.post("/api/extract_exif_metadata")
285
+ async def api_extract_exif_metadata(file: UploadFile = File(...)):
286
+ """API endpoint for EXIF metadata extraction"""
287
+ # Save uploaded file
288
+ temp_path = os.path.join(TEMP_DIR, f"temp_exif_{random.randint(1000, 9999)}.jpg")
289
+ with open(temp_path, "wb") as f:
290
+ shutil.copyfileobj(file.file, f)
291
+
292
+ try:
293
+ # Run analysis
294
+ exif_result = extract_exif_metadata(temp_path)
295
+
296
+ # Return results
297
+ return exif_result
298
+ finally:
299
+ # Clean up the temporary file
300
+ if os.path.exists(temp_path):
301
+ os.remove(temp_path)
302
+
303
def extract_exif_metadata(image_path):
    """
    Extracts EXIF metadata from the image and identifies potential manipulation indicators.

    Args:
        image_path: Path to the image file

    Returns:
        Dictionary with keys "metadata", "indicators", "summary" and
        "analysis_count"; on failure, a dictionary of the same shape
        describing the error.
    """
    try:
        img = Image.open(image_path)
        # _getexif() only exists on JPEG/TIFF images; formats without it
        # (e.g. PNG) would raise AttributeError and wrongly report
        # "Analysis failed", so treat a missing accessor as "no EXIF data".
        exif_getter = getattr(img, '_getexif', None)
        exif_data = (exif_getter() if exif_getter is not None else None) or {}

        # Map EXIF tags to readable names (unknown tags keep their numeric id)
        exif_tags = {
            271: 'Make', 272: 'Model', 306: 'DateTime',
            36867: 'DateTimeOriginal', 36868: 'DateTimeDigitized',
            37510: 'UserComment', 40964: 'RelatedSoundFile',
            305: 'Software', 315: 'Artist', 33432: 'Copyright'
        }

        # Process EXIF data into a readable {name: string-value} mapping
        metadata = {}
        for tag_id, value in exif_data.items():
            tag = exif_tags.get(tag_id, str(tag_id))
            metadata[tag] = str(value)

        # Collect heuristics that hint at post-processing
        indicators = []

        # Check for editing software in the Software tag
        editing_software = ['photoshop', 'lightroom', 'gimp', 'paint', 'editor', 'filter']
        if 'Software' in metadata:
            software = metadata['Software'].lower()
            for editor in editing_software:
                if editor in software:
                    indicators.append(f"Image edited with {metadata['Software']}")
                    break

        # Capture time differing from modification time suggests re-saving
        if 'DateTimeOriginal' in metadata and 'DateTime' in metadata:
            if metadata['DateTimeOriginal'] != metadata['DateTime']:
                indicators.append("Capture time differs from modification time")

        # A modification time without an original capture time is also suspicious
        if 'DateTime' in metadata and 'DateTimeOriginal' not in metadata:
            indicators.append("Original capture time missing")

        return {
            "metadata": metadata,
            "indicators": indicators,
            "summary": "Potential manipulation detected" if indicators else "No obvious manipulation indicators",
            "analysis_count": len(metadata)
        }

    except Exception as e:
        # Deliberate best-effort: metadata extraction must never crash the API,
        # so any failure is reported inside the normal result shape.
        return {
            "metadata": {"Error": str(e)},
            "indicators": ["Error extracting metadata"],
            "summary": "Analysis failed",
            "analysis_count": 0
        }
369
+
370
+ @app.post("/api/noise_analysis")
371
+ async def api_noise_analysis(file: UploadFile = File(...), amplification: int = 15):
372
+ """API endpoint for noise analysis"""
373
+ # Save uploaded file
374
+ temp_path = os.path.join(TEMP_DIR, f"temp_noise_{random.randint(1000, 9999)}.jpg")
375
+ with open(temp_path, "wb") as f:
376
+ shutil.copyfileobj(file.file, f)
377
+
378
+ try:
379
+ # Run analysis
380
+ noise_img = noise_analysis(temp_path, amplification)
381
+
382
+ # Return results
383
+ return {
384
+ "image": pil_to_base64(noise_img)
385
+ }
386
+ finally:
387
+ # Clean up the temporary file
388
+ if os.path.exists(temp_path):
389
+ os.remove(temp_path)
390
+
391
def noise_analysis(image_path, amplification=15):
    """
    Extracts and analyzes noise patterns in the image to detect inconsistencies.

    Args:
        image_path: Path to the image file
        amplification: Factor to amplify noise for visualization

    Returns:
        PIL Image containing the noise analysis result

    Raises:
        ValueError: If the image cannot be read.
    """
    # cv2.imread returns None (not an exception) on failure; without this
    # guard the error would surface later as a cryptic cvtColor failure.
    img = cv2.imread(image_path)
    if img is None:
        raise ValueError(f"Could not read image at {image_path}")

    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Gaussian blur estimates the noise-free base image
    blur = cv2.GaussianBlur(gray, (5, 5), 0)

    # The residual (original minus base) is the noise component
    noise = cv2.subtract(gray, blur)

    # Amplify the noise for better visualization (cv2.multiply saturates at 255)
    noise = cv2.multiply(noise, amplification)

    # Apply a colormap so intensity differences are easier to spot
    noise_colored = cv2.applyColorMap(noise, cv2.COLORMAP_JET)

    # Convert BGR (OpenCV) back to RGB (PIL)
    noise_pil = Image.fromarray(cv2.cvtColor(noise_colored, cv2.COLOR_BGR2RGB))

    return noise_pil
424
+
425
+ @app.post("/api/manipulation_likelihood")
426
+ async def api_manipulation_likelihood(file: UploadFile = File(...)):
427
+ """API endpoint for manipulation likelihood analysis"""
428
+ # Save uploaded file
429
+ temp_path = os.path.join(TEMP_DIR, f"temp_manipulation_{random.randint(1000, 9999)}.jpg")
430
+ with open(temp_path, "wb") as f:
431
+ shutil.copyfileobj(file.file, f)
432
+
433
+ try:
434
+ # Run analysis
435
+ result = manipulation_likelihood(temp_path)
436
+
437
+ # Convert image to base64
438
+ result["heatmap_image_base64"] = pil_to_base64(result["heatmap_image"])
439
+ del result["heatmap_image"] # Remove PIL image from JSON response
440
+
441
+ # Return results
442
+ return result
443
+ finally:
444
+ # Clean up the temporary file
445
+ if os.path.exists(temp_path):
446
+ os.remove(temp_path)
447
+
448
def manipulation_likelihood(image_path):
    """
    Simulates a pre-trained model that evaluates the likelihood of image manipulation.
    For demo purposes, this generates a random score with some biasing based on image properties.

    Args:
        image_path: Path to the image file

    Returns:
        Dictionary with keys "probability", "heatmap_image" (PIL image),
        "explanation" and "confidence".
    """
    # Open the image once; the array is reused both for dimensions and as the
    # overlay base (previously the same file was decoded a second time).
    img = np.array(Image.open(image_path).convert('RGB'))

    # Get image dimensions
    height, width = img.shape[:2]

    # In a real implementation a pre-trained model would produce this map;
    # here we synthesize "suspicious" regions for demonstration.
    heatmap = np.zeros((height, width), dtype=np.float32)

    # 1. Add some random regions of interest
    num_regions = random.randint(1, 4)
    for _ in range(num_regions):
        x = random.randint(0, width - 1)
        y = random.randint(0, height - 1)
        radius = random.randint(width // 10, width // 5)

        # Circular region with a Gaussian falloff centred on (x, y)
        y_indices, x_indices = np.ogrid[:height, :width]
        dist_from_center = ((y_indices - y) ** 2 + (x_indices - x) ** 2)
        mask = dist_from_center <= radius ** 2

        # Add to heatmap with random intensity, keeping the max where regions overlap
        intensity = random.uniform(0.5, 1.0)
        heatmap[mask] = np.maximum(heatmap[mask], intensity * np.exp(-dist_from_center[mask] / (2 * (radius / 2) ** 2)))

    # Normalize the heatmap to [0, 1]
    if np.max(heatmap) > 0:
        heatmap = heatmap / np.max(heatmap)

    # Transparent-to-red colormap; the alpha channel marks highlighted pixels
    cmap = LinearSegmentedColormap.from_list('custom', [(0, 0, 0, 0), (1, 0, 0, 0.7)])
    heatmap_rgb = (cmap(heatmap) * 255).astype(np.uint8)

    # Overlay the heatmap on the original image (reusing the array read above
    # instead of decoding the file again)
    overlay = img.copy()

    # Blend red into highlighted pixels; darken green/blue there for contrast
    for c in range(3):
        if c == 0:  # Red channel
            overlay[:, :, c] = np.where(heatmap_rgb[:, :, 3] > 0,
                                        (overlay[:, :, c] * 0.5 + heatmap_rgb[:, :, 0] * 0.5).astype(np.uint8),
                                        overlay[:, :, c])
        else:  # Green and blue channels - reduce them in highlighted areas
            overlay[:, :, c] = np.where(heatmap_rgb[:, :, 3] > 0,
                                        (overlay[:, :, c] * 0.5).astype(np.uint8),
                                        overlay[:, :, c])

    # Bias the demo probability using EXIF indicators and the file extension
    # (a real implementation would take this from the model output)
    exif_result = extract_exif_metadata(image_path)
    exif_factor = 0.3 if exif_result["indicators"] else 0.0

    # Slightly bias probability based on file characteristics for the demo
    img_factor = 0.1 if ".jpg" in image_path.lower() else 0.0

    # Combine factors with a random component, capped below certainty
    base_probability = random.uniform(0.2, 0.8)
    manipulation_probability = min(0.95, base_probability + exif_factor + img_factor)

    overlay_image = Image.fromarray(overlay)

    return {
        "probability": manipulation_probability,
        "heatmap_image": overlay_image,
        "explanation": get_probability_explanation(manipulation_probability),
        "confidence": "medium" if 0.3 < manipulation_probability < 0.7 else "high"
    }
535
+
536
def get_probability_explanation(prob):
    """Returns an explanation text based on the manipulation probability"""
    # Check from the most severe band downward.
    if prob >= 0.6:
        return "Strong indicators of digital manipulation detected in this image."
    if prob >= 0.3:
        return "Some inconsistencies detected that might indicate limited manipulation."
    return "The image appears to be authentic with no significant signs of manipulation."
544
+
545
def get_clone_explanation(count):
    """Returns an explanation based on the number of clone matches found"""
    if count == 0:
        return "No copy-paste manipulations detected in the image."
    # Small counts are likely noise; large counts suggest real tampering.
    return ("Few potential copy-paste regions detected, might be false positives."
            if count < 10 else
            "Significant number of copy-paste regions detected, suggesting manipulation.")
553
+
554
+ @app.post("/api/analyze_image")
555
+ async def api_analyze_image(file: UploadFile = File(...)):
556
+ """Main API endpoint for complete image analysis"""
557
+ # Save uploaded file
558
+ temp_path = os.path.join(TEMP_DIR, f"temp_analyze_{random.randint(1000, 9999)}.jpg")
559
+ with open(temp_path, "wb") as f:
560
+ shutil.copyfileobj(file.file, f)
561
+
562
+ try:
563
+ # Read the image as PIL
564
+ image = Image.open(temp_path)
565
+
566
+ # Run all analyses
567
+ exif_result = extract_exif_metadata(temp_path)
568
+ manipulation_result = manipulation_likelihood(temp_path)
569
+ clone_result, clone_count = detect_clones(temp_path)
570
+
571
+ # Compile combined analysis text
572
+ analysis_text = f"""
573
+ ## Manipulation Analysis Results
574
+
575
+ **Overall Assessment: {manipulation_result['probability']*100:.1f}% likelihood of manipulation**
576
+
577
+ {manipulation_result['explanation']}
578
+
579
+ ### Clone Detection Analysis:
580
+ Found {clone_count} potential cloned regions in the image.
581
+ {get_clone_explanation(clone_count)}
582
+
583
+ ### EXIF Metadata Analysis:
584
+ {exif_result['summary']}
585
+
586
+ Indicators found: {len(exif_result['indicators'])}
587
+ """
588
+
589
+ if exif_result['indicators']:
590
+ analysis_text += "\nDetailed indicators:\n"
591
+ for indicator in exif_result['indicators']:
592
+ analysis_text += f"- {indicator}\n"
593
+
594
+ # Return complete result object
595
+ return {
596
+ "manipulation_probability": manipulation_result["probability"],
597
+ "analysis_text": analysis_text,
598
+ "exif_data": exif_result["metadata"],
599
+ "clone_count": clone_count,
600
+ "original_image": pil_to_base64(image),
601
+ "ela_image": pil_to_base64(error_level_analysis(temp_path)),
602
+ "noise_image": pil_to_base64(noise_analysis(temp_path)),
603
+ "heatmap_image": pil_to_base64(manipulation_result["heatmap_image"]),
604
+ "clone_image": pil_to_base64(clone_result)
605
+ }
606
+ finally:
607
+ # Clean up the temporary file
608
+ if os.path.exists(temp_path):
609
+ os.remove(temp_path)
610
+
611
# Health check endpoint for container / orchestrator probes
@app.get("/health")
async def health_check():
    """Health check endpoint for monitoring"""
    return dict(status="healthy", service="Image Forensic API")
616
+
617
# Root endpoint describing the API surface
@app.get("/")
async def root():
    """Root endpoint with API information"""
    available_endpoints = [
        "/api/analyze_image",
        "/api/detect_clones",
        "/api/error_level_analysis",
        "/api/extract_exif_metadata",
        "/api/noise_analysis",
        "/api/manipulation_likelihood"
    ]
    return {
        "name": "Image Forensic & Fraud Detection API",
        "version": "1.0.0",
        "endpoints": available_endpoints,
        "docs": "/docs"
    }
634
+
635
+ if __name__ == "__main__":
636
+ import uvicorn
637
+ uvicorn.run(app, host="0.0.0.0", port=7860)