AMontiB commited on
Commit
fc8df74
·
1 Parent(s): df74620
.DS_Store ADDED
Binary file (6.15 kB). View file
 
CFA.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+ from PIL import Image, ImageDraw
4
+ import matplotlib.pyplot as plt
5
+ from scipy.signal import convolve2d
6
+ import time
7
+ np.random.seed(123)
8
+
9
+ # --- Configuration ---
10
+ BOX_SIZE = 128
11
+
12
+ # --- Helper Functions (GetMap, getFourier) ---
13
+ # These computational functions remain the same as before.
14
def GetMap(image_channel):
    """Estimate a CFA-interpolation probability map for one image channel.

    Iteratively re-weighted least squares (EM-like): alternately fits a
    4-neighbour linear prediction kernel ``alpha`` and recomputes per-pixel
    weights from the prediction residual.

    Parameters: image_channel — 2-D array (H, W) of pixel values.
    Returns: flattened probability map ``e`` for the (H-2, W-2) interior.
    """
    f = image_channel.astype(np.float64)
    alpha = np.random.rand(4)   # initial kernel weights (N, W, E, S neighbours)
    sigma = 0.005               # residual variance estimate, updated each pass
    delta = 10
    count = 0
    max_iter = 30
    tolerance = 0.01
    while True:
        count += 1
        kernel = np.array([[0, alpha[0], 0], [alpha[1], 0, alpha[2]], [0, alpha[3], 0]])
        filtered = convolve2d(f, kernel, mode='same', boundary='symm')
        r = np.abs(f - filtered) / 255.0
        r = r[1:-1, 1:-1]
        e = np.exp(-r**2 / sigma)
        w = e / (e + 1 / delta)
        if np.sum(w) < 1e-6:
            print("Warning: Sum of weights is near zero. Exiting GetMap early.")
            return e.ravel()
        w_flat = w.ravel()
        # 4-neighbour samples (up, left, right, down) for every interior pixel.
        value1 = f[:-2, 1:-1].ravel()
        value2 = f[1:-1, :-2].ravel()
        value3 = f[1:-1, 2:].ravel()
        value4 = f[2:, 1:-1].ravel()
        Y = f[1:-1, 1:-1].ravel()
        X = np.column_stack((value1, value2, value3, value4))
        # Weighted least squares for the new kernel.  The original code
        # materialised W = np.diag(w_flat), a dense n x n matrix (~2 GB for a
        # 128x128 patch); scaling the rows of X by the weights is algebraically
        # identical (X.T @ diag(w) @ X == X.T @ (X * w[:, None])) and O(n).
        # np.linalg.solve is also more stable than forming the inverse.
        Xw = X * w_flat[:, None]
        try:
            alpha_new = np.linalg.solve(X.T @ Xw, X.T @ (w_flat * Y))
        except np.linalg.LinAlgError:
            print("Warning: Singular matrix encountered. Cannot compute inverse.")
            return e.ravel()
        if np.linalg.norm(alpha - alpha_new) < tolerance or count > max_iter:
            break
        alpha = alpha_new
        sigma = np.sum(w * (r**2)) / np.sum(w)
    return e.ravel()
47
+
48
def getFourier(prob):
    """Binarised, centred magnitude spectrum of a probability map.

    Returns a uint8 mask that is 1 where the shifted FFT magnitude exceeds
    half of its maximum, 0 elsewhere.
    """
    spectrum = np.abs(np.fft.fftshift(np.fft.fft2(prob)))
    peak = np.max(spectrum)
    if peak > 0:
        # Scale to the 0..255 range before quantising.
        spectrum = spectrum / peak * 255
    quantised = spectrum.astype(np.uint8)
    return (quantised > (0.5 * 255)).astype(np.uint8)
56
+
57
+ # --- New Gradio Interaction Functions ---
58
+
59
def draw_box_on_image(image: np.ndarray, box_coords: tuple, color="red", width=3) -> np.ndarray:
    """Return a copy of *image* with a BOX_SIZE square outlined at *box_coords*."""
    left, top = box_coords
    canvas = Image.fromarray(image)
    painter = ImageDraw.Draw(canvas)
    painter.rectangle((left, top, left + BOX_SIZE, top + BOX_SIZE), outline=color, width=width)
    return np.array(canvas)
67
+
68
def on_upload_image(image: np.ndarray) -> tuple:
    """Handle a fresh upload: remember the original and show the default box.

    Returns (preview with box for display, pristine image for state,
    initial box origin for state).
    """
    start = (0, 0)
    preview = draw_box_on_image(image, start)
    return preview, image, start
74
+
75
def move_selection_box(original_image: np.ndarray, evt: gr.SelectData) -> tuple:
    """Called when the user clicks the image. It moves the box to the clicked location.

    The box is centred on the click, then clamped so it never leaves the image.
    Returns (image with box for display, new (x, y) coords for state).
    """
    # Center the box on the user's click
    x = evt.index[0] - BOX_SIZE // 2
    y = evt.index[1] - BOX_SIZE // 2

    # Clamp coordinates to ensure the box stays within the image boundaries.
    # shape[:2] works for both RGB and 2-D (grayscale) uploads; the original
    # `h, w, _ = shape` unpacking crashed on grayscale images, which the
    # analysis path explicitly supports.
    img_h, img_w = original_image.shape[:2]
    x = max(0, min(x, img_w - BOX_SIZE))
    y = max(0, min(y, img_h - BOX_SIZE))

    new_coords = (int(x), int(y))
    image_with_box = draw_box_on_image(original_image, new_coords)
    return image_with_box, new_coords
90
+
91
def analyze_region(original_image: np.ndarray, box_coords: tuple):
    """The main analysis function, triggered by the 'Analyze' button.

    Extracts the selected patch, runs the CFA probability-map estimate on it,
    and returns a 3-panel matplotlib figure (patch, map, Fourier mask).
    """
    if original_image is None:
        gr.Warning("Please upload an image first!")
        return None

    print(f"\n--- Analysis Started for region at {box_coords} ---")
    start_time = time.time()

    x, y = box_coords
    patch = original_image[y:y + BOX_SIZE, x:x + BOX_SIZE]
    print(f"1. Patch extracted with shape: {patch.shape}")

    # Analyse the green channel of colour patches, raw values for grayscale.
    if len(patch.shape) == 3:
        analysis_channel = patch[:, :, 1]
    else:
        analysis_channel = patch

    print("2. Computing probability map...")
    prob_flat = GetMap(analysis_channel)
    # GetMap returns values for the interior only, hence the -2 on each axis.
    interior_shape = (analysis_channel.shape[0] - 2, analysis_channel.shape[1] - 2)
    prob_map = prob_flat.reshape(interior_shape)

    print("3. Computing Fourier transform...")
    fft_result = getFourier(prob_map)

    # Plotting
    fig, axs = plt.subplots(1, 3, figsize=(12, 4))
    panels = (
        (patch, None, "Selected 128x128 Patch"),
        (prob_map, 'gray', "Probability Map"),
        (fft_result, 'gray', "Fourier Transform"),
    )
    for ax, (img, cmap, title) in zip(axs, panels):
        ax.imshow(img, cmap=cmap)
        ax.set_title(title)
        ax.axis("off")
    plt.tight_layout()

    print(f"4. Analysis complete in {time.time() - start_time:.2f} seconds.")
    return fig
124
+
125
+ # --- Build the Gradio Interface in a function ---
126
def create_ui():
    """Assemble the CFA-analysis tab: canvas, result plot, state, and wiring."""
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        # State components persist data (original image, box origin) between
        # user interactions.
        original_state = gr.State()
        coords_state = gr.State(value=(0, 0))

        gr.Markdown("# 🖼️ Image Patch Analyzer (CFA)")
        gr.Markdown(
            "**Instructions:**\n"
            "1. **Upload** an image.\n"
            "2. **Click** anywhere on the image to move the 128x128 selection box.\n"
            "3. Press the **Analyze Region** button to start processing."
        )

        with gr.Row():
            canvas = gr.Image(type="numpy", label="Selection Canvas")
            results = gr.Plot(label="Analysis Results")

        run_btn = gr.Button("Analyze Region", variant="primary")

        # Upload -> seed the state and draw the default box.
        canvas.upload(
            fn=on_upload_image,
            inputs=[canvas],
            outputs=[canvas, original_state, coords_state],
        )
        # Click -> reposition the selection box.
        canvas.select(
            fn=move_selection_box,
            inputs=[original_state],
            outputs=[canvas, coords_state],
        )
        # Button -> run the analysis with a full progress indicator.
        run_btn.click(
            fn=analyze_region,
            inputs=[original_state, coords_state],
            outputs=[results],
            show_progress="full",
        )
    return demo
171
+
172
+ # --- Remove the launch() call ---
173
+ # if __name__ == "__main__":
174
+ # demo = create_ui()
175
+ # demo.launch()
JPEG_Ghost.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+ import imageio.v2 as imageio
4
+ import matplotlib.pyplot as plt
5
+ import os
6
+ import tempfile
7
+ from PIL import Image, ImageDraw
8
+
9
+ # --- Configuration ---
10
+ BOX_SIZE = 256
11
+
12
+ # --- Core Analysis & Helper Functions ---
13
+
14
def draw_box_on_image(image: np.ndarray, box_coords: tuple, color="red", width=3) -> np.ndarray:
    """Outline a BOX_SIZE square at *box_coords* on a copy of *image*."""
    x0, y0 = box_coords
    pil_img = Image.fromarray(image)
    ImageDraw.Draw(pil_img).rectangle(
        (x0, y0, x0 + BOX_SIZE, y0 + BOX_SIZE), outline=color, width=width
    )
    return np.array(pil_img)
22
+
23
def diff_maps(im: np.ndarray, qf_range: np.ndarray, temp_dir: str) -> np.ndarray:
    """Recompress *im* at each quality factor and stack the squared differences.

    For every QF the image is written to a scratch JPEG, read back, and the
    per-pixel squared error is recorded (averaged over channels for colour
    input).  Returns an (H, W, len(qf_range)) float32 array.
    """
    if im.dtype != np.uint8:
        im = im.astype(np.uint8)
    maps = np.zeros((im.shape[0], im.shape[1], len(qf_range)), dtype=np.float32)
    scratch_path = os.path.join(temp_dir, 'temp_recompress.jpg')
    for i, quality in enumerate(qf_range):
        imageio.imwrite(scratch_path, im, format='JPEG', quality=int(quality))
        recompressed = imageio.imread(scratch_path)
        squared_err = (im.astype(np.float32) - recompressed.astype(np.float32)) ** 2
        if squared_err.ndim == 3:
            # Collapse colour channels into one error map.
            squared_err = np.mean(squared_err, axis=2)
        maps[:, :, i] = squared_err
    return maps
37
+
38
def on_upload_image(image: np.ndarray) -> tuple:
    """Called when an image is first uploaded to draw the initial box."""
    origin = (0, 0)
    return draw_box_on_image(image, origin), image, origin
43
+
44
def move_selection_box(original_image: np.ndarray, evt: gr.SelectData) -> tuple:
    """Called when the user clicks the image to move the box.

    Centres the box on the click, clamps it inside the image, and returns
    (image with box for display, new (x, y) coords for state).
    """
    x = evt.index[0] - BOX_SIZE // 2
    y = evt.index[1] - BOX_SIZE // 2
    # shape[:2] works for both colour and 2-D (grayscale) uploads; the
    # original `h, w, _ = shape` unpacking crashed on grayscale images.
    img_h, img_w = original_image.shape[:2]
    x = max(0, min(x, img_w - BOX_SIZE))
    y = max(0, min(y, img_h - BOX_SIZE))
    new_coords = (int(x), int(y))
    image_with_box = draw_box_on_image(original_image, new_coords)
    return image_with_box, new_coords
54
+
55
def run_analysis(original_image: np.ndarray, box_coords: tuple, qf1: int, qf2: int, qf_start: int, qf_end: int):
    """Build a double-compressed composite and plot its JPEG-ghost maps.

    The background is saved at QF1, the selected patch is pasted back at
    original quality, the result is re-saved at QF2, and difference maps are
    computed over [qf_start, qf_end] in steps of 5.

    Returns (composite image, matplotlib figure).
    Raises gr.Error on missing input or an invalid QF range.
    """
    if original_image is None:
        raise gr.Error("Please upload an image first.")
    if qf_start >= qf_end:
        raise gr.Error("Analysis QF Start must be less than QF End.")
    if box_coords is None:
        # The coords state has no default value, so it is None until the
        # first upload/click; fall back to the top-left corner.
        box_coords = (0, 0)

    # JPEG cannot store an alpha channel: drop it so imwrite() below does not
    # fail on RGBA uploads (e.g. PNGs with transparency).
    if original_image.ndim == 3 and original_image.shape[2] == 4:
        original_image = original_image[:, :, :3]

    with tempfile.TemporaryDirectory() as temp_dir:
        x, y = box_coords
        patch_coords = (x, y, x + BOX_SIZE, y + BOX_SIZE)

        # 1) Save the whole image at QF1 (the "background" quality).
        path_qf1 = os.path.join(temp_dir, 'temp1.jpg')
        path_composite = os.path.join(temp_dir, 'composite.jpg')
        imageio.imwrite(path_qf1, original_image, quality=int(qf1))
        im_low_q = imageio.imread(path_qf1)

        # 2) Paste the untouched patch back over the recompressed background.
        xmin, ymin, xmax, ymax = patch_coords
        im_low_q[ymin:ymax, xmin:xmax] = original_image[ymin:ymax, xmin:xmax]

        # 3) Re-save the spliced result at QF2 -> double-compressed composite.
        imageio.imwrite(path_composite, im_low_q, quality=int(qf2))
        im_composite = imageio.imread(path_composite)

        qf_values = np.arange(int(qf_start), int(qf_end) + 1, 5)
        if len(qf_values) == 0:
            raise gr.Error("The selected QF range is empty.")

        diffs = diff_maps(im_composite, qf_values, temp_dir)
        diffs = np.clip(diffs, 0, 255)

        # 4) One difference panel per analysis quality factor, 4 per row.
        num_plots = diffs.shape[2]
        cols = 4
        rows = int(np.ceil(num_plots / cols))
        fig, axes = plt.subplots(rows, cols, figsize=(16, 4 * rows), squeeze=False)
        fig.suptitle('Difference Images for Different Recompression Quality Factors', fontsize=16)
        axes = axes.flatten()
        for i in range(num_plots):
            axes[i].imshow(diffs[:, :, i], cmap='gray', vmin=0, vmax=np.percentile(diffs, 99))
            axes[i].set_title(f'QF = {qf_values[i]}')
            axes[i].axis('off')
        for i in range(num_plots, len(axes)):
            # Hide any unused grid cells.
            axes[i].axis('off')
        plt.tight_layout(rect=[0, 0.03, 1, 0.95])

    return im_composite, fig
98
+
99
+ # --- Build the Gradio Interface in a function ---
100
+
101
def create_ui():
    """Assemble the JPEG-ghost tab: inputs column, results column, wiring."""
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 🕵️ JPEG Double Compression Analyzer")
        gr.Markdown(
            "**Instructions:**\n"
            "1. **Upload** an image.\n"
            "2. **Click** on the image to move the 256x256 selection box.\n"
            "3. Press **Analyze Image** to process the selected region."
        )

        # Persist the pristine upload and the current box origin.
        original_state = gr.State()
        coords_state = gr.State()

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### 1. Inputs")
                canvas = gr.Image(type="numpy", label="Upload Image & Click to Select")
                qf1 = gr.Slider(minimum=1, maximum=100, value=70, step=1, label="QF1: Background Quality")
                qf2 = gr.Slider(minimum=1, maximum=100, value=85, step=1, label="QF2: Final Composite Quality")
                gr.Markdown("#### Analysis QF Range")
                with gr.Row():
                    qf_lo = gr.Slider(minimum=50, maximum=100, value=50, step=5, label="Start")
                    qf_hi = gr.Slider(minimum=50, maximum=100, value=90, step=5, label="End")
                run_btn = gr.Button("Analyze Image", variant="primary")

            with gr.Column(scale=2):
                gr.Markdown("### 2. Results")
                composite_view = gr.Image(type="numpy", label="Generated Composite Image")
                ghost_plot = gr.Plot(label="Difference Maps")

        # Upload seeds the state; clicking moves the box; the button analyzes.
        canvas.upload(
            fn=on_upload_image,
            inputs=[canvas],
            outputs=[canvas, original_state, coords_state],
        )
        canvas.select(
            fn=move_selection_box,
            inputs=[original_state],
            outputs=[canvas, coords_state],
        )
        run_btn.click(
            fn=run_analysis,
            inputs=[original_state, coords_state, qf1, qf2, qf_lo, qf_hi],
            outputs=[composite_view, ghost_plot],
        )
    return demo
150
+
151
+ # --- Remove the launch() call ---
152
+ # if __name__ == "__main__":
153
+ # demo = create_ui()
154
+ # demo.launch(debug=True)
PRNU.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+ import matplotlib.pyplot as plt
4
+ import imageio.v2 as imageio
5
+ import tempfile
6
+ import os
7
+
8
+ # --- Import your custom source files ---
9
+ # This assumes a 'utils' directory is in the root of the HF Space
10
try:
    import utils.src.Functions as Fu
    import utils.src.Filter as Ft
    import utils.src.maindir as md
except ImportError:
    print("Warning: Could not import 'utils.src' modules.")
    print("Please ensure the 'utils' directory is present in your repository.")

    # Stand-ins so the app can still load; any use fails loudly, naming
    # the missing symbol.
    class DummyModule:
        """Raises ImportError on any attribute call."""
        def __getattr__(self, name):
            def dummy_func(*args, **kwargs):
                raise ImportError(f"Module 'utils.src' not loaded. '{name}' is unavailable.")
            return dummy_func

    Fu = DummyModule()
    Ft = DummyModule()
    md = DummyModule()
26
+
27
+
28
+ # --- App Description ---
29
+ description = """
30
+ # 📸 PRNU-Based Image Forgery Detector
31
+
32
+ This tool analyzes an image to detect potential manipulations using Photo-Response Non-Uniformity (PRNU), a unique noise pattern that acts as a camera's fingerprint.
33
+
34
+ ## How it Works
35
+ 1. **Camera Fingerprint**: Every digital camera sensor has a unique, systematic noise pattern called PRNU. We use a pre-extracted fingerprint file (`.dat`) for a specific camera.
36
+ 2. **Image Analysis**: The tool extracts the noise residual from the uploaded image.
37
+ 3. **Correlation**: It then compares the image's noise with the camera's fingerprint by calculating the Peak-to-Correlation Energy (PCE) across different blocks of the image.
38
+ 4. **PCE Map**: The output PCE map visualizes this correlation. High PCE values (brighter areas) suggest that this part of the image was likely taken by the fingerprinted camera. Dark or inconsistent areas could indicate tampering or that the image was taken with a different device.
39
+
40
+ ![Diagram of the PRNU process](https://www.researchgate.net/profile/Feyisetan-Ojerinde/publication/323982885/figure/fig1/AS:607997380104192@1521971778119/Block-diagram-of-PRNU-based-source-camera-identification-process.png)
41
+
42
+ ## How to Use
43
+ 1. Upload the camera's fingerprint file (`.dat` format).
44
+ 2. Upload the JPG/PNG image you want to analyze.
45
+ 3. Click **Submit** and view the resulting PCE map.
46
+ """
47
+
48
+ # --- Main Analysis Function ---
49
def analyze_image_forgery(fingerprint_file, input_image):
    """
    Processes an image against a camera fingerprint to generate a PCE map.

    Parameters:
        fingerprint_file: uploaded .dat file object (text matrix, read with
            np.genfromtxt).
        input_image: uploaded image as a NumPy array.
    Returns: (PCE-map figure, analyzed-image figure).
    Raises: gr.Error on missing input or any analysis failure.
    """
    if fingerprint_file is None:
        raise gr.Error("Please upload a camera fingerprint (.dat file).")
    if input_image is None:
        raise gr.Error("Please upload an image to analyze.")

    try:
        # --- 1. Load Camera Fingerprint ---
        print("Loading camera fingerprint...")
        Fingerprint = np.genfromtxt(fingerprint_file.name)
        print(f"Fingerprint loaded. Shape: {Fingerprint.shape}")

        # --- 2. Save uploaded image to a temporary file ---
        with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as temp_img_file:
            temp_img_path = temp_img_file.name
            imageio.imwrite(temp_img_path, input_image)

        # --- 3. Extract and filter PRNU noise from the image ---
        # try/finally guarantees the temp file is removed even if the
        # extraction raises (the original leaked the file on that path).
        try:
            print("Extracting noise from image...")
            Noisex = Ft.NoiseExtractFromImage(temp_img_path, sigma=2.)
            Noisex = Fu.WienerInDFT(Noisex, np.std(Noisex))
            print(f"Noise extracted. Shape: {Noisex.shape}")
        finally:
            os.remove(temp_img_path)

        # --- 4. Align Fingerprint and PRNU sizes by padding if necessary ---
        if Noisex.shape != Fingerprint.shape:
            print("Shapes do not match. Padding PRNU noise to match fingerprint size.")
            Noisex_padded = np.zeros_like(Fingerprint)
            h = min(Noisex.shape[0], Fingerprint.shape[0])
            w = min(Noisex.shape[1], Fingerprint.shape[1])
            Noisex_padded[:h, :w] = Noisex[:h, :w]
            Noisex = Noisex_padded

        # --- 5. Compute PCE Map in blocks ---
        print("Computing PCE map...")
        block_size = 64
        blocks_x = np.arange(0, Noisex.shape[0], block_size)
        blocks_y = np.arange(0, Noisex.shape[1], block_size)
        PCE_map = np.zeros((len(blocks_x), len(blocks_y)))

        for y_idx, y_start in enumerate(blocks_y):
            for x_idx, x_start in enumerate(blocks_x):
                block_Noisex = Noisex[x_start:x_start+block_size, y_start:y_start+block_size]
                block_Fingerprint = Fingerprint[x_start:x_start+block_size, y_start:y_start+block_size]

                # Skip if blocks are not of the expected size (can happen at edges)
                if block_Noisex.shape != (block_size, block_size):
                    continue

                C = Fu.crosscorr(block_Noisex, block_Fingerprint)
                det, _ = md.PCE(C)
                PCE_map[x_idx, y_idx] = det.get('PCE', 0)  # Use .get for safety

        print("PCE map computed successfully.")

        # --- 6. Generate Output Plots ---
        # Plot 1: PCE Map
        fig1, ax1 = plt.subplots(figsize=(8, 6))
        im = ax1.imshow(PCE_map, cmap='viridis')
        ax1.set_title('Detection PCE-map')
        fig1.colorbar(im, ax=ax1, label='PCE Value')

        # Plot 2: Original Image
        fig2, ax2 = plt.subplots(figsize=(8, 6))
        ax2.imshow(input_image)
        ax2.set_title('Analyzed Image')
        ax2.axis('off')

        return fig1, fig2

    except ImportError as e:
        print(f"ImportError: {e}")
        raise gr.Error("Missing 'utils' module. Please ensure the 'utils' directory is in the repository.")
    except Exception as e:
        print(f"An error occurred: {e}")
        raise gr.Error(f"An error occurred during analysis: {e}")
130
+
131
+
132
+ # --- Create the Gradio Interface in a function ---
133
def create_ui():
    """Build the PRNU tab as a simple gr.Interface around the analyzer."""
    input_widgets = [
        gr.File(label="Upload Camera Fingerprint (.dat file)"),
        gr.Image(type="numpy", label="Upload Image to Analyze"),
    ]
    output_widgets = [
        gr.Plot(label="PCE Map"),
        gr.Plot(label="Analyzed Image"),
    ]
    return gr.Interface(
        fn=analyze_image_forgery,
        inputs=input_widgets,
        outputs=output_widgets,
        title="📸 PRNU-Based Image Forgery Detector",
        description=description,
        theme=gr.themes.Soft(),
    )
149
+
150
+ # --- Remove the launch() call ---
151
+ # if __name__ == "__main__":
152
+ # iface = create_ui()
153
+ # iface.launch()
README.md CHANGED
@@ -1,13 +1,26 @@
 
 
 
 
 
 
1
  ---
2
- title: Cfa Forensics Tool
3
- emoji: 📈
4
- colorFrom: blue
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 5.49.1
8
- app_file: app.py
9
- pinned: false
10
- license: other
11
- ---
12
 
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Digital Image Forensics Toolkit 🕵️‍♂️
2
+
3
+ This space provides a collection of fundamental digital forensic algorithms designed to detect "cheap fakes"—image manipulations created with standard editing software.
4
+
5
+ These methods are effective for identifying traditional forgeries such as splicing, copy-move, and inconsistent lighting. Select a tool from the tabs above to get started.
6
+
7
  ---
 
 
 
 
 
 
 
 
 
 
8
 
9
+ ## Forensic Tools
10
+
11
+ This toolkit includes the following analysis methods:
12
+
13
+ ### 🎨 Color Filter Array Analysis (`CFA.py`)
14
+ Analyzes artifacts introduced during the camera's raw image processing. Inconsistencies in the **Color Filter Array (CFA)** interpolation pattern can reveal areas that have been spliced from another image or copy-pasted within the same image (copy-move).
15
+
16
+ ### 👻 JPEG Ghost Detection (`JPEG_Ghost.py`)
17
+ Detects forgeries by identifying regions within an image that were compressed with different **JPEG Quality Factors (QF)**. When an area from a different JPEG image is spliced in, it often carries the "ghost" of its original compression level, which this tool can reveal.
18
+
19
+ ### 📸 PRNU Analysis (`PRNU.py`)
20
+ Uses the **Photo Response Non-Uniformity (PRNU)** pattern, a unique noise fingerprint inherent to every digital camera sensor. By comparing the PRNU of an image against a camera's known fingerprint, this tool can identify tampered regions (splicing, copy-move) that do not share the same origin. While typically used for source camera identification, local correlation analysis makes it a powerful forgery detector.
21
+
22
+ ### ☀️ Shadow Consistency Analysis (`shadows.py`)
23
+ A utility for verifying geometric consistency of shadows in an image. By projecting vanishing points, it helps determine if all shadows correspond to a single, coherent light source. This method is based on principles of perspective and can be useful for analyzing both traditional manipulations and AI-generated images.
24
+
25
+
26
+
app.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+ # Import the UI-creation functions from your tool scripts
4
+ import CFA as CFA_tool
5
+ import JPEG_Ghost as JPEG_Ghost_tool
6
+ import PRNU as PRNU_tool
7
+ import shadow as shadows_tool
8
+
9
# Create the tabbed interface: one tab per forensic tool.
demo = gr.TabbedInterface(
    interface_list=[
        CFA_tool.create_ui(),
        JPEG_Ghost_tool.create_ui(),
        PRNU_tool.create_ui(),
        shadows_tool.build_gradio_interface(),
    ],
    tab_names=[
        "🎨 CFA Analysis",
        "👻 JPEG Ghost",
        "📸 PRNU Analysis",
        "☀️ Shadow Analysis",
    ],
    title="Digital Image Forensics Toolkit 🕵️‍♂️",
)

# Launch only when run as a script (hosting platforms may import this module).
if __name__ == "__main__":
    demo.launch()
requirements.txt ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ asttokens==2.4.1
2
+ backcall==0.2.0
3
+ colorama==0.4.6
4
+ comm==0.2.2
5
+ contourpy==1.1.1
6
+ cycler==0.12.1
7
+ debugpy==1.8.8
8
+ decorator==5.1.1
9
+ executing==2.1.0
10
+ fonttools==4.55.0
11
+ imagecodecs==2023.3.16
12
+ imageio==2.35.1
13
+ importlib-metadata==8.5.0
14
+ importlib-resources==6.4.5
15
+ ipykernel==6.29.5
16
+ ipython==8.12.3
17
+ jedi==0.19.2
18
+ jupyter-client==8.6.3
19
+ jupyter-core==5.7.2
20
+ kiwisolver==1.4.7
21
+ lazy-loader==0.4
22
+ matplotlib==3.7.5
23
+ matplotlib-inline==0.1.7
24
+ nest-asyncio==1.6.0
25
+ networkx==3.1
26
+ numpy==1.24.4
27
+ opencv-python==4.10.0.84
28
+ packaging==24.2
29
+ parso==0.8.4
30
+ pickleshare==0.7.5
31
+ pillow==10.4.0
32
+ platformdirs==4.3.6
33
+ prompt-toolkit==3.0.48
34
+ psutil==6.1.0
35
+ pure-eval==0.2.3
36
+ pygments==2.18.0
37
+ pyparsing==3.1.4
38
+ python-dateutil==2.9.0.post0
39
+ PyWavelets==1.4.1
40
+ scikit-image==0.21.0
41
+ scipy==1.10.1
42
+ six==1.16.0
43
+ tornado==6.4.1
44
+ gradio
45
+ imageio
shadow.py ADDED
@@ -0,0 +1,339 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Gradio app module for interactive vanishing-point selection.
3
+ This file is intended to be imported by app.py.
4
+ """
5
+
6
+ import io
7
+ import math
8
+ import numpy as np
9
+ from PIL import Image, ImageDraw, ImageFont
10
+ import gradio as gr
11
+ from scipy.optimize import minimize
12
+
13
+ # ------------------------ Helper math functions ---------------------------
14
+
15
def build_line_from_points(p1, p2):
    """Return line coefficients (A, B, C) for Ax + By + C = 0 given two points."""
    (x1, y1), (x2, y2) = p1, p2
    # Homogeneous line through two points (cross-product form).
    return np.array([y1 - y2, x2 - x1, x1 * y2 - y1 * x2], dtype=float)
23
+
24
+
25
def distance_point_to_line(pt, line):
    """Perpendicular distance from point *pt* to the line Ax + By + C = 0."""
    px, py = pt
    a, b, c = line
    # |A*x + B*y + C| / sqrt(A^2 + B^2)
    return abs(a * px + b * py + c) / math.hypot(a, b)
29
+
30
+
31
def total_distances(x, lines, noise_lines):
    """Sum of distances from candidate point x to all lines and noise lines."""
    pt = x
    total = 0.0
    for group in (lines, noise_lines):
        for line in group:
            total += distance_point_to_line(pt, line)
    return total
40
+
41
+
42
def add_noise_lines_for_line(p1, p2, n=4, sigma=1.0):
    """Create a list of "noise" lines by jittering the endpoints slightly."""
    def _jitter(p):
        # Perturb a point with isotropic Gaussian noise of scale *sigma*.
        return (p[0] + np.random.normal(0, sigma), p[1] + np.random.normal(0, sigma))

    return [build_line_from_points(_jitter(p1), _jitter(p2)) for _ in range(n)]
50
+
51
+ # ------------------------- Drawing utilities ------------------------------
52
+
53
def draw_overlay(base_pil, yellow_lines, red_lines, yellow_points, red_points, vps=None):
    """Return a new PIL image with overlays drawn: lines, points and vanishing points.

    - yellow_lines, red_lines: lists of line coefficients (A, B, C)
    - yellow_points, red_points: lists of endpoint pairs (p1, p2), one per line
    - vps: optional dict with keys 'yellow' and 'red' mapping to (x, y)
      vanishing points (either may be None)
    """
    if isinstance(base_pil, np.ndarray):
        img = Image.fromarray(base_pil).convert("RGBA")
    else:
        img = base_pil.copy().convert("RGBA")

    draw = ImageDraw.Draw(img)

    def draw_point(pt, color, r=4):
        # Filled circle of radius r centred on pt.
        x, y = pt
        draw.ellipse((x - r, y - r, x + r, y + r), fill=color, outline=color)

    # For each group: extend the fitted line across the image bounds and mark
    # both user-clicked endpoints.  (The original defined an unused dashed-line
    # helper and unused loop variables; both removed as dead code.)
    for (p1, p2), line in zip(yellow_points, yellow_lines):
        draw_line_segment_from_line(line, img.size, color=(255, 215, 0, 200), draw=draw)
        draw_point(p1, (255, 215, 0, 255))
        draw_point(p2, (255, 215, 0, 255))

    for (p1, p2), line in zip(red_points, red_lines):
        draw_line_segment_from_line(line, img.size, color=(255, 64, 64, 200), draw=draw)
        draw_point(p1, (255, 64, 64, 255))
        draw_point(p2, (255, 64, 64, 255))

    # Highlight computed vanishing points with larger markers.
    if vps is not None:
        if vps.get("yellow") is not None:
            draw_point(vps["yellow"], (255, 215, 0, 255), r=6)
        if vps.get("red") is not None:
            draw_point(vps["red"], (255, 64, 64, 255), r=6)

    return img.convert("RGB")
111
+
112
+
113
def draw_line_segment_from_line(line, image_size, draw=None, color=(255, 255, 0, 255)):
    """Given line coefficients and image size, draw a segment across the image bounds.
    This draws directly using ImageDraw if 'draw' is provided.
    """
    W, H = image_size
    a, b, c = line

    candidates = []
    if abs(b) > 1e-9:
        # Intersections with the vertical edges x = 0 and x = W.
        candidates.append((0, -(a * 0 + c) / b))
        candidates.append((W, -(a * W + c) / b))
    if abs(a) > 1e-9:
        # Intersections with the horizontal edges y = 0 and y = H.
        candidates.append((-(b * 0 + c) / a, 0))
        candidates.append((-(b * H + c) / a, H))

    # Keep only intersections within (slightly padded) image bounds.
    inside = [
        (x, y)
        for (x, y) in candidates
        if -W * 0.1 <= x <= W * 1.1 and -H * 0.1 <= y <= H * 1.1
    ]
    if len(inside) >= 2 and draw is not None:
        # Connect the two extreme points (sorted by x, then y).
        inside.sort(key=lambda p: (p[0], p[1]))
        first, last = inside[0], inside[-1]
        draw.line((first[0], first[1], last[0], last[1]), fill=color, width=2)
146
+
147
+ # ------------------------- Gradio app callbacks ---------------------------
148
+
149
def init_states():
    """Initial app state: no mode selected, all point/line lists empty."""
    return (None, [], [], [], [], [])
151
+
152
+
153
def on_mode_change(mode, image, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs):
    """Switch drawing mode between 'yellow', 'red' or None.

    Returns the image unchanged plus updated states; any half-drawn click is
    discarded when the mode changes.
    """
    cleared_pending = []
    return (image, mode, cleared_pending, y_lines, r_lines, y_pairs, r_pairs)
159
+
160
+
161
def on_image_select(sel: gr.SelectData, image, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs):
    """Called when user clicks on the image. sel.index gives (x, y) in pixels.

    Two clicks in 'yellow' or 'red' mode form a line of that colour; the
    overlay is redrawn after every click.
    """
    unchanged = (image, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs)
    if image is None:
        gr.Warning("Please upload an image first.")
        return unchanged
    if sel is None:
        return unchanged

    idx = getattr(sel, "index", None)
    if idx is None:
        # Fall back to alternative event payload attribute names.
        idx = getattr(sel, "data", None) or getattr(sel, "value", None)
    if not idx:
        return unchanged

    x, y = int(idx[0]), int(idx[1])

    # Record the click as a pending point.
    pending = list(current_points) if current_points is not None else []
    pending.append((x, y))

    # Two pending points + an active colour mode -> commit a new line.
    if len(pending) >= 2 and current_mode in ("yellow", "red"):
        p1, p2 = pending[-2], pending[-1]
        line = build_line_from_points(p1, p2)
        if current_mode == "yellow":
            y_lines = list(y_lines) if y_lines is not None else []
            y_pairs = list(y_pairs) if y_pairs is not None else []
            y_lines.append(line)
            y_pairs.append((p1, p2))
        else:
            r_lines = list(r_lines) if r_lines is not None else []
            r_pairs = list(r_pairs) if r_pairs is not None else []
            r_lines.append(line)
            r_pairs.append((p1, p2))
        pending = []  # the pair has been consumed

    # Redraw the overlay with everything collected so far.
    base_pil = Image.fromarray(image) if isinstance(image, np.ndarray) else image
    out = draw_overlay(base_pil, y_lines or [], r_lines or [], y_pairs or [], r_pairs or [], vps=None)

    return out, current_mode, pending, y_lines, r_lines, y_pairs, r_pairs
208
+
209
+
210
def _estimate_group_vp(lines, pairs, default_center):
    """Estimate one vanishing point from a group of user-drawn lines.

    Parameters
    ----------
    lines : list
        (a, b, c) coefficients of each line (a*x + b*y + c = 0).
    pairs : list
        The ((x1, y1), (x2, y2)) endpoint pairs that produced the lines.
    default_center : tuple
        Fallback initial guess when no pairwise intersection is solvable.

    Returns
    -------
    tuple or None
        (x, y) of the estimated vanishing point, or None when fewer than
        two lines are available.
    """
    if not lines or len(lines) < 2:
        return None
    lines_arr = np.array(lines)

    # Initial guess: mean of all solvable pairwise intersections.
    inters = []
    for i in range(len(lines_arr) - 1):
        for j in range(i + 1, len(lines_arr)):
            try:
                A = np.array([[lines_arr[i][0], lines_arr[i][1]],
                              [lines_arr[j][0], lines_arr[j][1]]])
                ip = np.linalg.solve(A, -np.array([lines_arr[i][2], lines_arr[j][2]]))
                inters.append(ip)
            except Exception:
                # parallel / degenerate pair -- skip it
                pass
    p0 = np.mean(inters, axis=0) if inters else np.asarray(default_center, dtype=float)

    # Perturbed copies of each user line stabilize the optimization.
    noise = []
    for (p1, p2) in pairs:
        noise += add_noise_lines_for_line(p1, p2, n=4, sigma=2.0)

    res = minimize(lambda x: total_distances(x, lines_arr, noise), p0, method='Powell')
    return (float(res.x[0]), float(res.x[1]))


def compute_vanishing_points(image, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs):
    """Compute vanishing points for both color groups and draw them.

    Needs at least two lines in a group to estimate its vanishing point;
    a group with fewer lines keeps a None entry and is simply not drawn.
    Returns the annotated image followed by the updated states (the
    pending point list is cleared).
    """
    if image is None:
        gr.Warning("Please upload an image and draw lines first.")
        return image, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs

    img_pil = Image.fromarray(image) if isinstance(image, np.ndarray) else image
    center = (img_pil.width / 2, img_pil.height / 2)

    # Identical estimation for both groups (previously duplicated inline).
    vps = {
        "yellow": _estimate_group_vp(y_lines, y_pairs, center),
        "red": _estimate_group_vp(r_lines, r_pairs, center),
    }

    out = draw_overlay(img_pil, y_lines or [], r_lines or [], y_pairs or [], r_pairs or [], vps=vps)
    # Return state, clearing current_points
    return out, current_mode, [], y_lines, r_lines, y_pairs, r_pairs
272
+
273
+
274
def on_upload(image):
    """Reset every drawing state when a fresh image is uploaded.

    Returns the image followed by cleared mode/point/line/pair states.
    """
    cleared = (None,) + tuple([] for _ in range(5))
    return (image,) + cleared
277
+
278
+ # ------------------------------ Build Blocks ------------------------------
279
+
280
def build_gradio_interface():
    """Assemble the Gradio Blocks UI for the vanishing-point picker.

    Wires the module-level callbacks (mode buttons, image clicks,
    vanishing-point computation, reset) to the shared gr.State objects
    and returns the un-launched demo.
    """
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown("# ☀️ Shadow Vanishing-Point Picker")
        with gr.Row():
            img_in = gr.Image(label="Upload image and then click to add points", type="numpy", interactive=True, height=600)
            with gr.Column(scale=1):
                start_y = gr.Button("Start Yellow Line")
                start_r = gr.Button("Start Red Line")
                none_btn = gr.Button("Stop Drawing")
                compute_btn = gr.Button("Compute Vanishing Points", variant="primary")
                reset_btn = gr.Button("Reset All")
        gr.Markdown("Click the image to add points. Two points make one line. Add at least 2 lines per color group to compute a vanishing point.")

        # states shared by every callback
        current_mode = gr.State(None)
        current_points = gr.State([])
        y_lines = gr.State([])
        r_lines = gr.State([])
        y_pairs = gr.State([])
        r_pairs = gr.State([])

        # Original image state to allow resetting
        original_image = gr.State()

        # link buttons to mode change (the leading gr.State supplies the mode argument)
        start_y.click(on_mode_change, inputs=[gr.State("yellow"), img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs],
                      outputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs])
        start_r.click(on_mode_change, inputs=[gr.State("red"), img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs],
                      outputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs])
        none_btn.click(on_mode_change, inputs=[gr.State(None), img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs],
                       outputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs])

        # image select event
        img_in.select(on_image_select, inputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs],
                      outputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs])

        compute_btn.click(compute_vanishing_points, inputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs],
                          outputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs])

        # Store original image on upload
        img_in.upload(
            lambda img: (img, img, None, [], [], [], [], []),  # Reset all states on new upload
            inputs=[img_in],
            outputs=[img_in, original_image, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs]
        )

        # Reset button restores the original image and clears states
        reset_btn.click(
            lambda img: (img, None, [], [], [], [], []),  # Reset all states
            inputs=[original_image],
            outputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs]
        )

    return demo
334
+
335
+ # --- Remove the launch() call ---
336
+ # if __name__ == '__main__':
337
+ # demo = build_gradio_interface()
338
+ # demo.queue()
339
+ # demo.launch()
utils/src/Filter.py ADDED
@@ -0,0 +1,422 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Please read the copyright notice located on the readme file (README.md).
3
+ """
4
+ import cv2 as cv
5
+ import numpy as np
6
+ from scipy import signal
7
+ import utils.src.Functions as Fu
8
+
9
+
10
def Threshold(y, t):
    """Soft-floor operator: element-wise max(0, y - t).

    Parameters
    ----------
    y : numpy.ndarray('float')
        Array of wavelet coefficients.
    t : float
        Variance of the PRNU (the threshold to subtract).

    Returns
    -------
    numpy.ndarray('float')
        Thresholded wavelet coefficients, clipped from below at zero,
        ready for the subsequent Wiener-style filtering.
    """
    return np.clip(y - t, 0., None)
30
+
31
+
32
def WaveNoise(coef, NoiseVar):
    """Wiener-like attenuation of wavelet detail coefficients (residual filtering).

    The local variance of the noise-free image is estimated for every
    coefficient with square moving averages of sizes 3, 5, 7 and 9; the
    smallest (most conservative) estimate wins.  The coefficients are then
    attenuated by the Wiener factor NoiseVar / (coefVar + NoiseVar).

    Parameters
    ----------
    coef : numpy.ndarray('float')
        Wavelet detail coefficients at one decomposition level.
    NoiseVar : float
        Variance of the additive noise (PRNU).

    Returns
    -------
    numpy.ndarray('float')
        Attenuated (filtered) wavelet coefficients.
    numpy.ndarray('float')
        Final per-coefficient variance estimates.
    """
    energy = coef * coef
    coefVar = None
    for size in (3, 5, 7, 9):
        box = np.full((size, size), 1.0 / (size * size))
        # local mean energy minus the noise floor, clipped at zero
        # (inlined equivalent of Threshold())
        est = np.maximum(signal.fftconvolve(energy, box, mode='same') - NoiseVar, 0.)
        coefVar = est if coefVar is None else np.minimum(coefVar, est)

    # Wiener filter like attenuation
    filtered = coef * (NoiseVar / (coefVar + NoiseVar))
    return filtered, coefVar
71
+
72
+ '''
73
+ def WaveFilter(coef, NoiseVar):
74
+ """
75
+ Applies Wiener-like filter in Wavelet Domain (image filtering).
76
+
77
+ Models each detail wavelet coefficient as conditional Gaussian random
78
+ variable and use four square NxN moving windows, N in [3,5,7,9], to
79
+ estimate the variance of noise-free image for each wavelet coefficient.
80
+ Then it applies a Wiener-type denoising filter to the coefficients.
81
+
82
+ Parameters
83
+ ----------
84
+ coef : numpy.ndarray('float')
85
+ Wavelet detailed coefficient at certain level
86
+ NoiseVar : float
87
+ Variance of the additive noise
88
+
89
+ Returns
90
+ -------
91
+ numpy.ndarray('float')
92
+ Attenuated (filtered) Wavelet coefficient
93
+ numpy.ndarray('float')
94
+ Finall estimated variances for each Wavelet coefficient
95
+ """
96
+
97
+ tc = np.power(coef, 2)
98
+ coefVar = Threshold(
99
+ signal.fftconvolve(np.ones([3, 3]) / (3. * 3.), tc, mode='valid'),
100
+ NoiseVar);
101
+
102
+ for w in range(5, 9 + 1, 2):
103
+ EstVar = Threshold(
104
+ signal.fftconvolve(np.ones([w, w]) / (w * w), tc, mode='valid'),
105
+ NoiseVar)
106
+ coefVar = min(coefVar, EstVar)
107
+
108
+ # Wiener filter like attenuation
109
+ tc = np.multiply(coef, np.divide(coefVar, coefVar + NoiseVar))
110
+
111
+ return tc, coefVar
112
+ '''
113
+
114
def NoiseExtractFromImage(image, sigma=3.0, color=False, noZM=False):
    """
    Estimates PRNU from one image.

    Parameters
    ----------
    image : str or numpy.ndarray('uint8')
        either test image filename or numpy matrix of image
    sigma : float
        std of noise to be used for identification
        (recommended value between 2 and 3)
    color : bool
        for an RGB image, whether to extract noise for the three channels
        separately (default: False)
    noZM : bool
        whether to skip zero-meaning the extracted (filtered) noise

    Returns
    -------
    numpy.ndarray('float')
        extracted noise from the input image, a rough estimate of PRNU fingerprint

    Example
    -------
    noise = NoiseExtractFromImage('DSC00123.JPG',2);

    Reference
    ---------
    [1] M. Goljan, T. Filler, and J. Fridrich. Large Scale Test of Sensor
    Fingerprint Camera Identification. In N.D. Memon and E.J. Delp and P.W. Wong and
    J. Dittmann, editors, Proc. of SPIE, Electronic Imaging, Media Forensics and
    Security XI, volume 7254, pages # 0I-01-0I-12, January 2009.

    """

    # ----- Parameters ----- #
    L = 4  # number of wavelet decomposition levels (2-5 also works)
    if isinstance(image, str):
        X = cv.imread(image)
        if np.ndim(X) == 3:
            X = X[:, :, ::-1]  # BGR2RGB
    else:
        X = image
    del image

    # fixed: a 2D (grayscale) input used to crash on the 3-way shape unpack
    three = X.shape[2] if np.ndim(X) == 3 else 1

    if X.dtype == 'uint8':
        # already in [0,255]
        X = X.astype(float)
    elif X.dtype == 'uint16':
        X = X.astype(float) / 65535 * 255

    # db4 (D8) scaling filter, normalized to unit energy
    qmf = np.array([.230377813309, .714846570553, .630880767930, -.027983769417,
                    -.187034811719, .030841381836, .032883011667, -.010597401785])
    qmf = qmf / np.linalg.norm(qmf)

    if three != 3:
        Noise = Fu.NoiseExtract(X, qmf, sigma, L)
    else:
        Noise = np.zeros(X.shape)
        for j in range(3):
            Noise[:, :, j] = Fu.NoiseExtract(X[:, :, j], qmf, sigma, L)
        if not color:
            Noise = Fu.rgb2gray1(Noise)

    if noZM:
        print('not removing the linear pattern')
    else:
        Noise, _ = Fu.ZeroMeanTotal(Noise)

    return Noise
185
+
186
+ #%% ----- 'mdwt' mex code ported to python -----#
187
def mdwt(x, h, L):
    """
    multi-level Discrete Wavelet Transform, implemented similar to the
    Rice Wavelet Toolbox (https://www.ece.rice.edu/dsp/software/rwt.shtml)

    Parameters
    ----------
    x : numpy.ndarray('float')
        2D input image
    h : list
        db4 (D8) decomposition lowpass filter
    L : int
        Number of levels for DWT decomposition

    Returns
    -------
    numpy.ndarray('float')
        input image in DWT domain (the LL approximation ends up in the
        top-left corner, with detail subbands nested around it per level)

    """

    # helper: True when a float is a whole number (divisibility check)
    isint = lambda x: x % 1 == 0

    m, n = x.shape[0], x.shape[1]
    # both dimensions must be divisible by 2^L for L decomposition levels
    if m > 1:
        mtest = m / (2.**L)
        if not isint(mtest):
            raise(ValueError("Number of rows in input image must be of size m*2^(L)"))
    if n > 1:
        ntest = n / (2.**L)
        if not isint(ntest):
            raise(ValueError("Number of columns in input image must be of size n*2^(L)"))


    # -- internal --

    def _fpsconv(x_in, lx, h0, h1, lhm1, x_outl, x_outh):
        # analysis step for one row/column: periodic (circular-like)
        # padding, convolve with lowpass h0 and highpass h1, downsample by 2
        x_in[lx:lx+lhm1] = x_in[:lhm1]
        #
        tmp = np.convolve(x_in[:lx+lhm1],h0)
        x_outl[:lx//2]= tmp[lhm1:-lhm1-1:2]
        tmp = np.convolve(x_in[:lx+lhm1],h1)
        x_outh[:lx//2]= tmp[lhm1:-lhm1-1:2]
        '''
        # or (as in the C++ implementation):
        ind = 0
        for i in range(0,lx,2):
            x_outl[ind] = np.dot( x_in[i:i+lhm1+1], np.flip(h0) )
            x_outh[ind] = np.dot( x_in[i:i+lhm1+1], np.flip(h1) )
            ind += 1
        '''
        return x_in, x_outl, x_outh

    def _MDWT(x, h, L):
        # separable 2D DWT: filter all rows, then all columns, per level
        lh = len(h)
        _m, _n = x.shape[0], x.shape[1]
        y = np.zeros([_m,_n], dtype=float)

        # reusable scratch buffers for one row/column at a time
        xdummy = np.zeros([max(_m,_n) + lh-1], dtype=float)
        ydummyl = np.zeros([max(_m,_n)], dtype=float)
        ydummyh = np.zeros([max(_m,_n)], dtype=float)

        # analysis lowpass and highpass
        if _n == 1:
            _n = _m
            _m = 1

        h0 = np.flip(h)
        # highpass built from the lowpass via alternating signs (QMF relation)
        h1 = [h[i]*(-1)**(i+1) for i in range(lh)]
        lhm1 = lh - 1
        actual_m = 2 * _m
        actual_n = 2 * _n

        # main loop over decomposition levels
        for actual_L in range(1, L+1):
            if _m == 1:
                actual_m = 1
            else:
                actual_m = actual_m // 2
            r_o_a = actual_m // 2
            actual_n = actual_n // 2
            c_o_a = actual_n // 2

            # go by rows
            for ir in range(actual_m):# loop over rows
                # store in dummy variable
                if actual_L == 1:
                    xdummy[:actual_n] = x[ir, :actual_n]# from input
                else:
                    xdummy[:actual_n] = y[ir, :actual_n]# from LL of previous level
                # perform filtering lowpass and highpass
                xdummy, ydummyl, ydummyh = _fpsconv(xdummy, actual_n, h0, h1, lhm1, ydummyl, ydummyh)
                # restore dummy variables in matrices
                y[ir, :c_o_a ] = ydummyl[:c_o_a]
                y[ir, c_o_a:2*c_o_a] = ydummyh[:c_o_a]


            if _m > 1: # in case of a 2D signal
                # go by columns
                for ic in range(actual_n):# loop over column
                    # store in dummy variables
                    xdummy[:actual_m] = y[:actual_m, ic]
                    # perform filtering lowpass and highpass
                    xdummy, ydummyl, ydummyh = _fpsconv(xdummy, actual_m, h0, h1, lhm1, ydummyl, ydummyh)
                    # restore dummy variables in matrix
                    y[:r_o_a, ic] = ydummyl[:r_o_a]
                    y[r_o_a:2*r_o_a, ic] = ydummyh[:r_o_a]

        return y

    # --------------

    y = _MDWT(x, h, L)

    return y
303
+
304
+ #%% ----- 'midwt' mex code ported to python -----#
305
def midwt(y, h, L):
    """
    multi-level inverse Discrete Wavelet Transform, implemented similar to
    the Rice Wavelet Toolbox (https://www.ece.rice.edu/dsp/software/rwt.shtml)

    Parameters
    ----------
    y : numpy.ndarray('float')
        2D matrix of image in multi-level DWT domain
    h : list
        db4 (D8) decomposition lowpass filter
    L : int
        Number of levels of the DWT decomposition to invert

    Returns
    -------
    numpy.ndarray('float')
        reconstructed image in the spatial domain

    NOTE: the returned array aliases the input -- ``y`` is overwritten in
    place during reconstruction (``x = y`` below, then indexed assignment).
    """

    # helper: True when a float is a whole number (divisibility check)
    isint = lambda x: x % 1 == 0

    m, n = y.shape[0], y.shape[1]
    if m > 1:
        mtest = m / (2.**L)
        if not isint(mtest):
            raise(ValueError("Number of rows in input image must be of size m*2^(L)"))
    if n > 1:
        ntest = n / (2.**L)
        if not isint(ntest):
            raise(ValueError("Number of columns in input image must be of size n*2^(L)"))

    # -- internal --
    def _bpsconv(x_out, lx, g0, g1, lhhm1, x_inl, x_inh):
        # synthesis step for one row/column: periodic padding, then
        # upsample-and-filter via the even/odd (polyphase) parts of g0/g1
        x_inl[:lhhm1] = x_inl[lx:lx+lhhm1]
        x_inh[:lhhm1] = x_inh[lx:lx+lhhm1]

        tmp = np.convolve(x_inl[:lx+lhhm1+1], g0[::2]) + \
              np.convolve(x_inh[:lx+lhhm1+1], g1[::2]);
        x_out[:2*lx:2] = tmp[lhhm1:-lhhm1-1]

        tmp = np.convolve(x_inl[:lx+lhhm1+1], g0[1::2]) + \
              np.convolve(x_inh[:lx+lhhm1+1], g1[1::2])
        x_out[1:2*lx:2] = tmp[lhhm1:-lhhm1-1]
        '''
        # or (as in the C++ implementation):
        ind = 0
        for i in range(lx):
            x_out[ind] = np.dot(x_inl[i:i+lhhm1+1], np.flip(g0[::2])) + \
                         np.dot(x_inh[i:i+lhhm1+1], np.flip(g1[::2]))
            x_out[ind+1] = np.dot(x_inl[i:i+lhhm1+1], np.flip(g0[1::2])) + \
                           np.dot(x_inh[i:i+lhhm1+1], np.flip(g1[1::2]))
            ind += 2
        '''
        return x_out

    def _MIDWT(y, h, L):
        lh = len(h)
        _m, _n = y.shape[0], y.shape[1]
        # reusable scratch buffers for one row/column at a time
        xdummy = np.zeros([max(_m, _n)], dtype=float)
        ydummyl = np.zeros([max(_m, _n)+lh//2-1], dtype=float)
        ydummyh = np.zeros([max(_m, _n)+lh//2-1], dtype=float)

        # synthesis lowpass and highpass
        if _n == 1:
            _n = _m
            _m = 1

        g0 = h
        # synthesis highpass: time-reversed lowpass with alternating signs
        g1 = [h[lh-i-1]*((-1)**i) for i in range(lh)]
        #lhm1 = lh - 1
        lhhm1 = lh // 2 - 1

        # coarsest-level subband size factor: 2^(L-1)
        sample_f = 2**(L-1)

        actual_m = _m // sample_f if _m > 1 else 1
        actual_n = _n // sample_f

        # x aliases y: reconstruction happens in place
        x = y

        # main loop, from the coarsest level back to full resolution
        for actual_L in range(L,0,-1):
            r_o_a = actual_m // 2
            c_o_a = actual_n // 2

            # in case of a 2D signal
            if _m > 1:
                # go by columns
                for ic in range(actual_n):# loop over column
                    # store in dummy variables
                    ydummyl[lhhm1:lhhm1+r_o_a] = x[:r_o_a, ic]
                    ydummyh[lhhm1:lhhm1+r_o_a] = x[r_o_a:2*r_o_a, ic]
                    # perform filtering lowpass and highpass
                    xdummy = _bpsconv(xdummy, r_o_a, g0, g1, lhhm1, ydummyl, ydummyh)
                    # restore dummy variables in matrix
                    x[:actual_m, ic] = xdummy[:actual_m]
            # go by rows
            for ir in range(actual_m):# loop over rows
                # store in dummy variable
                ydummyl[lhhm1:lhhm1+c_o_a] = x[ir, :c_o_a]
                ydummyh[lhhm1:lhhm1+c_o_a] = x[ir, c_o_a:2*c_o_a]
                # perform filtering lowpass and highpass
                xdummy = _bpsconv(xdummy, c_o_a, g0, g1, lhhm1, ydummyl, ydummyh);
                # restore dummy variables in matrices
                x[ir, :actual_n] = xdummy[:actual_n]

            actual_m = 1 if _m == 1 else actual_m * 2
            actual_n = actual_n * 2

        return x
    # --------------

    x = _MIDWT(y, h, L)

    return x
422
+
utils/src/Functions.py ADDED
@@ -0,0 +1,565 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Please read the copyright notice located on the readme file (README.md).
3
+ """
4
+ import numpy as np
5
+ from scipy import special
6
+ import utils.src.Filter as Ft
7
+
8
+
9
def crosscorr(array1, array2):
    """Normalized circular 2D cross-correlation of two 2D arrays (via FFT).

    Both inputs are mean-subtracted first and the result is scaled by the
    product of their L2 norms, so an autocorrelation peaks at 1.0.

    Parameters
    ----------
    array1 : numpy.ndarray
        first 2D matrix
    array2 : numpy.ndarray
        second 2D matrix (same shape as array1)

    Returns
    -------
    numpy.ndarray('float64') or None
        2D cross-correlation surface, or None when either input is
        constant (zero norm after mean removal).
    """
    a = np.array(array1, dtype=np.double)  # copies: inputs stay untouched
    b = np.array(array2, dtype=np.double)
    a -= a.mean()
    b -= b.mean()

    normalizator = np.sqrt((a * a).sum() * (b * b).sum())
    if normalizator == 0:
        return None

    # correlation == convolution with the 180-degree-rotated second array
    rotated = b[::-1, ::-1]
    spectrum = np.fft.fft2(a) * np.fft.fft2(rotated)
    return np.real(np.fft.ifft2(spectrum)) / normalizator
45
+
46
+ '''
47
+ def crosscorr(array1, array2):
48
+ # function ret = crosscor2(array1, array2)
49
+ # Computes 2D crosscorrelation of 2D arrays
50
+ # Function returns DOUBLE type 2D array
51
+ # No normalization applied
52
+
53
+ array1 = array1.astype(np.double)
54
+ array2 = array2.astype(np.double)
55
+ array1 = array1 - array1.mean()
56
+ array2 = array2 - array2.mean()
57
+
58
+ ############### End of filtering
59
+ tilted_array2 = np.fliplr(array2); del array2
60
+ tilted_array2 = np.flipud(tilted_array2)
61
+ TA = np.fft.fft2(tilted_array2); del tilted_array2
62
+ FA = np.fft.fft2(array1); del array1
63
+ FF = np.multiply(FA, TA); del FA, TA
64
+
65
+ ret = np.real(np.fft.ifft2(FF))
66
+ return ret
67
+ '''
68
+
69
def imcropmiddle(X, sizeout, preference='SE'):
    """
    Crops the middle portion of a given size.

    Parameters
    ----------
    X : numpy.ndarray
        2D or 3D image matrix
    sizeout : list
        [rows, cols] of the output image (extra entries are ignored)
    preference : str
        which side absorbs the extra pixel when a margin is odd:
        'NW', 'NE', 'SW' or 'SE' (default)

    Returns
    -------
    numpy.ndarray
        cropped image; always 3D (a trailing singleton axis is added to
        2D inputs, matching the original behavior)

    """

    if len(sizeout) > 2:
        sizeout = sizeout[:2]
    if np.ndim(X) == 2:
        X = X[..., np.newaxis]
    M, N, three = X.shape
    sizeout = [min(M, sizeout[0]), min(N, sizeout[1])]

    # the cropped region is off center by 1/2 pixel when a margin is odd;
    # the preference decides which corner the window leans toward
    row_round = np.ceil if preference in ('SW', 'SE') else np.floor
    col_round = np.ceil if preference in ('NE', 'SE') else np.floor
    # fixed: indices must be ints (floats raise on modern numpy), and the
    # old "M0:M1+1" slice produced an output one pixel larger than asked
    M0 = int(row_round((M - sizeout[0]) / 2))
    N0 = int(col_round((N - sizeout[1]) / 2))
    return X[M0:M0 + sizeout[0], N0:N0 + sizeout[1], :]
115
+
116
+
117
def IntenScale(inp):
    """
    Scales input pixels to be used in the multiplicative PRNU model.

    Pixels below T=252 are scaled linearly to [0, 1); brighter
    (near-saturated) pixels are attenuated by a narrow Gaussian falloff.

    Parameters
    ----------
    inp : numpy.ndarray('uint8')
        2D or 3D image matrix

    Returns
    -------
    numpy.ndarray('float')
        Matrix of scaled pixel intensities.

    """
    T, v = 252., 6.
    attenuated = np.exp(-(inp - T) ** 2 / v)
    linear = inp / T
    return np.where(inp < T, linear, attenuated)
141
+
142
+
143
def LinearPattern(X):
    """
    Output column and row means from all 4 subsignals, subsampling by 2.

    Parameters
    ----------
    X : numpy.ndarray('float')
        2D noise matrix

    Returns
    -------
    dict
        A dictionary with the following items:
        row means as LP['r11'], LP['r12'], LP['r21'], LP['r22'] (column vectors)
        column means as LP['c11'], LP['c12'], LP['c21'], LP['c22'] (row vectors)
        plus the global mean 'me' and the four subsignal means 'cm'

    numpy.ndarray('float')
        The difference between input X and ZeroMean(X); i.e. X-output would be
        the zero-meaned version of X

    """

    M, N = X.shape
    me = X.mean()
    # work on the globally mean-removed signal
    X = X-me

    # the four 2x-subsampled subsignals: (even/odd rows) x (even/odd cols)
    LP = dict(r11=[], c11=[], r12=[], c12=[], r21=[], c21=[], r22=[], c22=[], me=[], cm=[])
    LP['r11'] = np.mean(X[::2,::2],axis=1)
    LP['c11'] = np.mean(X[::2,::2],axis=0)
    cm11 = np.mean(X[::2,::2])
    LP['r12'] = np.mean(X[::2,1::2],axis=1)
    LP['c12'] = np.mean(X[::2,1::2],axis=0)
    cm12 = np.mean(X[::2,1::2]) # = -cm Assuming mean2(X)==0
    LP['r21'] = np.mean(X[1::2,::2],axis=1)
    LP['c21'] = np.mean(X[1::2,::2],axis=0)
    cm21 = np.mean(X[1::2,::2]) # = -cm Assuming mean2(X)==0
    LP['r22'] = np.mean(X[1::2,1::2],axis=1)
    LP['c22'] = np.mean(X[1::2,1::2],axis=0)
    cm22 = np.mean(X[1::2,1::2]) # = cm Assuming mean2(X)==0
    LP['me'] = me
    LP['cm'] = [cm11,cm12,cm21,cm22]

    del X
    # D = the per-subsignal rank-1 (row-mean + column-mean) component of X
    D = np.zeros([M,N],dtype=np.double)
    # NOTE(review): meshgrid(c, r, indexing='ij') yields arrays of shape
    # (len(c), len(r)), i.e. transposed relative to the (rows, cols) target
    # slice -- this only lines up for square inputs; confirm for M != N.
    [aa,bb] = np.meshgrid(LP["c11"],LP["r11"],indexing='ij')
    D[::2,::2] = aa+bb+me-cm11
    [aa,bb] = np.meshgrid(LP["c12"],LP["r12"],indexing='ij')
    D[::2,1::2] = aa+bb+me-cm12
    [aa,bb] = np.meshgrid(LP["c21"],LP["r21"],indexing='ij')
    D[1::2,::2] = aa+bb+me-cm21
    [aa,bb] = np.meshgrid(LP["c22"],LP["r22"],indexing='ij')
    D[1::2,1::2] = aa+bb+me-cm22

    return LP, D
198
+
199
+
200
def NoiseExtract(Im,qmf,sigma,L):
    """
    Extracts noise signal that is locally Gaussian N(0,sigma^2)

    Parameters
    ----------
    Im : numpy.ndarray
        2D noisy image matrix
    qmf : list
        Scaling coefficients of an orthogonal wavelet filter
    sigma : float
        std of noise to be used for identification
        (recommended value between 2 and 3)
    L : int
        The number of wavelet decomposition levels.
        Must match the number of levels of WavePRNU.
        (Generally, L = 3 or 4 will give pretty good results because the
        majority of the noise is present only in the first two detail levels.)

    Returns
    -------
    numpy.ndarray('float')
        extracted noise converted back to spatial domain

    Example
    -------
    Im = np.double(cv.imread('Lena_g.bmp')[...,::-1]) % read gray scale test image
    qmf = MakeONFilter('Daubechies',8)
    Image_noise = NoiseExtract(Im, qmf, 3., 4)

    Reference
    ---------
    [1] M. Goljan, T. Filler, and J. Fridrich. Large Scale Test of Sensor
    Fingerprint Camera Identification. In N.D. Memon and E.J. Delp and P.W. Wong and
    J. Dittmann, editors, Proc. of SPIE, Electronic Imaging, Media Forensics and
    Security XI, volume 7254, pages # 0I-01-0I-12, January 2009.

    """

    Im = Im.astype(float)

    M, N = Im.shape
    m = 2**L
    # use padding with mirrored image content so dimensions divide 2^L
    minpad=2 # minimum number of padded rows and columns as well
    nr = (np.ceil((M+minpad)/m)*m).astype(int); nc = (np.ceil((N+minpad)/m)*m).astype(int) # dimensions of the padded image (always pad 8 pixels or more)
    pr = np.ceil((nr-M)/2).astype(int) # number of padded rows on the top
    prd= np.floor((nr-M)/2).astype(int) # number of padded rows at the bottom
    pc = np.ceil((nc-N)/2).astype(int) # number of padded columns on the left
    pcr= np.floor((nc-N)/2).astype(int) # number of padded columns on the right
    # assemble the mirror-padded image as a 3x3 mosaic of reflected copies
    Im = np.block([
        [ Im[pr-1::-1,pc-1::-1], Im[pr-1::-1,:], Im[pr-1::-1,N-1:N-pcr-1:-1]],
        [ Im[:,pc-1::-1], Im, Im[:,N-1:N-pcr-1:-1] ],
        [ Im[M-1:M-prd-1:-1,pc-1::-1], Im[M-1:M-prd-1:-1,:], Im[M-1:M-prd-1:-1,N-1:N-pcr-1:-1] ]
        ])

    # Precompute noise variance and initialize the output
    NoiseVar = sigma**2
    # Wavelet decomposition, without redundancy
    wave_trans = Ft.mdwt(Im,qmf,L)
    # Extract the noise from the wavelet coefficients, level by level

    for i in range(1,L+1):

        # Horizontal noise extraction
        wave_trans[0:nr//2, nc//2:nc], _ = \
            Ft.WaveNoise(wave_trans[0:nr//2, nc//2:nc], NoiseVar)

        # Vertical noise extraction
        wave_trans[nr//2:nr, 0:nc//2], _ = \
            Ft.WaveNoise(wave_trans[nr//2:nr, 0:nc//2],NoiseVar)

        # Diagonal noise extraction
        wave_trans[nr//2:nr, nc//2:nc], _ = \
            Ft.WaveNoise(wave_trans[nr//2:nr, nc//2:nc], NoiseVar)

        # descend into the next (coarser) quadrant
        nc = nc//2
        nr = nr//2

    # Last, coarsest level: zero the approximation (LL) band entirely
    wave_trans[0:nr,0:nc] = 0

    # Inverse wavelet transform
    image_noise = Ft.midwt(wave_trans,qmf,L)

    # Crop to the original size
    image_noise = image_noise[pr:pr+M,pc:pc+N]
    return image_noise
288
+
289
+
290
def Qfunction(x):
    """
    Gaussian tail probability P(N(0,1) > x) together with its logarithm.

    For large x (>= 37.5) erfc underflows, so the asymptotic expansion
    phi(x)/x is used instead and logQ is computed directly.

    Parameters
    ----------
    x : float
        value to evaluate the Q-function for

    Returns
    -------
    float
        probability that a variable from N(0,1) is larger than x
    float
        log of that probability

    """
    if x < 37.5:
        Q = 0.5 * special.erfc(x / np.sqrt(2))
        return Q, np.log(Q)
    # asymptotic tail approximation
    Q = (1 / (np.sqrt(2 * np.pi) * x)) * np.exp(-np.power(x, 2) / 2)
    logQ = -np.power(x, 2) / 2 - np.log(x) - 0.5 * np.log(2 * np.pi)
    return Q, logQ
317
+
318
+
319
def rgb2gray1(X):
    """
    Converts RGB-like real data to a gray-like 2D output.

    Projects onto the first row of the inverse NTSC (YIQ) transform,
    i.e. the standard ~[0.299, 0.587, 0.114] RGB luminance weights.

    Parameters
    ----------
    X : numpy.ndarray('float')
        3D noise matrix from RGB image(s); a single-channel (M, N, 1)
        input is returned unchanged (still 3D).

    Returns
    -------
    numpy.ndarray('float')
        2D noise matrix in grayscale

    """
    if X.shape[2] == 1:
        return X

    rows, cols, channels = X.shape
    # first row of inv(YIQ matrix) == RGB -> luminance weights
    yiq = np.array([[1.0, 0.956, 0.621],
                    [1.0, -0.272, -0.647],
                    [1.0, -1.106, 1.703]])
    weights = np.linalg.inv(yiq)[0, :]
    flat = X.reshape(rows * cols, channels)
    return flat.dot(weights).reshape(rows, cols)
349
+
350
+
351
def Saturation(X, gray=False):
    """
    Determines saturated pixels as those holding the channel maximum
    (which must exceed 250) while repeating that value in a 4-neighbor.

    Parameters
    ----------
    X : numpy.ndarray('float')
        2D or 3D matrix of image with pixels in [0, 255]
    gray : bool
        Only for RGB input: when True, collapse the per-channel maps into
        one 2D map.

    Returns
    -------
    numpy.ndarray('bool')
        binary matrix, 0 (False) - saturated pixels

    """

    M, N = X.shape[0], X.shape[1]
    is_rgb = np.ndim(np.squeeze(X)) == 3

    # Nothing can be saturated when the whole image stays at or below 250.
    # fixed: np.bool alias was removed from numpy >= 1.24
    if X.max() <= 250:
        if is_rgb and not gray:
            return np.ones(X.shape, dtype=bool)
        return np.ones((M, N), dtype=bool)

    # A pixel is a "strict peak" when it differs from all four neighbors;
    # saturated pixels instead repeat the peak value in some neighbor.
    Xh = X - np.roll(X, 1, axis=1)
    Xv = X - np.roll(X, 1, axis=0)
    strict_peak = np.logical_and(np.logical_and(Xh, Xv),
                                 np.logical_and(np.roll(Xh, -1, axis=1),
                                                np.roll(Xv, -1, axis=0)))

    if is_rgb:
        # fixed: the map must be 3D here (it was allocated (M, N) and then
        # indexed with three indices), and MATLAB's 1-based channel indices
        # translate to 0, 1, 2 in Python
        SaturMap = np.ones(X.shape, dtype=bool)
        for j in range(3):
            chan_max = X[:, :, j].max()
            if chan_max > 250:
                SaturMap[:, :, j] = np.logical_not(
                    np.logical_and(X[:, :, j] == chan_max,
                                   np.logical_not(strict_peak[:, :, j])))
        if gray:
            combined = (SaturMap[:, :, 0].astype(np.int8)
                        + SaturMap[:, :, 1].astype(np.int8)
                        + SaturMap[:, :, 2].astype(np.int8))
            # NOTE(review): following the original sum-then-clip arithmetic,
            # a pixel ends up saturated (0) only when ALL three channels are
            # saturated, although the docstring promised "at least 2" --
            # confirm the intended rule.
            SaturMap = combined > 0
        return SaturMap
    elif np.ndim(np.squeeze(X)) == 2:
        # fixed: the old code tested against the all-ones map instead of
        # the peak mask, which marked nothing as saturated
        chan_max = X.max()
        return np.logical_not(np.logical_and(X == chan_max,
                                             np.logical_not(strict_peak)))
    else:
        raise ValueError('Invalid matrix dimensions')
403
+
404
+
405
def SeeProgress(i):
    """
    SeeProgress(i) outputs i without performing carriage return.
    This function is designed to be used in slow for-loops to show how the
    calculations progress. If the first call in the loop is not with i=1, it's
    convenient to call SeeProgress(1) before the loop.
    """
    # Bug fix: `i==1 | i==0` parsed as the chained comparison
    # `i == (1 | i) == 0`, which is never true for non-negative i.
    if i == 0 or i == 1:
        print('\n ')
    print('* %(i)d *' % {"i": i}, end="\r")
414
+
415
+
416
def WienerInDFT(ImNoise, sigma):
    """
    Removes periodical patterns (like blockiness) from the input noise by
    Wiener-type filtering of its DFT magnitude.

    Parameters
    ----------
    ImNoise : numpy.ndarray('float')
        2D noise matrix extracted from one image or a camera reference pattern
    sigma : float
        standard deviation of the noise that should not be exceeded even
        locally in the DFT domain

    Returns
    -------
    numpy.ndarray('float')
        filtered image noise (or camera reference pattern) ... estimate of PRNU
    """
    M, N = ImNoise.shape

    spectrum = np.fft.fft2(ImNoise)
    del ImNoise
    # Normalized magnitude of the (real part of the) spectrum.
    mag = np.abs(np.real(spectrum / np.sqrt(M * N)))

    noise_var = sigma ** 2
    mag_filtered, _ = Ft.WaveNoise(mag, noise_var)

    # Avoid division by zero: where the magnitude is 0, zero the filter output.
    zero_bins = np.where(mag == 0)
    mag[zero_bins] = 1
    mag_filtered[zero_bins] = 0
    del zero_bins

    spectrum = spectrum * mag_filtered / mag

    # Back to the spatial domain.
    return np.real(np.fft.ifft2(spectrum))
450
+
451
+
452
def ZeroMean(X, zType='CFA'):
    """
    Subtracts mean from all subsignals of the given type.

    Parameters
    ----------
    X : numpy.ndarray('float')
        2-D or 3-D noise matrix. NOTE: the per-channel mean is subtracted
        from X in place (as in the original implementation).
    zType : str
        Zero-meaning type. One of the following 4 options: {'col', 'row', 'both', 'CFA'}

    Returns
    -------
    numpy.ndarray('float')
        noise matrix after applying zero-mean
    dict
        dictionary including mean vectors in rows, columns, total mean, and
        checkerboard mean

    Example
    -------
    Y,_ = ZeroMean(X,'col') ... Y will have all columns with mean 0.
    Y,_ = ZeroMean(X,'CFA') ... Y will have all columns, rows, and 4 types of
    odd/even pixels zero mean.
    """

    # Accept 2-D input as promised by the docstring (the original crashed on it).
    dimExpanded = False
    if np.ndim(X) == 2:
        X = X[..., np.newaxis]
        dimExpanded = True

    M, N, K = X.shape
    # initialize the output matrix and vectors
    Y = np.zeros(X.shape, dtype=X.dtype)
    row = np.zeros([M, K], dtype=X.dtype)
    col = np.zeros([K, N], dtype=X.dtype)
    cm = 0

    # subtract mean from each color channel (in place, as in the original)
    mu = []
    for j in range(K):
        mu.append(np.mean(X[:, :, j], axis=(0, 1)))
        X[:, :, j] -= mu[j]

    for j in range(K):
        row[:, j] = np.mean(X[:, :, j], axis=1)
        col[j, :] = np.mean(X[:, :, j], axis=0)

    if zType == 'col':
        for j in range(K):
            Y[:, :, j] = X[:, :, j] - np.tile(col[j, :], (M, 1))
    elif zType == 'row':
        for j in range(K):
            Y[:, :, j] = X[:, :, j] - np.tile(row[:, j], (N, 1)).transpose()
    elif zType in ('both', 'CFA'):
        # Subtract column means, then row means from the RESULT.
        # Bug fix: the original second loop computed X - row, silently
        # discarding the column subtraction it had just done.
        for j in range(K):
            Y[:, :, j] = X[:, :, j] - np.tile(col[j, :], (M, 1))
            Y[:, :, j] -= np.tile(row[:, j], (N, 1)).transpose()
        if zType == 'CFA':
            # Remove the checkerboard (CFA) component: subtract the mean of
            # the even/even quadrant from the two "black" quadrants and add
            # it to the two "white" ones.
            for j in range(K):
                # Bug fix: Y[::2, ::2, j] is 2-D, so axis=(1,2) raised
                # AxisError for the default zType; the full mean is intended.
                cm = np.mean(Y[::2, ::2, j])
                Y[::2, ::2, j] -= cm
                Y[1::2, 1::2, j] -= cm
                Y[::2, 1::2, j] += cm
                Y[1::2, ::2, j] += cm
    else:
        raise ValueError('Unknown type for zero-meaning.')

    if dimExpanded:
        Y = np.squeeze(Y, axis=2)

    # Linear pattern data:
    LP = {}
    LP['row'] = row
    LP['col'] = col
    LP['mu'] = mu
    LP['checkerboard_mean'] = cm
    return Y, LP
522
+
523
+
524
def ZeroMeanTotal(X):
    """
    Subtracts mean from all black and all white subsets of columns and rows
    in a checkerboard pattern.

    Parameters
    ----------
    X : numpy.ndarray('float')
        2-D or 3-D noise matrix

    Returns
    -------
    numpy.ndarray('float')
        noise matrix after applying ZeroMeanTotal
    dict
        dictionary of four dictionaries for the four subplanes, each includes
        mean vectors in rows, columns, total mean, and checkerboard mean.
    """
    expanded = np.ndim(X) == 2
    if expanded:
        X = X[..., np.newaxis]
    Y = np.zeros(X.shape, dtype=X.dtype)

    # Zero-mean each of the four checkerboard subplanes independently,
    # keeping the linear-pattern data of each one.
    LP = {}
    for key, (r0, c0) in (('d11', (0, 0)), ('d12', (0, 1)),
                          ('d21', (1, 0)), ('d22', (1, 1))):
        sub, LP[key] = ZeroMean(X[r0::2, c0::2, :], 'both')
        Y[r0::2, c0::2, :] = sub

    if expanded:
        Y = np.squeeze(Y)

    return Y, LP
565
+
utils/src/__pycache__/Filter.cpython-310.pyc ADDED
Binary file (8.63 kB). View file
 
utils/src/__pycache__/Filter.cpython-38.pyc ADDED
Binary file (8.78 kB). View file
 
utils/src/__pycache__/Functions.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
utils/src/__pycache__/Functions.cpython-38.pyc ADDED
Binary file (14.5 kB). View file
 
utils/src/__pycache__/extraUtils.cpython-38.pyc ADDED
Binary file (2.06 kB). View file
 
utils/src/__pycache__/getFingerprint.cpython-38.pyc ADDED
Binary file (3.02 kB). View file
 
utils/src/__pycache__/maindir.cpython-310.pyc ADDED
Binary file (3.57 kB). View file
 
utils/src/__pycache__/maindir.cpython-38.pyc ADDED
Binary file (3.54 kB). View file
 
utils/src/extraUtils.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import matplotlib.pyplot as plt
3
+ from matplotlib import cm
4
+ from matplotlib.ticker import LinearLocator, FormatStrFormatter
5
+ import numpy as np
6
+
7
def getImagesFrom(folder_addr, extension='.png'):
    """
    Returns a sorted list of image paths from a directory, with the desired
    extension.

    Parameters
    ----------
    folder_addr : str
        Path to a target directory
    extension : str
        Extension of target image files, such as '.png', '.jpg', etc.

    Returns
    -------
    list
        The list of addresses of the images
    """
    # Preserve the original behavior of plain string concatenation with os.sep.
    if not folder_addr.endswith(os.sep):
        folder_addr += os.sep
    # Idiom fix: a comprehension instead of an index loop over __len__().
    return [folder_addr + name
            for name in sorted(os.listdir(folder_addr))
            if name.endswith(extension)]
30
+
31
+
32
+ '''
33
+ The following lines of code are mainly from:
34
+ [https://matplotlib.org/examples/mplot3d/surface3d_demo.html]
35
+ '''
36
def mesh(Z):
    """
    Similar to MATLAB 'mesh' function, can show peak of cross-correlation plane.

    Parameters
    ----------
    Z : numpy.ndarray('float32')
        2D matrix of cross-correlation

    Returns
    -------
    <nothing>
    """
    Z = np.squeeze(Z)

    fig = plt.figure()
    # Bug fix: Figure.gca(projection='3d') was removed in Matplotlib 3.6;
    # add_subplot(projection='3d') is the supported replacement.
    ax = fig.add_subplot(projection='3d')

    M, N = Z.shape
    X, Y = np.meshgrid(np.arange(N), np.arange(M))

    # Plot the surface.
    surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
                           linewidth=0, antialiased=False)

    # Customize the z axis.
    ax.set_zlim(Z.min() * .99, Z.max() * 1.01)
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))

    # Add a color bar which maps values to colors.
    fig.colorbar(surf, shrink=0.5, aspect=5)

    plt.show()
utils/src/getFingerprint.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Please read the copyright notice located on the readme file (README.md).
3
+ """
4
+ import utils.src.Functions as Fu
5
+ import cv2 as cv
6
+ import numpy as np
7
+
8
def getFingerprint(Images, sigma=3., fromHowMany=-1):
    """
    Extracts and averages noise from all images and outputs a camera
    fingerprint.

    Parameters
    ----------
    Images : list
        List of color images to process. They have to be from the same camera
        and the same size and orientation.
    sigma : float32
        Standard deviation of the expected noise (PRNU)
    fromHowMany : int
        Use only the first `fromHowMany` images; -1 means all of them.

    Returns
    -------
    numpy.ndarray('float32')
        3D matrix of reference pattern - estimate of PRNU
    dict
        Dictionary of Linear Pattern data
    list
        Paths of the images actually used for the reference pattern

    -------------------------------------------------------------------------
    [1] M. Goljan, T. Filler, and J. Fridrich. Large Scale Test of Sensor
    Fingerprint Camera Identification. Proc. of SPIE, Electronic Imaging,
    Media Forensics and Security XI, volume 7254, January 2009.
    -------------------------------------------------------------------------
    """

    database_size = len(Images) if fromHowMany == -1 else fromHowMany
    if database_size == 0:
        raise ValueError('No images of specified type in the directory.')

    ### Parameters used in denoising filter
    L = 4  # number of wavelet decomposition levels
    # Daubechies-8 QMF, normalized to unit norm.
    qmf = np.array([.230377813309, .714846570553, .630880767930, -.027983769417,
                    -.187034811719, .030841381836, .032883011667, -.010597401785])
    qmf /= np.linalg.norm(qmf)

    t = 0  # counter of images actually used
    ImagesinRP = []
    for i in range(database_size):
        Fu.SeeProgress(i)
        im = Images[i]
        X = cv.imread(im)
        if np.ndim(X) == 3:
            X = X[:, :, ::-1]  # OpenCV loads BGR; convert to RGB
        X = _double255(X)
        if t == 0:
            # First usable image fixes the expected size.
            M, N, three = X.shape
            if three == 1:
                continue  # only color images will be processed
            ### Initialize sums
            RPsum = np.zeros([M, N, 3], dtype='single')
            NN = np.zeros([M, N, 3], dtype='single')  # number of additions to each pixel for RPsum
        else:
            if X.ndim != 3:
                print('Not a color image - skipped.\n')
                continue  # only color images will be used
            # Bug fix: the original compared set([M,N,three]) != set(X.shape),
            # which ignores dimension order and duplicates (a transposed image
            # would pass); compare the shape tuples directly.
            if (M, N, three) != X.shape:
                s = X.shape
                # Bug fix: the original called the tuple as s(1-1), raising
                # TypeError whenever this branch was reached.
                print('\n Skipping image %(im)s of size %(s1)d x %(s2)d x %(s3)d \n'
                      % {'im': im, 's1': s[0], 's2': s[1], 's3': s[2]})
                continue  # only same size images will be used

        # The image will be the t-th image used for the reference pattern RP
        t = t + 1
        ImagesinRP.append(im)

        for j in range(3):
            ImNoise = np.single(Fu.NoiseExtract(X[:, :, j], qmf, sigma, L))
            # zeros for saturated pixels
            Inten = np.multiply(Fu.IntenScale(X[:, :, j]), Fu.Saturation(X[:, :, j]))
            # weighted average of ImNoise (weighted by Inten)
            RPsum[:, :, j] = RPsum[:, :, j] + np.multiply(ImNoise, Inten)
            NN[:, :, j] = NN[:, :, j] + np.power(Inten, 2)

        del ImNoise, Inten, X  # free memory before reading the next image

    if t == 0:
        raise ValueError('None of the images was color image in landscape orientation.')
    RP = np.divide(RPsum, NN + 1)
    # Remove linear pattern and keep its parameters
    RP, LP = Fu.ZeroMeanTotal(RP)
    return RP, LP, ImagesinRP
86
+
87
+ ### FUNCTIONS ##
88
+ def _double255(X):
89
+ # In MATLAB: convert to 'double' ranging from 0 to 255
90
+ # Here in this Python implementation we convert it to 'single' (np.float32)
91
+ X = X.astype(np.single)
92
+ return X
utils/src/maindir.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Please read the copyright notice located on the readme file (README.md).
3
+ """
4
+ import utils.src.Functions as Fu
5
+ import numpy as np
6
+ from scipy import special
7
+
8
+
9
def PCE(C, shift_range=[0,0], squaresize=11):
    """
    Computes Peak-to-Correlation Energy (PCE) obtained from correlation surface
    restricted to possible shifts due to cropping. In this implementation of
    PCE, it carries the sign of the peak (i.e. PCE can be negative).

    Parameters
    ----------
    C : numpy.ndarray('float32')
        cross-correlation surface calculated by function 'crosscorr'
    shift_range : list
        maximum shift is from [0,0] to [shift_range]
    squaresize : int
        removes the peak neighborhood of size (squaresize x squaresize)

    Returns
    -------
    dict
        A dictionary with items:
        PCE : peak-to-correlation energy
        PeakLocation : location of the primary peak ([0,0] means no shift)
        pvalue : probability of obtaining peakheight or higher (Gaussian assumption)
        P_FA : probability of false alarm over the admissible shift range
    dict
        Same test under the assumption of no cropping (equal to 'Out0,_ = PCE(C)')
    """

    if any(np.greater_equal(shift_range, C.shape)):
        # Bug fix: the original called min(shift_range, C.shape-1), which
        # raises TypeError on a tuple; clamp elementwise instead.
        shift_range = np.minimum(shift_range, np.array(C.shape) - 1)

    shift_range = np.array(shift_range)
    Out = dict(PCE=[], pvalue=[], PeakLocation=[], peakheight=[], P_FA=[], log10P_FA=[])

    if not C.any():
        # Cross-correlation with zero energy (see crosscor2): no detection.
        # Bug fix: the original `return`ed None here although every caller
        # unpacks two dictionaries; return a consistent (Out, Out0) pair.
        Out['PCE'] = 0
        Out['pvalue'] = 1
        Out['PeakLocation'] = [0, 0]
        Out['peakheight'] = 0
        Out['P_FA'] = 1
        Out['log10P_FA'] = 0
        Out0 = dict(PCE=0, P_FA=1, log10P_FA=0)
        return Out, Out0

    # C[-1,-1] corresponds to no shift of the first argument of 'crosscor2'.
    Cinrange = C[-1-shift_range[0]:, -1-shift_range[1]:]
    imax = np.argmax(Cinrange)
    ypeak, xpeak = np.unravel_index(imax, Cinrange.shape)
    Out['peakheight'] = Cinrange[ypeak, xpeak]
    del Cinrange
    Out['PeakLocation'] = [shift_range[0]-ypeak, shift_range[1]-xpeak]

    C_without_peak = _RemoveNeighborhood(C,
                                         np.array(C.shape)-Out['PeakLocation'],
                                         squaresize)
    correl = C[-1, -1]; del C

    # signed PCE, peak-to-correlation energy
    PCE_energy = np.mean(C_without_peak*C_without_peak)
    Out['PCE'] = (Out['peakheight']**2)/PCE_energy * np.sign(Out['peakheight'])

    # p-value under the simplifying assumption that C samples are Gaussian
    Out['pvalue'] = 1/2*special.erfc(Out['peakheight']/np.sqrt(PCE_energy)/np.sqrt(2))
    Out['P_FA'], Out['log10P_FA'] = _FAfromPCE(Out['PCE'], np.prod(shift_range+1))

    Out0 = dict(PCE=[], P_FA=[], log10P_FA=[])
    Out0['PCE'] = (correl**2)/PCE_energy
    Out0['P_FA'], Out0['log10P_FA'] = _FAfromPCE(Out0['PCE'], 1)
    return Out, Out0
84
+ # ----------------------------------------
85
+
86
def _RemoveNeighborhood(X,x,ssize):
    # Remove a 2-D neighborhood around x=[x1,x2] from matrix X and output a 1-D vector Y
    # ssize square neighborhood has size (ssize x ssize) square
    [M,N] = X.shape
    # half-width of the square window to remove
    radius = (ssize-1)/2
    # circularly shift X so the neighborhood to remove sits in the top-left corner
    X = np.roll(X,[int(radius-x[0]),int(radius-x[1])], axis=[0,1])
    # Keep samples outside the top-left square: first the block below it in the
    # first ssize columns, then the flattened remainder.
    # NOTE(review): this is a port of column-major MATLAB indexing; in
    # row-major NumPy, X[ssize:,:ssize] combined with X.flatten()[M*ssize:]
    # does not obviously partition "everything but the top-left ssize x ssize
    # square" and may double-count/miss samples — verify against the MATLAB
    # original (RemoveNeighborhood.m) before relying on exact PCE energies.
    Y = X[ssize:,:ssize]; Y = Y.flatten()
    Y = np.concatenate([Y, X.flatten()[int(M*ssize):]], axis=0)
    return Y
95
+
96
def _FAfromPCE(pce,search_space):
    """
    Calculates the false-alarm probability from the peak-to-correlation
    energy (PCE) measure of the peak.

    Parameters
    ----------
    pce : float
        PCE measure obtained from PCE()
    search_space : int
        number of correlation samples from which the maximum is taken

    Returns
    -------
    float
        false-alarm probability
    float
        its base-10 logarithm

    USAGE: FA = _FAfromPCE(31.5, 32*32)
    """

    [p,logp] = Fu.Qfunction(np.sign(pce)*np.sqrt(np.abs(pce)))
    if pce < 50:
        # Probability that at least one of `search_space` independent samples
        # exceeds the threshold. Bug fix: the original computed
        # np.power(1-(1-p), search_space), which equals p**search_space;
        # the intended formula (as in the MATLAB original) is 1-(1-p)^n.
        FA = 1 - np.power(1-p, search_space)
    else:
        FA = search_space*p  # an approximation for very small p

    if FA == 0:
        # Underflow: fall back to the approximation and a log-domain estimate.
        FA = search_space*p
        log10FA = np.log10(search_space)+logp*np.log10(np.exp(1))
    else:
        log10FA = np.log10(FA)

    return FA, log10FA