sol9x-sagar committed
Commit 1835398 · 1 Parent(s): 6ceeab6

Fix: move large model files to LFS
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.keras filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,32 @@
1
+ # Use a lightweight Python image with TensorFlow pre-installed if possible,
2
+ # or a standard slim python image.
3
+ FROM python:3.11-slim
4
+
5
+ # Set the working directory
6
+ WORKDIR /code
7
+
8
+ # System deps often needed by opencv-python-headless
9
+ RUN apt-get update && apt-get install -y --no-install-recommends \
10
+ libgl1 \
11
+ libglib2.0-0 \
12
+ libsm6 \
13
+ libxext6 \
14
+ libxrender1 \
15
+ && rm -rf /var/lib/apt/lists/*
16
+
17
+ # Copy requirements first to leverage Docker cache
18
+ COPY requirements.txt .
19
+ RUN pip install --no-cache-dir -r requirements.txt
20
+
21
+ # Copy the rest of the application code
22
+ COPY . .
23
+
24
+ # Create a directory for the model if it doesn't exist (matches the path used in app.py)
25
+ RUN mkdir -p /code/model
26
+
27
+ # Expose the port Hugging Face uses
28
+ EXPOSE 7860
29
+
30
+ # Run the application.
31
+ # Gunicorn handles production traffic better than Flask's built-in development server.
32
+ CMD ["gunicorn", "-b", "0.0.0.0:7860", "app:app"]
FingerprintImageEnhancer.py ADDED
@@ -0,0 +1,515 @@
1
+ import numpy as np
2
+ import cv2
3
+ from scipy import signal
4
+ from scipy import ndimage
5
+ import math
6
+ import scipy
7
+
8
+ class FingerprintImageEnhancer(object):
9
+ def __init__(self):
10
+ self.ridge_segment_blksze = 32
11
+ self.ridge_segment_thresh = 0.1
12
+ self.gradient_sigma = 1
13
+ self.block_sigma = 7
14
+ self.orient_smooth_sigma = 7
15
+ self.ridge_freq_blksze = 38
16
+ self.ridge_freq_windsze = 15 #width change (thick -> 15) (default -> 5)
17
+ self.min_wave_length = 7
18
+ self.max_wave_length = 15
19
+ self.kx = 0.65
20
+ self.ky = 0.65
21
+ self.angleInc = 3
22
+ self.ridge_filter_thresh = -3
23
+
24
+
25
+ self._mask = []
26
+ self._normim = []
27
+ self._orientim = []
28
+ self._mean_freq = []
29
+ self._median_freq = []
30
+ self._freq = []
31
+ self._freqim = []
32
+ self._binim = []
33
+
34
+ def __normalise(self, img, mean, std):
35
+ normed = (img - np.mean(img)) / (np.std(img))
36
+ return (normed)
37
+
38
+ def __ridge_segment(self, img):
39
+ # RIDGESEGMENT - Normalises fingerprint image and segments ridge region
40
+ #
41
+ # Function identifies ridge regions of a fingerprint image and returns a
42
+ # mask identifying this region. It also normalises the intensity values of
43
+ # the image so that the ridge regions have zero mean, unit standard
44
+ # deviation.
45
+ #
46
+ # This function breaks the image up into blocks of size blksze x blksze and
47
+ # evaluates the standard deviation in each region. If the standard
48
+ # deviation is above the threshold it is deemed part of the fingerprint.
49
+ # Note that the image is normalised to have zero mean, unit standard
50
+ # deviation prior to performing this process so that the threshold you
51
+ # specify is relative to a unit standard deviation.
52
+ #
53
+ # Usage: [normim, mask, maskind] = ridgesegment(im, blksze, thresh)
54
+ #
55
+ # Arguments: im - Fingerprint image to be segmented.
56
+ # blksze - Block size over which the standard
57
+ # deviation is determined (try a value of 16).
58
+ # thresh - Threshold of standard deviation to decide if a
59
+ # block is a ridge region (Try a value 0.1 - 0.2)
60
+ #
61
+ # Output: normim - Image where the ridge regions are renormalised to
62
+ # have zero mean, unit standard deviation.
63
+ # mask - Mask indicating ridge-like regions of the image,
64
+ # 0 for non ridge regions, 1 for ridge regions.
65
+ # maskind - Vector of indices of locations within the mask.
66
+ #
67
+ # Suggested values for a 500dpi fingerprint image:
68
+ #
69
+ # [normim, mask, maskind] = ridgesegment(im, 16, 0.1)
70
+ #
71
+ # See also: RIDGEORIENT, RIDGEFREQ, RIDGEFILTER
72
+
73
+ ### REFERENCES
74
+
75
+ # Peter Kovesi
76
+ # School of Computer Science & Software Engineering
77
+ # The University of Western Australia
78
+ # pk at csse uwa edu au
79
+ # http://www.csse.uwa.edu.au/~pk
80
+ rows, cols = img.shape
81
+ im = self.__normalise(img, 0, 1) # normalise to get zero mean and unit standard deviation
82
+
83
+ new_rows = int(self.ridge_segment_blksze * np.ceil((float(rows)) / (float(self.ridge_segment_blksze))))
84
+ new_cols = int(self.ridge_segment_blksze * np.ceil((float(cols)) / (float(self.ridge_segment_blksze))))
85
+
86
+ padded_img = np.zeros((new_rows, new_cols))
87
+ stddevim = np.zeros((new_rows, new_cols))
88
+ padded_img[0:rows][:, 0:cols] = im
89
+ for i in range(0, new_rows, self.ridge_segment_blksze):
90
+ for j in range(0, new_cols, self.ridge_segment_blksze):
91
+ block = padded_img[i:i + self.ridge_segment_blksze][:, j:j + self.ridge_segment_blksze]
92
+
93
+ stddevim[i:i + self.ridge_segment_blksze][:, j:j + self.ridge_segment_blksze] = np.std(block) * np.ones(block.shape)
94
+
95
+ stddevim = stddevim[0:rows][:, 0:cols]
96
+ self._mask = stddevim > self.ridge_segment_thresh
97
+ mean_val = np.mean(im[self._mask])
98
+ std_val = np.std(im[self._mask])
99
+ self._normim = (im - mean_val) / (std_val)
100
+
101
+ def __ridge_orient(self):
102
+ # RIDGEORIENT - Estimates the local orientation of ridges in a fingerprint
103
+ #
104
+ # Usage: [orientim, reliability, coherence] = ridgeorientation(im, gradientsigma,...
105
+ # blocksigma, ...
106
+ # orientsmoothsigma)
107
+ #
108
+ # Arguments: im - A normalised input image.
109
+ # gradientsigma - Sigma of the derivative of Gaussian
110
+ # used to compute image gradients.
111
+ # blocksigma - Sigma of the Gaussian weighting used to
112
+ # sum the gradient moments.
113
+ # orientsmoothsigma - Sigma of the Gaussian used to smooth
114
+ # the final orientation vector field.
115
+ # Optional: if omitted it defaults to 0
116
+ #
117
+ # Output: orientim - The orientation image in radians.
118
+ # Orientation values are +ve clockwise
119
+ # and give the direction *along* the
120
+ # ridges.
121
+ # reliability - Measure of the reliability of the
122
+ # orientation measure. This is a value
123
+ # between 0 and 1. I think a value above
124
+ # about 0.5 can be considered 'reliable'.
125
+ # reliability = 1 - Imin./(Imax+.001);
126
+ # coherence - A measure of the degree to which the local
127
+ # area is oriented.
128
+ # coherence = ((Imax-Imin)./(Imax+Imin)).^2;
129
+ #
130
+ # With a fingerprint image at a 'standard' resolution of 500dpi suggested
131
+ # parameter values might be:
132
+ #
133
+ # [orientim, reliability] = ridgeorient(im, 1, 3, 3);
134
+ #
135
+ # See also: RIDGESEGMENT, RIDGEFREQ, RIDGEFILTER
136
+
137
+ ### REFERENCES
138
+
139
+ # May 2003 Original version by Raymond Thai,
140
+ # January 2005 Reworked by Peter Kovesi
141
+ # October 2011 Added coherence computation and orientsmoothsigma made optional
142
+ #
143
+ # School of Computer Science & Software Engineering
144
+ # The University of Western Australia
145
+ # pk at csse uwa edu au
146
+ # http://www.csse.uwa.edu.au/~pk
147
+
148
+ rows,cols = self._normim.shape
149
+ #Calculate image gradients.
150
+ sze = np.fix(6*self.gradient_sigma)
151
+ if np.remainder(sze,2) == 0:
152
+ sze = sze+1
153
+
154
+ gauss = cv2.getGaussianKernel(int(sze),self.gradient_sigma)
155
+ f = gauss * gauss.T
156
+
157
+ fy,fx = np.gradient(f) #Gradient of Gaussian
158
+
159
+ Gx = signal.convolve2d(self._normim, fx, mode='same')
160
+ Gy = signal.convolve2d(self._normim, fy, mode='same')
161
+
162
+ Gxx = np.power(Gx,2)
163
+ Gyy = np.power(Gy,2)
164
+ Gxy = Gx*Gy
165
+
166
+ #Now smooth the covariance data to perform a weighted summation of the data.
167
+ sze = np.fix(6*self.block_sigma)
168
+
169
+ gauss = cv2.getGaussianKernel(int(sze), self.block_sigma)
170
+ f = gauss * gauss.T
171
+
172
+ Gxx = ndimage.convolve(Gxx,f)
173
+ Gyy = ndimage.convolve(Gyy,f)
174
+ Gxy = 2*ndimage.convolve(Gxy,f)
175
+
176
+ # Analytic solution of principal direction
177
+ denom = np.sqrt(np.power(Gxy,2) + np.power((Gxx - Gyy),2)) + np.finfo(float).eps
178
+
179
+ sin2theta = Gxy/denom # Sine and cosine of doubled angles
180
+ cos2theta = (Gxx-Gyy)/denom
181
+
182
+
183
+ if self.orient_smooth_sigma:
184
+ sze = np.fix(6*self.orient_smooth_sigma)
185
+ if np.remainder(sze,2) == 0:
186
+ sze = sze+1
187
+ gauss = cv2.getGaussianKernel(int(sze), self.orient_smooth_sigma)
188
+ f = gauss * gauss.T
189
+ cos2theta = ndimage.convolve(cos2theta,f) # Smoothed sine and cosine of
190
+ sin2theta = ndimage.convolve(sin2theta,f) # doubled angles
191
+
192
+ self._orientim = np.pi/2 + np.arctan2(sin2theta,cos2theta)/2
193
+
194
+ def __ridge_freq(self):
195
+ # RIDGEFREQ - Calculates a ridge frequency image
196
+ #
197
+ # Function to estimate the fingerprint ridge frequency across a
198
+ # fingerprint image. This is done by considering blocks of the image and
199
+ # determining a ridgecount within each block by a call to FREQEST.
200
+ #
201
+ # Usage:
202
+ # [freqim, medianfreq] = ridgefreq(im, mask, orientim, blksze, windsze, ...
203
+ # minWaveLength, maxWaveLength)
204
+ #
205
+ # Arguments:
206
+ # im - Image to be processed.
207
+ # mask - Mask defining ridge regions (obtained from RIDGESEGMENT)
208
+ # orientim - Ridge orientation image (obtained from RIDGEORIENT)
209
+ # blksze - Size of image block to use (say 32)
210
+ # windsze - Window length used to identify peaks. This should be
211
+ # an odd integer, say 3 or 5.
212
+ # minWaveLength, maxWaveLength - Minimum and maximum ridge
213
+ # wavelengths, in pixels, considered acceptable.
214
+ #
215
+ # Output:
216
+ # freqim - An image the same size as im with values set to
217
+ # the estimated ridge spatial frequency within each
218
+ # image block. If a ridge frequency cannot be
219
+ # found within a block, or cannot be found within the
220
+ # limits set by min and max Wavelength, freqim is set
221
+ # to zeros within that block.
222
+ # medianfreq - Median frequency value evaluated over all the
223
+ # valid regions of the image.
224
+ #
225
+ # Suggested parameters for a 500dpi fingerprint image
226
+ # [freqim, medianfreq] = ridgefreq(im,orientim, 32, 5, 5, 15);
227
+ #
228
+
229
+ # See also: RIDGEORIENT, FREQEST, RIDGESEGMENT
230
+
231
+ # Reference:
232
+ # Hong, L., Wan, Y., and Jain, A. K. Fingerprint image enhancement:
233
+ # Algorithm and performance evaluation. IEEE Transactions on Pattern
234
+ # Analysis and Machine Intelligence 20, 8 (1998), 777-789.
235
+
236
+ ### REFERENCES
237
+
238
+ # Peter Kovesi
239
+ # School of Computer Science & Software Engineering
240
+ # The University of Western Australia
241
+ # pk at csse uwa edu au
242
+ # http://www.csse.uwa.edu.au/~pk
243
+
244
+ rows, cols = self._normim.shape
245
+ freq = np.zeros((rows, cols))
246
+
247
+ for r in range(0, rows - self.ridge_freq_blksze, self.ridge_freq_blksze):
248
+ for c in range(0, cols - self.ridge_freq_blksze, self.ridge_freq_blksze):
249
+ blkim = self._normim[r:r + self.ridge_freq_blksze][:, c:c + self.ridge_freq_blksze]
250
+ blkor = self._orientim[r:r + self.ridge_freq_blksze][:, c:c + self.ridge_freq_blksze]
251
+
252
+ freq[r:r + self.ridge_freq_blksze][:, c:c + self.ridge_freq_blksze] = self.__frequest(blkim, blkor)
253
+
254
+ self._freq = freq * self._mask
255
+ freq_1d = np.reshape(self._freq, (1, rows * cols))
256
+ ind = np.where(freq_1d > 0)
257
+
258
+ ind = np.array(ind)
259
+ ind = ind[1, :]
260
+
261
+ non_zero_elems_in_freq = freq_1d[0][ind]
262
+
263
+ self._mean_freq = np.mean(non_zero_elems_in_freq)
264
+ self._median_freq = np.median(non_zero_elems_in_freq) # does not work properly
265
+
266
+ self._freq = self._mean_freq * self._mask
267
+
268
+ def __frequest(self, blkim, blkor):
269
+ # FREQEST - Estimate fingerprint ridge frequency within image block
270
+ #
271
+ # Function to estimate the fingerprint ridge frequency within a small block
272
+ # of a fingerprint image. This function is used by RIDGEFREQ
273
+ #
274
+ # Usage:
275
+ # freqim = freqest(im, orientim, windsze, minWaveLength, maxWaveLength)
276
+ #
277
+ # Arguments:
278
+ # im - Image block to be processed.
279
+ # orientim - Ridge orientation image of image block.
280
+ # windsze - Window length used to identify peaks. This should be
281
+ # an odd integer, say 3 or 5.
282
+ # minWaveLength, maxWaveLength - Minimum and maximum ridge
283
+ # wavelengths, in pixels, considered acceptable.
284
+ #
285
+ # Output:
286
+ # freqim - An image block the same size as im with all values
287
+ # set to the estimated ridge spatial frequency. If a
288
+ # ridge frequency cannot be found, or cannot be found
289
+ # within the limits set by min and max Wavelength,
290
+ # freqim is set to zeros.
291
+ #
292
+ # Suggested parameters for a 500dpi fingerprint image
293
+ # freqim = freqest(im,orientim, 5, 5, 15);
294
+ #
295
+ # See also: RIDGEFREQ, RIDGEORIENT, RIDGESEGMENT
296
+
297
+ ### REFERENCES
298
+
299
+ # Peter Kovesi
300
+ # School of Computer Science & Software Engineering
301
+ # The University of Western Australia
302
+ # pk at csse uwa edu au
303
+ # http://www.csse.uwa.edu.au/~pk
304
+
305
+ rows, cols = np.shape(blkim)
306
+
307
+ # Find mean orientation within the block. This is done by averaging the
308
+ # sines and cosines of the doubled angles before reconstructing the
309
+ # angle again. This avoids wraparound problems at the origin.
310
+
311
+ cosorient = np.mean(np.cos(2 * blkor))
312
+ sinorient = np.mean(np.sin(2 * blkor))
313
+ orient = math.atan2(sinorient, cosorient) / 2
314
+
315
+ # Rotate the image block so that the ridges are vertical
316
+
317
+ # ROT_mat = cv2.getRotationMatrix2D((cols/2,rows/2),orient/np.pi*180 + 90,1)
318
+ # rotim = cv2.warpAffine(im,ROT_mat,(cols,rows))
319
+ rotim = scipy.ndimage.rotate(blkim, orient / np.pi * 180 + 90, axes=(1, 0), reshape=False, order=3,
320
+ mode='nearest')
321
+
322
+ # Now crop the image so that the rotated image does not contain any
323
+ # invalid regions. This prevents the projection down the columns
324
+ # from being mucked up.
325
+
326
+ cropsze = int(np.fix(rows / np.sqrt(2)))
327
+ offset = int(np.fix((rows - cropsze) / 2))
328
+ rotim = rotim[offset:offset + cropsze][:, offset:offset + cropsze]
329
+
330
+ # Sum down the columns to get a projection of the grey values down
331
+ # the ridges.
332
+
333
+ proj = np.sum(rotim, axis=0)
334
+ dilation = scipy.ndimage.grey_dilation(proj, self.ridge_freq_windsze, structure=np.ones(self.ridge_freq_windsze))
335
+
336
+ temp = np.abs(dilation - proj)
337
+
338
+ peak_thresh = 2
339
+
340
+ maxpts = (temp < peak_thresh) & (proj > np.mean(proj))
341
+ maxind = np.where(maxpts)
342
+
343
+ rows_maxind, cols_maxind = np.shape(maxind)
344
+
345
+ # Determine the spatial frequency of the ridges by dividing the
346
+ # distance between the 1st and last peaks by the (No of peaks-1). If no
347
+ # peaks are detected, or the wavelength is outside the allowed bounds,
348
+ # the frequency image is set to 0
349
+
350
+ if (cols_maxind < 2):
351
+ return(np.zeros(blkim.shape))
352
+ else:
353
+ NoOfPeaks = cols_maxind
354
+ waveLength = (maxind[0][cols_maxind - 1] - maxind[0][0]) / (NoOfPeaks - 1)
355
+ if waveLength >= self.min_wave_length and waveLength <= self.max_wave_length:
356
+ return(1 / np.double(waveLength) * np.ones(blkim.shape))
357
+ else:
358
+ return(np.zeros(blkim.shape))
359
+
360
+ def __ridge_filter(self):
361
+ # RIDGEFILTER - enhances fingerprint image via oriented filters
362
+ #
363
+ # Function to enhance fingerprint image via oriented filters
364
+ #
365
+ # Usage:
366
+ # newim = ridgefilter(im, orientim, freqim, kx, ky, showfilter)
367
+ #
368
+ # Arguments:
369
+ # im - Image to be processed.
370
+ # orientim - Ridge orientation image, obtained from RIDGEORIENT.
371
+ # freqim - Ridge frequency image, obtained from RIDGEFREQ.
372
+ # kx, ky - Scale factors specifying the filter sigma relative
373
+ # to the wavelength of the filter. This is done so
374
+ # that the shapes of the filters are invariant to the
375
+ # scale. kx controls the sigma in the x direction
376
+ # which is along the filter, and hence controls the
377
+ # bandwidth of the filter. ky controls the sigma
378
+ # across the filter and hence controls the
379
+ # orientational selectivity of the filter. A value of
380
+ # 0.5 for both kx and ky is a good starting point.
381
+ # showfilter - An optional flag 0/1. When set an image of the
382
+ # largest scale filter is displayed for inspection.
383
+ #
384
+ # Output:
385
+ # newim - The enhanced image
386
+ #
387
+ # See also: RIDGEORIENT, RIDGEFREQ, RIDGESEGMENT
388
+
389
+ # Reference:
390
+ # Hong, L., Wan, Y., and Jain, A. K. Fingerprint image enhancement:
391
+ # Algorithm and performance evaluation. IEEE Transactions on Pattern
392
+ # Analysis and Machine Intelligence 20, 8 (1998), 777-789.
393
+
394
+ ### REFERENCES
395
+
396
+ # Peter Kovesi
397
+ # School of Computer Science & Software Engineering
398
+ # The University of Western Australia
399
+ # pk at csse uwa edu au
400
+ # http://www.csse.uwa.edu.au/~pk
401
+
402
+ im = np.double(self._normim)
403
+ rows, cols = im.shape
404
+ newim = np.zeros((rows, cols))
405
+
406
+ freq_1d = np.reshape(self._freq, (1, rows * cols))
407
+ ind = np.where(freq_1d > 0)
408
+
409
+ ind = np.array(ind)
410
+ ind = ind[1, :]
411
+
412
+ # Round the array of frequencies to the nearest 0.01 to reduce the
413
+ # number of distinct frequencies we have to deal with.
414
+
415
+ non_zero_elems_in_freq = freq_1d[0][ind]
416
+ non_zero_elems_in_freq = np.double(np.round((non_zero_elems_in_freq * 100))) / 100
417
+
418
+ unfreq = np.unique(non_zero_elems_in_freq)
419
+
420
+ # Generate filters corresponding to these distinct frequencies and
421
+ # orientations in 'angleInc' increments.
422
+
423
+ sigmax = 1 / unfreq[0] * self.kx
424
+ sigmay = 1 / unfreq[0] * self.ky
425
+
426
+ sze = int(np.round(3 * np.max([sigmax, sigmay])))
427
+
428
+ x, y = np.meshgrid(np.linspace(-sze, sze, (2 * sze + 1)), np.linspace(-sze, sze, (2 * sze + 1)))
429
+
430
+ reffilter = np.exp(-(((np.power(x, 2)) / (sigmax * sigmax) + (np.power(y, 2)) / (sigmay * sigmay)))) * np.cos(
431
+ 2 * np.pi * unfreq[0] * x) # this is the original gabor filter
432
+
433
+ filt_rows, filt_cols = reffilter.shape
434
+
435
+ angleRange = int(180 / self.angleInc)
436
+
437
+ gabor_filter = np.array(np.zeros((angleRange, filt_rows, filt_cols)))
438
+
439
+ for o in range(0, angleRange):
440
+ # Generate rotated versions of the filter. Note orientation
441
+ # image provides orientation *along* the ridges, hence +90
442
+ # degrees, and imrotate requires angles +ve anticlockwise, hence
443
+ # the minus sign.
444
+
445
+ rot_filt = scipy.ndimage.rotate(reffilter, -(o * self.angleInc + 90), reshape=False)
446
+ gabor_filter[o] = rot_filt
447
+
448
+ # Find indices of matrix points greater than maxsze from the image
449
+ # boundary
450
+
451
+ maxsze = int(sze)
452
+
453
+ temp = self._freq > 0
454
+ validr, validc = np.where(temp)
455
+
456
+ temp1 = validr > maxsze
457
+ temp2 = validr < rows - maxsze
458
+ temp3 = validc > maxsze
459
+ temp4 = validc < cols - maxsze
460
+
461
+ final_temp = temp1 & temp2 & temp3 & temp4
462
+
463
+ finalind = np.where(final_temp)
464
+
465
+ # Convert orientation matrix values from radians to an index value
466
+ # that corresponds to round(degrees/angleInc)
467
+
468
+ maxorientindex = np.round(180 / self.angleInc)
469
+ orientindex = np.round(self._orientim / np.pi * 180 / self.angleInc)
470
+
471
+ # do the filtering
472
+ for i in range(0, rows):
473
+ for j in range(0, cols):
474
+ if (orientindex[i][j] < 1):
475
+ orientindex[i][j] = orientindex[i][j] + maxorientindex
476
+ if (orientindex[i][j] > maxorientindex):
477
+ orientindex[i][j] = orientindex[i][j] - maxorientindex
478
+ finalind_rows, finalind_cols = np.shape(finalind)
479
+ sze = int(sze)
480
+ for k in range(0, finalind_cols):
481
+ r = validr[finalind[0][k]]
482
+ c = validc[finalind[0][k]]
483
+
484
+ img_block = im[r - sze:r + sze + 1][:, c - sze:c + sze + 1]
485
+
486
+ newim[r][c] = np.sum(img_block * gabor_filter[int(orientindex[r][c]) - 1])
487
+
488
+ self._binim = newim < self.ridge_filter_thresh
489
+
490
+ def save_enhanced_image(self, path):
491
+ # saves the enhanced image at the specified path
492
+ cv2.imwrite(path, 255 - (255 * self._binim))
493
+
494
+ # image = (255 * self._binim)
495
+ # print(image)
496
+ # print(255 - image)
497
+
498
+ def enhance(self, img, resize=False):
499
+ # main function to enhance the image.
500
+ # calls all other subroutines
501
+
502
+ if(resize):
503
+ rows, cols = np.shape(img)
504
+ aspect_ratio = np.double(rows) / np.double(cols)
505
+
506
+ new_rows = 450 # randomly selected number
507
+ new_cols = new_rows / aspect_ratio
508
+
509
+ img = cv2.resize(img, (int(new_cols), int(new_rows)))
510
+
511
+ self.__ridge_segment(img) # normalise the image and find a ROI
512
+ self.__ridge_orient() # compute orientation image
513
+ self.__ridge_freq() # compute major frequency of ridges
514
+ self.__ridge_filter() # filter the image using oriented gabor filter
515
+ return(self._binim)
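+
+ # Minimal usage sketch (illustrative only: the image path and output filename are
+ # placeholders, not files shipped with this repository).
+ if __name__ == '__main__':
+     sample = cv2.imread('fingerprint.bmp', cv2.IMREAD_GRAYSCALE)
+     if sample is None:
+         print('Provide a valid grayscale fingerprint image path to run this demo.')
+     else:
+         enhancer = FingerprintImageEnhancer()
+         binary_ridges = enhancer.enhance(sample, resize=True)  # boolean ridge map
+         enhancer.save_enhanced_image('enhanced.jpg')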
README.md CHANGED
@@ -1,10 +1,49 @@
1
  ---
2
- title: Contactless To Contactbased Fp Matching
3
- emoji: 🏃
4
  colorFrom: blue
5
- colorTo: yellow
6
  sdk: docker
 
7
  pinned: false
8
  ---
9
 
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
1
  ---
2
+ title: Fingerprint Matcher
3
+ emoji: 🧬
4
  colorFrom: blue
5
+ colorTo: green
6
  sdk: docker
7
+ app_port: 7860
8
  pinned: false
9
  ---
10
 
11
+ # Fingerprint Comparison API
12
+
13
+ This application provides a REST API to compare two fingerprint images (contactless vs. contact-based) using a Siamese Neural Network.
14
+
15
+ ## 🚀 How to use
16
+
17
+ ### API Endpoint
18
+ **POST** `/compare`
19
+
20
+ ### Parameters
21
+ | Key | Type | Description |
22
+ |---|---|---|
23
+ | `img_1` | File | First fingerprint image |
24
+ | `img_2` | File | Second fingerprint image |
25
+ | `type_1` | Text | `contactless` or `contactbased` (Default: contactless) |
26
+ | `type_2` | Text | `contactless` or `contactbased` (Default: contactbased) |
27
+
28
+ ### Example Request (cURL)
29
+ Example request against a locally running instance (adjust the file paths to your own images):
30
+ ```bash
31
+ curl -X POST http://127.0.0.1:5000/compare -F "img_1=@C:\\SagarKV\\sol9x\\geekykant\\contactless_2d_fingerprint_images\\second_session\\p1\\p6.bmp" -F "type_1=contactless" -F "img_2=@C:\\SagarKV\\sol9x\\geekykant\\contact-based_fingerprints\\second_session\\1_6.jpg" -F "type_2=contactbased"
32
+ ```
33
+
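+ You can also call the endpoint from Python with the `requests` library. The sketch below is illustrative; the file paths are placeholders for your own images:
+
+ ```python
+ import requests
+
+ url = "http://127.0.0.1:5000/compare"  # or your Space URL on port 7860
+ with open("contactless.bmp", "rb") as f1, open("contactbased.jpg", "rb") as f2:
+     response = requests.post(
+         url,
+         files={"img_1": f1, "img_2": f2},
+         data={"type_1": "contactless", "type_2": "contactbased"},
+     )
+ print(response.json())  # {"score": ..., "match_percentage": ..., "is_match": ..., "metadata": {...}}
+ ```
+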
34
+ ## 🛠 Project Structure
35
+
36
+ * `app.py`: Main Flask application.
37
+ * `enhancer.py`: Contains `basicEnhancing` and `advancedEnhancing` logic.
38
+ * `model/12_120_fp160.h5`: The trained Keras model.
39
+
41
+
42
+ ---
43
+
44
+ ### 💡 Final Tips for Success
45
+
46
+ 1. **Gunicorn vs. app.run**: The Dockerfile runs the app with `gunicorn`, which handles concurrent requests much better than the Flask development server started at the bottom of `app.py`.
47
+ 2. **Model Path**: Ensure your model file is located at `./model/12_120_fp160.h5` in your repository.
48
+ 3. **Port 7860**: Hugging Face Spaces defaults to port `7860`; the Dockerfile and the README front matter are set to it so the "Open" button on the Space works.
49
+
app.py ADDED
@@ -0,0 +1,182 @@
1
+ from flask import Flask, request, jsonify
2
+ import cv2
3
+ import numpy as np
4
+ import tensorflow as tf
5
+ from tensorflow.keras.models import load_model
6
+ import os
7
+ from os.path import join, dirname
8
+
9
+ # Import your custom enhancement functions
10
+ from enhancer import basicEnhancing, advancedEnhancing
11
+
12
+ app = Flask(__name__)
13
+
14
+ # --- Configuration & Model Loading ---
15
+ DIRNAME = dirname(__file__)
16
+ MODEL_PATH = join(DIRNAME, 'model', '12_120_fp160.h5')
17
+
18
+ # Load model once at startup
19
+ try:
20
+ model = load_model(MODEL_PATH, compile=False)
21
+ print(f"Model loaded successfully from: {MODEL_PATH}")
22
+ except Exception as e:
23
+ print(f"Critical Error: Could not load model: {e}")
24
+
25
+ def preprocess_fingerprint(image_bytes, image_type):
26
+ """
27
+ image_type: 'contactless' or 'contactbased'
28
+ """
29
+ # Convert bytes to cv2 image
30
+ nparr = np.frombuffer(image_bytes, np.uint8)
31
+ img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
32
+
33
+ if img is None:
34
+ return None
35
+
36
+ # Apply conditional enhancement logic
37
+ if image_type == 'contactless':
38
+ # Contactless needs both basic and advanced
39
+ enhanced = advancedEnhancing(basicEnhancing(img))
40
+ else:
41
+ # Contact-based needs only advanced
42
+ enhanced = advancedEnhancing(img)
43
+
44
+ # Standardize for the model
45
+ resized = cv2.resize(enhanced, (160, 160))
46
+ # Siamese models usually expect grayscale (1 channel) normalized to [0, 1]
47
+ normalized = resized.reshape((1, 160, 160, 1)).astype(np.float32) / 255.0
48
+ return normalized
49
+
50
+ @app.route('/compare', methods=['POST'])
51
+ def compare_fingerprints():
52
+ # Expecting: img_1 (file), img_2 (file), type_1 (text), type_2 (text)
53
+ if 'img_1' not in request.files or 'img_2' not in request.files:
54
+ return jsonify({"error": "Missing image files"}), 400
55
+
56
+ img_1_file = request.files['img_1'].read()
57
+ img_2_file = request.files['img_2'].read()
58
+
59
+ # Default types: 'contactless' for img_1, 'contactbased' for img_2
60
+ type_1 = request.form.get('type_1', 'contactless')
61
+ type_2 = request.form.get('type_2', 'contactbased')
62
+
63
+ # 1. Preprocess both images based on their types
64
+ processed_1 = preprocess_fingerprint(img_1_file, type_1)
65
+ processed_2 = preprocess_fingerprint(img_2_file, type_2)
66
+
67
+ if processed_1 is None or processed_2 is None:
68
+ return jsonify({"error": "Failed to process images"}), 400
69
+
70
+ # 2. Perform Inference
71
+ prediction = model.predict([processed_1, processed_2], verbose=0)
72
+ score = float(prediction[0][0])
73
+ accuracy = round(score * 100, 2)
74
+
75
+ # 3. Return JSON response
76
+ return jsonify({
77
+ "score": score,
78
+ "match_percentage": accuracy,
79
+ "is_match": score > 0.80,
80
+ "metadata": {
81
+ "img_1_type": type_1,
82
+ "img_2_type": type_2
83
+ }
84
+ })
85
+
86
+ if __name__ == '__main__':
87
+ # Use threaded=False if your CV2/Tensorflow version has issues with local threads
88
+ app.run(host='0.0.0.0', port=5000, debug=True)
89
+
90
+
91
+
92
+ # import os
93
+ # import threading
94
+ # import cv2
95
+ # import numpy as np
96
+ # from flask import Flask, request, jsonify
97
+ # from tensorflow.keras.models import load_model
98
+
99
+ # from enhancer import basicEnhancing, advancedEnhancing
100
+
101
+ # app = Flask(__name__)
102
+
103
+ # BASE_DIR = os.path.dirname(__file__)
104
+ # MODEL_PATH = os.path.join(BASE_DIR, "model", "12_120_fp160.h5")
105
+
106
+ # # Load once at startup
107
+ # model = load_model(MODEL_PATH, compile=False)
108
+
109
+ # # Guard predict if you run Flask threaded / multiple threads
110
+ # predict_lock = threading.Lock()
111
+
112
+ # def _read_image_from_filestorage(fs):
113
+ # data = fs.read()
114
+ # arr = np.frombuffer(data, dtype=np.uint8)
115
+ # img = cv2.imdecode(arr, cv2.IMREAD_COLOR) # keep color for enhancers if needed
116
+ # return img
117
+
118
+ # def preprocess(img_bgr, img_type: str):
119
+ # if img_bgr is None:
120
+ # raise ValueError("Invalid image (could not decode).")
121
+
122
+ # img_type = (img_type or "").strip().lower()
123
+ # if img_type not in ("contactless", "contactbased"):
124
+ # raise ValueError("type must be 'contactless' or 'contactbased'.")
125
+
126
+ # # Apply your rule:
127
+ # # contactless: basic + advanced
128
+ # # contactbased: advanced only
129
+ # if img_type == "contactless":
130
+ # en = advancedEnhancing(basicEnhancing(img_bgr))
131
+ # else:
132
+ # en = advancedEnhancing(img_bgr)
133
+
134
+ # # Ensure grayscale uint8
135
+ # if en is None:
136
+ # raise ValueError("Enhancer returned None.")
137
+ # if len(en.shape) > 2:
138
+ # en = cv2.cvtColor(en, cv2.COLOR_BGR2GRAY)
139
+ # en = np.array(en, dtype=np.uint8)
140
+
141
+ # # Resize to model input
142
+ # if en.shape != (160, 160):
143
+ # en = cv2.resize(en, (160, 160))
144
+
145
+ # # Normalize to float32, shape (1,160,160,1)
146
+ # x = en.reshape((1, 160, 160, 1)).astype(np.float32) / 255.0
147
+ # return x
148
+
149
+ # def siamese_score(x1, x2):
150
+ # # model.predict may misbehave under multithreading in some setups,
151
+ # # so keep a lock unless you control workers/threads explicitly.
152
+ # with predict_lock:
153
+ # pred = model.predict([x1, x2], verbose=0)
154
+ # return float(pred[0][0])
155
+
156
+ # @app.route("/api/match", methods=["POST"])
157
+ # def match():
158
+ # if "img1" not in request.files or "img2" not in request.files:
159
+ # return jsonify({"error": "Both img1 and img2 are required."}), 400
160
+
161
+ # img1_type = request.form.get("type1", "")
162
+ # img2_type = request.form.get("type2", "")
163
+
164
+ # try:
165
+ # img1 = _read_image_from_filestorage(request.files["img1"])
166
+ # img2 = _read_image_from_filestorage(request.files["img2"])
167
+
168
+ # x1 = preprocess(img1, img1_type)
169
+ # x2 = preprocess(img2, img2_type)
170
+
171
+ # score = siamese_score(x1, x2)
172
+ # return jsonify({
173
+ # "score": score,
174
+ # "score_percent": round(score * 100.0, 5),
175
+ # "type1": img1_type,
176
+ # "type2": img2_type
177
+ # })
178
+ # except Exception as e:
179
+ # return jsonify({"error": str(e)}), 400
180
+
181
+ # if __name__ == "__main__":
182
+ # app.run(host="0.0.0.0", port=5000, debug=True)
enhancer.py ADDED
@@ -0,0 +1,76 @@
1
+ import cv2
2
+ import numpy as np
3
+ import os
4
+
5
+ # # Fix for older libraries using deprecated numpy aliases
6
+ # float = float
7
+ # int = int
8
+
9
+ from FingerprintImageEnhancer import FingerprintImageEnhancer
10
+
11
+ def basicEnhancing(img):
12
+ if len(img.shape) == 3:
13
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
14
+ ret, img = cv2.threshold(img, 140, 255, cv2.THRESH_TOZERO)
15
+ img = cv2.medianBlur(img, 5)
16
+ th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
17
+ cv2.THRESH_BINARY, 43, 1)
18
+ im = cv2.normalize(th3, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
19
+ res, im = cv2.threshold(im, 64, 255, cv2.THRESH_BINARY)
20
+ cv2.floodFill(im, None, (0,0), 0)
21
+ return im
22
+
23
+ def advancedEnhancing(img):
24
+ if len(img.shape) == 3:
25
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
26
+ image_enhancer = FingerprintImageEnhancer()
27
+ out = image_enhancer.enhance(img)
28
+ # Convert normalized output back to 0-255 uint8 format
29
+ out_image = (255 - (255 * out)).astype(np.uint8)
30
+ return out_image
31
+
32
+
33
+ if __name__ == '__main__':
34
+ # --- Execution Logic ---
35
+ image_path = r"C:\Users\SAGAR KESHAVE\Downloads\fore_tip.jpeg"
36
+ # image_path = r"C:\SagarKV\sol9x\geekykant\contactless_2d_fingerprint_images\first_session\p247\p1.bmp"
37
+ # image_path = r"C:\SagarKV\sol9x\geekykant\contact-based_fingerprints\second_session\13_3.jpg"
38
+ original = cv2.imread(image_path)
39
+
40
+ if original is None:
41
+ print(f"Error: Image not found at {image_path}")
42
+ else:
43
+ print("Processing... Please wait (Advanced enhancement takes time).")
44
+
45
+ # Run Enhancements
46
+ basic_res = basicEnhancing(original)
47
+ advanced_res = advancedEnhancing(basic_res)
48
+
49
+ # 1. Save individual files
50
+ cv2.imwrite("result_basic.jpg", basic_res)
51
+ cv2.imwrite("result_advanced.jpg", advanced_res)
52
+ print("Individual results saved: result_basic.jpg and result_advanced.jpg")
53
+
54
+ # 2. Create and save a Comparison Strip
55
+ h, w = original.shape[:2]
56
+ # Resize for the comparison strip (making them uniform height)
57
+ target_h = 800
58
+ scale = target_h / h
59
+ target_w = int(w * scale)
60
+
61
+ res_orig = cv2.resize(original, (target_w, target_h))
62
+ res_basic = cv2.resize(cv2.cvtColor(basic_res, cv2.COLOR_GRAY2BGR), (target_w, target_h))
63
+ res_adv = cv2.resize(cv2.cvtColor(advanced_res, cv2.COLOR_GRAY2BGR), (target_w, target_h))
64
+
65
+ comparison = np.hstack([res_orig, res_basic, res_adv])
66
+
67
+ output_path = "comparison_output.jpg"
68
+ cv2.imwrite(output_path, comparison)
69
+
70
+ print(f"Comparison strip saved successfully as: {os.path.abspath(output_path)}")
71
+
72
+
73
+
74
+
75
+
76
+
main.py ADDED
@@ -0,0 +1,136 @@
1
+ import os, sys
2
+ import predictor
3
+ import enhancer
4
+ import utils
5
+ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
6
+ from flask import Flask, render_template, request, redirect, url_for, Response
7
+ import cv2, jsonpickle, numpy as np
8
+
9
+ # start flask
10
+ app = Flask(__name__, template_folder='templates')
11
+ app.config['MAX_CONTENT_LENGTH'] = 2 * 1024 * 1024 # 2 MB upload limit
12
+
13
+ # render default webpage
14
+ @app.route('/')
15
+ def home():
16
+ stored_fp = utils.getAllImagesFromDatabase()
17
+ return render_template('database.html', fps=stored_fp, detection_page=False, title="Fingerprint Prediction")
18
+
19
+ # render default analyze page
20
+ @app.route('/analyze')
21
+ def analyze():
22
+ return render_template('analyze.html')
23
+
24
+ # on POST, run the two-image prediction and return the score
25
+ @app.route('/two_image_prediction', methods=['POST', 'GET'])
26
+ def get_data():
27
+ if request.method == 'POST':
28
+ fp1 = request.files['fp1'].read()
29
+ fp2 = request.files['fp2'].read()
30
+
31
+ fp1img = np.frombuffer(fp1,np.uint8)
32
+ fp2img = np.frombuffer(fp2,np.uint8)
33
+
34
+ fp1img = cv2.imdecode(fp1img,cv2.IMREAD_GRAYSCALE)
35
+ fp2img = cv2.imdecode(fp2img,cv2.IMREAD_GRAYSCALE)
36
+
37
+ #resize image into 160 x 160 if not.
38
+ if fp1img.shape != (160, 160):
39
+ fp1img = cv2.resize(fp1img, (160, 160))
40
+ if fp2img.shape != (160, 160):
41
+ fp2img = cv2.resize(fp2img, (160, 160))
42
+
43
+ prediction_result = -1
44
+
45
+ try:
46
+ prediction = predictor.two_image_prediction(fp1img, fp2img) * 100
47
+ prediction_result = float("%.5f" %prediction)
48
+ except Exception as e:
49
+ print(e)
50
+ return Response(response=jsonpickle.encode({'status': 'Prediction Error'}), status=503, mimetype="application/json")
51
+
52
+ response = {'accuracy': prediction_result, 'status': 'up & running'}
53
+ response_pickled = jsonpickle.encode(response)
54
+ return Response(response=response_pickled, status=200, mimetype="application/json")
55
+
56
+ # render database webpage displaying all stored fingerprints
57
+ @app.route('/database')
58
+ def database_home():
59
+ stored_fp = utils.getAllImagesFromDatabase()
60
+ return render_template('database.html', fps=stored_fp, detection_page=True, title="Fingerprint Database")
61
+
62
+ # on POST, enhance the uploaded fingerprint and store it in the database
63
+ @app.route('/upload_to_db', methods=['POST'])
64
+ def store_fingerprint():
65
+ if request.method == 'POST':
66
+ default_label = 'not_labelled'
67
+ label = request.form.get('fp_label', default_label)
68
+ fp = request.files['fp1'].read()
69
+
70
+ fpimg = np.frombuffer(fp, np.uint8)
71
+ fpimg = cv2.imdecode(fpimg, cv2.IMREAD_GRAYSCALE)
72
+
73
+ enhanced_image = enhancer.basicEnhancing(fpimg)
74
+ enhanced_image = enhancer.advancedEnhancing(enhanced_image)
75
+ utils.saveImageToDatabase(label, enhanced_image)
76
+
77
+ response = {'status': 'Saved successfully!'}
78
+ response_pickled = jsonpickle.encode(response)
79
+ return Response(response=response_pickled, status=200, mimetype="application/json")
80
+
81
+ @app.route('/predict_with_db', methods=['POST'])
82
+ def predictWithDb():
83
+ if request.method == 'POST':
84
+ fp = request.files['fp1'].read()
85
+
86
+ fpimg = np.frombuffer(fp, np.uint8)
87
+ fpimg = cv2.imdecode(fpimg, cv2.IMREAD_GRAYSCALE)
88
+
89
+ out1 = enhancer.basicEnhancing(fpimg)
90
+ main_img = enhancer.advancedEnhancing(out1)
91
+ all_db_imgs = utils.getAllImagesFromDatabase()
92
+
93
+ pred_result = None
94
+ try:
95
+ pred_result = predictor.getPredictionDb(main_img, all_db_imgs)
96
+ pred_result['best_pred']['accuracy'] = float("%.5f" %(pred_result['best_pred']['accuracy']))
97
+ except Exception as e:
98
+ print(e)
99
+ return Response(response=jsonpickle.encode({'status': 'Prediction problem! Check image size, maybe.'}), status=503, mimetype="application/json")
100
+
101
+ response_pickled = jsonpickle.encode(pred_result)
102
+ return Response(response=response_pickled, status=200, mimetype="application/json")
103
+
104
+ #get all stored fingerprints database -> json
105
+ @app.route('/get_db')
106
+ def getDb():
107
+ response = {'status': 'successful'}
108
+ response['data'] = utils.getAllImagesFromDatabase()
109
+ response_pickled = jsonpickle.encode(response)
110
+ return Response(response=response_pickled, status=200, mimetype="application/json")
111
+
112
+ @app.route('/test')
113
+ def test():
114
+ result = ""
115
+ for i in range(1, 5):
116
+ img1 = cv2.imread(f'{i}.png', cv2.IMREAD_GRAYSCALE)
117
+ for j in range(1,5):
118
+ img2 = cv2.imread(f'{j}.png', cv2.IMREAD_GRAYSCALE)
119
+ result += f"{i}-{j} --> {predictor.two_image_prediction(img1, img2) * 100} \n"
120
+ result += "\n"
121
+ print(result)
122
+ return Response(response=result, status=200, mimetype="text/plain")
123
+
124
+ @app.errorhandler(413)
125
+ def request_entity_too_large(error):
126
+ return 'File Too Large (Must be less than 2MB)', 413
127
+
128
+ @app.errorhandler(503)
129
+ def request_model_not_responding(error):
130
+ return 'Model is Not responding', 503
131
+
132
+
133
+ if __name__ == "__main__":
134
+
135
+ # app.run(debug=True, host= '0.0.0.0')
136
+ app.run(debug=True)
model/12_120_fp160.h5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34381b3058914524b704359627deea251d7b99f86ac5610ca741e388c4d8bd48
3
+ size 53283376
model/12_120_fp160.keras ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c4c89a125f38abe84a7bb4c5ef1c4623c98b8b77bca6a6d2259e17d4e9dae4b
3
+ size 53293574
model/12_120_fp160_feature.h5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:512b61866a32e9f60acdc5018f0f12a7156432aa6b4168dddd2c5f07a4e304bb
3
+ size 394056
model/12_120_fp160_feature.keras ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:facab1bf3f4ff81925dab7c8df0ebb57fc2f5eaad178e05244222ccb19a72112
3
+ size 399335
model/model_checkpoint.keras ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd0e37a6ac1eb3890335895b9884b28c86d73d754fd89c98b4480fc4278e63a7
3
+ size 53294023
predictor.py ADDED
@@ -0,0 +1,367 @@
1
+ import cv2
2
+ import numpy as np
3
+ import tensorflow as tf
4
+ from tensorflow.keras.models import load_model
5
+ from os.path import join, dirname, exists
6
+ import os
7
+
8
+ # Set up paths relative to this script
9
+ dirname_val = dirname(__file__)
10
+ # Points to your .h5 model
11
+ model_path = join(dirname_val, 'model', '12_120_fp160.h5')
12
+
13
+ # Load the Keras model
14
+ try:
15
+ # compile=False is used because we only need the model for prediction (inference)
16
+ model = load_model(model_path, compile=False)
17
+ print(f"Model loaded successfully from: {model_path}")
18
+ except Exception as e:
19
+ print(f"Error loading model: {e}")
20
+
21
+ # Contactless images testing (The Siamese Comparison)
22
+ def two_image_prediction(np_img1, np_img2):
23
+
24
+ # Ensure images are 160x160 and float32 normalized
25
+ input1_img = np_img1.reshape((1, 160, 160, 1)).astype(np.float32) / 255.0
26
+ input2_img = np_img2.reshape((1, 160, 160, 1)).astype(np.float32) / 255.0
27
+
28
+ # Keras prediction for Siamese (two inputs)
29
+ # verbose=0 keeps the console clean during the database loop
30
+ pred_right = model.predict([input1_img, input2_img], verbose=0)
31
+
32
+ # Optional: Save images for visual debugging
33
+ db_path = join(dirname_val, 'static', 'test_preds')
34
+ if not exists(db_path):
35
+ os.makedirs(db_path)
36
+
37
+ cv2.imwrite(join(db_path, 'last_query.jpg'), np_img1)
38
+ cv2.imwrite(join(db_path, 'last_gallery.jpg'), np_img2)
39
+
40
+ # Return the similarity score (usually index [0][0])
41
+ return pred_right[0][0]
42
+
43
+ # Testing with all other database images
44
+ def getPredictionDb(main_img, all_db_imgs):
45
+ best_pred = 0.00
46
+ best_html_id = "db_null"
47
+ matched_person = "null"
48
+ all_preds = []
49
+
50
+ # Ensure query image is grayscale and correct type
51
+ if len(main_img.shape) > 2:
52
+ main_img = cv2.cvtColor(main_img, cv2.COLOR_BGR2GRAY)
53
+
54
+ main_img = np.array(main_img, dtype='uint8')
55
+
56
+ # Resize to 160x160 if not already
57
+ if main_img.shape != (160, 160):
58
+ main_img = cv2.resize(main_img, (160, 160))
59
+
60
+ # Loop through the database provided by utils.getAllImagesFromDatabase()
61
+ for file in all_db_imgs:
62
+ # Construct path: remove leading '/' if present to join correctly
63
+ clean_url = file['url'].lstrip('/')
64
+ file_url = join(dirname_val, clean_url)
65
+
66
+ db_img = cv2.imread(file_url, cv2.IMREAD_GRAYSCALE)
67
+ if db_img is None:
68
+ continue
69
+
70
+ if db_img.shape != (160, 160):
71
+ db_img = cv2.resize(db_img, (160, 160))
72
+
73
+ # Perform the 1-to-1 match
74
+ score = float(two_image_prediction(main_img, db_img))
75
+
76
+ # Rounding for clean JSON response
77
+ pred_accuracy = float("%.5f" % (score * 100))
78
+
79
+ all_preds.append({
80
+ "html_id": f"db_{file['label']}",
81
+ "accuracy": pred_accuracy
82
+ })
83
+
84
+ # Update best match if this score is higher
85
+ if score > best_pred:
86
+ best_pred = score
87
+ matched_person = file['label']
88
+ best_html_id = f"db_{matched_person}"
89
+
90
+ # Structure the final result
91
+ result = {
92
+ "all_preds": all_preds,
93
+ "best_pred": {
94
+ "html_id": best_html_id,
95
+ "accuracy": float("%.5f" % (best_pred * 100)),
96
+ "matched_person": matched_person
97
+ }
98
+ }
99
+ return result
100
+
101
+
102
+ # if __name__ == "__main__":
103
+ # print("\n--- Starting Real Image Predictor Test ---")
104
+
105
+ # # 1. Update these paths to your real fingerprint images
106
+ # # Example: one of your extracted fingertips or an original image
107
+ # path_image_1 = r"C:\SagarKV\sol9x\geekykant\contactless_2d_fingerprint_images\second_session\p1\p4.bmp"
108
+ # path_image_2 = r"C:\SagarKV\sol9x\geekykant\contactless_2d_fingerprint_images\second_session\p240\p1.bmp"
109
+ # # Use same for 100%, different for comparison
110
+
111
+ # # Load and Preprocess
112
+ # img1 = cv2.imread(path_image_1, cv2.IMREAD_GRAYSCALE)
113
+ # img2 = cv2.imread(path_image_2, cv2.IMREAD_GRAYSCALE)
114
+
115
+ # if img1 is None or img2 is None:
116
+ # print("Error: One of the images could not be loaded. Check your paths!")
117
+ # else:
118
+ # # Resize to 160x160 as required by your model
119
+ # img1_res = cv2.resize(img1, (160, 160))
120
+ # img2_res = cv2.resize(img2, (160, 160))
121
+
122
+ # # 2. Test 1-to-1 Prediction
123
+ # print(f"Comparing: {os.path.basename(path_image_1)} vs {os.path.basename(path_image_2)}")
124
+ # try:
125
+ # score = two_image_prediction(img1_res, img2_res)
126
+ # print(f"Match Score (Raw): {score:.6f}")
127
+ # print(f"Match Confidence: {score * 100:.2f}%")
128
+
129
+ # if score > 0.85: # Typical threshold for Siamese networks
130
+ # print("Result: MATCH FOUND!")
131
+ # else:
132
+ # print("Result: NO MATCH.")
133
+ # except Exception as e:
134
+ # print(f"Error during 1-to-1 test: {e}")
135
+
136
+ # # 3. Test Database Search using your actual DB folder
137
+ # print("\nTesting getPredictionDb with existing static database...")
138
+ # from utils import getAllImagesFromDatabase
139
+
140
+ # try:
141
+ # real_db = getAllImagesFromDatabase()
142
+ # if not real_db:
143
+ # print("Database is empty. Upload some images via the web app first.")
144
+ # else:
145
+ # db_results = getPredictionDb(img1_res, real_db)
146
+ # print(f"Database Search Result:")
147
+ # print(f" Best Match Found: {db_results['best_pred']['matched_person']}")
148
+ # print(f" Accuracy: {db_results['best_pred']['accuracy']}%")
149
+ # except Exception as e:
150
+ # print(f"Error during DB search test: {e}")
151
+
152
+ # print("\n--- Real Test Complete ---")
153
+
154
+ if __name__ == "__main__":
155
+ from enhancer import basicEnhancing, advancedEnhancing
156
+
157
+ print("\n--- Starting Enhanced Image Predictor Test ---")
158
+
159
+ path_image_1 = r"C:\SagarKV\sol9x\geekykant\contact-based_fingerprints\second_session\1_1.jpg"
160
+ path_image_2 = r"C:\SagarKV\sol9x\geekykant\contact-based_fingerprints\second_session\1_4.jpg"
161
+
162
+ # # # Matched - 80%
163
+ # path_image_1 = r"C:\SagarKV\sol9x\geekykant\contactless_2d_fingerprint_images\second_session\p240\p6.bmp"
164
+ # path_image_2 = r"C:\SagarKV\sol9x\geekykant\contact-based_fingerprints\second_session\240_6.jpg"
165
+
166
+ # 1. Load images
167
+ img1 = cv2.imread(path_image_1)
168
+ img2 = cv2.imread(path_image_2)
169
+
170
+ if img1 is None or img2 is None:
171
+ print("Error loading images.")
172
+ else:
173
+ print("Enhancing images to remove background bias...")
174
+ # 2. Process images EXACTLY like your web app does
175
+ # Basic + Advanced (Gabor filters) makes ridges the only visible feature
176
+ en1 = advancedEnhancing(basicEnhancing(img1))
177
+
178
+ en2 = advancedEnhancing(basicEnhancing(img2))
179
+ # en2 = advancedEnhancing(img2) # # IndexError: index 0 is out of bounds for axis 0 with size 0
180
+
181
+ # 3. Resize the CLEANED images
182
+ img1_res = cv2.resize(en1, (160, 160))
183
+
184
+ img2_res = cv2.resize(en2, (160, 160))
185
+
186
+ # 4. Predict
187
+ score = two_image_prediction(img1_res, img2_res)
188
+
189
+ print(f"\nComparing: {os.path.basename(path_image_1)} vs {os.path.basename(path_image_2)}")
190
+ print(f"Match Score: {score * 100:.2f}%")
191
+
192
+ if score > 0.80: # Higher threshold for enhanced images
193
+ print("Result: MATCH FOUND!")
194
+ else:
195
+ print("Result: NO MATCH")
196
+
197
+
198
+ # # Both advance test
199
+ # from tqdm import tqdm
200
+ # import json
201
+ # from enhancer import basicEnhancing, advancedEnhancing
202
+ # import time
203
+ # # --- CONFIGURATION ---
204
+ # CONTACTLESS_BASE = r"C:\SagarKV\sol9x\geekykant\contactless_2d_fingerprint_images\second_session"
205
+ # CONTACT_BASE = r"C:\SagarKV\sol9x\geekykant\contact-based_fingerprints\second_session"
206
+ # THRESHOLD = 0.70
207
+
208
+ # matched_pairs = []
209
+ # total_tested = 0
210
+
211
+ # print(f"\n--- Starting Batch Test: Contactless vs Contact-Based ---")
212
+ # total_start = time.time()
213
+ # # Loop through the 240 subjects
214
+ # for p_id in tqdm(range(61, 121), desc="Processing IDs"):
215
+ # for c_id in range(1, 7):
216
+ # # Your pattern: contactless/p{id}/p6.bmp vs contact/{id}_6.jpg
217
+ # path1 = join(CONTACTLESS_BASE, f"p{p_id}", f"p{c_id}.bmp")
218
+ # path2 = join(CONTACT_BASE, f"{p_id}_{c_id}.jpg")
219
+
220
+ # if not exists(path1) or not exists(path2):
221
+ # # Some IDs might be missing in real datasets
222
+ # continue
223
+
224
+
225
+ # # print(f"[{total_tested}/240] Processing ID: {p_id}...", end="\r")
226
+
227
+ # try:
228
+ # # 1. Load
229
+ # img1 = cv2.imread(path1)
230
+ # img2 = cv2.imread(path2)
231
+
232
+ # # 2. Enhance (Wrapped in enhancer logic to avoid crashes)
233
+ # en1 = advancedEnhancing(basicEnhancing(img1))
234
+ # en2 = advancedEnhancing(img2)
235
+
236
+ # # 3. Resize & Predict
237
+ # img1_res = cv2.resize(en1, (160, 160))
238
+ # img2_res = cv2.resize(en2, (160, 160))
239
+
240
+ # score = float(two_image_prediction(img1_res, img2_res))
241
+ # accuracy = score * 100
242
+ # total_tested += 1
243
+
244
+ # if score > THRESHOLD:
245
+ # match_data = {
246
+ # "id": p_id,
247
+ # "c_id" : c_id,
248
+ # "contactless_path": path1,
249
+ # "contact_path": path2,
250
+ # "accuracy": f"{accuracy:.2f}%",
251
+ # "status": "MATCH FOUND"
252
+ # }
253
+ # matched_pairs.append(match_data)
254
+ # tqdm.write(f"P_ID {p_id} C_ID {c_id}: MATCH FOUND ({accuracy:.2f}%)")
255
+
256
+ # except Exception as e:
257
+ # # print(f"\nError processing ID {p_id}: {e}")
258
+ # tqdm.write(f"Error processing p ID {p_id} c ID {c_id} : {e}")
259
+
260
+ # # Prepare the final data structure
261
+ # results_to_save = {
262
+ # "summary": f"Found {len(matched_pairs)} out of {total_tested} pairs.",
263
+ # "total_tested": total_tested,
264
+ # "total_matched": len(matched_pairs),
265
+ # "results": matched_pairs # Your list of match dictionaries
266
+ # }
267
+
268
+ # # Save individual JSON
269
+ # output_file = "61_120_Both_vs_advance.json"
270
+ # with open(output_file, 'w') as f:
271
+ # json.dump(results_to_save, f, indent=4)
272
+
273
+ # print(f"\n--- Total Time: {time.time() - total_start:.2f}s ---")
274
+ # print(f"Finished. Found {len(matched_pairs)} out of {total_tested} matches.")
275
+
276
+ # print(f"\n--- Batch Test Complete ---")
277
+ # print(f"Total Tested: {total_tested}")
278
+ # print(f"Total Matches Found: {len(matched_pairs)}")
279
+ # print(f"Results saved to: {os.path.abspath(output_file)}")
280
+
281
+
282
+
283
+
284
+
285
+
286
+
287
+ # "-----------------------------------------------------------------------------------"
288
+
289
+ # import os
290
+ # import cv2
291
+ # import json
292
+ # from os.path import join, exists
293
+ # from enhancer import basicEnhancing, advancedEnhancing
294
+
295
+ # # --- CONFIGURATION ---
296
+ # CONTACTLESS_BASE = r"C:\SagarKV\sol9x\geekykant\contactless_2d_fingerprint_images\second_session"
297
+ # CONTACT_BASE = r"C:\SagarKV\sol9x\geekykant\contact-based_fingerprints\second_session"
298
+ # THRESHOLD = 0.70
299
+
300
+ # # Define the enhancement "pipelines"
301
+ # strategies = {
302
+ # "Basic": lambda x: basicEnhancing(x),
303
+ # "Advanced": lambda x: advancedEnhancing(x),
304
+ # "Both": lambda x: advancedEnhancing(basicEnhancing(x))
305
+ # }
306
+
307
+ # def run_test_combination(name1, func1, name2, func2):
308
+ # matched_pairs = []
309
+ # total_tested = 0
310
+ # filename = f"results_{name1}_vs_{name2}.json"
311
+
312
+ # print(f"\n--- Testing: Img1({name1}) vs Img2({name2}) ---")
313
+
314
+ # for p_id in range(1, 241):
315
+ # for c_id in range(1,7):
316
+ # print(f"\n--- Processing ID: ({p_id}) ({c_id})---")
317
+ # path1 = join(CONTACTLESS_BASE, f"p{p_id}", f"p{c_id}.bmp")
318
+ # path2 = join(CONTACT_BASE, f"{p_id}_{c_id}.jpg")
319
+
320
+ # if not exists(path1) or not exists(path2):
321
+ # continue
322
+
323
+ # try:
324
+ # img1 = cv2.imread(path1)
325
+ # img2 = cv2.imread(path2)
326
+
327
+ # # Apply the selected strategy
328
+ # en1 = func1(img1)
329
+ # en2 = func2(img2)
330
+
331
+ # # Resize & Predict
332
+ # img1_res = cv2.resize(en1, (160, 160))
333
+ # img2_res = cv2.resize(en2, (160, 160))
334
+
335
+ # # Assuming two_image_prediction is defined globally
336
+ # score = float(two_image_prediction(img1_res, img2_res))
337
+ # accuracy = score * 100
338
+
339
+ # if score > THRESHOLD:
340
+ # matched_pairs.append({
341
+ # "id": p_id,
342
+ # "contactless_path" : path1,
343
+ # "contact_based_path" : path2,
344
+ # "accuracy": f"{accuracy:.2f}%",
345
+ # "status": "MATCH FOUND"
346
+ # })
347
+ # total_tested += 1
348
+
349
+ # except Exception as e:
350
+ # print(f"\nError processing ID {p_id}: {e}")
351
+ # # pass # Silent fail for speed, or print(e) for debugging
352
+
353
+ # # Save individual JSON
354
+ # with open(filename, 'w') as f:
355
+ # json.dump(matched_pairs, f, indent=4)
356
+
357
+ # print(f"Finished. Found {len(matched_pairs)} out of {total_tested} matches. Saved to {filename}")
358
+
359
+ # # --- EXECUTION LOOP ---
360
+ # # This will run all 16 combinations automatically
361
+ # for name1, func1 in strategies.items():
362
+ # for name2, func2 in strategies.items():
363
+ # filename = f"results_{name1}_vs_{name2}.json"
364
+ # if os.path.exists(filename):
365
+ # print(f"Skipping {filename} bcz exists")
366
+ # continue
367
+ # run_test_combination(name1, func1, name2, func2)
requirements.txt ADDED
@@ -0,0 +1,65 @@
1
+ absl-py==2.3.1
2
+ albucore==0.0.24
3
+ albumentations==2.0.8
4
+ annotated-types==0.7.0
5
+ astunparse==1.6.3
6
+ blinker==1.9.0
7
+ certifi==2026.1.4
8
+ charset-normalizer==3.4.4
9
+ click==8.3.1
10
+ colorama==0.4.6
11
+ fingerprint-feature-extractor==0.0.10
12
+ fingerprint_enhancer==0.0.14
13
+ Flask==3.1.2
14
+ flatbuffers==25.12.19
15
+ gast==0.7.0
16
+ glob2==0.7
17
+ google-pasta==0.2.0
18
+ grpcio==1.76.0
19
+ gunicorn==24.0.0
20
+ h5py==3.15.1
21
+ idna==3.11
22
+ ImageIO==2.37.2
23
+ itsdangerous==2.2.0
24
+ Jinja2==3.1.6
25
+ jsonpickle==4.1.1
26
+ keras==3.13.1
27
+ lazy_loader==0.4
28
+ libclang==18.1.1
29
+ Markdown==3.10.1
30
+ markdown-it-py==4.0.0
31
+ MarkupSafe==3.0.3
32
+ mdurl==0.1.2
33
+ ml_dtypes==0.5.4
34
+ namex==0.1.0
35
+ networkx==3.6.1
36
+ numpy==2.4.1
37
+ opencv-python==4.13.0.90
38
+ opencv-python-headless==4.13.0.90
39
+ opt_einsum==3.4.0
40
+ optree==0.18.0
41
+ packaging==26.0
42
+ pillow==12.1.0
43
+ protobuf==6.33.4
44
+ pydantic==2.12.5
45
+ pydantic_core==2.41.5
46
+ Pygments==2.19.2
47
+ PyYAML==6.0.3
48
+ requests==2.32.5
49
+ rich==14.2.0
50
+ scikit-image==0.26.0
51
+ scipy==1.17.0
52
+ simsimd==6.5.12
53
+ six==1.17.0
54
+ stringzilla==4.6.0
55
+ tensorboard==2.20.0
56
+ tensorboard-data-server==0.7.2
57
+ tensorflow==2.20.0
58
+ termcolor==3.3.0
59
+ tifffile==2026.1.14
60
+ tqdm==4.67.1
61
+ typing-inspection==0.4.2
62
+ typing_extensions==4.15.0
63
+ urllib3==2.6.3
64
+ Werkzeug==3.1.5
65
+ wrapt==2.0.1
static/css/main.css ADDED
File without changes
static/db_fingerprints/test.jpg ADDED
static/js/main.js ADDED
@@ -0,0 +1,250 @@
1
+ $(function() {
2
+ $(document).ready(function() {
3
+ const crop_format = {
4
+ enableExif: true,
5
+ viewport: {
6
+ width: 300,
7
+ height: 300,
8
+ type: 'circle' // circular crop area
9
+ },
10
+ boundary: {
11
+ width: 300,
12
+ height: 300
13
+ }
14
+ };
15
+
16
+ $image_crop1 = $('#f1').croppie(crop_format);
17
+ $image_crop2 = $('#f2').croppie(crop_format);
18
+
19
+ $('#fp1_upload').click(() => {
20
+ $('#fp1').click();
21
+ });
22
+ $('#fp2_upload').click(() => {
23
+ $('#fp2').click();
24
+ });
25
+
26
+ let sample_check1 = false,
27
+ sample_check2 = false
28
+
29
+ $('#fp1_sample').click(() => {
30
+ $image_crop1.croppie('bind', {
31
+ url: '/static/img/sample_fingerprint.jpg',
32
+ zoom: 0
33
+ }).then(function() {
34
+ console.log('sample finger 1 added');
35
+ sample_check1 = true
36
+ });
37
+ });
38
+
39
+ $('#fp2_sample').click(() => {
40
+ $image_crop2.croppie('bind', {
41
+ url: '/static/img/sample_fingerprint.jpg',
42
+ zoom: 0
43
+ }).then(function() {
44
+ console.log('sample finger 2 added');
45
+ sample_check2 = true
46
+ });
47
+ });
48
+
49
+ $('#fp1').on('change', function() {
50
+ var reader = new FileReader();
51
+ reader.onload = function(event) {
52
+ $image_crop1.croppie('bind', {
53
+ url: event.target.result,
54
+ zoom: 0
55
+ }).then(function() {
56
+ console.log('jQuery bind 1 complete');
57
+ });
58
+ }
59
+ reader.readAsDataURL(this.files[0]);
60
+ });
61
+
62
+ $('#fp2').on('change', function() {
63
+ var reader = new FileReader();
64
+ reader.onload = function(event) {
65
+ $image_crop2.croppie('bind', {
66
+ url: event.target.result,
67
+ zoom: 0
68
+ }).then(function() {
69
+ console.log('jQuery bind 2 complete');
70
+ });
71
+ }
72
+ reader.readAsDataURL(this.files[0]);
73
+ });
74
+
75
+ function makeblob(dataURL) {
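+ // Convert a base64 data URL (croppie's canvas output) into a binary Blob so it can be appended to FormData.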
76
+ const BASE64_MARKER = ';base64,';
77
+ const parts = dataURL.split(BASE64_MARKER);
78
+ const contentType = parts[0].split(':')[1];
79
+ const raw = window.atob(parts[1]);
80
+ const rawLength = raw.length;
81
+ const uInt8Array = new Uint8Array(rawLength);
82
+
83
+ for (let i = 0; i < rawLength; ++i) {
84
+ uInt8Array[i] = raw.charCodeAt(i);
85
+ }
86
+
87
+ return new Blob([uInt8Array], {type: contentType});
88
+ }
89
+
90
+ $("#predict_submit").click(e => {
91
+ e.preventDefault();
92
+ sendFileToPredictor();
93
+ });
94
+
95
+ const validateFiles = () => {
96
+ return $('#fp1').prop('files').length != 0 && $('#fp2').prop('files').length != 0
97
+ }
98
+
99
+ const validateSingleFile = () => {
100
+ return $('#fp1').prop('files').length != 0
101
+ }
102
+
103
+ const validateSampleFiles = () => {
104
+ return sample_check1 && sample_check2
105
+ }
106
+
107
+ function sendFileToPredictor() {
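+ // Crop both fingerprints with croppie, convert the crops to Blobs, and POST them to /two_image_prediction; the returned accuracy drives the gauge below.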
108
+ if (!validateFiles() && !validateSampleFiles()) {
109
+ alert("You haven't provided 2 images!");
110
+ return;
111
+ }
112
+
113
+ var formData = new FormData();
114
+ // const fp1 = $('#fp1').prop('files')[0];
115
+ // const fp2 = $('#fp2').prop('files')[0];
116
+ // formData.append('fp1', fp1, fp1.name);
117
+ // formData.append('fp2', fp2, fp2.name);
118
+
119
+ var fp1_cropped,
120
+ fp2_cropped;
121
+
122
+ $image_crop1.croppie('result', {
123
+ type: 'canvas',
124
+ size: 'viewport'
125
+ }).then((crop_img1) => {
126
+ fp1_cropped = makeblob(crop_img1);
127
+ $image_crop2.croppie('result', {
128
+ type: 'canvas',
129
+ size: 'viewport'
130
+ }).then((crop_img2) => {
131
+ fp2_cropped = makeblob(crop_img2);
132
+ }).then(() => {
133
+ formData.append('fp1', fp1_cropped);
134
+ formData.append('fp2', fp2_cropped);
135
+
136
+ $.ajax({
137
+ type: "POST",
138
+ url: "/two_image_prediction",
139
+ data: formData,
140
+ processData: false,
141
+ contentType: false,
142
+ success: function(result) {
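+ // Assumes the endpoint responds with JSON shaped like { accuracy: <0-100 number> }, matched against the 80% threshold below.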
143
+ // console.log(result);
144
+ const accuracy = result['accuracy'];
145
+ if (accuracy > 80) {
146
+ $('#pred_emoji').attr('x-text', "`👍`");
147
+ } else {
148
+ $('#pred_emoji').attr('x-text', "`❌`");
149
+ }
150
+
151
+ $('#accuracy_text').text(`${accuracy}%`);
152
+ $("#accuracy_circle").attr("x-data", `{ circumference: 50 * 2 * Math.PI, percent: ${Math.round(accuracy)} }`);
153
+ },
154
+ error: function(err) {
155
+ alert(err);
156
+ console.log(err);
157
+ }
158
+ });
159
+ })
160
+ });
161
+ }
162
+
163
+ const hideProgresss = () => $('#progress').addClass("hidden");
164
+ const showProgresss = () => $('#progress').removeClass("hidden");
165
+
166
+ // sending a new fingerprint to the database
167
+ $("#upload_to_db").click(e => {
168
+ e.preventDefault();
169
+ sendFingerprintToDatabase();
170
+ });
171
+
172
+ function sendFingerprintToDatabase() {
173
+ if (!validateSingleFile()) {
174
+ alert(`Fingerprint image not uploaded!`);
175
+ return;
176
+ }
177
+
178
+ if (!$('#upload_label').val()) {
179
+ alert(`Fingerprint Label can't be empty!`);
180
+ return;
181
+ }
182
+
183
+ var formData = new FormData();
184
+ const fp1 = $('#fp1').prop('files')[0];
185
+ const label = $('#upload_label').val().trim();
186
+ formData.append('fp1', fp1, fp1.name);
187
+ formData.append('fp_label', label);
188
+
189
+ showProgresss();
190
+ $.ajax({
191
+ type: "POST",
192
+ url: "/upload_to_db",
193
+ data: formData,
194
+ processData: false,
195
+ contentType: false,
196
+ success: function(result) {
197
+ // console.log(result);
198
+ // window.location = '/database'
199
+ location.reload(true);
200
+ },
201
+ error: function(err) {
202
+ alert(err);
203
+ console.log(err);
204
+ hideProgresss();
205
+ }
206
+ });
207
+ }
208
+
209
+ // predicting an uploaded fingerprint against the database
210
+ $("#predict_with_db").click(e => {
211
+ e.preventDefault();
212
+ predictWithDb();
213
+ });
214
+
215
+ function predictWithDb() {
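+ // Upload a single fingerprint to /predict_with_db; the response is expected to carry all_preds (per-entry accuracies) and best_pred, which are used to colour the labels in the grid.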
216
+ if (!validateSingleFile()) {
217
+ alert(`Fingerprint image not uploaded!`);
218
+ return;
219
+ }
220
+
221
+ var formData = new FormData();
222
+ const fp1 = $('#fp1').prop('files')[0];
223
+ formData.append('fp1', fp1, fp1.name);
224
+
225
+ showProgresss();
226
+ $.ajax({
227
+ type: "POST",
228
+ url: "/predict_with_db",
229
+ data: formData,
230
+ processData: false,
231
+ contentType: false,
232
+ success: function(result) {
233
+ console.log(result);
234
+ let {all_preds, best_pred} = result;
235
+ all_preds.forEach( (pred) => {
236
+ $(`#${pred['html_id']}_acc`).text('(' + pred['accuracy'].toFixed(3) + ')').removeClass('text-green-600').addClass('text-red-600');
237
+ });
238
+ $(`#${best_pred['html_id']}_acc`).removeClass('text-red-600').addClass('text-green-600');
239
+ hideProgresss();
240
+ },
241
+ error: function(err) {
242
+ alert(err);
243
+ console.log(err);
244
+ hideProgresss();
245
+ }
246
+ });
247
+ }
248
+
249
+ });
250
+ });
static/logo.png ADDED
static/test_preds/last_gallery.jpg ADDED
static/test_preds/last_query.jpg ADDED
templates/analyze.html ADDED
@@ -0,0 +1,158 @@
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+
4
+ <head>
5
+ <meta charset="UTF-8">
6
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
7
+ <meta http-equiv="X-UA-Compatible" content="ie=edge">
8
+ <meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate" />
9
+ <meta http-equiv="Pragma" content="no-cache" />
10
+ <meta http-equiv="Expires" content="0" />
11
+ <title>Fingerprint DeepLearning</title>
12
+
13
+ <link rel="icon" type="image/png" href="{{ url_for('static', filename='logo.png')}}" />
14
+
15
+ <link rel="stylesheet" href="{{ url_for('static', filename='css/main.css')}}">
16
+ <link href="https://unpkg.com/tailwindcss@^2/dist/tailwind.min.css" rel="stylesheet">
17
+ <link href="https://cdnjs.cloudflare.com/ajax/libs/croppie/2.6.5/croppie.css" rel="stylesheet">
18
+ </head>
19
+
20
+ <body>
21
+
22
+ <!-- This example requires Tailwind CSS v2.0+ -->
23
+ <div class="bg-indigo-600">
24
+ <div class="max-w-7xl mx-auto py-3 px-3 sm:px-6 lg:px-8">
25
+ <div class="flex items-center justify-between flex-wrap">
26
+ <div class="w-0 flex-1 flex items-center">
27
+ <span class="flex p-2 rounded-lg bg-indigo-800">
28
+ <svg class="h-6 w-6 text-white" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true">
29
+ <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M11 5.882V19.24a1.76 1.76 0 01-3.417.592l-2.147-6.15M18 13a3 3 0 100-6M5.436 13.683A4.001 4.001 0 017 6h1.832c4.1 0 7.625-1.234 9.168-3v14c-1.543-1.766-5.067-3-9.168-3H7a3.988 3.988 0 01-1.564-.317z" />
30
+ </svg>
31
+ </span>
32
+ <p class="ml-3 font-medium text-white truncate">
33
+ <span class="md:hidden">
34
+ This project is in beta!
35
+ </span>
36
+ <span class="hidden md:inline">
37
+ This project is currently under development. See more soon!
38
+ </span>
39
+ </p>
40
+ </div>
41
+ </div>
42
+ </div>
43
+ </div>
44
+
45
+
46
+ <!-- <div class="relative bg-white">
47
+ <div class="max-w-7xl mx-auto px-4 sm:px-6">
48
+ <div class="flex justify-between items-center border-b-2 border-gray-100 py-6 md:justify-start md:space-x-10">
49
+ <div class="flex justify-start lg:w-0 lg:flex-1">
50
+ <a href="/">
51
+ <span class="sr-only">Contactless FP</span>
52
+ <img class="h-12 w-auto sm:h-12" src="{{ url_for('static', filename='logo.png')}}" alt="">
53
+ </a>
54
+ </div>
55
+
56
+ <nav class="md:flex space-x-10 text-gray-500 ">
57
+ <span>Fingerprint Detection</span>
58
+ </nav>
59
+
60
+ </div>
61
+ </div> -->
62
+
63
+
64
+ <!-- <div class="absolute top-0 inset-x-0 p-2 transition transform origin-top-right md:hidden">
65
+ <div class="rounded-lg shadow-lg ring-1 ring-black ring-opacity-5 bg-white divide-y-2 divide-gray-50">
66
+ <div class="pt-5 pb-6 px-5">
67
+ <div class="flex items-center justify-between">
68
+ <div>
69
+ <img class="h-8 w-auto" src="https://tailwindui.com/img/logos/workflow-mark-indigo-600.svg" alt="Workflow">
70
+ </div>
71
+ </div>
72
+ </div>
73
+ </div>
74
+ </div>
75
+ </div> -->
76
+
77
+
78
+ <div class="min-h-0 md:py-10 py-5 md:px-60 grid grid-cols-2">
79
+ <div x-data="{photoName: null, photoPreview: null}" class="">
80
+ <!-- Photo File Input -->
81
+ <input id="fp1" type="file" class="hidden">
82
+ <label class="font-mono block text-gray-700 text-base font-bold mb-2 text-center" for="photo">
83
+ Fingerprint 1 <span class="text-red-600"> </span>
84
+ </label>
85
+
86
+ <div class="text-center">
87
+ <!-- Current Profile Photo -->
88
+ <div class="mt-2 rounded-full ">
89
+ <img id="f1" src="https://images.unsplash.com/photo-1531316282956-d38457be0993?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=700&q=80" class="block w-40 h-40 rounded-full m-auto shadow">
90
+ </div>
91
+
92
+ <button id="fp1_upload" type="button" class="font-mono inline-flex items-center px-4 py-2 bg-white border border-blue-300 bg-blue-700 rounded-md font-semibold text-xs text-white uppercase tracking-widest shadow-sm hover focus:outline-none focus:border-blue-400 focus:shadow-outline-blue active:text-gray-800 active:bg-gray-50 transition ease-in-out duration-150 mt-2 ml-3">
93
+ Select New Photo
94
+ </button>
95
+ <button id="fp1_sample" type="button" class="font-mono inline-flex items-center px-4 py-2 bg-white border border-blue-300 bg-blue-700 rounded-md font-semibold text-xs text-white uppercase tracking-widest shadow-sm hover focus:outline-none focus:border-blue-400 focus:shadow-outline-blue active:text-gray-800 active:bg-gray-50 transition ease-in-out duration-150 mt-2 ml-3">
96
+ Use Sample
97
+ </button>
98
+ </div>
99
+ </div>
100
+
101
+ <div x-data="{photoName: null, photoPreview: null}" class="">
102
+ <input id="fp2" type="file" class="hidden">
103
+ <label class="font-mono block text-gray-700 text-base font-bold mb-2 text-center" for="photo">
104
+ Fingerprint 2 <span class="text-red-600"> </span>
105
+ </label>
106
+
107
+ <div class="text-center">
108
+ <!-- Current Profile Photo -->
109
+ <div class="mt-2 rounded-full ">
110
+ <img id="f2" src="https://images.unsplash.com/photo-1531316282956-d38457be0993?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=700&q=80" class="block w-40 h-40 rounded-full m-auto shadow">
111
+ </div>
112
+
113
+ <button id="fp2_upload" type="button" class="font-mono inline-flex items-center px-4 py-2 bg-white border border-blue-300 bg-blue-700 rounded-md font-semibold text-xs text-white uppercase tracking-widest shadow-sm hover focus:outline-none focus:border-blue-400 focus:shadow-outline-blue active:text-gray-800 active:bg-gray-50 transition ease-in-out duration-150 mt-2 ml-3">
114
+ Select New Photo
115
+ </button>
116
+ <button id="fp2_sample" type="button" class="font-mono inline-flex items-center px-4 py-2 bg-white border border-blue-300 bg-blue-700 rounded-md font-semibold text-xs text-white uppercase tracking-widest shadow-sm hover focus:outline-none focus:border-blue-400 focus:shadow-outline-blue active:text-gray-800 active:bg-gray-50 transition ease-in-out duration-150 mt-2 ml-3">
117
+ Use Sample
118
+ </button>
119
+ </div>
120
+ </div>
121
+ </div>
122
+
123
+ <div class="text-white font-mono flex flex-col">
124
+ <div class="flex flex-row-reverse flex-wrap m-auto">
125
+ <button id="predict_submit" class="rounded px-3 py-2 m-1 border-b-4 border-l-2 shadow-lg bg-blue-700 border-blue-800 text-white">
126
+ Predict
127
+ </button>
128
+ </div>
129
+ </div>
130
+
131
+
132
+ <div class="min-h-0 py-10 px-10">
133
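+ <!-- Accuracy gauge: main.js sets the percent on #accuracy_circle and Alpine.js derives the SVG stroke offset from it -->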
+ <div id="accuracy_circle" class="flex items-center m-auto flex-wrap max-w-md px-10 bg-white shadow-xl rounded-2xl h-20" x-data="{ circumference: 50 * 2 * Math.PI, percent: 0 }">
134
+ <div class="flex items-center justify-center -m-6 overflow-hidden bg-white rounded-full">
135
+ <svg class="w-32 h-32 transform translate-x-1 translate-y-1" x-cloak aria-hidden="true">
136
+ <circle class="text-gray-300" stroke-width="10" stroke="currentColor" fill="transparent" r="50" cx="60" cy="60" />
137
+ <circle class="text-blue-700" stroke-width="10" :stroke-dasharray="circumference" :stroke-dashoffset="circumference - percent / 100 * circumference" stroke-linecap="round" stroke="currentColor" fill="transparent" r="50" cx="60" cy="60" />
138
+ </svg>
139
+ <span id="pred_emoji" class="absolute text-2xl text-blue-700" x-text="`🤔`"></span>
140
+ </div>
141
+ <p class="ml-10 font-medium text-gray-600 text-xs md:text-xl">Accuracy</p>
142
+
143
+ <span id="accuracy_text" class="ml-auto text-xs font-medium text-blue-600 md:text-xl">0.00%</span>
144
+ </div>
145
+ </div>
146
+
147
+ </body>
148
+
149
+
150
+
151
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/croppie/2.6.5/croppie.js" defer></script>
152
+ <script src="https://code.jquery.com/jquery-3.6.0.min.js" integrity="sha256-/xUj+3OJU5yExlq6GSYGSHk7tPXikynS7ogEvDej/m4=" crossorigin="anonymous"></script>
153
+ <script src="{{ url_for('static', filename='js/main.js')}}" charset="utf-8"></script>
154
+
155
+ <script src="https://cdn.jsdelivr.net/gh/alpinejs/alpine@v2.8.0/dist/alpine.min.js" defer></script>
156
+ <script src="https://cdn.jsdelivr.net/gh/alpine-collective/alpine-magic-helpers@0.3.x/dist/index.js"></script>
157
+
158
+ </html>
templates/database.html ADDED
@@ -0,0 +1,158 @@
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+
4
+ <head>
5
+ <meta charset="UTF-8">
6
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
7
+ <meta http-equiv="X-UA-Compatible" content="ie=edge">
8
+ <meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate" />
9
+ <meta http-equiv="Pragma" content="no-cache" />
10
+ <meta http-equiv="Expires" content="0" />
11
+ <title>Fingerprint DeepLearning</title>
12
+
13
+ <link rel="icon" type="image/png" href="{{ url_for('static', filename='logo.png')}}" />
14
+
15
+ <link rel="stylesheet" href="{{ url_for('static', filename='css/main.css')}}">
16
+ <link href="https://unpkg.com/tailwindcss@^2/dist/tailwind.min.css" rel="stylesheet">
17
+ <link href="https://cdnjs.cloudflare.com/ajax/libs/croppie/2.6.5/croppie.css" rel="stylesheet">
18
+ </head>
19
+
20
+ <body>
21
+
22
+ <!-- This example requires Tailwind CSS v2.0+ -->
23
+ <div class="bg-indigo-600">
24
+ <div class="max-w-7xl mx-auto py-3 px-3 sm:px-6 lg:px-8">
25
+ <div class="flex items-center justify-between flex-wrap">
26
+ <div class="w-0 flex-1 flex items-center">
27
+ <span class="flex p-2 rounded-lg bg-indigo-800">
28
+ <svg class="h-6 w-6 text-white" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true">
29
+ <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M11 5.882V19.24a1.76 1.76 0 01-3.417.592l-2.147-6.15M18 13a3 3 0 100-6M5.436 13.683A4.001 4.001 0 017 6h1.832c4.1 0 7.625-1.234 9.168-3v14c-1.543-1.766-5.067-3-9.168-3H7a3.988 3.988 0 01-1.564-.317z" />
30
+ </svg>
31
+ </span>
32
+ <p class="ml-3 font-medium text-white truncate">
33
+ <span class="md:hidden">
34
+ This project is on beta version!
35
+ </span>
36
+ <span class="hidden md:inline">
37
+ This project is currectly under development. See more soon!
38
+ </span>
39
+ </p>
40
+ </div>
41
+ </div>
42
+ </div>
43
+ </div>
44
+
45
+
46
+ <div class="relative bg-white">
47
+ <div class="max-w-7xl mx-auto px-4 sm:px-6">
48
+ <div class="flex justify-between items-center border-b-2 border-gray-100 py-6 md:justify-start md:space-x-10">
49
+ <div class="flex m-auto">
50
+ <a href="/">
51
+ <span class="sr-only">Contactless FP</span>
52
+ <img class="h-10 w-auto sm:h-10" src="{{ url_for('static', filename='logo.png')}}" alt="">
53
+ </a>
54
+ <h3 class="font-mono px-2 py-2 text-lg"> {{ title }}</h3>
55
+ </div>
56
+ </div>
57
+ </div>
58
+
59
+
60
+ <div class="min-h-0 md:py-8 py-8 md:px-20 grid grid-cols-2">
61
+ <div x-data="{photoName: null, photoPreview: null}" class="w-2/3">
62
+ <!-- Photo File Input -->
63
+ <input id="fp1" type="file" class="hidden">
64
+ <label class="font-mono block text-gray-700 text-base font-bold mb-2 text-center" for="photo">
65
+ Upload New Fingerprint
66
+ </label>
67
+
68
+ <div class="text-center">
69
+ <div class="mt-2 rounded-full ">
70
+ <img id="f1" src="https://images.unsplash.com/photo-1531316282956-d38457be0993?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=700&q=80" class="block w-40 h-40 rounded-full m-auto shadow">
71
+ </div>
72
+
73
+ {% if detection_page %}
74
+ <div class="mb-3 m-5 pt-0">
75
+ <input id="upload_label" type="text" placeholder="Fingerprint Label" class="px-3 py-3 placeholder-blueGray-300 text-blueGray-600 relative bg-white bg-white rounded text-sm border-2 shadow w-60" />
76
+ </div>
77
+ {% endif %}
78
+
79
+ {% if detection_page %}
80
+ <button id="fp1_upload" type="button" class="font-mono inline-flex items-center px-4 py-2 bg-white border border-blue-300 bg-blue-700 rounded-md font-semibold text-xs text-white uppercase tracking-widest shadow-sm hover focus:outline-none focus:border-blue-400 focus:shadow-outline-blue active:text-gray-800 active:bg-gray-50 transition ease-in-out duration-150 mt-2 ml-3">
81
+ Select Photo
82
+ </button>
83
+ <button id="upload_to_db" type="button" class="font-mono inline-flex items-center px-4 py-2 bg-white border border-blue-300 bg-blue-700 rounded-md font-semibold text-xs text-white uppercase tracking-widest shadow-sm hover focus:outline-none focus:border-blue-400 focus:shadow-outline-blue active:text-gray-800 active:bg-gray-50 transition ease-in-out duration-150 mt-2 ml-3">
84
+ Upload
85
+ </button>
86
+ {% else %}
87
+ <button id="fp1_upload" type="button" class="font-mono inline-flex items-center px-4 py-2 bg-white border border-blue-300 bg-blue-700 rounded-md font-semibold text-xs text-white uppercase tracking-widest shadow-sm hover focus:outline-none focus:border-blue-400 focus:shadow-outline-blue active:text-gray-800 active:bg-gray-50 transition ease-in-out duration-150 mt-2 ml-3">
88
+ Upload Photo
89
+ </button>
90
+ <button id="predict_with_db" type="button" class="font-mono inline-flex items-center px-4 py-2 bg-white border border-blue-300 bg-red-500 rounded-md font-semibold text-xs text-white uppercase tracking-widest shadow-sm hover focus:outline-none focus:border-blue-400 focus:shadow-outline-blue active:text-gray-800 active:bg-gray-50 transition ease-in-out duration-150 mt-2 ml-3">
91
+ Predict
92
+ </button>
93
+ {% endif %}
94
+
95
+ </div>
96
+
97
+ <svg version="1.1" class="hidden" id="progress" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 100 100" enable-background="new 0 0 0 0" xml:space="preserve" style="height: 100px;width: 100px;margin: auto;">
98
+ <path fill="#000" d="M73,50c0-12.7-10.3-23-23-23S27,37.3,27,50 M30.9,50c0-10.5,8.5-19.1,19.1-19.1S69.1,39.5,69.1,50">
99
+ <animateTransform attributeName="transform" attributeType="XML" type="rotate" dur="1s" from="0 50 50" to="360 50 50" repeatCount="indefinite" />
100
+ </path>
101
+ </svg>
102
+
103
+ </div>
104
+
105
+ <!-- rest of right half-->
106
+ <div>
107
+ {% if fps %}
108
+ <div class="text-center">
109
+ <label class="font-mono text-gray-700 text-base font-bold">
110
+ All Database Fingerprints
111
+ </label>
112
+ </div>
113
+ {% endif %}
114
+
115
+ <div class="col-start-2 grid grid-cols-4 gap-4">
116
+ {% if fps %}
117
+ {% for fp in fps %}
118
+ <div class="text-center">
119
+ <div class="mt-2 rounded-full ">
120
+ <img id="db_{{ fp['label'] }}" src="{{ fp['url'] }}" class="block w-25 h-25 rounded-full shadow-md">
121
+ </div>
122
+ <label class="font-mono block font-bold text-gray-700 text-sm mb-2 text-center" for="photo">
123
+ {{ fp['label'] }}
124
+ {% if not detection_page %}
125
+ <span id="db_{{ fp['label'] }}_acc" class="text-red-600 block"></span>
126
+ {% endif %}
127
+ </label>
128
+ </div>
129
+ {% endfor %}
130
+ {% endif %}
131
+
132
+ <!-- <div class="text-center">
133
+ <div class="mt-2 rounded-full ">
134
+ <img id="f1" src="https://images.unsplash.com/photo-1531316282956-d38457be0993?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=700&q=80" class="block w-25 h-25 object-contain rounded-full shadow">
135
+ </div>
136
+ <label class="font-mono block text-gray-700 text-sm mb-2 text-center" for="photo">
137
+ Alice <span class="text-red-600"> </span>
138
+ </label>
139
+ </div> -->
140
+
141
+ </div>
142
+
143
+
144
+ </div>
145
+ </div>
146
+
147
+ </body>
148
+
149
+
150
+
151
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/croppie/2.6.5/croppie.js" defer></script>
152
+ <script src="https://code.jquery.com/jquery-3.6.0.min.js" integrity="sha256-/xUj+3OJU5yExlq6GSYGSHk7tPXikynS7ogEvDej/m4=" crossorigin="anonymous"></script>
153
+ <script src="{{ url_for('static', filename='js/main.js')}}" charset="utf-8"></script>
154
+
155
+ <script src="https://cdn.jsdelivr.net/gh/alpinejs/alpine@v2.8.0/dist/alpine.min.js" defer></script>
156
+ <script src="https://cdn.jsdelivr.net/gh/alpine-collective/alpine-magic-helpers@0.3.x/dist/index.js"></script>
157
+
158
+ </html>
test_len_of_combinations.py ADDED
@@ -0,0 +1,36 @@
1
+ import json
2
+ from pathlib import Path
3
+
4
+ def analyze_json_lists():
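+ """Report how many items the top-level list of each *.json file in the current directory contains."""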
5
+ # Get the current working directory
6
+ current_dir = Path.cwd()
7
+
8
+ # Find all files ending with .json
9
+ json_files = list(current_dir.glob('*.json'))
10
+
11
+ if not json_files:
12
+ print("No .json files found in the current directory.")
13
+ return
14
+
15
+ # Print header
16
+ print(f"{'File Name':<40} | {'List Length'}")
17
+ print("-" * 55)
18
+
19
+ for file_path in json_files:
20
+ try:
21
+ with open(file_path, 'r', encoding='utf-8') as f:
22
+ data = json.load(f)
23
+
24
+ # Check if the root element is actually a list
25
+ if isinstance(data, list):
26
+ print(f"{file_path.name:<40} | {len(data)}")
27
+ else:
28
+ print(f"{file_path.name:<40} | Not a list (Type: {type(data).__name__})")
29
+
30
+ except json.JSONDecodeError:
31
+ print(f"{file_path.name:<40} | Error: Invalid JSON format")
32
+ except Exception as e:
33
+ print(f"{file_path.name:<40} | Error: {e}")
34
+
35
+ if __name__ == "__main__":
36
+ analyze_json_lists()
utils.py ADDED
@@ -0,0 +1,47 @@
1
+ from os.path import join, dirname, basename, exists, normpath
2
+ from os import makedirs
3
+ from glob import glob
4
+ import cv2
5
+ import numpy as np
6
+
7
+ # Use normpath to ensure Windows/Linux compatibility
8
+ dirname_val = normpath(dirname(__file__))
9
+ db_path = join(dirname_val, 'static', 'db_fingerprints')
10
+
11
+ def getAllImagesFromDatabase():
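+ """Return a sorted list of {"id", "label", "url"} entries for every image found in static/db_fingerprints."""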
12
+ fingerprint_db = []
13
+ # Search for all common image formats
14
+ for ext in ('*.png', '*.jpg', '*.jpeg', '*.bmp'):
15
+ fingerprint_db.extend(glob(join(db_path, ext)))
16
+
17
+ db_json = []
18
+ # Use enumerate for cleaner ID generation
19
+ for i, file_path in enumerate(fingerprint_db, 1):
20
+ # 1. Extract the label (filename without extension)
21
+ name = basename(file_path).rsplit('.', 1)[0]
22
+
23
+ # 2. Fix the URL pathing
24
+ # We ensure it starts with a forward slash and uses forward slashes
25
+ # for Flask template compatibility
26
+ relative_url = file_path.replace(dirname_val, '').replace('\\', '/')
27
+
28
+ db_json.append({
29
+ "id": i,
30
+ "label": name,
31
+ "url": relative_url
32
+ })
33
+
34
+ # Sort by label so the database view looks organized
35
+ return sorted(db_json, key=lambda k: k['label'])
36
+
37
+ def saveImageToDatabase(label, img):
38
+ """Saves the enhanced fingerprint to the database folder."""
39
+ if not exists(db_path):
40
+ makedirs(db_path)
41
+
42
+ # Ensure filename is safe (remove spaces/special chars if needed)
43
+ safe_label = "".join([c for c in label if c.isalnum() or c in (' ', '-', '_')]).strip()
44
+ save_path = join(db_path, f"{safe_label}.jpg")
45
+
46
+ cv2.imwrite(save_path, img)
47
+ return save_path