Spaces:
Sleeping
Sleeping
| import cv2 | |
| import numpy as np | |
| import tensorflow as tf | |
| from tensorflow.keras.models import load_model | |
| from os.path import join, dirname, exists | |
| import os | |
# Set up paths relative to this script so the app works from any CWD.
dirname_val = dirname(__file__)

# Path to the trained Siamese .h5 model (expects 160x160 grayscale inputs).
model_path = join(dirname_val, 'model', '12_120_fp160.h5')

# Load the Keras model once at import time.
# compile=False: the model is only used for prediction (inference), so the
# optimizer/loss configuration stored in the .h5 file is not needed.
try:
    model = load_model(model_path, compile=False)
    print(f"Model loaded successfully from: {model_path}")
except Exception as e:
    # Bind `model` to None instead of leaving it undefined; otherwise the
    # first prediction would die with a confusing NameError far from here.
    model = None
    print(f"Error loading model: {e}")
| # Contactless images testing (The Siamese Comparison) | |
| def two_image_prediction(np_img1, np_img2): | |
| # Ensure images are 160x160 and float32 normalized | |
| input1_img = np_img1.reshape((1, 160, 160, 1)).astype(np.float32) / 255.0 | |
| input2_img = np_img2.reshape((1, 160, 160, 1)).astype(np.float32) / 255.0 | |
| # Keras prediction for Siamese (two inputs) | |
| # verbose=0 keeps the console clean during the database loop | |
| pred_right = model.predict([input1_img, input2_img], verbose=0) | |
| # Optional: Save images for visual debugging | |
| db_path = join(dirname_val, 'static', 'test_preds') | |
| if not exists(db_path): | |
| os.makedirs(db_path) | |
| cv2.imwrite(join(db_path, 'last_query.jpg'), np_img1) | |
| cv2.imwrite(join(db_path, 'last_gallery.jpg'), np_img2) | |
| # Return the similarity score (usually index [0][0]) | |
| return pred_right[0][0] | |
| # Testing with all other database images | |
| def getPredictionDb(main_img, all_db_imgs): | |
| best_pred = 0.00 | |
| best_html_id = "db_null" | |
| matched_person = "null" | |
| all_preds = [] | |
| # Ensure query image is grayscale and correct type | |
| if len(main_img.shape) > 2: | |
| main_img = cv2.cvtColor(main_img, cv2.COLOR_BGR2GRAY) | |
| main_img = np.array(main_img, dtype='uint8') | |
| # Resize to 160x160 if not already | |
| if main_img.shape != (160, 160): | |
| main_img = cv2.resize(main_img, (160, 160)) | |
| # Loop through the database provided by utils.getAllImagesFromDatabase() | |
| for file in all_db_imgs: | |
| # Construct path: remove leading '/' if present to join correctly | |
| clean_url = file['url'].lstrip('/') | |
| file_url = join(dirname_val, clean_url) | |
| db_img = cv2.imread(file_url, cv2.IMREAD_GRAYSCALE) | |
| if db_img is None: | |
| continue | |
| if db_img.shape != (160, 160): | |
| db_img = cv2.resize(db_img, (160, 160)) | |
| # Perform the 1-to-1 match | |
| score = float(two_image_prediction(main_img, db_img)) | |
| # Rounding for clean JSON response | |
| pred_accuracy = float("%.5f" % (score * 100)) | |
| all_preds.append({ | |
| "html_id": f"db_{file['label']}", | |
| "accuracy": pred_accuracy | |
| }) | |
| # Update best match if this score is higher | |
| if score > best_pred: | |
| best_pred = score | |
| matched_person = file['label'] | |
| best_html_id = f"db_{matched_person}" | |
| # Structure the final result | |
| result = { | |
| "all_preds": all_preds, | |
| "best_pred": { | |
| "html_id": best_html_id, | |
| "accuracy": float("%.5f" % (best_pred * 100)), | |
| "matched_person": matched_person | |
| } | |
| } | |
| return result | |
| # if __name__ == "__main__": | |
| # print("\n--- Starting Real Image Predictor Test ---") | |
| # # 1. Update these paths to your real fingerprint images | |
| # # Example: one of your extracted fingertips or an original image | |
| # path_image_1 = r"C:\SagarKV\sol9x\geekykant\contactless_2d_fingerprint_images\second_session\p1\p4.bmp" | |
| # path_image_2 = r"C:\SagarKV\sol9x\geekykant\contactless_2d_fingerprint_images\second_session\p240\p1.bmp" | |
| # # Use same for 100%, different for comparison | |
| # # Load and Preprocess | |
| # img1 = cv2.imread(path_image_1, cv2.IMREAD_GRAYSCALE) | |
| # img2 = cv2.imread(path_image_2, cv2.IMREAD_GRAYSCALE) | |
| # if img1 is None or img2 is None: | |
| # print("Error: One of the images could not be loaded. Check your paths!") | |
| # else: | |
| # # Resize to 160x160 as required by your model | |
| # img1_res = cv2.resize(img1, (160, 160)) | |
| # img2_res = cv2.resize(img2, (160, 160)) | |
| # # 2. Test 1-to-1 Prediction | |
| # print(f"Comparing: {os.path.basename(path_image_1)} vs {os.path.basename(path_image_2)}") | |
| # try: | |
| # score = two_image_prediction(img1_res, img2_res) | |
| # print(f"Match Score (Raw): {score:.6f}") | |
| # print(f"Match Confidence: {score * 100:.2f}%") | |
| # if score > 0.85: # Typical threshold for Siamese networks | |
| # print("Result: MATCH FOUND!") | |
| # else: | |
| # print("Result: NO MATCH.") | |
| # except Exception as e: | |
| # print(f"Error during 1-to-1 test: {e}") | |
| # # 3. Test Database Search using your actual DB folder | |
| # print("\nTesting getPredictionDb with existing static database...") | |
| # from utils import getAllImagesFromDatabase | |
| # try: | |
| # real_db = getAllImagesFromDatabase() | |
| # if not real_db: | |
| # print("Database is empty. Upload some images via the web app first.") | |
| # else: | |
| # db_results = getPredictionDb(img1_res, real_db) | |
| # print(f"Database Search Result:") | |
| # print(f" Best Match Found: {db_results['best_pred']['matched_person']}") | |
| # print(f" Accuracy: {db_results['best_pred']['accuracy']}%") | |
| # except Exception as e: | |
| # print(f"Error during DB search test: {e}") | |
| # print("\n--- Real Test Complete ---") | |
if __name__ == "__main__":
    from enhancer import basicEnhancing, advancedEnhancing

    print("\n--- Starting Enhanced Image Predictor Test ---")

    path_image_1 = r"C:\SagarKV\sol9x\geekykant\contact-based_fingerprints\second_session\1_1.jpg"
    path_image_2 = r"C:\SagarKV\sol9x\geekykant\contact-based_fingerprints\second_session\1_4.jpg"
    # # # Matched - 80%
    # path_image_1 = r"C:\SagarKV\sol9x\geekykant\contactless_2d_fingerprint_images\second_session\p240\p6.bmp"
    # path_image_2 = r"C:\SagarKV\sol9x\geekykant\contact-based_fingerprints\second_session\240_6.jpg"

    # Load both test images from disk (None on failure).
    raw_pair = [cv2.imread(path) for path in (path_image_1, path_image_2)]

    if any(img is None for img in raw_pair):
        print("Error loading images.")
    else:
        print("Enhancing images to remove background bias...")
        # Run the same pipeline the web app uses: basic enhancement followed
        # by advanced (Gabor) filtering so ridges are the only visible feature.
        enhanced_pair = [advancedEnhancing(basicEnhancing(img)) for img in raw_pair]
        # Resize the cleaned images to the model's 160x160 input size.
        query, gallery = (cv2.resize(img, (160, 160)) for img in enhanced_pair)

        # Run the Siamese 1-to-1 prediction on the enhanced pair.
        score = two_image_prediction(query, gallery)

        print(f"\nComparing: {os.path.basename(path_image_1)} vs {os.path.basename(path_image_2)}")
        print(f"Match Score: {score * 100:.2f}%")
        # Higher threshold for enhanced images.
        if score > 0.80:
            print("Result: MATCH FOUND!")
        else:
            print("Result: NO MATCH")
| # # Both advance test | |
| # from tqdm import tqdm | |
| # import json | |
| # from enhancer import basicEnhancing, advancedEnhancing | |
| # import time | |
| # # --- CONFIGURATION --- | |
| # CONTACTLESS_BASE = r"C:\SagarKV\sol9x\geekykant\contactless_2d_fingerprint_images\second_session" | |
| # CONTACT_BASE = r"C:\SagarKV\sol9x\geekykant\contact-based_fingerprints\second_session" | |
| # THRESHOLD = 0.70 | |
| # matched_pairs = [] | |
| # total_tested = 0 | |
| # print(f"\n--- Starting Batch Test: Contactless vs Contact-Based ---") | |
| # total_start = time.time() | |
| # # Loop through the 240 subjects | |
| # for p_id in tqdm(range(61, 121), desc="Processing IDs"): | |
| # for c_id in range(1, 7): | |
| # # Your pattern: contactless/p{id}/p6.bmp vs contact/{id}_6.jpg | |
| # path1 = join(CONTACTLESS_BASE, f"p{p_id}", f"p{c_id}.bmp") | |
| # path2 = join(CONTACT_BASE, f"{p_id}_{c_id}.jpg") | |
| # if not exists(path1) or not exists(path2): | |
| # # Some IDs might be missing in real datasets | |
| # continue | |
| # # print(f"[{total_tested}/240] Processing ID: {p_id}...", end="\r") | |
| # try: | |
| # # 1. Load | |
| # img1 = cv2.imread(path1) | |
| # img2 = cv2.imread(path2) | |
| # # 2. Enhance (Wrapped in enhancer logic to avoid crashes) | |
| # en1 = advancedEnhancing(basicEnhancing(img1)) | |
| # en2 = advancedEnhancing(img2) | |
| # # 3. Resize & Predict | |
| # img1_res = cv2.resize(en1, (160, 160)) | |
| # img2_res = cv2.resize(en2, (160, 160)) | |
| # score = float(two_image_prediction(img1_res, img2_res)) | |
| # accuracy = score * 100 | |
| # total_tested += 1 | |
| # if score > THRESHOLD: | |
| # match_data = { | |
| # "id": p_id, | |
| # "c_id" : c_id, | |
| # "contactless_path": path1, | |
| # "contact_path": path2, | |
| # "accuracy": f"{accuracy:.2f}%", | |
| # "status": "MATCH FOUND" | |
| # } | |
| # matched_pairs.append(match_data) | |
| # tqdm.write(f"P_ID {p_id} C_ID {c_id}: MATCH FOUND ({accuracy:.2f}%)") | |
| # except Exception as e: | |
| # # print(f"\nError processing ID {p_id}: {e}") | |
| # tqdm.write(f"Error processing p ID {p_id} c ID {c_id} : {e}") | |
| # # Prepare the final data structure | |
| # results_to_save = { | |
| # "summary": f"Found {len(matched_pairs)} out of {total_tested} pairs.", | |
| # "total_tested": total_tested, | |
| # "total_matched": len(matched_pairs), | |
| # "results": matched_pairs # Your list of match dictionaries | |
| # } | |
| # # Save individual JSON | |
| # output_file = "61_120_Both_vs_advance.json" | |
| # with open(output_file, 'w') as f: | |
| # json.dump(results_to_save, f, indent=4) | |
| # print(f"\n--- Total Time: {time.time() - total_start:.2f}s ---") | |
| # print(f"Finished. Found {len(matched_pairs)} out of {total_tested} matches.") | |
| # print(f"\n--- Batch Test Complete ---") | |
| # print(f"Total Tested: {total_tested}") | |
| # print(f"Total Matches Found: {len(matched_pairs)}") | |
| # print(f"Results saved to: {os.path.abspath(output_file)}") | |
| # "-----------------------------------------------------------------------------------" | |
| # import os | |
| # import cv2 | |
| # import json | |
| # from os.path import join, exists | |
| # from enhancer import basicEnhancing, advancedEnhancing | |
| # # --- CONFIGURATION --- | |
| # CONTACTLESS_BASE = r"C:\SagarKV\sol9x\geekykant\contactless_2d_fingerprint_images\second_session" | |
| # CONTACT_BASE = r"C:\SagarKV\sol9x\geekykant\contact-based_fingerprints\second_session" | |
| # THRESHOLD = 0.70 | |
| # # Define the enhancement "pipelines" | |
| # strategies = { | |
| # "Basic": lambda x: basicEnhancing(x), | |
| # "Advanced": lambda x: advancedEnhancing(x), | |
| # "Both": lambda x: advancedEnhancing(basicEnhancing(x)) | |
| # } | |
| # def run_test_combination(name1, func1, name2, func2): | |
| # matched_pairs = [] | |
| # total_tested = 0 | |
| # filename = f"results_{name1}_vs_{name2}.json" | |
| # print(f"\n--- Testing: Img1({name1}) vs Img2({name2}) ---") | |
| # for p_id in range(1, 241): | |
| # for c_id in range(1,7): | |
| # print(f"\n--- Processing ID: ({p_id}) ({c_id})---") | |
| # path1 = join(CONTACTLESS_BASE, f"p{p_id}", f"p{c_id}.bmp") | |
| # path2 = join(CONTACT_BASE, f"{p_id}_{c_id}.jpg") | |
| # if not exists(path1) or not exists(path2): | |
| # continue | |
| # try: | |
| # img1 = cv2.imread(path1) | |
| # img2 = cv2.imread(path2) | |
| # # Apply the selected strategy | |
| # en1 = func1(img1) | |
| # en2 = func2(img2) | |
| # # Resize & Predict | |
| # img1_res = cv2.resize(en1, (160, 160)) | |
| # img2_res = cv2.resize(en2, (160, 160)) | |
| # # Assuming two_image_prediction is defined globally | |
| # score = float(two_image_prediction(img1_res, img2_res)) | |
| # accuracy = score * 100 | |
| # if score > THRESHOLD: | |
| # matched_pairs.append({ | |
| # "id": p_id, | |
| # "contactless_path" : path1, | |
| # "contact_based_path" : path2, | |
| # "accuracy": f"{accuracy:.2f}%", | |
| # "status": "MATCH FOUND" | |
| # }) | |
| # total_tested += 1 | |
| # except Exception as e: | |
| # print(f"\nError processing ID {p_id}: {e}") | |
| # # pass # Silent fail for speed, or print(e) for debugging | |
| # # Save individual JSON | |
| # with open(filename, 'w') as f: | |
| # json.dump(matched_pairs, f, indent=4) | |
| # print(f"Finished. Found {len(matched_pairs)} out of {total_tested} matches. Saved to {filename}") | |
| # # --- EXECUTION LOOP --- | |
| # # This will run all 16 combinations automatically | |
| # for name1, func1 in strategies.items(): | |
| # for name2, func2 in strategies.items(): | |
| # filename = f"results_{name1}_vs_{name2}.json" | |
| # if os.path.exists(filename): | |
| # print(f"Skipping {filename} because it already exists") | |
| # continue | |
| # run_test_combination(name1, func1, name2, func2) |