Spaces:
Sleeping
Sleeping
Commit
·
e735bf3
1
Parent(s):
ea0b3ec
initial setup
Browse files- Dockerfile +9 -0
- README.md +49 -6
- app.py +93 -0
- finger_detector.py +67 -0
- finger_quality.py +388 -0
- main.py +143 -0
- models.py +47 -0
- quality_analyzer.py +123 -0
- requirements.txt +3 -0
- results/finger_quality_result.json +49 -0
- utils.py +31 -0
- visualizer.py +50 -0
Dockerfile
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Slim Python base matching the version the app was developed against.
FROM python:3.10-slim

WORKDIR /app

# Copy and install dependencies BEFORE the source code so Docker's layer
# cache skips the pip install when only application code changes.
COPY requirements.txt /app/requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application source.
COPY . /app

# Flask app listens on 7860 (the Hugging Face Spaces app_port).
EXPOSE 7860
CMD ["python", "app.py"]
|
README.md
CHANGED
|
@@ -1,10 +1,53 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji: 📉
|
| 4 |
-
colorFrom: purple
|
| 5 |
-
colorTo: pink
|
| 6 |
sdk: docker
|
| 7 |
-
|
| 8 |
---
|
| 9 |
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: Flask Finger Quality API
|
|
|
|
|
|
|
|
|
|
| 3 |
sdk: docker
|
| 4 |
+
app_port: 7860
|
| 5 |
---
|
| 6 |
|
| 7 |
+
Here is how to read each score and why some “good-looking” images still fail overall.
|
| 8 |
+
|
| 9 |
+
***
|
| 10 |
+
|
| 11 |
+
## 1. What each score means
|
| 12 |
+
|
| 13 |
+
These ranges are based on the thresholds used in the code you ran (blur_min=80, illum_min=60, illum_max=200, coverage_min=0.10, orientation_max_deviation=30, vertical_expected=True).
|
| 14 |
+
|
| 15 |
+
### Blur score (variance of Laplacian)
|
| 16 |
+
|
| 17 |
+
- Metric: “How sharp is the finger texture?” Higher is better.
|
| 18 |
+
- Rough scale (with current threshold 80):
|
| 19 |
+
- 0–30 → Very blurry (fail).
|
| 20 |
+
- 30–80 → Soft / borderline sharpness (below the 80 threshold, so fail, though it might be visually OK).
|
| 21 |
+
- 80–200 → Good sharpness for most mobile cases (pass).
|
| 22 |
+
- 200+ → Very sharp (pass).
|
| 23 |
+
|
| 24 |
+
### Illumination score (mean grayscale in ROI)
|
| 25 |
+
|
| 26 |
+
- Metric: “Average brightness of the finger region.”
|
| 27 |
+
- Thresholds now: 60 ≤ illumination ≤ 200.
|
| 28 |
+
- Rough interpretation:
|
| 29 |
+
- < 60 → Too dark / underexposed (fail).
|
| 30 |
+
- 60–200 → Acceptable brightness (pass).
|
| 31 |
+
- > 200 → Too bright / overexposed (fail).
|
| 32 |
+
|
| 33 |
+
### Coverage ratio
|
| 34 |
+
|
| 35 |
+
- Metric: “Fraction of the full frame covered by segmented finger pixels.”
|
| 36 |
+
- Threshold now: coverage ≥ 0.10 (10% of frame).
|
| 37 |
+
- Rough interpretation:
|
| 38 |
+
- < 0.10 → Finger too small, far away, or segmentation bad (fail).
|
| 39 |
+
- 0.10–0.20 → Minimum acceptable; finger is present but not very large.
|
| 40 |
+
- ≥ 0.20–0.40 → Good coverage for biometric capture (pass).
|
| 41 |
+
|
| 42 |
+
### Orientation angle
|
| 43 |
+
|
| 44 |
+
- Metric: PCA angle of the main axis of the finger ROI, in degrees relative to x‑axis, normalized to about −90° to 90°.
|
| 45 |
+
- With `vertical_expected=True`, the code expects the finger to be **vertical** (angle near ±90°).
|
| 46 |
+
- Threshold now: within 30° of the expected orientation.
|
| 47 |
+
- If vertical expected:
|
| 48 |
+
- Angles near +90° or −90° → good.
|
| 49 |
+
- Deviation of more than 30° from vertical → fail (30° is the allowed deviation stated above).
|
| 50 |
+
- If horizontal finger is desired, you would set `vertical_expected=False` to expect angles near 0°.
|
| 51 |
+
|
| 52 |
+
***
|
| 53 |
+
|
app.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from flask import Flask, request, jsonify
|
| 2 |
+
import numpy as np
|
| 3 |
+
import cv2
|
| 4 |
+
|
| 5 |
+
# import your existing code
|
| 6 |
+
from main import FingerQualityAssessor # or wherever the class lives
|
| 7 |
+
from quality_analyzer import QualityConfig
|
| 8 |
+
|
| 9 |
+
app = Flask(__name__)

# Module-level singletons: the config and assessor are built once at import
# time so no model/config construction happens per request.
CONFIG = QualityConfig(
    target_width=640,
    blur_min=60.0,
    illum_min=50.0,
    illum_max=200.0,
    coverage_min=0.10,
    orientation_max_deviation=45.0,
    vertical_expected=True,
    overall_quality_threshold=0.70,
)
ASSESSOR = FingerQualityAssessor(CONFIG)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _decode_uploaded_image(file_storage):
    """Decode a Werkzeug FileStorage upload into a BGR OpenCV image.

    :param file_storage: object from ``request.files`` exposing ``read()``.
    :raises ValueError: when the upload is empty or cannot be decoded.
    :return: HxWx3 uint8 BGR image.
    """
    raw = file_storage.read()
    if not raw:
        raise ValueError("Empty file")

    buffer = np.frombuffer(raw, np.uint8)
    decoded = cv2.imdecode(buffer, cv2.IMREAD_COLOR)  # BGR image
    if decoded is None:
        raise ValueError("Could not decode image (invalid/unsupported format)")
    return decoded
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@app.post("/api/v1/finger-quality")
def finger_quality():
    """Assess finger image quality.

    Expects: multipart/form-data with file field name 'image'.
    Returns: JSON with the per-metric result + user feedback.
    Status codes: 200 on success, 400 on client error, 500 on server error.
    """
    if "image" not in request.files:
        return jsonify({"error": "Missing file field 'image'"}), 400

    try:
        bgr = _decode_uploaded_image(request.files["image"])
        result, feedback, _debug = ASSESSOR.assess(bgr, draw_debug=False)

        # Build a JSON-safe dict: cast numpy scalars/tuples explicitly so the
        # payload never depends on numpy's non-serializable types.
        payload = {
            "result": {
                "blur_score": float(result.blur_score),
                "illumination_score": float(result.illumination_score),
                "coverage_ratio": float(result.coverage_ratio),
                "orientation_angle_deg": float(result.orientation_angle_deg),

                "blur_pass": bool(result.blur_pass),
                "illumination_pass": bool(result.illumination_pass),
                "coverage_pass": bool(result.coverage_pass),
                "orientation_pass": bool(result.orientation_pass),

                "quality_score": float(result.quality_score),
                "overall_pass": bool(result.overall_pass),

                "bbox": list(result.bbox) if result.bbox is not None else None,
                "contour_area": float(result.contour_area),
            },
            "feedback": {
                "is_acceptable": bool(feedback.is_acceptable),
                "messages": [
                    {
                        "severity": m.severity,
                        "category": m.category,
                        "message": m.message,
                    }
                    for m in (feedback.messages or [])
                ],
            },
        }

        return jsonify(payload), 200

    except ValueError as e:
        # Client-side problem (missing or undecodable image).
        return jsonify({"error": str(e)}), 400
    except Exception:
        # Log the traceback server-side; never leak internals to the client.
        app.logger.exception("Unhandled error in /api/v1/finger-quality")
        return jsonify({"error": "Internal server error"}), 500
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
if __name__ == "__main__":
    import os

    # Port 7860 matches the Dockerfile EXPOSE and the README's app_port;
    # the previous hard-coded 5000 meant the container never served traffic.
    # Debug mode is opt-in via environment so it stays off in production.
    app.run(
        host="0.0.0.0",
        port=int(os.environ.get("PORT", "7860")),
        debug=os.environ.get("FLASK_DEBUG", "0") == "1",
    )
|
finger_detector.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, Tuple
|
| 2 |
+
|
| 3 |
+
import cv2
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class FingerDetector:
    """Skin-colour based finger segmentation plus largest-contour extraction."""

    def __init__(self, min_contour_area_ratio: float = 0.02):
        # Contours smaller than this fraction of the frame are ignored.
        self.min_contour_area_ratio = min_contour_area_ratio

    @staticmethod
    def segment_skin_ycbcr(img_bgr: np.ndarray) -> np.ndarray:
        """Return a binary skin mask (uint8 0/255) via YCbCr thresholding.

        Uses the commonly cited skin band 77<=Cb<=127 and 133<=Cr<=173.
        Note that OpenCV's COLOR_BGR2YCrCb channel order is (Y, Cr, Cb).
        """
        ycrcb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2YCrCb)
        skin_lo = np.array([0, 133, 77], dtype=np.uint8)
        skin_hi = np.array([255, 173, 127], dtype=np.uint8)
        mask = cv2.inRange(ycrcb, skin_lo, skin_hi)

        # Open to drop speckle noise, then close to fill small holes.
        ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, ellipse, iterations=2)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, ellipse, iterations=2)
        return mask

    def find_largest_contour(
        self,
        mask: np.ndarray,
        frame_area: float
    ) -> Optional[np.ndarray]:
        """Biggest external contour above the minimum area ratio, or None."""
        found, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        area_floor = self.min_contour_area_ratio * frame_area
        candidates = [c for c in found if cv2.contourArea(c) >= area_floor]
        if not candidates:
            return None
        return max(candidates, key=cv2.contourArea)

    @staticmethod
    def bounding_box(contour: np.ndarray) -> Tuple[int, int, int, int]:
        """Axis-aligned bounding rectangle (x, y, w, h) of the contour."""
        x, y, w, h = cv2.boundingRect(contour)
        return x, y, w, h

    @staticmethod
    def orientation_pca_deg(contour: np.ndarray) -> float:
        """Angle (degrees, normalized to [-90, 90]) of the contour's main
        PCA axis relative to the x-axis."""
        points = contour.reshape(-1, 2).astype(np.float64)
        _, eigvecs, _ = cv2.PCACompute2(points, mean=np.empty(0))
        major_x, major_y = eigvecs[0]
        degrees = float(np.degrees(np.arctan2(major_y, major_x)))

        # The axis is undirected, so +/-180 describes the same line.
        if degrees < -90:
            degrees += 180
        elif degrees > 90:
            degrees -= 180
        return float(degrees)
|
finger_quality.py
ADDED
|
@@ -0,0 +1,388 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
from typing import Optional, Tuple
|
| 3 |
+
|
| 4 |
+
import cv2
|
| 5 |
+
import numpy as np
|
| 6 |
+
from dataclasses import asdict
|
| 7 |
+
import json
|
| 8 |
+
from typing import Any
|
| 9 |
+
|
| 10 |
+
@dataclass
class FingerQualityResult:
    """Container for all per-image finger quality metrics and verdicts."""

    # --- raw metric values ---
    blur_score: float                # variance of Laplacian (higher = sharper)
    illumination_score: float        # mean grayscale intensity of the ROI
    coverage_ratio: float            # fraction of the frame covered by finger pixels
    orientation_angle_deg: float     # main-axis angle w.r.t. the x-axis

    # --- per-metric verdicts ---
    blur_pass: bool
    illumination_pass: bool
    coverage_pass: bool
    orientation_pass: bool

    # --- overall verdict ---
    quality_score: float             # weighted combination in [0, 1]
    overall_pass: bool

    # --- geometry / debugging aids ---
    bbox: Optional[Tuple[int, int, int, int]]   # (x, y, w, h) of the finger, if found
    contour_area: float
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class FingerQualityAssessor:
|
| 34 |
+
"""
|
| 35 |
+
End-to-end finger quality computation on single-finger mobile images.
|
| 36 |
+
|
| 37 |
+
Pipeline:
|
| 38 |
+
1. Preprocess (resize, blur, colorspace).
|
| 39 |
+
2. Skin-based finger segmentation (YCbCr + morphology).
|
| 40 |
+
3. Largest contour -> bounding box + PCA orientation.
|
| 41 |
+
4. Metrics on finger ROI (blur, illumination, coverage, orientation).
|
| 42 |
+
"""
|
| 43 |
+
|
| 44 |
+
def __init__(
|
| 45 |
+
self,
|
| 46 |
+
target_width: int = 640,
|
| 47 |
+
min_contour_area_ratio: float = 0.02,
|
| 48 |
+
# Thresholds (tune for your data/device):
|
| 49 |
+
blur_min: float = 60.0, # variance-of-Laplacian; > threshold = sharp
|
| 50 |
+
illum_min: float = 50.0, # mean gray lower bound
|
| 51 |
+
illum_max: float = 200.0, # mean gray upper bound
|
| 52 |
+
coverage_min: float = 0.10, # fraction of frame area covered by finger
|
| 53 |
+
orientation_max_deviation: float = 45.0, # degrees from vertical or horizontal (tunable)
|
| 54 |
+
vertical_expected: bool = True # if True, expect finger roughly vertical
|
| 55 |
+
):
|
| 56 |
+
self.target_width = target_width
|
| 57 |
+
self.min_contour_area_ratio = min_contour_area_ratio
|
| 58 |
+
self.blur_min = blur_min
|
| 59 |
+
self.illum_min = illum_min
|
| 60 |
+
self.illum_max = illum_max
|
| 61 |
+
self.coverage_min = coverage_min
|
| 62 |
+
self.orientation_max_deviation = orientation_max_deviation
|
| 63 |
+
self.vertical_expected = vertical_expected
|
| 64 |
+
|
| 65 |
+
# ---------- Public API ----------
|
| 66 |
+
|
| 67 |
+
def assess(
|
| 68 |
+
self,
|
| 69 |
+
bgr: np.ndarray,
|
| 70 |
+
draw_debug: bool = False
|
| 71 |
+
) -> Tuple[FingerQualityResult, Optional[np.ndarray]]:
|
| 72 |
+
"""
|
| 73 |
+
Main entrypoint.
|
| 74 |
+
|
| 75 |
+
:param bgr: HxWx3 uint8 BGR finger image from mobile camera.
|
| 76 |
+
:param draw_debug: If True, returns image with bbox and orientation visualized.
|
| 77 |
+
:return: (FingerQualityResult, debug_image or None)
|
| 78 |
+
"""
|
| 79 |
+
if bgr is None or bgr.size == 0:
|
| 80 |
+
raise ValueError("Input image is empty")
|
| 81 |
+
|
| 82 |
+
# 1) Resize for consistent metrics
|
| 83 |
+
img = self._resize_keep_aspect(bgr, self.target_width)
|
| 84 |
+
h, w = img.shape[:2]
|
| 85 |
+
frame_area = float(h * w)
|
| 86 |
+
|
| 87 |
+
# 2) Segment finger (skin) and find largest contour
|
| 88 |
+
mask = self._segment_skin_ycbcr(img)
|
| 89 |
+
contour = self._find_largest_contour(mask, frame_area)
|
| 90 |
+
if contour is None:
|
| 91 |
+
# No valid finger found; everything fails.
|
| 92 |
+
result = FingerQualityResult(
|
| 93 |
+
blur_score=0.0,
|
| 94 |
+
illumination_score=0.0,
|
| 95 |
+
coverage_ratio=0.0,
|
| 96 |
+
orientation_angle_deg=0.0,
|
| 97 |
+
blur_pass=False,
|
| 98 |
+
illumination_pass=False,
|
| 99 |
+
coverage_pass=False,
|
| 100 |
+
orientation_pass=False,
|
| 101 |
+
quality_score=0.0,
|
| 102 |
+
overall_pass=False,
|
| 103 |
+
bbox=None,
|
| 104 |
+
contour_area=0.0
|
| 105 |
+
)
|
| 106 |
+
return result, img if draw_debug else None
|
| 107 |
+
|
| 108 |
+
contour_area = cv2.contourArea(contour)
|
| 109 |
+
x, y, w_box, h_box = cv2.boundingRect(contour)
|
| 110 |
+
bbox = (x, y, w_box, h_box)
|
| 111 |
+
|
| 112 |
+
# ROI around finger
|
| 113 |
+
roi = img[y:y + h_box, x:x + w_box]
|
| 114 |
+
roi_gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
|
| 115 |
+
|
| 116 |
+
# Recompute mask for ROI to calculate coverage accurately
|
| 117 |
+
mask_roi = mask[y:y + h_box, x:x + w_box]
|
| 118 |
+
|
| 119 |
+
# 3) Metrics
|
| 120 |
+
blur_score = self._blur_score_laplacian(roi_gray)
|
| 121 |
+
illumination_score = float(roi_gray.mean())
|
| 122 |
+
coverage_ratio = float(np.count_nonzero(mask_roi)) / float(frame_area)
|
| 123 |
+
|
| 124 |
+
orientation_angle_deg = self._orientation_pca(contour)
|
| 125 |
+
|
| 126 |
+
# 4) Per-metric pass/fail
|
| 127 |
+
blur_pass = blur_score >= self.blur_min
|
| 128 |
+
illum_pass = self.illum_min <= illumination_score <= self.illum_max
|
| 129 |
+
coverage_pass = coverage_ratio >= self.coverage_min
|
| 130 |
+
orientation_pass = self._orientation_pass(orientation_angle_deg)
|
| 131 |
+
|
| 132 |
+
# 5) Quality score (simple weighted average; tune as needed)
|
| 133 |
+
# Scale each metric to [0,1], then weight.
|
| 134 |
+
blur_norm = np.clip(blur_score / (self.blur_min * 2.0), 0.0, 1.0)
|
| 135 |
+
illum_center = (self.illum_min + self.illum_max) / 2.0
|
| 136 |
+
illum_range = (self.illum_max - self.illum_min) / 2.0
|
| 137 |
+
illum_norm = 1.0 - np.clip(abs(illumination_score - illum_center) / (illum_range + 1e-6), 0.0, 1.0)
|
| 138 |
+
coverage_norm = np.clip(coverage_ratio / (self.coverage_min * 2.0), 0.0, 1.0)
|
| 139 |
+
orient_norm = 1.0 if orientation_pass else 0.0
|
| 140 |
+
|
| 141 |
+
# weights: prioritize blur and coverage for biometrics
|
| 142 |
+
w_blur, w_illum, w_cov, w_orient = 0.35, 0.25, 0.25, 0.15
|
| 143 |
+
quality_score = float(
|
| 144 |
+
w_blur * blur_norm +
|
| 145 |
+
w_illum * illum_norm +
|
| 146 |
+
w_cov * coverage_norm +
|
| 147 |
+
w_orient * orient_norm
|
| 148 |
+
)
|
| 149 |
+
|
| 150 |
+
# Comment strict condition - for tuning other metrics
|
| 151 |
+
# overall_pass = blur_pass and illum_pass and coverage_pass and orientation_pass
|
| 152 |
+
overall_pass = quality_score >= 0.7
|
| 153 |
+
|
| 154 |
+
result = FingerQualityResult(
|
| 155 |
+
blur_score=float(blur_score),
|
| 156 |
+
illumination_score=float(illumination_score),
|
| 157 |
+
coverage_ratio=float(coverage_ratio),
|
| 158 |
+
orientation_angle_deg=float(orientation_angle_deg),
|
| 159 |
+
blur_pass=blur_pass,
|
| 160 |
+
illumination_pass=illum_pass,
|
| 161 |
+
coverage_pass=coverage_pass,
|
| 162 |
+
orientation_pass=orientation_pass,
|
| 163 |
+
quality_score=quality_score,
|
| 164 |
+
overall_pass=overall_pass,
|
| 165 |
+
bbox=bbox,
|
| 166 |
+
contour_area=float(contour_area),
|
| 167 |
+
)
|
| 168 |
+
|
| 169 |
+
debug_img = None
|
| 170 |
+
if draw_debug:
|
| 171 |
+
debug_img = img.copy()
|
| 172 |
+
self._draw_debug(debug_img, contour, bbox, orientation_angle_deg, result)
|
| 173 |
+
|
| 174 |
+
return result, debug_img
|
| 175 |
+
|
| 176 |
+
# ---------- Preprocessing ----------
|
| 177 |
+
|
| 178 |
+
@staticmethod
|
| 179 |
+
def _resize_keep_aspect(img: np.ndarray, target_width: int) -> np.ndarray:
|
| 180 |
+
h, w = img.shape[:2]
|
| 181 |
+
if w == target_width:
|
| 182 |
+
return img
|
| 183 |
+
scale = target_width / float(w)
|
| 184 |
+
new_size = (target_width, int(round(h * scale)))
|
| 185 |
+
return cv2.resize(img, new_size, interpolation=cv2.INTER_AREA)
|
| 186 |
+
|
| 187 |
+
# ---------- Segmentation ----------
|
| 188 |
+
|
| 189 |
+
@staticmethod
|
| 190 |
+
def _segment_skin_ycbcr(img: np.ndarray) -> np.ndarray:
|
| 191 |
+
"""
|
| 192 |
+
Segment skin using YCbCr range commonly used for hand/finger. [web:12][web:15]
|
| 193 |
+
Returns binary mask (uint8 0/255).
|
| 194 |
+
"""
|
| 195 |
+
ycbcr = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
|
| 196 |
+
# These ranges are a reasonable starting point for many Asian/Indian skin tones;
|
| 197 |
+
# tweak per competition dataset. [web:12]
|
| 198 |
+
lower = np.array([0, 133, 77], dtype=np.uint8)
|
| 199 |
+
upper = np.array([255, 173, 127], dtype=np.uint8)
|
| 200 |
+
mask = cv2.inRange(ycbcr, lower, upper)
|
| 201 |
+
|
| 202 |
+
# Morphology to clean noise
|
| 203 |
+
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
|
| 204 |
+
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)
|
| 205 |
+
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=2)
|
| 206 |
+
|
| 207 |
+
return mask
|
| 208 |
+
|
| 209 |
+
def _find_largest_contour(
|
| 210 |
+
self,
|
| 211 |
+
mask: np.ndarray,
|
| 212 |
+
frame_area: float
|
| 213 |
+
) -> Optional[np.ndarray]:
|
| 214 |
+
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
| 215 |
+
if not contours:
|
| 216 |
+
return None
|
| 217 |
+
# Filter by area
|
| 218 |
+
min_area = self.min_contour_area_ratio * frame_area
|
| 219 |
+
valid = [c for c in contours if cv2.contourArea(c) >= min_area]
|
| 220 |
+
if not valid:
|
| 221 |
+
return None
|
| 222 |
+
# Largest contour
|
| 223 |
+
largest = max(valid, key=cv2.contourArea)
|
| 224 |
+
return largest
|
| 225 |
+
|
| 226 |
+
# ---------- Metrics ----------
|
| 227 |
+
|
| 228 |
+
@staticmethod
|
| 229 |
+
def _blur_score_laplacian(gray: np.ndarray) -> float:
|
| 230 |
+
# Standard variance-of-Laplacian focus measure. [web:7][web:10][web:16]
|
| 231 |
+
lap = cv2.Laplacian(gray, cv2.CV_64F)
|
| 232 |
+
return float(lap.var())
|
| 233 |
+
|
| 234 |
+
@staticmethod
|
| 235 |
+
def _orientation_pca(contour: np.ndarray) -> float:
|
| 236 |
+
"""
|
| 237 |
+
Compute orientation using PCA on contour points. [web:8][web:11][web:14]
|
| 238 |
+
|
| 239 |
+
:return: angle in degrees in range [-90, 90] w.r.t. x-axis.
|
| 240 |
+
"""
|
| 241 |
+
pts = contour.reshape(-1, 2).astype(np.float64)
|
| 242 |
+
mean, eigenvectors, eigenvalues = cv2.PCACompute2(pts, mean=np.empty(0))
|
| 243 |
+
# First principal component
|
| 244 |
+
vx, vy = eigenvectors[0]
|
| 245 |
+
angle_rad = np.arctan2(vy, vx)
|
| 246 |
+
angle_deg = np.degrees(angle_rad)
|
| 247 |
+
# Normalize angle for convenience
|
| 248 |
+
if angle_deg < -90:
|
| 249 |
+
angle_deg += 180
|
| 250 |
+
elif angle_deg > 90:
|
| 251 |
+
angle_deg -= 180
|
| 252 |
+
return float(angle_deg)
|
| 253 |
+
|
| 254 |
+
def _orientation_pass(self, angle_deg: float) -> bool:
|
| 255 |
+
"""
|
| 256 |
+
Check if orientation is close to expected vertical/horizontal.
|
| 257 |
+
|
| 258 |
+
vertical_expected=True -> near 90 or -90 degrees
|
| 259 |
+
vertical_expected=False -> near 0 degrees
|
| 260 |
+
"""
|
| 261 |
+
if self.vertical_expected:
|
| 262 |
+
# distance from ±90
|
| 263 |
+
dev = min(abs(abs(angle_deg) - 90.0), abs(angle_deg))
|
| 264 |
+
else:
|
| 265 |
+
# distance from 0
|
| 266 |
+
dev = abs(angle_deg)
|
| 267 |
+
return dev <= self.orientation_max_deviation
|
| 268 |
+
|
| 269 |
+
# ---------- Debug drawing ----------
|
| 270 |
+
|
| 271 |
+
@staticmethod
|
| 272 |
+
def _draw_axis(img, center, vec, length, color, thickness=2):
|
| 273 |
+
x0, y0 = center
|
| 274 |
+
x1 = int(x0 + length * vec[0])
|
| 275 |
+
y1 = int(y0 + length * vec[1])
|
| 276 |
+
cv2.arrowedLine(img, (x0, y0), (x1, y1), color, thickness, tipLength=0.2)
|
| 277 |
+
|
| 278 |
+
def _draw_debug(
|
| 279 |
+
self,
|
| 280 |
+
img: np.ndarray,
|
| 281 |
+
contour: np.ndarray,
|
| 282 |
+
bbox: Tuple[int, int, int, int],
|
| 283 |
+
angle_deg: float,
|
| 284 |
+
result: FingerQualityResult
|
| 285 |
+
) -> None:
|
| 286 |
+
x, y, w_box, h_box = bbox
|
| 287 |
+
# Bounding box
|
| 288 |
+
cv2.rectangle(img, (x, y), (x + w_box, y + h_box), (0, 255, 0), 2)
|
| 289 |
+
|
| 290 |
+
# Draw contour
|
| 291 |
+
cv2.drawContours(img, [contour], -1, (255, 0, 0), 2)
|
| 292 |
+
|
| 293 |
+
# PCA axis
|
| 294 |
+
pts = contour.reshape(-1, 2).astype(np.float64)
|
| 295 |
+
mean, eigenvectors, eigenvalues = cv2.PCACompute2(pts, mean=np.empty(0))
|
| 296 |
+
center = (int(mean[0, 0]), int(mean[0, 1]))
|
| 297 |
+
main_vec = eigenvectors[0]
|
| 298 |
+
self._draw_axis(img, center, main_vec, length=80, color=(0, 0, 255), thickness=2)
|
| 299 |
+
|
| 300 |
+
# Overlay text
|
| 301 |
+
text_lines = [
|
| 302 |
+
f"Blur: {result.blur_score:.1f} ({'OK' if result.blur_pass else 'BAD'})",
|
| 303 |
+
f"Illum: {result.illumination_score:.1f} ({'OK' if result.illumination_pass else 'BAD'})",
|
| 304 |
+
f"Coverage: {result.coverage_ratio*100:.1f}% ({'OK' if result.coverage_pass else 'BAD'})",
|
| 305 |
+
f"Angle: {angle_deg:.1f} deg ({'OK' if result.orientation_pass else 'BAD'})",
|
| 306 |
+
f"Quality: {result.quality_score:.2f} ({'PASS' if result.overall_pass else 'FAIL'})",
|
| 307 |
+
]
|
| 308 |
+
y0 = 25
|
| 309 |
+
for line in text_lines:
|
| 310 |
+
cv2.putText(
|
| 311 |
+
img,
|
| 312 |
+
line,
|
| 313 |
+
(10, y0),
|
| 314 |
+
cv2.FONT_HERSHEY_SIMPLEX,
|
| 315 |
+
0.6,
|
| 316 |
+
(0, 255, 0) if "OK" in line or "PASS" in line else (0, 0, 255),
|
| 317 |
+
2,
|
| 318 |
+
cv2.LINE_AA
|
| 319 |
+
)
|
| 320 |
+
y0 += 22
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
# 1) Load your finger image (replace with your path)
|
| 324 |
+
img = cv2.imread(r"finger_inputs\clear_thumb.jpeg")
|
| 325 |
+
if img is None:
|
| 326 |
+
raise RuntimeError("Image not found or path is wrong")
|
| 327 |
+
|
| 328 |
+
# 2) Create assessor (tune thresholds later if needed)
|
| 329 |
+
assessor = FingerQualityAssessor(
|
| 330 |
+
target_width=640,
|
| 331 |
+
blur_min=60.0,
|
| 332 |
+
illum_min=50.0,
|
| 333 |
+
illum_max=200.0,
|
| 334 |
+
coverage_min=0.10,
|
| 335 |
+
orientation_max_deviation=45.0,
|
| 336 |
+
vertical_expected=True
|
| 337 |
+
)
|
| 338 |
+
|
| 339 |
+
# 3) Run assessment.
|
| 340 |
+
result, debug_image = assessor.assess(img, draw_debug=True)
|
| 341 |
+
|
| 342 |
+
def _round_value(value: Any) -> Any:
|
| 343 |
+
"""
|
| 344 |
+
Recursively round floats to 2 decimal places for JSON output.
|
| 345 |
+
"""
|
| 346 |
+
if isinstance(value, float):
|
| 347 |
+
return round(value, 2)
|
| 348 |
+
if isinstance(value, dict):
|
| 349 |
+
return {k: _round_value(v) for k, v in value.items()}
|
| 350 |
+
if isinstance(value, (list, tuple)):
|
| 351 |
+
return [_round_value(v) for v in value]
|
| 352 |
+
return value
|
| 353 |
+
|
| 354 |
+
def finger_quality_result_to_json(result: FingerQualityResult) -> str:
|
| 355 |
+
"""
|
| 356 |
+
Convert FingerQualityResult to a JSON string suitable for frontend usage.
|
| 357 |
+
"""
|
| 358 |
+
data = asdict(result)
|
| 359 |
+
|
| 360 |
+
# Ensure bbox is frontend-friendly
|
| 361 |
+
if data["bbox"] is not None:
|
| 362 |
+
data["bbox"] = {
|
| 363 |
+
"x": data["bbox"][0],
|
| 364 |
+
"y": data["bbox"][1],
|
| 365 |
+
"width": data["bbox"][2],
|
| 366 |
+
"height": data["bbox"][3],
|
| 367 |
+
}
|
| 368 |
+
data = _round_value(data)
|
| 369 |
+
return json.dumps(data, indent=2)
|
| 370 |
+
|
| 371 |
+
quality_json = finger_quality_result_to_json(result)
|
| 372 |
+
print(quality_json)
|
| 373 |
+
|
| 374 |
+
with open("output_dir/finger_quality_result.json", "w") as f:
|
| 375 |
+
f.write(quality_json)
|
| 376 |
+
|
| 377 |
+
# 4) Print all scores and flags.
|
| 378 |
+
print("Blur score:", result.blur_score, "pass:", result.blur_pass)
|
| 379 |
+
print("Illumination:", result.illumination_score, "pass:", result.illumination_pass)
|
| 380 |
+
print("Coverage ratio:", result.coverage_ratio, "pass:", result.coverage_pass)
|
| 381 |
+
print("Orientation angle:", result.orientation_angle_deg, "pass:", result.orientation_pass)
|
| 382 |
+
print("Quality score:", result.quality_score, "OVERALL PASS:", result.overall_pass)
|
| 383 |
+
|
| 384 |
+
# 5) Show debug image with bounding box and text.
|
| 385 |
+
if debug_image is not None:
|
| 386 |
+
cv2.imshow("Finger Quality Debug", debug_image)
|
| 387 |
+
cv2.waitKey(0) # wait until key press to close window [web:18][web:24]
|
| 388 |
+
cv2.destroyAllWindows()
|
main.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from finger_detector import FingerDetector
|
| 6 |
+
from models import FingerQualityResult
|
| 7 |
+
from quality_analyzer import QualityAnalyzer, QualityConfig
|
| 8 |
+
from utils import finger_quality_result_to_json
|
| 9 |
+
from visualizer import Visualizer
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class FingerQualityAssessor:
    """
    End-to-end finger quality computation on single-finger mobile images.

    Pipeline: resize -> YCbCr skin segmentation -> largest-contour finger
    detection -> per-metric scoring (blur / illumination / coverage /
    orientation) -> weighted overall score + user-facing feedback.
    """

    def __init__(self, config: QualityConfig):
        self.config = config
        # Contours smaller than 2% of the frame are ignored by the detector.
        self.detector = FingerDetector(min_contour_area_ratio=0.02)
        self.analyzer = QualityAnalyzer(config)

    def _empty_result(self) -> FingerQualityResult:
        """All-zero, all-fail result used when no finger contour is found."""
        return FingerQualityResult(
            blur_score=0.0,
            illumination_score=0.0,
            coverage_ratio=0.0,
            orientation_angle_deg=0.0,
            blur_pass=False,
            illumination_pass=False,
            coverage_pass=False,
            orientation_pass=False,
            quality_score=0.0,
            overall_pass=False,
            bbox=None,
            contour_area=0.0,
        )

    def assess(self, bgr: np.ndarray, draw_debug: bool = False):
        """
        Assess one BGR image.

        Parameters
        ----------
        bgr : np.ndarray
            Input image in OpenCV BGR channel order.
        draw_debug : bool
            If True, also return an annotated debug image.

        Returns
        -------
        (FingerQualityResult, QualityFeedback, Optional[np.ndarray])
            The result (with feedback embedded), the feedback object, and the
            debug image (None unless draw_debug; the plain resized image is
            returned when no finger was detected).

        Raises
        ------
        ValueError
            If the input image is None or empty.
        """
        if bgr is None or bgr.size == 0:
            raise ValueError("Input image is empty")

        img = self.analyzer.resize_keep_aspect(bgr, self.config.target_width)
        h, w = img.shape[:2]
        frame_area = float(h * w)

        mask = self.detector.segment_skin_ycbcr(img)
        contour = self.detector.find_largest_contour(mask, frame_area)
        if contour is None:
            # No finger found: return a zeroed result instead of raising so
            # callers can still surface feedback to the user.  FIX: embed the
            # feedback in the result here too, so the failure JSON carries it
            # just like the success path does.
            result = self._empty_result()
            feedback = self.analyzer.generate_feedback(result)
            result.feedback = feedback
            return result, feedback, (img if draw_debug else None)

        contour_area = float(cv2.contourArea(contour))
        bbox = self.detector.bounding_box(contour)
        x, y, w_box, h_box = bbox

        roi = img[y:y + h_box, x:x + w_box]
        roi_gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        mask_roi = mask[y:y + h_box, x:x + w_box]

        blur_score = self.analyzer.blur_score_laplacian(roi_gray)
        illumination_score = float(roi_gray.mean())
        # Skin pixels inside the bbox relative to the WHOLE frame, i.e. how
        # much of the frame the finger fills.
        coverage_ratio = float(np.count_nonzero(mask_roi)) / float(frame_area)

        orientation_angle_deg = self.detector.orientation_pca_deg(contour)

        blur_pass = blur_score >= self.config.blur_min
        illum_pass = self.config.illum_min <= illumination_score <= self.config.illum_max
        coverage_pass = coverage_ratio >= self.config.coverage_min
        orientation_pass = self.analyzer.orientation_pass(orientation_angle_deg)

        quality_score = self.analyzer.compute_quality_score(
            blur_score=blur_score,
            illumination_score=illumination_score,
            coverage_ratio=coverage_ratio,
            orientation_ok=orientation_pass,
        )
        overall_pass = quality_score >= self.config.overall_quality_threshold

        result = FingerQualityResult(
            blur_score=float(blur_score),
            illumination_score=float(illumination_score),
            coverage_ratio=float(coverage_ratio),
            orientation_angle_deg=float(orientation_angle_deg),
            blur_pass=bool(blur_pass),
            illumination_pass=bool(illum_pass),
            coverage_pass=bool(coverage_pass),
            orientation_pass=bool(orientation_pass),
            quality_score=float(quality_score),
            overall_pass=bool(overall_pass),
            bbox=bbox,
            contour_area=contour_area,
            feedback=None,
        )

        feedback = self.analyzer.generate_feedback(result)
        # Embed feedback so a single JSON payload carries everything.
        result.feedback = feedback

        debug_img = None
        if draw_debug:  # contour and bbox are guaranteed non-None here
            debug_img = Visualizer.draw_debug(img, contour, bbox, orientation_angle_deg, result)

        return result, feedback, debug_img
|
| 105 |
+
def main():
    """CLI entry point: assess one finger image and write results to disk."""
    import sys

    # Allow the image path as an optional CLI argument; fall back to the
    # original hard-coded development path for backward compatibility.
    default_path = r"C:\SagarKV\sol9x\ContactlessFinger\TRACK_A\finger_inputs\OM_TH.jpg"
    image_path = sys.argv[1] if len(sys.argv) > 1 else default_path
    img = cv2.imread(image_path)
    if img is None:
        raise RuntimeError("Image not found or path is wrong")

    config = QualityConfig(
        target_width=640,
        blur_min=60.0,
        illum_min=50.0,
        illum_max=200.0,
        coverage_min=0.10,
        orientation_max_deviation=45.0,
        vertical_expected=True,
        overall_quality_threshold=0.70,
    )

    assessor = FingerQualityAssessor(config)
    result, feedback, debug_image = assessor.assess(img, draw_debug=True)

    os.makedirs("results", exist_ok=True)
    quality_json = finger_quality_result_to_json(result)
    with open("results/finger_quality_result.json", "w", encoding="utf-8") as f:
        f.write(quality_json)

    # Print JSON + feedback
    print(quality_json)
    for m in feedback.messages:
        print(f"[{m.severity.upper()}] {m.category}: {m.message}")
    print("Acceptable:", feedback.is_acceptable)

    if debug_image is not None:
        # Always persist the debug image.  FIX: requirements.txt pins
        # opencv-python-headless, whose cv2.imshow raises at runtime (no GUI
        # support) — guard the interactive path so the script also works in
        # the Docker/headless deployment.
        cv2.imwrite("results/finger_quality_debug.png", debug_image)
        try:
            cv2.imshow("Finger Quality Debug", debug_image)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        except cv2.error:
            pass  # headless build: debug image was saved to disk instead


if __name__ == "__main__":
    main()
|
models.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass, field
|
| 2 |
+
from typing import Optional, Tuple, List
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@dataclass
class FeedbackMessage:
    """A single user-facing feedback item for one quality metric."""

    # Metric category, e.g. "blur", "light", "position", "orientation", "overall".
    category: str
    # Human-readable instruction shown to the user.
    message: str
    severity: str  # "warning", "error", "success"
|
| 13 |
+
|
| 14 |
+
@dataclass
class QualityFeedback:
    """Collects per-metric feedback; any "error" entry rejects the capture."""

    messages: List[FeedbackMessage] = field(default_factory=list)
    is_acceptable: bool = True

    def add(self, category: str, message: str, severity: str = "warning"):
        """Record one feedback entry and update overall acceptability."""
        entry = FeedbackMessage(category, message, severity)
        self.messages.append(entry)
        # A single error-level message is enough to reject the capture.
        self.is_acceptable = self.is_acceptable and severity != "error"
|
| 24 |
+
@dataclass
class FingerQualityResult:
    """Full outcome of one finger-image quality assessment (JSON-serializable)."""

    # Raw scores
    blur_score: float  # variance of Laplacian on the finger ROI; higher = sharper
    illumination_score: float  # mean grayscale intensity of the finger ROI
    coverage_ratio: float  # skin pixels in the bbox / whole-frame pixels
    orientation_angle_deg: float  # angle of main axis w.r.t. x-axis

    # Per-metric pass/fail
    blur_pass: bool
    illumination_pass: bool
    coverage_pass: bool
    orientation_pass: bool

    # Overall
    quality_score: float  # 0–1 weighted combination of the metrics
    overall_pass: bool

    # Debug / geometry
    bbox: Optional[Tuple[int, int, int, int]]  # x, y, w, h (None when no finger found)
    contour_area: float

    # NEW: Feedback bundled into the same result JSON
    feedback: Optional[QualityFeedback] = None
|
quality_analyzer.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from models import FingerQualityResult, QualityFeedback
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@dataclass
class QualityConfig:
    """Tunable thresholds for the finger-image quality checks."""

    target_width: int = 640  # images are resized to this width (aspect kept)
    blur_min: float = 60.0  # minimum variance-of-Laplacian to count as sharp
    illum_min: float = 50.0  # lower bound of acceptable mean-gray intensity
    illum_max: float = 200.0  # upper bound of acceptable mean-gray intensity
    coverage_min: float = 0.10  # minimum finger-to-frame pixel ratio
    orientation_max_deviation: float = 45.0  # max allowed axis deviation (deg)
    vertical_expected: bool = True  # expect the finger roughly vertical
    overall_quality_threshold: float = 0.70  # weighted score needed to pass
|
| 20 |
+
class QualityAnalyzer:
    """
    Metric computation for finger-image quality.

    Provides preprocessing (resize), individual metric scores, the weighted
    overall quality score, and user-facing feedback generation.
    """

    def __init__(self, config: QualityConfig):
        self.config = config

    @staticmethod
    def resize_keep_aspect(img: np.ndarray, target_width: int) -> np.ndarray:
        """Resize img to target_width, keeping aspect ratio (no-op if already there)."""
        h, w = img.shape[:2]
        if w == target_width:
            return img
        scale = target_width / float(w)
        new_size = (target_width, int(round(h * scale)))
        return cv2.resize(img, new_size, interpolation=cv2.INTER_AREA)

    @staticmethod
    def blur_score_laplacian(gray: np.ndarray) -> float:
        """Variance-of-Laplacian focus measure; higher means sharper."""
        return float(cv2.Laplacian(gray, cv2.CV_64F).var())

    def _orientation_deviation(self, angle_deg: float) -> float:
        """
        Angular deviation (deg) of the finger axis from the expected pose.

        NOTE(review): with vertical_expected the min(...) treats an axis near
        EITHER 90° (vertical) or 0° (horizontal) as aligned, so with the
        default 45° tolerance every angle passes — confirm this leniency is
        intentional.
        """
        if self.config.vertical_expected:
            return min(abs(abs(angle_deg) - 90.0), abs(angle_deg))
        return abs(angle_deg)

    def orientation_pass(self, angle_deg: float) -> bool:
        """True when the axis deviation is within orientation_max_deviation."""
        return self._orientation_deviation(angle_deg) <= self.config.orientation_max_deviation

    def compute_quality_score(
        self,
        blur_score: float,
        illumination_score: float,
        coverage_ratio: float,
        orientation_ok: bool
    ) -> float:
        """
        Weighted 0–1 quality score.

        Each metric is normalized to [0, 1]: blur and coverage saturate at
        twice their minimum thresholds, illumination peaks at the center of
        the allowed range, orientation is binary.
        """
        blur_norm = np.clip(blur_score / (self.config.blur_min * 2.0), 0.0, 1.0)

        illum_center = (self.config.illum_min + self.config.illum_max) / 2.0
        illum_range = (self.config.illum_max - self.config.illum_min) / 2.0
        illum_norm = 1.0 - np.clip(abs(illumination_score - illum_center) / (illum_range + 1e-6), 0.0, 1.0)

        coverage_norm = np.clip(coverage_ratio / (self.config.coverage_min * 2.0), 0.0, 1.0)
        orient_norm = 1.0 if orientation_ok else 0.0

        # Fixed weights: sharpness dominates, then lighting/coverage, then pose.
        w_blur, w_illum, w_cov, w_orient = 0.35, 0.25, 0.25, 0.15
        return float(w_blur * blur_norm + w_illum * illum_norm + w_cov * coverage_norm + w_orient * orient_norm)

    def generate_feedback(self, result: FingerQualityResult) -> QualityFeedback:
        """
        User-friendly feedback with ranges consistent with your thresholds:
        - blur_min (default 60): below ~0.6x is "very blurry", below threshold is "slightly blurry".
        - illum_min/illum_max: too dark, too bright, or OK.
        - coverage_min: very small (<0.5x), small (<threshold), too close (>0.70), OK.
        - orientation_max_deviation: error if >1.5x deviation, warning if >threshold.
        """
        fb = QualityFeedback()

        # Blur
        if result.blur_score < 0.6 * self.config.blur_min:
            fb.add("blur", "Image is very blurry. Hold phone steady and tap to focus.", "error")
        elif result.blur_score < self.config.blur_min:
            fb.add("blur", "Image is slightly blurry. Try holding your phone steadier.", "warning")
        else:
            fb.add("blur", "Image sharpness is good.", "success")

        # Illumination
        if result.illumination_score < 0.8 * self.config.illum_min:
            fb.add("light", "Image is too dark. Move to a well-lit area.", "error")
        elif result.illumination_score < self.config.illum_min:
            fb.add("light", "Lighting is dim. Move closer to a light source.", "warning")
        elif result.illumination_score > self.config.illum_max:
            fb.add("light", "Image is overexposed. Avoid direct bright light.", "warning")
        else:
            fb.add("light", "Lighting conditions are good.", "success")

        # Coverage
        if result.coverage_ratio < 0.5 * self.config.coverage_min:
            fb.add("position", "Finger too small. Move phone closer to your finger.", "error")
        elif result.coverage_ratio < self.config.coverage_min:
            fb.add("position", "Finger appears small. Move the camera closer.", "warning")
        elif result.coverage_ratio > 0.70:
            fb.add("position", "Finger too close. Move phone slightly away.", "warning")
        else:
            fb.add("position", "Finger positioning is good.", "success")

        # Orientation — FIX: reuse the exact deviation logic from
        # orientation_pass instead of duplicating it inline, so the two can
        # never drift apart.
        dev = self._orientation_deviation(result.orientation_angle_deg)
        if dev > 1.5 * self.config.orientation_max_deviation:
            fb.add("orientation", "Finger is tilted too much. Align finger straight.", "error")
        elif dev > self.config.orientation_max_deviation:
            fb.add("orientation", "Finger is slightly tilted. Try to keep it straighter.", "warning")
        else:
            fb.add("orientation", "Finger orientation is correct.", "success")

        # Overall
        if result.overall_pass:
            fb.add("overall", "Capture is acceptable.", "success")
        else:
            fb.add("overall", "Capture is not acceptable. Fix the issues above and retake.", "error")

        return fb
|
requirements.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
flask
|
| 2 |
+
opencv-python-headless
|
| 3 |
+
numpy
|
results/finger_quality_result.json
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"blur_score": 79.14,
|
| 3 |
+
"illumination_score": 134.96,
|
| 4 |
+
"coverage_ratio": 0.73,
|
| 5 |
+
"orientation_angle_deg": -70.94,
|
| 6 |
+
"blur_pass": true,
|
| 7 |
+
"illumination_pass": true,
|
| 8 |
+
"coverage_pass": true,
|
| 9 |
+
"orientation_pass": true,
|
| 10 |
+
"quality_score": 0.85,
|
| 11 |
+
"overall_pass": true,
|
| 12 |
+
"bbox": {
|
| 13 |
+
"x": 0,
|
| 14 |
+
"y": 26,
|
| 15 |
+
"width": 640,
|
| 16 |
+
"height": 905
|
| 17 |
+
},
|
| 18 |
+
"contour_area": 432000.5,
|
| 19 |
+
"feedback": {
|
| 20 |
+
"messages": [
|
| 21 |
+
{
|
| 22 |
+
"category": "blur",
|
| 23 |
+
"message": "Image sharpness is good.",
|
| 24 |
+
"severity": "success"
|
| 25 |
+
},
|
| 26 |
+
{
|
| 27 |
+
"category": "light",
|
| 28 |
+
"message": "Lighting conditions are good.",
|
| 29 |
+
"severity": "success"
|
| 30 |
+
},
|
| 31 |
+
{
|
| 32 |
+
"category": "position",
|
| 33 |
+
"message": "Finger too close. Move phone slightly away.",
|
| 34 |
+
"severity": "warning"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"category": "orientation",
|
| 38 |
+
"message": "Finger orientation is correct.",
|
| 39 |
+
"severity": "success"
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"category": "overall",
|
| 43 |
+
"message": "Capture is acceptable.",
|
| 44 |
+
"severity": "success"
|
| 45 |
+
}
|
| 46 |
+
],
|
| 47 |
+
"is_acceptable": true
|
| 48 |
+
}
|
| 49 |
+
}
|
utils.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from dataclasses import asdict
|
| 3 |
+
from typing import Any
|
| 4 |
+
|
| 5 |
+
from models import FingerQualityResult
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def _round_value(value: Any) -> Any:
|
| 9 |
+
if isinstance(value, float):
|
| 10 |
+
return round(value, 2)
|
| 11 |
+
if isinstance(value, dict):
|
| 12 |
+
return {k: _round_value(v) for k, v in value.items()}
|
| 13 |
+
if isinstance(value, (list, tuple)):
|
| 14 |
+
return [_round_value(v) for v in value]
|
| 15 |
+
return value
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def finger_quality_result_to_json(result: FingerQualityResult) -> str:
    """Serialize a FingerQualityResult to pretty JSON with floats rounded to 2 dp."""
    data = asdict(result)

    # Convert the (x, y, w, h) bbox tuple into named keys for frontend use.
    bbox = data["bbox"]
    if bbox is not None:
        x, y, width, height = bbox
        data["bbox"] = {"x": x, "y": y, "width": width, "height": height}

    return json.dumps(_round_value(data), indent=2)
|
visualizer.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Tuple
|
| 2 |
+
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from models import FingerQualityResult
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class Visualizer:
    """Draws detection geometry and per-metric scores onto a debug image."""

    @staticmethod
    def draw_axis(img, center, vec, length, color, thickness=2):
        """Draw an arrow of the given length from center along direction vec."""
        cx, cy = center
        tip = (int(cx + length * vec[0]), int(cy + length * vec[1]))
        cv2.arrowedLine(img, (cx, cy), tip, color, thickness, tipLength=0.2)

    @staticmethod
    def draw_debug(
        img: np.ndarray,
        contour: np.ndarray,
        bbox: Tuple[int, int, int, int],
        angle_deg: float,
        result: FingerQualityResult
    ) -> np.ndarray:
        """Return a copy of img annotated with bbox, contour, PCA axis and scores."""
        out = img.copy()
        x, y, w_box, h_box = bbox

        cv2.rectangle(out, (x, y), (x + w_box, y + h_box), (0, 255, 0), 2)
        cv2.drawContours(out, [contour], -1, (255, 0, 0), 2)

        # PCA on the contour points gives the centroid and dominant axis to
        # draw the orientation arrow.
        pts = contour.reshape(-1, 2).astype(np.float64)
        mean, eigenvectors, _ = cv2.PCACompute2(pts, mean=np.empty(0))
        centroid = (int(mean[0, 0]), int(mean[0, 1]))
        Visualizer.draw_axis(out, centroid, eigenvectors[0], length=80, color=(0, 0, 255), thickness=2)

        overlay_lines = [
            f"Blur: {result.blur_score:.1f} ({'OK' if result.blur_pass else 'BAD'})",
            f"Illum: {result.illumination_score:.1f} ({'OK' if result.illumination_pass else 'BAD'})",
            f"Coverage: {result.coverage_ratio*100:.1f}% ({'OK' if result.coverage_pass else 'BAD'})",
            f"Angle: {angle_deg:.1f} deg ({'OK' if result.orientation_pass else 'BAD'})",
            f"Quality: {result.quality_score:.2f} ({'PASS' if result.overall_pass else 'FAIL'})",
        ]

        # Green for passing lines, red otherwise; 22 px vertical spacing
        # starting at y=25.
        for idx, line in enumerate(overlay_lines):
            color = (0, 255, 0) if ("OK" in line or "PASS" in line) else (0, 0, 255)
            cv2.putText(out, line, (10, 25 + 22 * idx), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2, cv2.LINE_AA)

        return out
|