Commit ·
f4e5bc9
1
Parent(s): 774fae3
Update backend and UI; add models
Browse files- .gitattributes +1 -0
- .gitignore +7 -0
- backend/app.py +218 -0
- backend/inference.py +214 -0
- backend/models/AW_yolo.pt +3 -0
- backend/models/cervix_yolo.pt +3 -0
- backend/requirements.txt +10 -0
- src/components/AceticAnnotator.tsx +519 -61
- src/components/ImageAnnotator.tsx +154 -26
- src/pages/AcetowhiteExamPage.tsx +152 -17
- src/pages/BiopsyMarking.tsx +13 -3
- src/pages/GreenFilterPage.tsx +285 -20
- src/pages/GuidedCapturePage.tsx +464 -130
- src/pages/LugolExamPage.tsx +86 -1
- src/utils/filterUtils.ts +152 -0
.gitattributes
CHANGED
|
@@ -5,3 +5,4 @@
|
|
| 5 |
*.bmp filter=lfs diff=lfs merge=lfs -text
|
| 6 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 7 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 5 |
*.bmp filter=lfs diff=lfs merge=lfs -text
|
| 6 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 7 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
.gitignore
CHANGED
|
@@ -12,6 +12,13 @@ dist
|
|
| 12 |
dist-ssr
|
| 13 |
*.local
|
| 14 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
# Editor directories and files
|
| 16 |
.vscode/*
|
| 17 |
!.vscode/extensions.json
|
|
|
|
| 12 |
dist-ssr
|
| 13 |
*.local
|
| 14 |
|
| 15 |
+
# Python
|
| 16 |
+
__pycache__/
|
| 17 |
+
*.py[cod]
|
| 18 |
+
*.pyo
|
| 19 |
+
*.pyd
|
| 20 |
+
.venv/
|
| 21 |
+
|
| 22 |
# Editor directories and files
|
| 23 |
.vscode/*
|
| 24 |
!.vscode/extensions.json
|
backend/app.py
ADDED
|
@@ -0,0 +1,218 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import cv2
import numpy as np
import tempfile
import os
from io import BytesIO
from PIL import Image
import uvicorn
from inference import infer_aw_contour, analyze_frame, analyze_video_frame

# ASGI application serving the colposcopy inference endpoints below.
app = FastAPI(title="Pathora Colposcopy API", version="1.0.0")

# Add CORS middleware to allow requests from frontend
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers for credentialed requests under the CORS spec —
# consider listing the frontend origin(s) explicitly before deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get("/health")
async def health_check():
    """Liveness probe reporting that the API process is up and serving."""
    payload = {
        "status": "healthy",
        "service": "Pathora Colposcopy API",
    }
    return payload
@app.post("/api/infer-aw-contour")
async def infer_aw_contour_endpoint(file: UploadFile = File(...), conf_threshold: float = 0.4):
    """
    Inference endpoint for Acetowhite contour detection.

    Args:
        file: Image file (jpg, png, etc.)
        conf_threshold: Confidence threshold for YOLO model (0.0-1.0)

    Returns:
        JSON with a base64-encoded PNG overlay, the accepted contour
        polygons, the detection count and the threshold used.

    Raises:
        HTTPException: 400 for an unreadable image, 500 for inference errors.
    """
    import base64  # hoisted out of the happy-path branch

    try:
        # Read image file
        image_data = await file.read()

        # Decode with PIL rather than trusting the Content-Type header.
        try:
            image = Image.open(BytesIO(image_data))
        except Exception as e:
            raise HTTPException(status_code=400, detail=f"Invalid image file: {str(e)}")

        # Normalize any mode (RGBA, L, P, ...) to RGB before handing to
        # OpenCV. The original had separate RGBA/other branches that did
        # the same conversion; a single check suffices.
        if image.mode != 'RGB':
            image = image.convert('RGB')

        # Convert to numpy array and BGR format (OpenCV uses BGR).
        frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

        # Run inference - returns dict with 'overlay', 'contours', 'detections', etc.
        result = infer_aw_contour(frame, conf_threshold=conf_threshold)

        # Convert the BGR overlay back to RGB and encode as base64 PNG.
        if result["overlay"] is not None:
            result_rgb = cv2.cvtColor(result["overlay"], cv2.COLOR_BGR2RGB)
            result_image = Image.fromarray(result_rgb)

            buffer = BytesIO()
            result_image.save(buffer, format="PNG")
            image_base64 = base64.b64encode(buffer.getvalue()).decode()
        else:
            image_base64 = None

        return JSONResponse({
            "status": "success",
            "message": "Inference completed successfully",
            "result_image": image_base64,
            "contours": result["contours"],
            "detections": result["detections"],
            "confidence_threshold": conf_threshold
        })

    except HTTPException:
        # Bug fix: without this re-raise, the 400 raised above for an
        # invalid image was swallowed by the generic handler below and
        # reported to the client as a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error during inference: {str(e)}")
@app.post("/api/batch-infer")
async def batch_infer(files: list[UploadFile] = File(...), conf_threshold: float = 0.4):
    """
    Batch inference endpoint for multiple images.

    Args:
        files: List of image files
        conf_threshold: Confidence threshold for YOLO model

    Returns:
        JSON with per-file results; a decoding or inference failure is
        reported per file instead of failing the whole batch.
    """
    import base64  # hoisted: was re-imported inside the per-file loop

    results = []

    for file in files:
        try:
            image_data = await file.read()
            image = Image.open(BytesIO(image_data))

            # Normalize any mode (RGBA, L, P, ...) to RGB before OpenCV;
            # the two original branches performed the identical conversion.
            if image.mode != 'RGB':
                image = image.convert('RGB')

            frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

            # Run inference - returns dict with 'overlay', 'contours', 'detections', etc.
            result = infer_aw_contour(frame, conf_threshold=conf_threshold)

            if result["overlay"] is not None:
                result_rgb = cv2.cvtColor(result["overlay"], cv2.COLOR_BGR2RGB)
                result_image = Image.fromarray(result_rgb)

                buffer = BytesIO()
                result_image.save(buffer, format="PNG")
                image_base64 = base64.b64encode(buffer.getvalue()).decode()
            else:
                image_base64 = None

            results.append({
                "filename": file.filename,
                "status": "success",
                "result_image": image_base64,
                "contours": result["contours"],
                "detections": result["detections"]
            })

        except Exception as e:
            # Best-effort batch semantics: record the failure and continue.
            results.append({
                "filename": file.filename,
                "status": "error",
                "error": str(e)
            })

    return JSONResponse({
        "status": "completed",
        "total_files": len(results),
        "results": results
    })
@app.post("/infer/image")
async def infer_image(file: UploadFile = File(...)):
    """
    Single image inference endpoint for cervix detection/quality.

    Returns the analyze_frame() result dict (detection flag/confidence and
    quality scores) as JSON; raises HTTP 500 on any processing error.
    """
    try:
        contents = await file.read()
        # Decode the raw upload bytes directly with OpenCV (BGR order).
        nparr = np.frombuffer(contents, np.uint8)
        frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        # NOTE(review): cv2.imdecode returns None for undecodable bytes;
        # analyze_frame(None) then answers detected=False with HTTP 200 —
        # confirm that is intended rather than responding 400.

        result = analyze_frame(frame)

        return JSONResponse(content=result)

    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@app.post("/infer/video")
async def infer_video(file: UploadFile = File(...)):
    """
    Video inference endpoint for cervix detection/quality (frame-by-frame).

    The upload is spooled to a temp file (OpenCV's VideoCapture needs a
    path), every decodable frame is scored, and per-frame status/quality
    results are returned.

    Raises:
        HTTPException: 500 on any processing error.
    """
    temp_path = None
    cap = None
    try:
        with tempfile.NamedTemporaryFile(delete=False) as tmp:
            tmp.write(await file.read())
            temp_path = tmp.name

        cap = cv2.VideoCapture(temp_path)

        responses = []
        frame_count = 0

        while True:
            ret, frame = cap.read()
            if not ret:
                break

            result = analyze_video_frame(frame)
            responses.append({
                "frame": frame_count,
                "status": result["status"],
                "quality_percent": result["quality_percent"]
            })

            frame_count += 1

        return JSONResponse(content={
            "total_frames": frame_count,
            "results": responses
        })

    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        # Bug fix: release the capture and delete the temp file even when
        # processing raises — previously an exception mid-loop leaked both
        # the OS file handle and the temp file on disk.
        if cap is not None:
            cap.release()
        if temp_path is not None and os.path.exists(temp_path):
            os.remove(temp_path)
if __name__ == "__main__":
    # Development entry point; in production run via an ASGI server
    # (e.g. `uvicorn app:app`).
    uvicorn.run(app, host="0.0.0.0", port=8000)
backend/inference.py
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#------------------------------------------------
# Acetowhite Contour Inference
# -----------------------------------------------
import os
import cv2
import numpy as np
from ultralytics import YOLO

# MODEL LOAD (Safe Backend Path)
# Resolve weights relative to this file so loading works regardless of the
# process working directory. Loading happens at import time, so importing
# this module pulls the model into memory immediately.
AW_MODEL_PATH = os.path.join(os.path.dirname(__file__), "models", "AW_yolo.pt")
aw_model = YOLO(AW_MODEL_PATH)

print("Acetowhite model loaded from:", AW_MODEL_PATH)

# CONFIGURABLE PARAMETERS
MIN_AREA = 150  # minimum contour area (px)
SMOOTHING_EPSILON = 0.002  # polygon smoothing factor
DEFAULT_CONF = 0.4  # default confidence threshold
IMG_SIZE = 640  # inference size
# MAIN INFERENCE FUNCTION
def infer_aw_contour(frame, conf_threshold=DEFAULT_CONF):
    """Run the acetowhite YOLO segmentation model on one BGR frame.

    Returns a dict with the annotated overlay image, the accepted contour
    polygons (each with area and confidence), the detection count, and the
    frame dimensions. A None frame yields an empty result.
    """
    if frame is None:
        return {
            "overlay": None,
            "contours": [],
            "detections": 0,
            "frame_width": 0,
            "frame_height": 0,
        }

    prediction = aw_model.predict(
        frame, conf=conf_threshold, imgsz=IMG_SIZE, verbose=False
    )[0]

    annotated = frame.copy()
    accepted = []

    masks = prediction.masks
    if masks is not None and len(masks.xy) > 0:
        for i, poly in enumerate(masks.xy):
            score = float(prediction.boxes.conf[i])

            # Extra safety layer on top of the model-side threshold.
            if score < conf_threshold:
                continue

            pts = poly.astype(np.int32)
            poly_area = cv2.contourArea(pts)
            if poly_area < MIN_AREA:
                continue

            # Simplify the polygon slightly before drawing.
            eps = SMOOTHING_EPSILON * cv2.arcLength(pts, True)
            pts = cv2.approxPolyDP(pts, eps, True)

            # Draw clean contour in green.
            cv2.polylines(
                annotated, [pts], isClosed=True, color=(0, 255, 0), thickness=2
            )

            accepted.append({
                "points": pts.tolist(),
                "area": float(poly_area),
                "confidence": round(score, 3),
            })

    return {
        "overlay": annotated,
        "contours": accepted,
        "detections": len(accepted),
        "frame_width": frame.shape[1],
        "frame_height": frame.shape[0],
    }
#-----------------------------------------------
# Cervical and Image Quality Check Inference
# ----------------------------------------------
# NOTE(review): cv2, numpy and YOLO are re-imported here although they are
# already imported at the top of this file — harmless, but could be
# consolidated into the file header.
import cv2
import numpy as np
from ultralytics import YOLO
from collections import deque

# Resolve the cervix-detector weights relative to this file (same pattern
# as the acetowhite model above); loaded eagerly at import time.
cervix_MODEL_PATH = os.path.join(os.path.dirname(__file__), "models", "cervix_yolo.pt")
cervix_model = YOLO(cervix_MODEL_PATH)

print("Cervix model loaded from:", cervix_MODEL_PATH)
print("Classes:", cervix_model.model.names)

# Stability buffer for video
# NOTE(review): this is module-level mutable state shared by every caller in
# the process — detection history bleeds across unrelated videos/requests;
# confirm whether per-session buffers are intended.
detect_history = deque(maxlen=10)
# QUALITY COMPONENT FUNCTIONS
def compute_focus(gray_roi):
    """Score sharpness of a grayscale ROI via variance of the Laplacian.

    The raw variance is normalized against 200 and clamped to [0, 1].
    """
    variance = cv2.Laplacian(gray_roi, cv2.CV_64F).var()
    return min(variance / 200, 1.0)
def compute_exposure(gray_roi):
    """Score mean brightness of a grayscale ROI on [0, 1].

    Mean intensities in [70, 180] are ideal (score 1.0); outside that band
    the score decays linearly with distance from 125 and is floored at 0.
    """
    mean_level = np.mean(gray_roi)
    if not (70 <= mean_level <= 180):
        penalty = abs(mean_level - 125) / 125
        return max(0, 1 - penalty)
    return 1.0
def compute_glare(gray_roi):
    """Score specular glare in a grayscale ROI on [0, 1] (1.0 = no glare).

    Pixels above intensity 240 count as glare; the score is a step
    function of the glare-pixel fraction.
    """
    _, saturated = cv2.threshold(gray_roi, 240, 255, cv2.THRESH_BINARY)
    ratio = np.sum(saturated == 255) / gray_roi.size

    if ratio < 0.01:
        return 1.0
    if ratio < 0.03:
        return 0.7
    if ratio < 0.06:
        return 0.4
    return 0.1
# MAIN FRAME ANALYSIS

def analyze_frame(frame, conf_threshold=0.3):
    """Detect the cervix in a BGR frame and score image quality.

    Runs the cervix YOLO detector, crops the first (taken as highest
    confidence) box from the grayscale frame and combines focus, exposure
    and glare scores (weights 0.35/0.30/0.35) into one quality score.

    Returns a dict with keys: detected, detection_confidence,
    quality_score, quality_percent, and (on success) the three component
    scores. `detected` is False when the frame is None, nothing is found,
    or the ROI is empty.
    """
    if frame is None:
        return {
            "detected": False,
            "detection_confidence": 0.0,
            "quality_score": 0.0,
            "quality_percent": 0
        }

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    results = cervix_model.predict(
        frame,
        conf=conf_threshold,
        imgsz=640,
        verbose=False
    )

    r = results[0]

    # No detection above threshold — report a zero-quality miss.
    if r.boxes is None or len(r.boxes) == 0:
        return {
            "detected": False,
            "detection_confidence": 0.0,
            "quality_score": 0.0,
            "quality_percent": 0
        }

    # Take highest confidence box
    # NOTE(review): index 0 is assumed to be the highest-confidence box —
    # confirm ultralytics ordering rather than sorting explicitly.
    box = r.boxes.xyxy.cpu().numpy()[0].astype(int)
    detection_conf = float(r.boxes.conf.cpu().numpy()[0])

    x1, y1, x2, y2 = box
    roi = gray[y1:y2, x1:x2]

    # Degenerate crop (zero-width/height box) — treat as not usable.
    if roi.size == 0:
        return {
            "detected": False,
            "detection_confidence": detection_conf,
            "quality_score": 0.0,
            "quality_percent": 0
        }

    # ---- Quality components ----
    focus_n = compute_focus(roi)
    exposure_n = compute_exposure(roi)
    glare_n = compute_glare(roi)

    # Weighted blend; weights sum to 1.0.
    quality_score = (
        0.35 * focus_n +
        0.30 * exposure_n +
        0.35 * glare_n
    )

    return {
        "detected": True,
        "detection_confidence": round(detection_conf, 3),
        "quality_score": round(float(quality_score), 3),
        "quality_percent": int(quality_score * 100),
        "focus_score": round(float(focus_n), 3),
        "exposure_score": round(float(exposure_n), 3),
        "glare_score": round(float(glare_n), 3)
    }
# VIDEO STABILITY ANALYSIS

def analyze_video_frame(frame, conf_threshold=0.3, history=None):
    """Analyze one video frame and attach a temporal stability status.

    Args:
        frame: BGR video frame (or None).
        conf_threshold: detector confidence threshold passed to analyze_frame.
        history: optional deque of recent detection flags. Defaults to the
            shared module-level buffer for backward compatibility; callers
            processing independent videos should pass a fresh
            deque(maxlen=10) so one video's history cannot influence
            another's stability status.

    Returns:
        The analyze_frame() result dict with an added "status" string.
    """
    if history is None:
        # Backward-compatible default: the process-wide shared buffer.
        history = detect_history

    result = analyze_frame(frame, conf_threshold)
    history.append(1 if result["detected"] else 0)
    stable_count = sum(history)

    # 7 of the last 10 frames detected => considered stable.
    if stable_count >= 7:
        result["status"] = "Cervix Detected (Stable)"
    elif stable_count > 0:
        result["status"] = "Cervix Detected (Unstable)"
    else:
        result["status"] = "Searching Cervix"

    return result
backend/models/AW_yolo.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:46b5513bc0cc3b416c2f8e89f220cf2715a2282c956a022b0c5ab5e38d54723b
|
| 3 |
+
size 6768948
|
backend/models/cervix_yolo.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:baf68aff5a06a22124cda181a6c50204efafe2150121b66642c83ba4ea916898
|
| 3 |
+
size 6249194
|
backend/requirements.txt
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi==0.109.0
|
| 2 |
+
uvicorn==0.27.0
|
| 3 |
+
opencv-python==4.9.0.80
|
| 4 |
+
numpy==1.24.3
|
| 5 |
+
torch==2.2.0
|
| 6 |
+
torchvision==0.17.0
|
| 7 |
+
ultralytics==8.1.0
|
| 8 |
+
pillow==10.2.0
|
| 9 |
+
python-multipart==0.0.6
|
| 10 |
+
setuptools>=69.0.0
|
src/components/AceticAnnotator.tsx
CHANGED
|
@@ -31,12 +31,27 @@ interface AceticAnnotatorProps {
|
|
| 31 |
|
| 32 |
export interface AceticAnnotatorHandle {
|
| 33 |
addAIAnnotations: (aiAnnotations: Annotation[]) => void;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
}
|
| 35 |
|
| 36 |
const AceticAnnotatorComponent = forwardRef<AceticAnnotatorHandle, AceticAnnotatorProps>(({ imageUrl, imageUrls, onAnnotationsChange }, ref) => {
|
| 37 |
const canvasRef = useRef<HTMLCanvasElement | null>(null);
|
| 38 |
const containerRef = useRef<HTMLDivElement | null>(null);
|
| 39 |
-
const
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 40 |
const [tool, setTool] = useState<ShapeType>('rect');
|
| 41 |
const [color, setColor] = useState('#05998c');
|
| 42 |
const [labelInput, setLabelInput] = useState('');
|
|
@@ -63,6 +78,25 @@ const AceticAnnotatorComponent = forwardRef<AceticAnnotatorHandle, AceticAnnotat
|
|
| 63 |
const [annotationIdentified, setAnnotationIdentified] = useState<Record<string, boolean>>({});
|
| 64 |
const [annotationAccepted, setAnnotationAccepted] = useState<Record<string, boolean>>({});
|
| 65 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 66 |
// Predefined label options for Acetic Acid step (ONLY CHANGE from ImageAnnotator)
|
| 67 |
const labelOptions = [
|
| 68 |
'Acetowhite',
|
|
@@ -120,33 +154,384 @@ const AceticAnnotatorComponent = forwardRef<AceticAnnotatorHandle, AceticAnnotat
|
|
| 120 |
}));
|
| 121 |
};
|
| 122 |
|
| 123 |
-
//
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 124 |
useImperativeHandle(ref, () => ({
|
| 125 |
addAIAnnotations: (aiAnnotations: Annotation[]) => {
|
| 126 |
-
setAnnotations(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 127 |
}
|
| 128 |
}));
|
| 129 |
|
| 130 |
const images = imageUrls || (imageUrl ? [imageUrl] : []);
|
| 131 |
const currentImageUrl = images[selectedImageIndex];
|
| 132 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 133 |
useEffect(() => {
|
| 134 |
const img = new Image();
|
| 135 |
img.src = currentImageUrl;
|
| 136 |
img.onload = () => {
|
| 137 |
setImageDimensions({ width: img.width, height: img.height });
|
| 138 |
setImageLoaded(true);
|
| 139 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 140 |
};
|
| 141 |
}, [currentImageUrl]);
|
| 142 |
|
| 143 |
useEffect(() => {
|
| 144 |
if (imageLoaded) drawCanvas();
|
| 145 |
-
}, [
|
| 146 |
|
| 147 |
useEffect(() => {
|
| 148 |
if (onAnnotationsChange) onAnnotationsChange(annotations);
|
| 149 |
}, [annotations, onAnnotationsChange]);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 150 |
|
| 151 |
const clearAnnotations = () => setAnnotations([]);
|
| 152 |
const deleteLastAnnotation = () => setAnnotations(prev => prev.slice(0, -1));
|
|
@@ -232,44 +617,82 @@ const AceticAnnotatorComponent = forwardRef<AceticAnnotatorHandle, AceticAnnotat
|
|
| 232 |
|
| 233 |
const cancelPolygon = () => setPolygonPoints([]);
|
| 234 |
|
| 235 |
-
const getBoundsFromPoints = (pts: Point[]) => {
|
| 236 |
-
const xs = pts.map(p => p.x);
|
| 237 |
-
const ys = pts.map(p => p.y);
|
| 238 |
-
const minX = Math.min(...xs);
|
| 239 |
-
const minY = Math.min(...ys);
|
| 240 |
-
const maxX = Math.max(...xs);
|
| 241 |
-
const maxY = Math.max(...ys);
|
| 242 |
-
return { x: minX, y: minY, width: maxX - minX, height: maxY - minY };
|
| 243 |
-
};
|
| 244 |
-
|
| 245 |
const drawCanvas = () => {
|
| 246 |
const canvas = canvasRef.current;
|
| 247 |
const container = containerRef.current;
|
| 248 |
if (!canvas || !container) return;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 249 |
const ctx = canvas.getContext('2d');
|
| 250 |
if (!ctx) return;
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 257 |
canvas.width = containerWidth;
|
| 258 |
canvas.height = canvasHeight;
|
| 259 |
-
|
| 260 |
-
|
|
|
|
|
|
|
|
|
|
| 261 |
|
| 262 |
-
|
| 263 |
-
|
|
|
|
|
|
|
| 264 |
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
|
|
|
|
|
|
|
|
|
| 273 |
};
|
| 274 |
|
| 275 |
const drawPreviewPolygon = (ctx: CanvasRenderingContext2D, pts: Point[], canvasWidth: number, canvasHeight: number, col: string) => {
|
|
@@ -289,11 +712,13 @@ const AceticAnnotatorComponent = forwardRef<AceticAnnotatorHandle, AceticAnnotat
|
|
| 289 |
};
|
| 290 |
|
| 291 |
const drawAnnotation = (ctx: CanvasRenderingContext2D, annotation: Annotation, canvasWidth: number, canvasHeight: number) => {
|
| 292 |
-
const scaleX = canvasWidth / imageDimensions.width
|
| 293 |
-
const scaleY = canvasHeight / imageDimensions.height
|
|
|
|
| 294 |
ctx.strokeStyle = annotation.color;
|
| 295 |
ctx.lineWidth = 3;
|
| 296 |
ctx.setLineDash([]);
|
|
|
|
| 297 |
if (annotation.type === 'rect') {
|
| 298 |
ctx.strokeRect(annotation.x * scaleX, annotation.y * scaleY, annotation.width * scaleX, annotation.height * scaleY);
|
| 299 |
ctx.fillStyle = annotation.color + '20';
|
|
@@ -307,17 +732,34 @@ const AceticAnnotatorComponent = forwardRef<AceticAnnotatorHandle, AceticAnnotat
|
|
| 307 |
ctx.stroke();
|
| 308 |
ctx.fillStyle = annotation.color + '20';
|
| 309 |
ctx.fill();
|
| 310 |
-
} else if (annotation.type === 'polygon' && annotation.points) {
|
|
|
|
|
|
|
| 311 |
ctx.beginPath();
|
|
|
|
| 312 |
annotation.points.forEach((p, i) => {
|
| 313 |
-
|
| 314 |
-
|
| 315 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 316 |
});
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 317 |
ctx.closePath();
|
| 318 |
ctx.stroke();
|
| 319 |
ctx.fillStyle = annotation.color + '20';
|
| 320 |
ctx.fill();
|
|
|
|
|
|
|
| 321 |
}
|
| 322 |
};
|
| 323 |
|
|
@@ -380,30 +822,39 @@ const AceticAnnotatorComponent = forwardRef<AceticAnnotatorHandle, AceticAnnotat
|
|
| 380 |
</div>
|
| 381 |
)}
|
| 382 |
|
| 383 |
-
{/* Tab Switcher */}
|
| 384 |
-
<div className="flex justify-center">
|
| 385 |
-
<div className="
|
| 386 |
-
<
|
| 387 |
-
|
| 388 |
-
|
| 389 |
-
|
| 390 |
-
|
| 391 |
-
|
| 392 |
-
|
| 393 |
-
|
| 394 |
-
|
| 395 |
-
|
| 396 |
-
|
| 397 |
-
|
| 398 |
-
|
| 399 |
-
|
| 400 |
-
|
| 401 |
-
|
| 402 |
-
|
| 403 |
-
|
| 404 |
-
|
| 405 |
-
|
|
|
|
|
|
|
| 406 |
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 407 |
</div>
|
| 408 |
|
| 409 |
{/* Annotate Tab */}
|
|
@@ -461,6 +912,13 @@ const AceticAnnotatorComponent = forwardRef<AceticAnnotatorHandle, AceticAnnotat
|
|
| 461 |
<button onClick={clearAnnotations} disabled={annotations.length === 0} className="px-3 md:px-4 py-1 md:py-2 text-xs md:text-sm font-medium text-red-600 bg-white border border-red-200 rounded-lg hover:bg-red-50 disabled:opacity-50 disabled:cursor-not-allowed transition-colors">Clear All</button>
|
| 462 |
</div>
|
| 463 |
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 464 |
|
| 465 |
<div className="flex flex-col">
|
| 466 |
<div>
|
|
|
|
| 31 |
|
| 32 |
export interface AceticAnnotatorHandle {
|
| 33 |
addAIAnnotations: (aiAnnotations: Annotation[]) => void;
|
| 34 |
+
setImageIndex: (index: number) => void;
|
| 35 |
+
getCurrentImageIndex: () => number;
|
| 36 |
+
clearAIAnnotations: () => void;
|
| 37 |
+
clearAllAnnotations: () => void;
|
| 38 |
+
resetViewport: () => void;
|
| 39 |
+
waitForImageReady: () => Promise<void>;
|
| 40 |
+
runAIAssist: () => Promise<void>;
|
| 41 |
}
|
| 42 |
|
| 43 |
const AceticAnnotatorComponent = forwardRef<AceticAnnotatorHandle, AceticAnnotatorProps>(({ imageUrl, imageUrls, onAnnotationsChange }, ref) => {
|
| 44 |
const canvasRef = useRef<HTMLCanvasElement | null>(null);
|
| 45 |
const containerRef = useRef<HTMLDivElement | null>(null);
|
| 46 |
+
const imageReadyResolveRef = useRef<(() => void) | null>(null);
|
| 47 |
+
const emptyAnnotationsRef = useRef<Annotation[]>([]);
|
| 48 |
+
const cachedImagesRef = useRef<Record<string, HTMLImageElement>>({});
|
| 49 |
+
const pendingDrawAnimationRef = useRef<number | null>(null);
|
| 50 |
+
const lastDrawStateRef = useRef<string>('');
|
| 51 |
+
|
| 52 |
+
// Store annotations per image index
|
| 53 |
+
const [annotationsByImage, setAnnotationsByImage] = useState<Record<number, Annotation[]>>({});
|
| 54 |
+
|
| 55 |
const [tool, setTool] = useState<ShapeType>('rect');
|
| 56 |
const [color, setColor] = useState('#05998c');
|
| 57 |
const [labelInput, setLabelInput] = useState('');
|
|
|
|
| 78 |
const [annotationIdentified, setAnnotationIdentified] = useState<Record<string, boolean>>({});
|
| 79 |
const [annotationAccepted, setAnnotationAccepted] = useState<Record<string, boolean>>({});
|
| 80 |
|
| 81 |
+
// AI state
|
| 82 |
+
const [isAILoading, setIsAILoading] = useState(false);
|
| 83 |
+
const [aiError, setAIError] = useState<string | null>(null);
|
| 84 |
+
|
| 85 |
+
// Get annotations for current image
|
| 86 |
+
const annotations = annotationsByImage[selectedImageIndex] ?? emptyAnnotationsRef.current;
|
| 87 |
+
|
| 88 |
+
// Helper to update annotations for current image
|
| 89 |
+
const setAnnotations = (updater: Annotation[] | ((prev: Annotation[]) => Annotation[])) => {
|
| 90 |
+
setAnnotationsByImage(prev => {
|
| 91 |
+
const currentAnnotations = prev[selectedImageIndex] || [];
|
| 92 |
+
const newAnnotations = typeof updater === 'function' ? updater(currentAnnotations) : updater;
|
| 93 |
+
return {
|
| 94 |
+
...prev,
|
| 95 |
+
[selectedImageIndex]: newAnnotations
|
| 96 |
+
};
|
| 97 |
+
});
|
| 98 |
+
};
|
| 99 |
+
|
| 100 |
// Predefined label options for Acetic Acid step (ONLY CHANGE from ImageAnnotator)
|
| 101 |
const labelOptions = [
|
| 102 |
'Acetowhite',
|
|
|
|
| 154 |
}));
|
| 155 |
};
|
| 156 |
|
| 157 |
+
// Helper function to get bounding box from polygon points
|
| 158 |
+
const getBoundsFromPoints = (points: any[]) => {
|
| 159 |
+
if (!points || !Array.isArray(points) || points.length === 0) {
|
| 160 |
+
console.warn('⚠️ Invalid points array in getBoundsFromPoints:', points);
|
| 161 |
+
return { x: 0, y: 0, width: 0, height: 0 };
|
| 162 |
+
}
|
| 163 |
+
|
| 164 |
+
// Handle both array [x, y] and object {x, y} formats
|
| 165 |
+
const getX = (p: any) => Array.isArray(p) ? p[0] : (p?.x ?? 0);
|
| 166 |
+
const getY = (p: any) => Array.isArray(p) ? p[1] : (p?.y ?? 0);
|
| 167 |
+
|
| 168 |
+
const xs = points.map(getX).filter(x => typeof x === 'number' && !isNaN(x));
|
| 169 |
+
const ys = points.map(getY).filter(y => typeof y === 'number' && !isNaN(y));
|
| 170 |
+
|
| 171 |
+
if (xs.length === 0 || ys.length === 0) {
|
| 172 |
+
console.warn('⚠️ No valid coordinates in points:', points);
|
| 173 |
+
return { x: 0, y: 0, width: 0, height: 0 };
|
| 174 |
+
}
|
| 175 |
+
const minX = Math.min(...xs);
|
| 176 |
+
const minY = Math.min(...ys);
|
| 177 |
+
const maxX = Math.max(...xs);
|
| 178 |
+
const maxY = Math.max(...ys);
|
| 179 |
+
return { x: minX, y: minY, width: maxX - minX, height: maxY - minY };
|
| 180 |
+
};
|
| 181 |
+
|
| 182 |
+
// AI Assist - Run acetowhite contour detection on current image
|
| 183 |
+
const runAIAssist = async () => {
|
| 184 |
+
const images = imageUrls || (imageUrl ? [imageUrl] : []);
|
| 185 |
+
if (images.length === 0) {
|
| 186 |
+
setAIError('No images available to analyze');
|
| 187 |
+
return;
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
const currentImageUrl = images[selectedImageIndex];
|
| 191 |
+
|
| 192 |
+
setIsAILoading(true);
|
| 193 |
+
setAIError(null);
|
| 194 |
+
|
| 195 |
+
try {
|
| 196 |
+
// Ensure image is loaded first
|
| 197 |
+
if (!imageLoaded || imageDimensions.width === 0 || imageDimensions.height === 0) {
|
| 198 |
+
console.log('⏳ Waiting for image to load before AI processing...');
|
| 199 |
+
await new Promise<void>((resolve) => {
|
| 200 |
+
imageReadyResolveRef.current = resolve;
|
| 201 |
+
});
|
| 202 |
+
}
|
| 203 |
+
|
| 204 |
+
const response = await fetch(currentImageUrl);
|
| 205 |
+
const blob = await response.blob();
|
| 206 |
+
|
| 207 |
+
const formData = new FormData();
|
| 208 |
+
formData.append('file', blob, 'image.jpg');
|
| 209 |
+
formData.append('conf_threshold', '0.4');
|
| 210 |
+
|
| 211 |
+
console.log('🚀 Sending to backend with image dimensions:', imageDimensions);
|
| 212 |
+
|
| 213 |
+
const backendResponse = await fetch('http://localhost:8000/api/infer-aw-contour', {
|
| 214 |
+
method: 'POST',
|
| 215 |
+
body: formData,
|
| 216 |
+
});
|
| 217 |
+
|
| 218 |
+
if (!backendResponse.ok) {
|
| 219 |
+
throw new Error(`Backend error: ${backendResponse.statusText}`);
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
const result = await backendResponse.json();
|
| 223 |
+
|
| 224 |
+
console.log('📥 Backend response received');
|
| 225 |
+
|
| 226 |
+
if (result.status !== 'success') {
|
| 227 |
+
throw new Error(result.error || 'AI inference failed');
|
| 228 |
+
}
|
| 229 |
+
|
| 230 |
+
if (!result.contours || !Array.isArray(result.contours)) {
|
| 231 |
+
console.warn('⚠️ Invalid contours format:', result.contours);
|
| 232 |
+
setAIError('Invalid response format from server');
|
| 233 |
+
setIsAILoading(false);
|
| 234 |
+
return;
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
const contours = result.contours;
|
| 238 |
+
if (contours.length === 0) {
|
| 239 |
+
console.log('ℹ️ No contours detected');
|
| 240 |
+
setAIError('No contours detected for this image');
|
| 241 |
+
setIsAILoading(false);
|
| 242 |
+
return;
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
console.log(`✅ Processing ${contours.length} contour(s)`);
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
const aiAnnotations = contours
|
| 249 |
+
.filter((contour: any) => contour !== null && contour !== undefined)
|
| 250 |
+
.map((contour: any, idx: number) => {
|
| 251 |
+
let points: { x: number; y: number }[];
|
| 252 |
+
let area: number | undefined;
|
| 253 |
+
let confidence: number | undefined;
|
| 254 |
+
|
| 255 |
+
if (!contour) {
|
| 256 |
+
console.warn(`⚠️ Contour ${idx} is null or undefined`);
|
| 257 |
+
return null;
|
| 258 |
+
}
|
| 259 |
+
|
| 260 |
+
// Handle two formats:
|
| 261 |
+
// 1. OLD: contour is an array of [x, y] pairs: [[x,y], [x,y], ...]
|
| 262 |
+
// 2. NEW: contour is an object: {points: [[x,y], ...], area: ..., confidence: ...}
|
| 263 |
+
|
| 264 |
+
if (Array.isArray(contour)) {
|
| 265 |
+
// OLD FORMAT: contour is directly an array of points
|
| 266 |
+
try {
|
| 267 |
+
// Use points directly as-is from backend
|
| 268 |
+
points = contour as any[];
|
| 269 |
+
|
| 270 |
+
// Handle OpenCV format: [[[x, y]], [[x, y]], ...] (nested arrays)
|
| 271 |
+
if (Array.isArray(contour[0]) && Array.isArray(contour[0][0])) {
|
| 272 |
+
points = contour.map((p: any) => {
|
| 273 |
+
if (Array.isArray(p) && Array.isArray(p[0])) {
|
| 274 |
+
return p[0]; // Extract [x, y] from [[x, y]]
|
| 275 |
+
}
|
| 276 |
+
return p;
|
| 277 |
+
});
|
| 278 |
+
}
|
| 279 |
+
} catch (e) {
|
| 280 |
+
console.warn(`⚠️ Error processing contour ${idx}:`, e);
|
| 281 |
+
return null;
|
| 282 |
+
}
|
| 283 |
+
area = undefined;
|
| 284 |
+
confidence = undefined;
|
| 285 |
+
} else if (contour && typeof contour === 'object' && contour.points && Array.isArray(contour.points)) {
|
| 286 |
+
// NEW FORMAT: contour is an object with points property
|
| 287 |
+
try {
|
| 288 |
+
// Use points directly as-is from backend
|
| 289 |
+
points = contour.points as any[];
|
| 290 |
+
|
| 291 |
+
// Handle OpenCV format: [[[x, y]], [[x, y]], ...] (nested arrays)
|
| 292 |
+
if (Array.isArray(contour.points[0]) && Array.isArray(contour.points[0][0])) {
|
| 293 |
+
points = contour.points.map((p: any) => {
|
| 294 |
+
if (Array.isArray(p) && Array.isArray(p[0])) {
|
| 295 |
+
return p[0]; // Extract [x, y] from [[x, y]]
|
| 296 |
+
}
|
| 297 |
+
return p;
|
| 298 |
+
});
|
| 299 |
+
}
|
| 300 |
+
} catch (e) {
|
| 301 |
+
console.warn(`⚠️ Error processing contour points ${idx}:`, e);
|
| 302 |
+
return null;
|
| 303 |
+
}
|
| 304 |
+
area = contour.area;
|
| 305 |
+
confidence = contour.confidence;
|
| 306 |
+
} else {
|
| 307 |
+
console.warn(`⚠️ Invalid contour format at index ${idx}:`, contour);
|
| 308 |
+
return null;
|
| 309 |
+
}
|
| 310 |
+
|
| 311 |
+
if (!points || points.length < 3) {
|
| 312 |
+
console.warn(`⚠️ Contour ${idx} has insufficient points (${points?.length || 0})`);
|
| 313 |
+
return null;
|
| 314 |
+
}
|
| 315 |
+
|
| 316 |
+
// Normalize point scale if backend returns normalized (0-1) or percent (0-100) coordinates
|
| 317 |
+
let scaledPoints = points;
|
| 318 |
+
|
| 319 |
+
if (imageDimensions.width > 0 && imageDimensions.height > 0) {
|
| 320 |
+
// Helper to extract x from various formats
|
| 321 |
+
const getX = (p: any) => {
|
| 322 |
+
if (Array.isArray(p) && typeof p[0] === 'number') return p[0];
|
| 323 |
+
if (Array.isArray(p) && Array.isArray(p[0])) return p[0][0];
|
| 324 |
+
if (typeof p === 'object' && p !== null) return p.x;
|
| 325 |
+
return undefined;
|
| 326 |
+
};
|
| 327 |
+
|
| 328 |
+
// Helper to extract y from various formats
|
| 329 |
+
const getY = (p: any) => {
|
| 330 |
+
if (Array.isArray(p) && typeof p[0] === 'number') return p[1];
|
| 331 |
+
if (Array.isArray(p) && Array.isArray(p[0])) return p[0][1];
|
| 332 |
+
if (typeof p === 'object' && p !== null) return p.y;
|
| 333 |
+
return undefined;
|
| 334 |
+
};
|
| 335 |
+
|
| 336 |
+
const xValues = points.map(getX).filter(x => typeof x === 'number' && !isNaN(x));
|
| 337 |
+
const yValues = points.map(getY).filter(y => typeof y === 'number' && !isNaN(y));
|
| 338 |
+
|
| 339 |
+
if (xValues.length === 0 || yValues.length === 0) {
|
| 340 |
+
// Fallback: assume [x, y] or [[x, y]] array format
|
| 341 |
+
scaledPoints = points.map((p) => {
|
| 342 |
+
let x, y;
|
| 343 |
+
if (Array.isArray(p) && Array.isArray(p[0])) {
|
| 344 |
+
x = Number(p[0][0]);
|
| 345 |
+
y = Number(p[0][1]);
|
| 346 |
+
} else if (Array.isArray(p) && p.length >= 2) {
|
| 347 |
+
x = Number(p[0]);
|
| 348 |
+
y = Number(p[1]);
|
| 349 |
+
} else {
|
| 350 |
+
x = NaN;
|
| 351 |
+
y = NaN;
|
| 352 |
+
}
|
| 353 |
+
return { x, y };
|
| 354 |
+
});
|
| 355 |
+
} else {
|
| 356 |
+
const maxX = Math.max(...xValues);
|
| 357 |
+
const maxY = Math.max(...yValues);
|
| 358 |
+
|
| 359 |
+
if (maxX <= 1 && maxY <= 1) {
|
| 360 |
+
scaledPoints = points.map(p => {
|
| 361 |
+
const x = getX(p);
|
| 362 |
+
const y = getY(p);
|
| 363 |
+
return { x: x * imageDimensions.width, y: y * imageDimensions.height };
|
| 364 |
+
});
|
| 365 |
+
} else if (maxX <= 100 && maxY <= 100) {
|
| 366 |
+
scaledPoints = points.map(p => {
|
| 367 |
+
const x = getX(p);
|
| 368 |
+
const y = getY(p);
|
| 369 |
+
return { x: (x / 100) * imageDimensions.width, y: (y / 100) * imageDimensions.height };
|
| 370 |
+
});
|
| 371 |
+
} else {
|
| 372 |
+
scaledPoints = points.map(p => {
|
| 373 |
+
const x = getX(p);
|
| 374 |
+
const y = getY(p);
|
| 375 |
+
return { x: Number(x), y: Number(y) };
|
| 376 |
+
});
|
| 377 |
+
}
|
| 378 |
+
}
|
| 379 |
+
} else {
|
| 380 |
+
// No image dimensions yet, just convert to {x, y} format
|
| 381 |
+
scaledPoints = points.map(p => {
|
| 382 |
+
let x, y;
|
| 383 |
+
if (Array.isArray(p) && Array.isArray(p[0])) {
|
| 384 |
+
x = Number(p[0][0]);
|
| 385 |
+
y = Number(p[0][1]);
|
| 386 |
+
} else if (Array.isArray(p) && p.length >= 2) {
|
| 387 |
+
x = Number(p[0]);
|
| 388 |
+
y = Number(p[1]);
|
| 389 |
+
} else if (typeof p === 'object' && p !== null) {
|
| 390 |
+
x = Number(p.x);
|
| 391 |
+
y = Number(p.y);
|
| 392 |
+
} else {
|
| 393 |
+
x = NaN;
|
| 394 |
+
y = NaN;
|
| 395 |
+
}
|
| 396 |
+
return { x, y };
|
| 397 |
+
});
|
| 398 |
+
}
|
| 399 |
+
|
| 400 |
+
const bounds = getBoundsFromPoints(scaledPoints);
|
| 401 |
+
const annotation = {
|
| 402 |
+
id: `ai_${Date.now()}_${idx}`,
|
| 403 |
+
type: 'polygon' as const,
|
| 404 |
+
x: bounds.x,
|
| 405 |
+
y: bounds.y,
|
| 406 |
+
width: bounds.width,
|
| 407 |
+
height: bounds.height,
|
| 408 |
+
color: '#05998c',
|
| 409 |
+
label: 'Acetowhite',
|
| 410 |
+
points: scaledPoints,
|
| 411 |
+
source: 'ai' as const,
|
| 412 |
+
identified: false,
|
| 413 |
+
accepted: false,
|
| 414 |
+
confidence: confidence,
|
| 415 |
+
area: area
|
| 416 |
+
};
|
| 417 |
+
|
| 418 |
+
return annotation;
|
| 419 |
+
})
|
| 420 |
+
.filter(Boolean) as Annotation[];
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
if (aiAnnotations.length > 0) {
|
| 424 |
+
// Replace all annotations with new AI annotations
|
| 425 |
+
// The useEffect will automatically trigger drawCanvas when annotationsByImage updates
|
| 426 |
+
console.log(`🎨 Setting ${aiAnnotations.length} AI annotations and triggering redraw`);
|
| 427 |
+
setAnnotations(aiAnnotations);
|
| 428 |
+
} else {
|
| 429 |
+
setAIError('No valid contours detected');
|
| 430 |
+
}
|
| 431 |
+
|
| 432 |
+
setIsAILoading(false);
|
| 433 |
+
} catch (error) {
|
| 434 |
+
console.error('AI assist error:', error);
|
| 435 |
+
setAIError(error instanceof Error ? error.message : 'Failed to run AI analysis');
|
| 436 |
+
setIsAILoading(false);
|
| 437 |
+
}
|
| 438 |
+
};
|
| 439 |
+
|
| 440 |
+
// Expose annotator methods via ref
|
| 441 |
useImperativeHandle(ref, () => ({
|
| 442 |
addAIAnnotations: (aiAnnotations: Annotation[]) => {
|
| 443 |
+
setAnnotations(aiAnnotations);
|
| 444 |
+
},
|
| 445 |
+
setImageIndex: (index: number) => {
|
| 446 |
+
setSelectedImageIndex(index);
|
| 447 |
+
},
|
| 448 |
+
getCurrentImageIndex: () => {
|
| 449 |
+
return selectedImageIndex;
|
| 450 |
+
},
|
| 451 |
+
clearAIAnnotations: () => {
|
| 452 |
+
setAnnotations(prev => prev.filter(ann => ann.source !== 'ai'));
|
| 453 |
+
},
|
| 454 |
+
clearAllAnnotations: () => {
|
| 455 |
+
setAnnotations([]);
|
| 456 |
+
setPolygonPoints([]);
|
| 457 |
+
setCurrentAnnotation(null);
|
| 458 |
+
},
|
| 459 |
+
resetViewport: () => {
|
| 460 |
+
setSelectedImageIndex(0);
|
| 461 |
+
setAnnotationsByImage({}); // Clear all images' annotations
|
| 462 |
+
setPolygonPoints([]);
|
| 463 |
+
setCurrentAnnotation(null);
|
| 464 |
+
},
|
| 465 |
+
runAIAssist: async () => {
|
| 466 |
+
await runAIAssist();
|
| 467 |
+
},
|
| 468 |
+
waitForImageReady: () => {
|
| 469 |
+
return new Promise<void>((resolve) => {
|
| 470 |
+
if (imageLoaded) {
|
| 471 |
+
console.log('✅ Image already ready');
|
| 472 |
+
resolve();
|
| 473 |
+
} else {
|
| 474 |
+
console.log('⏳ Waiting for image to load');
|
| 475 |
+
imageReadyResolveRef.current = resolve;
|
| 476 |
+
}
|
| 477 |
+
});
|
| 478 |
}
|
| 479 |
}));
|
| 480 |
|
| 481 |
const images = imageUrls || (imageUrl ? [imageUrl] : []);
|
| 482 |
const currentImageUrl = images[selectedImageIndex];
|
| 483 |
|
| 484 |
+
// Reset drawing state when switching images (but keep annotations per-image)
|
| 485 |
+
useEffect(() => {
|
| 486 |
+
|
| 487 |
+
// Clear temporary drawing state only
|
| 488 |
+
setPolygonPoints([]);
|
| 489 |
+
setCurrentAnnotation(null);
|
| 490 |
+
setAnnotationIdentified({});
|
| 491 |
+
setAnnotationAccepted({});
|
| 492 |
+
setImageLoaded(false);
|
| 493 |
+
|
| 494 |
+
// Immediately clear the canvas (will be redrawn with correct image's annotations)
|
| 495 |
+
const canvas = canvasRef.current;
|
| 496 |
+
if (canvas) {
|
| 497 |
+
const ctx = canvas.getContext('2d');
|
| 498 |
+
if (ctx) {
|
| 499 |
+
ctx.clearRect(0, 0, canvas.width, canvas.height);
|
| 500 |
+
}
|
| 501 |
+
}
|
| 502 |
+
}, [selectedImageIndex]);
|
| 503 |
+
|
| 504 |
useEffect(() => {
|
| 505 |
const img = new Image();
|
| 506 |
img.src = currentImageUrl;
|
| 507 |
img.onload = () => {
|
| 508 |
setImageDimensions({ width: img.width, height: img.height });
|
| 509 |
setImageLoaded(true);
|
| 510 |
+
// Resolve waitForImageReady promise if pending
|
| 511 |
+
if (imageReadyResolveRef.current) {
|
| 512 |
+
imageReadyResolveRef.current();
|
| 513 |
+
imageReadyResolveRef.current = null;
|
| 514 |
+
}
|
| 515 |
+
// drawCanvas will be called by useEffect when imageLoaded changes
|
| 516 |
};
|
| 517 |
}, [currentImageUrl]);
|
| 518 |
|
| 519 |
useEffect(() => {
|
| 520 |
if (imageLoaded) drawCanvas();
|
| 521 |
+
}, [annotationsByImage, selectedImageIndex, currentAnnotation, polygonPoints, imageLoaded, imageDimensions]);
|
| 522 |
|
| 523 |
useEffect(() => {
|
| 524 |
if (onAnnotationsChange) onAnnotationsChange(annotations);
|
| 525 |
}, [annotations, onAnnotationsChange]);
|
| 526 |
+
|
| 527 |
+
// Cleanup pending draw on unmount
|
| 528 |
+
useEffect(() => {
|
| 529 |
+
return () => {
|
| 530 |
+
if (pendingDrawAnimationRef.current) {
|
| 531 |
+
cancelAnimationFrame(pendingDrawAnimationRef.current);
|
| 532 |
+
}
|
| 533 |
+
};
|
| 534 |
+
}, []);
|
| 535 |
|
| 536 |
const clearAnnotations = () => setAnnotations([]);
|
| 537 |
const deleteLastAnnotation = () => setAnnotations(prev => prev.slice(0, -1));
|
|
|
|
| 617 |
|
| 618 |
const cancelPolygon = () => setPolygonPoints([]);
|
| 619 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 620 |
const drawCanvas = () => {
|
| 621 |
const canvas = canvasRef.current;
|
| 622 |
const container = containerRef.current;
|
| 623 |
if (!canvas || !container) return;
|
| 624 |
+
|
| 625 |
+
// Cancel previous animation frame if pending
|
| 626 |
+
if (pendingDrawAnimationRef.current) {
|
| 627 |
+
cancelAnimationFrame(pendingDrawAnimationRef.current);
|
| 628 |
+
}
|
| 629 |
+
|
| 630 |
+
// Use requestAnimationFrame for smooth, optimized drawing
|
| 631 |
+
pendingDrawAnimationRef.current = requestAnimationFrame(() => {
|
| 632 |
+
performDraw();
|
| 633 |
+
});
|
| 634 |
+
};
|
| 635 |
+
|
| 636 |
+
const performDraw = () => {
|
| 637 |
+
const canvas = canvasRef.current;
|
| 638 |
+
const container = containerRef.current;
|
| 639 |
+
if (!canvas || !container) return;
|
| 640 |
+
|
| 641 |
const ctx = canvas.getContext('2d');
|
| 642 |
if (!ctx) return;
|
| 643 |
+
|
| 644 |
+
// Create state key to prevent redundant draws
|
| 645 |
+
const currentAnnotations = annotationsByImage[selectedImageIndex] || [];
|
| 646 |
+
const stateKey = `${selectedImageIndex}-${currentAnnotations.length}-${polygonPoints.length}-${currentAnnotation?.id}`;
|
| 647 |
+
if (lastDrawStateRef.current === stateKey && canvas.width > 0) {
|
| 648 |
+
return; // Skip if nothing changed
|
| 649 |
+
}
|
| 650 |
+
lastDrawStateRef.current = stateKey;
|
| 651 |
+
|
| 652 |
+
// Get or create cached image
|
| 653 |
+
let img = cachedImagesRef.current[currentImageUrl];
|
| 654 |
+
if (!img) {
|
| 655 |
+
img = new Image();
|
| 656 |
+
img.src = currentImageUrl;
|
| 657 |
+
cachedImagesRef.current[currentImageUrl] = img;
|
| 658 |
+
}
|
| 659 |
+
|
| 660 |
+
// If image not loaded yet, wait for it
|
| 661 |
+
if (!img.complete) {
|
| 662 |
+
img.onload = () => performDraw();
|
| 663 |
+
return;
|
| 664 |
+
}
|
| 665 |
+
|
| 666 |
+
// Resize canvas if needed
|
| 667 |
+
const containerWidth = container.clientWidth;
|
| 668 |
+
const aspectRatio = img.height / img.width || 1;
|
| 669 |
+
const canvasHeight = containerWidth * aspectRatio;
|
| 670 |
+
|
| 671 |
+
if (canvas.width !== containerWidth || canvas.height !== canvasHeight) {
|
| 672 |
canvas.width = containerWidth;
|
| 673 |
canvas.height = canvasHeight;
|
| 674 |
+
}
|
| 675 |
+
|
| 676 |
+
// Draw image
|
| 677 |
+
ctx.clearRect(0, 0, canvas.width, canvas.height);
|
| 678 |
+
ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
|
| 679 |
|
| 680 |
+
// Draw saved annotations
|
| 681 |
+
currentAnnotations.forEach((a) => {
|
| 682 |
+
drawAnnotation(ctx, a, canvas.width, canvas.height);
|
| 683 |
+
});
|
| 684 |
|
| 685 |
+
// Draw polygon being created
|
| 686 |
+
if (polygonPoints.length > 0) {
|
| 687 |
+
drawPreviewPolygon(ctx, polygonPoints, canvas.width, canvas.height, color);
|
| 688 |
+
}
|
| 689 |
|
| 690 |
+
// Draw current temp annotation
|
| 691 |
+
if (currentAnnotation) {
|
| 692 |
+
drawAnnotation(ctx, currentAnnotation, canvas.width, canvas.height);
|
| 693 |
+
}
|
| 694 |
+
|
| 695 |
+
pendingDrawAnimationRef.current = null;
|
| 696 |
};
|
| 697 |
|
| 698 |
const drawPreviewPolygon = (ctx: CanvasRenderingContext2D, pts: Point[], canvasWidth: number, canvasHeight: number, col: string) => {
|
|
|
|
| 712 |
};
|
| 713 |
|
| 714 |
const drawAnnotation = (ctx: CanvasRenderingContext2D, annotation: Annotation, canvasWidth: number, canvasHeight: number) => {
|
| 715 |
+
const scaleX = imageDimensions.width > 0 ? canvasWidth / imageDimensions.width : 1;
|
| 716 |
+
const scaleY = imageDimensions.height > 0 ? canvasHeight / imageDimensions.height : 1;
|
| 717 |
+
|
| 718 |
ctx.strokeStyle = annotation.color;
|
| 719 |
ctx.lineWidth = 3;
|
| 720 |
ctx.setLineDash([]);
|
| 721 |
+
|
| 722 |
if (annotation.type === 'rect') {
|
| 723 |
ctx.strokeRect(annotation.x * scaleX, annotation.y * scaleY, annotation.width * scaleX, annotation.height * scaleY);
|
| 724 |
ctx.fillStyle = annotation.color + '20';
|
|
|
|
| 732 |
ctx.stroke();
|
| 733 |
ctx.fillStyle = annotation.color + '20';
|
| 734 |
ctx.fill();
|
| 735 |
+
} else if (annotation.type === 'polygon' && annotation.points && Array.isArray(annotation.points) && annotation.points.length > 0) {
|
| 736 |
+
console.log(`🎨 Drawing polygon with ${annotation.points.length} points, scale: ${scaleX}x${scaleY}`);
|
| 737 |
+
|
| 738 |
ctx.beginPath();
|
| 739 |
+
let validPointsCount = 0;
|
| 740 |
annotation.points.forEach((p, i) => {
|
| 741 |
+
if (p && typeof p.x === 'number' && typeof p.y === 'number' && Number.isFinite(p.x) && Number.isFinite(p.y)) {
|
| 742 |
+
const x = p.x * scaleX;
|
| 743 |
+
const y = p.y * scaleY;
|
| 744 |
+
if (i === 0) ctx.moveTo(x, y);
|
| 745 |
+
else ctx.lineTo(x, y);
|
| 746 |
+
validPointsCount++;
|
| 747 |
+
} else {
|
| 748 |
+
console.warn(`⚠️ Invalid point at index ${i}:`, p);
|
| 749 |
+
}
|
| 750 |
});
|
| 751 |
+
|
| 752 |
+
if (validPointsCount < 3) {
|
| 753 |
+
console.warn(`⚠️ Polygon has only ${validPointsCount} valid points, skipping draw`);
|
| 754 |
+
return;
|
| 755 |
+
}
|
| 756 |
+
|
| 757 |
ctx.closePath();
|
| 758 |
ctx.stroke();
|
| 759 |
ctx.fillStyle = annotation.color + '20';
|
| 760 |
ctx.fill();
|
| 761 |
+
|
| 762 |
+
console.log(`✅ Polygon drawn with ${validPointsCount} points`);
|
| 763 |
}
|
| 764 |
};
|
| 765 |
|
|
|
|
| 822 |
</div>
|
| 823 |
)}
|
| 824 |
|
| 825 |
+
{/* Tab Switcher with AI Assist Button */}
|
| 826 |
+
<div className="flex justify-between items-center">
|
| 827 |
+
<div className="flex justify-center flex-1">
|
| 828 |
+
<div className="inline-flex bg-gray-100 rounded-lg p-1 border border-gray-300">
|
| 829 |
+
<button
|
| 830 |
+
onClick={() => setActiveTab('annotate')}
|
| 831 |
+
className={`px-6 py-2 rounded-md font-semibold text-sm transition-all ${
|
| 832 |
+
activeTab === 'annotate'
|
| 833 |
+
? 'bg-[#05998c] text-white shadow-sm'
|
| 834 |
+
: 'bg-transparent text-gray-700 hover:text-gray-900'
|
| 835 |
+
}`}
|
| 836 |
+
>
|
| 837 |
+
Annotate
|
| 838 |
+
</button>
|
| 839 |
+
<button
|
| 840 |
+
onClick={() => setActiveTab('findings')}
|
| 841 |
+
className={`px-6 py-2 rounded-md font-semibold text-sm transition-all ${
|
| 842 |
+
activeTab === 'findings'
|
| 843 |
+
? 'bg-[#05998c] text-white shadow-sm'
|
| 844 |
+
: 'bg-transparent text-gray-700 hover:text-gray-900'
|
| 845 |
+
}`}
|
| 846 |
+
>
|
| 847 |
+
Findings
|
| 848 |
+
</button>
|
| 849 |
+
</div>
|
| 850 |
</div>
|
| 851 |
+
<button
|
| 852 |
+
onClick={runAIAssist}
|
| 853 |
+
disabled={isAILoading}
|
| 854 |
+
className="px-5 py-2.5 text-sm md:text-base font-bold text-white bg-blue-600 border border-blue-600 rounded-lg hover:bg-blue-700 disabled:opacity-50 disabled:cursor-not-allowed transition-colors shadow-md hover:shadow-lg"
|
| 855 |
+
>
|
| 856 |
+
{isAILoading ? 'Analyzing...' : '✨ AI Assist'}
|
| 857 |
+
</button>
|
| 858 |
</div>
|
| 859 |
|
| 860 |
{/* Annotate Tab */}
|
|
|
|
| 912 |
<button onClick={clearAnnotations} disabled={annotations.length === 0} className="px-3 md:px-4 py-1 md:py-2 text-xs md:text-sm font-medium text-red-600 bg-white border border-red-200 rounded-lg hover:bg-red-50 disabled:opacity-50 disabled:cursor-not-allowed transition-colors">Clear All</button>
|
| 913 |
</div>
|
| 914 |
</div>
|
| 915 |
+
|
| 916 |
+
{/* AI Error Message */}
|
| 917 |
+
{aiError && (
|
| 918 |
+
<div className="bg-red-50 border border-red-200 text-red-700 px-3 py-2 rounded-lg text-xs md:text-sm">
|
| 919 |
+
{aiError}
|
| 920 |
+
</div>
|
| 921 |
+
)}
|
| 922 |
|
| 923 |
<div className="flex flex-col">
|
| 924 |
<div>
|
src/components/ImageAnnotator.tsx
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
import React, { useEffect, useState, useRef, useImperativeHandle, forwardRef } from 'react';
|
| 2 |
-
import { Wrench, Trash2, Circle as CircleIcon, Hexagon, Square as SquareIcon, ChevronDown, ChevronUp } from 'lucide-react';
|
| 3 |
|
| 4 |
type ShapeType = 'rect' | 'circle' | 'polygon';
|
| 5 |
|
|
@@ -27,15 +27,26 @@ interface ImageAnnotatorProps {
|
|
| 27 |
imageUrl?: string;
|
| 28 |
imageUrls?: string[];
|
| 29 |
onAnnotationsChange?: (annotations: Annotation[]) => void;
|
|
|
|
|
|
|
| 30 |
}
|
| 31 |
|
| 32 |
export interface ImageAnnotatorHandle {
|
| 33 |
addAIAnnotations: (aiAnnotations: Annotation[]) => void;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
}
|
| 35 |
|
| 36 |
-
const ImageAnnotatorComponent = forwardRef<ImageAnnotatorHandle, ImageAnnotatorProps>(({ imageUrl, imageUrls, onAnnotationsChange }, ref) => {
|
| 37 |
const canvasRef = useRef<HTMLCanvasElement | null>(null);
|
| 38 |
const containerRef = useRef<HTMLDivElement | null>(null);
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
const [annotations, setAnnotations] = useState<Annotation[]>([]);
|
| 40 |
const [tool, setTool] = useState<ShapeType>('rect');
|
| 41 |
const [color, setColor] = useState('#05998c');
|
|
@@ -79,23 +90,66 @@ const ImageAnnotatorComponent = forwardRef<ImageAnnotatorHandle, ImageAnnotatorP
|
|
| 79 |
label.toLowerCase().includes(labelInput.toLowerCase())
|
| 80 |
);
|
| 81 |
|
| 82 |
-
// Expose
|
| 83 |
useImperativeHandle(ref, () => ({
|
| 84 |
addAIAnnotations: (aiAnnotations: Annotation[]) => {
|
| 85 |
-
setAnnotations(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
}
|
| 87 |
}));
|
| 88 |
|
| 89 |
const images = imageUrls || (imageUrl ? [imageUrl] : []);
|
| 90 |
const currentImageUrl = images[selectedImageIndex];
|
| 91 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 92 |
useEffect(() => {
|
| 93 |
const img = new Image();
|
| 94 |
img.src = currentImageUrl;
|
| 95 |
img.onload = () => {
|
|
|
|
| 96 |
setImageDimensions({ width: img.width, height: img.height });
|
| 97 |
setImageLoaded(true);
|
| 98 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 99 |
};
|
| 100 |
}, [currentImageUrl]);
|
| 101 |
|
|
@@ -107,6 +161,15 @@ const ImageAnnotatorComponent = forwardRef<ImageAnnotatorHandle, ImageAnnotatorP
|
|
| 107 |
if (onAnnotationsChange) onAnnotationsChange(annotations);
|
| 108 |
}, [annotations, onAnnotationsChange]);
|
| 109 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 110 |
const clearAnnotations = () => setAnnotations([]);
|
| 111 |
const deleteLastAnnotation = () => setAnnotations(prev => prev.slice(0, -1));
|
| 112 |
|
|
@@ -205,30 +268,73 @@ const ImageAnnotatorComponent = forwardRef<ImageAnnotatorHandle, ImageAnnotatorP
|
|
| 205 |
const canvas = canvasRef.current;
|
| 206 |
const container = containerRef.current;
|
| 207 |
if (!canvas || !container) return;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 208 |
const ctx = canvas.getContext('2d');
|
| 209 |
if (!ctx) return;
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 216 |
canvas.width = containerWidth;
|
| 217 |
canvas.height = canvasHeight;
|
| 218 |
-
|
| 219 |
-
|
|
|
|
|
|
|
|
|
|
| 220 |
|
| 221 |
-
|
| 222 |
-
|
| 223 |
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
| 228 |
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
|
|
|
| 232 |
};
|
| 233 |
|
| 234 |
const drawPreviewPolygon = (ctx: CanvasRenderingContext2D, pts: Point[], canvasWidth: number, canvasHeight: number, col: string) => {
|
|
@@ -266,12 +372,14 @@ const ImageAnnotatorComponent = forwardRef<ImageAnnotatorHandle, ImageAnnotatorP
|
|
| 266 |
ctx.stroke();
|
| 267 |
ctx.fillStyle = annotation.color + '20';
|
| 268 |
ctx.fill();
|
| 269 |
-
} else if (annotation.type === 'polygon' && annotation.points) {
|
| 270 |
ctx.beginPath();
|
| 271 |
annotation.points.forEach((p, i) => {
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
|
|
|
|
|
|
| 275 |
});
|
| 276 |
ctx.closePath();
|
| 277 |
ctx.stroke();
|
|
@@ -383,6 +491,26 @@ const ImageAnnotatorComponent = forwardRef<ImageAnnotatorHandle, ImageAnnotatorP
|
|
| 383 |
)}
|
| 384 |
</div>
|
| 385 |
<input aria-label="Annotation color" type="color" value={color} onChange={e => setColor(e.target.value)} className="w-10 h-8 p-0 border rounded" />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 386 |
<button onClick={deleteLastAnnotation} disabled={annotations.length === 0} className="px-3 md:px-4 py-1 md:py-2 text-xs md:text-sm font-medium text-gray-600 bg-white border border-gray-200 rounded-lg hover:bg-gray-50 disabled:opacity-50 disabled:cursor-not-allowed transition-colors flex items-center justify-center gap-1 md:gap-2">
|
| 387 |
<Trash2 className="w-4 h-4" />
|
| 388 |
<span className="hidden md:inline">Undo</span>
|
|
|
|
| 1 |
import React, { useEffect, useState, useRef, useImperativeHandle, forwardRef } from 'react';
|
| 2 |
+
import { Wrench, Trash2, Circle as CircleIcon, Hexagon, Square as SquareIcon, ChevronDown, ChevronUp, Zap, Loader } from 'lucide-react';
|
| 3 |
|
| 4 |
type ShapeType = 'rect' | 'circle' | 'polygon';
|
| 5 |
|
|
|
|
| 27 |
imageUrl?: string;
|
| 28 |
imageUrls?: string[];
|
| 29 |
onAnnotationsChange?: (annotations: Annotation[]) => void;
|
| 30 |
+
onAIAssist?: () => Promise<void>;
|
| 31 |
+
isAILoading?: boolean;
|
| 32 |
}
|
| 33 |
|
| 34 |
export interface ImageAnnotatorHandle {
|
| 35 |
addAIAnnotations: (aiAnnotations: Annotation[]) => void;
|
| 36 |
+
setImageIndex: (index: number) => void;
|
| 37 |
+
clearAIAnnotations: () => void;
|
| 38 |
+
clearAllAnnotations: () => void;
|
| 39 |
+
resetViewport: () => void;
|
| 40 |
+
waitForImageReady: () => Promise<void>;
|
| 41 |
}
|
| 42 |
|
| 43 |
+
const ImageAnnotatorComponent = forwardRef<ImageAnnotatorHandle, ImageAnnotatorProps>(({ imageUrl, imageUrls, onAnnotationsChange, onAIAssist, isAILoading = false }, ref) => {
|
| 44 |
const canvasRef = useRef<HTMLCanvasElement | null>(null);
|
| 45 |
const containerRef = useRef<HTMLDivElement | null>(null);
|
| 46 |
+
const imageReadyResolveRef = useRef<(() => void) | null>(null);
|
| 47 |
+
const cachedImagesRef = useRef<Record<string, HTMLImageElement>>({});
|
| 48 |
+
const pendingDrawAnimationRef = useRef<number | null>(null);
|
| 49 |
+
const lastDrawStateRef = useRef<string>('');
|
| 50 |
const [annotations, setAnnotations] = useState<Annotation[]>([]);
|
| 51 |
const [tool, setTool] = useState<ShapeType>('rect');
|
| 52 |
const [color, setColor] = useState('#05998c');
|
|
|
|
| 90 |
label.toLowerCase().includes(labelInput.toLowerCase())
|
| 91 |
);
|
| 92 |
|
| 93 |
+
// Expose annotator methods via ref
|
| 94 |
useImperativeHandle(ref, () => ({
|
| 95 |
addAIAnnotations: (aiAnnotations: Annotation[]) => {
|
| 96 |
+
setAnnotations(aiAnnotations);
|
| 97 |
+
},
|
| 98 |
+
setImageIndex: (index: number) => {
|
| 99 |
+
setSelectedImageIndex(index);
|
| 100 |
+
},
|
| 101 |
+
clearAIAnnotations: () => {
|
| 102 |
+
setAnnotations(prev => prev.filter(ann => ann.source !== 'ai'));
|
| 103 |
+
},
|
| 104 |
+
clearAllAnnotations: () => {
|
| 105 |
+
setAnnotations([]);
|
| 106 |
+
setPolygonPoints([]);
|
| 107 |
+
setCurrentAnnotation(null);
|
| 108 |
+
},
|
| 109 |
+
resetViewport: () => {
|
| 110 |
+
setSelectedImageIndex(0);
|
| 111 |
+
setAnnotations([]);
|
| 112 |
+
setPolygonPoints([]);
|
| 113 |
+
setCurrentAnnotation(null);
|
| 114 |
+
},
|
| 115 |
+
waitForImageReady: () => {
|
| 116 |
+
return new Promise<void>((resolve) => {
|
| 117 |
+
if (imageLoaded) {
|
| 118 |
+
resolve();
|
| 119 |
+
} else {
|
| 120 |
+
imageReadyResolveRef.current = resolve;
|
| 121 |
+
}
|
| 122 |
+
});
|
| 123 |
}
|
| 124 |
}));
|
| 125 |
|
| 126 |
const images = imageUrls || (imageUrl ? [imageUrl] : []);
|
| 127 |
const currentImageUrl = images[selectedImageIndex];
|
| 128 |
|
| 129 |
+
// Clear annotations when switching to a different image
|
| 130 |
+
useEffect(() => {
|
| 131 |
+
console.log('🖼️ Image switched, clearing annotations');
|
| 132 |
+
setAnnotations([]);
|
| 133 |
+
setPolygonPoints([]);
|
| 134 |
+
setCurrentAnnotation(null);
|
| 135 |
+
setImageLoaded(false);
|
| 136 |
+
}, [selectedImageIndex]);
|
| 137 |
+
|
| 138 |
useEffect(() => {
|
| 139 |
const img = new Image();
|
| 140 |
img.src = currentImageUrl;
|
| 141 |
img.onload = () => {
|
| 142 |
+
console.log('✅ Image loaded:', { width: img.width, height: img.height });
|
| 143 |
setImageDimensions({ width: img.width, height: img.height });
|
| 144 |
setImageLoaded(true);
|
| 145 |
+
// Resolve waitForImageReady promise if pending
|
| 146 |
+
if (imageReadyResolveRef.current) {
|
| 147 |
+
console.log('🔔 Image ready resolver called');
|
| 148 |
+
imageReadyResolveRef.current();
|
| 149 |
+
imageReadyResolveRef.current = null;
|
| 150 |
+
}
|
| 151 |
+
// Force immediate canvas redraw with new dimensions
|
| 152 |
+
setTimeout(() => drawCanvas(), 0);
|
| 153 |
};
|
| 154 |
}, [currentImageUrl]);
|
| 155 |
|
|
|
|
| 161 |
if (onAnnotationsChange) onAnnotationsChange(annotations);
|
| 162 |
}, [annotations, onAnnotationsChange]);
|
| 163 |
|
| 164 |
+
// Cleanup pending draw on unmount
|
| 165 |
+
useEffect(() => {
|
| 166 |
+
return () => {
|
| 167 |
+
if (pendingDrawAnimationRef.current) {
|
| 168 |
+
cancelAnimationFrame(pendingDrawAnimationRef.current);
|
| 169 |
+
}
|
| 170 |
+
};
|
| 171 |
+
}, []);
|
| 172 |
+
|
| 173 |
const clearAnnotations = () => setAnnotations([]);
|
| 174 |
const deleteLastAnnotation = () => setAnnotations(prev => prev.slice(0, -1));
|
| 175 |
|
|
|
|
| 268 |
const canvas = canvasRef.current;
|
| 269 |
const container = containerRef.current;
|
| 270 |
if (!canvas || !container) return;
|
| 271 |
+
|
| 272 |
+
// Cancel previous animation frame if pending
|
| 273 |
+
if (pendingDrawAnimationRef.current) {
|
| 274 |
+
cancelAnimationFrame(pendingDrawAnimationRef.current);
|
| 275 |
+
}
|
| 276 |
+
|
| 277 |
+
// Use requestAnimationFrame for smooth, optimized drawing
|
| 278 |
+
pendingDrawAnimationRef.current = requestAnimationFrame(() => {
|
| 279 |
+
performDraw();
|
| 280 |
+
});
|
| 281 |
+
};
|
| 282 |
+
|
| 283 |
+
const performDraw = () => {
|
| 284 |
+
const canvas = canvasRef.current;
|
| 285 |
+
const container = containerRef.current;
|
| 286 |
+
if (!canvas || !container) return;
|
| 287 |
+
|
| 288 |
const ctx = canvas.getContext('2d');
|
| 289 |
if (!ctx) return;
|
| 290 |
+
|
| 291 |
+
// Create state key to prevent redundant draws
|
| 292 |
+
const stateKey = `${selectedImageIndex}-${annotations.length}-${polygonPoints.length}-${currentAnnotation?.id}`;
|
| 293 |
+
if (lastDrawStateRef.current === stateKey && canvas.width > 0) {
|
| 294 |
+
return; // Skip if nothing changed
|
| 295 |
+
}
|
| 296 |
+
lastDrawStateRef.current = stateKey;
|
| 297 |
+
|
| 298 |
+
// Get or create cached image
|
| 299 |
+
let img = cachedImagesRef.current[currentImageUrl];
|
| 300 |
+
if (!img) {
|
| 301 |
+
img = new Image();
|
| 302 |
+
img.src = currentImageUrl;
|
| 303 |
+
cachedImagesRef.current[currentImageUrl] = img;
|
| 304 |
+
}
|
| 305 |
+
|
| 306 |
+
// If image not loaded yet, wait for it
|
| 307 |
+
if (!img.complete) {
|
| 308 |
+
img.onload = () => performDraw();
|
| 309 |
+
return;
|
| 310 |
+
}
|
| 311 |
+
|
| 312 |
+
// Resize canvas if needed
|
| 313 |
+
const containerWidth = container.clientWidth;
|
| 314 |
+
const aspectRatio = img.height / img.width || 1;
|
| 315 |
+
const canvasHeight = containerWidth * aspectRatio;
|
| 316 |
+
|
| 317 |
+
if (canvas.width !== containerWidth || canvas.height !== canvasHeight) {
|
| 318 |
canvas.width = containerWidth;
|
| 319 |
canvas.height = canvasHeight;
|
| 320 |
+
}
|
| 321 |
+
|
| 322 |
+
// Draw image
|
| 323 |
+
ctx.clearRect(0, 0, canvas.width, canvas.height);
|
| 324 |
+
ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
|
| 325 |
|
| 326 |
+
// Draw saved annotations
|
| 327 |
+
annotations.forEach(a => drawAnnotation(ctx, a, canvas.width, canvas.height));
|
| 328 |
|
| 329 |
+
// Draw polygon being created
|
| 330 |
+
if (polygonPoints.length > 0) {
|
| 331 |
+
drawPreviewPolygon(ctx, polygonPoints, canvas.width, canvas.height, color);
|
| 332 |
+
}
|
| 333 |
|
| 334 |
+
// Draw current temp annotation
|
| 335 |
+
if (currentAnnotation) drawAnnotation(ctx, currentAnnotation, canvas.width, canvas.height);
|
| 336 |
+
|
| 337 |
+
pendingDrawAnimationRef.current = null;
|
| 338 |
};
|
| 339 |
|
| 340 |
const drawPreviewPolygon = (ctx: CanvasRenderingContext2D, pts: Point[], canvasWidth: number, canvasHeight: number, col: string) => {
|
|
|
|
| 372 |
ctx.stroke();
|
| 373 |
ctx.fillStyle = annotation.color + '20';
|
| 374 |
ctx.fill();
|
| 375 |
+
} else if (annotation.type === 'polygon' && annotation.points && Array.isArray(annotation.points) && annotation.points.length > 0) {
|
| 376 |
ctx.beginPath();
|
| 377 |
annotation.points.forEach((p, i) => {
|
| 378 |
+
if (p && typeof p.x === 'number' && typeof p.y === 'number') {
|
| 379 |
+
const x = p.x * scaleX;
|
| 380 |
+
const y = p.y * scaleY;
|
| 381 |
+
if (i === 0) ctx.moveTo(x, y); else ctx.lineTo(x, y);
|
| 382 |
+
}
|
| 383 |
});
|
| 384 |
ctx.closePath();
|
| 385 |
ctx.stroke();
|
|
|
|
| 491 |
)}
|
| 492 |
</div>
|
| 493 |
<input aria-label="Annotation color" type="color" value={color} onChange={e => setColor(e.target.value)} className="w-10 h-8 p-0 border rounded" />
|
| 494 |
+
{onAIAssist && (
|
| 495 |
+
<button
|
| 496 |
+
onClick={onAIAssist}
|
| 497 |
+
disabled={isAILoading}
|
| 498 |
+
className="px-4 md:px-6 py-2 md:py-3 text-sm md:text-base font-bold text-white bg-gradient-to-r from-blue-600 to-blue-700 border border-blue-700 rounded-lg hover:from-blue-700 hover:to-blue-800 disabled:opacity-50 disabled:cursor-not-allowed transition-all shadow-lg hover:shadow-xl flex items-center justify-center gap-2"
|
| 499 |
+
title="Run AI model to automatically detect annotations"
|
| 500 |
+
>
|
| 501 |
+
{isAILoading ? (
|
| 502 |
+
<>
|
| 503 |
+
<Loader className="w-5 h-5 animate-spin" />
|
| 504 |
+
<span>Analyzing...</span>
|
| 505 |
+
</>
|
| 506 |
+
) : (
|
| 507 |
+
<>
|
| 508 |
+
<Zap className="w-5 h-5" />
|
| 509 |
+
<span>AI Assist</span>
|
| 510 |
+
</>
|
| 511 |
+
)}
|
| 512 |
+
</button>
|
| 513 |
+
)}
|
| 514 |
<button onClick={deleteLastAnnotation} disabled={annotations.length === 0} className="px-3 md:px-4 py-1 md:py-2 text-xs md:text-sm font-medium text-gray-600 bg-white border border-gray-200 rounded-lg hover:bg-gray-50 disabled:opacity-50 disabled:cursor-not-allowed transition-colors flex items-center justify-center gap-1 md:gap-2">
|
| 515 |
<Trash2 className="w-4 h-4" />
|
| 516 |
<span className="hidden md:inline">Undo</span>
|
src/pages/AcetowhiteExamPage.tsx
CHANGED
|
@@ -1,6 +1,6 @@
|
|
| 1 |
-
import { useState, useEffect } from 'react';
|
| 2 |
-
import { Camera, Video, ArrowLeft, ArrowRight, CheckCircle2, Info, Pause, X, Edit2, RotateCcw, Save, ChevronRight } from 'lucide-react';
|
| 3 |
-
import {
|
| 4 |
import { ImagingObservations } from '../components/ImagingObservations';
|
| 5 |
|
| 6 |
type CapturedItem = {
|
|
@@ -18,12 +18,17 @@ type Props = {
|
|
| 18 |
};
|
| 19 |
|
| 20 |
export function AcetowhiteExamPage({ goBack, onNext }: Props) {
|
|
|
|
|
|
|
| 21 |
const [capturedItems, setCapturedItems] = useState<CapturedItem[]>([]);
|
| 22 |
const [isRecording, setIsRecording] = useState(false);
|
| 23 |
const [selectedImage, setSelectedImage] = useState<string | null>(null);
|
| 24 |
const [annotations, setAnnotations] = useState<any[]>([]);
|
| 25 |
const [observations, setObservations] = useState({});
|
| 26 |
const [showExitWarning, setShowExitWarning] = useState(false);
|
|
|
|
|
|
|
|
|
|
| 27 |
|
| 28 |
// Timer states
|
| 29 |
const [timerStarted, setTimerStarted] = useState(false);
|
|
@@ -131,6 +136,76 @@ export function AcetowhiteExamPage({ goBack, onNext }: Props) {
|
|
| 131 |
}
|
| 132 |
};
|
| 133 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 134 |
const selectedItem = selectedImage
|
| 135 |
? capturedItems.find(item => item.id === selectedImage)
|
| 136 |
: null;
|
|
@@ -251,12 +326,13 @@ export function AcetowhiteExamPage({ goBack, onNext }: Props) {
|
|
| 251 |
<h4 className="text-sm font-semibold text-gray-700 mb-3">Select Image to Annotate</h4>
|
| 252 |
<div className="grid grid-cols-3 gap-3">
|
| 253 |
{imageCaptures.map(item => (
|
| 254 |
-
<div
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
|
|
|
| 260 |
<img src={item.url} alt="Capture" className="w-full h-full object-cover" />
|
| 261 |
{item.annotations && item.annotations.length > 0 && (
|
| 262 |
<div className="absolute top-1 right-1 bg-green-500 text-white p-1 rounded">
|
|
@@ -264,9 +340,12 @@ export function AcetowhiteExamPage({ goBack, onNext }: Props) {
|
|
| 264 |
</div>
|
| 265 |
)}
|
| 266 |
</div>
|
| 267 |
-
<
|
| 268 |
-
|
| 269 |
-
|
|
|
|
|
|
|
|
|
|
| 270 |
</div>
|
| 271 |
))}
|
| 272 |
</div>
|
|
@@ -405,6 +484,57 @@ export function AcetowhiteExamPage({ goBack, onNext }: Props) {
|
|
| 405 |
{isRecording ? 'Stop' : 'Record'}
|
| 406 |
</button>
|
| 407 |
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 408 |
<button onClick={onNext} className="w-full flex items-center justify-center gap-2 px-6 md:px-8 py-2 md:py-3 rounded-xl bg-gray-600 text-white font-bold shadow-lg shadow-gray-500/20 hover:bg-slate-700 hover:shadow-gray-500/30 transition-all text-sm md:text-base">
|
| 409 |
<Save className="w-4 h-4 md:w-5 md:h-5" />
|
| 410 |
<span className="hidden lg:inline">Next</span>
|
|
@@ -492,7 +622,8 @@ export function AcetowhiteExamPage({ goBack, onNext }: Props) {
|
|
| 492 |
</div>
|
| 493 |
</div>
|
| 494 |
) : (
|
| 495 |
-
// Annotation View
|
|
|
|
| 496 |
<div>
|
| 497 |
<div className="mb-4 flex items-center justify-between">
|
| 498 |
<button
|
|
@@ -522,16 +653,20 @@ export function AcetowhiteExamPage({ goBack, onNext }: Props) {
|
|
| 522 |
|
| 523 |
<div className="grid grid-cols-1 lg:grid-cols-3 gap-6">
|
| 524 |
<div className="lg:col-span-2">
|
| 525 |
-
|
| 526 |
-
|
| 527 |
-
|
| 528 |
-
|
|
|
|
|
|
|
|
|
|
| 529 |
</div>
|
| 530 |
<div className="lg:col-span-1">
|
| 531 |
<ImagingObservations onObservationsChange={setObservations} />
|
| 532 |
</div>
|
| 533 |
</div>
|
| 534 |
</div>
|
|
|
|
| 535 |
)}
|
| 536 |
|
| 537 |
{/* Exit Warning Dialog */}
|
|
|
|
| 1 |
+
import { useState, useEffect, useRef } from 'react';
|
| 2 |
+
import { Camera, Video, ArrowLeft, ArrowRight, CheckCircle2, Info, Pause, X, Edit2, RotateCcw, Save, ChevronRight, Sparkles } from 'lucide-react';
|
| 3 |
+
import { AceticAnnotator, AceticAnnotatorHandle } from '../components/AceticAnnotator';
|
| 4 |
import { ImagingObservations } from '../components/ImagingObservations';
|
| 5 |
|
| 6 |
type CapturedItem = {
|
|
|
|
| 18 |
};
|
| 19 |
|
| 20 |
export function AcetowhiteExamPage({ goBack, onNext }: Props) {
|
| 21 |
+
const annotatorRef = useRef<AceticAnnotatorHandle>(null);
|
| 22 |
+
const fileInputRef = useRef<HTMLInputElement>(null);
|
| 23 |
const [capturedItems, setCapturedItems] = useState<CapturedItem[]>([]);
|
| 24 |
const [isRecording, setIsRecording] = useState(false);
|
| 25 |
const [selectedImage, setSelectedImage] = useState<string | null>(null);
|
| 26 |
const [annotations, setAnnotations] = useState<any[]>([]);
|
| 27 |
const [observations, setObservations] = useState({});
|
| 28 |
const [showExitWarning, setShowExitWarning] = useState(false);
|
| 29 |
+
const [isLiveAILoading, setIsLiveAILoading] = useState(false);
|
| 30 |
+
const [liveAIResults, setLiveAIResults] = useState<{ cervixDetected: boolean; quality: string; confidence: number } | null>(null);
|
| 31 |
+
const [liveAIError, setLiveAIError] = useState<string | null>(null);
|
| 32 |
|
| 33 |
// Timer states
|
| 34 |
const [timerStarted, setTimerStarted] = useState(false);
|
|
|
|
| 136 |
}
|
| 137 |
};
|
| 138 |
|
| 139 |
+
const handleUploadClick = () => {
|
| 140 |
+
fileInputRef.current?.click();
|
| 141 |
+
};
|
| 142 |
+
|
| 143 |
+
const handleFileUpload = (e: React.ChangeEvent<HTMLInputElement>) => {
|
| 144 |
+
const files = e.target.files;
|
| 145 |
+
if (files && files.length > 0) {
|
| 146 |
+
Array.from(files).forEach(file => {
|
| 147 |
+
const isVideo = file.type.startsWith('video/');
|
| 148 |
+
const newCapture: CapturedItem = {
|
| 149 |
+
id: Date.now().toString() + Math.random(),
|
| 150 |
+
type: isVideo ? 'video' : 'image',
|
| 151 |
+
url: URL.createObjectURL(file),
|
| 152 |
+
timestamp: new Date()
|
| 153 |
+
};
|
| 154 |
+
setCapturedItems(prev => [...prev, newCapture]);
|
| 155 |
+
});
|
| 156 |
+
}
|
| 157 |
+
e.target.value = '';
|
| 158 |
+
};
|
| 159 |
+
|
| 160 |
+
const mapQualityLabel = (score: number) => {
|
| 161 |
+
if (score >= 0.8) return 'Excellent';
|
| 162 |
+
if (score >= 0.6) return 'Good';
|
| 163 |
+
return 'Bad';
|
| 164 |
+
};
|
| 165 |
+
|
| 166 |
+
// Main page live feed AI Assist handler - quality check only
|
| 167 |
+
const handleLiveAIAssist = async () => {
|
| 168 |
+
setLiveAIError(null);
|
| 169 |
+
setLiveAIResults(null);
|
| 170 |
+
const imageItems = capturedItems.filter(item => item.type === 'image');
|
| 171 |
+
const targetItem = imageItems[0];
|
| 172 |
+
|
| 173 |
+
setIsLiveAILoading(true);
|
| 174 |
+
|
| 175 |
+
try {
|
| 176 |
+
const response = await fetch(targetItem ? targetItem.url : cervixImageUrl);
|
| 177 |
+
const blob = await response.blob();
|
| 178 |
+
|
| 179 |
+
const formData = new FormData();
|
| 180 |
+
formData.append('file', blob, 'image.jpg');
|
| 181 |
+
const backendResponse = await fetch('http://localhost:8000/infer/image', {
|
| 182 |
+
method: 'POST',
|
| 183 |
+
body: formData,
|
| 184 |
+
});
|
| 185 |
+
|
| 186 |
+
if (!backendResponse.ok) {
|
| 187 |
+
throw new Error(`Backend error: ${backendResponse.statusText}`);
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
const result = await backendResponse.json();
|
| 191 |
+
|
| 192 |
+
const qualityScore = typeof result.quality_score === 'number'
|
| 193 |
+
? result.quality_score
|
| 194 |
+
: (typeof result.quality_percent === 'number' ? result.quality_percent / 100 : 0);
|
| 195 |
+
|
| 196 |
+
setLiveAIResults({
|
| 197 |
+
cervixDetected: Boolean(result.detected),
|
| 198 |
+
quality: mapQualityLabel(qualityScore),
|
| 199 |
+
confidence: qualityScore
|
| 200 |
+
});
|
| 201 |
+
setIsLiveAILoading(false);
|
| 202 |
+
} catch (error) {
|
| 203 |
+
console.error('Live AI assist error:', error);
|
| 204 |
+
setLiveAIError(error instanceof Error ? error.message : 'Failed to check image quality');
|
| 205 |
+
setIsLiveAILoading(false);
|
| 206 |
+
}
|
| 207 |
+
};
|
| 208 |
+
|
| 209 |
const selectedItem = selectedImage
|
| 210 |
? capturedItems.find(item => item.id === selectedImage)
|
| 211 |
: null;
|
|
|
|
| 326 |
<h4 className="text-sm font-semibold text-gray-700 mb-3">Select Image to Annotate</h4>
|
| 327 |
<div className="grid grid-cols-3 gap-3">
|
| 328 |
{imageCaptures.map(item => (
|
| 329 |
+
<div key={item.id} className="relative group">
|
| 330 |
+
<div
|
| 331 |
+
onClick={() => setSelectedImage(item.id)}
|
| 332 |
+
className={`aspect-square bg-gray-100 rounded-lg overflow-hidden border-2 transition-all cursor-pointer ${
|
| 333 |
+
selectedImage === item.id ? 'border-blue-600 ring-2 ring-blue-300' : 'border-transparent hover:border-[#05998c]'
|
| 334 |
+
}`}
|
| 335 |
+
>
|
| 336 |
<img src={item.url} alt="Capture" className="w-full h-full object-cover" />
|
| 337 |
{item.annotations && item.annotations.length > 0 && (
|
| 338 |
<div className="absolute top-1 right-1 bg-green-500 text-white p-1 rounded">
|
|
|
|
| 340 |
</div>
|
| 341 |
)}
|
| 342 |
</div>
|
| 343 |
+
<button
|
| 344 |
+
onClick={() => handleDeleteCapture(item.id)}
|
| 345 |
+
className="absolute top-1 right-1 bg-red-500 text-white p-1 rounded opacity-0 group-hover:opacity-100 transition-opacity"
|
| 346 |
+
>
|
| 347 |
+
<X className="w-3 h-3" />
|
| 348 |
+
</button>
|
| 349 |
</div>
|
| 350 |
))}
|
| 351 |
</div>
|
|
|
|
| 484 |
{isRecording ? 'Stop' : 'Record'}
|
| 485 |
</button>
|
| 486 |
</div>
|
| 487 |
+
|
| 488 |
+
{/* Centered AI Assist Button */}
|
| 489 |
+
<button
|
| 490 |
+
onClick={handleLiveAIAssist}
|
| 491 |
+
disabled={isLiveAILoading}
|
| 492 |
+
className="w-full flex items-center justify-center gap-2 px-6 py-4 rounded-lg bg-gradient-to-r from-blue-600 to-blue-700 text-white font-bold hover:from-blue-700 hover:to-blue-800 transition-all shadow-lg hover:shadow-xl disabled:opacity-50 disabled:cursor-not-allowed text-base"
|
| 493 |
+
>
|
| 494 |
+
<Sparkles className="w-6 h-6" />
|
| 495 |
+
{isLiveAILoading ? 'Checking...' : 'AI Assist'}
|
| 496 |
+
</button>
|
| 497 |
+
|
| 498 |
+
<div className="flex gap-2">
|
| 499 |
+
<button
|
| 500 |
+
onClick={handleUploadClick}
|
| 501 |
+
className="flex-1 flex items-center justify-center gap-2 px-6 py-3 rounded-lg bg-green-600 text-white font-semibold hover:bg-green-700 transition-colors"
|
| 502 |
+
>
|
| 503 |
+
Upload Image
|
| 504 |
+
</button>
|
| 505 |
+
<input
|
| 506 |
+
ref={fileInputRef}
|
| 507 |
+
type="file"
|
| 508 |
+
accept="image/*,video/*"
|
| 509 |
+
multiple
|
| 510 |
+
className="hidden"
|
| 511 |
+
onChange={handleFileUpload}
|
| 512 |
+
/>
|
| 513 |
+
</div>
|
| 514 |
+
|
| 515 |
+
{/* Live AI Results Panel */}
|
| 516 |
+
{liveAIResults && (
|
| 517 |
+
<div className="p-4 bg-green-50 border border-green-300 rounded-lg">
|
| 518 |
+
<div className="flex items-center gap-3 mb-3">
|
| 519 |
+
<div className="w-3 h-3 bg-green-500 rounded-full"></div>
|
| 520 |
+
<h4 className="font-bold text-green-800">Quality Check Results</h4>
|
| 521 |
+
</div>
|
| 522 |
+
<div className="space-y-2 text-sm">
|
| 523 |
+
<p className="text-gray-700">
|
| 524 |
+
<span className="font-semibold">Cervix Detected:</span> {liveAIResults.cervixDetected ? 'Yes' : 'No'} ({((liveAIResults.cervixDetected ? liveAIResults.confidence : 0) * 100).toFixed(1)}%)
|
| 525 |
+
</p>
|
| 526 |
+
<p className="text-gray-700">
|
| 527 |
+
<span className="font-semibold">Quality:</span> {liveAIResults.quality} ({(liveAIResults.confidence * 100).toFixed(1)}%)
|
| 528 |
+
</p>
|
| 529 |
+
</div>
|
| 530 |
+
</div>
|
| 531 |
+
)}
|
| 532 |
+
|
| 533 |
+
{liveAIError && (
|
| 534 |
+
<div className="text-sm text-red-600 bg-red-50 border border-red-200 rounded-lg p-3 text-center">
|
| 535 |
+
{liveAIError}
|
| 536 |
+
</div>
|
| 537 |
+
)}
|
| 538 |
<button onClick={onNext} className="w-full flex items-center justify-center gap-2 px-6 md:px-8 py-2 md:py-3 rounded-xl bg-gray-600 text-white font-bold shadow-lg shadow-gray-500/20 hover:bg-slate-700 hover:shadow-gray-500/30 transition-all text-sm md:text-base">
|
| 539 |
<Save className="w-4 h-4 md:w-5 md:h-5" />
|
| 540 |
<span className="hidden lg:inline">Next</span>
|
|
|
|
| 622 |
</div>
|
| 623 |
</div>
|
| 624 |
) : (
|
| 625 |
+
// Selected Image Annotation View
|
| 626 |
+
selectedImage && (
|
| 627 |
<div>
|
| 628 |
<div className="mb-4 flex items-center justify-between">
|
| 629 |
<button
|
|
|
|
| 653 |
|
| 654 |
<div className="grid grid-cols-1 lg:grid-cols-3 gap-6">
|
| 655 |
<div className="lg:col-span-2">
|
| 656 |
+
{selectedItem && (
|
| 657 |
+
<AceticAnnotator
|
| 658 |
+
ref={annotatorRef}
|
| 659 |
+
imageUrl={selectedItem.url}
|
| 660 |
+
onAnnotationsChange={setAnnotations}
|
| 661 |
+
/>
|
| 662 |
+
)}
|
| 663 |
</div>
|
| 664 |
<div className="lg:col-span-1">
|
| 665 |
<ImagingObservations onObservationsChange={setObservations} />
|
| 666 |
</div>
|
| 667 |
</div>
|
| 668 |
</div>
|
| 669 |
+
)
|
| 670 |
)}
|
| 671 |
|
| 672 |
{/* Exit Warning Dialog */}
|
src/pages/BiopsyMarking.tsx
CHANGED
|
@@ -282,6 +282,13 @@ const BiopsyMarking: React.FC<BiopsyMarkingProps> = ({ onBack, onNext, capturedI
|
|
| 282 |
<Trash2 className="h-4 w-4 mr-1" />
|
| 283 |
Clear
|
| 284 |
</Button>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 285 |
<Button
|
| 286 |
size="sm"
|
| 287 |
className="h-8 px-3 bg-gray-600 text-white hover:bg-slate-700"
|
|
@@ -483,10 +490,13 @@ const BiopsyMarking: React.FC<BiopsyMarkingProps> = ({ onBack, onNext, capturedI
|
|
| 483 |
<div className="bg-white rounded-xl shadow-sm border border-gray-100 p-6">
|
| 484 |
<div className="flex items-center justify-between mb-4">
|
| 485 |
<h3 className="font-bold text-[#0A2540]">Images</h3>
|
| 486 |
-
<
|
| 487 |
-
|
|
|
|
|
|
|
|
|
|
| 488 |
Upload
|
| 489 |
-
</
|
| 490 |
<input
|
| 491 |
ref={fileInputRef}
|
| 492 |
type="file"
|
|
|
|
| 282 |
<Trash2 className="h-4 w-4 mr-1" />
|
| 283 |
Clear
|
| 284 |
</Button>
|
| 285 |
+
<button
|
| 286 |
+
onClick={handleUpload}
|
| 287 |
+
className="h-8 px-3 bg-blue-600 text-white hover:bg-blue-700 rounded transition-colors flex items-center gap-2"
|
| 288 |
+
>
|
| 289 |
+
<Upload className="h-4 w-4" />
|
| 290 |
+
Upload
|
| 291 |
+
</button>
|
| 292 |
<Button
|
| 293 |
size="sm"
|
| 294 |
className="h-8 px-3 bg-gray-600 text-white hover:bg-slate-700"
|
|
|
|
| 490 |
<div className="bg-white rounded-xl shadow-sm border border-gray-100 p-6">
|
| 491 |
<div className="flex items-center justify-between mb-4">
|
| 492 |
<h3 className="font-bold text-[#0A2540]">Images</h3>
|
| 493 |
+
<button
|
| 494 |
+
onClick={handleUpload}
|
| 495 |
+
className="h-8 px-3 bg-blue-600 text-white hover:bg-blue-700 rounded transition-colors flex items-center gap-2 text-xs"
|
| 496 |
+
>
|
| 497 |
+
<Upload className="h-3.5 w-3.5" />
|
| 498 |
Upload
|
| 499 |
+
</button>
|
| 500 |
<input
|
| 501 |
ref={fileInputRef}
|
| 502 |
type="file"
|
src/pages/GreenFilterPage.tsx
CHANGED
|
@@ -1,12 +1,14 @@
|
|
| 1 |
-
import { useState } from 'react';
|
| 2 |
-
import { Camera, Video, ArrowLeft, ArrowRight, Info, X, Save, ChevronRight } from 'lucide-react';
|
| 3 |
import { ImageAnnotator } from '../components/ImageAnnotator';
|
| 4 |
import { ImagingObservations } from '../components/ImagingObservations';
|
|
|
|
| 5 |
|
| 6 |
type CapturedItem = {
|
| 7 |
id: string;
|
| 8 |
type: 'image' | 'video';
|
| 9 |
url: string;
|
|
|
|
| 10 |
timestamp: Date;
|
| 11 |
annotations?: any[];
|
| 12 |
};
|
|
@@ -17,20 +19,81 @@ type Props = {
|
|
| 17 |
};
|
| 18 |
|
| 19 |
export function GreenFilterPage({ goBack, onNext }: Props) {
|
|
|
|
|
|
|
|
|
|
| 20 |
const [capturedItems, setCapturedItems] = useState<CapturedItem[]>([]);
|
| 21 |
const [isRecording, setIsRecording] = useState(false);
|
| 22 |
const [selectedImage, setSelectedImage] = useState<string | null>(null);
|
| 23 |
const [showExitWarning, setShowExitWarning] = useState(false);
|
| 24 |
const [greenApplied, setGreenApplied] = useState(false);
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
const baseImageUrl = "/C87Aceto_(1).jpg";
|
| 26 |
-
const greenImageUrl = "/greenC87Aceto_(1).jpg";
|
| 27 |
-
const liveFeedImage = greenApplied ? greenImageUrl : baseImageUrl;
|
| 28 |
|
| 29 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
const newCapture: CapturedItem = {
|
| 31 |
id: Date.now().toString(),
|
| 32 |
type: 'image',
|
| 33 |
-
url:
|
|
|
|
| 34 |
timestamp: new Date()
|
| 35 |
};
|
| 36 |
setCapturedItems(prev => [...prev, newCapture]);
|
|
@@ -44,32 +107,83 @@ export function GreenFilterPage({ goBack, onNext }: Props) {
|
|
| 44 |
const newCapture: CapturedItem = {
|
| 45 |
id: Date.now().toString(),
|
| 46 |
type: 'video',
|
| 47 |
-
url:
|
|
|
|
| 48 |
timestamp: new Date()
|
| 49 |
};
|
| 50 |
setCapturedItems(prev => [...prev, newCapture]);
|
| 51 |
}
|
| 52 |
};
|
| 53 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 54 |
const handleSelectImage = (item: CapturedItem) => {
|
| 55 |
-
setSelectedImage(item.url);
|
| 56 |
};
|
| 57 |
|
| 58 |
const handleAnnotationsChange = (newAnnotations: any[]) => {
|
| 59 |
if (selectedImage) {
|
| 60 |
setCapturedItems(prev => prev.map(item =>
|
| 61 |
-
item.url === selectedImage ? { ...item, annotations: newAnnotations } : item
|
| 62 |
));
|
| 63 |
}
|
| 64 |
};
|
| 65 |
|
| 66 |
const handleDeleteImage = (id: string) => {
|
| 67 |
setCapturedItems(prev => prev.filter(item => item.id !== id));
|
| 68 |
-
if (selectedImage === capturedItems.find(item => item.id === id)?.url) {
|
| 69 |
setSelectedImage(null);
|
| 70 |
}
|
| 71 |
};
|
| 72 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 73 |
const handleConfirmExit = () => {
|
| 74 |
if (capturedItems.length > 0) {
|
| 75 |
setShowExitWarning(true);
|
|
@@ -78,6 +192,73 @@ export function GreenFilterPage({ goBack, onNext }: Props) {
|
|
| 78 |
}
|
| 79 |
};
|
| 80 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 81 |
return (
|
| 82 |
<div className="min-h-screen bg-gradient-to-br from-gray-50 to-blue-50 p-4 md:p-6">
|
| 83 |
{/* Header */}
|
|
@@ -130,11 +311,37 @@ export function GreenFilterPage({ goBack, onNext }: Props) {
|
|
| 130 |
|
| 131 |
{/* Image Display */}
|
| 132 |
<div className="relative bg-gray-900 rounded-xl overflow-hidden mb-6 border-2 border-gray-700">
|
| 133 |
-
<
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 138 |
</div>
|
| 139 |
|
| 140 |
{/* Capture Controls */}
|
|
@@ -169,7 +376,57 @@ export function GreenFilterPage({ goBack, onNext }: Props) {
|
|
| 169 |
</>
|
| 170 |
)}
|
| 171 |
</button>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 172 |
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 173 |
<button className="w-full flex items-center justify-center gap-2 px-4 py-3 bg-gray-600 text-white rounded-lg font-semibold hover:bg-slate-700 transition-colors">
|
| 174 |
Next
|
| 175 |
<ArrowRight className="w-4 h-4" />
|
|
@@ -180,14 +437,21 @@ export function GreenFilterPage({ goBack, onNext }: Props) {
|
|
| 180 |
{/* Selected Image Tools */}
|
| 181 |
{selectedImage && (
|
| 182 |
<div className="space-y-4">
|
| 183 |
-
<div className="flex justify-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 184 |
<button className="px-6 py-2 bg-gray-600 text-white rounded-lg font-semibold hover:bg-slate-700 transition-colors flex items-center gap-2">
|
| 185 |
Next
|
| 186 |
<ArrowRight className="w-4 h-4" />
|
| 187 |
</button>
|
| 188 |
</div>
|
| 189 |
<ImageAnnotator
|
| 190 |
-
imageUrl={
|
| 191 |
onAnnotationsChange={handleAnnotationsChange}
|
| 192 |
/>
|
| 193 |
<ImagingObservations />
|
|
@@ -200,8 +464,7 @@ export function GreenFilterPage({ goBack, onNext }: Props) {
|
|
| 200 |
<div className="space-y-6">
|
| 201 |
<div className="bg-white rounded-xl shadow-lg p-6 border border-gray-200">
|
| 202 |
{/* Green Filter Toggle */}
|
| 203 |
-
|
| 204 |
-
<div className="mb-6 flex items-center justify-between gap-4">
|
| 205 |
<div className="flex-1 bg-[#05998c] text-white px-6 py-2 rounded-lg">
|
| 206 |
<span className="font-bold">Green Filter</span>
|
| 207 |
</div>
|
|
@@ -223,7 +486,7 @@ export function GreenFilterPage({ goBack, onNext }: Props) {
|
|
| 223 |
</span>
|
| 224 |
</button>
|
| 225 |
</div>
|
| 226 |
-
|
| 227 |
|
| 228 |
<h3 className="text-lg font-bold text-gray-800 mb-4">
|
| 229 |
Captured Items ({capturedItems.length})
|
|
@@ -309,6 +572,8 @@ export function GreenFilterPage({ goBack, onNext }: Props) {
|
|
| 309 |
</div>
|
| 310 |
</div>
|
| 311 |
)}
|
|
|
|
|
|
|
| 312 |
</div>
|
| 313 |
);
|
| 314 |
}
|
|
|
|
| 1 |
+
import { useState, useEffect, useRef } from 'react';
|
| 2 |
+
import { Camera, Video, ArrowLeft, ArrowRight, Info, X, Save, ChevronRight, Sparkles, Upload } from 'lucide-react';
|
| 3 |
import { ImageAnnotator } from '../components/ImageAnnotator';
|
| 4 |
import { ImagingObservations } from '../components/ImagingObservations';
|
| 5 |
+
import { applyGreenFilter } from '../utils/filterUtils';
|
| 6 |
|
| 7 |
type CapturedItem = {
|
| 8 |
id: string;
|
| 9 |
type: 'image' | 'video';
|
| 10 |
url: string;
|
| 11 |
+
originalUrl: string;
|
| 12 |
timestamp: Date;
|
| 13 |
annotations?: any[];
|
| 14 |
};
|
|
|
|
| 19 |
};
|
| 20 |
|
| 21 |
export function GreenFilterPage({ goBack, onNext }: Props) {
|
| 22 |
+
const liveVideoRef = useRef<HTMLVideoElement>(null);
|
| 23 |
+
const canvasRef = useRef<HTMLCanvasElement>(null);
|
| 24 |
+
const fileInputRef = useRef<HTMLInputElement>(null);
|
| 25 |
const [capturedItems, setCapturedItems] = useState<CapturedItem[]>([]);
|
| 26 |
const [isRecording, setIsRecording] = useState(false);
|
| 27 |
const [selectedImage, setSelectedImage] = useState<string | null>(null);
|
| 28 |
const [showExitWarning, setShowExitWarning] = useState(false);
|
| 29 |
const [greenApplied, setGreenApplied] = useState(false);
|
| 30 |
+
const [isLiveAILoading, setIsLiveAILoading] = useState(false);
|
| 31 |
+
const [liveAIResults, setLiveAIResults] = useState<{ cervixDetected: boolean; quality: string; confidence: number } | null>(null);
|
| 32 |
+
const [liveAIError, setLiveAIError] = useState<string | null>(null);
|
| 33 |
+
const [displayedImageUrl, setDisplayedImageUrl] = useState<string>("/C87Aceto_(1).jpg");
|
| 34 |
const baseImageUrl = "/C87Aceto_(1).jpg";
|
|
|
|
|
|
|
| 35 |
|
| 36 |
+
useEffect(() => {
|
| 37 |
+
const applyFilter = async () => {
|
| 38 |
+
const imageToFilter = selectedImage || baseImageUrl;
|
| 39 |
+
if (greenApplied) {
|
| 40 |
+
try {
|
| 41 |
+
const filteredUrl = await applyGreenFilter(imageToFilter, 'dataUrl');
|
| 42 |
+
setDisplayedImageUrl(filteredUrl as string);
|
| 43 |
+
} catch (error) {
|
| 44 |
+
console.error('Error applying green filter:', error);
|
| 45 |
+
setDisplayedImageUrl(imageToFilter);
|
| 46 |
+
}
|
| 47 |
+
} else {
|
| 48 |
+
setDisplayedImageUrl(imageToFilter);
|
| 49 |
+
}
|
| 50 |
+
};
|
| 51 |
+
|
| 52 |
+
applyFilter();
|
| 53 |
+
}, [greenApplied, selectedImage, baseImageUrl]);
|
| 54 |
+
|
| 55 |
+
const handleCaptureImage = async () => {
|
| 56 |
+
let rawUrl: string;
|
| 57 |
+
let displayUrl: string;
|
| 58 |
+
|
| 59 |
+
if (selectedImage) {
|
| 60 |
+
displayUrl = displayedImageUrl;
|
| 61 |
+
rawUrl = selectedImage;
|
| 62 |
+
} else {
|
| 63 |
+
rawUrl = displayedImageUrl;
|
| 64 |
+
if (liveVideoRef.current && canvasRef.current) {
|
| 65 |
+
const canvas = canvasRef.current;
|
| 66 |
+
const ctx = canvas.getContext('2d');
|
| 67 |
+
const vw = liveVideoRef.current.videoWidth;
|
| 68 |
+
const vh = liveVideoRef.current.videoHeight;
|
| 69 |
+
if (ctx && vw > 0 && vh > 0) {
|
| 70 |
+
canvas.width = vw;
|
| 71 |
+
canvas.height = vh;
|
| 72 |
+
ctx.drawImage(liveVideoRef.current, 0, 0);
|
| 73 |
+
try {
|
| 74 |
+
rawUrl = canvas.toDataURL('image/png');
|
| 75 |
+
} catch (secErr) {
|
| 76 |
+
console.warn('Canvas tainted, falling back to displayedImageUrl', secErr);
|
| 77 |
+
rawUrl = displayedImageUrl;
|
| 78 |
+
}
|
| 79 |
+
}
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
displayUrl = rawUrl;
|
| 83 |
+
if (greenApplied && rawUrl) {
|
| 84 |
+
try {
|
| 85 |
+
displayUrl = await applyGreenFilter(rawUrl, 'dataUrl') as string;
|
| 86 |
+
} catch {
|
| 87 |
+
displayUrl = rawUrl;
|
| 88 |
+
}
|
| 89 |
+
}
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
const newCapture: CapturedItem = {
|
| 93 |
id: Date.now().toString(),
|
| 94 |
type: 'image',
|
| 95 |
+
url: displayUrl,
|
| 96 |
+
originalUrl: rawUrl,
|
| 97 |
timestamp: new Date()
|
| 98 |
};
|
| 99 |
setCapturedItems(prev => [...prev, newCapture]);
|
|
|
|
| 107 |
const newCapture: CapturedItem = {
|
| 108 |
id: Date.now().toString(),
|
| 109 |
type: 'video',
|
| 110 |
+
url: '/live.mp4',
|
| 111 |
+
originalUrl: '/live.mp4',
|
| 112 |
timestamp: new Date()
|
| 113 |
};
|
| 114 |
setCapturedItems(prev => [...prev, newCapture]);
|
| 115 |
}
|
| 116 |
};
|
| 117 |
|
| 118 |
+
const handleUploadClick = () => {
|
| 119 |
+
fileInputRef.current?.click();
|
| 120 |
+
};
|
| 121 |
+
|
| 122 |
+
const handleFileUpload = (e: React.ChangeEvent<HTMLInputElement>) => {
|
| 123 |
+
const files = e.target.files;
|
| 124 |
+
if (files && files.length > 0) {
|
| 125 |
+
Array.from(files).forEach(async (file) => {
|
| 126 |
+
const isVideo = file.type.startsWith('video/');
|
| 127 |
+
const objectUrl = URL.createObjectURL(file);
|
| 128 |
+
|
| 129 |
+
if (isVideo) {
|
| 130 |
+
const newCapture: CapturedItem = {
|
| 131 |
+
id: Date.now().toString() + Math.random(),
|
| 132 |
+
type: 'video',
|
| 133 |
+
url: objectUrl,
|
| 134 |
+
originalUrl: objectUrl,
|
| 135 |
+
timestamp: new Date()
|
| 136 |
+
};
|
| 137 |
+
setCapturedItems(prev => [...prev, newCapture]);
|
| 138 |
+
} else {
|
| 139 |
+
let displayUrl = objectUrl;
|
| 140 |
+
if (greenApplied) {
|
| 141 |
+
try {
|
| 142 |
+
displayUrl = await applyGreenFilter(objectUrl, 'dataUrl') as string;
|
| 143 |
+
} catch (error) {
|
| 144 |
+
console.error('Error applying filter:', error);
|
| 145 |
+
displayUrl = objectUrl;
|
| 146 |
+
}
|
| 147 |
+
}
|
| 148 |
+
const newCapture: CapturedItem = {
|
| 149 |
+
id: Date.now().toString() + Math.random(),
|
| 150 |
+
type: 'image',
|
| 151 |
+
url: displayUrl,
|
| 152 |
+
originalUrl: objectUrl,
|
| 153 |
+
timestamp: new Date()
|
| 154 |
+
};
|
| 155 |
+
setCapturedItems(prev => [...prev, newCapture]);
|
| 156 |
+
}
|
| 157 |
+
});
|
| 158 |
+
}
|
| 159 |
+
e.target.value = '';
|
| 160 |
+
};
|
| 161 |
+
|
| 162 |
const handleSelectImage = (item: CapturedItem) => {
|
| 163 |
+
setSelectedImage(item.originalUrl || item.url);
|
| 164 |
};
|
| 165 |
|
| 166 |
const handleAnnotationsChange = (newAnnotations: any[]) => {
|
| 167 |
if (selectedImage) {
|
| 168 |
setCapturedItems(prev => prev.map(item =>
|
| 169 |
+
(item.originalUrl || item.url) === selectedImage ? { ...item, annotations: newAnnotations } : item
|
| 170 |
));
|
| 171 |
}
|
| 172 |
};
|
| 173 |
|
| 174 |
const handleDeleteImage = (id: string) => {
|
| 175 |
setCapturedItems(prev => prev.filter(item => item.id !== id));
|
| 176 |
+
if (selectedImage === (capturedItems.find(item => item.id === id)?.originalUrl || capturedItems.find(item => item.id === id)?.url)) {
|
| 177 |
setSelectedImage(null);
|
| 178 |
}
|
| 179 |
};
|
| 180 |
|
| 181 |
+
const mapQualityLabel = (score: number) => {
|
| 182 |
+
if (score >= 0.8) return 'Excellent';
|
| 183 |
+
if (score >= 0.6) return 'Good';
|
| 184 |
+
return 'Bad';
|
| 185 |
+
};
|
| 186 |
+
|
| 187 |
const handleConfirmExit = () => {
|
| 188 |
if (capturedItems.length > 0) {
|
| 189 |
setShowExitWarning(true);
|
|
|
|
| 192 |
}
|
| 193 |
};
|
| 194 |
|
| 195 |
+
const handleGreenFilterMainAIAssist = async () => {
|
| 196 |
+
setLiveAIError(null);
|
| 197 |
+
setLiveAIResults(null);
|
| 198 |
+
const imageItems = capturedItems.filter(item => item.type === 'image');
|
| 199 |
+
const targetItem = imageItems[0];
|
| 200 |
+
|
| 201 |
+
setIsLiveAILoading(true);
|
| 202 |
+
|
| 203 |
+
try {
|
| 204 |
+
let blob: Blob;
|
| 205 |
+
if (targetItem) {
|
| 206 |
+
const response = await fetch(targetItem.url);
|
| 207 |
+
blob = await response.blob();
|
| 208 |
+
} else {
|
| 209 |
+
const video = liveVideoRef.current;
|
| 210 |
+
if (!video || video.videoWidth === 0 || video.videoHeight === 0) {
|
| 211 |
+
throw new Error('Live feed is not ready yet.');
|
| 212 |
+
}
|
| 213 |
+
const canvas = document.createElement('canvas');
|
| 214 |
+
canvas.width = video.videoWidth;
|
| 215 |
+
canvas.height = video.videoHeight;
|
| 216 |
+
const ctx = canvas.getContext('2d');
|
| 217 |
+
if (!ctx) {
|
| 218 |
+
throw new Error('Failed to capture live frame.');
|
| 219 |
+
}
|
| 220 |
+
ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
|
| 221 |
+
blob = await new Promise<Blob>((resolve, reject) => {
|
| 222 |
+
canvas.toBlob(result => {
|
| 223 |
+
if (!result) {
|
| 224 |
+
reject(new Error('Failed to capture live frame.'));
|
| 225 |
+
return;
|
| 226 |
+
}
|
| 227 |
+
resolve(result);
|
| 228 |
+
}, 'image/jpeg', 0.95);
|
| 229 |
+
});
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
+
const formData = new FormData();
|
| 233 |
+
formData.append('file', blob, 'image.jpg');
|
| 234 |
+
const backendResponse = await fetch('http://localhost:8000/infer/image', {
|
| 235 |
+
method: 'POST',
|
| 236 |
+
body: formData,
|
| 237 |
+
});
|
| 238 |
+
|
| 239 |
+
if (!backendResponse.ok) {
|
| 240 |
+
throw new Error(`Backend error: ${backendResponse.statusText}`);
|
| 241 |
+
}
|
| 242 |
+
|
| 243 |
+
const result = await backendResponse.json();
|
| 244 |
+
|
| 245 |
+
const qualityScore = typeof result.quality_score === 'number'
|
| 246 |
+
? result.quality_score
|
| 247 |
+
: (typeof result.quality_percent === 'number' ? result.quality_percent / 100 : 0);
|
| 248 |
+
|
| 249 |
+
setLiveAIResults({
|
| 250 |
+
cervixDetected: Boolean(result.detected),
|
| 251 |
+
quality: mapQualityLabel(qualityScore),
|
| 252 |
+
confidence: qualityScore
|
| 253 |
+
});
|
| 254 |
+
setIsLiveAILoading(false);
|
| 255 |
+
} catch (error) {
|
| 256 |
+
console.error('Live AI assist error:', error);
|
| 257 |
+
setLiveAIError(error instanceof Error ? error.message : 'Failed to check image quality');
|
| 258 |
+
setIsLiveAILoading(false);
|
| 259 |
+
}
|
| 260 |
+
};
|
| 261 |
+
|
| 262 |
return (
|
| 263 |
<div className="min-h-screen bg-gradient-to-br from-gray-50 to-blue-50 p-4 md:p-6">
|
| 264 |
{/* Header */}
|
|
|
|
| 311 |
|
| 312 |
{/* Image Display */}
|
| 313 |
<div className="relative bg-gray-900 rounded-xl overflow-hidden mb-6 border-2 border-gray-700">
|
| 314 |
+
<div className="aspect-video relative flex items-center justify-center">
|
| 315 |
+
{selectedImage ? (
|
| 316 |
+
<img
|
| 317 |
+
src={displayedImageUrl}
|
| 318 |
+
alt="Selected"
|
| 319 |
+
className="w-full h-full object-contain"
|
| 320 |
+
/>
|
| 321 |
+
) : (
|
| 322 |
+
<>
|
| 323 |
+
<video
|
| 324 |
+
ref={liveVideoRef}
|
| 325 |
+
src="/live.mp4"
|
| 326 |
+
autoPlay
|
| 327 |
+
loop
|
| 328 |
+
muted
|
| 329 |
+
crossOrigin="anonymous"
|
| 330 |
+
className="w-full h-full object-cover"
|
| 331 |
+
style={greenApplied ? { filter: 'saturate(0.3) hue-rotate(120deg) brightness(1.1)' } : {}}
|
| 332 |
+
/>
|
| 333 |
+
{greenApplied && (
|
| 334 |
+
<div className="absolute inset-0 bg-green-500 opacity-5 pointer-events-none z-10" />
|
| 335 |
+
)}
|
| 336 |
+
</>
|
| 337 |
+
)}
|
| 338 |
+
{!selectedImage && (
|
| 339 |
+
<div className="absolute top-4 left-4 flex items-center gap-2 bg-red-500 text-white px-3 py-1 rounded-full text-sm font-semibold z-20">
|
| 340 |
+
<div className="w-2 h-2 rounded-full bg-white/70"></div>
|
| 341 |
+
Live
|
| 342 |
+
</div>
|
| 343 |
+
)}
|
| 344 |
+
</div>
|
| 345 |
</div>
|
| 346 |
|
| 347 |
{/* Capture Controls */}
|
|
|
|
| 376 |
</>
|
| 377 |
)}
|
| 378 |
</button>
|
| 379 |
+
|
| 380 |
+
<button
|
| 381 |
+
onClick={handleUploadClick}
|
| 382 |
+
className="flex items-center gap-2 px-6 py-3 bg-blue-600 text-white rounded-lg hover:bg-blue-700 transition-colors shadow-md font-semibold"
|
| 383 |
+
>
|
| 384 |
+
<Upload className="w-5 h-5" />
|
| 385 |
+
Upload
|
| 386 |
+
</button>
|
| 387 |
+
<input
|
| 388 |
+
ref={fileInputRef}
|
| 389 |
+
type="file"
|
| 390 |
+
accept="image/*,video/*"
|
| 391 |
+
multiple
|
| 392 |
+
className="hidden"
|
| 393 |
+
onChange={handleFileUpload}
|
| 394 |
+
/>
|
| 395 |
</div>
|
| 396 |
+
|
| 397 |
+
{/* Centered AI Assist Button */}
|
| 398 |
+
<button
|
| 399 |
+
onClick={handleGreenFilterMainAIAssist}
|
| 400 |
+
disabled={isLiveAILoading}
|
| 401 |
+
className="w-full flex items-center justify-center gap-2 px-6 py-4 rounded-lg bg-gradient-to-r from-blue-600 to-blue-700 text-white font-bold hover:from-blue-700 hover:to-blue-800 transition-all shadow-lg hover:shadow-xl disabled:opacity-50 disabled:cursor-not-allowed text-base"
|
| 402 |
+
>
|
| 403 |
+
<Sparkles className="w-6 h-6" />
|
| 404 |
+
{isLiveAILoading ? 'Checking...' : 'AI Assist'}
|
| 405 |
+
</button>
|
| 406 |
+
|
| 407 |
+
{/* Live AI Results Panel */}
|
| 408 |
+
{liveAIResults && (
|
| 409 |
+
<div className="p-4 bg-green-50 border border-green-300 rounded-lg">
|
| 410 |
+
<div className="flex items-center gap-3 mb-3">
|
| 411 |
+
<div className="w-3 h-3 bg-green-500 rounded-full"></div>
|
| 412 |
+
<h4 className="font-bold text-green-800">Quality Check Results</h4>
|
| 413 |
+
</div>
|
| 414 |
+
<div className="space-y-2 text-sm">
|
| 415 |
+
<p className="text-gray-700">
|
| 416 |
+
<span className="font-semibold">Cervix Detected:</span> {liveAIResults.cervixDetected ? 'Yes' : 'No'} ({((liveAIResults.cervixDetected ? liveAIResults.confidence : 0) * 100).toFixed(1)}%)
|
| 417 |
+
</p>
|
| 418 |
+
<p className="text-gray-700">
|
| 419 |
+
<span className="font-semibold">Quality:</span> {liveAIResults.quality} ({(liveAIResults.confidence * 100).toFixed(1)}%)
|
| 420 |
+
</p>
|
| 421 |
+
</div>
|
| 422 |
+
</div>
|
| 423 |
+
)}
|
| 424 |
+
|
| 425 |
+
{liveAIError && (
|
| 426 |
+
<div className="text-sm text-red-600 bg-red-50 border border-red-200 rounded-lg p-3 text-center">
|
| 427 |
+
{liveAIError}
|
| 428 |
+
</div>
|
| 429 |
+
)}
|
| 430 |
<button className="w-full flex items-center justify-center gap-2 px-4 py-3 bg-gray-600 text-white rounded-lg font-semibold hover:bg-slate-700 transition-colors">
|
| 431 |
Next
|
| 432 |
<ArrowRight className="w-4 h-4" />
|
|
|
|
| 437 |
{/* Selected Image Tools */}
|
| 438 |
{selectedImage && (
|
| 439 |
<div className="space-y-4">
|
| 440 |
+
<div className="flex items-center justify-between gap-3">
|
| 441 |
+
<button
|
| 442 |
+
onClick={handleCaptureImage}
|
| 443 |
+
className="flex items-center gap-2 px-5 py-2 bg-[#05998c] text-white rounded-lg hover:bg-[#048a7d] transition-colors shadow-md font-semibold"
|
| 444 |
+
>
|
| 445 |
+
<Camera className="w-4 h-4" />
|
| 446 |
+
Capture This Image
|
| 447 |
+
</button>
|
| 448 |
<button className="px-6 py-2 bg-gray-600 text-white rounded-lg font-semibold hover:bg-slate-700 transition-colors flex items-center gap-2">
|
| 449 |
Next
|
| 450 |
<ArrowRight className="w-4 h-4" />
|
| 451 |
</button>
|
| 452 |
</div>
|
| 453 |
<ImageAnnotator
|
| 454 |
+
imageUrl={displayedImageUrl}
|
| 455 |
onAnnotationsChange={handleAnnotationsChange}
|
| 456 |
/>
|
| 457 |
<ImagingObservations />
|
|
|
|
| 464 |
<div className="space-y-6">
|
| 465 |
<div className="bg-white rounded-xl shadow-lg p-6 border border-gray-200">
|
| 466 |
{/* Green Filter Toggle */}
|
| 467 |
+
<div className="mb-6 flex items-center justify-between gap-4">
|
|
|
|
| 468 |
<div className="flex-1 bg-[#05998c] text-white px-6 py-2 rounded-lg">
|
| 469 |
<span className="font-bold">Green Filter</span>
|
| 470 |
</div>
|
|
|
|
| 486 |
</span>
|
| 487 |
</button>
|
| 488 |
</div>
|
| 489 |
+
|
| 490 |
|
| 491 |
<h3 className="text-lg font-bold text-gray-800 mb-4">
|
| 492 |
Captured Items ({capturedItems.length})
|
|
|
|
| 572 |
</div>
|
| 573 |
</div>
|
| 574 |
)}
|
| 575 |
+
|
| 576 |
+
<canvas ref={canvasRef} className="hidden" />
|
| 577 |
</div>
|
| 578 |
);
|
| 579 |
}
|
src/pages/GuidedCapturePage.tsx
CHANGED
|
@@ -1,11 +1,12 @@
|
|
| 1 |
import { useState, useEffect, useRef } from 'react';
|
| 2 |
-
import { Camera, Video, ArrowLeft, ArrowRight, CheckCircle2, Info, Pause, X, Edit2, RotateCcw, FileText, Sparkles } from 'lucide-react';
|
| 3 |
import { ImageAnnotator, type ImageAnnotatorHandle } from '../components/ImageAnnotator';
|
| 4 |
import { AceticAnnotator, type AceticAnnotatorHandle } from '../components/AceticAnnotator';
|
| 5 |
import { ImagingObservations } from '../components/ImagingObservations';
|
| 6 |
import { BiopsyMarking, type BiopsyCapturedImage } from './BiopsyMarking';
|
| 7 |
import { Compare } from './Compare';
|
| 8 |
import { ReportPage } from './ReportPage';
|
|
|
|
| 9 |
|
| 10 |
// Simple UI Component replacements
|
| 11 |
const Button: React.FC<any> = ({ children, onClick, disabled, variant, size, className, ...props }) => {
|
|
@@ -21,6 +22,7 @@ type CapturedItem = {
|
|
| 21 |
id: string;
|
| 22 |
type: 'image' | 'video';
|
| 23 |
url: string;
|
|
|
|
| 24 |
timestamp: Date;
|
| 25 |
annotations?: any[];
|
| 26 |
observations?: any;
|
|
@@ -37,6 +39,9 @@ type Props = {
|
|
| 37 |
export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, onCapturedImagesChange, onModeChange }: Props) {
|
| 38 |
const imageAnnotatorRef = useRef<ImageAnnotatorHandle>(null);
|
| 39 |
const aceticAnnotatorRef = useRef<AceticAnnotatorHandle>(null);
|
|
|
|
|
|
|
|
|
|
| 40 |
const [currentStep, setCurrentStep] = useState<ExamStep>('native');
|
| 41 |
const [capturedItems, setCapturedItems] = useState<Record<ExamStep, CapturedItem[]>>({
|
| 42 |
native: [],
|
|
@@ -52,7 +57,11 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 52 |
const [_observations, setObservations] = useState({});
|
| 53 |
const [isAnnotatingMode, setIsAnnotatingMode] = useState(false);
|
| 54 |
const [isCompareMode, setIsCompareMode] = useState(false);
|
| 55 |
-
const [
|
|
|
|
|
|
|
|
|
|
|
|
|
| 56 |
const audibleAlert = true;
|
| 57 |
|
| 58 |
// Timer states for Acetowhite step
|
|
@@ -147,6 +156,10 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 147 |
setSelectedImage(null);
|
| 148 |
setAnnotations([]);
|
| 149 |
setObservations({});
|
|
|
|
|
|
|
|
|
|
|
|
|
| 150 |
|
| 151 |
// Reset timer when leaving acetowhite step
|
| 152 |
if (currentStep !== 'acetowhite') {
|
|
@@ -190,6 +203,103 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 190 |
}
|
| 191 |
}, [initialMode]);
|
| 192 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 193 |
const handleAceticApplied = () => {
|
| 194 |
setAceticApplied(true);
|
| 195 |
setTimerStarted(true);
|
|
@@ -228,11 +338,51 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 228 |
|
| 229 |
|
| 230 |
|
| 231 |
-
const handleCaptureImage = () => {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 232 |
const newCapture: CapturedItem = {
|
| 233 |
id: Date.now().toString(),
|
| 234 |
type: 'image',
|
| 235 |
-
url:
|
|
|
|
| 236 |
timestamp: new Date()
|
| 237 |
};
|
| 238 |
setCapturedItems(prev => ({
|
|
@@ -241,83 +391,128 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 241 |
}));
|
| 242 |
};
|
| 243 |
|
| 244 |
-
const handleToggleRecording = () => {
|
| 245 |
if (!isRecording) {
|
| 246 |
setIsRecording(true);
|
| 247 |
-
|
| 248 |
-
setIsRecording(false);
|
| 249 |
-
const newCapture: CapturedItem = {
|
| 250 |
-
id: Date.now().toString(),
|
| 251 |
-
type: 'video',
|
| 252 |
-
url: liveFeedImageUrl,
|
| 253 |
-
timestamp: new Date()
|
| 254 |
-
};
|
| 255 |
-
setCapturedItems(prev => ({
|
| 256 |
-
...prev,
|
| 257 |
-
[currentStep]: [...prev[currentStep], newCapture]
|
| 258 |
-
}));
|
| 259 |
}
|
| 260 |
-
};
|
| 261 |
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
|
| 278 |
-
|
| 279 |
-
|
| 280 |
-
|
| 281 |
-
|
| 282 |
-
|
| 283 |
-
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
|
| 287 |
-
y: 310,
|
| 288 |
-
width: 280,
|
| 289 |
-
height: 280,
|
| 290 |
-
color: '#6366f1',
|
| 291 |
-
label: 'SCJ',
|
| 292 |
-
source: 'ai' as const,
|
| 293 |
-
identified: false
|
| 294 |
-
},
|
| 295 |
-
{
|
| 296 |
-
id: 'ai-os-' + Date.now(),
|
| 297 |
-
type: 'polygon' as const,
|
| 298 |
-
x: 350,
|
| 299 |
-
y: 380,
|
| 300 |
-
width: 150,
|
| 301 |
-
height: 100,
|
| 302 |
-
color: '#eab308',
|
| 303 |
-
label: 'OS',
|
| 304 |
-
source: 'ai' as const,
|
| 305 |
-
identified: false,
|
| 306 |
-
points: [
|
| 307 |
-
{ x: 350, y: 380 },
|
| 308 |
-
{ x: 500, y: 380 },
|
| 309 |
-
{ x: 500, y: 480 },
|
| 310 |
-
{ x: 350, y: 480 }
|
| 311 |
-
]
|
| 312 |
}
|
| 313 |
-
];
|
| 314 |
|
| 315 |
-
|
| 316 |
-
|
| 317 |
-
|
| 318 |
-
|
|
|
|
|
|
|
|
|
|
| 319 |
}
|
| 320 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 321 |
};
|
| 322 |
|
| 323 |
const handleDeleteCapture = (id: string) => {
|
|
@@ -335,6 +530,15 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 335 |
? capturedItems[currentStep].find(item => item.id === selectedImage)
|
| 336 |
: null;
|
| 337 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 338 |
const totalCaptures = capturedItems[currentStep].length;
|
| 339 |
const imageCaptures = capturedItems[currentStep].filter(item => item.type === 'image');
|
| 340 |
const videoCaptures = capturedItems[currentStep].filter(item => item.type === 'video');
|
|
@@ -390,6 +594,7 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 390 |
{stage === 'Report' && <FileText className="w-4 h-4" />}
|
| 391 |
<span>{stage}</span>
|
| 392 |
</span>
|
|
|
|
| 393 |
</div>
|
| 394 |
{idx < 3 && <div className="w-1.5 h-1.5 rounded-full bg-gray-300 mx-1" />}
|
| 395 |
</div>
|
|
@@ -450,6 +655,28 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 450 |
<h2 className="text-lg font-semibold text-slate-800 flex-1">
|
| 451 |
{steps.find(s => s.key === currentStep)?.label || 'Guided Capture'}
|
| 452 |
</h2>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 453 |
<button
|
| 454 |
onClick={() => {
|
| 455 |
const currentIndex = steps.findIndex(s => s.key === currentStep);
|
|
@@ -463,6 +690,69 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 463 |
Next
|
| 464 |
<ArrowRight className="h-4 w-4" />
|
| 465 |
</button>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 466 |
</div>
|
| 467 |
)}
|
| 468 |
|
|
@@ -508,39 +798,31 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 508 |
<button
|
| 509 |
onClick={() => {
|
| 510 |
setIsAnnotatingMode(false);
|
| 511 |
-
setAiDemoImageUrl(null);
|
| 512 |
}}
|
| 513 |
className="flex items-center gap-2 px-4 py-2 text-gray-600 hover:bg-gray-100 rounded-lg transition-colors"
|
| 514 |
>
|
| 515 |
<ArrowLeft className="w-4 h-4" />
|
| 516 |
Back to Live Feed
|
| 517 |
</button>
|
| 518 |
-
<button
|
| 519 |
-
onClick={handleAIAssist}
|
| 520 |
-
className="flex items-center gap-2 px-6 py-2 bg-[#05998c] text-white rounded-lg font-semibold hover:bg-[#047569] transition-colors"
|
| 521 |
-
>
|
| 522 |
-
<Sparkles className="w-5 h-5" />
|
| 523 |
-
AI Assist
|
| 524 |
-
</button>
|
| 525 |
</div>
|
| 526 |
|
| 527 |
<div>
|
| 528 |
{currentStep === 'acetowhite' ? (
|
| 529 |
<AceticAnnotator
|
| 530 |
ref={aceticAnnotatorRef}
|
| 531 |
-
imageUrls={
|
| 532 |
onAnnotationsChange={setAnnotations}
|
| 533 |
/>
|
| 534 |
) : (
|
| 535 |
<ImageAnnotator
|
| 536 |
ref={imageAnnotatorRef}
|
| 537 |
-
imageUrls={
|
| 538 |
onAnnotationsChange={setAnnotations}
|
| 539 |
/>
|
| 540 |
)}
|
| 541 |
</div>
|
| 542 |
</div>
|
| 543 |
-
) :
|
| 544 |
// Live Feed View
|
| 545 |
<>
|
| 546 |
<div className="grid grid-cols-1 lg:grid-cols-3 gap-6 mb-6">
|
|
@@ -550,26 +832,87 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 550 |
{/* Live Video Feed */}
|
| 551 |
<div className="relative bg-gray-900 rounded-xl overflow-hidden shadow-2xl border-2 border-gray-700 mb-4">
|
| 552 |
<div className="aspect-video flex items-center justify-center">
|
| 553 |
-
|
| 554 |
-
|
| 555 |
-
|
| 556 |
-
|
| 557 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 558 |
|
| 559 |
|
| 560 |
-
{currentStep === 'acetowhite' && aceticApplied && (
|
| 561 |
<div className="absolute top-4 right-4 bg-black/70 text-white px-4 py-2 rounded-lg">
|
| 562 |
<p className="text-2xl font-mono font-bold">{formatTime(seconds)}</p>
|
| 563 |
</div>
|
| 564 |
)}
|
| 565 |
|
| 566 |
-
{currentStep === 'lugol' && lugolApplied && (
|
| 567 |
<div className="absolute top-4 right-4 bg-black/70 text-white px-4 py-2 rounded-lg">
|
| 568 |
<p className="text-2xl font-mono font-bold">{formatTime(lugolSeconds)}</p>
|
| 569 |
</div>
|
| 570 |
)}
|
| 571 |
|
| 572 |
-
{currentStep === 'acetowhite' && showFlash && (
|
| 573 |
<div className="absolute inset-0 bg-[#05998c]/30 animate-pulse flex items-center justify-center">
|
| 574 |
<div className="bg-white/90 px-6 py-4 rounded-lg">
|
| 575 |
<p className="text-2xl font-bold text-[#0A2540]">
|
|
@@ -579,7 +922,7 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 579 |
</div>
|
| 580 |
)}
|
| 581 |
|
| 582 |
-
{currentStep === 'lugol' && lugolShowFlash && (
|
| 583 |
<div className="absolute inset-0 bg-[#05998c]/30 animate-pulse flex items-center justify-center">
|
| 584 |
<div className="bg-white/90 px-6 py-4 rounded-lg">
|
| 585 |
<p className="text-2xl font-bold text-[#0A2540]">
|
|
@@ -889,7 +1232,13 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 889 |
{imageCaptures.map(item => (
|
| 890 |
<div key={item.id} className="relative group">
|
| 891 |
<div
|
| 892 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 893 |
>
|
| 894 |
<img src={item.url} alt="Capture" className="w-full h-full object-cover" />
|
| 895 |
{item.annotations && item.annotations.length > 0 && (
|
|
@@ -899,7 +1248,10 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 899 |
)}
|
| 900 |
</div>
|
| 901 |
<button
|
| 902 |
-
onClick={() =>
|
|
|
|
|
|
|
|
|
|
| 903 |
className="absolute top-1 right-1 bg-red-500 text-white p-1 rounded opacity-0 group-hover:opacity-100 transition-opacity"
|
| 904 |
>
|
| 905 |
<X className="w-3 h-3" />
|
|
@@ -916,7 +1268,16 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 916 |
<h4 className="text-xs font-semibold text-gray-500 uppercase mb-2">Videos ({videoCaptures.length})</h4>
|
| 917 |
<div className="space-y-2">
|
| 918 |
{videoCaptures.map(item => (
|
| 919 |
-
<div
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 920 |
<div className="w-12 h-12 bg-gray-200 rounded flex items-center justify-center">
|
| 921 |
<Video className="w-6 h-6 text-gray-500" />
|
| 922 |
</div>
|
|
@@ -925,7 +1286,10 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 925 |
<p className="text-xs text-gray-500">{item.timestamp.toLocaleTimeString()}</p>
|
| 926 |
</div>
|
| 927 |
<button
|
| 928 |
-
onClick={() =>
|
|
|
|
|
|
|
|
|
|
| 929 |
className="p-1 hover:bg-red-50 rounded text-red-500"
|
| 930 |
>
|
| 931 |
<X className="w-4 h-4" />
|
|
@@ -952,41 +1316,11 @@ export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, o
|
|
| 952 |
</div>
|
| 953 |
)}
|
| 954 |
</>
|
| 955 |
-
) : (
|
| 956 |
-
// Single Image Annotation View
|
| 957 |
-
<div>
|
| 958 |
-
<div className="mb-4 flex items-center justify-between">
|
| 959 |
-
<button
|
| 960 |
-
onClick={() => {
|
| 961 |
-
setSelectedImage(null);
|
| 962 |
-
setAnnotations([]);
|
| 963 |
-
setObservations({});
|
| 964 |
-
}}
|
| 965 |
-
className="flex items-center gap-2 px-4 py-2 text-gray-600 hover:bg-gray-100 rounded-lg transition-colors"
|
| 966 |
-
>
|
| 967 |
-
<ArrowLeft className="w-4 h-4" />
|
| 968 |
-
Back to Live Feed
|
| 969 |
-
</button>
|
| 970 |
-
<button
|
| 971 |
-
onClick={handleAIAssist}
|
| 972 |
-
className="flex items-center gap-2 px-6 py-2 bg-[#05998c] text-white rounded-lg font-semibold hover:bg-[#047569] transition-colors"
|
| 973 |
-
>
|
| 974 |
-
<Sparkles className="w-5 h-5" />
|
| 975 |
-
AI Assist
|
| 976 |
-
</button>
|
| 977 |
-
</div>
|
| 978 |
-
|
| 979 |
-
<div>
|
| 980 |
-
<ImageAnnotator
|
| 981 |
-
ref={imageAnnotatorRef}
|
| 982 |
-
imageUrl={selectedItem?.url || liveFeedImageUrl}
|
| 983 |
-
onAnnotationsChange={setAnnotations}
|
| 984 |
-
/>
|
| 985 |
-
</div>
|
| 986 |
-
</div>
|
| 987 |
)}
|
| 988 |
</div>
|
| 989 |
</div>
|
|
|
|
|
|
|
| 990 |
</div>
|
| 991 |
);
|
| 992 |
}
|
|
|
|
| 1 |
import { useState, useEffect, useRef } from 'react';
|
| 2 |
+
import { Camera, Video, ArrowLeft, ArrowRight, CheckCircle2, Info, Pause, X, Edit2, RotateCcw, FileText, Sparkles, Upload } from 'lucide-react';
|
| 3 |
import { ImageAnnotator, type ImageAnnotatorHandle } from '../components/ImageAnnotator';
|
| 4 |
import { AceticAnnotator, type AceticAnnotatorHandle } from '../components/AceticAnnotator';
|
| 5 |
import { ImagingObservations } from '../components/ImagingObservations';
|
| 6 |
import { BiopsyMarking, type BiopsyCapturedImage } from './BiopsyMarking';
|
| 7 |
import { Compare } from './Compare';
|
| 8 |
import { ReportPage } from './ReportPage';
|
| 9 |
+
import { applyGreenFilter } from '../utils/filterUtils';
|
| 10 |
|
| 11 |
// Simple UI Component replacements
|
| 12 |
const Button: React.FC<any> = ({ children, onClick, disabled, variant, size, className, ...props }) => {
|
|
|
|
| 22 |
id: string;
|
| 23 |
type: 'image' | 'video';
|
| 24 |
url: string;
|
| 25 |
+
originalUrl: string;
|
| 26 |
timestamp: Date;
|
| 27 |
annotations?: any[];
|
| 28 |
observations?: any;
|
|
|
|
| 39 |
export function GuidedCapturePage({ onNext, onGoToPatientRecords, initialMode, onCapturedImagesChange, onModeChange }: Props) {
|
| 40 |
const imageAnnotatorRef = useRef<ImageAnnotatorHandle>(null);
|
| 41 |
const aceticAnnotatorRef = useRef<AceticAnnotatorHandle>(null);
|
| 42 |
+
const fileInputRef = useRef<HTMLInputElement>(null);
|
| 43 |
+
const videoRef = useRef<HTMLVideoElement>(null);
|
| 44 |
+
const canvasRef = useRef<HTMLCanvasElement>(null);
|
| 45 |
const [currentStep, setCurrentStep] = useState<ExamStep>('native');
|
| 46 |
const [capturedItems, setCapturedItems] = useState<Record<ExamStep, CapturedItem[]>>({
|
| 47 |
native: [],
|
|
|
|
| 57 |
const [_observations, setObservations] = useState({});
|
| 58 |
const [isAnnotatingMode, setIsAnnotatingMode] = useState(false);
|
| 59 |
const [isCompareMode, setIsCompareMode] = useState(false);
|
| 60 |
+
const [isLiveAILoading] = useState(false);
|
| 61 |
+
const [liveAIResults, setLiveAIResults] = useState<{ cervixDetected: boolean; detectionConfidence: number; quality: string; qualityConfidence: number } | null>(null);
|
| 62 |
+
const [liveAIError, setLiveAIError] = useState<string | null>(null);
|
| 63 |
+
const [isContinuousAIEnabled, setIsContinuousAIEnabled] = useState(false);
|
| 64 |
+
const continuousAIIntervalRef = useRef<NodeJS.Timeout | null>(null);
|
| 65 |
const audibleAlert = true;
|
| 66 |
|
| 67 |
// Timer states for Acetowhite step
|
|
|
|
| 156 |
setSelectedImage(null);
|
| 157 |
setAnnotations([]);
|
| 158 |
setObservations({});
|
| 159 |
+
// Clear AI Assist results when changing steps
|
| 160 |
+
setLiveAIResults(null);
|
| 161 |
+
setLiveAIError(null);
|
| 162 |
+
setIsContinuousAIEnabled(false);
|
| 163 |
|
| 164 |
// Reset timer when leaving acetowhite step
|
| 165 |
if (currentStep !== 'acetowhite') {
|
|
|
|
| 203 |
}
|
| 204 |
}, [initialMode]);
|
| 205 |
|
| 206 |
+
// Continuous AI Quality Checking on Live Feed
|
| 207 |
+
useEffect(() => {
|
| 208 |
+
// Compute selectedItem inline to avoid forward reference
|
| 209 |
+
const computedSelectedItem = selectedImage
|
| 210 |
+
? capturedItems[currentStep].find(item => item.id === selectedImage)
|
| 211 |
+
: null;
|
| 212 |
+
|
| 213 |
+
if (!isContinuousAIEnabled || !videoRef.current || !canvasRef.current || computedSelectedItem || isAnnotatingMode || isCompareMode) {
|
| 214 |
+
// Stop the interval if conditions are not met
|
| 215 |
+
if (continuousAIIntervalRef.current) {
|
| 216 |
+
clearInterval(continuousAIIntervalRef.current);
|
| 217 |
+
continuousAIIntervalRef.current = null;
|
| 218 |
+
}
|
| 219 |
+
return;
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
const checkFrameQuality = async () => {
|
| 223 |
+
try {
|
| 224 |
+
const canvas = canvasRef.current;
|
| 225 |
+
const video = videoRef.current;
|
| 226 |
+
|
| 227 |
+
if (!canvas || !video) return;
|
| 228 |
+
|
| 229 |
+
const ctx = canvas.getContext('2d');
|
| 230 |
+
const vw = video.videoWidth;
|
| 231 |
+
const vh = video.videoHeight;
|
| 232 |
+
|
| 233 |
+
if (!ctx || vw <= 0 || vh <= 0) return;
|
| 234 |
+
|
| 235 |
+
canvas.width = vw;
|
| 236 |
+
canvas.height = vh;
|
| 237 |
+
ctx.drawImage(video, 0, 0);
|
| 238 |
+
|
| 239 |
+
canvas.toBlob(async (blob) => {
|
| 240 |
+
if (!blob) return;
|
| 241 |
+
|
| 242 |
+
try {
|
| 243 |
+
const formData = new FormData();
|
| 244 |
+
formData.append('file', blob, 'frame.jpg');
|
| 245 |
+
|
| 246 |
+
const response = await fetch('http://localhost:8000/infer/image', {
|
| 247 |
+
method: 'POST',
|
| 248 |
+
body: formData,
|
| 249 |
+
});
|
| 250 |
+
|
| 251 |
+
if (!response.ok) {
|
| 252 |
+
throw new Error(`Backend error: ${response.statusText}`);
|
| 253 |
+
}
|
| 254 |
+
|
| 255 |
+
const result = await response.json();
|
| 256 |
+
|
| 257 |
+
const qualityScore = typeof result.quality_score === 'number'
|
| 258 |
+
? result.quality_score
|
| 259 |
+
: (typeof result.quality_percent === 'number' ? result.quality_percent / 100 : 0);
|
| 260 |
+
|
| 261 |
+
const detectionConf = typeof result.detection_confidence === 'number'
|
| 262 |
+
? result.detection_confidence
|
| 263 |
+
: 0;
|
| 264 |
+
|
| 265 |
+
setLiveAIResults({
|
| 266 |
+
cervixDetected: Boolean(result.detected),
|
| 267 |
+
detectionConfidence: detectionConf,
|
| 268 |
+
quality: mapQualityLabel(qualityScore),
|
| 269 |
+
qualityConfidence: qualityScore
|
| 270 |
+
});
|
| 271 |
+
setLiveAIError(null);
|
| 272 |
+
} catch (error) {
|
| 273 |
+
console.error('Continuous AI quality check error:', error);
|
| 274 |
+
// Don't set error for continuous checks to avoid cluttering UI
|
| 275 |
+
}
|
| 276 |
+
}, 'image/jpeg', 0.7);
|
| 277 |
+
} catch (error) {
|
| 278 |
+
console.error('Frame capture error:', error);
|
| 279 |
+
}
|
| 280 |
+
};
|
| 281 |
+
|
| 282 |
+
// Start continuous checking with 1-second interval
|
| 283 |
+
continuousAIIntervalRef.current = setInterval(checkFrameQuality, 1000);
|
| 284 |
+
|
| 285 |
+
return () => {
|
| 286 |
+
if (continuousAIIntervalRef.current) {
|
| 287 |
+
clearInterval(continuousAIIntervalRef.current);
|
| 288 |
+
continuousAIIntervalRef.current = null;
|
| 289 |
+
}
|
| 290 |
+
};
|
| 291 |
+
}, [isContinuousAIEnabled, selectedImage, isAnnotatingMode, isCompareMode, currentStep, capturedItems]);
|
| 292 |
+
|
| 293 |
+
// Cleanup continuous AI on unmount
|
| 294 |
+
useEffect(() => {
|
| 295 |
+
return () => {
|
| 296 |
+
if (continuousAIIntervalRef.current) {
|
| 297 |
+
clearInterval(continuousAIIntervalRef.current);
|
| 298 |
+
continuousAIIntervalRef.current = null;
|
| 299 |
+
}
|
| 300 |
+
};
|
| 301 |
+
}, []);
|
| 302 |
+
|
| 303 |
const handleAceticApplied = () => {
|
| 304 |
setAceticApplied(true);
|
| 305 |
setTimerStarted(true);
|
|
|
|
| 338 |
|
| 339 |
|
| 340 |
|
| 341 |
+
const handleCaptureImage = async () => {
|
| 342 |
+
let rawUrl: string;
|
| 343 |
+
let displayUrl: string;
|
| 344 |
+
|
| 345 |
+
if (selectedItem) {
|
| 346 |
+
// Capturing a selected image - use its original and displayed URLs
|
| 347 |
+
displayUrl = selectedItem.url;
|
| 348 |
+
rawUrl = selectedItem.originalUrl || selectedItem.url;
|
| 349 |
+
} else {
|
| 350 |
+
// Capturing from live feed - grab video frame from canvas
|
| 351 |
+
rawUrl = liveFeedImageUrl;
|
| 352 |
+
if (videoRef.current && canvasRef.current) {
|
| 353 |
+
const canvas = canvasRef.current;
|
| 354 |
+
const ctx = canvas.getContext('2d');
|
| 355 |
+
const vw = videoRef.current.videoWidth;
|
| 356 |
+
const vh = videoRef.current.videoHeight;
|
| 357 |
+
if (ctx && vw > 0 && vh > 0) {
|
| 358 |
+
canvas.width = vw;
|
| 359 |
+
canvas.height = vh;
|
| 360 |
+
ctx.drawImage(videoRef.current, 0, 0);
|
| 361 |
+
try {
|
| 362 |
+
rawUrl = canvas.toDataURL('image/png');
|
| 363 |
+
} catch (secErr) {
|
| 364 |
+
console.warn('Canvas capture failed, using fallback', secErr);
|
| 365 |
+
rawUrl = liveFeedImageUrl;
|
| 366 |
+
}
|
| 367 |
+
}
|
| 368 |
+
}
|
| 369 |
+
|
| 370 |
+
displayUrl = rawUrl;
|
| 371 |
+
// Apply green filter only for green filter step
|
| 372 |
+
if (currentStep === 'greenFilter' && greenApplied && rawUrl) {
|
| 373 |
+
try {
|
| 374 |
+
displayUrl = await applyGreenFilter(rawUrl, 'dataUrl') as string;
|
| 375 |
+
} catch {
|
| 376 |
+
displayUrl = rawUrl;
|
| 377 |
+
}
|
| 378 |
+
}
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
const newCapture: CapturedItem = {
|
| 382 |
id: Date.now().toString(),
|
| 383 |
type: 'image',
|
| 384 |
+
url: displayUrl,
|
| 385 |
+
originalUrl: rawUrl,
|
| 386 |
timestamp: new Date()
|
| 387 |
};
|
| 388 |
setCapturedItems(prev => ({
|
|
|
|
| 391 |
}));
|
| 392 |
};
|
| 393 |
|
| 394 |
+
const handleToggleRecording = async () => {
|
| 395 |
if (!isRecording) {
|
| 396 |
setIsRecording(true);
|
| 397 |
+
return;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 398 |
}
|
|
|
|
| 399 |
|
| 400 |
+
setIsRecording(false);
|
| 401 |
+
|
| 402 |
+
let rawUrl: string;
|
| 403 |
+
let displayUrl: string;
|
| 404 |
+
|
| 405 |
+
if (selectedItem) {
|
| 406 |
+
displayUrl = selectedItem.url;
|
| 407 |
+
rawUrl = selectedItem.originalUrl || selectedItem.url;
|
| 408 |
+
} else {
|
| 409 |
+
rawUrl = liveFeedImageUrl;
|
| 410 |
+
if (videoRef.current && canvasRef.current) {
|
| 411 |
+
const canvas = canvasRef.current;
|
| 412 |
+
const ctx = canvas.getContext('2d');
|
| 413 |
+
const vw = videoRef.current.videoWidth;
|
| 414 |
+
const vh = videoRef.current.videoHeight;
|
| 415 |
+
if (ctx && vw > 0 && vh > 0) {
|
| 416 |
+
canvas.width = vw;
|
| 417 |
+
canvas.height = vh;
|
| 418 |
+
ctx.drawImage(videoRef.current, 0, 0);
|
| 419 |
+
try {
|
| 420 |
+
rawUrl = canvas.toDataURL('image/png');
|
| 421 |
+
} catch (secErr) {
|
| 422 |
+
console.warn('Canvas capture failed, using fallback', secErr);
|
| 423 |
+
}
|
| 424 |
+
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 425 |
}
|
|
|
|
| 426 |
|
| 427 |
+
displayUrl = rawUrl;
|
| 428 |
+
if (currentStep === 'greenFilter' && greenApplied && rawUrl) {
|
| 429 |
+
try {
|
| 430 |
+
displayUrl = await applyGreenFilter(rawUrl, 'dataUrl') as string;
|
| 431 |
+
} catch {
|
| 432 |
+
displayUrl = rawUrl;
|
| 433 |
+
}
|
| 434 |
}
|
| 435 |
+
}
|
| 436 |
+
|
| 437 |
+
const newCapture: CapturedItem = {
|
| 438 |
+
id: Date.now().toString(),
|
| 439 |
+
type: 'video',
|
| 440 |
+
url: displayUrl,
|
| 441 |
+
originalUrl: rawUrl,
|
| 442 |
+
timestamp: new Date()
|
| 443 |
+
};
|
| 444 |
+
setCapturedItems(prev => ({
|
| 445 |
+
...prev,
|
| 446 |
+
[currentStep]: [...prev[currentStep], newCapture]
|
| 447 |
+
}));
|
| 448 |
+
};
|
| 449 |
+
|
| 450 |
+
const handleUploadClick = () => {
|
| 451 |
+
fileInputRef.current?.click();
|
| 452 |
+
};
|
| 453 |
+
|
| 454 |
+
const handleFileUpload = (e: React.ChangeEvent<HTMLInputElement>) => {
|
| 455 |
+
const files = e.target.files;
|
| 456 |
+
if (files && files.length > 0) {
|
| 457 |
+
Array.from(files).forEach(async (file) => {
|
| 458 |
+
const isVideo = file.type.startsWith('video/');
|
| 459 |
+
const objectUrl = URL.createObjectURL(file);
|
| 460 |
+
|
| 461 |
+
if (isVideo) {
|
| 462 |
+
const newCapture: CapturedItem = {
|
| 463 |
+
id: Date.now().toString() + Math.random(),
|
| 464 |
+
type: 'video',
|
| 465 |
+
url: objectUrl,
|
| 466 |
+
originalUrl: objectUrl,
|
| 467 |
+
timestamp: new Date()
|
| 468 |
+
};
|
| 469 |
+
console.log('File uploaded:', { name: file.name, type: file.type, isVideo, id: newCapture.id });
|
| 470 |
+
setCapturedItems(prev => ({
|
| 471 |
+
...prev,
|
| 472 |
+
[currentStep]: [...prev[currentStep], newCapture]
|
| 473 |
+
}));
|
| 474 |
+
} else {
|
| 475 |
+
// For images, always store raw objectUrl as originalUrl
|
| 476 |
+
let displayUrl = objectUrl;
|
| 477 |
+
if (currentStep === 'greenFilter' && greenApplied) {
|
| 478 |
+
try {
|
| 479 |
+
displayUrl = await applyGreenFilter(objectUrl, 'dataUrl') as string;
|
| 480 |
+
} catch (error) {
|
| 481 |
+
console.error('Error applying filter:', error);
|
| 482 |
+
displayUrl = objectUrl;
|
| 483 |
+
}
|
| 484 |
+
}
|
| 485 |
+
const newCapture: CapturedItem = {
|
| 486 |
+
id: Date.now().toString() + Math.random(),
|
| 487 |
+
type: 'image',
|
| 488 |
+
url: displayUrl,
|
| 489 |
+
originalUrl: objectUrl,
|
| 490 |
+
timestamp: new Date()
|
| 491 |
+
};
|
| 492 |
+
console.log('File uploaded:', { name: file.name, type: file.type, isVideo, id: newCapture.id });
|
| 493 |
+
setCapturedItems(prev => ({
|
| 494 |
+
...prev,
|
| 495 |
+
[currentStep]: [...prev[currentStep], newCapture]
|
| 496 |
+
}));
|
| 497 |
+
}
|
| 498 |
+
});
|
| 499 |
+
}
|
| 500 |
+
e.target.value = '';
|
| 501 |
+
};
|
| 502 |
+
|
| 503 |
+
const mapQualityLabel = (score: number) => {
|
| 504 |
+
if (score >= 0.8) return 'Excellent';
|
| 505 |
+
if (score >= 0.6) return 'Good';
|
| 506 |
+
return 'Bad';
|
| 507 |
+
};
|
| 508 |
+
|
| 509 |
+
const handleMainAIAssist = () => {
|
| 510 |
+
// Toggle continuous AI assist for live feed
|
| 511 |
+
setIsContinuousAIEnabled(prev => !prev);
|
| 512 |
+
if (!isContinuousAIEnabled) {
|
| 513 |
+
setLiveAIError(null);
|
| 514 |
+
setLiveAIResults(null);
|
| 515 |
+
}
|
| 516 |
};
|
| 517 |
|
| 518 |
const handleDeleteCapture = (id: string) => {
|
|
|
|
| 530 |
? capturedItems[currentStep].find(item => item.id === selectedImage)
|
| 531 |
: null;
|
| 532 |
|
| 533 |
+
// Debug: Log when selectedItem changes
|
| 534 |
+
useEffect(() => {
|
| 535 |
+
if (selectedItem) {
|
| 536 |
+
console.log('Selected item:', { id: selectedItem.id, type: selectedItem.type, url: selectedItem.url });
|
| 537 |
+
} else {
|
| 538 |
+
console.log('No item selected');
|
| 539 |
+
}
|
| 540 |
+
}, [selectedItem]);
|
| 541 |
+
|
| 542 |
const totalCaptures = capturedItems[currentStep].length;
|
| 543 |
const imageCaptures = capturedItems[currentStep].filter(item => item.type === 'image');
|
| 544 |
const videoCaptures = capturedItems[currentStep].filter(item => item.type === 'video');
|
|
|
|
| 594 |
{stage === 'Report' && <FileText className="w-4 h-4" />}
|
| 595 |
<span>{stage}</span>
|
| 596 |
</span>
|
| 597 |
+
|
| 598 |
</div>
|
| 599 |
{idx < 3 && <div className="w-1.5 h-1.5 rounded-full bg-gray-300 mx-1" />}
|
| 600 |
</div>
|
|
|
|
| 655 |
<h2 className="text-lg font-semibold text-slate-800 flex-1">
|
| 656 |
{steps.find(s => s.key === currentStep)?.label || 'Guided Capture'}
|
| 657 |
</h2>
|
| 658 |
+
<button
|
| 659 |
+
onClick={handleMainAIAssist}
|
| 660 |
+
disabled={isLiveAILoading}
|
| 661 |
+
className={`px-6 py-3 rounded-lg transition-all font-semibold flex items-center justify-center gap-2 shadow-lg hover:shadow-xl disabled:opacity-50 disabled:cursor-not-allowed min-w-max ${
|
| 662 |
+
isContinuousAIEnabled
|
| 663 |
+
? 'bg-gradient-to-r from-green-500 to-green-600 text-white hover:from-green-600 hover:to-green-700 animate-pulse'
|
| 664 |
+
: 'bg-gradient-to-r from-blue-600 to-blue-700 text-white hover:from-blue-700 hover:to-blue-800'
|
| 665 |
+
}`}
|
| 666 |
+
title={isContinuousAIEnabled ? 'Continuous AI quality checking is ON' : 'Enable continuous AI quality checking for live feed'}
|
| 667 |
+
>
|
| 668 |
+
<Sparkles className="h-5 w-5" />
|
| 669 |
+
{isContinuousAIEnabled ? 'AI Live ✓' : 'AI Assist'}
|
| 670 |
+
</button>
|
| 671 |
+
<div className="flex items-center gap-2">
|
| 672 |
+
<button
|
| 673 |
+
onClick={handleUploadClick}
|
| 674 |
+
className="h-8 px-3 bg-green-600 text-white hover:bg-green-700 rounded transition-colors flex items-center gap-2"
|
| 675 |
+
>
|
| 676 |
+
<Upload className="h-4 w-4" />
|
| 677 |
+
Upload
|
| 678 |
+
</button>
|
| 679 |
+
</div>
|
| 680 |
<button
|
| 681 |
onClick={() => {
|
| 682 |
const currentIndex = steps.findIndex(s => s.key === currentStep);
|
|
|
|
| 690 |
Next
|
| 691 |
<ArrowRight className="h-4 w-4" />
|
| 692 |
</button>
|
| 693 |
+
<input
|
| 694 |
+
ref={fileInputRef}
|
| 695 |
+
type="file"
|
| 696 |
+
accept="image/*,video/*"
|
| 697 |
+
multiple
|
| 698 |
+
className="hidden"
|
| 699 |
+
onChange={handleFileUpload}
|
| 700 |
+
/>
|
| 701 |
+
</div>
|
| 702 |
+
)}
|
| 703 |
+
|
| 704 |
+
{/* Quality Results Display */}
|
| 705 |
+
{liveAIResults && !isAnnotatingMode && !isCompareMode && currentStep !== 'report' && currentStep !== 'biopsyMarking' && (
|
| 706 |
+
<div className={`mb-4 p-4 rounded-lg border-2 transition-all ${
|
| 707 |
+
isContinuousAIEnabled
|
| 708 |
+
? 'bg-gradient-to-r from-green-50 to-emerald-50 border-green-300'
|
| 709 |
+
: 'bg-green-50 border-green-200'
|
| 710 |
+
}`}>
|
| 711 |
+
<div className="flex items-start gap-3">
|
| 712 |
+
<div className="flex-1">
|
| 713 |
+
<h3 className="font-semibold text-green-900 mb-3 flex items-center gap-2">
|
| 714 |
+
{isContinuousAIEnabled ? '🎥 Live Quality Monitoring' : 'Quality Check Results'}
|
| 715 |
+
{isContinuousAIEnabled && <span className="inline-block w-2 h-2 bg-green-500 rounded-full animate-pulse"></span>}
|
| 716 |
+
</h3>
|
| 717 |
+
<div className="space-y-3">
|
| 718 |
+
<div>
|
| 719 |
+
<div className="flex items-center justify-between mb-1">
|
| 720 |
+
<p className="text-sm text-gray-600">Cervix Detected</p>
|
| 721 |
+
<p className="text-sm font-medium text-green-700">
|
| 722 |
+
{liveAIResults.cervixDetected ? 'Yes' : 'No'} ({(liveAIResults.detectionConfidence * 100).toFixed(1)}%)
|
| 723 |
+
</p>
|
| 724 |
+
</div>
|
| 725 |
+
</div>
|
| 726 |
+
<div>
|
| 727 |
+
<div className="flex items-center justify-between mb-2">
|
| 728 |
+
<p className="text-sm text-gray-600">Image Quality</p>
|
| 729 |
+
<p className="text-sm font-semibold">
|
| 730 |
+
{liveAIResults.quality} ({(liveAIResults.qualityConfidence * 100).toFixed(1)}%)
|
| 731 |
+
</p>
|
| 732 |
+
</div>
|
| 733 |
+
{/* Progress Bar */}
|
| 734 |
+
<div className="w-full bg-gray-200 rounded-full h-3 overflow-hidden shadow-inner">
|
| 735 |
+
<div
|
| 736 |
+
className={`h-full rounded-full transition-all duration-500 ${
|
| 737 |
+
liveAIResults.qualityConfidence >= 0.8
|
| 738 |
+
? 'bg-gradient-to-r from-green-500 to-green-600'
|
| 739 |
+
: liveAIResults.qualityConfidence >= 0.6
|
| 740 |
+
? 'bg-gradient-to-r from-orange-400 to-orange-500'
|
| 741 |
+
: 'bg-gradient-to-r from-red-500 to-red-600'
|
| 742 |
+
}`}
|
| 743 |
+
style={{ width: `${Math.min(liveAIResults.qualityConfidence * 100, 100)}%` }}
|
| 744 |
+
/>
|
| 745 |
+
</div>
|
| 746 |
+
</div>
|
| 747 |
+
</div>
|
| 748 |
+
</div>
|
| 749 |
+
</div>
|
| 750 |
+
</div>
|
| 751 |
+
)}
|
| 752 |
+
|
| 753 |
+
{liveAIError && !isAnnotatingMode && !isCompareMode && currentStep !== 'report' && currentStep !== 'biopsyMarking' && (
|
| 754 |
+
<div className="mb-4 p-4 bg-red-50 border border-red-200 rounded-lg">
|
| 755 |
+
<p className="text-red-700 font-medium">{liveAIError}</p>
|
| 756 |
</div>
|
| 757 |
)}
|
| 758 |
|
|
|
|
| 798 |
<button
|
| 799 |
onClick={() => {
|
| 800 |
setIsAnnotatingMode(false);
|
|
|
|
| 801 |
}}
|
| 802 |
className="flex items-center gap-2 px-4 py-2 text-gray-600 hover:bg-gray-100 rounded-lg transition-colors"
|
| 803 |
>
|
| 804 |
<ArrowLeft className="w-4 h-4" />
|
| 805 |
Back to Live Feed
|
| 806 |
</button>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 807 |
</div>
|
| 808 |
|
| 809 |
<div>
|
| 810 |
{currentStep === 'acetowhite' ? (
|
| 811 |
<AceticAnnotator
|
| 812 |
ref={aceticAnnotatorRef}
|
| 813 |
+
imageUrls={imageCaptures.map(item => item.url)}
|
| 814 |
onAnnotationsChange={setAnnotations}
|
| 815 |
/>
|
| 816 |
) : (
|
| 817 |
<ImageAnnotator
|
| 818 |
ref={imageAnnotatorRef}
|
| 819 |
+
imageUrls={imageCaptures.map(item => item.url)}
|
| 820 |
onAnnotationsChange={setAnnotations}
|
| 821 |
/>
|
| 822 |
)}
|
| 823 |
</div>
|
| 824 |
</div>
|
| 825 |
+
) : (
|
| 826 |
// Live Feed View
|
| 827 |
<>
|
| 828 |
<div className="grid grid-cols-1 lg:grid-cols-3 gap-6 mb-6">
|
|
|
|
| 832 |
{/* Live Video Feed */}
|
| 833 |
<div className="relative bg-gray-900 rounded-xl overflow-hidden shadow-2xl border-2 border-gray-700 mb-4">
|
| 834 |
<div className="aspect-video flex items-center justify-center">
|
| 835 |
+
{selectedItem ? (
|
| 836 |
+
selectedItem.type === 'video' ? (
|
| 837 |
+
<video
|
| 838 |
+
key={selectedItem.id}
|
| 839 |
+
src={selectedItem.url}
|
| 840 |
+
controls
|
| 841 |
+
autoPlay
|
| 842 |
+
style={{ width: '100%', height: '100%', objectFit: 'contain' }}
|
| 843 |
+
onError={(e) => console.error('Video playback error:', e)}
|
| 844 |
+
onLoadedMetadata={() => console.log('Video loaded:', selectedItem.url)}
|
| 845 |
+
/>
|
| 846 |
+
) : (
|
| 847 |
+
<img
|
| 848 |
+
src={selectedItem.url}
|
| 849 |
+
alt="Selected capture"
|
| 850 |
+
className="w-full h-full object-contain"
|
| 851 |
+
onError={(e) => console.error('Image load error:', e)}
|
| 852 |
+
onLoad={() => console.log('Image loaded:', selectedItem.url)}
|
| 853 |
+
/>
|
| 854 |
+
)
|
| 855 |
+
) : (
|
| 856 |
+
<>
|
| 857 |
+
<video
|
| 858 |
+
ref={videoRef}
|
| 859 |
+
src="/live.mp4"
|
| 860 |
+
autoPlay
|
| 861 |
+
loop
|
| 862 |
+
muted
|
| 863 |
+
crossOrigin="anonymous"
|
| 864 |
+
className="w-full h-full object-cover"
|
| 865 |
+
style={currentStep === 'greenFilter' && greenApplied ? { filter: 'saturate(0.3) hue-rotate(120deg) brightness(1.1)' } : {}}
|
| 866 |
+
/>
|
| 867 |
+
{currentStep === 'greenFilter' && greenApplied && (
|
| 868 |
+
<div className="absolute inset-0 bg-green-500 opacity-5 pointer-events-none" />
|
| 869 |
+
)}
|
| 870 |
+
</>
|
| 871 |
+
)}
|
| 872 |
+
{!selectedItem && (
|
| 873 |
+
<div className="absolute top-4 left-4 flex items-center gap-2 bg-red-500 text-white px-3 py-1 rounded-full text-sm font-semibold">
|
| 874 |
+
<div className={`w-2 h-2 rounded-full ${isRecording ? 'bg-white animate-pulse' : 'bg-white/70'}`} />
|
| 875 |
+
{isRecording ? 'Recording' : 'Live'}
|
| 876 |
+
</div>
|
| 877 |
+
)}
|
| 878 |
+
{selectedItem && (
|
| 879 |
+
<>
|
| 880 |
+
<button
|
| 881 |
+
onClick={() => {
|
| 882 |
+
console.log('Back to live feed clicked');
|
| 883 |
+
setSelectedImage(null);
|
| 884 |
+
}}
|
| 885 |
+
className="absolute top-4 left-4 bg-blue-600 text-white px-4 py-2 rounded-lg hover:bg-blue-700 transition-colors flex items-center gap-2"
|
| 886 |
+
>
|
| 887 |
+
<ArrowLeft className="w-4 h-4" />
|
| 888 |
+
Back to Live Feed
|
| 889 |
+
</button>
|
| 890 |
+
{selectedItem.type === 'image' && (
|
| 891 |
+
<button
|
| 892 |
+
onClick={handleCaptureImage}
|
| 893 |
+
className="absolute top-4 right-4 bg-[#05998c] text-white px-4 py-2 rounded-lg hover:bg-[#047569] transition-colors flex items-center gap-2 shadow-md font-semibold"
|
| 894 |
+
>
|
| 895 |
+
<Camera className="w-4 h-4" />
|
| 896 |
+
Capture This Image
|
| 897 |
+
</button>
|
| 898 |
+
)}
|
| 899 |
+
</>
|
| 900 |
+
)}
|
| 901 |
|
| 902 |
|
| 903 |
+
{!selectedItem && currentStep === 'acetowhite' && aceticApplied && (
|
| 904 |
<div className="absolute top-4 right-4 bg-black/70 text-white px-4 py-2 rounded-lg">
|
| 905 |
<p className="text-2xl font-mono font-bold">{formatTime(seconds)}</p>
|
| 906 |
</div>
|
| 907 |
)}
|
| 908 |
|
| 909 |
+
{!selectedItem && currentStep === 'lugol' && lugolApplied && (
|
| 910 |
<div className="absolute top-4 right-4 bg-black/70 text-white px-4 py-2 rounded-lg">
|
| 911 |
<p className="text-2xl font-mono font-bold">{formatTime(lugolSeconds)}</p>
|
| 912 |
</div>
|
| 913 |
)}
|
| 914 |
|
| 915 |
+
{!selectedItem && currentStep === 'acetowhite' && showFlash && (
|
| 916 |
<div className="absolute inset-0 bg-[#05998c]/30 animate-pulse flex items-center justify-center">
|
| 917 |
<div className="bg-white/90 px-6 py-4 rounded-lg">
|
| 918 |
<p className="text-2xl font-bold text-[#0A2540]">
|
|
|
|
| 922 |
</div>
|
| 923 |
)}
|
| 924 |
|
| 925 |
+
{!selectedItem && currentStep === 'lugol' && lugolShowFlash && (
|
| 926 |
<div className="absolute inset-0 bg-[#05998c]/30 animate-pulse flex items-center justify-center">
|
| 927 |
<div className="bg-white/90 px-6 py-4 rounded-lg">
|
| 928 |
<p className="text-2xl font-bold text-[#0A2540]">
|
|
|
|
| 1232 |
{imageCaptures.map(item => (
|
| 1233 |
<div key={item.id} className="relative group">
|
| 1234 |
<div
|
| 1235 |
+
onClick={() => {
|
| 1236 |
+
console.log('Clicked image thumbnail:', item.id);
|
| 1237 |
+
setSelectedImage(item.id);
|
| 1238 |
+
}}
|
| 1239 |
+
className={`aspect-square bg-gray-100 rounded-lg overflow-hidden border-2 transition-all cursor-pointer hover:border-blue-500 ${
|
| 1240 |
+
selectedImage === item.id ? 'border-blue-600 ring-2 ring-blue-300' : 'border-gray-200'
|
| 1241 |
+
}`}
|
| 1242 |
>
|
| 1243 |
<img src={item.url} alt="Capture" className="w-full h-full object-cover" />
|
| 1244 |
{item.annotations && item.annotations.length > 0 && (
|
|
|
|
| 1248 |
)}
|
| 1249 |
</div>
|
| 1250 |
<button
|
| 1251 |
+
onClick={(e) => {
|
| 1252 |
+
e.stopPropagation();
|
| 1253 |
+
handleDeleteCapture(item.id);
|
| 1254 |
+
}}
|
| 1255 |
className="absolute top-1 right-1 bg-red-500 text-white p-1 rounded opacity-0 group-hover:opacity-100 transition-opacity"
|
| 1256 |
>
|
| 1257 |
<X className="w-3 h-3" />
|
|
|
|
| 1268 |
<h4 className="text-xs font-semibold text-gray-500 uppercase mb-2">Videos ({videoCaptures.length})</h4>
|
| 1269 |
<div className="space-y-2">
|
| 1270 |
{videoCaptures.map(item => (
|
| 1271 |
+
<div
|
| 1272 |
+
key={item.id}
|
| 1273 |
+
onClick={() => {
|
| 1274 |
+
console.log('Clicked video thumbnail:', item.id);
|
| 1275 |
+
setSelectedImage(item.id);
|
| 1276 |
+
}}
|
| 1277 |
+
className={`relative group rounded-lg p-3 flex items-center gap-3 cursor-pointer transition-all ${
|
| 1278 |
+
selectedImage === item.id ? 'bg-blue-100 border-2 border-blue-600' : 'bg-gray-50 border-2 border-transparent hover:bg-gray-100'
|
| 1279 |
+
}`}
|
| 1280 |
+
>
|
| 1281 |
<div className="w-12 h-12 bg-gray-200 rounded flex items-center justify-center">
|
| 1282 |
<Video className="w-6 h-6 text-gray-500" />
|
| 1283 |
</div>
|
|
|
|
| 1286 |
<p className="text-xs text-gray-500">{item.timestamp.toLocaleTimeString()}</p>
|
| 1287 |
</div>
|
| 1288 |
<button
|
| 1289 |
+
onClick={(e) => {
|
| 1290 |
+
e.stopPropagation();
|
| 1291 |
+
handleDeleteCapture(item.id);
|
| 1292 |
+
}}
|
| 1293 |
className="p-1 hover:bg-red-50 rounded text-red-500"
|
| 1294 |
>
|
| 1295 |
<X className="w-4 h-4" />
|
|
|
|
| 1316 |
</div>
|
| 1317 |
)}
|
| 1318 |
</>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1319 |
)}
|
| 1320 |
</div>
|
| 1321 |
</div>
|
| 1322 |
+
|
| 1323 |
+
<canvas ref={canvasRef} className="hidden" />
|
| 1324 |
</div>
|
| 1325 |
);
|
| 1326 |
}
|
src/pages/LugolExamPage.tsx
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
import { useState, useEffect } from 'react';
|
| 2 |
-
import { Camera, Video, ArrowLeft, ArrowRight, CheckCircle2, Info, Pause, X, Edit2, RotateCcw, Save, ChevronRight } from 'lucide-react';
|
| 3 |
import { ImageAnnotator } from '../components/ImageAnnotator';
|
| 4 |
|
| 5 |
type CapturedItem = {
|
|
@@ -29,6 +29,9 @@ export function LugolExamPage({ goBack, onNext }: Props) {
|
|
| 29 |
const [showFlash, setShowFlash] = useState(false);
|
| 30 |
const audibleAlert = true;
|
| 31 |
const [timerPaused, setTimerPaused] = useState(false);
|
|
|
|
|
|
|
|
|
|
| 32 |
|
| 33 |
const cervixImageUrl = "/C87Aceto_(1).jpg";
|
| 34 |
|
|
@@ -126,6 +129,54 @@ export function LugolExamPage({ goBack, onNext }: Props) {
|
|
| 126 |
}
|
| 127 |
};
|
| 128 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 129 |
const selectedItem = selectedImage
|
| 130 |
? capturedItems.find(item => item.id === selectedImage)
|
| 131 |
: null;
|
|
@@ -399,6 +450,40 @@ export function LugolExamPage({ goBack, onNext }: Props) {
|
|
| 399 |
{isRecording ? 'Stop' : 'Record'}
|
| 400 |
</button>
|
| 401 |
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 402 |
<button className="w-full flex items-center justify-center gap-2 px-4 py-3 bg-gray-600 text-white rounded-lg font-semibold hover:bg-slate-700 transition-colors">
|
| 403 |
Next
|
| 404 |
<ArrowRight className="w-4 h-4" />
|
|
|
|
| 1 |
import { useState, useEffect } from 'react';
|
| 2 |
+
import { Camera, Video, ArrowLeft, ArrowRight, CheckCircle2, Info, Pause, X, Edit2, RotateCcw, Save, ChevronRight, Sparkles } from 'lucide-react';
|
| 3 |
import { ImageAnnotator } from '../components/ImageAnnotator';
|
| 4 |
|
| 5 |
type CapturedItem = {
|
|
|
|
| 29 |
const [showFlash, setShowFlash] = useState(false);
|
| 30 |
const audibleAlert = true;
|
| 31 |
const [timerPaused, setTimerPaused] = useState(false);
|
| 32 |
+
const [isLiveAILoading, setIsLiveAILoading] = useState(false);
|
| 33 |
+
const [liveAIResults, setLiveAIResults] = useState<{ cervixDetected: boolean; quality: string; confidence: number } | null>(null);
|
| 34 |
+
const [liveAIError, setLiveAIError] = useState<string | null>(null);
|
| 35 |
|
| 36 |
const cervixImageUrl = "/C87Aceto_(1).jpg";
|
| 37 |
|
|
|
|
| 129 |
}
|
| 130 |
};
|
| 131 |
|
| 132 |
+
const mapQualityLabel = (score: number) => {
|
| 133 |
+
if (score >= 0.8) return 'Excellent';
|
| 134 |
+
if (score >= 0.6) return 'Good';
|
| 135 |
+
return 'Bad';
|
| 136 |
+
};
|
| 137 |
+
|
| 138 |
+
const handleLugolMainAIAssist = async () => {
|
| 139 |
+
setLiveAIError(null);
|
| 140 |
+
setLiveAIResults(null);
|
| 141 |
+
const imageItems = capturedItems.filter(item => item.type === 'image');
|
| 142 |
+
const targetItem = imageItems[0];
|
| 143 |
+
|
| 144 |
+
setIsLiveAILoading(true);
|
| 145 |
+
|
| 146 |
+
try {
|
| 147 |
+
const response = await fetch(targetItem ? targetItem.url : cervixImageUrl);
|
| 148 |
+
const blob = await response.blob();
|
| 149 |
+
|
| 150 |
+
const formData = new FormData();
|
| 151 |
+
formData.append('file', blob, 'image.jpg');
|
| 152 |
+
const backendResponse = await fetch('http://localhost:8000/infer/image', {
|
| 153 |
+
method: 'POST',
|
| 154 |
+
body: formData,
|
| 155 |
+
});
|
| 156 |
+
|
| 157 |
+
if (!backendResponse.ok) {
|
| 158 |
+
throw new Error(`Backend error: ${backendResponse.statusText}`);
|
| 159 |
+
}
|
| 160 |
+
|
| 161 |
+
const result = await backendResponse.json();
|
| 162 |
+
|
| 163 |
+
const qualityScore = typeof result.quality_score === 'number'
|
| 164 |
+
? result.quality_score
|
| 165 |
+
: (typeof result.quality_percent === 'number' ? result.quality_percent / 100 : 0);
|
| 166 |
+
|
| 167 |
+
setLiveAIResults({
|
| 168 |
+
cervixDetected: Boolean(result.detected),
|
| 169 |
+
quality: mapQualityLabel(qualityScore),
|
| 170 |
+
confidence: qualityScore
|
| 171 |
+
});
|
| 172 |
+
setIsLiveAILoading(false);
|
| 173 |
+
} catch (error) {
|
| 174 |
+
console.error('Live AI assist error:', error);
|
| 175 |
+
setLiveAIError(error instanceof Error ? error.message : 'Failed to check image quality');
|
| 176 |
+
setIsLiveAILoading(false);
|
| 177 |
+
}
|
| 178 |
+
};
|
| 179 |
+
|
| 180 |
const selectedItem = selectedImage
|
| 181 |
? capturedItems.find(item => item.id === selectedImage)
|
| 182 |
: null;
|
|
|
|
| 450 |
{isRecording ? 'Stop' : 'Record'}
|
| 451 |
</button>
|
| 452 |
</div>
|
| 453 |
+
|
| 454 |
+
{/* Centered AI Assist Button */}
|
| 455 |
+
<button
|
| 456 |
+
onClick={handleLugolMainAIAssist}
|
| 457 |
+
disabled={isLiveAILoading}
|
| 458 |
+
className="w-full flex items-center justify-center gap-2 px-6 py-4 rounded-lg bg-gradient-to-r from-blue-600 to-blue-700 text-white font-bold hover:from-blue-700 hover:to-blue-800 transition-all shadow-lg hover:shadow-xl disabled:opacity-50 disabled:cursor-not-allowed text-base"
|
| 459 |
+
>
|
| 460 |
+
<Sparkles className="w-6 h-6" />
|
| 461 |
+
{isLiveAILoading ? 'Checking...' : 'AI Assist'}
|
| 462 |
+
</button>
|
| 463 |
+
|
| 464 |
+
{/* Live AI Results Panel */}
|
| 465 |
+
{liveAIResults && (
|
| 466 |
+
<div className="p-4 bg-green-50 border border-green-300 rounded-lg">
|
| 467 |
+
<div className="flex items-center gap-3 mb-3">
|
| 468 |
+
<div className="w-3 h-3 bg-green-500 rounded-full"></div>
|
| 469 |
+
<h4 className="font-bold text-green-800">Quality Check Results</h4>
|
| 470 |
+
</div>
|
| 471 |
+
<div className="space-y-2 text-sm">
|
| 472 |
+
<p className="text-gray-700">
|
| 473 |
+
<span className="font-semibold">Cervix Detected:</span> {liveAIResults.cervixDetected ? 'Yes' : 'No'} ({((liveAIResults.cervixDetected ? liveAIResults.confidence : 0) * 100).toFixed(1)}%)
|
| 474 |
+
</p>
|
| 475 |
+
<p className="text-gray-700">
|
| 476 |
+
<span className="font-semibold">Quality:</span> {liveAIResults.quality} ({(liveAIResults.confidence * 100).toFixed(1)}%)
|
| 477 |
+
</p>
|
| 478 |
+
</div>
|
| 479 |
+
</div>
|
| 480 |
+
)}
|
| 481 |
+
|
| 482 |
+
{liveAIError && (
|
| 483 |
+
<div className="text-sm text-red-600 bg-red-50 border border-red-200 rounded-lg p-3 text-center">
|
| 484 |
+
{liveAIError}
|
| 485 |
+
</div>
|
| 486 |
+
)}
|
| 487 |
<button className="w-full flex items-center justify-center gap-2 px-4 py-3 bg-gray-600 text-white rounded-lg font-semibold hover:bg-slate-700 transition-colors">
|
| 488 |
Next
|
| 489 |
<ArrowRight className="w-4 h-4" />
|
src/utils/filterUtils.ts
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* Apply green filter to an image using Canvas API.
|
| 3 |
+
* Matches the CSS filter: saturate(0.3) hue-rotate(120deg) brightness(1.1)
|
| 4 |
+
* applied to the live video feed.
|
| 5 |
+
*/
|
| 6 |
+
|
| 7 |
+
/**
|
| 8 |
+
* Applies the same pixel-level transformation as the live-feed CSS filter:
|
| 9 |
+
* saturate(0.3) → hue-rotate(120deg) → brightness(1.1)
|
| 10 |
+
*
|
| 11 |
+
* We compose the three standard color-matrix transforms in one pass.
|
| 12 |
+
*/
|
| 13 |
+
function applyLiveFeedFilter(data: Uint8ClampedArray): void {
|
| 14 |
+
// --- 1. Saturate(0.3) color matrix ---
|
| 15 |
+
// Standard W3C/SVG saturate matrix with s = 0.3
|
| 16 |
+
const s = 0.3;
|
| 17 |
+
const sr = (1 - s) * 0.2126;
|
| 18 |
+
const sg = (1 - s) * 0.7152;
|
| 19 |
+
const sb = (1 - s) * 0.0722;
|
| 20 |
+
|
| 21 |
+
const sat = [
|
| 22 |
+
sr + s, sg, sb,
|
| 23 |
+
sr, sg + s, sb,
|
| 24 |
+
sr, sg, sb + s,
|
| 25 |
+
];
|
| 26 |
+
|
| 27 |
+
// --- 2. Hue-rotate(120deg) color matrix ---
|
| 28 |
+
// Standard hue-rotation matrix for 120°
|
| 29 |
+
const cos120 = -0.5;
|
| 30 |
+
const sin120 = Math.sqrt(3) / 2; // ≈ 0.866
|
| 31 |
+
|
| 32 |
+
// W3C hue-rotate matrix components
|
| 33 |
+
const hue = [
|
| 34 |
+
0.213 + cos120 * 0.787 - sin120 * 0.213,
|
| 35 |
+
0.715 - cos120 * 0.715 - sin120 * 0.715,
|
| 36 |
+
0.072 - cos120 * 0.072 + sin120 * 0.928,
|
| 37 |
+
|
| 38 |
+
0.213 - cos120 * 0.213 + sin120 * 0.143,
|
| 39 |
+
0.715 + cos120 * 0.285 + sin120 * 0.140,
|
| 40 |
+
0.072 - cos120 * 0.072 - sin120 * 0.283,
|
| 41 |
+
|
| 42 |
+
0.213 - cos120 * 0.213 - sin120 * 0.787,
|
| 43 |
+
0.715 - cos120 * 0.715 + sin120 * 0.715,
|
| 44 |
+
0.072 + cos120 * 0.928 + sin120 * 0.072,
|
| 45 |
+
];
|
| 46 |
+
|
| 47 |
+
// --- 3. Compose sat × hue into one 3×3 matrix ---
|
| 48 |
+
// result[row*3+col] = sum_k sat[row*3+k] * hue[k*3+col]
|
| 49 |
+
const m = new Array(9).fill(0);
|
| 50 |
+
for (let row = 0; row < 3; row++) {
|
| 51 |
+
for (let col = 0; col < 3; col++) {
|
| 52 |
+
for (let k = 0; k < 3; k++) {
|
| 53 |
+
m[row * 3 + col] += sat[row * 3 + k] * hue[k * 3 + col];
|
| 54 |
+
}
|
| 55 |
+
}
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
// --- 4. Brightness(1.1): just multiply final result by 1.1 ---
|
| 59 |
+
const brightness = 1.1;
|
| 60 |
+
|
| 61 |
+
for (let i = 0; i < data.length; i += 4) {
|
| 62 |
+
const r = data[i];
|
| 63 |
+
const g = data[i + 1];
|
| 64 |
+
const b = data[i + 2];
|
| 65 |
+
|
| 66 |
+
data[i] = Math.min(255, Math.max(0, Math.round((m[0] * r + m[1] * g + m[2] * b) * brightness)));
|
| 67 |
+
data[i + 1] = Math.min(255, Math.max(0, Math.round((m[3] * r + m[4] * g + m[5] * b) * brightness)));
|
| 68 |
+
data[i + 2] = Math.min(255, Math.max(0, Math.round((m[6] * r + m[7] * g + m[8] * b) * brightness)));
|
| 69 |
+
// Alpha unchanged
|
| 70 |
+
}
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
/**
|
| 74 |
+
* Apply green filter to an image (async, loads from URL/dataURL).
|
| 75 |
+
* The result matches the live-feed CSS filter exactly.
|
| 76 |
+
*/
|
| 77 |
+
export async function applyGreenFilter(
|
| 78 |
+
imageSrc: string,
|
| 79 |
+
outputFormat: 'dataUrl' | 'blob' = 'dataUrl'
|
| 80 |
+
): Promise<string | Blob> {
|
| 81 |
+
return new Promise((resolve, reject) => {
|
| 82 |
+
const img = new Image();
|
| 83 |
+
// Only set crossOrigin for remote http URLs; blob:// and data:// URLs
|
| 84 |
+
// don't support the CORS attribute and will fail silently if it is set.
|
| 85 |
+
if (imageSrc.startsWith('http')) {
|
| 86 |
+
img.crossOrigin = 'anonymous';
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
img.onload = () => {
|
| 90 |
+
try {
|
| 91 |
+
const canvas = document.createElement('canvas');
|
| 92 |
+
canvas.width = img.width;
|
| 93 |
+
canvas.height = img.height;
|
| 94 |
+
|
| 95 |
+
const ctx = canvas.getContext('2d');
|
| 96 |
+
if (!ctx) {
|
| 97 |
+
reject(new Error('Could not get canvas context'));
|
| 98 |
+
return;
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
ctx.drawImage(img, 0, 0);
|
| 102 |
+
|
| 103 |
+
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
|
| 104 |
+
applyLiveFeedFilter(imageData.data);
|
| 105 |
+
ctx.putImageData(imageData, 0, 0);
|
| 106 |
+
|
| 107 |
+
if (outputFormat === 'blob') {
|
| 108 |
+
canvas.toBlob(
|
| 109 |
+
(blob) => {
|
| 110 |
+
if (blob) {
|
| 111 |
+
resolve(blob);
|
| 112 |
+
} else {
|
| 113 |
+
reject(new Error('Could not convert canvas to blob'));
|
| 114 |
+
}
|
| 115 |
+
},
|
| 116 |
+
'image/png'
|
| 117 |
+
);
|
| 118 |
+
} else {
|
| 119 |
+
resolve(canvas.toDataURL('image/png'));
|
| 120 |
+
}
|
| 121 |
+
} catch (error) {
|
| 122 |
+
reject(error);
|
| 123 |
+
}
|
| 124 |
+
};
|
| 125 |
+
|
| 126 |
+
img.onerror = () => {
|
| 127 |
+
reject(new Error(`Failed to load image: ${imageSrc}`));
|
| 128 |
+
};
|
| 129 |
+
|
| 130 |
+
img.src = imageSrc;
|
| 131 |
+
});
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
/**
|
| 135 |
+
* Apply green filter to an already-drawn canvas (e.g. captured video frame).
|
| 136 |
+
* Matches the live-feed CSS filter: saturate(0.3) hue-rotate(120deg) brightness(1.1)
|
| 137 |
+
*/
|
| 138 |
+
export function applyGreenFilterToCanvas(
|
| 139 |
+
canvas: HTMLCanvasElement,
|
| 140 |
+
ctx: CanvasRenderingContext2D
|
| 141 |
+
): void {
|
| 142 |
+
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
|
| 143 |
+
applyLiveFeedFilter(imageData.data);
|
| 144 |
+
ctx.putImageData(imageData, 0, 0);
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
/**
|
| 148 |
+
* Check if an image source is valid
|
| 149 |
+
*/
|
| 150 |
+
export function isValidImageSource(src: string): boolean {
|
| 151 |
+
return !!(src && (src.startsWith('data:') || src.startsWith('http') || src.startsWith('/')));
|
| 152 |
+
}
|