M Bilal Naeem committed
Commit edb77cb · 1 Parent(s): adf0afd

final tuning for best results

.gitattributes CHANGED
@@ -19,7 +19,6 @@
 *.pb filter=lfs diff=lfs merge=lfs -text
 *.pickle filter=lfs diff=lfs merge=lfs -text
 *.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
 *.safetensors filter=lfs diff=lfs merge=lfs -text
.gitignore CHANGED
@@ -25,7 +25,7 @@ logs/
 *.swo
 
 # Ignore dataset and large model files
-./app/models/
+app/models/
 
 # Ignore temp files generated during inference
 temp_*.png
app/__pycache__/main.cpython-39.pyc CHANGED
Binary files a/app/__pycache__/main.cpython-39.pyc and b/app/__pycache__/main.cpython-39.pyc differ
 
app/main.py CHANGED
@@ -18,7 +18,7 @@ logger = logging.getLogger(__name__)
 # Enable CORS
 app.add_middleware(
     CORSMiddleware,
-    allow_origins=["https://ai-powered-skin-care-ecommerce.vercel.app"],
+    allow_origins=["https://ai-powered-skin-care-ecommerce.vercel.app","http://localhost:5173"],
     allow_credentials=True,
     allow_methods=["*"],
     allow_headers=["*"],
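
The added origin `http://localhost:5173` is Vite's default dev-server address, so the deployed API now also accepts requests from a local frontend during development. If the list keeps growing, the origins could be read from the environment rather than hard-coded; a minimal sketch of that variant (the `ALLOWED_ORIGINS` variable name is an assumption, not something this repo defines):

```python
import os

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# Comma-separated list of allowed origins, e.g.
# ALLOWED_ORIGINS="https://ai-powered-skin-care-ecommerce.vercel.app,http://localhost:5173"
origins = [
    o.strip()
    for o in os.getenv("ALLOWED_ORIGINS", "http://localhost:5173").split(",")
    if o.strip()
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
```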
app/routes/__pycache__/__init__.cpython-39.pyc CHANGED
Binary files a/app/routes/__pycache__/__init__.cpython-39.pyc and b/app/routes/__pycache__/__init__.cpython-39.pyc differ
 
app/routes/__pycache__/acne.cpython-39.pyc CHANGED
Binary files a/app/routes/__pycache__/acne.cpython-39.pyc and b/app/routes/__pycache__/acne.cpython-39.pyc differ
 
app/routes/__pycache__/inpainting.cpython-39.pyc CHANGED
Binary files a/app/routes/__pycache__/inpainting.cpython-39.pyc and b/app/routes/__pycache__/inpainting.cpython-39.pyc differ
 
app/routes/__pycache__/puffy_eyes.cpython-39.pyc CHANGED
Binary files a/app/routes/__pycache__/puffy_eyes.cpython-39.pyc and b/app/routes/__pycache__/puffy_eyes.cpython-39.pyc differ
 
app/services/__pycache__/__init__.cpython-39.pyc CHANGED
Binary files a/app/services/__pycache__/__init__.cpython-39.pyc and b/app/services/__pycache__/__init__.cpython-39.pyc differ
 
app/services/__pycache__/acne_service.cpython-39.pyc CHANGED
Binary files a/app/services/__pycache__/acne_service.cpython-39.pyc and b/app/services/__pycache__/acne_service.cpython-39.pyc differ
 
app/services/__pycache__/inpainting_service.cpython-39.pyc CHANGED
Binary files a/app/services/__pycache__/inpainting_service.cpython-39.pyc and b/app/services/__pycache__/inpainting_service.cpython-39.pyc differ
 
app/services/__pycache__/puffy_eyes_service.cpython-39.pyc CHANGED
Binary files a/app/services/__pycache__/puffy_eyes_service.cpython-39.pyc and b/app/services/__pycache__/puffy_eyes_service.cpython-39.pyc differ
 
app/services/acne_service.py CHANGED
@@ -13,7 +13,7 @@ MODEL_PATH = "app/models/acne_model.pt"
 model = AutoDetectionModel.from_pretrained(
     model_type='yolov8',
     model_path=MODEL_PATH,
-    confidence_threshold=0.3,
+    confidence_threshold=0.25,
     device="cpu"
 )
 
@@ -26,11 +26,11 @@ def predict_acne(image: Image.Image):
 
 
     # Dynamically calculate slice size (approx. 1/3rd of the image dimension)
-    slice_height = 256
-    slice_width = 256
+    slice_height = 320
+    slice_width = 320
     # Dynamically calculate overlap (e.g., 20% of slice size as ratio to image size)
-    overlap_height_ratio = 0.2
-    overlap_width_ratio = 0.2
+    overlap_height_ratio = 0.3
+    overlap_width_ratio = 0.3
 
     logging.info(f"[SAHI] Using slice size ({slice_width}x{slice_height}) with overlap ({overlap_width_ratio:.2f}, {overlap_height_ratio:.2f})")
 
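The tuning lowers the detector's confidence threshold from 0.3 to 0.25 and enlarges the SAHI tiles from 256 px to 320 px with 30% overlap, trading some speed for better recall on small lesions. The hunk only shows the constants; a minimal sketch of how such values are typically passed to SAHI's sliced inference (the actual call site inside `predict_acne` may differ):

```python
from PIL import Image
from sahi import AutoDetectionModel
from sahi.predict import get_sliced_prediction

# Same setup as app/services/acne_service.py after this commit
model = AutoDetectionModel.from_pretrained(
    model_type="yolov8",
    model_path="app/models/acne_model.pt",
    confidence_threshold=0.25,
    device="cpu",
)

image = Image.open("face.jpg")  # hypothetical input image

# Slice the image into 320x320 tiles with 30% overlap, run the detector on
# each tile, and merge the per-tile boxes back into full-image coordinates.
result = get_sliced_prediction(
    image,
    model,
    slice_height=320,
    slice_width=320,
    overlap_height_ratio=0.3,
    overlap_width_ratio=0.3,
)

for pred in result.object_prediction_list:
    print(pred.bbox.to_xyxy(), pred.score.value)
```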
app/services/puffy_eyes_service.py CHANGED
@@ -10,11 +10,10 @@ model = YOLO(MODEL_PATH)
 
 # Set confidence threshold
 CONFIDENCE_THRESHOLD = 0.25
-MAX_SHOW = 2  # only show up to 2 detections
+MAX_SHOW = 2  # Only allow 2 detections
 
 def predict_puffy_eyes(image: Image):
-    """Run YOLO model on the input image, draw up to MAX_SHOW highest-confidence boxes,
-    and return detections + processed image plus max_confidence if >2 initial boxes."""
+    """Run YOLO model, only return if exactly 2 detections."""
 
     # Run YOLO detection
     results = model(image, conf=CONFIDENCE_THRESHOLD)
@@ -27,30 +26,31 @@ def predict_puffy_eyes(image: Image):
         x1, y1, x2, y2 = [float(c) for c in box.xyxy[0]]
         all_dets.append({"confidence": conf, "bbox": [x1, y1, x2, y2]})
 
-    # Sort by confidence descending
+    # Only continue if exactly 2 detections
+    if len(all_dets) != 2:
+        logging.info(f"[Puffy Eyes] Skipping: found {len(all_dets)} detections (need exactly 2)")
+        return {
+            "detections": [],
+            "labeled_image": None,
+            "max_confidence": None,
+            "message": f"Expected 2 puffy eye detections, but found {len(all_dets)}"
+        }
+
+    # Sort by confidence
     all_dets.sort(key=lambda d: d["confidence"], reverse=True)
 
-    # Keep only top MAX_SHOW
-    keep = all_dets[:MAX_SHOW]
-    extra = all_dets[MAX_SHOW:]
-    if extra:
-        max_conf = extra[0]["confidence"]
-    else:
-        max_conf = None
-
-    # Draw only kept boxes
+    # Draw the two boxes
     draw = ImageDraw.Draw(image)
-    for det in keep:
+    for det in all_dets:
         x1, y1, x2, y2 = det["bbox"]
         draw.rectangle([x1, y1, x2, y2], outline="red", width=3)
 
-
     # Encode annotated image
     buf = io.BytesIO()
     image.save(buf, format="JPEG")
     encoded_image = base64.b64encode(buf.getvalue()).decode("utf-8")
 
-    out = {"detections": keep, "labeled_image": encoded_image}
-    if max_conf is not None:
-        out["max_confidence"] = max_conf
-    return out
+    return {
+        "detections": all_dets,
+        "labeled_image": encoded_image
+    }
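
This refactor changes the function's contract: instead of keeping the top `MAX_SHOW` boxes and reporting `max_confidence` for any overflow, it returns an empty result unless exactly two boxes are detected, and a successful response now carries only `detections` and `labeled_image` (`MAX_SHOW` is kept but the check uses the literal `2`, so the constant is informational only). Callers therefore need to branch on the empty case; a minimal, hypothetical sketch of consuming the new return value:

```python
from PIL import Image

from app.services.puffy_eyes_service import predict_puffy_eyes

image = Image.open("face.jpg")  # hypothetical input image
result = predict_puffy_eyes(image)

if not result["detections"]:
    # Fewer or more than two boxes were found; "message" explains why.
    print(result.get("message", "No puffy-eye detections"))
else:
    # Exactly two detections, sorted by confidence, plus a base64-encoded JPEG.
    for det in result["detections"]:
        print(det["confidence"], det["bbox"])
    labeled_b64 = result["labeled_image"]
```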
app/services/skin_analysis_service.py CHANGED
@@ -16,7 +16,7 @@ from app.services.acne_severity_service import predict_acne_severity
 from app.services.skin_type_service import predict_skin_type
 
 # Define the size to which images will be resized for model input
-MODEL_INPUT_SIZE = (512, 512)
+MODEL_INPUT_SIZE = (640, 640)
 
 def skin_analysis(image: Image.Image) -> Dict[str, Any]:
     try:
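
Raising `MODEL_INPUT_SIZE` from 512x512 to 640x640 matches YOLOv8's default inference resolution, so images are downscaled less before analysis. The resize call itself is outside this hunk; a minimal sketch of the kind of preprocessing this constant usually drives (the helper name is hypothetical, not taken from the repo):

```python
from PIL import Image

MODEL_INPUT_SIZE = (640, 640)

def prepare_for_model(image: Image.Image) -> Image.Image:
    """Hypothetical helper: normalize mode and resize to the model input size."""
    return image.convert("RGB").resize(MODEL_INPUT_SIZE)
```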