HardikUppal committed on
Commit
953a2bf
·
1 Parent(s): 89aada1

clean up changes

Browse files
Files changed (8) hide show
  1. src/analyze.py +17 -14
  2. src/app.py +0 -105
  3. src/eyes_utils.py +1 -1
  4. src/gradio_app.py +0 -105
  5. src/hair_utils.py +1 -1
  6. src/image.py +24 -8
  7. src/main.py +37 -24
  8. src/skin_utils.py +3 -2
src/analyze.py CHANGED
@@ -3,10 +3,10 @@ import numpy as np
3
  import math
4
  from sklearn.cluster import KMeans
5
  from collections import Counter
6
- from color_utils import rgb_to_hex, hex_to_bgr, hex_to_rgb
7
- from hair_utils import HairColorPalette
8
- from skin_utils import SkinTonePalette
9
- from eyes_utils import EyeColorPalette
10
  from sklearn.metrics import silhouette_score
11
  import matplotlib.pyplot as plt
12
 
@@ -308,7 +308,7 @@ def create_combined_overlay(image, hair_mask, skin_mask, eye_mask):
308
  # Color the masks
309
  hair_overlay[hair_mask > 0] = [0, 255, 0] # Green for hair
310
  skin_overlay[skin_mask > 0] = [255, 0, 0] # Red for skin
311
- eye_overlay[eye_mask > 0] = [0, 0, 255] # Blue for eyes
312
 
313
  # Combine the overlays with the original image
314
  combined_overlay = cv2.addWeighted(image, 0.8, hair_overlay, 0.2, 0)
@@ -318,7 +318,6 @@ def create_combined_overlay(image, hair_mask, skin_mask, eye_mask):
318
  return combined_overlay
319
 
320
 
321
-
322
  def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
323
  image_np = np.array(image)
324
  hair_mask_np = np.array(hair_mask)
@@ -352,7 +351,9 @@ def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
352
  # Calculate ITA for the dominant skin color
353
  dominant_skin_color = skin_dominant_colors[0]
354
  ita = skin_palette.calculate_ita(dominant_skin_color)
355
- vectorscope_check = skin_palette.is_within_vectorscope_skin_tone_line(dominant_skin_color)
 
 
356
 
357
  eye_palette = EyeColorPalette()
358
  eye_dominant_colors, eye_dominant_percentages = get_dominant_colors(
@@ -362,7 +363,9 @@ def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
362
  eye_dominant_colors, eye_palette.palette
363
  )
364
 
365
- combined_overlay = create_combined_overlay(image_np, hair_mask_np, skin_mask_np, eye_mask_np)
 
 
366
 
367
  bar_width = 50
368
  hair_color_bar = create_dominant_color_bar(
@@ -379,19 +382,19 @@ def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
379
  image_np,
380
  list(hair_palette.palette.keys()).index(hair_color),
381
  hair_palette.palette,
382
- bar_width
383
  )
384
  skin_palette_bar = create_tone_palette_bar(
385
  image_np,
386
  list(skin_palette.palette.keys()).index(skin_color),
387
  skin_palette.palette,
388
- bar_width
389
  )
390
  eye_palette_bar = create_tone_palette_bar(
391
  image_np,
392
  list(eye_palette.palette.keys()).index(eye_color),
393
  eye_palette.palette,
394
- bar_width
395
  )
396
 
397
  output_image = np.hstack(
@@ -411,14 +414,14 @@ def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
411
  hair_dominant_percentages,
412
  hair_hex,
413
  hair_distance,
414
- img_shape
415
  )
416
  msg_bar_skin = create_message_bar(
417
  skin_dominant_colors,
418
  skin_dominant_percentages,
419
  skin_hex,
420
  skin_distance,
421
- img_shape
422
  )
423
  msg_bar_eye = create_message_bar(
424
  eye_dominant_colors, eye_dominant_percentages, eye_hex, eye_distance, img_shape
@@ -452,4 +455,4 @@ def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
452
  },
453
  }
454
 
455
- return output_image, analysis_record
 
3
  import math
4
  from sklearn.cluster import KMeans
5
  from collections import Counter
6
+ from src.color_utils import rgb_to_hex, hex_to_bgr, hex_to_rgb
7
+ from src.hair_utils import HairColorPalette
8
+ from src.skin_utils import SkinTonePalette
9
+ from src.eyes_utils import EyeColorPalette
10
  from sklearn.metrics import silhouette_score
11
  import matplotlib.pyplot as plt
12
 
 
308
  # Color the masks
309
  hair_overlay[hair_mask > 0] = [0, 255, 0] # Green for hair
310
  skin_overlay[skin_mask > 0] = [255, 0, 0] # Red for skin
311
+ eye_overlay[eye_mask > 0] = [0, 0, 255] # Blue for eyes
312
 
313
  # Combine the overlays with the original image
314
  combined_overlay = cv2.addWeighted(image, 0.8, hair_overlay, 0.2, 0)
 
318
  return combined_overlay
319
 
320
 
 
321
  def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
322
  image_np = np.array(image)
323
  hair_mask_np = np.array(hair_mask)
 
351
  # Calculate ITA for the dominant skin color
352
  dominant_skin_color = skin_dominant_colors[0]
353
  ita = skin_palette.calculate_ita(dominant_skin_color)
354
+ vectorscope_check = skin_palette.is_within_vectorscope_skin_tone_line(
355
+ dominant_skin_color
356
+ )
357
 
358
  eye_palette = EyeColorPalette()
359
  eye_dominant_colors, eye_dominant_percentages = get_dominant_colors(
 
363
  eye_dominant_colors, eye_palette.palette
364
  )
365
 
366
+ combined_overlay = create_combined_overlay(
367
+ image_np, hair_mask_np, skin_mask_np, eye_mask_np
368
+ )
369
 
370
  bar_width = 50
371
  hair_color_bar = create_dominant_color_bar(
 
382
  image_np,
383
  list(hair_palette.palette.keys()).index(hair_color),
384
  hair_palette.palette,
385
+ bar_width,
386
  )
387
  skin_palette_bar = create_tone_palette_bar(
388
  image_np,
389
  list(skin_palette.palette.keys()).index(skin_color),
390
  skin_palette.palette,
391
+ bar_width,
392
  )
393
  eye_palette_bar = create_tone_palette_bar(
394
  image_np,
395
  list(eye_palette.palette.keys()).index(eye_color),
396
  eye_palette.palette,
397
+ bar_width,
398
  )
399
 
400
  output_image = np.hstack(
 
414
  hair_dominant_percentages,
415
  hair_hex,
416
  hair_distance,
417
+ img_shape,
418
  )
419
  msg_bar_skin = create_message_bar(
420
  skin_dominant_colors,
421
  skin_dominant_percentages,
422
  skin_hex,
423
  skin_distance,
424
+ img_shape,
425
  )
426
  msg_bar_eye = create_message_bar(
427
  eye_dominant_colors, eye_dominant_percentages, eye_hex, eye_distance, img_shape
 
455
  },
456
  }
457
 
458
+ return output_image, analysis_record
src/app.py DELETED
@@ -1,105 +0,0 @@
1
- import gradio as gr
2
- import cv2
3
- import numpy as np
4
- from PIL import Image
5
-
6
- from skin_analyzer import analyze_skin_function
7
- from image import ImageBundle
8
-
9
-
10
- def process_image(
11
- image,
12
- l_min_skin,
13
- l_max_skin,
14
- l_min_tonality,
15
- l_max_tonality,
16
- chroma_thresh,
17
- analyze_skin,
18
- analyze_eyes,
19
- analyze_hair,
20
- ):
21
- """
22
- Process the uploaded image and return the analysis results based on selected analyses.
23
- :param image: Uploaded image.
24
- :param analyze_skin: Whether to perform skin analysis.
25
- :param analyze_eyes: Whether to perform eye analysis.
26
- :param analyze_hair: Whether to perform hair analysis.
27
- :return: Analysis results.
28
- """
29
- image_bundle = ImageBundle(image_array=image)
30
-
31
- # Detect faces and landmarks
32
- face_data = image_bundle.detect_faces_and_landmarks()
33
- landmarks = image_bundle.detect_face_landmarks()
34
-
35
- # Perform segmentation
36
- segmentation_maps = image_bundle.segment_image()
37
-
38
- analysis_results = {}
39
- # overlay_images = []
40
-
41
- # if analyze_hair:
42
- # hair_mask = segmentation_maps["hair_mask"]
43
- # # analysis_results['hair_analysis'] = analyze_hair_function(image, hair_mask)
44
- # overlay_images.append(segmentation_maps["hair_mask"])
45
-
46
- if analyze_skin:
47
- skin_mask = segmentation_maps["face_skin_mask"]
48
- skin_analysis = analyze_skin_function(
49
- image,
50
- skin_mask,
51
- l_min_skin,
52
- l_max_skin,
53
- l_min_tonality,
54
- l_max_tonality,
55
- chroma_thresh,
56
- )
57
- if "filtered_skin_mask" in skin_analysis:
58
- filtered_skin_mask = skin_analysis["filtered_skin_mask"]
59
- del skin_analysis["filtered_skin_mask"]
60
- analysis_results["skin_analysis"] = skin_analysis
61
-
62
- # overlay_images.append(skin_analysis["overlay_image"])
63
-
64
- # if analyze_eyes:
65
- # eye_mask = segmentation_maps["right_iris_mask"] | segmentation_maps["left_iris_mask"]
66
- # # analysis_results['eye_analysis'] = analyze_eye_function(image, eye_mask)
67
- # overlay_images.append(eye_mask)
68
-
69
- # Combine overlay images
70
- overlay = image.copy()
71
- overlay[filtered_skin_mask > 0] = (0, 0, 255) # Red for skin
72
- # overlay[cool_mask > 0] = (255, 0, 0) # Blue for cool
73
- # overlay[neutral_mask > 0] = (0, 255, 0) # Green for neutral
74
- overlay = cv2.addWeighted(image, 0.85, overlay, 0.15, 0)
75
-
76
- # Convert combined_overlay to PIL Image for display
77
- combined_overlay = Image.fromarray(overlay)
78
-
79
- return combined_overlay, analysis_results
80
-
81
-
82
- # Define Gradio interface
83
- iface = gr.Interface(
84
- fn=process_image,
85
- inputs=[
86
- gr.Image(type="numpy", label="Upload an Image"),
87
- gr.Slider(minimum=0, maximum=100, value=10, label="L% Min Skin"),
88
- gr.Slider(minimum=0, maximum=100, value=90, label="L% Max Skin"),
89
- gr.Slider(minimum=0, maximum=100, value=10, label="L% Min Tonality"),
90
- gr.Slider(minimum=0, maximum=100, value=90, label="L% Max Tonality"),
91
- gr.Slider(minimum=0, maximum=255, value=0, label="Chroma Threshold"),
92
- gr.Checkbox(label="Skin Analysis", value=True),
93
- gr.Checkbox(label="Eye Analysis", value=False),
94
- gr.Checkbox(label="Hair Analysis", value=False),
95
- ],
96
- outputs=[
97
- gr.Image(type="pil", label="Processed Image"),
98
- gr.JSON(label="Analysis Results"),
99
- ],
100
- title="Color Palette Analysis",
101
- description="Upload an image to analyze the skin, hair, and eye colors. Select the analyses you want to perform.",
102
- )
103
-
104
- # Launch the Gradio interface
105
- iface.launch(share=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/eyes_utils.py CHANGED
@@ -2,7 +2,7 @@ from PIL import Image
2
  import numpy as np
3
  from sklearn.cluster import KMeans
4
  from collections import Counter
5
- from color_utils import calculate_color_distance_lab
6
 
7
 
8
  class EyeColorPalette:
 
2
  import numpy as np
3
  from sklearn.cluster import KMeans
4
  from collections import Counter
5
+ from src.color_utils import calculate_color_distance_lab
6
 
7
 
8
  class EyeColorPalette:
src/gradio_app.py DELETED
@@ -1,105 +0,0 @@
1
- import gradio as gr
2
- import cv2
3
- import numpy as np
4
- from PIL import Image
5
-
6
- from skin_analyzer import analyze_skin_function
7
- from image import ImageBundle
8
-
9
-
10
- def process_image(
11
- image,
12
- l_min_skin,
13
- l_max_skin,
14
- l_min_tonality,
15
- l_max_tonality,
16
- chroma_thresh,
17
- analyze_skin,
18
- analyze_eyes,
19
- analyze_hair,
20
- ):
21
- """
22
- Process the uploaded image and return the analysis results based on selected analyses.
23
- :param image: Uploaded image.
24
- :param analyze_skin: Whether to perform skin analysis.
25
- :param analyze_eyes: Whether to perform eye analysis.
26
- :param analyze_hair: Whether to perform hair analysis.
27
- :return: Analysis results.
28
- """
29
- image_bundle = ImageBundle(image_array=image)
30
-
31
- # Detect faces and landmarks
32
- face_data = image_bundle.detect_faces_and_landmarks()
33
- landmarks = image_bundle.detect_face_landmarks()
34
-
35
- # Perform segmentation
36
- segmentation_maps = image_bundle.segment_image()
37
-
38
- analysis_results = {}
39
- # overlay_images = []
40
-
41
- # if analyze_hair:
42
- # hair_mask = segmentation_maps["hair_mask"]
43
- # # analysis_results['hair_analysis'] = analyze_hair_function(image, hair_mask)
44
- # overlay_images.append(segmentation_maps["hair_mask"])
45
-
46
- if analyze_skin:
47
- skin_mask = segmentation_maps["face_skin_mask"]
48
- skin_analysis = analyze_skin_function(
49
- image,
50
- skin_mask,
51
- l_min_skin,
52
- l_max_skin,
53
- l_min_tonality,
54
- l_max_tonality,
55
- chroma_thresh,
56
- )
57
- if "filtered_skin_mask" in skin_analysis:
58
- filtered_skin_mask = skin_analysis["filtered_skin_mask"]
59
- del skin_analysis["filtered_skin_mask"]
60
- analysis_results["skin_analysis"] = skin_analysis
61
-
62
- # overlay_images.append(skin_analysis["overlay_image"])
63
-
64
- # if analyze_eyes:
65
- # eye_mask = segmentation_maps["right_iris_mask"] | segmentation_maps["left_iris_mask"]
66
- # # analysis_results['eye_analysis'] = analyze_eye_function(image, eye_mask)
67
- # overlay_images.append(eye_mask)
68
-
69
- # Combine overlay images
70
- overlay = image.copy()
71
- overlay[filtered_skin_mask > 0] = (0, 0, 255) # Red for skin
72
- # overlay[cool_mask > 0] = (255, 0, 0) # Blue for cool
73
- # overlay[neutral_mask > 0] = (0, 255, 0) # Green for neutral
74
- overlay = cv2.addWeighted(image, 0.85, overlay, 0.15, 0)
75
-
76
- # Convert combined_overlay to PIL Image for display
77
- combined_overlay = Image.fromarray(overlay)
78
-
79
- return combined_overlay, analysis_results
80
-
81
-
82
- # Define Gradio interface
83
- iface = gr.Interface(
84
- fn=process_image,
85
- inputs=[
86
- gr.Image(type="numpy", label="Upload an Image"),
87
- gr.Slider(minimum=0, maximum=100, value=10, label="L% Min Skin"),
88
- gr.Slider(minimum=0, maximum=100, value=90, label="L% Max Skin"),
89
- gr.Slider(minimum=0, maximum=100, value=10, label="L% Min Tonality"),
90
- gr.Slider(minimum=0, maximum=100, value=90, label="L% Max Tonality"),
91
- gr.Slider(minimum=0, maximum=255, value=0, label="Chroma Threshold"),
92
- gr.Checkbox(label="Skin Analysis", value=True),
93
- gr.Checkbox(label="Eye Analysis", value=False),
94
- gr.Checkbox(label="Hair Analysis", value=False),
95
- ],
96
- outputs=[
97
- gr.Image(type="pil", label="Processed Image"),
98
- gr.JSON(label="Analysis Results"),
99
- ],
100
- title="Color Palette Analysis",
101
- description="Upload an image to analyze the skin, hair, and eye colors. Select the analyses you want to perform.",
102
- )
103
-
104
- # Launch the Gradio interface
105
- iface.launch(share=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/hair_utils.py CHANGED
@@ -2,7 +2,7 @@ from PIL import Image
2
  import numpy as np
3
  from sklearn.cluster import KMeans
4
  from collections import Counter
5
- from color_utils import calculate_color_distance_lab
6
 
7
 
8
  class HairColorPalette:
 
2
  import numpy as np
3
  from sklearn.cluster import KMeans
4
  from collections import Counter
5
+ from src.color_utils import calculate_color_distance_lab
6
 
7
 
8
  class HairColorPalette:
src/image.py CHANGED
@@ -5,14 +5,20 @@ from PIL import Image, ExifTags, ImageCms
5
  from io import BytesIO
6
  import requests
7
  import pillow_heif
8
- from segmentation_utils import detect_faces_and_landmarks, detect_face_landmarks, mediapipe_selfie_segmentor, create_feature_masks
9
- from utils import is_url, extract_filename_and_extension
 
 
 
 
 
10
 
11
  # Register HEIF opener
12
  pillow_heif.register_heif_opener()
13
 
14
  LOG = logging.getLogger(__name__)
15
 
 
16
  class ImageBundle:
17
  def __init__(self, image_source=None, image_array=None):
18
  """
@@ -149,21 +155,31 @@ class ImageBundle:
149
  """
150
  image_np = self.numpy_image()
151
  # save image to check bgr or rgb
152
- cv2.imwrite('workspace/image.jpg', image_np)
153
  masks = mediapipe_selfie_segmentor(image_np)
154
-
155
  # Detect face landmarks and create masks for individual features
156
  landmarks = self.detect_face_landmarks()
157
  feature_masks = create_feature_masks(image_np, landmarks)
158
 
159
  # Subtract feature masks from face skin mask
160
- for feature in ["lips_mask", "left_eyebrow_mask", "right_eyebrow_mask", "left_eye_mask", "right_eye_mask", "left_iris_mask", "right_iris_mask"]:
 
 
 
 
 
 
 
 
161
  if "iris" in feature:
162
  masks[feature] = feature_masks[feature]
163
- masks["face_skin_mask"] = cv2.subtract(masks["face_skin_mask"], feature_masks[feature])
 
 
164
 
165
  self.segmentation_maps = masks
166
-
167
  return self.segmentation_maps
168
 
169
  def numpy_image(self):
@@ -176,4 +192,4 @@ class ImageBundle:
176
  image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
177
  # import IPython; IPython.embed()
178
  # image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
179
- return image
 
5
  from io import BytesIO
6
  import requests
7
  import pillow_heif
8
+ from src.segmentation_utils import (
9
+ detect_faces_and_landmarks,
10
+ detect_face_landmarks,
11
+ mediapipe_selfie_segmentor,
12
+ create_feature_masks,
13
+ )
14
+ from src.utils import is_url, extract_filename_and_extension
15
 
16
  # Register HEIF opener
17
  pillow_heif.register_heif_opener()
18
 
19
  LOG = logging.getLogger(__name__)
20
 
21
+
22
  class ImageBundle:
23
  def __init__(self, image_source=None, image_array=None):
24
  """
 
155
  """
156
  image_np = self.numpy_image()
157
  # save image to check bgr or rgb
158
+ cv2.imwrite("workspace/image.jpg", image_np)
159
  masks = mediapipe_selfie_segmentor(image_np)
160
+
161
  # Detect face landmarks and create masks for individual features
162
  landmarks = self.detect_face_landmarks()
163
  feature_masks = create_feature_masks(image_np, landmarks)
164
 
165
  # Subtract feature masks from face skin mask
166
+ for feature in [
167
+ "lips_mask",
168
+ "left_eyebrow_mask",
169
+ "right_eyebrow_mask",
170
+ "left_eye_mask",
171
+ "right_eye_mask",
172
+ "left_iris_mask",
173
+ "right_iris_mask",
174
+ ]:
175
  if "iris" in feature:
176
  masks[feature] = feature_masks[feature]
177
+ masks["face_skin_mask"] = cv2.subtract(
178
+ masks["face_skin_mask"], feature_masks[feature]
179
+ )
180
 
181
  self.segmentation_maps = masks
182
+
183
  return self.segmentation_maps
184
 
185
  def numpy_image(self):
 
192
  image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
193
  # import IPython; IPython.embed()
194
  # image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
195
+ return image
src/main.py CHANGED
@@ -4,15 +4,20 @@ from typing import Union
4
  from argparse import ArgumentParser
5
  import cv2
6
  from PIL import Image
7
- from analyze import analyze_and_visualize
8
- from skin_analyzer import analyze_skin_function
9
- from image import ImageBundle
 
10
  import os
11
  import numpy as np
12
  import timeit
 
13
  LOG = logging.getLogger(__name__)
14
 
15
- def process_image(filename_or_url: Union[str, Path], analyze_skin, analyze_eyes, analyze_hair):
 
 
 
16
  """
17
  Process the image and return the result.
18
  :param filename_or_url: The filename (in local devices) or URL (in Internet) of the image.
@@ -26,40 +31,40 @@ def process_image(filename_or_url: Union[str, Path], analyze_skin, analyze_eyes,
26
  start = timeit.default_timer()
27
  image_bundle = ImageBundle(image_source=filename_or_url)
28
  stop = timeit.default_timer()
29
- print('load image time: ', stop - start)
30
-
31
  # Detect faces and landmarks
32
  start = timeit.default_timer()
33
  face_data = image_bundle.detect_faces_and_landmarks()
34
  stop = timeit.default_timer()
35
- print('detect faces and landmarks time: ', stop - start)
36
  start = timeit.default_timer()
37
  landmarks = image_bundle.detect_face_landmarks()
38
  stop = timeit.default_timer()
39
- print('detect face landmarks time: ', stop - start)
40
 
41
  start = timeit.default_timer()
42
  # Perform segmentation
43
  segmentation_maps = image_bundle.segment_image()
44
  stop = timeit.default_timer()
45
- print('segment image time: ', stop - start)
46
-
47
  analysis_results = {}
48
  overlay_images = []
49
 
50
  image_np = image_bundle.numpy_image()
51
- print('image_np shape: ', image_np.shape)
52
  # import IPython; IPython.embed()
53
 
54
  if analyze_hair:
55
  hair_mask = segmentation_maps["hair_mask"]
56
  hair_analysis = analyze_hair_function(image_np, hair_mask)
57
- analysis_results['hair_analysis'] = hair_analysis
58
  overlay_images.append(segmentation_maps["hair_mask"])
59
 
60
  if analyze_skin:
61
  skin_mask = segmentation_maps["face_skin_mask"]
62
-
63
  skin_analysis = analyze_skin_function(image_np, skin_mask)
64
  overlay_images.append(skin_analysis["overlay_image"])
65
  if "overlay_image" in skin_analysis.keys():
@@ -68,15 +73,16 @@ def process_image(filename_or_url: Union[str, Path], analyze_skin, analyze_eyes,
68
  del skin_analysis["filtered_skin_mask"]
69
  if "dominant_skin_tones" in skin_analysis.keys():
70
  del skin_analysis["dominant_skin_tones"]
71
- analysis_results['skin_analysis'] = skin_analysis
72
-
73
- # del overlay_images[-1]
74
 
 
75
 
76
  if analyze_eyes:
77
- eye_mask = segmentation_maps["right_eye_mask"] | segmentation_maps["left_eye_mask"]
 
 
78
  eye_analysis = analyze_eye_function(image_np, eye_mask)
79
- analysis_results['eye_analysis'] = eye_analysis
80
  overlay_images.append(segmentation_maps["right_eye_mask"])
81
 
82
  # Combine overlay images
@@ -90,12 +96,19 @@ def process_image(filename_or_url: Union[str, Path], analyze_skin, analyze_eyes,
90
 
91
  return combined_overlay_bgr, analysis_results
92
 
 
93
  if __name__ == "__main__":
94
  parser = ArgumentParser()
95
  parser.add_argument("-i", "--image_path", required=True, help="Path to the image")
96
- parser.add_argument("--analyze_skin", action="store_true", help="Perform skin analysis")
97
- parser.add_argument("--analyze_eyes", action="store_true", help="Perform eye analysis")
98
- parser.add_argument("--analyze_hair", action="store_true", help="Perform hair analysis")
 
 
 
 
 
 
99
 
100
  args = parser.parse_args()
101
  image_path = args.image_path
@@ -105,7 +118,7 @@ if __name__ == "__main__":
105
  image_path,
106
  analyze_skin=args.analyze_skin,
107
  analyze_eyes=args.analyze_eyes,
108
- analyze_hair=args.analyze_hair
109
  )
110
 
111
  # Save the results
@@ -113,12 +126,12 @@ if __name__ == "__main__":
113
  overlay_image_path = f"outputs/overlay_{im_basename}.png"
114
  cv2.imwrite(overlay_image_path, combined_overlay)
115
  import json
 
116
  # Save the analysis results as JSON, including the path to the overlay image
117
  analysis_results["overlay_image_path"] = overlay_image_path
118
  print(analysis_results)
119
 
120
-
121
  with open(f"outputs/analysis_{im_basename}.json", "w") as f:
122
  json.dump(analysis_results, f, indent=4)
123
 
124
- print("Analysis complete. Results saved in 'outputs' directory.")
 
4
  from argparse import ArgumentParser
5
  import cv2
6
  from PIL import Image
7
+
8
+ # from src.analyze import analyze_and_visualize
9
+ from src.skin_analyzer import analyze_skin_function
10
+ from src.image import ImageBundle
11
  import os
12
  import numpy as np
13
  import timeit
14
+
15
  LOG = logging.getLogger(__name__)
16
 
17
+
18
+ def process_image(
19
+ filename_or_url: Union[str, Path], analyze_skin, analyze_eyes, analyze_hair
20
+ ):
21
  """
22
  Process the image and return the result.
23
  :param filename_or_url: The filename (in local devices) or URL (in Internet) of the image.
 
31
  start = timeit.default_timer()
32
  image_bundle = ImageBundle(image_source=filename_or_url)
33
  stop = timeit.default_timer()
34
+ print("load image time: ", stop - start)
35
+
36
  # Detect faces and landmarks
37
  start = timeit.default_timer()
38
  face_data = image_bundle.detect_faces_and_landmarks()
39
  stop = timeit.default_timer()
40
+ print("detect faces and landmarks time: ", stop - start)
41
  start = timeit.default_timer()
42
  landmarks = image_bundle.detect_face_landmarks()
43
  stop = timeit.default_timer()
44
+ print("detect face landmarks time: ", stop - start)
45
 
46
  start = timeit.default_timer()
47
  # Perform segmentation
48
  segmentation_maps = image_bundle.segment_image()
49
  stop = timeit.default_timer()
50
+ print("segment image time: ", stop - start)
51
+
52
  analysis_results = {}
53
  overlay_images = []
54
 
55
  image_np = image_bundle.numpy_image()
56
+ print("image_np shape: ", image_np.shape)
57
  # import IPython; IPython.embed()
58
 
59
  if analyze_hair:
60
  hair_mask = segmentation_maps["hair_mask"]
61
  hair_analysis = analyze_hair_function(image_np, hair_mask)
62
+ analysis_results["hair_analysis"] = hair_analysis
63
  overlay_images.append(segmentation_maps["hair_mask"])
64
 
65
  if analyze_skin:
66
  skin_mask = segmentation_maps["face_skin_mask"]
67
+
68
  skin_analysis = analyze_skin_function(image_np, skin_mask)
69
  overlay_images.append(skin_analysis["overlay_image"])
70
  if "overlay_image" in skin_analysis.keys():
 
73
  del skin_analysis["filtered_skin_mask"]
74
  if "dominant_skin_tones" in skin_analysis.keys():
75
  del skin_analysis["dominant_skin_tones"]
76
+ analysis_results["skin_analysis"] = skin_analysis
 
 
77
 
78
+ # del overlay_images[-1]
79
 
80
  if analyze_eyes:
81
+ eye_mask = (
82
+ segmentation_maps["right_eye_mask"] | segmentation_maps["left_eye_mask"]
83
+ )
84
  eye_analysis = analyze_eye_function(image_np, eye_mask)
85
+ analysis_results["eye_analysis"] = eye_analysis
86
  overlay_images.append(segmentation_maps["right_eye_mask"])
87
 
88
  # Combine overlay images
 
96
 
97
  return combined_overlay_bgr, analysis_results
98
 
99
+
100
  if __name__ == "__main__":
101
  parser = ArgumentParser()
102
  parser.add_argument("-i", "--image_path", required=True, help="Path to the image")
103
+ parser.add_argument(
104
+ "--analyze_skin", action="store_true", help="Perform skin analysis"
105
+ )
106
+ parser.add_argument(
107
+ "--analyze_eyes", action="store_true", help="Perform eye analysis"
108
+ )
109
+ parser.add_argument(
110
+ "--analyze_hair", action="store_true", help="Perform hair analysis"
111
+ )
112
 
113
  args = parser.parse_args()
114
  image_path = args.image_path
 
118
  image_path,
119
  analyze_skin=args.analyze_skin,
120
  analyze_eyes=args.analyze_eyes,
121
+ analyze_hair=args.analyze_hair,
122
  )
123
 
124
  # Save the results
 
126
  overlay_image_path = f"outputs/overlay_{im_basename}.png"
127
  cv2.imwrite(overlay_image_path, combined_overlay)
128
  import json
129
+
130
  # Save the analysis results as JSON, including the path to the overlay image
131
  analysis_results["overlay_image_path"] = overlay_image_path
132
  print(analysis_results)
133
 
 
134
  with open(f"outputs/analysis_{im_basename}.json", "w") as f:
135
  json.dump(analysis_results, f, indent=4)
136
 
137
+ print("Analysis complete. Results saved in 'outputs' directory.")
src/skin_utils.py CHANGED
@@ -2,9 +2,10 @@ from PIL import Image
2
  import numpy as np
3
  from sklearn.cluster import KMeans
4
  from collections import Counter
5
- from color_utils import calculate_color_distance_lab, rgb_to_lab
6
  import cv2
7
 
 
8
  class SkinTonePalette:
9
  def __init__(self):
10
  self.palette = {
@@ -62,4 +63,4 @@ class SkinTonePalette:
62
  def is_within_vectorscope_skin_tone_line(self, rgb_color):
63
  ycbcr_color = cv2.cvtColor(np.uint8([[rgb_color]]), cv2.COLOR_RGB2YCrCb)[0][0]
64
  cb, cr = ycbcr_color[1], ycbcr_color[2]
65
- return 80 <= cb <= 120 and 133 <= cr <= 173
 
2
  import numpy as np
3
  from sklearn.cluster import KMeans
4
  from collections import Counter
5
+ from src.color_utils import calculate_color_distance_lab, rgb_to_lab
6
  import cv2
7
 
8
+
9
  class SkinTonePalette:
10
  def __init__(self):
11
  self.palette = {
 
63
  def is_within_vectorscope_skin_tone_line(self, rgb_color):
64
  ycbcr_color = cv2.cvtColor(np.uint8([[rgb_color]]), cv2.COLOR_RGB2YCrCb)[0][0]
65
  cb, cr = ycbcr_color[1], ycbcr_color[2]
66
+ return 80 <= cb <= 120 and 133 <= cr <= 173