HardikUppal commited on
Commit
9751db3
·
1 Parent(s): 6e81f52

added gradio UI, removed deepface, added ITA logic

Browse files
Files changed (10) hide show
  1. .gitignore +1 -0
  2. =1.9.0 +37 -0
  3. requirements.txt +0 -109
  4. src/analyze.py +48 -42
  5. src/gradio_app.py +75 -0
  6. src/image.py +64 -59
  7. src/main.py +82 -79
  8. src/segmentation_utils.py +165 -29
  9. src/skin_analyzer.py +128 -0
  10. src/skin_utils.py +13 -100
.gitignore CHANGED
@@ -5,3 +5,4 @@ outputs/
5
  venv/
6
  workspace/
7
  *.pyc
 
 
5
  venv/
6
  workspace/
7
  *.pyc
8
+ temp.jpg
=1.9.0 ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Requirement already satisfied: tensorflow in ./venv/lib/python3.12/site-packages (2.16.1)
2
+ Requirement already satisfied: absl-py>=1.0.0 in ./venv/lib/python3.12/site-packages (from tensorflow) (2.1.0)
3
+ Requirement already satisfied: astunparse>=1.6.0 in ./venv/lib/python3.12/site-packages (from tensorflow) (1.6.3)
4
+ Requirement already satisfied: flatbuffers>=23.5.26 in ./venv/lib/python3.12/site-packages (from tensorflow) (24.3.25)
5
+ Requirement already satisfied: gast!=0.5.0,!=0.5.1,!=0.5.2,>=0.2.1 in ./venv/lib/python3.12/site-packages (from tensorflow) (0.5.4)
6
+ Requirement already satisfied: google-pasta>=0.1.1 in ./venv/lib/python3.12/site-packages (from tensorflow) (0.2.0)
7
+ Requirement already satisfied: h5py>=3.10.0 in ./venv/lib/python3.12/site-packages (from tensorflow) (3.11.0)
8
+ Requirement already satisfied: libclang>=13.0.0 in ./venv/lib/python3.12/site-packages (from tensorflow) (18.1.1)
9
+ Requirement already satisfied: ml-dtypes~=0.3.1 in ./venv/lib/python3.12/site-packages (from tensorflow) (0.3.2)
10
+ Requirement already satisfied: opt-einsum>=2.3.2 in ./venv/lib/python3.12/site-packages (from tensorflow) (3.3.0)
11
+ Requirement already satisfied: packaging in ./venv/lib/python3.12/site-packages (from tensorflow) (24.0)
12
+ Requirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.20.3 in ./venv/lib/python3.12/site-packages (from tensorflow) (4.25.3)
13
+ Requirement already satisfied: requests<3,>=2.21.0 in ./venv/lib/python3.12/site-packages (from tensorflow) (2.32.3)
14
+ Requirement already satisfied: setuptools in ./venv/lib/python3.12/site-packages (from tensorflow) (70.0.0)
15
+ Requirement already satisfied: six>=1.12.0 in ./venv/lib/python3.12/site-packages (from tensorflow) (1.16.0)
16
+ Requirement already satisfied: termcolor>=1.1.0 in ./venv/lib/python3.12/site-packages (from tensorflow) (2.4.0)
17
+ Requirement already satisfied: typing-extensions>=3.6.6 in ./venv/lib/python3.12/site-packages (from tensorflow) (4.12.1)
18
+ Requirement already satisfied: wrapt>=1.11.0 in ./venv/lib/python3.12/site-packages (from tensorflow) (1.16.0)
19
+ Requirement already satisfied: grpcio<2.0,>=1.24.3 in ./venv/lib/python3.12/site-packages (from tensorflow) (1.64.1)
20
+ Requirement already satisfied: tensorboard<2.17,>=2.16 in ./venv/lib/python3.12/site-packages (from tensorflow) (2.16.2)
21
+ Requirement already satisfied: keras>=3.0.0 in ./venv/lib/python3.12/site-packages (from tensorflow) (3.3.3)
22
+ Requirement already satisfied: numpy<2.0.0,>=1.26.0 in ./venv/lib/python3.12/site-packages (from tensorflow) (1.26.4)
23
+ Requirement already satisfied: wheel<1.0,>=0.23.0 in ./venv/lib/python3.12/site-packages (from astunparse>=1.6.0->tensorflow) (0.43.0)
24
+ Requirement already satisfied: rich in ./venv/lib/python3.12/site-packages (from keras>=3.0.0->tensorflow) (13.7.1)
25
+ Requirement already satisfied: namex in ./venv/lib/python3.12/site-packages (from keras>=3.0.0->tensorflow) (0.0.8)
26
+ Requirement already satisfied: optree in ./venv/lib/python3.12/site-packages (from keras>=3.0.0->tensorflow) (0.11.0)
27
+ Requirement already satisfied: charset-normalizer<4,>=2 in ./venv/lib/python3.12/site-packages (from requests<3,>=2.21.0->tensorflow) (3.3.2)
28
+ Requirement already satisfied: idna<4,>=2.5 in ./venv/lib/python3.12/site-packages (from requests<3,>=2.21.0->tensorflow) (3.7)
29
+ Requirement already satisfied: urllib3<3,>=1.21.1 in ./venv/lib/python3.12/site-packages (from requests<3,>=2.21.0->tensorflow) (2.2.1)
30
+ Requirement already satisfied: certifi>=2017.4.17 in ./venv/lib/python3.12/site-packages (from requests<3,>=2.21.0->tensorflow) (2024.6.2)
31
+ Requirement already satisfied: markdown>=2.6.8 in ./venv/lib/python3.12/site-packages (from tensorboard<2.17,>=2.16->tensorflow) (3.6)
32
+ Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in ./venv/lib/python3.12/site-packages (from tensorboard<2.17,>=2.16->tensorflow) (0.7.2)
33
+ Requirement already satisfied: werkzeug>=1.0.1 in ./venv/lib/python3.12/site-packages (from tensorboard<2.17,>=2.16->tensorflow) (3.0.3)
34
+ Requirement already satisfied: MarkupSafe>=2.1.1 in ./venv/lib/python3.12/site-packages (from werkzeug>=1.0.1->tensorboard<2.17,>=2.16->tensorflow) (2.1.5)
35
+ Requirement already satisfied: markdown-it-py>=2.2.0 in ./venv/lib/python3.12/site-packages (from rich->keras>=3.0.0->tensorflow) (3.0.0)
36
+ Requirement already satisfied: pygments<3.0.0,>=2.13.0 in ./venv/lib/python3.12/site-packages (from rich->keras>=3.0.0->tensorflow) (2.18.0)
37
+ Requirement already satisfied: mdurl~=0.1 in ./venv/lib/python3.12/site-packages (from markdown-it-py>=2.2.0->rich->keras>=3.0.0->tensorflow) (0.1.2)
requirements.txt DELETED
@@ -1,109 +0,0 @@
1
- absl-py==2.1.0
2
- asttokens==2.4.1
3
- astunparse==1.6.3
4
- attrs==23.2.0
5
- beautifulsoup4==4.12.3
6
- blinker==1.8.2
7
- certifi==2024.2.2
8
- cffi==1.16.0
9
- charset-normalizer==3.3.2
10
- click==8.1.7
11
- colorama==0.4.6
12
- colored==1.3.93
13
- colormath==3.0.0
14
- contourpy==1.2.1
15
- cycler==0.12.1
16
- decorator==5.1.1
17
- deepface==0.0.91
18
- exceptiongroup==1.2.1
19
- executing==2.0.1
20
- filelock==3.14.0
21
- fire==0.6.0
22
- Flask==3.0.3
23
- flatbuffers==24.3.25
24
- fonttools==4.51.0
25
- gast==0.5.4
26
- gdown==5.1.0
27
- Gooey==1.0.8.1
28
- google-pasta==0.2.0
29
- grpcio==1.63.0
30
- gunicorn==22.0.0
31
- h5py==3.11.0
32
- HEIC2PNG==1.1.4
33
- idna==3.7
34
- importlib_metadata==7.1.0
35
- importlib_resources==6.4.0
36
- ipython==8.18.1
37
- itsdangerous==2.2.0
38
- jax==0.4.28
39
- jaxlib==0.4.28
40
- jedi==0.19.1
41
- Jinja2==3.1.4
42
- joblib==1.4.2
43
- keras==3.3.3
44
- kiwisolver==1.4.5
45
- libclang==18.1.1
46
- Markdown==3.6
47
- markdown-it-py==3.0.0
48
- MarkupSafe==2.1.5
49
- matplotlib==3.8.4
50
- matplotlib-inline==0.1.7
51
- mdurl==0.1.2
52
- mediapipe==0.10.14
53
- ml-dtypes==0.3.2
54
- mtcnn==0.1.1
55
- namex==0.0.8
56
- networkx==3.2.1
57
- numpy==1.26.4
58
- opencv-contrib-python==4.9.0.80
59
- opencv-python==4.9.0.80
60
- opt-einsum==3.3.0
61
- optree==0.11.0
62
- packaging==24.0
63
- pandas==2.2.2
64
- parso==0.8.4
65
- pexpect==4.9.0
66
- pillow==10.3.0
67
- pillow_heif==0.16.0
68
- pngquant-cli==2.17.0.post5
69
- prompt-toolkit==3.0.43
70
- protobuf==4.25.3
71
- psutil==5.9.8
72
- ptyprocess==0.7.0
73
- pure-eval==0.2.2
74
- pycparser==2.22
75
- Pygments==2.18.0
76
- pygtrie==2.5.0
77
- pyparsing==3.1.2
78
- PySocks==1.7.1
79
- python-dateutil==2.9.0.post0
80
- pytz==2024.1
81
- re-wx==0.0.10
82
- requests==2.31.0
83
- retina-face==0.0.17
84
- rich==13.7.1
85
- scikit-learn==1.4.2
86
- scipy==1.13.0
87
- six==1.16.0
88
- # Editable install with no version control (skin-tone-classifier==1.2.4)
89
- -e /Users/hardikuppal/Projects/ColorPalette/SkinToneClassifier
90
- sounddevice==0.4.6
91
- soupsieve==2.5
92
- stack-data==0.6.3
93
- tensorboard==2.16.2
94
- tensorboard-data-server==0.7.2
95
- tensorflow==2.16.1
96
- tensorflow-io-gcs-filesystem==0.37.0
97
- termcolor==2.4.0
98
- tf_keras==2.16.0
99
- threadpoolctl==3.5.0
100
- tqdm==4.66.4
101
- traitlets==5.14.3
102
- typing_extensions==4.11.0
103
- tzdata==2024.1
104
- urllib3==2.2.1
105
- wcwidth==0.2.13
106
- Werkzeug==3.0.3
107
- wrapt==1.16.0
108
- wxPython==4.2.1
109
- zipp==3.18.1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/analyze.py CHANGED
@@ -23,10 +23,10 @@ def get_dominant_colors(image, mask, n_colors, debug=True):
23
  image_np = image[mask > 0]
24
  pixels = image_np.reshape((-1, 3))
25
  n_colors_elbow = optimal_clusters_elbow(pixels, max_clusters=15)
26
- n_colors_silhouette = optimal_clusters_silhouette(pixels, max_clusters=5)
27
 
28
- kmeans_silhouette = KMeans(n_clusters=n_colors_silhouette)
29
- kmeans_silhouette.fit(pixels)
30
  kmeans_elbow = KMeans(n_clusters=n_colors_elbow)
31
  kmeans_elbow.fit(pixels)
32
  dominant_colors = kmeans_elbow.cluster_centers_
@@ -36,7 +36,7 @@ def get_dominant_colors(image, mask, n_colors, debug=True):
36
  dominant_percentages = [counts[i] / total_count for i in counts.keys()]
37
  if debug:
38
  visualize_clusters(image, mask, kmeans_elbow, tag="elbow")
39
- visualize_clusters(image, mask, kmeans_silhouette, tag="silhouette")
40
  return dominant_colors, dominant_percentages
41
 
42
 
@@ -288,13 +288,38 @@ def color_analysis(skin, hair, eyes):
288
  return analysis
289
 
290
 
291
- # Example usage
292
- result = color_analysis("light", "golden blonde", "blue")
293
- print(result)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
294
 
295
 
296
- def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
297
 
 
298
  image_np = np.array(image)
299
  hair_mask_np = np.array(hair_mask)
300
  skin_mask_np = np.array(skin_mask)
@@ -309,14 +334,9 @@ def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
309
  raise ValueError("Image and all masks must have the same dimensions")
310
 
311
  hair_palette = HairColorPalette()
312
-
313
  hair_dominant_colors, hair_dominant_percentages = get_dominant_colors(
314
  image_np, hair_mask_np, n_colors, debug=True
315
  )
316
- import IPython
317
-
318
- IPython.embed()
319
- raise Exception("stop")
320
  hair_color, hair_hex, hair_distance = get_closest_color(
321
  hair_dominant_colors, hair_palette.palette
322
  )
@@ -329,6 +349,11 @@ def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
329
  skin_dominant_colors, skin_palette.palette
330
  )
331
 
 
 
 
 
 
332
  eye_palette = EyeColorPalette()
333
  eye_dominant_colors, eye_dominant_percentages = get_dominant_colors(
334
  image_np, eye_mask_np, n_colors, debug=True
@@ -337,18 +362,7 @@ def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
337
  eye_dominant_colors, eye_palette.palette
338
  )
339
 
340
- # create overlay for different color for hair, skin and eye
341
- hair_overlay = np.zeros_like(image_np)
342
- skin_overlay = np.zeros_like(image_np)
343
- eye_overlay = np.zeros_like(image_np)
344
-
345
- hair_overlay[hair_mask_np > 0] = [0, 255, 0] # hex_to_rgb(hair_hex)
346
- skin_overlay[skin_mask_np > 0] = [255, 0, 0] # hex_to_rgb(skin_hex)
347
- eye_overlay[eye_mask_np > 0] = [0, 0, 255] # hex_to_rgb(eye_hex)
348
-
349
- combined_overlay = cv2.addWeighted(image_np, 0.8, hair_overlay, 0.2, 0)
350
- combined_overlay = cv2.addWeighted(combined_overlay, 0.8, skin_overlay, 0.2, 0)
351
- combined_overlay = cv2.addWeighted(combined_overlay, 0.8, eye_overlay, 0.2, 0)
352
 
353
  bar_width = 50
354
  hair_color_bar = create_dominant_color_bar(
@@ -365,19 +379,19 @@ def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
365
  image_np,
366
  list(hair_palette.palette.keys()).index(hair_color),
367
  hair_palette.palette,
368
- bar_width,
369
  )
370
  skin_palette_bar = create_tone_palette_bar(
371
  image_np,
372
  list(skin_palette.palette.keys()).index(skin_color),
373
  skin_palette.palette,
374
- bar_width,
375
  )
376
  eye_palette_bar = create_tone_palette_bar(
377
  image_np,
378
  list(eye_palette.palette.keys()).index(eye_color),
379
  eye_palette.palette,
380
- bar_width,
381
  )
382
 
383
  output_image = np.hstack(
@@ -397,21 +411,21 @@ def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
397
  hair_dominant_percentages,
398
  hair_hex,
399
  hair_distance,
400
- img_shape,
401
  )
402
  msg_bar_skin = create_message_bar(
403
  skin_dominant_colors,
404
  skin_dominant_percentages,
405
  skin_hex,
406
  skin_distance,
407
- img_shape,
408
  )
409
  msg_bar_eye = create_message_bar(
410
  eye_dominant_colors, eye_dominant_percentages, eye_hex, eye_distance, img_shape
411
  )
412
 
413
  output_image = np.vstack([output_image, msg_bar_hair, msg_bar_skin, msg_bar_eye])
414
- # Create JSON record
415
  analysis_record = {
416
  "hair": {
417
  "dominant_colors": [rgb_to_hex(color) for color in hair_dominant_colors],
@@ -426,6 +440,8 @@ def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
426
  "closest_color": skin_color,
427
  "closest_color_hex": skin_hex,
428
  "distance": skin_distance,
 
 
429
  },
430
  "eyes": {
431
  "dominant_colors": [rgb_to_hex(color) for color in eye_dominant_colors],
@@ -436,14 +452,4 @@ def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
436
  },
437
  }
438
 
439
- # Save output image and JSON record
440
- # cv2.imwrite("output_image.png", output_image)
441
- # with open("analysis_record.json", "w") as json_file:
442
- # json.dump(analysis_record, json_file, indent=4)
443
-
444
- return output_image, analysis_record
445
- # cv2.imwrite("output_image.png", output_image)
446
-
447
-
448
- # Example usage
449
- # analyze_and_visualize('path_to_image.jpg', 'path_to_hair_mask.png', 'path_to_skin_mask.png', 'path_to_eye_mask.png')
 
23
  image_np = image[mask > 0]
24
  pixels = image_np.reshape((-1, 3))
25
  n_colors_elbow = optimal_clusters_elbow(pixels, max_clusters=15)
26
+ # n_colors_silhouette = optimal_clusters_silhouette(pixels, max_clusters=5)
27
 
28
+ # kmeans_silhouette = KMeans(n_clusters=n_colors_silhouette)
29
+ # kmeans_silhouette.fit(pixels)
30
  kmeans_elbow = KMeans(n_clusters=n_colors_elbow)
31
  kmeans_elbow.fit(pixels)
32
  dominant_colors = kmeans_elbow.cluster_centers_
 
36
  dominant_percentages = [counts[i] / total_count for i in counts.keys()]
37
  if debug:
38
  visualize_clusters(image, mask, kmeans_elbow, tag="elbow")
39
+ # visualize_clusters(image, mask, kmeans_silhouette, tag="silhouette")
40
  return dominant_colors, dominant_percentages
41
 
42
 
 
288
  return analysis
289
 
290
 
291
+ # # Example usage
292
+ # result = color_analysis("light", "golden blonde", "blue")
293
+ # print(result)
294
+ def create_combined_overlay(image, hair_mask, skin_mask, eye_mask):
295
+ """
296
+ Create an overlay image by combining the original image with the hair, skin, and eye masks.
297
+ :param image: Original image as a numpy array.
298
+ :param hair_mask: Hair mask as a numpy array.
299
+ :param skin_mask: Skin mask as a numpy array.
300
+ :param eye_mask: Eye mask as a numpy array.
301
+ :return: Combined overlay image as a numpy array.
302
+ """
303
+ # Create overlays for different parts
304
+ hair_overlay = np.zeros_like(image)
305
+ skin_overlay = np.zeros_like(image)
306
+ eye_overlay = np.zeros_like(image)
307
+
308
+ # Color the masks
309
+ hair_overlay[hair_mask > 0] = [0, 255, 0] # Green for hair
310
+ skin_overlay[skin_mask > 0] = [255, 0, 0] # Red for skin
311
+ eye_overlay[eye_mask > 0] = [0, 0, 255] # Blue for eyes
312
+
313
+ # Combine the overlays with the original image
314
+ combined_overlay = cv2.addWeighted(image, 0.8, hair_overlay, 0.2, 0)
315
+ combined_overlay = cv2.addWeighted(combined_overlay, 0.8, skin_overlay, 0.2, 0)
316
+ combined_overlay = cv2.addWeighted(combined_overlay, 0.8, eye_overlay, 0.2, 0)
317
+
318
+ return combined_overlay
319
 
320
 
 
321
 
322
+ def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
323
  image_np = np.array(image)
324
  hair_mask_np = np.array(hair_mask)
325
  skin_mask_np = np.array(skin_mask)
 
334
  raise ValueError("Image and all masks must have the same dimensions")
335
 
336
  hair_palette = HairColorPalette()
 
337
  hair_dominant_colors, hair_dominant_percentages = get_dominant_colors(
338
  image_np, hair_mask_np, n_colors, debug=True
339
  )
 
 
 
 
340
  hair_color, hair_hex, hair_distance = get_closest_color(
341
  hair_dominant_colors, hair_palette.palette
342
  )
 
349
  skin_dominant_colors, skin_palette.palette
350
  )
351
 
352
+ # Calculate ITA for the dominant skin color
353
+ dominant_skin_color = skin_dominant_colors[0]
354
+ ita = skin_palette.calculate_ita(dominant_skin_color)
355
+ vectorscope_check = skin_palette.is_within_vectorscope_skin_tone_line(dominant_skin_color)
356
+
357
  eye_palette = EyeColorPalette()
358
  eye_dominant_colors, eye_dominant_percentages = get_dominant_colors(
359
  image_np, eye_mask_np, n_colors, debug=True
 
362
  eye_dominant_colors, eye_palette.palette
363
  )
364
 
365
+ combined_overlay = create_combined_overlay(image_np, hair_mask_np, skin_mask_np, eye_mask_np)
 
 
 
 
 
 
 
 
 
 
 
366
 
367
  bar_width = 50
368
  hair_color_bar = create_dominant_color_bar(
 
379
  image_np,
380
  list(hair_palette.palette.keys()).index(hair_color),
381
  hair_palette.palette,
382
+ bar_width
383
  )
384
  skin_palette_bar = create_tone_palette_bar(
385
  image_np,
386
  list(skin_palette.palette.keys()).index(skin_color),
387
  skin_palette.palette,
388
+ bar_width
389
  )
390
  eye_palette_bar = create_tone_palette_bar(
391
  image_np,
392
  list(eye_palette.palette.keys()).index(eye_color),
393
  eye_palette.palette,
394
+ bar_width
395
  )
396
 
397
  output_image = np.hstack(
 
411
  hair_dominant_percentages,
412
  hair_hex,
413
  hair_distance,
414
+ img_shape
415
  )
416
  msg_bar_skin = create_message_bar(
417
  skin_dominant_colors,
418
  skin_dominant_percentages,
419
  skin_hex,
420
  skin_distance,
421
+ img_shape
422
  )
423
  msg_bar_eye = create_message_bar(
424
  eye_dominant_colors, eye_dominant_percentages, eye_hex, eye_distance, img_shape
425
  )
426
 
427
  output_image = np.vstack([output_image, msg_bar_hair, msg_bar_skin, msg_bar_eye])
428
+
429
  analysis_record = {
430
  "hair": {
431
  "dominant_colors": [rgb_to_hex(color) for color in hair_dominant_colors],
 
440
  "closest_color": skin_color,
441
  "closest_color_hex": skin_hex,
442
  "distance": skin_distance,
443
+ "ita": ita,
444
+ # "vectorscope_check": vectorscope_check,
445
  },
446
  "eyes": {
447
  "dominant_colors": [rgb_to_hex(color) for color in eye_dominant_colors],
 
452
  },
453
  }
454
 
455
+ return output_image, analysis_record
 
 
 
 
 
 
 
 
 
 
src/gradio_app.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import cv2
3
+ import numpy as np
4
+ from PIL import Image
5
+
6
+ from skin_analyzer import analyze_skin_function
7
+ from image import ImageBundle
8
+
9
+ def process_image(image, analyze_skin, analyze_eyes, analyze_hair):
10
+ """
11
+ Process the uploaded image and return the analysis results based on selected analyses.
12
+ :param image: Uploaded image.
13
+ :param analyze_skin: Whether to perform skin analysis.
14
+ :param analyze_eyes: Whether to perform eye analysis.
15
+ :param analyze_hair: Whether to perform hair analysis.
16
+ :return: Analysis results.
17
+ """
18
+ image_bundle = ImageBundle(image_array=image)
19
+
20
+ # Detect faces and landmarks
21
+ face_data = image_bundle.detect_faces_and_landmarks()
22
+ landmarks = image_bundle.detect_face_landmarks()
23
+
24
+ # Perform segmentation
25
+ segmentation_maps = image_bundle.segment_image()
26
+
27
+ analysis_results = {}
28
+ overlay_images = []
29
+
30
+ # if analyze_hair:
31
+ # hair_mask = segmentation_maps["hair_mask"]
32
+ # # analysis_results['hair_analysis'] = analyze_hair_function(image, hair_mask)
33
+ # overlay_images.append(segmentation_maps["hair_mask"])
34
+
35
+ if analyze_skin:
36
+ skin_mask = segmentation_maps["face_skin_mask"]
37
+ skin_analysis = analyze_skin_function(image, skin_mask)
38
+ analysis_results['skin_analysis'] = skin_analysis
39
+ overlay_images.append(skin_analysis["overlay_image"])
40
+
41
+ # if analyze_eyes:
42
+ # eye_mask = segmentation_maps["right_iris_mask"] | segmentation_maps["left_iris_mask"]
43
+ # # analysis_results['eye_analysis'] = analyze_eye_function(image, eye_mask)
44
+ # overlay_images.append(eye_mask)
45
+
46
+ # Combine overlay images
47
+ if overlay_images:
48
+ combined_overlay = np.maximum.reduce(overlay_images)
49
+ else:
50
+ combined_overlay = image
51
+
52
+ # Convert combined_overlay to PIL Image for display
53
+ combined_overlay = Image.fromarray(combined_overlay)
54
+
55
+ return combined_overlay, analysis_results
56
+
57
+ # Define Gradio interface
58
+ iface = gr.Interface(
59
+ fn=process_image,
60
+ inputs=[
61
+ gr.Image(type="numpy", label="Upload an Image"),
62
+ gr.Checkbox(label="Skin Analysis", value=True),
63
+ gr.Checkbox(label="Eye Analysis", value=False),
64
+ gr.Checkbox(label="Hair Analysis", value=False),
65
+ ],
66
+ outputs=[
67
+ gr.Image(type="pil", label="Processed Image"),
68
+ gr.JSON(label="Analysis Results")
69
+ ],
70
+ title="Color Palette Analysis",
71
+ description="Upload an image to analyze the skin, hair, and eye colors. Select the analyses you want to perform."
72
+ )
73
+
74
+ # Launch the Gradio interface
75
+ iface.launch()
src/image.py CHANGED
@@ -1,32 +1,33 @@
1
- # Description: Image handling utilities for the Skin Tone Classifier.
2
  import logging
3
  import cv2
4
  import numpy as np
5
- from deepface import DeepFace
6
- from utils import is_url, extract_filename_and_extension, alphabet_id, ArgumentError
7
- from segmentation_utils import detect_eye_mask, mediapipe_selfie_segmentor
8
- import requests
9
  from PIL import Image, ExifTags, ImageCms
10
  from io import BytesIO
 
11
  import pillow_heif
 
 
12
 
13
  # Register HEIF opener
14
  pillow_heif.register_heif_opener()
15
 
16
-
17
  LOG = logging.getLogger(__name__)
18
 
19
-
20
  class ImageBundle:
21
- def __init__(self, image_source):
22
  """
23
  Initialize the ImageHandler object.
24
  :param image_source: Path to the image file or URL of the image.
 
25
  """
26
  self.image_source = image_source
 
27
  self.exif_data = {}
28
  self.segmentation_maps = {}
29
- self._open_image()
 
 
 
30
 
31
  def _open_image(self):
32
  """
@@ -66,6 +67,22 @@ class ImageBundle:
66
  response.raise_for_status() # Raise an exception for HTTP errors
67
  self.image = Image.open(BytesIO(response.content))
68
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  def _handle_color_profile(self):
70
  """
71
  Handle the color profile of the image if mentioned.
@@ -105,66 +122,54 @@ class ImageBundle:
105
  return np.all(r == g) and np.all(g == b)
106
  return False
107
 
108
- def _get_faces(self, detector_backend="retinaface", is_bw=False, min_size=(90, 90)):
109
  """
110
- Get the coordinates of the detected faces in the image.
111
- :param detector_backend: Face detector backend to use.
112
- :param is_bw: Whether the image is black and white.
113
- gets a list of faces detected, with each face as dict of facial_area, face, confidence
114
  """
115
- self.faces = DeepFace.extract_faces(
116
- self.numpy_image(), detector_backend=detector_backend, grayscale=is_bw
117
- )
118
- if len(self.faces) == 0:
119
- raise ValueError("No face is detected in the image.")
120
- elif len(self.faces) > 1:
121
- raise ValueError("Multiple faces are detected in the image.")
122
- else:
123
- # check if the face is too small
124
- face = self.faces[0]
125
- if (
126
- face["facial_area"]["w"] < min_size[0]
127
- or face["facial_area"]["h"] < min_size[1]
128
- ):
129
- raise ValueError("The face is too small.")
130
- else:
131
- self.faces = self.faces[0]
132
-
133
  return self.faces
134
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
135
  def numpy_image(self):
136
  """
137
  Convert the image to a numpy array.
138
  :return: Numpy array of the image.
139
  """
140
  image = np.array(self.image)
141
- # check if image is 4 chanel convert ot three
142
  if image.shape[2] == 4:
143
  image = image[:, :, :3]
144
- # convert image to BGR
145
  image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
146
- return image
147
-
148
- def _segment_image(
149
- self, segment=["face_skin", "body_skin", "hair", "eyes", "clothes"]
150
- ):
151
- """
152
- Get a segmentation map by name.
153
- :param name: Name of the segmentation map.
154
- :return: Segmentation map as a numpy array or None if not found.
155
- """
156
- if "eyes" in segment:
157
- del segment[segment.index("eyes")]
158
- eye_mask = detect_eye_mask(self.numpy_image(), self.faces)
159
- self.segmentation_maps["eyes_mask"] = eye_mask
160
- if (
161
- "face_skin" in segment
162
- or "body_skin" in segment
163
- or "hair" in segment
164
- or "clothes" in segment
165
- ):
166
- # import IPython; IPython.embed()
167
- mask_dict = mediapipe_selfie_segmentor(self.numpy_image(), segment=segment)
168
- for seg in segment:
169
- self.segmentation_maps[seg + "_mask"] = mask_dict[seg]
170
- return self.segmentation_maps
 
 
1
  import logging
2
  import cv2
3
  import numpy as np
 
 
 
 
4
  from PIL import Image, ExifTags, ImageCms
5
  from io import BytesIO
6
+ import requests
7
  import pillow_heif
8
+ from segmentation_utils import detect_faces_and_landmarks, detect_face_landmarks, mediapipe_selfie_segmentor, create_feature_masks
9
+ from utils import is_url, extract_filename_and_extension
10
 
11
  # Register HEIF opener
12
  pillow_heif.register_heif_opener()
13
 
 
14
  LOG = logging.getLogger(__name__)
15
 
 
16
  class ImageBundle:
17
+ def __init__(self, image_source=None, image_array=None):
18
  """
19
  Initialize the ImageHandler object.
20
  :param image_source: Path to the image file or URL of the image.
21
+ :param image_array: Numpy array of the image.
22
  """
23
  self.image_source = image_source
24
+ self.image_array = image_array
25
  self.exif_data = {}
26
  self.segmentation_maps = {}
27
+ if image_array is not None:
28
+ self._open_image_from_array(image_array)
29
+ else:
30
+ self._open_image()
31
 
32
  def _open_image(self):
33
  """
 
67
  response.raise_for_status() # Raise an exception for HTTP errors
68
  self.image = Image.open(BytesIO(response.content))
69
 
70
+ def _open_image_from_array(self, image_array):
71
+ """
72
+ Open an image from a numpy array.
73
+ :param image_array: Numpy array of the image.
74
+ """
75
+ self.image = Image.fromarray(image_array)
76
+ self._handle_color_profile()
77
+ self._extract_exif_data()
78
+ self.is_black_and_white = self._is_black_and_white()
79
+ self.basename = "uploaded_image"
80
+ self.ext = ".jpg"
81
+ if self.is_black_and_white:
82
+ raise ValueError(
83
+ "The image is black and white. Please provide a colored image."
84
+ )
85
+
86
  def _handle_color_profile(self):
87
  """
88
  Handle the color profile of the image if mentioned.
 
122
  return np.all(r == g) and np.all(g == b)
123
  return False
124
 
125
+ def detect_faces_and_landmarks(self):
126
  """
127
+ Detect faces and landmarks using MediaPipe.
128
+ :return: List of dictionaries with face and landmark information.
 
 
129
  """
130
+ image_np = np.array(self.image)
131
+ face_data = detect_faces_and_landmarks(image_np)
132
+ self.faces = face_data
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
133
  return self.faces
134
 
135
+ def detect_face_landmarks(self):
136
+ """
137
+ Detect face landmarks using MediaPipe.
138
+ :return: Dictionary with landmarks for iris, lips, eyebrows, and eyes.
139
+ """
140
+ image_np = np.array(self.image)
141
+ landmarks = detect_face_landmarks(image_np)
142
+ self.landmarks = landmarks
143
+ return self.landmarks
144
+
145
+ def segment_image(self):
146
+ """
147
+ Segment the image using MediaPipe Multi-Class Selfie Segmentation.
148
+ :return: Dictionary of segmentation masks.
149
+ """
150
+ image_np = np.array(self.image)
151
+ masks = mediapipe_selfie_segmentor(image_np)
152
+
153
+ # Detect face landmarks and create masks for individual features
154
+ landmarks = self.detect_face_landmarks()
155
+ feature_masks = create_feature_masks(image_np, landmarks)
156
+
157
+ # Subtract feature masks from face skin mask
158
+ for feature in ["lips_mask", "left_eyebrow_mask", "right_eyebrow_mask", "left_eye_mask", "right_eye_mask", "left_iris_mask", "right_iris_mask"]:
159
+ if "iris" in feature:
160
+ masks[feature] = feature_masks[feature]
161
+ masks["face_skin_mask"] = cv2.subtract(masks["face_skin_mask"], feature_masks[feature])
162
+
163
+ self.segmentation_maps = masks
164
+ return self.segmentation_maps
165
+
166
  def numpy_image(self):
167
  """
168
  Convert the image to a numpy array.
169
  :return: Numpy array of the image.
170
  """
171
  image = np.array(self.image)
 
172
  if image.shape[2] == 4:
173
  image = image[:, :, :3]
 
174
  image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
175
+ return image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/main.py CHANGED
@@ -1,99 +1,102 @@
1
  import logging
2
  from pathlib import Path
3
- from typing import Union, Literal
4
-
5
  import cv2
 
6
  from analyze import analyze_and_visualize
 
7
  from image import ImageBundle
8
- from utils import ArgumentError
9
- import cv2
10
-
11
- from argparse import ArgumentParser
12
-
13
- from json import dumps # Optional
14
  import os
15
-
16
-
17
  LOG = logging.getLogger(__name__)
18
 
19
-
20
- def process(
21
- filename_or_url: Union[str, Path],
22
- n_dominant_colors: int = 3,
23
- min_size: tuple[int, int] = (90, 90),
24
- return_report_image=False,
25
- ):
26
  """
27
  Process the image and return the result.
28
  :param filename_or_url: The filename (in local devices) or URL (in Internet) of the image.
29
- :param image_type: Specify whether the input image(s) is/are colored or black/white.
30
- Valid choices are: "auto", "color" or "bw", Defaults to "auto", which will be detected automatically.
31
- :param convert_to_black_white: Whether to convert the image to black/white before processing. Defaults to False.
32
- :param n_dominant_colors: Number of dominant colors to be extracted from the image. Defaults to 2.
33
- :param min_size: Minimum possible face size. Faces smaller than that are ignored, defaults to (90, 90).
34
- :param return_report_image: Whether to return the report image(s) in the result. Defaults to False.
35
- :return:
36
-
37
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
 
39
- ib = ImageBundle(filename_or_url)
40
-
41
- basename, ext = ib.basename, ib.ext
42
- faces = ib._get_faces(detector_backend="retinaface", min_size=min_size)
43
- if len(faces) == 0:
44
- raise ArgumentError("No face detected in the image.")
45
- segmentation_maps = ib._segment_image()
46
- image = ib.numpy_image()
47
- # convert image to RGB
48
- image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
49
- hair_mask = segmentation_maps["hair_mask"]
50
- skin_mask = segmentation_maps["face_skin_mask"]
51
- eye_mask = segmentation_maps["eyes_mask"]
52
-
53
- report_images, records = analyze_and_visualize(
54
- image, hair_mask, skin_mask, eye_mask, n_colors=n_dominant_colors
55
- )
56
- return {
57
- "basename": basename,
58
- "extension": ext,
59
- "faces": records,
60
- "report_images": report_images if return_report_image else None,
61
- }
62
 
 
 
63
 
64
- if __name__ == "__main__":
65
- args = ArgumentParser()
66
- args.add_argument(
67
- "-i",
68
- "--image_path",
69
- required=True,
70
- help="Path to the image",
71
  )
72
 
73
- args = args.parse_args()
74
- # import IPython
75
-
76
- # IPython.embed()
77
- print("Image path:", args.image_path)
78
- image_path = args.image_path
79
  im_basename = os.path.basename(image_path).split(".")[0]
80
- result = process(image_path, return_report_image=True)
81
- report_images = result.pop("report_images")
82
-
83
- # Save the faces
84
- # for i, face in enumerate(report_images.keys()):
85
- cv2.imwrite(
86
- f"outputs/face_{im_basename}.png",
87
- cv2.cvtColor(report_images, cv2.COLOR_RGB2BGR),
88
- )
89
- # report_images.save(f"outputs/face_{im_basename}.png")
90
 
91
- # convert the result to json
92
- result_json = dumps(result)
93
- # save the json dump
94
- with open(f"outputs/faces_{im_basename}.json", "w") as f:
95
- f.write(result_json)
96
 
97
- print(result["faces"]["hair"])
98
- print(result["faces"]["skin"])
99
- print(result["faces"]["eyes"])
 
1
  import logging
2
  from pathlib import Path
3
+ from typing import Union
4
+ from argparse import ArgumentParser
5
  import cv2
6
+ from PIL import Image
7
  from analyze import analyze_and_visualize
8
+ from skin_analyzer import analyze_skin_function
9
  from image import ImageBundle
 
 
 
 
 
 
10
  import os
11
+ import numpy as np
 
12
  LOG = logging.getLogger(__name__)
13
 
14
def process_image(filename_or_url: Union[str, Path], analyze_skin, analyze_eyes, analyze_hair):
    """
    Process the image and return the result.
    :param filename_or_url: The filename (in local devices) or URL (in Internet) of the image.
    :param analyze_skin: Whether to perform skin analysis.
    :param analyze_eyes: Whether to perform eye analysis.
    :param analyze_hair: Whether to perform hair analysis.
    :return: Tuple of (combined overlay image, analysis-results dict).
    """
    image_bundle = ImageBundle(image_source=filename_or_url)

    # Detect faces and landmarks (results are cached on the bundle).
    image_bundle.detect_faces_and_landmarks()
    image_bundle.detect_face_landmarks()

    # Perform segmentation
    segmentation_maps = image_bundle.segment_image()

    analysis_results = {}
    overlay_images = []

    image_np = image_bundle.numpy_image()

    def _as_three_channel(img):
        # Promote single-channel masks to 3 channels so every entry of
        # overlay_images has the same shape: np.maximum.reduce over a mix of
        # 2-D masks and the 3-D skin overlay fails to broadcast otherwise.
        return np.dstack([img] * 3) if img.ndim == 2 else img

    if analyze_hair:
        hair_mask = segmentation_maps["hair_mask"]
        # NOTE(review): analyze_hair_function is not imported anywhere in
        # this module -- this branch raises NameError until its import is
        # added alongside analyze_skin_function.
        hair_analysis = analyze_hair_function(image_np, hair_mask)
        analysis_results['hair_analysis'] = hair_analysis
        overlay_images.append(_as_three_channel(hair_mask))

    if analyze_skin:
        skin_mask = segmentation_maps["face_skin_mask"]
        skin_analysis = analyze_skin_function(image_np, skin_mask)
        # The overlay is for visualisation only; keep it out of the
        # JSON-serialisable analysis record.
        skin_overlay = skin_analysis.pop("overlay_image", None)
        if skin_overlay is not None:
            overlay_images.append(_as_three_channel(skin_overlay))
        analysis_results['skin_analysis'] = skin_analysis

    if analyze_eyes:
        eye_mask = segmentation_maps["right_eye_mask"] | segmentation_maps["left_eye_mask"]
        # NOTE(review): analyze_eye_function is not imported anywhere in this
        # module -- this branch raises NameError until its import is added.
        eye_analysis = analyze_eye_function(image_np, eye_mask)
        analysis_results['eye_analysis'] = eye_analysis
        overlay_images.append(_as_three_channel(segmentation_maps["right_eye_mask"]))

    # Combine overlay images pixel-wise; fall back to the source image when
    # no analysis was requested.
    if overlay_images:
        combined_overlay = np.maximum.reduce(overlay_images)
    else:
        combined_overlay = image_np

    return combined_overlay, analysis_results
71
 
72
if __name__ == "__main__":
    import json

    parser = ArgumentParser()
    parser.add_argument("-i", "--image_path", required=True, help="Path to the image")
    parser.add_argument("--analyze_skin", action="store_true", help="Perform skin analysis")
    parser.add_argument("--analyze_eyes", action="store_true", help="Perform eye analysis")
    parser.add_argument("--analyze_hair", action="store_true", help="Perform hair analysis")

    args = parser.parse_args()
    image_path = args.image_path

    # Process the image
    combined_overlay, analysis_results = process_image(
        image_path,
        analyze_skin=args.analyze_skin,
        analyze_eyes=args.analyze_eyes,
        analyze_hair=args.analyze_hair,
    )

    # Make sure the output directory exists up front: cv2.imwrite fails
    # silently (returns False) when the directory is missing, and the JSON
    # open() below would raise.
    os.makedirs("outputs", exist_ok=True)

    im_basename = os.path.basename(image_path).split(".")[0]
    overlay_image_path = f"outputs/overlay_{im_basename}.png"
    cv2.imwrite(overlay_image_path, combined_overlay)

    # Save the analysis results as JSON, including the path to the overlay image
    analysis_results["overlay_image_path"] = overlay_image_path
    with open(f"outputs/analysis_{im_basename}.json", "w") as f:
        json.dump(analysis_results, f, indent=4)

    print("Analysis complete. Results saved in 'outputs' directory.")
 
 
src/segmentation_utils.py CHANGED
@@ -1,13 +1,47 @@
 
1
  from mediapipe.tasks import python
2
  from mediapipe.tasks.python import vision
3
  import mediapipe as mp
4
  import cv2
5
  import numpy as np
6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  def mediapipe_selfie_segmentor(image: np.ndarray, segment: list = ["face_skin", "body_skin", "hair"]):
 
 
 
 
 
 
8
  # Create the options that will be used for ImageSegmenter
9
  base_options = python.BaseOptions(
10
- model_asset_path="model_weights/selfie_segmenter.tflite"
11
  )
12
  options = vision.ImageSegmenterOptions(
13
  base_options=base_options,
@@ -16,35 +50,137 @@ def mediapipe_selfie_segmentor(image: np.ndarray, segment: list = ["face_skin",
16
  )
17
  with vision.ImageSegmenter.create_from_options(options) as segmenter:
18
  # Create the MediaPipe image file that will be segmented
19
- image = mp.Image(image_format=mp.ImageFormat.SRGB, data=image)
20
 
21
  # Retrieve the masks for the segmented image
22
- segmentation_result = segmenter.segment(image)
23
  category_mask = segmentation_result.category_mask.numpy_view()
24
- bg_mask = (category_mask == 0)
25
- hair_mask = (category_mask == 1)
26
- body_skin_mask = (category_mask == 2)
27
- face_skin_mask = (category_mask == 3)
28
- clothes_mask = (category_mask == 4)
 
 
 
 
 
 
29
 
30
- return_dict = {}
31
- for seg in segment:
32
- return_dict[seg] = locals()[seg + "_mask"]
33
- return return_dict
34
-
35
-
36
- def detect_eye_mask(image, face_coord):
37
-
38
- h, w = image.shape[0:2]
39
- imgMask = np.zeros((h, w), np.uint8)
40
-
41
- left_eye = face_coord["facial_area"]["left_eye"]
42
- right_eye = face_coord["facial_area"]["right_eye"]
43
-
44
- eye_distance = np.linalg.norm(np.array(left_eye)-np.array(right_eye))
45
- eye_radius = eye_distance/12 # approximate
46
-
47
- cv2.circle(imgMask, left_eye, int(eye_radius), (255,255,255), -1)
48
- cv2.circle(imgMask, right_eye, int(eye_radius), (255,255,255), -1)
49
-
50
- return imgMask
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
  from mediapipe.tasks import python
3
  from mediapipe.tasks.python import vision
4
  import mediapipe as mp
5
  import cv2
6
  import numpy as np
7
 
8
+ # Initialize mediapipe solutions
9
+ mp_face_detection = mp.solutions.face_detection
10
+ mp_face_mesh = mp.solutions.face_mesh
11
+
12
def detect_faces_and_landmarks(image: np.ndarray):
    """
    Detect faces and landmarks using MediaPipe Face Detection.
    :param image: Input image as a numpy array (BGR, as loaded by OpenCV).
    :return: List of dicts, one per detected face, with "bbox" (x, y, w, h in
        pixels) and "landmarks" (MediaPipe relative keypoints).
    """
    with mp_face_detection.FaceDetection(model_selection=1, min_detection_confidence=0.5) as face_detection:
        results = face_detection.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

    face_data = []
    if not results.detections:
        return face_data

    height, width = image.shape[:2]
    for detection in results.detections:
        rel_box = detection.location_data.relative_bounding_box
        # Convert the relative bounding box to absolute pixel coordinates.
        bbox = (
            int(rel_box.xmin * width),
            int(rel_box.ymin * height),
            int(rel_box.width * width),
            int(rel_box.height * height),
        )
        face_data.append({
            "bbox": bbox,
            "landmarks": detection.location_data.relative_keypoints,
        })
    return face_data
33
+
34
+
35
  def mediapipe_selfie_segmentor(image: np.ndarray, segment: list = ["face_skin", "body_skin", "hair"]):
36
+ """
37
+ Segment image using MediaPipe Multi-Class Selfie Segmentation.
38
+ :param image: Input image as a numpy array.
39
+ :param segment: List of segments to extract.
40
+ :return: Dictionary of segmentation masks.
41
+ """
42
  # Create the options that will be used for ImageSegmenter
43
  base_options = python.BaseOptions(
44
+ model_asset_path="model_weights/selfie_multiclass_256x256.tflite"
45
  )
46
  options = vision.ImageSegmenterOptions(
47
  base_options=base_options,
 
50
  )
51
  with vision.ImageSegmenter.create_from_options(options) as segmenter:
52
  # Create the MediaPipe image file that will be segmented
53
+ mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=image)
54
 
55
  # Retrieve the masks for the segmented image
56
+ segmentation_result = segmenter.segment(mp_image)
57
  category_mask = segmentation_result.category_mask.numpy_view()
58
+ h, w = category_mask.shape
59
+ masks = {
60
+ "face_skin_mask": np.zeros((h, w), dtype=np.uint8),
61
+ "hair_mask": np.zeros((h, w), dtype=np.uint8),
62
+ "body_skin_mask": np.zeros((h, w), dtype=np.uint8)
63
+ }
64
+
65
+ # Define class labels based on MediaPipe segmentation (example, may need adjustment)
66
+ face_skin_class = 3
67
+ hair_class = 1
68
+ body_skin_class = 2
69
 
70
+ masks["face_skin_mask"][category_mask == face_skin_class] = 255
71
+ masks["hair_mask"][category_mask == hair_class] = 255
72
+ masks["body_skin_mask"][category_mask == body_skin_class] = 255
73
+
74
+ return masks
75
+
76
def detect_face_landmarks(image: np.ndarray):
    """
    Detect face landmarks using MediaPipe Face Mesh.
    :param image: Input image as a numpy array (BGR).
    :return: Dictionary mapping each feature ("left_iris", "right_iris",
        "lips", "left_eyebrow", "right_eyebrow", "left_eye", "right_eye") to
        a list of (x, y) coordinates relative to the image size. Lists are
        empty when no face is found.
    """
    # Face Mesh landmark indices for each feature of interest (468-477 are
    # the iris points available when refine_landmarks=True).
    feature_indices = {
        "left_iris": list(range(468, 473)),
        "right_iris": list(range(473, 478)),
        "lips": [61, 146, 91, 181, 84, 17, 314, 405, 321, 375, 291, 0, 409,
                 270, 269, 267, 37, 39, 40, 185],
        "left_eyebrow": [70, 63, 105, 66, 107],
        "right_eyebrow": [336, 296, 334, 293, 300],
        "left_eye": [33, 246, 161, 160, 159, 158, 157, 173, 133, 155, 154,
                     153, 145, 144, 163, 7],
        "right_eye": [463, 398, 384, 385, 386, 387, 388, 466, 263, 249, 390,
                      373, 374, 380, 381, 382],
    }
    face_landmarks = {feature: [] for feature in feature_indices}

    with mp_face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1,
                               refine_landmarks=True,
                               min_detection_confidence=0.5) as face_mesh:
        results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

    if results.multi_face_landmarks:
        for mesh in results.multi_face_landmarks:
            for feature, indices in feature_indices.items():
                for idx in indices:
                    point = mesh.landmark[idx]
                    face_landmarks[feature].append((point.x, point.y))
    return face_landmarks
124
+
125
def create_feature_masks(image: np.ndarray, landmarks: dict):
    """
    Create individual binary masks for facial features based on landmarks.
    :param image: Input image as a numpy array (only its size is used).
    :param landmarks: Dict mapping feature name -> list of (x, y) relative
        coordinates, as produced by detect_face_landmarks(). Lists may be
        empty when no face was found; those features yield an all-zero mask.
    :return: Dict with a uint8 mask (0/255) for each facial feature.
    """
    h, w = image.shape[:2]
    features = ("lips", "left_eyebrow", "right_eyebrow",
                "left_eye", "right_eye", "left_iris", "right_iris")
    masks = {f"{feature}_mask": np.zeros((h, w), dtype=np.uint8) for feature in features}

    for feature in features:
        coords = landmarks.get(feature, [])
        # BUGFIX: the previous code indexed landmarks[feature][i] by position
        # in a hard-coded index list, which raised IndexError whenever no
        # face was detected (empty landmark lists) -- its len(points) guard
        # came after the failing construction. The landmark lists were built
        # in the same order as those index lists, so iterating them directly
        # yields the identical polygon.
        if not coords:
            continue
        points = np.array(
            [(int(x * w), int(y * h)) for x, y in coords], dtype=np.int32
        )
        cv2.fillPoly(masks[f"{feature}_mask"], [points], 255)

    return masks
168
+
169
if __name__ == "__main__":
    import os

    # Smoke-test face detection, segmentation and feature-mask subtraction.
    # Create the output directory first: cv2.imwrite fails silently when it
    # does not exist.
    os.makedirs("outputs", exist_ok=True)

    # NOTE(review): cv2.imread returns None when the file is missing --
    # assumes inputs/grace.jpg is present.
    image = cv2.imread("inputs/grace.jpg")
    face_data = detect_faces_and_landmarks(image)
    print(face_data)

    masks = mediapipe_selfie_segmentor(image)
    # write it to disk
    for key, mask in masks.items():
        if key == "face_skin_mask":
            # Carve the facial features out of the face-skin mask so the
            # saved mask contains skin pixels only.
            landmarks = detect_face_landmarks(image)
            feature_masks = create_feature_masks(image, landmarks)
            for feature, feature_mask in feature_masks.items():
                if "iris_mask" in feature:
                    cv2.imwrite(f"outputs/{feature}.png", feature_mask)
                mask = cv2.subtract(mask, feature_mask)
        cv2.imwrite(f"outputs/{key}.png", mask)
src/skin_analyzer.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ from colormath.color_objects import LabColor, sRGBColor
4
+ from colormath.color_conversions import convert_color
5
+ from PIL import Image
6
+
7
def sample_skin_pixels(image, mask, l_min=10, l_max=90):
    """
    Sample skin pixels from the masked face area, filtering out very light
    and very dark pixels.
    :param image: Input image as a numpy array (BGR channel order).
    :param mask: Skin mask as a numpy array (non-zero marks skin).
    :param l_min: Minimum L* value to keep.
    :param l_max: Maximum L* value to keep.
    :return: (N, 3) float array of [L*, a*, b*] skin pixels (possibly N == 0).
    """
    # Convert the whole image to CIELAB in one vectorised OpenCV call instead
    # of running colormath per pixel, which was orders of magnitude slower on
    # real photos. Feeding float32 in [0, 1] makes cvtColor return unscaled
    # Lab (L in [0, 100], signed a*/b*); both paths use the sRGB/D65
    # transform, so values agree up to small rounding differences.
    lab_image = cv2.cvtColor(image.astype(np.float32) / 255.0, cv2.COLOR_BGR2Lab)
    lab_pixels = lab_image[mask > 0].reshape(-1, 3)

    # Keep only pixels whose lightness falls inside [l_min, l_max]; this
    # discards specular highlights and deep shadows. An empty selection now
    # returns shape (0, 3) instead of the 1-D empty array the old code
    # produced, so downstream column indexing stays valid.
    lightness = lab_pixels[:, 0]
    return lab_pixels[(lightness >= l_min) & (lightness <= l_max)]
27
+
28
+
29
def calculate_ita(lab_pixels):
    """
    Calculate the Individual Typology Angle (ITA) from LAB pixels.

    ITA = arctan((L* - 50) / b*) expressed in degrees; higher values mean
    lighter skin. Uses arctan2 so that b* == 0 does not divide by zero and
    the angle lands in the correct quadrant for negative b* -- this also
    matches SkinTonePalette.calculate_ita in skin_utils.py.
    :param lab_pixels: (N, 3) array of [L*, a*, b*] skin pixels.
    :return: ITA value in degrees (NaN, with a numpy warning, when the
        input has zero rows).
    """
    mean_l = np.mean(lab_pixels[:, 0])
    mean_b = np.mean(lab_pixels[:, 2])
    return np.degrees(np.arctan2(mean_l - 50.0, mean_b))
39
+
40
def determine_undertones(lab_pixels):
    """
    Determine the undertone category from LAB color values.

    Classification uses the mean a* (green-red) and b* (blue-yellow) axes:
    positive a* with positive b* reads as warm, positive a* with
    non-positive b* as cool, and everything else as neutral.
    :param lab_pixels: (N, 3) array of [L*, a*, b*] skin pixels.
    :return: "Warm", "Cool" or "Neutral".
    """
    mean_a = lab_pixels[:, 1].mean()
    mean_b = lab_pixels[:, 2].mean()

    if mean_a > 0:
        return "Warm" if mean_b > 0 else "Cool"
    return "Neutral"
57
+
58
def determine_season(ita, mean_l, mean_a, mean_b):
    """
    Determine the color season based on ITA and LAB values.

    The ITA range selects the depth ("Light", "True", "Deep") and the signs
    of a*/b* select the temperature: warm (a* > 0, b* > 0) maps to Spring and
    cool (a* <= 0, b* <= 0) maps to Summer. Mixed signs, or an ITA outside
    (20, 53], return "Unknown".

    NOTE(review): the original implementation also contained Autumn
    (a* > 0 and b* > 20) and Winter (a* <= 0 and b* <= 0) branches, but both
    were unreachable dead code -- Autumn was subsumed by the Spring condition
    and Winter duplicated the Summer condition. They are removed here with no
    behavior change; the intended Autumn/Winter criteria still need to be
    defined.

    :param ita: ITA value in degrees.
    :param mean_l: Mean L* value (currently unused, kept for API stability).
    :param mean_a: Mean a* value.
    :param mean_b: Mean b* value.
    :return: Season category string, or "Unknown".
    """
    if 41 < ita <= 53:
        depth = "Light"
    elif 28 < ita <= 41:
        depth = "True"
    elif 20 < ita <= 28:
        depth = "Deep"
    else:
        return "Unknown"

    if mean_a > 0 and mean_b > 0:
        return f"{depth} Spring"
    if mean_a <= 0 and mean_b <= 0:
        return f"{depth} Summer"
    return "Unknown"
96
+
97
def analyze_skin_function(image, skin_mask):
    """
    Analyze the skin tone and determine undertone and color season.
    :param image: Input image as a numpy array (BGR).
    :param skin_mask: Skin mask as a numpy array (non-zero marks skin).
    :return: Dict with undertone, ITA, season and an overlay image that
        highlights the analysed skin region. When no usable skin pixels are
        found, the analysis fields are "Unknown"/None instead of crashing.
    """
    lab_pixels = sample_skin_pixels(image, skin_mask)

    # Guard the empty case: an all-zero mask, or a mask whose pixels are all
    # filtered out by the L* range, previously raised IndexError when the
    # 1-D empty pixel array was column-indexed.
    if len(lab_pixels) == 0:
        return {
            "undertone": "Unknown",
            "ita": None,
            "season": "Unknown",
            "overlay_image": image.copy(),
        }

    ita = calculate_ita(lab_pixels)
    print("ITA:", ita)
    undertone = determine_undertones(lab_pixels)
    print("Undertone:", undertone)
    mean_l = np.mean(lab_pixels[:, 0])
    mean_a = np.mean(lab_pixels[:, 1])
    mean_b = np.mean(lab_pixels[:, 2])
    print("Mean L*:", mean_l)
    print("Mean a*:", mean_a)
    print("Mean b*:", mean_b)
    season = determine_season(ita, mean_l, mean_a, mean_b)
    print("Season:", season)

    # Tint the analysed skin region green so the sampled area is visible.
    overlay = image.copy()
    overlay[skin_mask > 0] = (0, 255, 0)
    overlay = cv2.addWeighted(image, 0.7, overlay, 0.3, 0)

    return {
        "undertone": undertone,
        "ita": ita,
        "season": season,
        "overlay_image": overlay,
    }
src/skin_utils.py CHANGED
@@ -2,8 +2,8 @@ from PIL import Image
2
  import numpy as np
3
  from sklearn.cluster import KMeans
4
  from collections import Counter
5
- from color_utils import calculate_color_distance_lab
6
-
7
 
8
  class SkinTonePalette:
9
  def __init__(self):
@@ -22,48 +22,29 @@ class SkinTonePalette:
22
  }
23
 
24
  def get_dominant_colors(self, image_np, n_colors):
25
- # Reshape the image to be a list of pixels
26
  pixels = image_np.reshape((-1, 3))
27
-
28
- # Use KMeans to find n_colors clusters in the image
29
  kmeans = KMeans(n_clusters=n_colors)
30
  kmeans.fit(pixels)
31
-
32
- # Get the cluster centers (dominant colors)
33
  dominant_colors = kmeans.cluster_centers_
34
-
35
- # Get the number of pixels in each cluster
36
  counts = Counter(kmeans.labels_)
37
-
38
- # Sort the colors by the number of pixels in each cluster
39
  dominant_colors = [dominant_colors[i] for i in counts.keys()]
40
-
41
  return dominant_colors
42
 
43
  def get_closest_color(self, image, mask, n_colors=3):
44
-
45
- # Convert images to numpy arrays
46
  image_np = np.array(image)
47
  mask_np = np.array(mask)
48
-
49
- # Ensure the images have the same h and w
50
  if image_np.shape[:2] != mask_np.shape[:2]:
51
  raise ValueError("Image and mask must have the same dimensions")
52
 
53
- # Extract the skin region from the image
54
  skin_pixels = image_np[mask_np > 0]
55
-
56
- # Get dominant colors from the skin region
57
  dominant_colors = self.get_dominant_colors(skin_pixels, n_colors)
58
 
59
- # Find the closest color in the palette
60
  closest_color = None
61
  closest_hex = None
62
  min_distance = float("inf")
63
  for dom_color in dominant_colors:
64
  for color_name, (color_value, color_hex) in self.palette.items():
65
  distance = calculate_color_distance_lab(dom_color, color_value)
66
- # distance = np.linalg.norm(dom_color - np.array(color_value))
67
  if distance < min_distance:
68
  min_distance = distance
69
  closest_color = color_name
@@ -71,82 +52,14 @@ class SkinTonePalette:
71
 
72
  return closest_color, closest_hex
73
 
74
-
75
- # Example usage
76
- # palette = SkinTonePalette()
77
- # closest_color, closest_hex = palette.get_closest_color('path_to_image.jpg', 'path_to_mask.png')
78
- # print(f"The closest skin tone is: {closest_color} with hex code: {closest_hex}")
79
- # def dominant_colors(image, to_bw, n_clusters=2):
80
- # if to_bw:
81
- # data = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
82
- # data = cv2.cvtColor(data, cv2.COLOR_GRAY2BGR)
83
- # else:
84
- # data = image
85
- # data = np.reshape(data, (-1, 3))
86
- # data = data[np.all(data != 0, axis=1)]
87
- # data = np.float32(data)
88
-
89
- # criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
90
- # flags = cv2.KMEANS_RANDOM_CENTERS
91
- # compactness, labels, colors = cv2.kmeans(data, n_clusters, None, criteria, 10, flags)
92
- # labels, counts = np.unique(labels, return_counts=True)
93
-
94
- # order = (-counts).argsort()
95
- # colors = colors[order]
96
- # counts = counts[order]
97
-
98
- # percents = counts / counts.sum()
99
-
100
- # return colors, percents
101
-
102
- # DEFAULT_TONE_PALETTE = {
103
- # "color": [
104
- # "#373028",
105
- # "#422811",
106
- # "#513b2e",
107
- # "#6f503c",
108
- # "#81654f",
109
- # "#9d7a54",
110
- # "#bea07e",
111
- # "#e5c8a6",
112
- # "#e7c1b8",
113
- # "#f3dad6",
114
- # "#fbf2f3",
115
- # ],
116
- # # Refer to this paper:
117
- # # Leigh, A., & Susilo, T. (2009). Is voting skin-deep? Estimating the effect of candidate ballot photographs on election outcomes.
118
- # # Journal of Economic Psychology, 30(1), 61-70.
119
- # "bw": [
120
- # "#FFFFFF",
121
- # "#F0F0F0",
122
- # "#E0E0E0",
123
- # "#D0D0D0",
124
- # "#C0C0C0",
125
- # "#B0B0B0",
126
- # "#A0A0A0",
127
- # "#909090",
128
- # "#808080",
129
- # "#707070",
130
- # "#606060",
131
- # "#505050",
132
- # "#404040",
133
- # "#303030",
134
- # "#202020",
135
- # "#101010",
136
- # "#000000",
137
- # ],
138
- # }
139
-
140
- # DEFAULT_TONE_LABELS = {
141
- # "color": ["C" + alphabet_id(i) for i in range(len(DEFAULT_TONE_PALETTE["color"]))],
142
- # "bw": ["B" + alphabet_id(i) for i in range(len(DEFAULT_TONE_PALETTE["bw"]))],
143
- # }
144
- # def skin_tone(colors, percents, skin_tone_palette, tone_labels):
145
- # lab_tones = [convert_color(sRGBColor.new_from_rgb_hex(rgb), LabColor) for rgb in skin_tone_palette]
146
- # lab_colors = [convert_color(sRGBColor(rgb_r=r, rgb_g=g, rgb_b=b, is_upscaled=True), LabColor) for b, g, r in colors]
147
- # distances = [np.sum([delta_e_cie2000(c, label) * p for c, p in zip(lab_colors, percents)]) for label in lab_tones]
148
- # tone_id = np.argmin(distances)
149
- # distance: float = distances[tone_id]
150
- # tone_hex = skin_tone_palette[tone_id].upper()
151
- # tone_label = tone_labels[tone_id]
152
- # return tone_id, tone_hex, tone_label, distance
 
2
  import numpy as np
3
  from sklearn.cluster import KMeans
4
  from collections import Counter
5
+ from color_utils import calculate_color_distance_lab, rgb_to_lab
6
+ import cv2
7
 
8
  class SkinTonePalette:
9
  def __init__(self):
 
22
  }
23
 
24
  def get_dominant_colors(self, image_np, n_colors):
 
25
  pixels = image_np.reshape((-1, 3))
 
 
26
  kmeans = KMeans(n_clusters=n_colors)
27
  kmeans.fit(pixels)
 
 
28
  dominant_colors = kmeans.cluster_centers_
 
 
29
  counts = Counter(kmeans.labels_)
 
 
30
  dominant_colors = [dominant_colors[i] for i in counts.keys()]
 
31
  return dominant_colors
32
 
33
  def get_closest_color(self, image, mask, n_colors=3):
 
 
34
  image_np = np.array(image)
35
  mask_np = np.array(mask)
 
 
36
  if image_np.shape[:2] != mask_np.shape[:2]:
37
  raise ValueError("Image and mask must have the same dimensions")
38
 
 
39
  skin_pixels = image_np[mask_np > 0]
 
 
40
  dominant_colors = self.get_dominant_colors(skin_pixels, n_colors)
41
 
 
42
  closest_color = None
43
  closest_hex = None
44
  min_distance = float("inf")
45
  for dom_color in dominant_colors:
46
  for color_name, (color_value, color_hex) in self.palette.items():
47
  distance = calculate_color_distance_lab(dom_color, color_value)
 
48
  if distance < min_distance:
49
  min_distance = distance
50
  closest_color = color_name
 
52
 
53
  return closest_color, closest_hex
54
 
55
+ def calculate_ita(self, rgb_color):
56
+ lab_color = rgb_to_lab(rgb_color)
57
+ L = lab_color.lab_l
58
+ b = lab_color.lab_b
59
+ ita = np.arctan2(L - 50, b) * (180 / np.pi)
60
+ return ita
61
+
62
+ def is_within_vectorscope_skin_tone_line(self, rgb_color):
63
+ ycbcr_color = cv2.cvtColor(np.uint8([[rgb_color]]), cv2.COLOR_RGB2YCrCb)[0][0]
64
+ cb, cr = ycbcr_color[1], ycbcr_color[2]
65
+ return 80 <= cb <= 120 and 133 <= cr <= 173