HardikUppal commited on
Commit
ac07032
·
1 Parent(s): dbf6766

added histograms to aid visualisation

Browse files
Files changed (6) hide show
  1. .gitignore +1 -0
  2. app.py +180 -30
  3. main.py +225 -0
  4. src/image.py +28 -4
  5. src/segmentation_utils.py +176 -26
  6. src/skin_analyzer.py +91 -21
.gitignore CHANGED
@@ -5,3 +5,4 @@ venv/
5
  workspace/
6
  *.pyc
7
  temp.jpg
 
 
5
  workspace/
6
  *.pyc
7
  temp.jpg
8
+ .vscode/settings.json
app.py CHANGED
@@ -57,6 +57,15 @@ def process_image(
57
  if "filtered_skin_mask" in skin_analysis:
58
  filtered_skin_mask = skin_analysis["filtered_skin_mask"]
59
  del skin_analysis["filtered_skin_mask"]
 
 
 
 
 
 
 
 
 
60
  analysis_results["skin_analysis"] = skin_analysis
61
 
62
  # overlay_images.append(skin_analysis["overlay_image"])
@@ -76,33 +85,174 @@ def process_image(
76
  # Convert combined_overlay to PIL Image for display
77
  combined_overlay = Image.fromarray(overlay)
78
 
79
- return combined_overlay, analysis_results
80
-
81
-
82
- # Define Gradio interface
83
- iface = gr.Interface(
84
- fn=process_image,
85
- inputs=[
86
- gr.Image(type="numpy", label="Upload an Image"),
87
- gr.Slider(minimum=0, maximum=100, value=10, label="L% Min Skin"),
88
- gr.Slider(minimum=0, maximum=100, value=90, label="L% Max Skin"),
89
- gr.Slider(minimum=0, maximum=100, value=10, label="L% Min Tonality"),
90
- gr.Slider(minimum=0, maximum=100, value=90, label="L% Max Tonality"),
91
- gr.Slider(minimum=0, maximum=255, value=20, label="Chroma Threshold"),
92
- gr.Checkbox(label="Skin Analysis", value=True),
93
- gr.Checkbox(label="Eye Analysis", value=False),
94
- gr.Checkbox(label="Hair Analysis", value=False),
95
- ],
96
- outputs=[
97
- gr.Image(type="pil", label="Processed Image"),
98
- gr.JSON(label="Analysis Results"),
99
- ],
100
- allow_flagging="manual",
101
- flagging_dir="flagged",
102
- flagging_options=["Save", "Hard One"],
103
- title="Color Palette Analysis",
104
- description="Upload an image to analyze the skin, hair, and eye colors. Select the analyses you want to perform.",
105
- )
106
-
107
- # Launch the Gradio interface
108
- iface.launch(share=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
  if "filtered_skin_mask" in skin_analysis:
58
  filtered_skin_mask = skin_analysis["filtered_skin_mask"]
59
  del skin_analysis["filtered_skin_mask"]
60
+ elif "l_hist" in skin_analysis:
61
+ l_hist = skin_analysis["l_hist"]
62
+ del skin_analysis["l_hist"]
63
+ elif "tonality_l_hist" in skin_analysis:
64
+ tonality_l_hist = skin_analysis["tonality_l_hist"]
65
+ del skin_analysis["tonality_l_hist"]
66
+ elif "chroma_hist" in skin_analysis:
67
+ chroma_hist = skin_analysis["chroma_hist"]
68
+ del skin_analysis["chroma_hist"]
69
  analysis_results["skin_analysis"] = skin_analysis
70
 
71
  # overlay_images.append(skin_analysis["overlay_image"])
 
85
  # Convert combined_overlay to PIL Image for display
86
  combined_overlay = Image.fromarray(overlay)
87
 
88
+ return combined_overlay, analysis_results, l_hist, tonality_l_hist, chroma_hist
89
+
90
+
91
+ with gr.Blocks() as demo:
92
+ with gr.Row():
93
+ with gr.Column():
94
+ upload_image = gr.Image(type="numpy", label="Upload an Image")
95
+ with gr.Column():
96
+ l_min_slider = gr.Slider(
97
+ minimum=0, maximum=100, value=10, label="L(%) Min Skin"
98
+ )
99
+ l_hist_output = gr.Image(type="pil", label="L Histogram")
100
+ with gr.Column():
101
+ l_max_slider = gr.Slider(
102
+ minimum=0, maximum=100, value=90, label="L(%) Max Skin"
103
+ )
104
+ with gr.Column():
105
+ tonality_min_slider = gr.Slider(
106
+ minimum=0, maximum=100, value=50, label="L(%) Min Tonality"
107
+ )
108
+ tonality_hist_output = gr.Image(type="pil", label="Tonality L Histogram")
109
+ with gr.Column():
110
+ tonality_max_slider = gr.Slider(
111
+ minimum=0, maximum=100, value=70, label="L(%) Max Tonality"
112
+ )
113
+ with gr.Column():
114
+ chroma_slider = gr.Slider(
115
+ minimum=0, maximum=100, value=50, label="Chroma(%) Threshold"
116
+ )
117
+ chroma_hist_output = gr.Image(type="pil", label="Chroma Histogram")
118
+ with gr.Row():
119
+ skin_checkbox = gr.Checkbox(label="Skin Analysis", value=True)
120
+ eye_checkbox = gr.Checkbox(label="Eye Analysis", value=False)
121
+ hair_checkbox = gr.Checkbox(label="Hair Analysis", value=False)
122
+ analysis_results_output = gr.JSON(label="Analysis Results")
123
+ processed_image_output = gr.Image(type="pil", label="Processed Image")
124
+
125
+ gr.Interface(
126
+ fn=process_image,
127
+ inputs=[
128
+ upload_image,
129
+ l_min_slider,
130
+ l_max_slider,
131
+ tonality_min_slider,
132
+ tonality_max_slider,
133
+ chroma_slider,
134
+ skin_checkbox,
135
+ eye_checkbox,
136
+ hair_checkbox,
137
+ ],
138
+ outputs=[
139
+ processed_image_output,
140
+ l_hist_output,
141
+ tonality_hist_output,
142
+ chroma_hist_output,
143
+ analysis_results_output,
144
+ ],
145
+ )
146
+
147
+ # Set up change event triggers for the sliders
148
+ l_min_slider.change(
149
+ process_image,
150
+ inputs=[
151
+ upload_image,
152
+ l_min_slider,
153
+ l_max_slider,
154
+ tonality_min_slider,
155
+ tonality_max_slider,
156
+ chroma_slider,
157
+ skin_checkbox,
158
+ eye_checkbox,
159
+ hair_checkbox,
160
+ ],
161
+ outputs=[
162
+ processed_image_output,
163
+ l_hist_output,
164
+ tonality_hist_output,
165
+ chroma_hist_output,
166
+ analysis_results_output,
167
+ ],
168
+ )
169
+
170
+ l_max_slider.change(
171
+ process_image,
172
+ inputs=[
173
+ upload_image,
174
+ l_min_slider,
175
+ l_max_slider,
176
+ tonality_min_slider,
177
+ tonality_max_slider,
178
+ chroma_slider,
179
+ skin_checkbox,
180
+ eye_checkbox,
181
+ hair_checkbox,
182
+ ],
183
+ outputs=[
184
+ processed_image_output,
185
+ l_hist_output,
186
+ tonality_hist_output,
187
+ chroma_hist_output,
188
+ analysis_results_output,
189
+ ],
190
+ )
191
+
192
+ tonality_min_slider.change(
193
+ process_image,
194
+ inputs=[
195
+ upload_image,
196
+ l_min_slider,
197
+ l_max_slider,
198
+ tonality_min_slider,
199
+ tonality_max_slider,
200
+ chroma_slider,
201
+ skin_checkbox,
202
+ eye_checkbox,
203
+ hair_checkbox,
204
+ ],
205
+ outputs=[
206
+ processed_image_output,
207
+ l_hist_output,
208
+ tonality_hist_output,
209
+ chroma_hist_output,
210
+ analysis_results_output,
211
+ ],
212
+ )
213
+
214
+ tonality_max_slider.change(
215
+ process_image,
216
+ inputs=[
217
+ upload_image,
218
+ l_min_slider,
219
+ l_max_slider,
220
+ tonality_min_slider,
221
+ tonality_max_slider,
222
+ chroma_slider,
223
+ skin_checkbox,
224
+ eye_checkbox,
225
+ hair_checkbox,
226
+ ],
227
+ outputs=[
228
+ processed_image_output,
229
+ l_hist_output,
230
+ tonality_hist_output,
231
+ chroma_hist_output,
232
+ analysis_results_output,
233
+ ],
234
+ )
235
+
236
+ chroma_slider.change(
237
+ process_image,
238
+ inputs=[
239
+ upload_image,
240
+ l_min_slider,
241
+ l_max_slider,
242
+ tonality_min_slider,
243
+ tonality_max_slider,
244
+ chroma_slider,
245
+ skin_checkbox,
246
+ eye_checkbox,
247
+ hair_checkbox,
248
+ ],
249
+ outputs=[
250
+ processed_image_output,
251
+ l_hist_output,
252
+ tonality_hist_output,
253
+ chroma_hist_output,
254
+ analysis_results_output,
255
+ ],
256
+ )
257
+
258
+ demo.launch()
main.py ADDED
@@ -0,0 +1,225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+ import gradio as gr
4
+ import cv2
5
+ import numpy as np
6
+ from PIL import Image
7
+ import pandas as pd
8
+ import colorspacious as cs
9
+ from src.skin_analyzer import (
10
+ analyze_skin_function,
11
+ categorize_chroma,
12
+ categorize_tonality,
13
+ categorize_undertones,
14
+ determine_season_with_tonality,
15
+ )
16
+ from src.image import ImageBundle
17
+ import matplotlib.pyplot as plt
18
+ from tqdm import tqdm
19
+
20
+ if __name__ == "__main__":
21
+ args = argparse.ArgumentParser()
22
+ args.add_argument("-i", "--input", type=str, default="inputs/")
23
+ args.add_argument("-ch", "--chroma_thresh", type=int, default=45)
24
+ args.add_argument("-lmin", "--l_min_tonality", type=int, default=33)
25
+ args.add_argument("-lmax", "--l_max_tonality", type=int, default=66)
26
+
27
+ args = args.parse_args()
28
+
29
+ # check if input is folder or a csv file
30
+
31
+ if args.input.endswith(".csv"):
32
+ # read csv file
33
+ file = pd.read_csv(args.input)
34
+ # column name Image,Tone,Season,Sub-Season
35
+ for index, row in tqdm(file.iterrows(), total=file.shape[0]):
36
+ if index in [26, 62, 61, 23, 22, 24, 13, 1, 2, 4, 6, 8, 10, 11]:
37
+ # if index in [1, 2, 4, 10]:
38
+ # get the image from url
39
+ image_url = row["Image"]
40
+ # get the tone from the csv
41
+ tone = row["Tone"]
42
+ # get the season from the csv
43
+ season = row["Season"]
44
+ # get the sub-season from the csv
45
+ sub_season = row["Sub-Season"]
46
+
47
+ image_bundle = ImageBundle(image_source=image_url)
48
+
49
+ # Detect faces and landmarks
50
+ face_data = image_bundle.detect_faces_and_landmarks()
51
+ landmarks = image_bundle.detect_face_landmarks()
52
+
53
+ # Perform segmentation
54
+ segmentation_maps = image_bundle.segment_image()
55
+
56
+ skin_mask = segmentation_maps["face_skin_mask"]
57
+ image = image_bundle.numpy_image()
58
+
59
+ # create histogram of Lchannel for skin mask for all images
60
+ skin_pixels = (
61
+ image[skin_mask > 0].reshape(-1, 3) / 255.0
62
+ ) # Normalize to [0, 1] range
63
+
64
+ # Convert skin pixels to LAB color space using colorspacious
65
+ lab_pixels = cs.cspace_convert(skin_pixels, "sRGB1", "CIELab")
66
+
67
+ # Compute L* percentiles
68
+ l_values = lab_pixels[:, 0]
69
+
70
+ l_min = np.percentile(l_values, 10)
71
+ l_max = np.percentile(l_values, 90)
72
+
73
+ # # Filter based on L* value
74
+ mask_l = (lab_pixels[:, 0] >= l_min) & (lab_pixels[:, 0] <= l_max)
75
+ filtered_lab_pixels = lab_pixels[mask_l]
76
+
77
+ filtered_l_values = filtered_lab_pixels[:, 0]
78
+ print(np.unique(filtered_l_values))
79
+ l_max_tonality = args.l_max_tonality
80
+ l_min_tonality = args.l_min_tonality
81
+ l_min_tonality_val = np.percentile(filtered_l_values, l_min_tonality)
82
+ l_max_tonality_val = np.percentile(filtered_l_values, l_max_tonality)
83
+ print(l_min_tonality_val, l_max_tonality_val)
84
+
85
+ # Update mask
86
+ filtered_mask = np.zeros_like(skin_mask, dtype=np.uint8)
87
+ mask_indices = np.where(skin_mask > 0)
88
+ filtered_mask[mask_indices[0][mask_l], mask_indices[1][mask_l]] = 255
89
+
90
+ overlay = image.copy()
91
+ overlay[filtered_mask > 0] = (0, 0, 255) # Red for skin
92
+
93
+ overlay = cv2.addWeighted(image, 0.85, overlay, 0.15, 0)
94
+
95
+ # Convert combined_overlay to PIL Image for display
96
+ combined_overlay = Image.fromarray(overlay)
97
+
98
+ # Create a figure with four subplots: one for the overlay image and three for the histograms
99
+ fig, ax = plt.subplots(1, 4, figsize=(18, 6))
100
+
101
+ # # Plot the overlay image in the first subplot
102
+ # ax[0].imshow(image)
103
+ # ax[0].axis("off")
104
+ # ax[0].set_title("Original Image")
105
+
106
+ ax[0].imshow(combined_overlay)
107
+ ax[0].axis("off") # Hide the axis
108
+ ax[0].set_title("Overlay Image")
109
+ ax[0].text(
110
+ 0.05,
111
+ 0.95,
112
+ f"Season: {season}\nSub-Season: {sub_season}\nTone: {tone}",
113
+ transform=ax[0].transAxes,
114
+ horizontalalignment="left",
115
+ verticalalignment="top",
116
+ )
117
+
118
+ # Plot the histogram of filtered L channel in the fourth subplot
119
+ ax[3].hist(filtered_l_values, bins=100, color="blue", alpha=0.75)
120
+ ax[3].axvline(
121
+ l_min_tonality_val,
122
+ color="purple",
123
+ linestyle="--",
124
+ label="lower percentile",
125
+ )
126
+ ax[3].axvline(
127
+ l_max_tonality_val,
128
+ color="orange",
129
+ linestyle="--",
130
+ label="higher percentile",
131
+ )
132
+ ax[3].set_xlabel("L* Value")
133
+ ax[3].set_ylabel("Frequency")
134
+ ax[3].set_title("Histogram of Filtered L* Values in Skin Mask")
135
+ ax[3].legend()
136
+
137
+ # Plot the histogram of L channel in the third subplot
138
+ ax[2].hist(l_values, bins=100, color="blue", alpha=0.75)
139
+ ax[2].axvline(
140
+ l_min, color="red", linestyle="--", label="10th percentile"
141
+ )
142
+ ax[2].axvline(
143
+ l_max, color="green", linestyle="--", label="90th percentile"
144
+ )
145
+ ax[2].axvline(
146
+ l_min_tonality_val,
147
+ color="purple",
148
+ linestyle="--",
149
+ label="lower percentile",
150
+ )
151
+ ax[2].axvline(
152
+ l_max_tonality_val,
153
+ color="orange",
154
+ linestyle="--",
155
+ label="higher percentile",
156
+ )
157
+ ax[2].set_xlabel("L* Value")
158
+ ax[2].set_ylabel("Frequency")
159
+ ax[2].set_title("Histogram of L* Values in Skin Mask")
160
+ ax[2].legend()
161
+
162
+ # Plot the chroma histogram in the second subplot
163
+ a_values = filtered_lab_pixels[:, 1]
164
+ b_values = filtered_lab_pixels[:, 2]
165
+ chroma_values = np.sqrt(a_values**2 + b_values**2)
166
+ chroma_thresh = args.chroma_thresh
167
+ chroma_thersh_val = np.percentile(chroma_values, chroma_thresh)
168
+
169
+ ax[1].hist(chroma_values, bins=100, color="blue", alpha=0.75)
170
+ ax[1].set_xlabel("Chroma Value")
171
+ ax[1].set_ylabel("Frequency")
172
+ ax[1].set_title("Histogram of Chroma Values in Skin Mask")
173
+ ax[1].axvline(
174
+ chroma_thersh_val,
175
+ color="red",
176
+ linestyle="--",
177
+ label="Threshold Value",
178
+ )
179
+ chroma_counts, predominant_chroma, chroma = categorize_chroma(
180
+ lab_pixels, chroma_thresh
181
+ )
182
+ tonality_counts, predominant_tonality, tonalities = categorize_tonality(
183
+ filtered_l_values, l_min_tonality, l_max_tonality
184
+ )
185
+ undertone_counts, predominant_undertone, undertones = (
186
+ categorize_undertones(filtered_lab_pixels)
187
+ )
188
+
189
+ # add text on plot for predominant chroma
190
+ ax[1].text(
191
+ 0.05,
192
+ 0.95,
193
+ f"Predominant Chroma: {predominant_chroma},\n undertone: {predominant_undertone},\n tonality: {predominant_tonality}",
194
+ transform=ax[1].transAxes,
195
+ horizontalalignment="left",
196
+ verticalalignment="top",
197
+ )
198
+
199
+ # Save the figure in the workspace
200
+ os.makedirs(
201
+ f"workspace/all_hist-{chroma_thresh}-{l_min_tonality}-{l_max_tonality}",
202
+ exist_ok=True,
203
+ )
204
+ plt.savefig(
205
+ f"workspace/all_hist-{chroma_thresh}-{l_min_tonality}-{l_max_tonality}/{index}-{season}.png"
206
+ )
207
+ plt.close()
208
+
209
+ # skin_analysis = analyze_skin_function(
210
+ # image,
211
+ # skin_mask,
212
+ # 10,
213
+ # 90,
214
+ # l_min_tonality,
215
+ # l_max_tonality,
216
+ # chroma_thresh,
217
+ # )
218
+ season_counts, predominant_season, seasons = (
219
+ determine_season_with_tonality(undertones, chroma, tonalities)
220
+ )
221
+ print(
222
+ f"Season: {season_counts},\n Chroma: {chroma_counts},\n Tonality: {tonality_counts},\n Undertone: {undertone_counts}"
223
+ )
224
+
225
+ input("Press enter to continue")
src/image.py CHANGED
@@ -47,6 +47,8 @@ class ImageBundle:
47
 
48
  self._handle_color_profile()
49
  self._extract_exif_data()
 
 
50
  self.is_black_and_white = self._is_black_and_white()
51
  self.basename, self.ext = extract_filename_and_extension(self.image_source)
52
  if self.is_black_and_white:
@@ -93,14 +95,14 @@ class ImageBundle:
93
  """
94
  Handle the color profile of the image if mentioned.
95
  """
96
- if "icc_profile" in self.image.info:
97
- icc_profile = self.image.info.get("icc_profile")
98
  if icc_profile:
99
  io = BytesIO(icc_profile)
100
  src_profile = ImageCms.ImageCmsProfile(io)
101
  dst_profile = ImageCms.createProfile("sRGB")
102
  self.image = ImageCms.profileToProfile(
103
- self.image, src_profile, dst_profile
104
  )
105
 
106
  def _extract_exif_data(self):
@@ -108,12 +110,33 @@ class ImageBundle:
108
  Extract EXIF data from the image.
109
  """
110
  if hasattr(self.image, "_getexif"):
111
- exif_info = self.image._getexif()
112
  if exif_info is not None:
113
  for tag, value in exif_info.items():
114
  decoded_tag = ExifTags.TAGS.get(tag, tag)
115
  self.exif_data[decoded_tag] = value
116
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
117
  def _is_black_and_white(self):
118
  """
119
  Check if the image is black and white even if it has 3 channels.
@@ -160,6 +183,7 @@ class ImageBundle:
160
 
161
  # Detect face landmarks and create masks for individual features
162
  landmarks = self.detect_face_landmarks()
 
163
  feature_masks = create_feature_masks(image_np, landmarks)
164
 
165
  # Subtract feature masks from face skin mask
 
47
 
48
  self._handle_color_profile()
49
  self._extract_exif_data()
50
+ self._rotate_image_if_needed()
51
+
52
  self.is_black_and_white = self._is_black_and_white()
53
  self.basename, self.ext = extract_filename_and_extension(self.image_source)
54
  if self.is_black_and_white:
 
95
  """
96
  Handle the color profile of the image if mentioned.
97
  """
98
+ if "icc_profile" in self.image.info: # type: ignore
99
+ icc_profile = self.image.info.get("icc_profile") # type: ignore
100
  if icc_profile:
101
  io = BytesIO(icc_profile)
102
  src_profile = ImageCms.ImageCmsProfile(io)
103
  dst_profile = ImageCms.createProfile("sRGB")
104
  self.image = ImageCms.profileToProfile(
105
+ self.image, src_profile, dst_profile # type: ignore
106
  )
107
 
108
  def _extract_exif_data(self):
 
110
  Extract EXIF data from the image.
111
  """
112
  if hasattr(self.image, "_getexif"):
113
+ exif_info = self.image._getexif() # type: ignore
114
  if exif_info is not None:
115
  for tag, value in exif_info.items():
116
  decoded_tag = ExifTags.TAGS.get(tag, tag)
117
  self.exif_data[decoded_tag] = value
118
 
119
+ def _rotate_image_if_needed(self):
120
+ """
121
+ Rotate the image based on EXIF orientation information.
122
+ """
123
+ if not self.image.info:
124
+ return
125
+
126
+ for orientation in ExifTags.TAGS.keys():
127
+ if ExifTags.TAGS[orientation] == "Orientation":
128
+ break
129
+
130
+ exif = dict(self.image.info.items())
131
+ orientation = exif.get(orientation)
132
+
133
+ if orientation == 3:
134
+ self.image = self.image.rotate(180, expand=True)
135
+ elif orientation == 6:
136
+ self.image = self.image.rotate(270, expand=True)
137
+ elif orientation == 8:
138
+ self.image = self.image.rotate(90, expand=True)
139
+
140
  def _is_black_and_white(self):
141
  """
142
  Check if the image is black and white even if it has 3 channels.
 
183
 
184
  # Detect face landmarks and create masks for individual features
185
  landmarks = self.detect_face_landmarks()
186
+
187
  feature_masks = create_feature_masks(image_np, landmarks)
188
 
189
  # Subtract feature masks from face skin mask
src/segmentation_utils.py CHANGED
@@ -1,13 +1,21 @@
1
-
2
  from mediapipe.tasks import python
3
  from mediapipe.tasks.python import vision
4
  import mediapipe as mp
5
  import cv2
6
  import numpy as np
 
 
 
7
 
 
 
 
 
 
8
  # Initialize mediapipe solutions
9
- mp_face_detection = mp.solutions.face_detection
10
- mp_face_mesh = mp.solutions.face_mesh
 
11
 
12
  def detect_faces_and_landmarks(image: np.ndarray):
13
  """
@@ -15,24 +23,29 @@ def detect_faces_and_landmarks(image: np.ndarray):
15
  :param image: Input image as a numpy array.
16
  :return: List of dictionaries with face and landmark information.
17
  """
18
- with mp_face_detection.FaceDetection(model_selection=1, min_detection_confidence=0.5) as face_detection:
 
 
19
  results = face_detection.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
20
  face_data = []
21
  if results.detections:
22
  for detection in results.detections:
23
  bboxC = detection.location_data.relative_bounding_box
24
  h, w, c = image.shape
25
- bbox = int(bboxC.xmin * w), int(bboxC.ymin * h), \
26
- int(bboxC.width * w), int(bboxC.height * h)
 
 
 
 
27
  landmarks = detection.location_data.relative_keypoints
28
- face_data.append({
29
- "bbox": bbox,
30
- "landmarks": landmarks
31
- })
32
  return face_data
33
 
34
 
35
- def mediapipe_selfie_segmentor(image: np.ndarray, segment: list = ["face_skin", "body_skin", "hair"]):
 
 
36
  """
37
  Segment image using MediaPipe Multi-Class Selfie Segmentation.
38
  :param image: Input image as a numpy array.
@@ -59,27 +72,33 @@ def mediapipe_selfie_segmentor(image: np.ndarray, segment: list = ["face_skin",
59
  masks = {
60
  "face_skin_mask": np.zeros((h, w), dtype=np.uint8),
61
  "hair_mask": np.zeros((h, w), dtype=np.uint8),
62
- "body_skin_mask": np.zeros((h, w), dtype=np.uint8)
63
  }
64
 
65
  # Define class labels based on MediaPipe segmentation (example, may need adjustment)
66
  face_skin_class = 3
67
  hair_class = 1
68
  body_skin_class = 2
69
-
70
  masks["face_skin_mask"][category_mask == face_skin_class] = 255
71
  masks["hair_mask"][category_mask == hair_class] = 255
72
  masks["body_skin_mask"][category_mask == body_skin_class] = 255
73
-
74
  return masks
75
 
 
76
  def detect_face_landmarks(image: np.ndarray):
77
  """
78
  Detect face landmarks using MediaPipe Face Mesh.
79
  :param image: Input image as a numpy array.
80
  :return: Dictionary with landmarks for iris, lips, eyebrows, and eyes.
81
  """
82
- with mp_face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1, refine_landmarks=True, min_detection_confidence=0.5) as face_mesh:
 
 
 
 
 
83
  results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
84
  face_landmarks = {
85
  "left_iris": [],
@@ -88,7 +107,7 @@ def detect_face_landmarks(image: np.ndarray):
88
  "left_eyebrow": [],
89
  "right_eyebrow": [],
90
  "left_eye": [],
91
- "right_eye": []
92
  }
93
  if results.multi_face_landmarks:
94
  for face_landmarks_data in results.multi_face_landmarks:
@@ -101,7 +120,28 @@ def detect_face_landmarks(image: np.ndarray):
101
  landmark = face_landmarks_data.landmark[i]
102
  face_landmarks["right_iris"].append((landmark.x, landmark.y))
103
  # Outer lips landmarks
104
- for i in [61, 146, 91, 181, 84, 17, 314, 405, 321, 375, 291, 0, 409, 270, 269, 267, 37,39, 40, 185]:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
  landmark = face_landmarks_data.landmark[i]
106
  face_landmarks["lips"].append((landmark.x, landmark.y))
107
  # Left eyebrow landmarks
@@ -113,15 +153,50 @@ def detect_face_landmarks(image: np.ndarray):
113
  landmark = face_landmarks_data.landmark[i]
114
  face_landmarks["right_eyebrow"].append((landmark.x, landmark.y))
115
  # Left eye landmarks
116
- for i in [33, 246, 161, 160, 159, 158, 157, 173, 133, 155, 154, 153, 145, 144, 163, 7]:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
117
  landmark = face_landmarks_data.landmark[i]
118
  face_landmarks["left_eye"].append((landmark.x, landmark.y))
119
  # Right eye landmarks
120
- for i in [463, 398, 384, 385, 386, 387, 388, 466, 263, 249, 390, 373, 374, 380, 381, 382]:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
  landmark = face_landmarks_data.landmark[i]
122
  face_landmarks["right_eye"].append((landmark.x, landmark.y))
123
  return face_landmarks
124
 
 
125
  def create_feature_masks(image: np.ndarray, landmarks: dict):
126
  """
127
  Create individual masks for facial features based on landmarks.
@@ -137,15 +212,70 @@ def create_feature_masks(image: np.ndarray, landmarks: dict):
137
  "left_eye_mask": np.zeros((h, w), dtype=np.uint8),
138
  "right_eye_mask": np.zeros((h, w), dtype=np.uint8),
139
  "left_iris_mask": np.zeros((h, w), dtype=np.uint8),
140
- "right_iris_mask": np.zeros((h, w), dtype=np.uint8)
141
  }
142
 
143
  # Define the order of the points to form polygons correctly
144
- lips_order = [61, 146, 91, 181, 84, 17, 314, 405, 321, 375, 291, 0, 409, 270, 269, 267, 37,39, 40, 185]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
145
  left_eyebrow_order = [70, 63, 105, 66, 107]
146
  right_eyebrow_order = [336, 296, 334, 293, 300]
147
- left_eye_order = [33, 246, 161, 160, 159, 158, 157, 173, 133, 155, 154, 153, 145, 144, 163, 7]
148
- right_eye_order = [463, 398, 384, 385, 386, 387, 388, 466, 263, 249, 390, 373, 374, 380, 381, 382]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
  left_iris_order = [468, 469, 470, 471, 472]
150
  right_iris_order = [473, 474, 475, 476, 477]
151
 
@@ -156,16 +286,36 @@ def create_feature_masks(image: np.ndarray, landmarks: dict):
156
  "left_eye": left_eye_order,
157
  "right_eye": right_eye_order,
158
  "left_iris": left_iris_order,
159
- "right_iris": right_iris_order
160
  }
161
 
162
  for feature, order in orders.items():
163
- points = np.array([(int(landmarks[feature][i][0] * w), int(landmarks[feature][i][1] * h)) for i in range(len(order))], dtype=np.int32)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
164
  if len(points) > 0:
165
  cv2.fillPoly(masks[f"{feature}_mask"], [points], 255)
166
 
167
  return masks
168
 
 
169
  if __name__ == "__main__":
170
  # Test the face detection and segmentation
171
  image = cv2.imread("inputs/vanika.png")
@@ -183,4 +333,4 @@ if __name__ == "__main__":
183
  if "iris_mask" in feature:
184
  cv2.imwrite(f"outputs/{feature}.png", feature_mask)
185
  mask = cv2.subtract(mask, feature_mask)
186
- cv2.imwrite(f"outputs/{key}.png", mask)
 
 
1
  from mediapipe.tasks import python
2
  from mediapipe.tasks.python import vision
3
  import mediapipe as mp
4
  import cv2
5
  import numpy as np
6
+ import warnings
7
+ import os
8
+ import logging
9
 
10
+ # Suppress INFO and WARNING logs from MediaPipe
11
+ logging.getLogger("mediapipe").setLevel(logging.ERROR)
12
+ # Suppress INFO and WARNING logs
13
+ os.environ["GLOG_minloglevel"] = "2" # 2 means only ERROR and FATAL logs
14
+ os.environ["GLOG_logtostderr"] = "1"
15
  # Initialize mediapipe solutions
16
+ mp_face_detection = mp.solutions.face_detection # type: ignore
17
+ mp_face_mesh = mp.solutions.face_mesh # type: ignore
18
+
19
 
20
  def detect_faces_and_landmarks(image: np.ndarray):
21
  """
 
23
  :param image: Input image as a numpy array.
24
  :return: List of dictionaries with face and landmark information.
25
  """
26
+ with mp_face_detection.FaceDetection(
27
+ model_selection=1, min_detection_confidence=0.5
28
+ ) as face_detection:
29
  results = face_detection.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
30
  face_data = []
31
  if results.detections:
32
  for detection in results.detections:
33
  bboxC = detection.location_data.relative_bounding_box
34
  h, w, c = image.shape
35
+ bbox = (
36
+ int(bboxC.xmin * w),
37
+ int(bboxC.ymin * h),
38
+ int(bboxC.width * w),
39
+ int(bboxC.height * h),
40
+ )
41
  landmarks = detection.location_data.relative_keypoints
42
+ face_data.append({"bbox": bbox, "landmarks": landmarks})
 
 
 
43
  return face_data
44
 
45
 
46
+ def mediapipe_selfie_segmentor(
47
+ image: np.ndarray, segment: list = ["face_skin", "body_skin", "hair"]
48
+ ):
49
  """
50
  Segment image using MediaPipe Multi-Class Selfie Segmentation.
51
  :param image: Input image as a numpy array.
 
72
  masks = {
73
  "face_skin_mask": np.zeros((h, w), dtype=np.uint8),
74
  "hair_mask": np.zeros((h, w), dtype=np.uint8),
75
+ "body_skin_mask": np.zeros((h, w), dtype=np.uint8),
76
  }
77
 
78
  # Define class labels based on MediaPipe segmentation (example, may need adjustment)
79
  face_skin_class = 3
80
  hair_class = 1
81
  body_skin_class = 2
82
+
83
  masks["face_skin_mask"][category_mask == face_skin_class] = 255
84
  masks["hair_mask"][category_mask == hair_class] = 255
85
  masks["body_skin_mask"][category_mask == body_skin_class] = 255
86
+
87
  return masks
88
 
89
+
90
  def detect_face_landmarks(image: np.ndarray):
91
  """
92
  Detect face landmarks using MediaPipe Face Mesh.
93
  :param image: Input image as a numpy array.
94
  :return: Dictionary with landmarks for iris, lips, eyebrows, and eyes.
95
  """
96
+ with mp_face_mesh.FaceMesh(
97
+ static_image_mode=True,
98
+ max_num_faces=1,
99
+ refine_landmarks=True,
100
+ min_detection_confidence=0.5,
101
+ ) as face_mesh:
102
  results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
103
  face_landmarks = {
104
  "left_iris": [],
 
107
  "left_eyebrow": [],
108
  "right_eyebrow": [],
109
  "left_eye": [],
110
+ "right_eye": [],
111
  }
112
  if results.multi_face_landmarks:
113
  for face_landmarks_data in results.multi_face_landmarks:
 
120
  landmark = face_landmarks_data.landmark[i]
121
  face_landmarks["right_iris"].append((landmark.x, landmark.y))
122
  # Outer lips landmarks
123
+ for i in [
124
+ 61,
125
+ 146,
126
+ 91,
127
+ 181,
128
+ 84,
129
+ 17,
130
+ 314,
131
+ 405,
132
+ 321,
133
+ 375,
134
+ 291,
135
+ 0,
136
+ 409,
137
+ 270,
138
+ 269,
139
+ 267,
140
+ 37,
141
+ 39,
142
+ 40,
143
+ 185,
144
+ ]:
145
  landmark = face_landmarks_data.landmark[i]
146
  face_landmarks["lips"].append((landmark.x, landmark.y))
147
  # Left eyebrow landmarks
 
153
  landmark = face_landmarks_data.landmark[i]
154
  face_landmarks["right_eyebrow"].append((landmark.x, landmark.y))
155
  # Left eye landmarks
156
+ for i in [
157
+ 33,
158
+ 246,
159
+ 161,
160
+ 160,
161
+ 159,
162
+ 158,
163
+ 157,
164
+ 173,
165
+ 133,
166
+ 155,
167
+ 154,
168
+ 153,
169
+ 145,
170
+ 144,
171
+ 163,
172
+ 7,
173
+ ]:
174
  landmark = face_landmarks_data.landmark[i]
175
  face_landmarks["left_eye"].append((landmark.x, landmark.y))
176
  # Right eye landmarks
177
+ for i in [
178
+ 463,
179
+ 398,
180
+ 384,
181
+ 385,
182
+ 386,
183
+ 387,
184
+ 388,
185
+ 466,
186
+ 263,
187
+ 249,
188
+ 390,
189
+ 373,
190
+ 374,
191
+ 380,
192
+ 381,
193
+ 382,
194
+ ]:
195
  landmark = face_landmarks_data.landmark[i]
196
  face_landmarks["right_eye"].append((landmark.x, landmark.y))
197
  return face_landmarks
198
 
199
+
200
  def create_feature_masks(image: np.ndarray, landmarks: dict):
201
  """
202
  Create individual masks for facial features based on landmarks.
 
212
  "left_eye_mask": np.zeros((h, w), dtype=np.uint8),
213
  "right_eye_mask": np.zeros((h, w), dtype=np.uint8),
214
  "left_iris_mask": np.zeros((h, w), dtype=np.uint8),
215
+ "right_iris_mask": np.zeros((h, w), dtype=np.uint8),
216
  }
217
 
218
  # Define the order of the points to form polygons correctly
219
+ lips_order = [
220
+ 61,
221
+ 146,
222
+ 91,
223
+ 181,
224
+ 84,
225
+ 17,
226
+ 314,
227
+ 405,
228
+ 321,
229
+ 375,
230
+ 291,
231
+ 0,
232
+ 409,
233
+ 270,
234
+ 269,
235
+ 267,
236
+ 37,
237
+ 39,
238
+ 40,
239
+ 185,
240
+ ]
241
  left_eyebrow_order = [70, 63, 105, 66, 107]
242
  right_eyebrow_order = [336, 296, 334, 293, 300]
243
+ left_eye_order = [
244
+ 33,
245
+ 246,
246
+ 161,
247
+ 160,
248
+ 159,
249
+ 158,
250
+ 157,
251
+ 173,
252
+ 133,
253
+ 155,
254
+ 154,
255
+ 153,
256
+ 145,
257
+ 144,
258
+ 163,
259
+ 7,
260
+ ]
261
+ right_eye_order = [
262
+ 463,
263
+ 398,
264
+ 384,
265
+ 385,
266
+ 386,
267
+ 387,
268
+ 388,
269
+ 466,
270
+ 263,
271
+ 249,
272
+ 390,
273
+ 373,
274
+ 374,
275
+ 380,
276
+ 381,
277
+ 382,
278
+ ]
279
  left_iris_order = [468, 469, 470, 471, 472]
280
  right_iris_order = [473, 474, 475, 476, 477]
281
 
 
286
  "left_eye": left_eye_order,
287
  "right_eye": right_eye_order,
288
  "left_iris": left_iris_order,
289
+ "right_iris": right_iris_order,
290
  }
291
 
292
  for feature, order in orders.items():
293
+ points = []
294
+
295
+ for i in range(len(order)):
296
+ try:
297
+ point = (
298
+ int(landmarks[feature][i][0] * w),
299
+ int(landmarks[feature][i][1] * h),
300
+ )
301
+ points.append(point)
302
+ except KeyError:
303
+ warnings.warn(
304
+ f"Feature '{feature}' at index {i} is not present in landmarks. Skipping this point."
305
+ )
306
+ except IndexError:
307
+ warnings.warn(
308
+ f"Index {i} is out of range for feature '{feature}'. Skipping this point."
309
+ )
310
+
311
+ points = np.array(points, dtype=np.int32)
312
+
313
  if len(points) > 0:
314
  cv2.fillPoly(masks[f"{feature}_mask"], [points], 255)
315
 
316
  return masks
317
 
318
+
319
  if __name__ == "__main__":
320
  # Test the face detection and segmentation
321
  image = cv2.imread("inputs/vanika.png")
 
333
  if "iris_mask" in feature:
334
  cv2.imwrite(f"outputs/{feature}.png", feature_mask)
335
  mask = cv2.subtract(mask, feature_mask)
336
+ cv2.imwrite(f"outputs/{key}.png", mask)
src/skin_analyzer.py CHANGED
@@ -1,4 +1,6 @@
 
1
  import cv2
 
2
  import numpy as np
3
  from colormath.color_objects import LabColor, sRGBColor
4
  from colormath.color_conversions import convert_color
@@ -38,7 +40,26 @@ def sample_skin_pixels(image, mask, l_min_percentile=10, l_max_percentile=90):
38
  mask_indices = np.where(mask > 0)
39
  filtered_mask[mask_indices[0][mask_l], mask_indices[1][mask_l]] = 255
40
 
41
- return filtered_lab_pixels, filtered_mask
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
 
43
 
44
  def rgb_to_lab_and_save(image, output_dir="workspace"):
@@ -269,10 +290,41 @@ def categorize_tonality(L_values, l_min_tonality, l_max_tonality):
269
  tonality_counts = {"Light": light_count, "True": true_count, "Deep": deep_count}
270
  predominant_tonality = max(tonality_counts, key=tonality_counts.get)
271
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
272
  return (
273
  tonality_counts,
274
  predominant_tonality,
275
  tonality,
 
276
  )
277
 
278
 
@@ -299,11 +351,18 @@ def categorize_chroma(lab_pixels, chroma_thresh):
299
  :return: Chroma category (Bright, Muted).
300
  """
301
  chroma = []
 
302
  chroma_counts = {"Bright": 0, "Muted": 0}
 
303
  for pixel in lab_pixels:
304
  a = pixel[1]
305
  b = pixel[2]
306
- distance = np.sqrt(a**2 + b**2)
 
 
 
 
 
307
  if distance > chroma_thresh:
308
  chroma.append("Bright")
309
  chroma_counts["Bright"] += 1
@@ -312,7 +371,29 @@ def categorize_chroma(lab_pixels, chroma_thresh):
312
  chroma_counts["Muted"] += 1
313
 
314
  predominant_chroma = max(chroma_counts, key=chroma_counts.get)
315
- return chroma_counts, predominant_chroma, chroma
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
316
 
317
 
318
  def categorize_undertones(cluster_centers):
@@ -370,7 +451,7 @@ def analyze_skin_function(
370
  """
371
 
372
  # sapmle skin pixels
373
- lab_pixels, filtered_skin_mask = sample_skin_pixels(
374
  image, skin_mask, l_min_skin, l_max_skin
375
  )
376
 
@@ -387,12 +468,12 @@ def analyze_skin_function(
387
  predominant_overtone,
388
  overtone,
389
  ) = categorize_overtone(ITA_values)
390
- tonality_counts, predominant_tonality, tonalities = categorize_tonality(
391
- L_values, l_min_tonality, l_max_tonality
392
  )
393
 
394
  # calculate chroma
395
- chroma_counts, predominant_chroma, chroma = categorize_chroma(
396
  lab_pixels, chroma_thresh
397
  )
398
 
@@ -435,20 +516,6 @@ def analyze_skin_function(
435
  # else:
436
  # neutral_mask[cluster_indices[0][cluster_mask], cluster_indices[1][cluster_mask]] = 255
437
 
438
- # # Create overlays
439
- # overlay = image.copy()
440
- # overlay[filtered_skin_mask > 0] = (0, 0, 255) # Red for skin
441
- # # overlay[cool_mask > 0] = (255, 0, 0) # Blue for cool
442
- # # overlay[neutral_mask > 0] = (0, 255, 0) # Green for neutral
443
- # overlay = cv2.addWeighted(image, 0.85, overlay, 0.15, 0)
444
- # # resize overlay to longest side 512
445
- # h, w, _ = overlay.shape
446
- # if h > w:
447
- # scale = 512 / h
448
- # else:
449
- # scale = 512 / w
450
- # overlay = cv2.resize(overlay, (int(w * scale), int(h * scale)))
451
-
452
  return {
453
  "undertone": predominant_undertone,
454
  "overtone": predominant_overtone,
@@ -464,4 +531,7 @@ def analyze_skin_function(
464
  "overtone_counts": overtone_counts,
465
  "tonality_counts": tonality_counts,
466
  "season_counts": season_counts,
 
 
 
467
  }
 
1
+ import io
2
  import cv2
3
+ from matplotlib import pyplot as plt
4
  import numpy as np
5
  from colormath.color_objects import LabColor, sRGBColor
6
  from colormath.color_conversions import convert_color
 
40
  mask_indices = np.where(mask > 0)
41
  filtered_mask[mask_indices[0][mask_l], mask_indices[1][mask_l]] = 255
42
 
43
+ fig, ax = plt.subplots(1, 1, figsize=(6, 6))
44
+ # Plot the histogram of L channel in the second subplot
45
+ ax[0].hist(l_values, bins=100, color="blue", alpha=0.75)
46
+ ax[0].axvline(l_min, color="red", linestyle="--", label="10th percentile")
47
+ ax[0].axvline(l_max, color="green", linestyle="--", label="90th percentile")
48
+ ax[0].set_xlabel("L* Value")
49
+ ax[0].set_ylabel("Frequency")
50
+ ax[0].set_title("Histogram of L* Values in Skin Mask")
51
+ ax[0].legend()
52
+
53
+ # Save the plot to a file-like object
54
+ buf = io.BytesIO()
55
+ plt.savefig(buf, format="png")
56
+ plt.close(fig)
57
+ buf.seek(0)
58
+ # Convert the buffer to a PIL Image and then to a NumPy array
59
+ image = Image.open(buf)
60
+ l_hist = np.array(image)
61
+
62
+ return filtered_lab_pixels, filtered_mask, l_hist
63
 
64
 
65
  def rgb_to_lab_and_save(image, output_dir="workspace"):
 
290
  tonality_counts = {"Light": light_count, "True": true_count, "Deep": deep_count}
291
  predominant_tonality = max(tonality_counts, key=tonality_counts.get)
292
 
293
+ fig, ax = plt.subplots(1, 1, figsize=(6, 6))
294
+
295
+ # Plot the histogram of filtered L channel in the third subplot
296
+ ax[0].hist(L_values, bins=100, color="blue", alpha=0.75)
297
+ ax[0].axvline(
298
+ l_abs_min,
299
+ color="purple",
300
+ linestyle="--",
301
+ label="lower percentile",
302
+ )
303
+ ax[0].axvline(
304
+ l_abs_max,
305
+ color="orange",
306
+ linestyle="--",
307
+ label="higher percentile",
308
+ )
309
+ ax[0].set_xlabel("L* Value")
310
+ ax[0].set_ylabel("Frequency")
311
+ ax[0].set_title("Histogram of Filtered L* Values in Skin Mask")
312
+ ax[0].legend()
313
+
314
+ # Save the plot to a file-like object
315
+ buf = io.BytesIO()
316
+ plt.savefig(buf, format="png")
317
+ plt.close(fig)
318
+ buf.seek(0)
319
+ # Convert the buffer to a PIL Image and then to a NumPy array
320
+ image = Image.open(buf)
321
+ tonality_l_hist = np.array(image)
322
+
323
  return (
324
  tonality_counts,
325
  predominant_tonality,
326
  tonality,
327
+ tonality_l_hist,
328
  )
329
 
330
 
 
351
  :return: Chroma category (Bright, Muted).
352
  """
353
  chroma = []
354
+ distances = []
355
  chroma_counts = {"Bright": 0, "Muted": 0}
356
+
357
  for pixel in lab_pixels:
358
  a = pixel[1]
359
  b = pixel[2]
360
+ distances.append(np.sqrt(a**2 + b**2))
361
+
362
+ # get the 50th percentile of the distance
363
+ chroma_thresh = np.percentile(distances, chroma_thresh)
364
+ print(f"chroma_thresh: {chroma_thresh}")
365
+ for distance in distances:
366
  if distance > chroma_thresh:
367
  chroma.append("Bright")
368
  chroma_counts["Bright"] += 1
 
371
  chroma_counts["Muted"] += 1
372
 
373
  predominant_chroma = max(chroma_counts, key=chroma_counts.get)
374
+
375
+ fig, ax = plt.subplots(1, 1, figsize=(6, 6))
376
+ ax[0].hist(distances, bins=100, color="blue", alpha=0.75)
377
+ ax[0].set_xlabel("Chroma Value")
378
+ ax[0].set_ylabel("Frequency")
379
+ ax[0].set_title("Histogram of Chroma Values in Skin Mask")
380
+ ax[0].axvline(
381
+ chroma_thresh,
382
+ color="red",
383
+ linestyle="--",
384
+ label="Threshold Value",
385
+ )
386
+
387
+ # Save the plot to a file-like object
388
+ buf = io.BytesIO()
389
+ plt.savefig(buf, format="png")
390
+ plt.close(fig)
391
+ buf.seek(0)
392
+ # Convert the buffer to a PIL Image and then to a NumPy array
393
+ image = Image.open(buf)
394
+ chorma_hist = np.array(image)
395
+
396
+ return chroma_counts, predominant_chroma, chroma, chorma_hist
397
 
398
 
399
  def categorize_undertones(cluster_centers):
 
451
  """
452
 
453
  # sapmle skin pixels
454
+ lab_pixels, filtered_skin_mask, l_hist = sample_skin_pixels(
455
  image, skin_mask, l_min_skin, l_max_skin
456
  )
457
 
 
468
  predominant_overtone,
469
  overtone,
470
  ) = categorize_overtone(ITA_values)
471
+ tonality_counts, predominant_tonality, tonalities, tonality_l_hist = (
472
+ categorize_tonality(L_values, l_min_tonality, l_max_tonality)
473
  )
474
 
475
  # calculate chroma
476
+ chroma_counts, predominant_chroma, chroma, chorma_hist = categorize_chroma(
477
  lab_pixels, chroma_thresh
478
  )
479
 
 
516
  # else:
517
  # neutral_mask[cluster_indices[0][cluster_mask], cluster_indices[1][cluster_mask]] = 255
518
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
519
  return {
520
  "undertone": predominant_undertone,
521
  "overtone": predominant_overtone,
 
531
  "overtone_counts": overtone_counts,
532
  "tonality_counts": tonality_counts,
533
  "season_counts": season_counts,
534
+ "l_hist": l_hist,
535
+ "tonality_l_hist": tonality_l_hist,
536
+ "chorma_hist": chorma_hist,
537
  }