HardikUppal committed on
Commit
d22f22b
·
1 Parent(s): ac07032

fixes to histograms

Browse files
Files changed (4) hide show
  1. .vscode/settings.json +1 -1
  2. app.py +151 -145
  3. requirements.txt +1 -0
  4. src/skin_analyzer.py +52 -52
.vscode/settings.json CHANGED
@@ -1,3 +1,3 @@
1
  {
2
- "python.analysis.typeCheckingMode": "basic"
3
  }
 
1
  {
2
+ "python.analysis.typeCheckingMode": "off"
3
  }
app.py CHANGED
@@ -5,6 +5,50 @@ from PIL import Image
5
 
6
  from src.skin_analyzer import analyze_skin_function
7
  from src.image import ImageBundle
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
 
10
  def process_image(
@@ -57,16 +101,30 @@ def process_image(
57
  if "filtered_skin_mask" in skin_analysis:
58
  filtered_skin_mask = skin_analysis["filtered_skin_mask"]
59
  del skin_analysis["filtered_skin_mask"]
60
- elif "l_hist" in skin_analysis:
61
  l_hist = skin_analysis["l_hist"]
62
  del skin_analysis["l_hist"]
63
- elif "tonality_l_hist" in skin_analysis:
64
  tonality_l_hist = skin_analysis["tonality_l_hist"]
65
  del skin_analysis["tonality_l_hist"]
66
- elif "chroma_hist" in skin_analysis:
67
  chroma_hist = skin_analysis["chroma_hist"]
68
  del skin_analysis["chroma_hist"]
69
- analysis_results["skin_analysis"] = skin_analysis
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
 
71
  # overlay_images.append(skin_analysis["overlay_image"])
72
 
@@ -78,17 +136,44 @@ def process_image(
78
  # Combine overlay images
79
  overlay = image.copy()
80
  overlay[filtered_skin_mask > 0] = (0, 0, 255) # Red for skin
81
- # overlay[cool_mask > 0] = (255, 0, 0) # Blue for cool
82
- # overlay[neutral_mask > 0] = (0, 255, 0) # Green for neutral
83
  overlay = cv2.addWeighted(image, 0.85, overlay, 0.15, 0)
84
 
85
  # Convert combined_overlay to PIL Image for display
86
  combined_overlay = Image.fromarray(overlay)
 
 
 
 
 
 
 
 
 
 
87
 
88
- return combined_overlay, analysis_results, l_hist, tonality_l_hist, chroma_hist
 
 
 
 
 
 
 
 
 
 
 
89
 
90
 
91
  with gr.Blocks() as demo:
 
 
 
 
 
 
 
92
  with gr.Row():
93
  with gr.Column():
94
  upload_image = gr.Image(type="numpy", label="Upload an Image")
@@ -96,163 +181,84 @@ with gr.Blocks() as demo:
96
  l_min_slider = gr.Slider(
97
  minimum=0, maximum=100, value=10, label="L(%) Min Skin"
98
  )
99
- l_hist_output = gr.Image(type="pil", label="L Histogram")
100
- with gr.Column():
101
  l_max_slider = gr.Slider(
102
  minimum=0, maximum=100, value=90, label="L(%) Max Skin"
103
  )
 
 
 
 
 
 
 
 
 
104
  with gr.Column():
105
  tonality_min_slider = gr.Slider(
106
  minimum=0, maximum=100, value=50, label="L(%) Min Tonality"
107
  )
108
- tonality_hist_output = gr.Image(type="pil", label="Tonality L Histogram")
109
- with gr.Column():
110
  tonality_max_slider = gr.Slider(
111
  minimum=0, maximum=100, value=70, label="L(%) Max Tonality"
112
  )
 
 
 
 
 
 
 
 
113
  with gr.Column():
114
  chroma_slider = gr.Slider(
115
  minimum=0, maximum=100, value=50, label="Chroma(%) Threshold"
116
  )
117
- chroma_hist_output = gr.Image(type="pil", label="Chroma Histogram")
 
 
 
118
  with gr.Row():
119
- skin_checkbox = gr.Checkbox(label="Skin Analysis", value=True)
120
- eye_checkbox = gr.Checkbox(label="Eye Analysis", value=False)
121
- hair_checkbox = gr.Checkbox(label="Hair Analysis", value=False)
122
- analysis_results_output = gr.JSON(label="Analysis Results")
123
- processed_image_output = gr.Image(type="pil", label="Processed Image")
124
-
125
- gr.Interface(
126
- fn=process_image,
127
- inputs=[
128
- upload_image,
129
- l_min_slider,
130
- l_max_slider,
131
- tonality_min_slider,
132
- tonality_max_slider,
133
- chroma_slider,
134
- skin_checkbox,
135
- eye_checkbox,
136
- hair_checkbox,
137
- ],
138
- outputs=[
139
- processed_image_output,
140
- l_hist_output,
141
- tonality_hist_output,
142
- chroma_hist_output,
143
- analysis_results_output,
144
- ],
145
- )
146
 
147
- # Set up change event triggers for the sliders
148
- l_min_slider.change(
149
- process_image,
150
- inputs=[
151
- upload_image,
152
- l_min_slider,
153
- l_max_slider,
154
- tonality_min_slider,
155
- tonality_max_slider,
156
- chroma_slider,
157
- skin_checkbox,
158
- eye_checkbox,
159
- hair_checkbox,
160
- ],
161
- outputs=[
162
- processed_image_output,
163
- l_hist_output,
164
- tonality_hist_output,
165
- chroma_hist_output,
166
- analysis_results_output,
167
- ],
168
- )
169
 
170
- l_max_slider.change(
171
- process_image,
172
- inputs=[
173
- upload_image,
174
- l_min_slider,
175
- l_max_slider,
176
- tonality_min_slider,
177
- tonality_max_slider,
178
- chroma_slider,
179
- skin_checkbox,
180
- eye_checkbox,
181
- hair_checkbox,
182
- ],
183
- outputs=[
184
- processed_image_output,
185
- l_hist_output,
186
- tonality_hist_output,
187
- chroma_hist_output,
188
- analysis_results_output,
189
- ],
190
- )
191
 
192
- tonality_min_slider.change(
193
- process_image,
194
- inputs=[
195
- upload_image,
196
- l_min_slider,
197
- l_max_slider,
198
- tonality_min_slider,
199
- tonality_max_slider,
200
- chroma_slider,
201
- skin_checkbox,
202
- eye_checkbox,
203
- hair_checkbox,
204
- ],
205
- outputs=[
206
- processed_image_output,
207
- l_hist_output,
208
- tonality_hist_output,
209
- chroma_hist_output,
210
- analysis_results_output,
211
- ],
212
- )
213
 
214
- tonality_max_slider.change(
215
- process_image,
216
- inputs=[
217
- upload_image,
218
- l_min_slider,
219
- l_max_slider,
220
- tonality_min_slider,
221
- tonality_max_slider,
222
- chroma_slider,
223
- skin_checkbox,
224
- eye_checkbox,
225
- hair_checkbox,
226
- ],
227
- outputs=[
228
- processed_image_output,
229
- l_hist_output,
230
- tonality_hist_output,
231
- chroma_hist_output,
232
- analysis_results_output,
233
- ],
234
- )
235
 
236
- chroma_slider.change(
237
- process_image,
238
- inputs=[
239
- upload_image,
240
- l_min_slider,
241
- l_max_slider,
242
- tonality_min_slider,
243
- tonality_max_slider,
244
- chroma_slider,
245
- skin_checkbox,
246
- eye_checkbox,
247
- hair_checkbox,
248
- ],
249
- outputs=[
250
- processed_image_output,
251
- l_hist_output,
252
- tonality_hist_output,
253
- chroma_hist_output,
254
- analysis_results_output,
255
- ],
256
- )
257
 
258
  demo.launch()
 
5
 
6
  from src.skin_analyzer import analyze_skin_function
7
  from src.image import ImageBundle
8
+ import plotly.express as px
9
+ import plotly.graph_objects as go
10
+
11
+ import matplotlib.pyplot as plt
12
+ import io
13
+
14
+
15
+ def resize_image(image, height=512):
16
+ # Calculate the new width to maintain the aspect ratio
17
+ aspect_ratio = image.width / image.height
18
+ new_width = int(height * aspect_ratio)
19
+ return image.resize((new_width, height), Image.Resampling.LANCZOS)
20
+
21
+
22
+ def create_histogram(data, title):
23
+ fig = px.histogram(data, nbins=30, title=title)
24
+ fig.update_layout(title=dict(y=0.9))
25
+ fig.update_xaxes(title_text="Value")
26
+ fig.update_yaxes(title_text="Frequency")
27
+ return fig
28
+
29
+
30
+ # Function to create pie chart using Plotly
31
+ def create_pie_chart(data, title):
32
+ fig = px.pie(values=data.values(), names=data.keys(), title=title)
33
+ fig.update_layout(title=dict(y=0.9))
34
+ return fig
35
+
36
+
37
+ # Function to create bar chart using Plotly
38
+ def create_bar_chart(data, title, top_n=None):
39
+ sorted_data = dict(sorted(data.items(), key=lambda item: item[1], reverse=True))
40
+ colors = (
41
+ ["red" if i < top_n else "blue" for i in range(len(sorted_data))]
42
+ if top_n and top_n < len(sorted_data)
43
+ else "blue"
44
+ )
45
+ fig = px.bar(x=list(sorted_data.keys()), y=list(sorted_data.values()), title=title)
46
+ fig.update_traces(marker_color=colors)
47
+ fig.update_layout(
48
+ title=dict(y=0.9), xaxis_title=None, yaxis_title="Counts", showlegend=False
49
+ )
50
+ fig.update_xaxes(tickangle=45)
51
+ return fig
52
 
53
 
54
  def process_image(
 
101
  if "filtered_skin_mask" in skin_analysis:
102
  filtered_skin_mask = skin_analysis["filtered_skin_mask"]
103
  del skin_analysis["filtered_skin_mask"]
104
+ if "l_hist" in skin_analysis:
105
  l_hist = skin_analysis["l_hist"]
106
  del skin_analysis["l_hist"]
107
+ if "tonality_l_hist" in skin_analysis:
108
  tonality_l_hist = skin_analysis["tonality_l_hist"]
109
  del skin_analysis["tonality_l_hist"]
110
+ if "chroma_hist" in skin_analysis:
111
  chroma_hist = skin_analysis["chroma_hist"]
112
  del skin_analysis["chroma_hist"]
113
+ # analysis_results["skin_analysis"] = skin_analysis
114
+ # Create bar charts for analysis results
115
+ chroma_chart = create_pie_chart(skin_analysis["chroma_counts"], "Chroma Counts")
116
+ undertone_chart = create_pie_chart(
117
+ skin_analysis["undertone_counts"], "Undertone Counts"
118
+ )
119
+ overtone_chart = create_bar_chart(
120
+ skin_analysis["overtone_counts"], "Overtone Counts", 1
121
+ )
122
+ tonality_chart = create_pie_chart(
123
+ skin_analysis["tonality_counts"], "Tonality Counts"
124
+ )
125
+ season_chart = create_bar_chart(
126
+ skin_analysis["season_counts"], "Season Counts", 3
127
+ )
128
 
129
  # overlay_images.append(skin_analysis["overlay_image"])
130
 
 
136
  # Combine overlay images
137
  overlay = image.copy()
138
  overlay[filtered_skin_mask > 0] = (0, 0, 255) # Red for skin
139
+
 
140
  overlay = cv2.addWeighted(image, 0.85, overlay, 0.15, 0)
141
 
142
  # Convert combined_overlay to PIL Image for display
143
  combined_overlay = Image.fromarray(overlay)
144
+ # Resize images before returning
145
+ combined_overlay = resize_image(combined_overlay)
146
+ # l_hist = resize_image(l_hist)
147
+ # tonality_l_hist = resize_image(tonality_l_hist)
148
+ # chroma_hist = resize_image(chroma_hist)
149
+ # chroma_chart = resize_image(chroma_chart)
150
+ # undertone_chart = resize_image(undertone_chart)
151
+ # overtone_chart = resize_image(overtone_chart)
152
+ # tonality_chart = resize_image(tonality_chart)
153
+ # season_chart = resize_image(season_chart)
154
 
155
+ return (
156
+ combined_overlay,
157
+ l_hist,
158
+ tonality_l_hist,
159
+ chroma_hist,
160
+ # analysis_results,
161
+ chroma_chart,
162
+ undertone_chart,
163
+ overtone_chart,
164
+ tonality_chart,
165
+ season_chart,
166
+ )
167
 
168
 
169
  with gr.Blocks() as demo:
170
+ with gr.Row():
171
+ with gr.Column():
172
+ skin_checkbox = gr.Checkbox(label="Skin Analysis", value=True)
173
+ submit_button = gr.Button("Submit")
174
+ with gr.Column():
175
+ eye_checkbox = gr.Checkbox(label="Eye Analysis", value=False)
176
+ hair_checkbox = gr.Checkbox(label="Hair Analysis", value=False)
177
  with gr.Row():
178
  with gr.Column():
179
  upload_image = gr.Image(type="numpy", label="Upload an Image")
 
181
  l_min_slider = gr.Slider(
182
  minimum=0, maximum=100, value=10, label="L(%) Min Skin"
183
  )
 
 
184
  l_max_slider = gr.Slider(
185
  minimum=0, maximum=100, value=90, label="L(%) Max Skin"
186
  )
187
+ l_hist_output = gr.Plot(label="L Histogram")
188
+
189
+ # processed_image_output = gr.Image(type="pil", label="Processed Image")
190
+ with gr.Row():
191
+ with gr.Column():
192
+ processed_image_output = gr.Image(type="pil", label="Processed Image")
193
+ with gr.Column():
194
+ undertone_chart_output = gr.Plot(label="Undertone Counts")
195
+ with gr.Row():
196
  with gr.Column():
197
  tonality_min_slider = gr.Slider(
198
  minimum=0, maximum=100, value=50, label="L(%) Min Tonality"
199
  )
 
 
200
  tonality_max_slider = gr.Slider(
201
  minimum=0, maximum=100, value=70, label="L(%) Max Tonality"
202
  )
203
+ tonality_hist_output = gr.Plot(label="Tonality L Histogram")
204
+ with gr.Column():
205
+ tonality_chart_output = gr.Plot(label="Tonality Counts")
206
+
207
+ # with gr.Row():
208
+ # submit_button = gr.Button("Submit")
209
+
210
+ with gr.Row():
211
  with gr.Column():
212
  chroma_slider = gr.Slider(
213
  minimum=0, maximum=100, value=50, label="Chroma(%) Threshold"
214
  )
215
+ chroma_hist_output = gr.Plot(label="Chroma Histogram")
216
+ with gr.Column():
217
+ chroma_chart_output = gr.Plot(label="Chroma Counts")
218
+
219
  with gr.Row():
220
+ with gr.Column():
221
+ overtone_chart_output = gr.Plot(label="Overtone Counts")
222
+ with gr.Column():
223
+ season_chart_output = gr.Plot(label="Season Counts")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
224
 
225
+ inputs = [
226
+ upload_image,
227
+ l_min_slider,
228
+ l_max_slider,
229
+ tonality_min_slider,
230
+ tonality_max_slider,
231
+ chroma_slider,
232
+ skin_checkbox,
233
+ eye_checkbox,
234
+ hair_checkbox,
235
+ ]
236
+ outputs = [
237
+ processed_image_output,
238
+ l_hist_output,
239
+ tonality_hist_output,
240
+ chroma_hist_output,
241
+ chroma_chart_output,
242
+ undertone_chart_output,
243
+ overtone_chart_output,
244
+ tonality_chart_output,
245
+ season_chart_output,
246
+ ]
247
 
248
+ # Set up change event triggers for the sliders
249
+ l_min_slider.change(process_image, inputs=inputs, outputs=outputs)
250
+ l_max_slider.change(process_image, inputs=inputs, outputs=outputs)
251
+ tonality_min_slider.change(process_image, inputs=inputs, outputs=outputs)
252
+ tonality_max_slider.change(process_image, inputs=inputs, outputs=outputs)
253
+ chroma_slider.change(process_image, inputs=inputs, outputs=outputs)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
254
 
255
+ # upload_image.change(process_image, inputs=inputs, outputs=outputs)
256
+ skin_checkbox.change(process_image, inputs=inputs, outputs=outputs)
257
+ eye_checkbox.change(process_image, inputs=inputs, outputs=outputs)
258
+ hair_checkbox.change(process_image, inputs=inputs, outputs=outputs)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
259
 
260
+ # Link the submit button to the analyze_image function
261
+ submit_button.click(process_image, inputs=inputs, outputs=outputs)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
262
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
263
 
264
  demo.launch()
requirements.txt CHANGED
@@ -1,4 +1,5 @@
1
  absl-py==2.1.0
 
2
  aiofiles==23.2.1
3
  altair==5.3.0
4
  annotated-types==0.7.0
 
1
  absl-py==2.1.0
2
+ plotly
3
  aiofiles==23.2.1
4
  altair==5.3.0
5
  annotated-types==0.7.0
src/skin_analyzer.py CHANGED
@@ -40,26 +40,26 @@ def sample_skin_pixels(image, mask, l_min_percentile=10, l_max_percentile=90):
40
  mask_indices = np.where(mask > 0)
41
  filtered_mask[mask_indices[0][mask_l], mask_indices[1][mask_l]] = 255
42
 
43
- fig, ax = plt.subplots(1, 1, figsize=(6, 6))
44
  # Plot the histogram of L channel in the second subplot
45
- ax[0].hist(l_values, bins=100, color="blue", alpha=0.75)
46
- ax[0].axvline(l_min, color="red", linestyle="--", label="10th percentile")
47
- ax[0].axvline(l_max, color="green", linestyle="--", label="90th percentile")
48
- ax[0].set_xlabel("L* Value")
49
- ax[0].set_ylabel("Frequency")
50
- ax[0].set_title("Histogram of L* Values in Skin Mask")
51
- ax[0].legend()
52
-
53
- # Save the plot to a file-like object
54
- buf = io.BytesIO()
55
- plt.savefig(buf, format="png")
56
- plt.close(fig)
57
- buf.seek(0)
58
- # Convert the buffer to a PIL Image and then to a NumPy array
59
- image = Image.open(buf)
60
- l_hist = np.array(image)
61
-
62
- return filtered_lab_pixels, filtered_mask, l_hist
63
 
64
 
65
  def rgb_to_lab_and_save(image, output_dir="workspace"):
@@ -290,41 +290,41 @@ def categorize_tonality(L_values, l_min_tonality, l_max_tonality):
290
  tonality_counts = {"Light": light_count, "True": true_count, "Deep": deep_count}
291
  predominant_tonality = max(tonality_counts, key=tonality_counts.get)
292
 
293
- fig, ax = plt.subplots(1, 1, figsize=(6, 6))
294
 
295
  # Plot the histogram of filtered L channel in the third subplot
296
- ax[0].hist(L_values, bins=100, color="blue", alpha=0.75)
297
- ax[0].axvline(
298
  l_abs_min,
299
  color="purple",
300
  linestyle="--",
301
  label="lower percentile",
302
  )
303
- ax[0].axvline(
304
  l_abs_max,
305
  color="orange",
306
  linestyle="--",
307
  label="higher percentile",
308
  )
309
- ax[0].set_xlabel("L* Value")
310
- ax[0].set_ylabel("Frequency")
311
- ax[0].set_title("Histogram of Filtered L* Values in Skin Mask")
312
- ax[0].legend()
313
 
314
  # Save the plot to a file-like object
315
- buf = io.BytesIO()
316
- plt.savefig(buf, format="png")
317
- plt.close(fig)
318
- buf.seek(0)
319
- # Convert the buffer to a PIL Image and then to a NumPy array
320
- image = Image.open(buf)
321
- tonality_l_hist = np.array(image)
322
 
323
  return (
324
  tonality_counts,
325
  predominant_tonality,
326
  tonality,
327
- tonality_l_hist,
328
  )
329
 
330
 
@@ -372,28 +372,28 @@ def categorize_chroma(lab_pixels, chroma_thresh):
372
 
373
  predominant_chroma = max(chroma_counts, key=chroma_counts.get)
374
 
375
- fig, ax = plt.subplots(1, 1, figsize=(6, 6))
376
- ax[0].hist(distances, bins=100, color="blue", alpha=0.75)
377
- ax[0].set_xlabel("Chroma Value")
378
- ax[0].set_ylabel("Frequency")
379
- ax[0].set_title("Histogram of Chroma Values in Skin Mask")
380
- ax[0].axvline(
381
  chroma_thresh,
382
  color="red",
383
  linestyle="--",
384
  label="Threshold Value",
385
  )
386
 
387
- # Save the plot to a file-like object
388
- buf = io.BytesIO()
389
- plt.savefig(buf, format="png")
390
- plt.close(fig)
391
- buf.seek(0)
392
- # Convert the buffer to a PIL Image and then to a NumPy array
393
- image = Image.open(buf)
394
- chorma_hist = np.array(image)
395
 
396
- return chroma_counts, predominant_chroma, chroma, chorma_hist
397
 
398
 
399
  def categorize_undertones(cluster_centers):
@@ -473,7 +473,7 @@ def analyze_skin_function(
473
  )
474
 
475
  # calculate chroma
476
- chroma_counts, predominant_chroma, chroma, chorma_hist = categorize_chroma(
477
  lab_pixels, chroma_thresh
478
  )
479
 
@@ -533,5 +533,5 @@ def analyze_skin_function(
533
  "season_counts": season_counts,
534
  "l_hist": l_hist,
535
  "tonality_l_hist": tonality_l_hist,
536
- "chorma_hist": chorma_hist,
537
  }
 
40
  mask_indices = np.where(mask > 0)
41
  filtered_mask[mask_indices[0][mask_l], mask_indices[1][mask_l]] = 255
42
 
43
+ fig, ax = plt.subplots()
44
  # Plot the histogram of L channel in the second subplot
45
+ ax.hist(l_values, bins=100, color="blue", alpha=0.75)
46
+ ax.axvline(l_min, color="red", linestyle="--", label="10th percentile")
47
+ ax.axvline(l_max, color="green", linestyle="--", label="90th percentile")
48
+ ax.set_xlabel("L* Value")
49
+ ax.set_ylabel("Frequency")
50
+ ax.set_title("Histogram of L* Values in Skin Mask")
51
+ ax.legend()
52
+
53
+ # # Save the plot to a file-like object
54
+ # buf = io.BytesIO()
55
+ # plt.savefig(buf, format="png")
56
+ # plt.close(fig)
57
+ # buf.seek(0)
58
+ # # Convert the buffer to a PIL Image and then to a NumPy array
59
+ # image = Image.open(buf)
60
+ # # l_hist = np.array(image)
61
+
62
+ return filtered_lab_pixels, filtered_mask, fig
63
 
64
 
65
  def rgb_to_lab_and_save(image, output_dir="workspace"):
 
290
  tonality_counts = {"Light": light_count, "True": true_count, "Deep": deep_count}
291
  predominant_tonality = max(tonality_counts, key=tonality_counts.get)
292
 
293
+ fig, ax = plt.subplots()
294
 
295
  # Plot the histogram of filtered L channel in the third subplot
296
+ ax.hist(L_values, bins=100, color="blue", alpha=0.75)
297
+ ax.axvline(
298
  l_abs_min,
299
  color="purple",
300
  linestyle="--",
301
  label="lower percentile",
302
  )
303
+ ax.axvline(
304
  l_abs_max,
305
  color="orange",
306
  linestyle="--",
307
  label="higher percentile",
308
  )
309
+ ax.set_xlabel("L* Value")
310
+ ax.set_ylabel("Frequency")
311
+ ax.set_title("Histogram of Filtered L* Values in Skin Mask")
312
+ ax.legend()
313
 
314
  # Save the plot to a file-like object
315
+ # buf = io.BytesIO()
316
+ # plt.savefig(buf, format="png")
317
+ # plt.close(fig)
318
+ # buf.seek(0)
319
+ # # Convert the buffer to a PIL Image and then to a NumPy array
320
+ # image = Image.open(buf)
321
+ # tonality_l_hist = np.array(image)
322
 
323
  return (
324
  tonality_counts,
325
  predominant_tonality,
326
  tonality,
327
+ fig,
328
  )
329
 
330
 
 
372
 
373
  predominant_chroma = max(chroma_counts, key=chroma_counts.get)
374
 
375
+ fig, ax = plt.subplots()
376
+ ax.hist(distances, bins=100, color="blue", alpha=0.75)
377
+ ax.set_xlabel("Chroma Value")
378
+ ax.set_ylabel("Frequency")
379
+ ax.set_title("Histogram of Chroma Values in Skin Mask")
380
+ ax.axvline(
381
  chroma_thresh,
382
  color="red",
383
  linestyle="--",
384
  label="Threshold Value",
385
  )
386
 
387
+ # # Save the plot to a file-like object
388
+ # buf = io.BytesIO()
389
+ # plt.savefig(buf, format="png")
390
+ # plt.close(fig)
391
+ # buf.seek(0)
392
+ # # Convert the buffer to a PIL Image and then to a NumPy array
393
+ # image = Image.open(buf)
394
+ # # chorma_hist = np.array(image)
395
 
396
+ return chroma_counts, predominant_chroma, chroma, fig
397
 
398
 
399
  def categorize_undertones(cluster_centers):
 
473
  )
474
 
475
  # calculate chroma
476
+ chroma_counts, predominant_chroma, chroma, chroma_hist = categorize_chroma(
477
  lab_pixels, chroma_thresh
478
  )
479
 
 
533
  "season_counts": season_counts,
534
  "l_hist": l_hist,
535
  "tonality_l_hist": tonality_l_hist,
536
+ "chroma_hist": chroma_hist,
537
  }