HardikUppal commited on
Commit
a952689
·
0 Parent(s):

Save current state after cleanup

Browse files
.DS_Store ADDED
Binary file (6.15 kB). View file
 
.gitignore ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ __pycache__/
2
+ model_weights/
3
+ inputs/
4
+ outputs/
5
+ venv-color-palette/
6
+ workspace/
7
+ *.pyc
README.md ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # ColorPalette
2
+ Color Palette Recommendation
src/__init__.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
# Package init for the color-palette library: re-exports the public
# entry point `process` and patches a removed NumPy alias.
import numpy as np
from api import process  # NOTE(review): absolute sibling import — presumably relies on src/ being on sys.path; `from .api import process` would be the package-safe form. Verify how the package is launched.

# from stone.image import DEFAULT_TONE_PALETTE, DEFAULT_TONE_LABELS, show
from utils import __version__, check_version  # NOTE(review): same sibling-import caveat as above.

# np.asscalar was removed in NumPy 1.23; restore it as a shim,
# presumably for a dependency that still calls it — TODO confirm which one.
setattr(np, "asscalar", lambda x: np.asarray(x).item())

# Public API of the package.
__all__ = ["process", "__version__"]

# Emits a warning/notice if a newer release is available (side effect at import time).
check_version()
src/__main__.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import logging
3
+ import os
4
+ import shutil
5
+ import sys
6
+ import threading
7
+ from datetime import datetime
8
+ from multiprocessing import freeze_support, cpu_count, Pool
9
+
10
+ import cv2
11
+ import numpy as np
12
+ from tqdm import tqdm
13
+ from tqdm.contrib.logging import logging_redirect_tqdm
14
+ from typing import List
15
+
16
+ from stone.api import process
17
+ # from stone.image import normalize_palette
18
+ from stone.utils import (
19
+ build_arguments,
20
+ build_image_paths,
21
+ is_windows,
22
+ ArgumentError,
23
+ is_debugging,
24
+ # resolve_labels,
25
+ )
26
+ from stone.package import (
27
+ __app_name__,
28
+ __version__,
29
+ __description__,
30
+ __copyright__,
31
+ __url__,
32
+ __author__,
33
+ __license__,
34
+ __code__,
35
+ __issues__,
36
+ __package_name__,
37
+ )
38
+
39
+ LOG = logging.getLogger(__name__)
40
+ lock = threading.Lock()
41
+
42
+ use_cli = len(sys.argv) > 1 and "--gui" not in sys.argv
43
+
44
+
def process_in_main(
    filename_or_url,
    n_dominant_colors=2,
    min_size=(90, 90),
    return_report_image=False,
):
    """
    Wrapper around :func:`process` that is safe to hand to a multiprocessing
    pool: it converts unexpected per-image failures into an error record
    instead of crashing the whole run, and avoids pickling issues by living
    at module level.

    :param filename_or_url: The filename (local) or URL of the image to process.
    :param n_dominant_colors: Number of dominant colors to extract. Defaults to 2.
        NOTE(review): ``process`` itself defaults to 3 — confirm which default is intended.
    :param min_size: Minimum acceptable face size (w, h); smaller faces are rejected.
    :param return_report_image: Whether to include the report image(s) in the result.
    :return: The result dict from ``process``, or an error record with
        ``filename`` and ``message`` keys if processing failed.
    :raises ArgumentError: re-raised unchanged so bad arguments abort the app.
    """
    try:
        return process(
            filename_or_url,
            n_dominant_colors=n_dominant_colors,
            min_size=min_size,
            return_report_image=return_report_image,
        )
    except ArgumentError:
        # Abort the app if any argument error occurs; bare `raise`
        # preserves the original traceback.
        raise
    except Exception as e:
        # Any other failure is logged and reported per-image so the batch
        # can continue.
        msg = f"Error processing image {filename_or_url}: {str(e)}"
        LOG.error(msg)
        return {
            "filename": filename_or_url,
            "message": msg,
        }
+ }
100
+
101
+
def main():
    """CLI entry point: parse arguments, fan image processing out over a
    process pool, and append one CSV row per result under the output dir."""
    args = build_arguments()
    # Setup logger: one timestamped log file per run under ./log.
    now = datetime.now()
    os.makedirs("./log", exist_ok=True)

    logging.basicConfig(
        filename=now.strftime("./log/log-%y%m%d%H%M.log"),
        level=logging.INFO,
        format="[%(asctime)s] {%(filename)s:%(lineno)4d} %(levelname)s - %(message)s",
        datefmt="%H:%M:%S",
    )

    image_paths = build_image_paths(args.images, args.recursive)

    debug: bool = args.debug

    n_dominant_colors = args.n_colors
    # Only (width, height) are used even if more values were supplied.
    min_size = args.min_size[:2]
    output_dir = args.output
    os.makedirs(output_dir, exist_ok=True)
    result_filename = os.path.join(output_dir, "./result.csv")

    def write_to_csv(row: list):
        # Serialize appends under a lock; rows are written as plain
        # comma-joined strings (values containing commas are NOT quoted).
        with lock:
            with open(result_filename, "a", newline="", encoding="UTF8") as f:
                f.write(",".join(map(str, row)) + "\n")

    # 0 means "use every core".
    num_workers = cpu_count() if args.n_workers == 0 else args.n_workers

    pool = Pool(processes=num_workers)

    # Backup result.csv if exists
    if os.path.exists(result_filename):
        renamed_file = os.path.join(output_dir, now.strftime("./result_bak_%y%m%d%H%M.csv"))
        shutil.move(result_filename, renamed_file)
    header = (
        "file,image type,face id,"
        + ",".join([f"dominant {i + 1},percent {i + 1}" for i in range(n_dominant_colors)])
        + ",skin tone,tone label,accuracy(0-100)"
    )
    write_to_csv(header.split(","))

    # Start. In a debugger, call `process` directly (in-process) so
    # breakpoints work; otherwise use the pool-safe wrapper.
    process_wrapper = functools.partial(
        process if is_debugging() else process_in_main,
        n_dominant_colors=n_dominant_colors,
        min_size=min_size,
        return_report_image=debug,
    )
    print("The program is processing your images...")
    print("Please wait for the program to finish.")
    with logging_redirect_tqdm():
        with tqdm(image_paths, desc="Processing images", unit="images") as pbar:
            for result in pool.imap(process_wrapper, image_paths):
                # NOTE(review): success results from `process` DO contain
                # "faces" (see src/api.py), so this branch handles successes
                # and writes the whole records dict as a single CSV cell —
                # which does not match the per-column header written above.
                # Error records contain only "filename"/"message" and would
                # fall through to result["basename"] below, raising KeyError.
                # This looks half-migrated; confirm the intended branch key.
                if "faces" in result:
                    write_to_csv([result["faces"]])
                    pbar.update()
                    continue

                basename = result["basename"]
                extension = result["extension"]
                # image_type = result["image_type"]
                faces = result["faces"]
                report_images = result["report_images"]
                pbar.set_description(f"Processing {basename}.{extension}")
                pbar.update()
    pool.close()
    pool.join()
171
+
172
+
# Strip the --gui sentinel before argparse/Gooey sees it.
sys.argv.remove("--gui") if "--gui" in sys.argv else None
# When launched with no CLI args (or with --gui), wrap `main` in the Gooey
# GUI decorator; with --ignore-gooey, or when real CLI args are present,
# run plain CLI. Falls back to a no-op decorator if gooey isn't installed.
if not use_cli and "--ignore-gooey" not in sys.argv:
    try:
        from gooey import Gooey
    except ImportError:
        # If gooey is not installed, use a dummy decorator
        from stone.utils import Gooey

    from importlib.resources import files

    # Rebind `main` to the decorated version so the entry guard below
    # runs the GUI variant.
    main = Gooey(
        show_preview_warning=False,
        advanced=True,  # fixme: `False` is not working
        dump_build_config=False,  # fixme: `True` is not working, as the path cannot be resolved correctly
        target="stone",
        suppress_gooey_flag=True,
        program_name=f"{__app_name__} v{__version__}",
        required_cols=1,
        optional_cols=1,
        image_dir=str(files("stone.ui")),
        tabbed_groups=True,
        navigation="Tabbed",
        richtext_controls=True,
        use_cmd_args=True,
        menu=[
            {
                "name": "Help",
                "items": [
                    {
                        "type": "AboutDialog",
                        "menuTitle": "About",
                        "name": __app_name__,
                        "description": __description__,
                        "version": __version__,
                        "copyright": __copyright__,
                        "website": __url__,
                        "developer": __author__,
                        "license": __license__,
                    },
                    {"type": "Link", "menuTitle": "Documentation", "url": __code__},
                    {"type": "Link", "menuTitle": "Report Bugs", "url": __issues__},
                ],
            },
        ],
    )(main)

if __name__ == "__main__":
    if is_windows():
        # Required for frozen (PyInstaller-style) Windows builds using multiprocessing.
        freeze_support()
    main()
src/analyze.py ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ from PIL import Image
4
+ import math
5
+ from sklearn.cluster import KMeans
6
+ from collections import Counter
7
+ from src.color_utils import rgb_to_hex, hex_to_bgr, hex_to_rgb
8
+ from src.hair_utils import HairColorPalette
9
+ from src.skin_utils import SkinTonePalette
10
+ from src.eyes_utils import EyeColorPalette
11
+ import json
12
+
13
+
def create_color_bar(height, width, color):
    """Return a (height, width, 3) uint8 image filled with a single color.

    :param height: Bar height in pixels.
    :param width: Bar width in pixels.
    :param color: Per-channel fill value, e.g. an (R, G, B) or (B, G, R) triple.
    :return: Solid-color numpy array of dtype uint8.
    """
    return np.full((height, width, 3), color, dtype=np.uint8)
19
+
20
+
# Function to get dominant colors and their percentages
def get_dominant_colors(image_np, n_colors):
    """Cluster the pixels of *image_np* with KMeans and return dominant colors.

    :param image_np: Pixel array reshapeable to (-1, 3); typically the masked
        region of an RGB image.
    :param n_colors: Desired number of clusters.
    :return: (colors, percentages) — cluster centers and their pixel share,
        ordered by cluster size, largest first. Both lists are empty when the
        region contains no pixels.
    """
    pixels = image_np.reshape((-1, 3))
    # KMeans requires n_clusters <= n_samples; tiny masked regions would
    # otherwise raise. An empty region yields empty results.
    n_clusters = min(n_colors, len(pixels))
    if n_clusters == 0:
        return [], []
    kmeans = KMeans(n_clusters=n_clusters)
    kmeans.fit(pixels)
    centers = kmeans.cluster_centers_
    counts = Counter(kmeans.labels_)
    total_count = sum(counts.values())
    # Order by cluster size (the original comment promised this but never
    # sorted), so callers can treat index 0 as "the" dominant color.
    ordered = counts.most_common()
    dominant_colors = [centers[label] for label, _ in ordered]
    dominant_percentages = [count / total_count for _, count in ordered]
    return dominant_colors, dominant_percentages
32
+
33
+
# Function to get the closest color from the palette
def get_closest_color(dominant_colors, palette):
    """Pick the palette entry nearest (Euclidean distance in RGB) to any of
    the given dominant colors.

    :param dominant_colors: Iterable of color vectors (length-3 arrays).
    :param palette: Mapping of name -> ((R, G, B), "#RRGGBB").
    :return: (name, hex, distance) of the best match; (None, None, inf) when
        either input is empty.
    """
    best_name = None
    best_hex = None
    best_distance = float("inf")
    for candidate in dominant_colors:
        for name, (value, hex_code) in palette.items():
            distance = np.linalg.norm(candidate - np.array(value))
            if distance < best_distance:
                best_name, best_hex, best_distance = name, hex_code, distance
    return best_name, best_hex, best_distance
47
+
48
+
# Function to create the dominant color bar
def create_dominant_color_bar(
    report_image, dominant_colors, dominant_percentages, bar_width
):
    """Build a vertical bar the same height as *report_image*, split into
    solid segments sized proportionally to each color's percentage, padded
    with white at the bottom to absorb rounding.

    :param report_image: Array whose shape[0] fixes the bar height.
    :param dominant_colors: Colors to draw, parallel to the percentages.
    :param dominant_percentages: Fractions in [0, 1] per color.
    :param bar_width: Width of the bar in pixels.
    :return: (height, bar_width, 3) uint8 array.
    """
    total_height = report_image.shape[0]

    def solid(seg_height, seg_color):
        # Inlined equivalent of create_color_bar: one solid uint8 block.
        block = np.zeros((seg_height, bar_width, 3), dtype=np.uint8)
        block[:] = seg_color
        return block

    segments = []
    used_height = 0
    for seg_color, fraction in zip(dominant_colors, dominant_percentages):
        seg_height = int(math.floor(total_height * fraction))
        used_height += seg_height
        segments.append(solid(seg_height, seg_color))
    remaining = total_height - used_height
    if remaining > 0:
        segments.append(solid(remaining, (255, 255, 255)))
    return np.vstack(segments)
65
+
66
+
67
+ # Function to create the tone palette bar
68
+ def create_tone_palette_bar(report_image, tone_id, skin_tone_palette, bar_width):
69
+ palette_bars = []
70
+ tone_height = report_image.shape[0] // len(skin_tone_palette)
71
+ tone_bgrs = []
72
+ for tone in skin_tone_palette.values():
73
+ color_bgr = hex_to_bgr(tone[1])
74
+ tone_bgrs.append(color_bgr)
75
+ bar = create_color_bar(tone_height, bar_width, color_bgr)
76
+ palette_bars.append(bar)
77
+ padding_height = report_image.shape[0] - tone_height * len(skin_tone_palette)
78
+ if padding_height > 0:
79
+ padding = create_color_bar(padding_height, bar_width, (255, 255, 255))
80
+ palette_bars.append(padding)
81
+ bar = np.vstack(palette_bars)
82
+
83
+ padding = 1
84
+ start_point = (padding, tone_id * tone_height + padding)
85
+ end_point = (bar_width - padding, (tone_id + 1) * tone_height)
86
+ bar = cv2.rectangle(bar, start_point, end_point, (255, 0, 0), 2)
87
+ return bar
88
+
89
+
# Function to create the message bar
def create_message_bar(
    dominant_colors, dominant_percentages, tone_hex, distance, img_shape
):
    """Render a one-strip text banner summarizing the top dominant color and
    the matched tone with its accuracy.

    :param dominant_colors: Dominant colors, largest-share first expected.
    :param dominant_percentages: Fractions parallel to *dominant_colors*.
    :param tone_hex: Hex string of the matched palette tone.
    :param distance: Palette-match distance; reported as 100 - distance.
    :param img_shape: Shape of the image the bar will sit under (fixes width).
    :return: (img_shape[0] // 30, img_shape[1], 3) uint8 text banner.
    """
    bar_width = img_shape[1]
    # NOTE(review): both text lines are drawn at roughly the same height as
    # this bar (y=15 plus a second line below) while the bar is only
    # height//30 tall — the second line likely gets clipped; confirm visually.
    bar_height = img_shape[0] // 30
    msg_bar = create_color_bar(
        height=bar_height, width=bar_width, color=(243, 239, 214)
    )
    # Assumes the color vector is in B, G, R order — TODO confirm: the image
    # fed through analyze_and_visualize comes from PIL and is RGB, which
    # would swap R and B in the printed hex.
    b, g, r = np.around(dominant_colors[0]).astype(int)
    dominant_color_hex = "#%02X%02X%02X" % (r, g, b)
    pct = f"{dominant_percentages[0] * 100:.2f}%"

    font, font_scale, txt_color, thickness, line_type = (
        cv2.FONT_HERSHEY_SIMPLEX,
        1,
        (0, 0, 0),
        1,
        cv2.LINE_AA,
    )
    x, y = 2, 15
    msg = f"- Dominant color: {dominant_color_hex}, percent: {pct}"
    cv2.putText(msg_bar, msg, (x, y), font, font_scale, txt_color, thickness, line_type)

    # Second line sits one text-height (plus padding) below the first.
    text_size, _ = cv2.getTextSize(msg, font, font_scale, thickness)
    line_height = text_size[1] + 10
    accuracy = round(100 - distance, 2)
    cv2.putText(
        msg_bar,
        f"- Skin tone: {tone_hex}, accuracy: {accuracy}",
        (x, y + line_height),
        font,
        font_scale,
        txt_color,
        thickness,
        cv2.LINE_AA,
    )

    return msg_bar
129
+
130
+
def analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=3):
    """Analyze hair, skin and eye color in a portrait and build a report image.

    For each of the three masked regions this extracts dominant colors,
    matches them to the corresponding reference palette, then assembles a
    side-by-side report (overlaid image + per-region color/palette bars +
    text banners) and a JSON-serializable record of the findings.

    :param image: Source image (PIL Image or array; converted via np.array).
    :param hair_mask: Non-zero where hair pixels are; same H×W as image.
    :param skin_mask: Non-zero where face-skin pixels are; same H×W as image.
    :param eye_mask: Non-zero where eye pixels are; same H×W as image.
    :param n_colors: Number of dominant colors per region.
    :return: (output_image, analysis_record) — the stitched report array and
        a dict with per-region dominant colors, percentages and closest match.
    :raises ValueError: if the masks and image differ in height/width.
    """

    image_np = np.array(image)
    hair_mask_np = np.array(hair_mask)
    skin_mask_np = np.array(skin_mask)
    eye_mask_np = np.array(eye_mask)

    if not (
        image_np.shape[:2]
        == hair_mask_np.shape[:2]
        == skin_mask_np.shape[:2]
        == eye_mask_np.shape[:2]
    ):
        raise ValueError("Image and all masks must have the same dimensions")

    # Boolean-mask indexing yields an (N, 3) pixel list per region.
    hair_palette = HairColorPalette()
    hair_dominant_colors, hair_dominant_percentages = get_dominant_colors(
        image_np[hair_mask_np > 0], n_colors
    )
    hair_color, hair_hex, hair_distance = get_closest_color(
        hair_dominant_colors, hair_palette.palette
    )

    skin_palette = SkinTonePalette()
    skin_dominant_colors, skin_dominant_percentages = get_dominant_colors(
        image_np[skin_mask_np > 0], n_colors
    )
    skin_color, skin_hex, skin_distance = get_closest_color(
        skin_dominant_colors, skin_palette.palette
    )

    eye_palette = EyeColorPalette()
    eye_dominant_colors, eye_dominant_percentages = get_dominant_colors(
        image_np[eye_mask_np > 0], n_colors
    )
    eye_color, eye_hex, eye_distance = get_closest_color(
        eye_dominant_colors, eye_palette.palette
    )

    # create overlay for different color for hair, skin and eye
    # (fixed debug colors; the commented hex_to_rgb calls would tint with
    # the detected colors instead)
    hair_overlay = np.zeros_like(image_np)
    skin_overlay = np.zeros_like(image_np)
    eye_overlay = np.zeros_like(image_np)

    hair_overlay[hair_mask_np > 0] = [0, 255, 0]  # hex_to_rgb(hair_hex)
    skin_overlay[skin_mask_np > 0] = [255, 0, 0]  # hex_to_rgb(skin_hex)
    eye_overlay[eye_mask_np > 0] = [0, 0, 255]  # hex_to_rgb(eye_hex)

    # Blend each overlay onto the image at 20% opacity (cumulative).
    combined_overlay = cv2.addWeighted(image_np, 0.8, hair_overlay, 0.2, 0)
    combined_overlay = cv2.addWeighted(combined_overlay, 0.8, skin_overlay, 0.2, 0)
    combined_overlay = cv2.addWeighted(combined_overlay, 0.8, eye_overlay, 0.2, 0)

    bar_width = 50
    hair_color_bar = create_dominant_color_bar(
        image_np, hair_dominant_colors, hair_dominant_percentages, bar_width
    )
    skin_color_bar = create_dominant_color_bar(
        image_np, skin_dominant_colors, skin_dominant_percentages, bar_width
    )
    eye_color_bar = create_dominant_color_bar(
        image_np, eye_dominant_colors, eye_dominant_percentages, bar_width
    )

    # The highlighted swatch index is the matched color's position in the
    # palette's insertion order.
    hair_palette_bar = create_tone_palette_bar(
        image_np,
        list(hair_palette.palette.keys()).index(hair_color),
        hair_palette.palette,
        bar_width,
    )
    skin_palette_bar = create_tone_palette_bar(
        image_np,
        list(skin_palette.palette.keys()).index(skin_color),
        skin_palette.palette,
        bar_width,
    )
    eye_palette_bar = create_tone_palette_bar(
        image_np,
        list(eye_palette.palette.keys()).index(eye_color),
        eye_palette.palette,
        bar_width,
    )

    # Stitch: overlaid image followed by (dominant bar, palette bar) per region.
    output_image = np.hstack(
        [
            combined_overlay,
            hair_color_bar,
            hair_palette_bar,
            skin_color_bar,
            skin_palette_bar,
            eye_color_bar,
            eye_palette_bar,
        ]
    )
    img_shape = output_image.shape
    msg_bar_hair = create_message_bar(
        hair_dominant_colors,
        hair_dominant_percentages,
        hair_hex,
        hair_distance,
        img_shape,
    )
    msg_bar_skin = create_message_bar(
        skin_dominant_colors,
        skin_dominant_percentages,
        skin_hex,
        skin_distance,
        img_shape,
    )
    msg_bar_eye = create_message_bar(
        eye_dominant_colors, eye_dominant_percentages, eye_hex, eye_distance, img_shape
    )

    output_image = np.vstack([output_image, msg_bar_hair, msg_bar_skin, msg_bar_eye])
    # Create JSON record
    analysis_record = {
        "hair": {
            "dominant_colors": [rgb_to_hex(color) for color in hair_dominant_colors],
            "dominant_percentages": hair_dominant_percentages,
            "closest_color": hair_color,
            "closest_color_hex": hair_hex,
            "distance": hair_distance,
        },
        "skin": {
            "dominant_colors": [rgb_to_hex(color) for color in skin_dominant_colors],
            "dominant_percentages": skin_dominant_percentages,
            "closest_color": skin_color,
            "closest_color_hex": skin_hex,
            "distance": skin_distance,
        },
        "eyes": {
            "dominant_colors": [rgb_to_hex(color) for color in eye_dominant_colors],
            "dominant_percentages": eye_dominant_percentages,
            "closest_color": eye_color,
            "closest_color_hex": eye_hex,
            "distance": eye_distance,
        },
    }

    # Save output image and JSON record
    # cv2.imwrite("output_image.png", output_image)
    # with open("analysis_record.json", "w") as json_file:
    #     json.dump(analysis_record, json_file, indent=4)

    return output_image, analysis_record
    # cv2.imwrite("output_image.png", output_image)


# Example usage
# analyze_and_visualize('path_to_image.jpg', 'path_to_hair_mask.png', 'path_to_skin_mask.png', 'path_to_eye_mask.png')
src/api.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from pathlib import Path
3
+ from typing import Union, Literal
4
+
5
+ import cv2
6
+ from stone.analyze import analyze_and_visualize
7
+ from stone.image import ImageBundle
8
+ from stone.utils import ArgumentError
9
+
10
+ LOG = logging.getLogger(__name__)
11
+
12
+
def process(
    filename_or_url: Union[str, Path],
    n_dominant_colors: int = 3,
    min_size: tuple[int, int] = (90, 90),
    return_report_image: bool = False,
):
    """
    Load an image, detect the face, segment hair/skin/eyes, and analyze
    their colors.

    :param filename_or_url: The filename (in local devices) or URL (in Internet) of the image.
    :param n_dominant_colors: Number of dominant colors extracted per region. Defaults to 3.
    :param min_size: Minimum possible face size; smaller faces are rejected. Defaults to (90, 90).
    :param return_report_image: Whether to return the report image(s) in the result. Defaults to False.
    :return: Dict with keys "basename", "extension", "faces" (the analysis
        record from analyze_and_visualize) and "report_images" (the report
        image, or None unless requested).
    :raises ArgumentError: if no face is detected.
    """

    ib = ImageBundle(filename_or_url)
    basename, ext = ib.basename, ib.ext
    # NOTE(review): _get_faces itself raises ValueError for zero/multiple
    # faces and returns a single face dict, so len(faces) counts dict keys
    # here — this guard looks vestigial; confirm before relying on it.
    faces = ib._get_faces(detector_backend='retinaface', min_size=min_size)
    if len(faces) == 0:
        raise ArgumentError("No face detected in the image.")
    segmentation_maps = ib._segment_image()
    image = ib.numpy_image()

    hair_mask = segmentation_maps["hair_mask"]
    skin_mask = segmentation_maps["face_skin_mask"]
    eye_mask = segmentation_maps["eyes_mask"]

    report_images, records = analyze_and_visualize(image, hair_mask, skin_mask, eye_mask, n_colors=n_dominant_colors)
    return {
        "basename": basename,
        "extension": ext,
        "faces": records,
        "report_images": report_images if return_report_image else None,
    }
51
+
52
+
src/color_utils.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from colormath.color_objects import sRGBColor, LabColor
2
+ from colormath.color_conversions import convert_color
3
+ from colormath.color_diff import delta_e_cie2000
4
+
def rgb_to_lab(rgb):
    """
    Convert an RGB triple to CIE LAB.
    :param rgb: A tuple (R, G, B) with values ranging from 0 to 255.
    :return: A LabColor object.
    """
    red, green, blue = rgb[0], rgb[1], rgb[2]
    return convert_color(sRGBColor(red, green, blue, is_upscaled=True), LabColor)
14
+
# Helper function to convert RGB to hex
def rgb_to_hex(rgb_color):
    """Format an RGB triple as an uppercase '#RRGGBB' string.

    Channels are truncated to int first, so float inputs are accepted.
    """
    return "#{:02X}{:02X}{:02X}".format(*(int(channel) for channel in rgb_color))
# Function to convert hex color to BGR
def hex_to_bgr(hex_color):
    """Convert a '#RRGGBB' (or 'RRGGBB') string to a [B, G, R] list of ints."""
    digits = hex_color.lstrip("#")
    rgb = [int(digits[pos:pos + 2], 16) for pos in (0, 2, 4)]
    return rgb[::-1]
24
+
def hex_to_rgb(hex_color):
    """Convert a '#RRGGBB' (or 'RRGGBB') string to an [R, G, B] list of ints."""
    digits = hex_color.lstrip("#")
    return [int(digits[pos:pos + 2], 16) for pos in (0, 2, 4)]
29
+
def calculate_color_distance_lab(color1, color2):
    """
    Perceptual distance between two RGB colors via the CIE2000 formula in LAB space.
    :param color1: A tuple (R, G, B) with values ranging from 0 to 255.
    :param color2: A tuple (R, G, B) with values ranging from 0 to 255.
    :return: The distance between the two colors.
    """
    return delta_e_cie2000(rgb_to_lab(color1), rgb_to_lab(color2))
41
+
42
+ # # Example usage
43
+ # color1 = (255, 215, 0) # Golden Blonde RGB
44
+ # color2 = (255, 248, 220) # Platinum Blonde RGB
45
+
46
+ # distance = calculate_color_distance_lab(color1, color2)
47
+ # print(f"Color distance in LAB space: {distance}")
src/eyes_utils.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from PIL import Image
2
+ import numpy as np
3
+ from sklearn.cluster import KMeans
4
+ from collections import Counter
5
+ from src.color_utils import calculate_color_distance_lab
6
+
7
+
class EyeColorPalette:
    """Reference eye colors plus helpers to match an eye region against them."""

    def __init__(self):
        # Name -> ((R, G, B), "#RRGGBB") for each reference eye color.
        self.palette = {
            "Light Blue": ((173, 216, 230), "#ADD8E6"),
            "Dark Blue": ((0, 0, 139), "#00008B"),
            "Light Green": ((144, 238, 144), "#90EE90"),
            "Dark Green": ((0, 100, 0), "#006400"),
            "Hazel": ((218, 165, 32), "#DAA520"),
            "Light Brown": ((210, 180, 140), "#D2B48C"),
            "Dark Brown": ((101, 67, 33), "#654321"),
            "Gray": ((169, 169, 169), "#A9A9A9"),
            "Amber": ((255, 191, 0), "#FFBF00"),
        }

    def get_dominant_colors(self, image_np, n_colors):
        """Cluster the given pixels and return the cluster-center colors,
        ordered by cluster size (largest first).

        :param image_np: Pixel array reshapeable to (-1, 3).
        :param n_colors: Desired number of clusters.
        :return: List of cluster-center color vectors; empty for an empty region.
        """
        pixels = image_np.reshape((-1, 3))
        # KMeans requires n_clusters <= n_samples; tiny eye masks would raise.
        n_clusters = min(n_colors, len(pixels))
        if n_clusters == 0:
            return []
        kmeans = KMeans(n_clusters=n_clusters)
        kmeans.fit(pixels)
        centers = kmeans.cluster_centers_
        counts = Counter(kmeans.labels_)
        # Largest cluster first — the original comment promised this
        # ordering but never actually sorted.
        return [centers[label] for label, _ in counts.most_common()]

    def get_closest_color(self, image, mask, n_colors=3):
        """Match the masked eye region of *image* to the nearest palette entry.

        :param image: Source image (anything np.array accepts).
        :param mask: Non-zero where eye pixels are; same H×W as image.
        :param n_colors: Dominant colors to consider from the region.
        :return: (name, hex) of the closest palette color, by CIE2000 distance.
        :raises ValueError: if image and mask dimensions differ.
        """
        image_np = np.array(image)
        mask_np = np.array(mask)

        if image_np.shape[:2] != mask_np.shape[:2]:
            raise ValueError("Image and mask must have the same dimensions")

        # Boolean-mask indexing yields the (N, 3) eye pixel list.
        eye_pixels = image_np[mask_np > 0]
        dominant_colors = self.get_dominant_colors(eye_pixels, n_colors)

        # Best match over every (dominant color, palette entry) pair.
        closest_color = None
        closest_hex = None
        min_distance = float("inf")
        for dom_color in dominant_colors:
            for color_name, (color_value, color_hex) in self.palette.items():
                distance = calculate_color_distance_lab(dom_color, color_value)
                if distance < min_distance:
                    min_distance = distance
                    closest_color = color_name
                    closest_hex = color_hex

        return closest_color, closest_hex
+
71
+
72
+ # Example usage
73
+ # palette = EyeColorPalette()
74
+ # closest_color, closest_hex = palette.get_closest_color('path_to_image.jpg', 'path_to_mask.png')
75
+ # print(f"The closest eye color is: {closest_color} with hex code: {closest_hex}")
src/hair_utils.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from PIL import Image
2
+ import numpy as np
3
+ from sklearn.cluster import KMeans
4
+ from collections import Counter
5
+ from stone.color_utils import calculate_color_distance_lab
class HairColorPalette:
    """Reference hair colors plus helpers to match a hair region against them."""

    def __init__(self):
        # Name -> ((R, G, B), "#RRGGBB") for each reference hair color.
        self.palette = {
            "Platinum Blonde": ((255, 248, 220), "#FFF8DC"),
            "Ash Blonde": ((190, 190, 190), "#BEBEBE"),
            "Golden Blonde": ((255, 215, 0), "#FFD700"),
            "Strawberry Blonde": ((255, 204, 153), "#FFCC99"),
            "Light Brown": ((165, 42, 42), "#A52A2A"),
            "Medium Brown": ((139, 69, 19), "#8B4513"),
            "Dark Brown": ((92, 64, 51), "#5C4033"),
            "Chestnut Brown": ((152, 105, 96), "#986960"),
            "Light Auburn": ((255, 69, 0), "#FF4500"),
            "Copper Red": ((184, 115, 51), "#B87333"),
            "Dark Auburn": ((139, 0, 0), "#8B0000"),
            "Mahogany": ((192, 64, 0), "#C04000"),
            "Jet Black": ((10, 10, 10), "#0A0A0A"),
            "Soft Black": ((59, 59, 59), "#3B3B3B"),
            "Blue Black": ((20, 30, 60), "#141E3C"),
            "Pastel Pink": ((255, 209, 220), "#FFD1DC"),
            "Electric Blue": ((0, 127, 255), "#007FFF"),
            "Lavender": ((230, 230, 250), "#E6E6FA"),
            "Emerald Green": ((80, 200, 120), "#50C878"),
            "Silver": ((192, 192, 192), "#C0C0C0"),
        }

    def get_dominant_colors(self, image_np, n_colors):
        """Cluster the given pixels and return the cluster-center colors,
        ordered by cluster size (largest first).

        :param image_np: Pixel array reshapeable to (-1, 3).
        :param n_colors: Desired number of clusters.
        :return: List of cluster-center color vectors; empty for an empty region.
        """
        pixels = image_np.reshape((-1, 3))
        # KMeans requires n_clusters <= n_samples; tiny hair masks would raise.
        n_clusters = min(n_colors, len(pixels))
        if n_clusters == 0:
            return []
        kmeans = KMeans(n_clusters=n_clusters)
        kmeans.fit(pixels)
        centers = kmeans.cluster_centers_
        counts = Counter(kmeans.labels_)
        # Largest cluster first — the original comment promised this
        # ordering but never actually sorted.
        return [centers[label] for label, _ in counts.most_common()]

    def get_closest_color(self, image, mask, n_colors=3):
        """Match the masked hair region of *image* to the nearest palette entry.

        :param image: Source image (anything np.array accepts).
        :param mask: Non-zero where hair pixels are; same H×W as image.
        :param n_colors: Dominant colors to consider from the region.
        :return: (name, hex) of the closest palette color, by CIE2000 distance.
        :raises ValueError: if image and mask dimensions differ.
        """
        image_np = np.array(image)
        mask_np = np.array(mask)

        if image_np.shape[:2] != mask_np.shape[:2]:
            raise ValueError("Image and mask must have the same dimensions")

        # Boolean-mask indexing yields the (N, 3) hair pixel list.
        hair_pixels = image_np[mask_np > 0]
        dominant_colors = self.get_dominant_colors(hair_pixels, n_colors)

        # Best match over every (dominant color, palette entry) pair.
        closest_color = None
        closest_hex = None
        min_distance = float('inf')
        for dom_color in dominant_colors:
            for color_name, (color_value, color_hex) in self.palette.items():
                distance = calculate_color_distance_lab(dom_color, color_value)
                if distance < min_distance:
                    min_distance = distance
                    closest_color = color_name
                    closest_hex = color_hex

        return closest_color, closest_hex
80
+
81
+ # Example usage
82
+ # palette = HairColorPalette()
83
+ # closest_color, closest_hex = palette.get_closest_color('path_to_image.jpg', 'path_to_mask.png')
84
+ # print(f"The closest hair color is: {closest_color} with hex code: {closest_hex}")
src/image.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Description: Image handling utilities for the Skin Tone Classifier.
2
+ import logging
3
+ import cv2
4
+ import numpy as np
5
+ from deepface import DeepFace
6
+ from stone.utils import is_url, extract_filename_and_extension, alphabet_id, ArgumentError
7
+ from stone.segmentation_utils import detect_eye_mask, mediapipe_selfie_segmentor
8
+ import requests
9
+ from PIL import Image, ExifTags, ImageCms
10
+ from io import BytesIO
11
+ import pillow_heif
12
+ # Register HEIF opener
13
+ pillow_heif.register_heif_opener()
14
+
15
+
16
+
17
+ LOG = logging.getLogger(__name__)
18
+
19
+ class ImageBundle:
20
+ def __init__(self, image_source):
21
+ """
22
+ Initialize the ImageHandler object.
23
+ :param image_source: Path to the image file or URL of the image.
24
+ """
25
+ self.image_source = image_source
26
+ self.image = None
27
+ self.exif_data = {}
28
+ self.segmentation_maps = {}
29
+ self._open_image()
30
+
31
+ def _open_image(self):
32
+ """
33
+ Open the image and preserve EXIF data.
34
+ """
35
+ try:
36
+ if is_url(self.image_source):
37
+ self._open_image_from_url(self.image_source)
38
+ else:
39
+ self._open_image_from_path(self.image_source)
40
+ self._handle_color_profile()
41
+ self._extract_exif_data()
42
+ self.is_black_and_white = self._is_black_and_white()
43
+ self.basename, self.ext = extract_filename_and_extension(self.image_source)
44
+ if self.is_black_and_white:
45
+ raise ValueError("The image is black and white. Please provide a colored image.")
46
+
47
+ except Exception as e:
48
+ print(f"An error occurred while opening the image: {e}")
49
+ self.image = None
50
+
51
+ def _open_image_from_path(self, image_path):
52
+ """
53
+ Open an image from a file path.
54
+ :param image_path: Path to the image file.
55
+ """
56
+ self.image = Image.open(image_path)
57
+
58
+ def _open_image_from_url(self, image_url):
59
+ """
60
+ Open an image from a URL.
61
+ :param image_url: URL of the image.
62
+ """
63
+ response = requests.get(image_url)
64
+ response.raise_for_status() # Raise an exception for HTTP errors
65
+ self.image = Image.open(BytesIO(response.content))
66
+
67
+ def _handle_color_profile(self):
68
+ """
69
+ Handle the color profile of the image if mentioned.
70
+ """
71
+ if 'icc_profile' in self.image.info:
72
+ icc_profile = self.image.info.get('icc_profile')
73
+ if icc_profile:
74
+ io = BytesIO(icc_profile)
75
+ src_profile = ImageCms.ImageCmsProfile(io)
76
+ dst_profile = ImageCms.createProfile("sRGB")
77
+ self.image = ImageCms.profileToProfile(self.image, src_profile, dst_profile)
78
+
79
+ def _extract_exif_data(self):
80
+ """
81
+ Extract EXIF data from the image.
82
+ """
83
+ if hasattr(self.image, '_getexif'):
84
+ exif_info = self.image._getexif()
85
+ if exif_info is not None:
86
+ for tag, value in exif_info.items():
87
+ decoded_tag = ExifTags.TAGS.get(tag, tag)
88
+ self.exif_data[decoded_tag] = value
89
+
90
+ def _is_black_and_white(self):
91
+ """
92
+ Check if the image is black and white even if it has 3 channels.
93
+ :return: True if the image is black and white, otherwise False.
94
+ """
95
+ if self.image.mode not in ['RGB', 'RGBA']:
96
+ return self.image.mode in ['1', 'L']
97
+
98
+ np_image = np.array(self.image)
99
+ if len(np_image.shape) == 3 and np_image.shape[2] == 3:
100
+ r, g, b = np_image[:,:,0], np_image[:,:,1], np_image[:,:,2]
101
+ return np.all(r == g) and np.all(g == b)
102
+ return False
103
+
104
+ def _get_faces(self, detector_backend='retinaface', is_bw=False, min_size=(90, 90)):
105
+ """
106
+ Get the coordinates of the detected faces in the image.
107
+ :param detector_backend: Face detector backend to use.
108
+ :param is_bw: Whether the image is black and white.
109
+ gets a list of faces detected, with each face as dict of facial_area, face, confidence
110
+ """
111
+ self.faces = DeepFace.extract_faces(self.numpy_image(), detector_backend=detector_backend, grayscale=is_bw)
112
+ if len(self.faces) == 0:
113
+ raise ValueError("No face is detected in the image.")
114
+ elif len(self.faces) > 1:
115
+ raise ValueError("Multiple faces are detected in the image.")
116
+ else:
117
+ # check if the face is too small
118
+ face = self.faces[0]
119
+ if face["facial_area"]["w"] < min_size[0] or face["facial_area"]["h"] < min_size[1]:
120
+ raise ValueError("The face is too small.")
121
+ else:
122
+ self.faces = self.faces[0]
123
+
124
+ return self.faces
125
+
126
+ def numpy_image(self):
127
+ """
128
+ Convert the image to a numpy array.
129
+ :return: Numpy array of the image.
130
+ """
131
+ return np.array(self.image)
132
+
133
+
134
+ def _segment_image(self, segment=["face_skin", "body_skin", "hair", "eyes", "clothes"]):
135
+ """
136
+ Get a segmentation map by name.
137
+ :param name: Name of the segmentation map.
138
+ :return: Segmentation map as a numpy array or None if not found.
139
+ """
140
+ if "eyes" in segment:
141
+ del segment[segment.index("eyes")]
142
+ eye_mask = detect_eye_mask(self.numpy_image(), self.faces)
143
+ self.segmentation_maps["eyes_mask"] = eye_mask
144
+ if "face_skin" in segment or "body_skin" in segment or "hair" in segment or "clothes" in segment:
145
+ # import IPython; IPython.embed()
146
+ mask_dict = mediapipe_selfie_segmentor(self.numpy_image(), segment=segment)
147
+ for seg in segment:
148
+ self.segmentation_maps[seg+"_mask"] = mask_dict[seg]
149
+ return self.segmentation_maps
150
+
151
+
152
+
153
+
src/package.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Package metadata: version, naming, authorship and project links.
__version__ = "1.2.4"
__package_name__ = "skin-tone-classifier"
__app_name__ = "Skin Tone Classifier"
__description__ = "An easy-to-use library for skin tone classification"
__author__ = "Chenglong Ma"
__author_email__ = "chenglong.m@outlook.com"
__author_website__ = "https://chenglongma.com/"
__url__ = "https://chenglongma.com/SkinToneClassifier/"
__code__ = "https://github.com/ChenglongMa/SkinToneClassifier/"
__issues__ = "https://github.com/ChenglongMa/SkinToneClassifier/issues"
__license__ = "GPLv3"
__copyright__ = "2022"
src/segmentation_utils.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from mediapipe.tasks import python
2
+ from mediapipe.tasks.python import vision
3
+ import mediapipe as mp
4
+ import cv2
5
+ import numpy as np
6
+
7
def mediapipe_selfie_segmentor(image: np.ndarray, segment=("face_skin", "body_skin", "hair")):
    """
    Segment a selfie image into per-region boolean masks using MediaPipe's
    multiclass selfie segmenter.

    :param image: RGB image as a numpy array (H, W, 3).
    :param segment: Region names to return; any of "bg", "hair",
        "body_skin", "face_skin", "clothes".
    :return: Dict mapping each requested region name to a boolean mask
        of shape (H, W).
    :raises KeyError: If an unknown region name is requested.
    """
    # Create the options that will be used for ImageSegmenter.
    base_options = python.BaseOptions(
        model_asset_path="model_weights/selfie_segmenter.tflite"
    )
    options = vision.ImageSegmenterOptions(
        base_options=base_options,
        output_category_mask=True,
        output_confidence_masks=True,
    )
    with vision.ImageSegmenter.create_from_options(options) as segmenter:
        # Wrap the numpy image for MediaPipe and run segmentation.
        mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=image)
        segmentation_result = segmenter.segment(mp_image)
        category_mask = segmentation_result.category_mask.numpy_view()

    # Category indices emitted by the multiclass selfie segmenter model.
    category_ids = {
        "bg": 0,
        "hair": 1,
        "body_skin": 2,
        "face_skin": 3,
        "clothes": 4,
    }
    # BUG FIX: explicit lookup table instead of the fragile
    # `locals()[seg + "_mask"]` trick, which broke under refactoring and
    # raised an opaque KeyError for unknown names. Also avoids computing
    # masks nobody asked for, and the default is now an immutable tuple.
    return {name: category_mask == category_ids[name] for name in segment}
34
+
35
+
36
def detect_eye_mask(image, face_coord):
    """
    Build a binary mask covering the two eye regions of a detected face.

    :param image: Source image; only its height/width are used.
    :param face_coord: Face dict whose ``facial_area`` carries the
        ``left_eye`` and ``right_eye`` landmark coordinates.
    :return: uint8 mask of shape (H, W) with filled circles over both eyes.
    """
    height, width = image.shape[0:2]
    mask = np.zeros((height, width), np.uint8)

    facial_area = face_coord["facial_area"]
    left_eye = facial_area["left_eye"]
    right_eye = facial_area["right_eye"]

    # Scale the eye radius with the inter-eye distance (rough heuristic).
    inter_eye_distance = np.linalg.norm(np.array(left_eye) - np.array(right_eye))
    radius = int(inter_eye_distance / 12)  # approximate

    for center in (left_eye, right_eye):
        cv2.circle(mask, center, radius, (255, 255, 255), -1)

    return mask
src/skin_utils.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from PIL import Image
2
+ import numpy as np
3
+ from sklearn.cluster import KMeans
4
+ from collections import Counter
5
+ from stone.color_utils import calculate_color_distance_lab
6
class SkinTonePalette:
    """
    Named skin-tone reference palette with helpers to match an image's
    dominant skin colors against it.
    """

    def __init__(self):
        # Name -> ((R, G, B), hex) reference tones, ordered dark to light.
        self.palette = {
            "Ebony": ((55, 48, 40), "#373028"),
            "Deep Mocha": ((66, 40, 17), "#422811"),
            "Dark Chocolate": ((81, 59, 46), "#513b2e"),
            "Warm Almond": ((111, 80, 60), "#6f503c"),
            "Golden Bronze": ((129, 101, 79), "#81654f"),
            "Honey": ((157, 122, 84), "#9d7a54"),
            "Caramel": ((190, 160, 126), "#bea07e"),
            "Light Tan": ((229, 200, 166), "#e5c8a6"),
            "Peach": ((231, 193, 184), "#e7c1b8"),
            "Fair": ((243, 218, 214), "#f3dad6"),
            "Ivory": ((251, 242, 243), "#fbf2f3"),
        }

    def get_dominant_colors(self, image_np, n_colors):
        """
        Extract the dominant colors of a pixel array via k-means clustering.

        :param image_np: Pixel array whose last axis is the 3 color channels.
        :param n_colors: Number of clusters (dominant colors) to extract.
        :return: List of RGB cluster centers, largest cluster first.
        """
        # Reshape the image to be a flat list of pixels.
        pixels = image_np.reshape((-1, 3))

        # Use KMeans to find n_colors clusters in the image.
        kmeans = KMeans(n_clusters=n_colors)
        kmeans.fit(pixels)

        # Cluster centers are the dominant colors.
        centers = kmeans.cluster_centers_

        # Number of pixels assigned to each cluster.
        counts = Counter(kmeans.labels_)

        # BUG FIX: actually order centers by cluster size (descending).
        # The previous version iterated `counts.keys()`, which is insertion
        # order, so the "sorted" list was not sorted at all.
        return [centers[label] for label, _ in counts.most_common()]

    def get_closest_color(self, image, mask, n_colors=3):
        """
        Find the palette entry closest to the dominant skin colors.

        :param image: Source image (PIL image or array-like).
        :param mask: Skin mask; non-zero entries mark skin pixels. Must
            share the image's height/width.
        :param n_colors: Number of dominant colors to compare.
        :return: (name, hex) of the closest palette tone.
        :raises ValueError: If dimensions differ or the mask is empty.
        """
        # Convert inputs to numpy arrays.
        image_np = np.array(image)
        mask_np = np.array(mask)

        # Ensure the images have the same height and width.
        if image_np.shape[:2] != mask_np.shape[:2]:
            raise ValueError("Image and mask must have the same dimensions")

        # Extract the skin region from the image.
        skin_pixels = image_np[mask_np > 0]
        # Robustness: an empty selection would otherwise fail deep inside
        # KMeans with a cryptic error.
        if skin_pixels.size == 0:
            raise ValueError("The mask does not select any pixels")

        # Get dominant colors from the skin region.
        dominant_colors = self.get_dominant_colors(skin_pixels, n_colors)

        # Find the closest palette color (CIELAB distance) over all
        # dominant-color / palette-entry pairs.
        closest_color = None
        closest_hex = None
        min_distance = float("inf")
        for dom_color in dominant_colors:
            for color_name, (color_value, color_hex) in self.palette.items():
                distance = calculate_color_distance_lab(dom_color, color_value)
                if distance < min_distance:
                    min_distance = distance
                    closest_color = color_name
                    closest_hex = color_hex

        return closest_color, closest_hex
72
+
73
+ # Example usage
74
+ # palette = SkinTonePalette()
75
+ # closest_color, closest_hex = palette.get_closest_color('path_to_image.jpg', 'path_to_mask.png')
76
+ # print(f"The closest skin tone is: {closest_color} with hex code: {closest_hex}")
77
+ # def dominant_colors(image, to_bw, n_clusters=2):
78
+ # if to_bw:
79
+ # data = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
80
+ # data = cv2.cvtColor(data, cv2.COLOR_GRAY2BGR)
81
+ # else:
82
+ # data = image
83
+ # data = np.reshape(data, (-1, 3))
84
+ # data = data[np.all(data != 0, axis=1)]
85
+ # data = np.float32(data)
86
+
87
+ # criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
88
+ # flags = cv2.KMEANS_RANDOM_CENTERS
89
+ # compactness, labels, colors = cv2.kmeans(data, n_clusters, None, criteria, 10, flags)
90
+ # labels, counts = np.unique(labels, return_counts=True)
91
+
92
+ # order = (-counts).argsort()
93
+ # colors = colors[order]
94
+ # counts = counts[order]
95
+
96
+ # percents = counts / counts.sum()
97
+
98
+ # return colors, percents
99
+
100
+ # DEFAULT_TONE_PALETTE = {
101
+ # "color": [
102
+ # "#373028",
103
+ # "#422811",
104
+ # "#513b2e",
105
+ # "#6f503c",
106
+ # "#81654f",
107
+ # "#9d7a54",
108
+ # "#bea07e",
109
+ # "#e5c8a6",
110
+ # "#e7c1b8",
111
+ # "#f3dad6",
112
+ # "#fbf2f3",
113
+ # ],
114
+ # # Refer to this paper:
115
+ # # Leigh, A., & Susilo, T. (2009). Is voting skin-deep? Estimating the effect of candidate ballot photographs on election outcomes.
116
+ # # Journal of Economic Psychology, 30(1), 61-70.
117
+ # "bw": [
118
+ # "#FFFFFF",
119
+ # "#F0F0F0",
120
+ # "#E0E0E0",
121
+ # "#D0D0D0",
122
+ # "#C0C0C0",
123
+ # "#B0B0B0",
124
+ # "#A0A0A0",
125
+ # "#909090",
126
+ # "#808080",
127
+ # "#707070",
128
+ # "#606060",
129
+ # "#505050",
130
+ # "#404040",
131
+ # "#303030",
132
+ # "#202020",
133
+ # "#101010",
134
+ # "#000000",
135
+ # ],
136
+ # }
137
+
138
+ # DEFAULT_TONE_LABELS = {
139
+ # "color": ["C" + alphabet_id(i) for i in range(len(DEFAULT_TONE_PALETTE["color"]))],
140
+ # "bw": ["B" + alphabet_id(i) for i in range(len(DEFAULT_TONE_PALETTE["bw"]))],
141
+ # }
142
+ # def skin_tone(colors, percents, skin_tone_palette, tone_labels):
143
+ # lab_tones = [convert_color(sRGBColor.new_from_rgb_hex(rgb), LabColor) for rgb in skin_tone_palette]
144
+ # lab_colors = [convert_color(sRGBColor(rgb_r=r, rgb_g=g, rgb_b=b, is_upscaled=True), LabColor) for b, g, r in colors]
145
+ # distances = [np.sum([delta_e_cie2000(c, label) * p for c, p in zip(lab_colors, percents)]) for label in lab_tones]
146
+ # tone_id = np.argmin(distances)
147
+ # distance: float = distances[tone_id]
148
+ # tone_hex = skin_tone_palette[tone_id].upper()
149
+ # tone_label = tone_labels[tone_id]
150
+ # return tone_id, tone_hex, tone_label, distance
src/utils.py ADDED
@@ -0,0 +1,529 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import functools
3
+ import logging
4
+ import os
5
+ import re
6
+ import string
7
+ import sys
8
+ from pathlib import Path
9
+ from typing import Union
10
+ from urllib.parse import urlparse
11
+
12
+ from stone.package import __version__, __package_name__, __description__, __app_name__
13
+
14
+ LOG = logging.getLogger(__name__)
15
+
16
+
17
class ArgumentError(ValueError):
    """Raised when the command-line arguments are invalid."""
23
+
24
+
25
def Gooey(*args, **kwargs):
    """
    No-op stand-in for the ``gooey.Gooey`` decorator.

    Lets CLI mode run without the optional Gooey package installed: the
    decorated function is returned unchanged.

    :param args: Ignored (accepted for signature compatibility).
    :param kwargs: Ignored (accepted for signature compatibility).
    :return: A decorator that returns its argument untouched.
    """

    def passthrough(func):
        return func

    return passthrough
38
+
39
+
40
@functools.cache
def alphabet_id(n):
    """
    Convert a zero-based index to a spreadsheet-style column id.

    0 -> "A", 25 -> "Z", 26 -> "AA", 27 -> "AB", ...

    BUG FIX: the previous implementation returned "Z" for both 25 and 26
    (the single-letter fast path was zero-based while the multi-letter loop
    was one-based), producing duplicate labels at the boundary.

    :param n: Zero-based index (non-negative int).
    :return: Uppercase bijective base-26 identifier.
    """
    letters = string.ascii_uppercase
    n_letters = len(letters)
    _id = ""
    n += 1  # shift to one-based bijective base-26
    while n > 0:
        n, remainder = divmod(n - 1, n_letters)
        _id = letters[remainder] + _id
    return _id
54
+
55
+
56
def is_url(text):
    """Return True if *text* parses as an http(s) URL."""
    scheme = urlparse(text).scheme
    return scheme == "http" or scheme == "https"
58
+
59
+
60
def extract_filename_and_extension(url):
    """
    Extract base filename and extension from the url.

    :param url: URL with filename and extension,
        e.g., https://example.com/images/pic.jpg?param=value
    :return: Base filename and dotted extension, e.g., ("pic", ".jpg");
        the extension is None when the filename has no dot.
    """
    path = urlparse(url).path
    _, _, filename = path.rpartition("/")
    basename, dot, remainder = filename.partition(".")
    # Only the first dot-separated segment after the basename counts.
    extension = f".{remainder.split('.')[0]}" if dot else None
    return basename, extension
71
+
72
+
73
def build_image_paths(images_paths, recursive=False):
    """
    Expand a mix of files, directories and URLs into a sorted image list.

    :param images_paths: A single path/URL string or a list of them.
    :param recursive: Also search subdirectories of any given directory
        (excluding "debug" and "log" folders).
    :return: De-duplicated list of resolved Paths and URL strings, sorted
        by the first number embedded in each base filename.
    :raises FileNotFoundError: If nothing valid is found.
    """
    valid_images = ["*.jpg", "*.gif", "*.png", "*.jpeg", "*.webp", "*.tif"]
    excluded_folders = ["debug", "log"]
    if isinstance(images_paths, str):
        images_paths = [images_paths]

    urls = []
    file_groups = []
    for entry in images_paths:
        if is_url(entry):
            urls.append(entry)
            continue
        path = Path(entry)
        if path.is_dir():
            matches = [path.glob(pattern) for pattern in valid_images]
            if recursive:
                subfolders = [d for d in path.glob("*/") if d.name not in excluded_folders]
                matches.extend(sub.rglob(pattern) for pattern in valid_images for sub in subfolders)
            file_groups.extend(matches)
        elif path.is_file():
            file_groups.append([path])

    # Resolve and de-duplicate files, then merge in the URLs.
    paths = list({f.resolve() for group in file_groups for f in group} | set(urls))
    if not paths:
        raise FileNotFoundError("No valid images in the specified path.")
    # Sort paths by (first) number extracted from the filename string.
    paths.sort(key=sort_file)
    return paths
101
+
102
+
103
def sort_file(path: Union[str, Path]):
    """
    Sort key for image paths: (first embedded number, base filename).

    Entries without a number sort last (infinity as the primary key).

    :param path: A filesystem Path or URL string.
    :return: Tuple of (number or inf, base filename).
    """
    if isinstance(path, Path):
        basename = path.stem
    else:
        basename = extract_filename_and_extension(path)[0]
    digits = re.findall(r"\d+", basename)
    primary = int(digits[0]) if digits else float("inf")
    return primary, basename
110
+
111
+
112
def is_windows():
    """Return True when running on Windows (including Cygwin)."""
    return sys.platform in ("win32", "cygwin")
114
+
115
+
116
def is_debugging():
    """Return a truthy value when a tracer/debugger is currently active."""
    trace_fn = getattr(sys, "gettrace", None)
    return trace_fn is not None and trace_fn()
119
+
120
+
121
def build_arguments():
    """
    Build and parse the command-line / GUI arguments.

    Uses Gooey's GooeyParser when the optional ``gooey`` package is
    installed (GUI mode) and falls back to ``argparse.ArgumentParser``
    otherwise (CLI mode); ``kwargs`` is repeatedly rebuilt so each
    argument only receives Gooey-specific options in GUI mode.

    :return: The parsed ``argparse.Namespace`` with the GUI-only inputs
        (``image_dirs``/``image_files``/``image_urls`` and
        ``min_width``/``min_height``) merged into ``images``/``min_size``.
    """
    # Detect GUI vs CLI mode by whether Gooey is importable.
    try:
        from gooey import GooeyParser

        in_gui = True
    except ImportError:
        from argparse import ArgumentParser as GooeyParser

        in_gui = False

    kwargs = dict(formatter_class=argparse.RawTextHelpFormatter) if not in_gui else {}
    parser = GooeyParser(
        description=__description__,
        **kwargs,
    )
    kwargs = (
        {
            "gooey_options": {"show_border": False, "columns": 1},
        }
        if in_gui
        else {}
    )
    # --- Input locations (files / directories / URLs) ---
    files = parser.add_argument_group(
        "Images to process",
        "The locations of images to process, which can be directories, files, or URLs.\n"
        "Multiple values are separated by space;\n"
        'You can mix folders, filenames and web links together, e.g., "/path/to/dir1 /path/to/pic.jpg https://example.com/pic.png".\n',
        **kwargs,
    )
    kwargs = {"gooey_options": {"visible": False}} if in_gui else {}

    files.add_argument(
        "-i",
        "--images",
        nargs="+",
        default=[os.getcwd()],
        metavar="Image Filenames",
        help="Image filename(s), Directories or URLs to process. Separated by space.",
        **kwargs,
    )
    if in_gui:
        files.add_argument(
            "--image_dirs",
            nargs="+",
            metavar="Image Directories",
            widget="DirChooser",
            # widget="MultiDirChooser",  # fixme: enable this widget when issues are fixed
            gooey_options={
                "message": "Select directories to process",
                "initial_value": os.getcwd(),
                "default_path": os.getcwd(),
                "placeholder": "e.g., /path/to/dir1 /path/to/dir2",
            },
        )
    kwargs = dict(metavar="Recursive Search") if in_gui else {}
    files.add_argument(
        "-r",
        "--recursive",
        action="store_true",
        help="Search images recursively in the specified directory.",
        **kwargs,
    )
    if in_gui:
        files.add_argument(
            "--image_files",
            nargs="+",
            metavar="Image Filenames",
            help="Add individual image file(s)",
            widget="MultiFileChooser",
            gooey_options={
                "wildcard": "All images|*.jpg;*.jpeg;*.png;*.bmp;*.gif;*.tif;*.webp|"
                "JPG (*.jpg)|*.jpg|"
                "JPEG (*.jpeg)|*.jpeg|"
                "PNG (*.png)|*.png|"
                "BMP (*.bmp)|*.bmp|"
                "GIF (*.gif)|*.gif|"
                "TIFF (*.tif)|*.tif|"
                "WEBP (*.webp)|*.webp|"
                "All files (*.*)|*.*",
                "message": "Select the image file(s) to process",
                "default_dir": os.getcwd(),
                "full_width": False,
                "placeholder": "e.g., a.jpg b.png",
            },
        )

        files.add_argument(
            "--image_urls",
            nargs="+",
            metavar="Image URLs",
            help="Add image URLs",
            gooey_options={
                "full_width": False,
                "placeholder": "e.g., https://example.com/a.jpg https://example.com/b.png",
            },
        )

    # --- Image settings (type, palette, labels, sizing) ---
    kwargs = {"gooey_options": {"show_border": False, "columns": 2}} if in_gui else {}
    images = parser.add_argument_group(
        "Image Settings",
        **kwargs,
    )
    bw_option = "black/white" if in_gui else "bw"
    images.add_argument(
        "-t",
        "--image_type",
        default="auto",
        metavar="Image Type",
        help="Specify whether the input image(s) is/are colored or black/white.\n"
        f'Defaults to "auto", which will be detected automatically. Other options are "color" and "{bw_option}".\n',
        choices=["auto", "color", bw_option],
    )
    kwargs = {"gooey_options": {"full_width": True}} if in_gui else {}
    images.add_argument(
        "-p",
        "--palette",
        nargs="+",
        metavar="Palette",
        help="Skin tone palette;\n"
        'Input RGB hex values leading by "#" or RGB values separated by comma(,),\n'
        "E.g., #373028 #422811 or 255,255,255 100,100,100\n"
        "Leave blank to use the default palette as mentioned in the document.\n",
        **kwargs,
    )
    images.add_argument(
        "-l",
        "--labels",
        nargs="+",
        metavar="Labels",
        help="Skin tone labels;\n"
        "Leave blank to use the default values: the uppercase alphabet list leading by the image type ('C' for 'color'; 'B' for 'Black&White'), "
        "e.g., ['CA', 'CB', ..., 'CZ'] or ['BA', 'BB', ..., 'BZ'].\n"
        "Since v1.2.0, supports range of labels, e.g., 'A-Z' or '1-10'.\n"
        "Refer to https://github.com/ChenglongMa/SkinToneClassifier#3-specify-category-labels for more details.",
        **kwargs,
    )

    kwargs = dict(metavar="Convert to Black/White") if in_gui else {}
    images.add_argument(
        "-bw",
        "--black_white",
        action="store_true",
        help="Whether to convert the input to black/white image(s)?\n"
        "If true, the app will convert the input to black/white image(s) and use the black/white palette for classification.",
        **kwargs,
    )

    kwargs = (
        {"gooey_options": {"initial_value": 2, "min": 1, "max": 99999, "full_width": False}, "widget": "IntegerField"}
        if in_gui
        else {}
    )
    images.add_argument(
        "--n_colors",
        metavar="Number of Dominant Colors",
        type=int,
        help="Specify the number of dominant colors to be extracted.\n"
        "The colors will be used to compare with the colors in the palette.\n",
        default=2,
        **kwargs,
    )

    kwargs = (
        {
            "gooey_options": {"initial_value": 250, "min": 10, "max": 99999, "full_width": False},
            "widget": "IntegerField",
        }
        if in_gui
        else {}
    )
    images.add_argument(
        "--new_width",
        type=int,
        metavar="New Width (pixels)",
        help="Resize the images with the specified width.\n"
        "Sometimes smaller images will be processed faster and more accurately.\n"
        "No resizing will be performed if the value is negative.",
        default=250,
        **kwargs,
    )

    # --- Output settings ---
    kwargs = {"gooey_options": {"show_border": True}} if in_gui else {}
    outputs = parser.add_argument_group("Output Settings", **kwargs)

    kwargs = (
        {
            "gooey_options": {"message": "Select the output directory", "default_path": os.getcwd()},
            "widget": "DirChooser",
        }
        if in_gui
        else {}
    )
    outputs.add_argument(
        "-o",
        "--output",
        metavar="Output Directory",
        default=os.getcwd(),
        help="Specify the path of output file, defaults to current directory.",
        **kwargs,
    )

    kwargs = dict(metavar="Generate Report Images") if in_gui else {}
    outputs.add_argument(
        "-d",
        "--debug",
        action="store_true",
        default=in_gui,
        help="Whether to generate report images?\n"
        "If true, the report images will be saved in the '<OUTPUT_DIRECTORY>/debug' directory.",
        **kwargs,
    )

    # --- Advanced (detector tuning) settings ---
    kwargs = {"gooey_options": {"show_border": False, "columns": 2}} if in_gui else {}
    advanced = parser.add_argument_group(
        "Advanced Settings",
        "For advanced users only, please refer to https://stackoverflow.com/a/20805153/8860079",
        **kwargs,
    )

    kwargs = (
        {"gooey_options": {"initial_value": 1.1, "min": 0.1, "max": 2.0}, "widget": "DecimalField"} if in_gui else {}
    )
    advanced.add_argument(
        "--scale",
        type=float,
        metavar="Scale",
        help="Specify how much the image size is reduced at each image scale.",
        default=1.1,
        **kwargs,
    )

    kwargs = {"gooey_options": {"initial_value": 5, "min": 1, "max": 99999}, "widget": "IntegerField"} if in_gui else {}
    advanced.add_argument(
        "--min_nbrs",
        type=int,
        metavar="Minimum Neighbors",
        help="Specify how many neighbors each candidate rectangle should have to retain it.\n"
        "Higher value results in less detections but with higher quality.",
        default=5,
        **kwargs,
    )
    default_min_width = 90
    default_min_height = 90

    # In GUI mode the combined --min_size field is hidden and replaced by
    # the separate --min_width/--min_height integer fields below.
    kwargs = {"gooey_options": {"visible": False}} if in_gui else {}
    advanced.add_argument(
        "--min_size",
        type=int,
        nargs="+",
        metavar="Minimum Possible Face Size, format: <Width Height>",
        help=f'Specify the minimum possible face size. Faces smaller than that are ignored, defaults to "{default_min_width} {default_min_height}".',
        default=(default_min_width, default_min_height),
        **kwargs,
    )
    if in_gui:
        min_size = advanced.add_argument_group(
            "Minimum Possible Face Size (pixels)",
            'Specify the minimum possible face size. Faces smaller than that are ignored, defaults to "90 90".',
            gooey_options={"show_border": True, "columns": 2},
        )

        min_size.add_argument(
            "--min_width",
            type=int,
            metavar="Minimum Width",
            # help="Specify the minimum possible face width. Faces smaller than that are ignored, defaults to 90.",
            default=default_min_width,
            widget="IntegerField",
            gooey_options={"initial_value": default_min_width, "min": 10, "max": 99999},
        )

        min_size.add_argument(
            "--min_height",
            type=int,
            metavar="Minimum Height",
            # help="Specify the minimum possible face height. Faces smaller than that are ignored, defaults to 90.",
            default=default_min_height,
            widget="IntegerField",
            gooey_options={"initial_value": default_min_height, "min": 10, "max": 99999},
        )

    kwargs = (
        {"gooey_options": {"initial_value": 0.15, "min": 0.01, "max": 1.0}, "widget": "DecimalField"} if in_gui else {}
    )
    advanced.add_argument(
        "--threshold",
        type=float,
        metavar="Minimum Possible Face Proportion",
        help="Specify the minimum proportion of the skin area required to identify the face, defaults to 0.15.",
        default=0.15,
        **kwargs,
    )

    kwargs = {"gooey_options": {"initial_value": 0, "min": 0, "max": 99999}, "widget": "IntegerField"} if in_gui else {}
    advanced.add_argument(
        "--n_workers",
        type=int,
        metavar="Number of CPU Workers",
        help="Specify the number of workers to process the images.\n"
        "0 means the total number of CPU cores in the system.",
        default=0,
        **kwargs,
    )

    kwargs = dict(gooey_options={"visible": False}) if in_gui else {}
    advanced.add_argument(
        "-v",
        "--version",
        action="version",
        version=f"%(prog)s {__version__}",
        help="Show the version number and exit.",
        **kwargs,
    )
    args = parser.parse_args()
    # Merge the GUI-only input widgets back into the single `images` list.
    images = args.images or []
    if getattr(args, "image_dirs", False):
        images.extend(args.image_dirs)
    if getattr(args, "image_files", False):
        images.extend(args.image_files)
    if getattr(args, "image_urls", False):
        images.extend(args.image_urls)
    args.images = images
    # If --min_size was left at its default, prefer the GUI's separate
    # width/height fields (when present).
    if (
        tuple(args.min_size) == (default_min_width, default_min_height)
        and getattr(args, "min_width", False)
        and getattr(args, "min_height", False)
    ):
        args.min_size = (args.min_width, args.min_height)
    return args
450
+
451
+
452
def resolve_labels(labels):
    """
    Expand a single range expression like "A-Z", "1-10" or "A-Z-2" into a
    list of labels.

    Supported separators are ``- , ~ : ; _``; an optional third segment is
    the integer step. A negative step yields the labels in descending
    order. Anything that is not exactly one valid range expression is
    returned unchanged.

    :param labels: List of label strings from the command line.
    :return: The expanded label list, or the input unchanged.
    """
    if not labels or len(labels) != 1:
        return labels
    label = labels[0]

    separator = r"[-,~:;_]"
    pattern = rf"^([a-zA-Z0-9]+){separator}([a-zA-Z0-9]+)(?:{separator}([-+]?\d+))?$"
    match = re.match(pattern, label)
    if match is None:
        return labels
    start, end, step = match.groups()
    step = int(step) if step else 1
    if step == 0:
        LOG.warning(f"The specified step in the '--label' setting ('{label}') cannot be 0; resetting to 1.")
        step = 1
    descending = step < 0
    if descending:
        # BUG FIX: the old code swapped the endpoints but kept the negative
        # step, so range(start, end + 1, step) was always empty. Generate
        # the ascending range and reverse it at the end instead.
        start, end = end, start
        step = -step

    if start.isdigit() != end.isdigit():
        LOG.warning(
            f"Invalid '--label' setting ('{label}'): The start value ({start}) and the end value ({end}) should be both digits or both letters."
        )
        return labels

    if start.isdigit():
        # BUG FIX: compare numerically. The old string comparison rejected
        # valid ranges like "2-10" because "2" >= "10" lexicographically.
        lo, hi = int(start), int(end)
        if lo >= hi:
            LOG.warning(
                f"Invalid '--label' setting ('{label}'): The start value ({start}) should be less than the end value ({end})."
            )
            return labels
        expanded = [str(i) for i in range(lo, hi + 1, step)]
    else:
        start, end = start.upper(), end.upper()
        if len(start) != 1 or len(end) != 1:
            # Multi-letter endpoints (e.g. "AA-AB") are not supported;
            # the old code would have crashed in ord() here.
            return labels
        if start >= end:
            LOG.warning(
                f"Invalid '--label' setting ('{label}'): The start value ({start}) should be less than the end value ({end})."
            )
            return labels
        expanded = [chr(i) for i in range(ord(start), ord(end) + 1, step)]

    return list(reversed(expanded)) if descending else expanded
490
+
491
+
492
def get_latest_version_from_pypi(package_name):
    """
    Look up the newest released version of *package_name* on PyPI.

    Best-effort: returns None on any failure (no network, unknown package,
    ``requests`` not installed).

    :param package_name: Distribution name as published on PyPI.
    :return: Latest version string, or None.
    """
    try:
        import requests

        response = requests.get(f"https://pypi.org/pypi/{package_name}/json")
        response.raise_for_status()
        return response.json()["info"]["version"]
    except Exception:
        return None
504
+
505
+
506
def check_version():
    """
    Print an upgrade hint when a newer release is available on PyPI.

    Best-effort: any failure (offline, package not installed via pip,
    missing optional dependencies) is silently ignored. The
    ``STONE_UPGRADE_FLAG`` environment variable suppresses repeated checks.
    """
    if "STONE_UPGRADE_FLAG" in os.environ:
        # Already notified (or deliberately disabled) in this environment.
        return
    try:
        from packaging.version import parse
        import importlib.metadata

        latest_version = get_latest_version_from_pypi(__package_name__)
        if not latest_version:
            # PyPI lookup failed; nothing to compare against.
            return
        distribution = importlib.metadata.distribution(__package_name__)
        installed_version = distribution.version
        if parse(installed_version) < parse(latest_version):
            from colorama import just_fix_windows_console, Fore

            # Enable ANSI color output on Windows consoles before printing.
            just_fix_windows_console()
            print(
                Fore.YELLOW + f"You are using an outdated version of {__package_name__} ({installed_version}).\n"
                f"Please upgrade to the latest version ({latest_version}) with the following command:\n",
                Fore.GREEN + f"pip install {__package_name__}[all] --upgrade\n" + Fore.RESET,
            )
            # Mark as notified so the banner is not printed again.
            os.environ["STONE_UPGRADE_FLAG"] = "1"
    except Exception:
        # Version checking must never break the main program.
        pass
test.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ from PIL import Image
3
+ from argparse import ArgumentParser
4
+ from pillow_heif import register_heif_opener
5
+ import src
6
+ import os
7
+ from json import dumps # Optional
8
+
9
+ register_heif_opener()
10
+
11
+
12
def process_image(image_path: str):
    """
    Run skin-tone processing on one image, saving the report image and a
    JSON dump of the remaining results under ``outputs/``.

    :param image_path: Path to the input image file.
    :return: The result dict (with "report_images" removed).
    """
    # BUG FIX: splitext keeps dotted basenames intact
    # (e.g. "a.b.jpg" -> "a.b"), unlike split(".")[0].
    im_basename = os.path.splitext(os.path.basename(image_path))[0]
    result = src.process(image_path, return_report_image=True)
    report_images = result.pop("report_images")

    # Robustness: make sure the output directory exists before writing.
    os.makedirs("outputs", exist_ok=True)

    # OpenCV writes BGR; the report image is RGB, so convert first.
    cv2.imwrite(
        f"outputs/face_{im_basename}.png",
        cv2.cvtColor(report_images, cv2.COLOR_RGB2BGR),
    )

    # Persist the remaining (JSON-serializable) results next to the image.
    with open(f"outputs/faces_{im_basename}.json", "w") as f:
        f.write(dumps(result))

    return result
+
33
+
34
if __name__ == "__main__":
    # Minimal CLI: one required input image path.
    parser = ArgumentParser()
    parser.add_argument(
        "-i",
        "--image_path",
        required=True,
        help="Path to the image",
    )
    cli_args = parser.parse_args()

    print("Image path:", cli_args.image_path)
    result = process_image(cli_args.image_path)
    print(result)