Oamitai commited on
Commit
1f1a91e
·
verified ·
1 Parent(s): 9b24efb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +272 -240
app.py CHANGED
@@ -1,240 +1,272 @@
1
- import cv2
2
- import numpy as np
3
- import tensorflow as tf
4
- import torch
5
- from ultralytics import YOLO
6
- from PIL import Image
7
- import gradio as gr
8
- import traceback
9
- import pandas as pd
10
- from itertools import combinations
11
- from huggingface_hub import hf_hub_download
12
- import spaces # For ZeroGPU support
13
-
14
- # =============================================================================
15
- # MODEL LOADING (Keras Models on CPU)
16
- # =============================================================================
17
- # These models can be loaded globally.
18
- shape_classification_model = tf.keras.models.load_model(
19
- hf_hub_download("Oamitai/shape-classification", "shape_model.keras")
20
- )
21
- fill_classification_model = tf.keras.models.load_model(
22
- hf_hub_download("Oamitai/fill-classification", "fill_model.keras")
23
- )
24
-
25
- # Global YOLO models will be loaded lazily inside the GPU function.
26
- global_card_detection_model = None
27
- global_shape_detection_model = None
28
-
29
- # =============================================================================
30
- # ORIENTATION CORRECTION FUNCTIONS
31
- # =============================================================================
32
- def check_and_rotate_input_image(board_image, card_boxes):
33
- if len(card_boxes) == 0:
34
- return board_image, False
35
- total_width = total_height = 0
36
- for box in card_boxes:
37
- x1, y1, x2, y2 = box
38
- total_width += (x2 - x1)
39
- total_height += (y2 - y1)
40
- avg_width = total_width / len(card_boxes)
41
- avg_height = total_height / len(card_boxes)
42
- if avg_height > avg_width:
43
- rotated_image = cv2.rotate(board_image, cv2.ROTATE_90_CLOCKWISE)
44
- return rotated_image, True
45
- else:
46
- return board_image, False
47
-
48
- def restore_original_orientation(image, was_rotated):
49
- if was_rotated:
50
- return cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)
51
- return image
52
-
53
- # =============================================================================
54
- # PREDICTION FUNCTIONS
55
- # =============================================================================
56
- def predict_color(shape_image):
57
- hsv_image = cv2.cvtColor(shape_image, cv2.COLOR_BGR2HSV)
58
- green_mask = cv2.inRange(hsv_image, np.array([40, 50, 50]), np.array([80, 255, 255]))
59
- purple_mask = cv2.inRange(hsv_image, np.array([120, 50, 50]), np.array([160, 255, 255]))
60
- red_mask1 = cv2.inRange(hsv_image, np.array([0, 50, 50]), np.array([10, 255, 255]))
61
- red_mask2 = cv2.inRange(hsv_image, np.array([170, 50, 50]), np.array([180, 255, 255]))
62
- red_mask = cv2.bitwise_or(red_mask1, red_mask2)
63
- color_counts = {
64
- 'green': cv2.countNonZero(green_mask),
65
- 'purple': cv2.countNonZero(purple_mask),
66
- 'red': cv2.countNonZero(red_mask)
67
- }
68
- return max(color_counts, key=color_counts.get)
69
-
70
- def predict_card_features(card_image, shape_detection_model, fill_model, shape_model, box):
71
- shape_results = shape_detection_model(card_image)
72
- card_height, card_width = card_image.shape[:2]
73
- card_area = card_width * card_height
74
-
75
- filtered_boxes = []
76
- for detected_box in shape_results[0].boxes.xyxy.cpu().numpy():
77
- x1, y1, x2, y2 = detected_box.astype(int)
78
- shape_area = (x2 - x1) * (y2 - y1)
79
- if shape_area > 0.03 * card_area:
80
- filtered_boxes.append([x1, y1, x2, y2])
81
-
82
- if len(filtered_boxes) == 0:
83
- return {'count': 0, 'color': 'unknown', 'fill': 'unknown', 'shape': 'unknown', 'box': box}
84
-
85
- fill_input_shape = fill_model.input_shape[1:3]
86
- shape_input_shape = shape_model.input_shape[1:3]
87
- fill_imgs = []
88
- shape_imgs = []
89
- color_list = []
90
- for fb in filtered_boxes:
91
- x1, y1, x2, y2 = fb
92
- shape_img = card_image[y1:y2, x1:x2]
93
- fill_img = cv2.resize(shape_img, tuple(fill_input_shape)) / 255.0
94
- shape_img_resized = cv2.resize(shape_img, tuple(shape_input_shape)) / 255.0
95
- fill_imgs.append(fill_img)
96
- shape_imgs.append(shape_img_resized)
97
- color_list.append(predict_color(shape_img))
98
- fill_imgs = np.array(fill_imgs)
99
- shape_imgs = np.array(shape_imgs)
100
-
101
- fill_preds = fill_model.predict(fill_imgs, batch_size=len(fill_imgs))
102
- shape_preds = shape_model.predict(shape_imgs, batch_size=len(shape_imgs))
103
- fill_labels_list = ['empty', 'full', 'striped']
104
- shape_labels_list = ['diamond', 'oval', 'squiggle']
105
- predicted_fill = [fill_labels_list[np.argmax(pred)] for pred in fill_preds]
106
- predicted_shape = [shape_labels_list[np.argmax(pred)] for pred in shape_preds]
107
-
108
- count = min(len(filtered_boxes), 3)
109
- final_color = max(set(color_list), key=color_list.count)
110
- final_fill = max(set(predicted_fill), key=predicted_fill.count)
111
- final_shape = max(set(predicted_shape), key=predicted_shape.count)
112
-
113
- return {'count': count, 'color': final_color, 'fill': final_fill, 'shape': final_shape, 'box': box}
114
-
115
- def is_set(cards):
116
- for feature in ['Count', 'Color', 'Fill', 'Shape']:
117
- if len({card[feature] for card in cards}) not in [1, 3]:
118
- return False
119
- return True
120
-
121
- def find_sets(card_df):
122
- sets_found = []
123
- for combo in combinations(card_df.iterrows(), 3):
124
- cards = [entry[1] for entry in combo]
125
- if is_set(cards):
126
- set_info = {
127
- 'set_indices': [entry[0] for entry in combo],
128
- 'cards': [{feature: card[feature] for feature in ['Count', 'Color', 'Fill', 'Shape', 'Coordinates']} for card in cards]
129
- }
130
- sets_found.append(set_info)
131
- return sets_found
132
-
133
- def detect_cards_from_image(board_image, card_detection_model):
134
- card_results = card_detection_model(board_image)
135
- card_boxes = card_results[0].boxes.xyxy.cpu().numpy().astype(int)
136
- cards = []
137
- for box in card_boxes:
138
- x1, y1, x2, y2 = box
139
- card_img = board_image[y1:y2, x1:x2]
140
- cards.append((card_img, box))
141
- return cards, card_boxes
142
-
143
- def classify_cards_from_board_image(board_image, card_boxes, shape_detection_model, fill_model, shape_model):
144
- card_data = []
145
- for box in card_boxes:
146
- x1, y1, x2, y2 = box
147
- card_img = board_image[y1:y2, x1:x2]
148
- features = predict_card_features(card_img, shape_detection_model, fill_model, shape_model, box)
149
- card_data.append({
150
- "Count": features['count'],
151
- "Color": features['color'],
152
- "Fill": features['fill'],
153
- "Shape": features['shape'],
154
- "Coordinates": f"{box[0]}, {box[1]}, {box[2]}, {box[3]}"
155
- })
156
- return pd.DataFrame(card_data)
157
-
158
- def classify_and_find_sets_from_array(board_image, card_detection_model, shape_detection_model, fill_model, shape_model):
159
- _, card_boxes = detect_cards_from_image(board_image, card_detection_model)
160
- board_image, was_rotated = check_and_rotate_input_image(board_image, card_boxes)
161
- if was_rotated:
162
- _, card_boxes = detect_cards_from_image(board_image, card_detection_model)
163
- card_df = classify_cards_from_board_image(board_image, card_boxes, shape_detection_model, fill_model, shape_model)
164
- sets_found = find_sets(card_df)
165
- annotated_image = draw_sets_on_image(board_image.copy(), sets_found)
166
- final_image = restore_original_orientation(annotated_image, was_rotated)
167
- return sets_found, final_image
168
-
169
- # =============================================================================
170
- # DRAWING FUNCTIONS
171
- # =============================================================================
172
- def draw_sets_on_image(board_image, sets_info):
173
- colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255),
174
- (255, 255, 0), (255, 0, 255), (0, 255, 255)]
175
- base_thickness = 8
176
- base_expansion = 5
177
- for index, set_info in enumerate(sets_info):
178
- color = colors[index % len(colors)]
179
- thickness = base_thickness + 2 * index
180
- expansion = base_expansion + 15 * index
181
- for i, card in enumerate(set_info['cards']):
182
- coordinates = list(map(int, card['Coordinates'].split(',')))
183
- x1, y1, x2, y2 = coordinates
184
- x1_expanded = max(0, x1 - expansion)
185
- y1_expanded = max(0, y1 - expansion)
186
- x2_expanded = min(board_image.shape[1], x2 + expansion)
187
- y2_expanded = min(board_image.shape[0], y2 + expansion)
188
- cv2.rectangle(board_image, (x1_expanded, y1_expanded),
189
- (x2_expanded, y2_expanded), color, thickness)
190
- if i == 0:
191
- cv2.putText(board_image, f"Set {index + 1}", (x1_expanded, y1_expanded - 10),
192
- cv2.FONT_HERSHEY_SIMPLEX, 0.9, color, thickness)
193
- return board_image
194
-
195
- # =============================================================================
196
- # GRADIO INTERFACE FUNCTION (ZeroGPU)
197
- # =============================================================================
198
- @spaces.GPU(duration=280)
199
- def detect_and_display_sets_interface(input_image):
200
- try:
201
- image_cv = cv2.cvtColor(np.array(input_image), cv2.COLOR_RGB2BGR)
202
- global global_card_detection_model, global_shape_detection_model
203
- # Lazy load YOLO models on GPU after allocation.
204
- if global_card_detection_model is None:
205
- card_model_path = hf_hub_download("Oamitai/card-detection", "best.pt")
206
- global_card_detection_model = YOLO(card_model_path)
207
- global_card_detection_model.conf = 0.5
208
- global_card_detection_model.to("cuda")
209
- if global_shape_detection_model is None:
210
- shape_model_path = hf_hub_download("Oamitai/shape-detection", "best.pt")
211
- global_shape_detection_model = YOLO(shape_model_path)
212
- global_shape_detection_model.conf = 0.5
213
- global_shape_detection_model.to("cuda")
214
- sets_found, final_image = classify_and_find_sets_from_array(
215
- image_cv,
216
- global_card_detection_model,
217
- global_shape_detection_model,
218
- fill_classification_model,
219
- shape_classification_model
220
- )
221
- final_image_rgb = cv2.cvtColor(final_image, cv2.COLOR_BGR2RGB)
222
- return Image.fromarray(final_image_rgb), "Sets detected successfully."
223
- except Exception as e:
224
- err = f"❌ Error: {str(e)}\n{traceback.format_exc()}"
225
- return Image.fromarray(np.zeros((100, 100, 3), dtype=np.uint8)), err
226
-
227
- # =============================================================================
228
- # LAUNCH GRADIO
229
- # =============================================================================
230
- iface = gr.Interface(
231
- fn=detect_and_display_sets_interface,
232
- inputs=gr.Image(type="pil", label="Upload Board Image"),
233
- outputs=[gr.Image(type="pil", label="Annotated Image"), gr.Textbox(label="Status")],
234
- title="Set Game Detector",
235
- description=("Upload an image of a Set game board to detect cards, "
236
- "classify their features, and highlight valid sets.")
237
- )
238
-
239
- if __name__ == "__main__":
240
- iface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import spaces
3
+ from huggingface_hub import hf_hub_download
4
+ import cv2
5
+ import numpy as np
6
+ import tensorflow as tf
7
+ from tensorflow.keras.models import load_model
8
+ import torch
9
+ from ultralytics import YOLO
10
+ from PIL import Image
11
+ import traceback
12
+ import json
13
+ import pandas as pd
14
+ from itertools import combinations
15
+ from pathlib import Path
16
+
17
# =============================================================================
# MODEL LOADING
# =============================================================================
# NOTE: all four models are downloaded and loaded at import time, so the app
# fails fast if the Hub is unreachable. Loading happens on CPU; the ZeroGPU
# decorator on the inference handler allocates a GPU per request.

# YOLO detector that localizes whole cards on the board image.
card_model_path = hf_hub_download("Oamitai/card-detection", "best.pt")
card_detection_model = YOLO(card_model_path)
# NOTE(review): assigning `.conf` on a YOLO object is the old YOLOv5-hub idiom;
# recent ultralytics versions expect `conf=...` per predict call — confirm this
# threshold actually takes effect with the pinned ultralytics version.
card_detection_model.conf = 0.5

# YOLO detector that localizes the individual symbols within a card crop.
shape_model_path = hf_hub_download("Oamitai/shape-detection", "best.pt")
shape_detection_model = YOLO(shape_model_path)
shape_detection_model.conf = 0.5

# Keras classifier: diamond / oval / squiggle.
shape_classification_model = load_model(
    hf_hub_download("Oamitai/shape-classification", "shape_model.keras")
)

# Keras classifier: empty / full / striped.
fill_classification_model = load_model(
    hf_hub_download("Oamitai/fill-classification", "fill_model.keras")
)
40
+
41
+ # =============================================================================
42
+ # UTILITY & PROCESSING FUNCTIONS
43
+ # =============================================================================
44
+
45
def check_and_rotate_input_image(board_image: np.ndarray, detector) -> tuple[np.ndarray, bool]:
    """Rotate the board to landscape if the detected cards are portrait.

    Runs the card detector once and compares the mean width and height of
    the detected boxes; if cards are on average taller than wide, the whole
    board is rotated 90° clockwise so downstream crops are consistently
    oriented.

    Args:
        board_image: Board photo in OpenCV BGR layout.
        detector: YOLO card-detection model (callable on an image).

    Returns:
        Tuple of (possibly rotated image, True if a rotation was applied).
    """
    card_results = detector(board_image)
    card_boxes = card_results[0].boxes.xyxy.cpu().numpy().astype(int)
    # No detections: nothing to measure, keep the image as-is.
    if card_boxes.size == 0:
        return board_image, False

    widths = card_boxes[:, 2] - card_boxes[:, 0]
    heights = card_boxes[:, 3] - card_boxes[:, 1]
    if np.mean(heights) > np.mean(widths):
        return cv2.rotate(board_image, cv2.ROTATE_90_CLOCKWISE), True
    return board_image, False
59
+
60
def restore_original_orientation(image: np.ndarray, was_rotated: bool) -> np.ndarray:
    """Undo the 90° clockwise correction applied before processing.

    Args:
        image: Annotated board image.
        was_rotated: Flag returned by check_and_rotate_input_image.

    Returns:
        The image rotated back counter-clockwise when needed, otherwise
        the input unchanged.
    """
    if not was_rotated:
        return image
    return cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)
67
+
68
def predict_color(shape_image: np.ndarray) -> str:
    """Classify a symbol crop as 'green', 'purple' or 'red'.

    Converts the BGR crop to HSV, counts the pixels falling inside a fixed
    hue band per Set color (red needs two bands because its hue wraps
    around 180), and returns the color with the largest pixel count.
    """
    hsv = cv2.cvtColor(shape_image, cv2.COLOR_BGR2HSV)

    def band_count(lo, hi):
        # Pixel count inside one inclusive HSV range.
        return cv2.countNonZero(cv2.inRange(hsv, np.array(lo), np.array(hi)))

    # The two red hue bands are disjoint, so summing their counts equals
    # counting the OR of both masks.
    tallies = {
        'green': band_count([40, 50, 50], [80, 255, 255]),
        'purple': band_count([120, 50, 50], [160, 255, 255]),
        'red': band_count([0, 50, 50], [10, 255, 255])
               + band_count([170, 50, 50], [180, 255, 255]),
    }
    return max(tallies, key=tallies.get)
85
+
86
def predict_card_features(card_image: np.ndarray, shape_detector, fill_model, shape_model, box: list) -> dict:
    """Detect the symbols on one card crop and classify the card's features.

    Runs the shape detector on the crop, keeps boxes covering more than 3%
    of the card area (filters speckle detections), then batches the symbol
    crops through the Keras fill and shape classifiers and majority-votes
    color / fill / shape across the symbols.

    Args:
        card_image: BGR crop of a single card.
        shape_detector: YOLO model localizing symbols within the card.
        fill_model: Keras classifier -> empty / full / striped.
        shape_model: Keras classifier -> diamond / oval / squiggle.
        box: [x1, y1, x2, y2] of the card on the board, passed through.

    Returns:
        dict with keys count, color, fill, shape, box; features are
        'unknown' and count is 0 when no symbol survives filtering.
    """
    shape_results = shape_detector(card_image)
    card_h, card_w = card_image.shape[:2]
    card_area = card_w * card_h

    # Discard tiny detections (< 3% of the card area) that are almost
    # certainly noise rather than real symbols.
    filtered_boxes = [
        [int(x1), int(y1), int(x2), int(y2)]
        for x1, y1, x2, y2 in shape_results[0].boxes.xyxy.cpu().numpy()
        if (x2 - x1) * (y2 - y1) > 0.03 * card_area
    ]

    if not filtered_boxes:
        return {'count': 0, 'color': 'unknown', 'fill': 'unknown', 'shape': 'unknown', 'box': box}

    # Keras input_shape is (batch, height, width, channels) while
    # cv2.resize takes dsize as (width, height), so reverse the pair.
    # (No-op for the common case of square model inputs.)
    fill_h, fill_w = fill_model.input_shape[1:3]
    shape_h, shape_w = shape_model.input_shape[1:3]
    fill_imgs, shape_imgs, color_list = [], [], []

    for x1, y1, x2, y2 in filtered_boxes:
        symbol = card_image[y1:y2, x1:x2]
        fill_imgs.append(cv2.resize(symbol, (fill_w, fill_h)) / 255.0)
        shape_imgs.append(cv2.resize(symbol, (shape_w, shape_h)) / 255.0)
        color_list.append(predict_color(symbol))

    fill_preds = fill_model.predict(np.array(fill_imgs), batch_size=len(fill_imgs))
    shape_preds = shape_model.predict(np.array(shape_imgs), batch_size=len(shape_imgs))

    fill_labels_list = ['empty', 'full', 'striped']
    shape_labels_list = ['diamond', 'oval', 'squiggle']
    predicted_fill = [fill_labels_list[np.argmax(p)] for p in fill_preds]
    predicted_shape = [shape_labels_list[np.argmax(p)] for p in shape_preds]

    # Majority vote across symbols smooths over single misclassifications.
    color_label = max(set(color_list), key=color_list.count)
    fill_label = max(set(predicted_fill), key=predicted_fill.count)
    shape_label = max(set(predicted_shape), key=predicted_shape.count)

    # Clamp to 3: a Set card never shows more than three symbols, and an
    # unclamped spurious 4th detection would make this card unmatchable.
    return {'count': min(len(filtered_boxes), 3), 'color': color_label,
            'fill': fill_label, 'shape': shape_label, 'box': box}
134
+
135
def is_set(cards: list) -> bool:
    """Return True when three cards form a valid Set.

    For each of the four features, the three cards must show either one
    shared value or three pairwise-distinct values.
    """
    return all(
        len({card[feature] for card in cards}) in (1, 3)
        for feature in ('Count', 'Color', 'Fill', 'Shape')
    )
144
+
145
def find_sets(card_df: pd.DataFrame) -> list:
    """Enumerate every valid Set among the classified cards.

    Checks all 3-card combinations of DataFrame rows; each hit is recorded
    with the DataFrame indices of its cards plus a per-card feature dict.
    """
    features = ['Count', 'Color', 'Fill', 'Shape', 'Coordinates']
    results = []
    for combo in combinations(card_df.iterrows(), 3):
        indices = [idx for idx, _ in combo]
        cards = [row for _, row in combo]
        if not is_set(cards):
            continue
        results.append({
            'set_indices': indices,
            'cards': [{f: card[f] for f in features} for card in cards],
        })
    return results
159
+
160
def detect_cards_from_image(board_image: np.ndarray, detector) -> list:
    """Crop every detected card out of the board image.

    Args:
        board_image: Full board photo (BGR).
        detector: YOLO card-detection model.

    Returns:
        A list of (crop, [x1, y1, x2, y2]) pairs, one per detected card.
    """
    results = detector(board_image)
    boxes = results[0].boxes.xyxy.cpu().numpy().astype(int)
    cards = []
    for x1, y1, x2, y2 in boxes:
        cards.append((board_image[y1:y2, x1:x2], [x1, y1, x2, y2]))
    return cards
167
+
168
def classify_cards_from_board_image(board_image: np.ndarray, card_detector, shape_detector, fill_model, shape_model) -> pd.DataFrame:
    """Detect all cards on the board and classify each one's features.

    Returns:
        DataFrame with one row per card: Count, Color, Fill, Shape and the
        card's board coordinates serialized as "x1, y1, x2, y2".
    """
    rows = []
    for card_image, box in detect_cards_from_image(board_image, card_detector):
        feats = predict_card_features(card_image, shape_detector, fill_model, shape_model, box)
        rows.append({
            "Count": feats['count'],
            "Color": feats['color'],
            "Fill": feats['fill'],
            "Shape": feats['shape'],
            "Coordinates": f"{box[0]}, {box[1]}, {box[2]}, {box[3]}",
        })
    return pd.DataFrame(rows)
184
+
185
def draw_sets_on_image(board_image: np.ndarray, sets_info: list) -> np.ndarray:
    """Annotate the board with one expanded rectangle per card of each set.

    Each set gets its own color; later sets are drawn thicker and pushed
    further outward so overlapping rectangles stay distinguishable. The
    first card of each set also receives a "Set N" label above its box.
    """
    palette = [(255, 0, 0), (0, 255, 0), (0, 0, 255),
               (255, 255, 0), (255, 0, 255), (0, 255, 255)]
    img_h, img_w = board_image.shape[:2]
    for idx, set_info in enumerate(sets_info):
        color = palette[idx % len(palette)]
        thickness = 8 + 2 * idx
        grow = 5 + 15 * idx
        for card_no, card in enumerate(set_info['cards']):
            x1, y1, x2, y2 = map(int, card['Coordinates'].split(','))
            # Expand the box, clamped to the image bounds.
            left = max(0, x1 - grow)
            top = max(0, y1 - grow)
            right = min(img_w, x2 + grow)
            bottom = min(img_h, y2 + grow)
            cv2.rectangle(board_image, (left, top), (right, bottom), color, thickness)
            if card_no == 0:
                cv2.putText(board_image, f"Set {idx + 1}", (left, top - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.9, color, thickness)
    return board_image
209
+
210
def classify_and_find_sets_from_array(board_image: np.ndarray, card_detector, shape_detector, fill_model, shape_model) -> tuple[list, np.ndarray]:
    """Full pipeline: orient the board, classify cards, find and draw sets.

    Normalizes the board orientation, classifies every card, searches for
    valid sets, draws them on a copy of the oriented board and rotates the
    annotation back to the caller's original orientation.

    Returns:
        Tuple of (list of detected sets, annotated image in the input
        orientation).
    """
    processed_image, was_rotated = check_and_rotate_input_image(board_image, card_detector)
    card_df = classify_cards_from_board_image(processed_image, card_detector, shape_detector, fill_model, shape_model)
    sets_found = find_sets(card_df)
    # Draw on a copy so the oriented source image stays pristine.
    annotated_image = draw_sets_on_image(processed_image.copy(), sets_found)
    final_image = restore_original_orientation(annotated_image, was_rotated)
    return sets_found, final_image
220
+
221
+ # =============================================================================
222
+ # GRADIO INFERENCE FUNCTION
223
+ # =============================================================================
224
+
225
@spaces.GPU()
def detect_sets(input_image: Image.Image):
    """Gradio handler: find Sets in an uploaded board photo.

    Args:
        input_image: PIL image from the gr.Image input component.

    Returns:
        (annotated RGB image, or None on failure;
         JSON-serializable sets payload for the gr.JSON output).
    """
    try:
        # Gradio delivers RGB; the OpenCV pipeline works in BGR.
        image_cv = cv2.cvtColor(np.array(input_image), cv2.COLOR_RGB2BGR)
        sets_info, annotated_image = classify_and_find_sets_from_array(
            image_cv,
            card_detection_model,
            shape_detection_model,
            fill_classification_model,
            shape_classification_model
        )
        # Convert annotated image back to RGB for display.
        annotated_image_rgb = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)
        # gr.JSON accepts any JSON-serializable object directly; returning
        # the list avoids a dumps/loads round trip in the component.
        return annotated_image_rgb, sets_info
    except Exception:
        # A bare message string is not valid JSON and would break the
        # gr.JSON component's postprocessing; wrap the traceback in a dict.
        return None, {"error": traceback.format_exc()}
246
+
247
# =============================================================================
# GRADIO INTERFACE
# =============================================================================

# Two-row layout: upload + trigger button on top, the annotated image and
# the JSON description of the detected sets below.
with gr.Blocks(css="#col-container { margin: 0 auto; max-width: 800px; }") as demo:
    gr.Markdown("# Set Game Detector\nUpload an image of a Set game board to detect valid sets.")

    with gr.Row(elem_id="col-container"):
        image_input = gr.Image(label="Upload Set Game Board", type="pil")
        detect_button = gr.Button("Detect Sets")

    with gr.Row():
        result_image = gr.Image(label="Annotated Image")
        result_info = gr.JSON(label="Detected Sets Info")

    # Wire the button to the ZeroGPU-decorated inference handler.
    detect_button.click(
        detect_sets,
        inputs=[image_input],
        outputs=[result_image, result_info]
    )

# =============================================================================
# LAUNCH THE APP
# =============================================================================

demo.launch()