paulnhuu174 commited on
Commit
5ea9152
·
verified ·
1 Parent(s): 92273b3

Upload 24 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ imgs/0a3c588e-bbbe-11e8-b2ba-ac1f6b6435d0_blue.png filter=lfs diff=lfs merge=lfs -text
37
+ imgs/0a8caf00-bb9b-11e8-b2b9-ac1f6b6435d0_blue.png filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,591 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+ import matplotlib.pyplot as plt
4
+ from matplotlib.patches import Rectangle
5
+ from pathlib import Path
6
+ from skimage import io, measure, color, segmentation
7
+ import os
8
+ import warnings
9
+ from PIL import Image
10
+ import pandas as pd
11
+
12
+ try:
13
+ from cellpose import models
14
+ CELLPOSE_AVAILABLE = True
15
+ except ImportError:
16
+ CELLPOSE_AVAILABLE = False
17
+
18
+ try:
19
+ from ultralytics import YOLO
20
+ YOLO_AVAILABLE = True
21
+ except ImportError:
22
+ YOLO_AVAILABLE = False
23
+
24
# Configuration
# Folder scanned at startup for candidate microscopy images.
IMAGE_FOLDER = "./imgs"
# Optional label file mapping image IDs to space-separated category indices
# (columns 'Id' and 'Target' — see load_csv_data()).
CSV_FILE = "train.csv"

# Category names mapping (0-27)
# Index -> subcellular-localization display name. NOTE(review): this looks like
# the Human Protein Atlas classification scheme — confirm against the dataset docs.
CATEGORY_NAMES = {
    0: "Nucleoplasm", 1: "Nuclear membrane", 2: "Nucleoli",
    3: "Nucleoli fibrillar center", 4: "Nuclear speckles", 5: "Nuclear bodies",
    6: "Endoplasmic reticulum", 7: "Golgi apparatus", 8: "Peroxisomes",
    9: "Endosomes", 10: "Lysosomes", 11: "Intermediate filaments",
    12: "Actin filaments", 13: "Focal adhesion sites", 14: "Microtubules",
    15: "Microtubule ends", 16: "Cytokinetic bridge", 17: "Mitotic spindle",
    18: "Microtubule organizing center", 19: "Centrosome", 20: "Lipid droplets",
    21: "Plasma membrane", 22: "Cell junctions", 23: "Mitochondria",
    24: "Aggresome", 25: "Cytosol", 26: "Cytoplasmic bodies", 27: "Rods & rings"
}
40
+
41
# Global state
class AppState:
    """Mutable container for everything the UI remembers between events."""

    def __init__(self):
        # Gallery / image selection
        self.image_files = []        # paths of images found in IMAGE_FOLDER
        self.selected_image = None   # path of the image currently shown
        self.current_image = None    # loaded pixel data for the selected image
        # Segmentation results
        self.masks = None            # labeled mask array (0 = background)
        self.cell_properties = []    # regionprops for the current masks
        self.selected_cell = None    # label of the currently highlighted cell
        # Cached models (reloaded only when the requested type changes)
        self.cellpose_model = None
        self.yolo_model = None
        self.current_model_type = None
        # Label metadata loaded from CSV_FILE
        self.csv_data = None         # raw DataFrame
        self.image_categories = {}   # image id -> {'indices': [...], 'names': [...]}

state = AppState()
57
+
58
def extract_image_id(filename):
    """Return the image ID for *filename*.

    Strips the directory, the extension, and one trailing channel suffix
    (``_blue``/``_green``/``_red``/``_yellow``) if present.
    """
    basename = os.path.basename(filename)
    name_without_ext = os.path.splitext(basename)[0]
    # Strip only a *trailing* channel suffix. The previous str.replace()
    # removed every occurrence, corrupting IDs that contain the suffix
    # mid-string. Loop variable renamed from `color`, which shadowed the
    # module-level `skimage.color` import.
    for suffix in ('_blue', '_green', '_red', '_yellow'):
        if name_without_ext.endswith(suffix):
            return name_without_ext[:-len(suffix)]
    return name_without_ext
66
+
67
def load_csv_data():
    """Load CSV_FILE (if present) and build the image-id -> categories map.

    Populates state.csv_data and state.image_categories; silently does
    nothing when the file is missing, and logs (best-effort) on parse errors.
    """
    if not os.path.exists(CSV_FILE):
        return

    try:
        state.csv_data = pd.read_csv(CSV_FILE)
        state.image_categories = {}

        for _, row in state.csv_data.iterrows():
            # 'Target' holds space-separated category indices, e.g. "0 25".
            indices = [int(token) for token in str(row['Target']).split()]
            state.image_categories[row['Id']] = {
                'indices': indices,
                'names': [CATEGORY_NAMES.get(i, f"Unknown-{i}") for i in indices],
            }
    except Exception as e:
        print(f"Could not load CSV: {e}")
88
+
89
def scan_folder():
    """Scan IMAGE_FOLDER for images; return gallery items or None if empty.

    Side effect: replaces state.image_files with the sorted list of matches.
    """
    if not (os.path.exists(IMAGE_FOLDER) and os.path.isdir(IMAGE_FOLDER)):
        return None

    try:
        allowed = {'.png', '.jpg', '.jpeg', '.tif', '.tiff', '.bmp'}
        state.image_files = [
            str(entry)
            for entry in sorted(Path(IMAGE_FOLDER).iterdir())
            if entry.suffix.lower() in allowed
        ]

        if not state.image_files:
            return None

        # (path, caption) pairs as expected by gr.Gallery.
        return [(p, os.path.basename(p)) for p in state.image_files]
    except Exception as e:
        print(f"Scan error: {e}")
        return None
111
+
112
def prepare_image_for_yolo(image):
    """Return a 3-channel view of *image*, since YOLO expects RGB input.

    Grayscale (H, W) and single-channel (H, W, 1) arrays are replicated
    across three channels; anything else is passed through unchanged.
    """
    if image.ndim == 2:
        # Plain grayscale -> replicate into three identical channels.
        return np.stack((image,) * 3, axis=-1)
    if image.ndim == 3 and image.shape[2] == 1:
        # Single-channel stack -> squeeze, then replicate.
        squeezed = image[:, :, 0]
        return np.stack((squeezed,) * 3, axis=-1)
    # Already 3-channel (or an unexpected layout we leave untouched).
    return image
122
+
123
def select_image_from_gallery(evt: gr.SelectData):
    """Handle image selection from the gallery.

    Loads the clicked image, resets any previous segmentation state, and
    returns a 4-tuple matching the bound outputs:
    (figure, status message, category text, cell-dropdown update).
    """
    if not state.image_files or evt.index >= len(state.image_files):
        return None, "Invalid selection", "", gr.update(choices=[])

    state.selected_image = state.image_files[evt.index]

    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            state.current_image = io.imread(state.selected_image)

        # Normalize 16-bit data to 8-bit for display/processing. Guard the
        # all-zero case: the unguarded `/ max()` divided by zero on a black image.
        if state.current_image.dtype == np.uint16:
            peak = state.current_image.max()
            if peak > 0:
                state.current_image = ((state.current_image / peak) * 255).astype(np.uint8)
            else:
                state.current_image = state.current_image.astype(np.uint8)

        # Reset segmentation results left over from a previous image.
        state.masks = None
        state.cell_properties = []
        state.selected_cell = None

        # Category labels for this image ("" when no CSV was loaded).
        categories_text = get_image_categories()

        # Show the original image (no overlay yet — masks were just cleared).
        fig = create_visualization(show_numbers=False)

        return fig, f"Loaded: {os.path.basename(state.selected_image)}", categories_text, gr.update(choices=[])
    except Exception as e:
        return None, f"Load failed: {str(e)}", "", gr.update(choices=[])
152
+
153
def get_image_categories():
    """Return a formatted category listing for the selected image ('' if none)."""
    if not state.image_categories or not state.selected_image:
        return ""

    entry = state.image_categories.get(extract_image_id(state.selected_image))
    if not entry:
        return ""

    lines = ["Image Categories", "=" * 30]
    lines.extend(f"[{idx}] {name}" for idx, name in zip(entry['indices'], entry['names']))
    # Trailing newline matches the original line-by-line concatenation.
    return "\n".join(lines) + "\n"
167
+
168
def run_cellpose_segmentation(model_type, diameter, use_gpu):
    """Run Cellpose segmentation on the currently selected image.

    Parameters
    ----------
    model_type : str
        Cellpose pretrained model name (e.g. "nuclei", "cyto").
    diameter : str
        Expected cell diameter in pixels, or "auto" for automatic estimation.
    use_gpu : bool
        Whether to run the model on GPU.

    Returns
    -------
    (figure, status message, cell-dropdown update) tuple.
    """
    if state.current_image is None:
        return None, "No image selected", gr.update(choices=[])

    if not CELLPOSE_AVAILABLE:
        return None, "Cellpose not installed", gr.update(choices=[])

    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            # Parse diameter; None lets Cellpose estimate it automatically.
            if diameter == "auto":
                diam = None
            else:
                try:
                    diam = float(diameter)
                except (TypeError, ValueError):
                    # Narrowed from a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit. Bad input -> auto-estimate.
                    diam = None

            # (Re)load the model only when the requested type changed.
            if state.cellpose_model is None or state.current_model_type != model_type:
                state.cellpose_model = models.CellposeModel(
                    gpu=use_gpu,
                    model_type=model_type
                )
                state.current_model_type = model_type

            # [0, 0]: segment on grayscale with no second channel.
            channels = [0, 0]
            state.masks, flows, styles = state.cellpose_model.eval(
                state.current_image,
                diameter=diam,
                channels=channels
            )

        if state.masks is None or state.masks.max() == 0:
            return None, "No cells detected", gr.update(choices=[])

        return finalize_segmentation()

    except Exception as e:
        return None, f"Error: {str(e)}", gr.update(choices=[])
212
+
213
def run_yolo_segmentation(model_path, confidence, iou, use_gpu):
    """Run YOLO instance segmentation on the currently selected image.

    Returns a (figure, status message, cell-dropdown update) tuple, mirroring
    run_cellpose_segmentation().
    """
    if state.current_image is None:
        return None, "No image selected", gr.update(choices=[])

    if not YOLO_AVAILABLE:
        return None, "YOLO not installed", gr.update(choices=[])

    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            # Reload the model only when a different weights file is requested.
            if state.yolo_model is None or state.current_model_type != model_path:
                state.yolo_model = YOLO(model_path)
                state.current_model_type = model_path

            rgb_input = prepare_image_for_yolo(state.current_image)

            predictions = state.yolo_model.predict(
                rgb_input,
                conf=confidence,
                iou=iou,
                device='cuda' if use_gpu else 'cpu',
                verbose=False
            )

            # Collapse per-instance masks into one labeled array.
            state.masks = yolo_results_to_masks(predictions[0])

        if state.masks is None or state.masks.max() == 0:
            return None, "No objects detected", gr.update(choices=[])

        return finalize_segmentation()

    except Exception as e:
        return None, f"Error: {str(e)}", gr.update(choices=[])
252
+
253
def yolo_results_to_masks(result):
    """Convert a YOLO result object into a single labeled mask array.

    Returns an int32 array the size of the current image where pixels of
    the i-th detected instance hold value i (background = 0), or None when
    the result carries no masks. Later instances overwrite earlier ones
    where they overlap.
    """
    if result.masks is None:
        return None

    height, width = state.current_image.shape[:2]
    labeled = np.zeros((height, width), dtype=np.int32)

    for label, instance in enumerate(result.masks.data.cpu().numpy(), start=1):
        # YOLO masks come back at model resolution; nearest-neighbor resize
        # to the image size keeps the mask binary-ish.
        resized = np.array(Image.fromarray(instance).resize((width, height), Image.NEAREST))
        labeled[resized > 0.5] = label

    return labeled
267
+
268
def finalize_segmentation():
    """Shared post-processing once either backend has filled state.masks.

    Measures per-cell region properties, renders the overlay figure, and
    builds the dropdown entries. Returns the standard
    (figure, status message, dropdown update) tuple.
    """
    try:
        # regionprops needs a 2-D intensity image; collapse RGB to gray.
        if state.current_image.ndim == 3:
            from skimage.color import rgb2gray
            intensity = (rgb2gray(state.current_image) * 255).astype(np.uint8)
        else:
            intensity = state.current_image

        state.cell_properties = measure.regionprops(state.masks, intensity_image=intensity)

        fig = create_visualization(show_numbers=False)

        choices = [f"Cell {p.label} | Area: {p.area}px²" for p in state.cell_properties]

        return fig, f"{state.masks.max()} cells detected", gr.update(choices=choices)

    except Exception as e:
        return None, f"Error: {str(e)}", gr.update(choices=[])
289
+
290
def create_visualization(show_numbers=False, highlight_cell=None):
    """Create segmentation visualization.

    Renders the current image as a matplotlib figure. When state.masks is
    set, draws a colored label overlay plus red cell outlines, optionally
    numbering each cell and/or highlighting one cell in yellow; otherwise
    shows the raw image. Returns the Figure, or None on error/no image.

    NOTE: the imshow calls are layered in a deliberate order — base overlay
    first, then RGBA outline layers on top — so do not reorder them.
    """
    if state.current_image is None:
        return None

    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            fig, ax = plt.subplots(figsize=(8, 8))

            if state.masks is not None:
                # Prepare display image (label2rgb wants a 2-D background)
                if state.current_image.ndim == 2:
                    display_img = state.current_image
                else:
                    from skimage.color import rgb2gray
                    display_img = (rgb2gray(state.current_image) * 255).astype(np.uint8)

                # Create overlay: each label tinted over the grayscale image
                overlay = color.label2rgb(state.masks, display_img, bg_label=0, alpha=0.4)
                ax.imshow(overlay)

                # Add outlines as a transparent RGBA layer (red boundaries)
                outlines = segmentation.find_boundaries(state.masks, mode='outer')
                outline_img = np.zeros((*state.masks.shape, 4))
                outline_img[outlines] = [1, 0, 0, 1]
                ax.imshow(outline_img)

                # Show cell numbers at each region centroid
                if show_numbers and state.cell_properties:
                    for prop in state.cell_properties:
                        # regionprops centroid is (row, col) -> (y, x)
                        cy, cx = prop.centroid
                        ax.text(cx, cy, str(prop.label),
                                color='yellow',
                                fontsize=8,
                                fontweight='bold',
                                ha='center',
                                va='center',
                                bbox=dict(boxstyle='round,pad=0.3',
                                          facecolor='black',
                                          alpha=0.5,
                                          edgecolor='yellow',
                                          linewidth=1))

                # Highlight selected cell: yellow outline + bounding box
                if highlight_cell is not None:
                    cell_mask = state.masks == highlight_cell
                    cell_outline = segmentation.find_boundaries(cell_mask, mode='outer')
                    highlight_img = np.zeros((*state.masks.shape, 4))
                    highlight_img[cell_outline] = [1, 1, 0, 1]
                    ax.imshow(highlight_img)

                    # Draw the bounding rectangle of the highlighted cell
                    for prop in state.cell_properties:
                        if prop.label == highlight_cell:
                            minr, minc, maxr, maxc = prop.bbox
                            rect = Rectangle((minc, minr), maxc-minc, maxr-minr,
                                             fill=False, edgecolor='yellow', linewidth=2)
                            ax.add_patch(rect)
                            break

                ax.set_title(f'Segmentation Overlay ({state.masks.max()} cells)')
            else:
                # Show original (no segmentation yet)
                if state.current_image.ndim == 2:
                    ax.imshow(state.current_image, cmap='gray')
                else:
                    ax.imshow(state.current_image)
                ax.set_title('Original Image')

            ax.axis('off')
            plt.tight_layout()
            return fig

    except Exception as e:
        print(f"Visualization error: {e}")
        return None
367
+
368
def toggle_view(view_type, show_numbers):
    """Switch the display between the raw image and the segmentation overlay."""
    if view_type == "Original" and state.masks is not None:
        # Render the raw image with no overlay layers.
        fig, ax = plt.subplots(figsize=(8, 8))
        if state.current_image.ndim == 2:
            ax.imshow(state.current_image, cmap='gray')
        else:
            ax.imshow(state.current_image)
        ax.set_title('Original Image')
        ax.axis('off')
        plt.tight_layout()
        return fig
    # Overlay view (or nothing segmented yet): delegate to the shared renderer.
    return create_visualization(show_numbers=show_numbers, highlight_cell=state.selected_cell)
383
+
384
def toggle_cell_numbers(show_numbers):
    """Redraw the overlay with per-cell number labels switched on or off."""
    if state.masks is None:
        return None
    return create_visualization(show_numbers=show_numbers, highlight_cell=state.selected_cell)
390
+
391
def select_cell(cell_choice):
    """Handle cell selection from the dropdown.

    Returns (figure with the cell highlighted, details text), or
    (None, message) when nothing is selectable.
    """
    if not cell_choice or not state.cell_properties:
        return None, ""

    try:
        # Dropdown entries look like "Cell X | Area: Ypx²"; recover X.
        cell_id = int(cell_choice.split('|')[0].replace('Cell', '').strip())
        state.selected_cell = cell_id

        for prop in state.cell_properties:
            if prop.label != cell_id:
                continue

            lines = [
                f"Cell {cell_id}",
                "=" * 25,
                f"Area: {prop.area}px²",
                # regionprops centroid is (row, col); report as (x, y).
                f"Centroid: ({prop.centroid[1]:.0f}, {prop.centroid[0]:.0f})",
                f"Eccentricity: {prop.eccentricity:.3f}",
                f"Solidity: {prop.solidity:.3f}",
                f"Intensity: {prop.mean_intensity:.1f}",
            ]
            details = "\n".join(lines) + "\n"

            # Append the image-level category labels when a CSV is loaded.
            categories = get_image_categories()
            if categories:
                details += "\n" + categories

            fig = create_visualization(show_numbers=False, highlight_cell=cell_id)
            return fig, details

        return None, "Cell not found"
    except Exception as e:
        return None, f"Error: {str(e)}"
424
+
425
def run_segmentation(method, cp_model, diameter, yolo_model, confidence, iou, use_gpu):
    """Dispatch to the segmentation backend chosen in the UI."""
    if method == "Cellpose":
        return run_cellpose_segmentation(cp_model, diameter, use_gpu)
    return run_yolo_segmentation(yolo_model, confidence, iou, use_gpu)
431
+
432
def save_results():
    """Write the current masks and per-cell measurements to a temp directory.

    Returns ([mask_path, csv_path], status) on success, (None, message)
    when there is nothing to save or writing fails.
    """
    if state.masks is None:
        return None, "No results to save"

    try:
        import tempfile
        out_dir = tempfile.mkdtemp()

        stem = Path(state.selected_image).stem if state.selected_image else "segmentation"

        # Labeled mask array, reloadable with np.load().
        mask_path = os.path.join(out_dir, f"{stem}_masks.npy")
        np.save(mask_path, state.masks)

        # One CSV row per detected cell.
        csv_path = os.path.join(out_dir, f"{stem}_measurements.csv")
        header = "ID,Area,Centroid_X,Centroid_Y,Eccentricity,Solidity,Mean_Intensity\n"
        rows = [
            f"{p.label},{p.area},{p.centroid[1]:.1f},"
            f"{p.centroid[0]:.1f},{p.eccentricity:.3f},"
            f"{p.solidity:.3f},{p.mean_intensity:.1f}\n"
            for p in state.cell_properties
        ]
        with open(csv_path, 'w') as f:
            f.write(header)
            f.writelines(rows)

        return [mask_path, csv_path], "Results saved"
    except Exception as e:
        return None, f"Error: {str(e)}"
459
+
460
# Initialize: Load CSV and scan folder
# Runs at import time so the gallery is populated before the UI is built.
load_csv_data()
initial_gallery = scan_folder()

# Create Gradio interface
# Three-column layout: gallery | image view | controls & per-cell results.
with gr.Blocks(title="Cell Segmentation Tool", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Cell Segmentation Application")

    with gr.Row():
        # LEFT COLUMN - Image Gallery
        with gr.Column(scale=1):
            gr.Markdown("### Image Gallery")

            image_gallery = gr.Gallery(
                value=initial_gallery,
                label=f"{len(state.image_files)} images" if state.image_files else "No images",
                show_label=True,
                elem_id="gallery",
                columns=1,
                rows=None,
                height=600,
                object_fit="contain"
            )

            status_text = gr.Textbox(label="Status", interactive=False)

        # CENTER COLUMN - Image View
        with gr.Column(scale=2):
            gr.Markdown("### Image View")

            with gr.Row():
                view_mode = gr.Radio(
                    ["Original", "Overlay"],
                    value="Overlay",
                    label="View Mode"
                )
                show_numbers = gr.Checkbox(label="Show Cell Numbers", value=False)

            # Matplotlib figure produced by create_visualization()/toggle_view().
            image_display = gr.Plot(label="")

        # RIGHT COLUMN - Controls & Results
        with gr.Column(scale=1):
            gr.Markdown("### Segmentation Settings")

            method = gr.Radio(
                ["Cellpose", "YOLO"],
                label="Method",
                value="Cellpose"
            )

            # Cellpose controls (visible when method == "Cellpose")
            with gr.Group(visible=True) as cellpose_group:
                cp_model = gr.Dropdown(
                    ["nuclei", "cyto", "cyto2", "cyto3"],
                    label="Cellpose Model",
                    value="nuclei"
                )
                diameter = gr.Textbox(label="Diameter", value="auto")

            # YOLO controls (visible when method == "YOLO")
            with gr.Group(visible=False) as yolo_group:
                yolo_model = gr.Textbox(label="YOLO Model", value="yolov8n-seg.pt")
                confidence = gr.Slider(0, 1, value=0.25, label="Confidence")
                iou = gr.Slider(0, 1, value=0.45, label="IoU")

            use_gpu = gr.Checkbox(label="Use GPU", value=False)

            run_button = gr.Button("Run Segmentation", variant="primary", size="lg")

            gr.Markdown("### Detected Cells")

            # Populated by run_segmentation() with "Cell N | Area: Apx²" entries.
            cell_dropdown = gr.Dropdown(
                label="Select Cell",
                choices=[],
                interactive=True
            )

            gr.Markdown("### Cell Details")
            cell_details = gr.Textbox(
                label="",
                lines=12,
                interactive=False
            )

            save_button = gr.Button("Save Results", variant="secondary")
            output_files = gr.File(label="Download", file_count="multiple")

    # Event handlers
    def toggle_method(method_choice):
        # Show exactly one of the two settings groups.
        return (
            gr.update(visible=method_choice == "Cellpose"),
            gr.update(visible=method_choice == "YOLO")
        )

    method.change(toggle_method, inputs=[method], outputs=[cellpose_group, yolo_group])

    # Gallery click -> load image, reset segmentation, clear dropdown.
    image_gallery.select(
        select_image_from_gallery,
        outputs=[image_display, status_text, cell_details, cell_dropdown]
    )

    view_mode.change(
        toggle_view,
        inputs=[view_mode, show_numbers],
        outputs=[image_display]
    )

    show_numbers.change(
        toggle_cell_numbers,
        inputs=[show_numbers],
        outputs=[image_display]
    )

    run_button.click(
        run_segmentation,
        inputs=[method, cp_model, diameter, yolo_model, confidence, iou, use_gpu],
        outputs=[image_display, status_text, cell_dropdown]
    )

    cell_dropdown.change(
        select_cell,
        inputs=[cell_dropdown],
        outputs=[image_display, cell_details]
    )

    save_button.click(
        save_results,
        outputs=[output_files, status_text]
    )

if __name__ == "__main__":
    demo.launch(share=False)
imgs/0a007b34-bba5-11e8-b2ba-ac1f6b6435d0_blue.png ADDED
imgs/0a00aab2-bbbb-11e8-b2ba-ac1f6b6435d0_blue.png ADDED
imgs/0a00b510-bbc1-11e8-b2bb-ac1f6b6435d0_blue.png ADDED
imgs/0a06de86-bbb7-11e8-b2ba-ac1f6b6435d0_blue.png ADDED
imgs/0a0af552-bbb7-11e8-b2ba-ac1f6b6435d0_blue.png ADDED
imgs/0a0bd7c4-bbc6-11e8-b2bc-ac1f6b6435d0_blue.png ADDED
imgs/0a0bf050-bbc2-11e8-b2bb-ac1f6b6435d0_blue.png ADDED
imgs/0a1d66b8-bbaa-11e8-b2ba-ac1f6b6435d0_blue.png ADDED
imgs/0a2abec8-bbb7-11e8-b2ba-ac1f6b6435d0_blue.png ADDED
imgs/0a2ade02-bba9-11e8-b2ba-ac1f6b6435d0_blue.png ADDED
imgs/0a3c588e-bbbe-11e8-b2ba-ac1f6b6435d0_blue.png ADDED

Git LFS Details

  • SHA256: 9b1ff997c8c0b290c7312a6f37fccb0f204e5511b9540de163806e4e111dafda
  • Pointer size: 131 Bytes
  • Size of remote file: 111 kB
imgs/0a3eb75e-bba6-11e8-b2ba-ac1f6b6435d0_blue.png ADDED
imgs/0a6eb934-bbb7-11e8-b2ba-ac1f6b6435d0_blue.png ADDED
imgs/0a7e47d2-bbb2-11e8-b2ba-ac1f6b6435d0_blue.png ADDED
imgs/0a8b9d16-bbac-11e8-b2ba-ac1f6b6435d0_blue.png ADDED
imgs/0a8be01c-bbae-11e8-b2ba-ac1f6b6435d0_blue.png ADDED
imgs/0a8bf146-bbbe-11e8-b2ba-ac1f6b6435d0_blue.png ADDED
imgs/0a8caf00-bb9b-11e8-b2b9-ac1f6b6435d0_blue.png ADDED

Git LFS Details

  • SHA256: 9474b9fa14c821528e495e7d09e65f456b47c4420190e6d5ff2d527f53bff9d6
  • Pointer size: 131 Bytes
  • Size of remote file: 139 kB
imgs/0a8d03ac-bbae-11e8-b2ba-ac1f6b6435d0_blue.png ADDED
imgs/0a8e9110-bbbb-11e8-b2ba-ac1f6b6435d0_blue.png ADDED
imgs/0a9a8b6a-bbab-11e8-b2ba-ac1f6b6435d0_blue.png ADDED
requirements.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio
2
+ numpy
3
+ matplotlib
4
+ scikit-image
5
+ pandas
6
+ Pillow
7
+ cellpose
8
+ torch
9
+ torchvision
10
+ ultralytics
11
+ opencv-python-headless
train.csv ADDED
The diff for this file is too large to render. See raw diff