Shengxiao0709 commited on
Commit
1978e37
·
verified ·
1 Parent(s): 1803e27

Upload 5 files

Browse files
Files changed (6) hide show
  1. .gitattributes +2 -0
  2. 003_img.png +3 -0
  3. 1977_Well_F-5_Field_1.png +3 -0
  4. README.md +12 -0
  5. app.py +844 -0
  6. requirements.txt +47 -0
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ 003_img.png filter=lfs diff=lfs merge=lfs -text
37
+ 1977_Well_F-5_Field_1.png filter=lfs diff=lfs merge=lfs -text
003_img.png ADDED

Git LFS Details

  • SHA256: 41515cf5d7405135db4656c2cc61b59ab341143bfbee952b44a9542944e8528f
  • Pointer size: 131 Bytes
  • Size of remote file: 302 kB
1977_Well_F-5_Field_1.png ADDED

Git LFS Details

  • SHA256: 145a99e724048ed40db7843e57a1d93cd2e1f6e221d167a29b732740d6302c52
  • Pointer size: 132 Bytes
  • Size of remote file: 2.43 MB
README.md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Celltool
3
+ emoji: 🌖
4
+ colorFrom: purple
5
+ colorTo: gray
6
+ sdk: gradio
7
+ sdk_version: 5.49.1
8
+ app_file: app.py
9
+ pinned: false
10
+ ---
11
+
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,844 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import gradio as gr
2
+ # from gradio_bbox_annotator import BBoxAnnotator
3
+ # from PIL import Image
4
+ # import numpy as np
5
+ # import torch
6
+ # import os
7
+ # import shutil
8
+ # import subprocess
9
+ # import time, json, uuid
10
+ # from pathlib import Path
11
+ # import tempfile
12
+ # from inference import load_model, run
13
+ # from skimage import measure
14
+ # # === 图像处理依赖 ===
15
+ # from scipy.ndimage import label
16
+ # from matplotlib import cm
17
+ # # ===== 清理缓存目录 =====
18
+ # print("===== Space Usage =====")
19
+ # subprocess.run("du -sh *", shell=True)
20
+ # print("===== ~/.cache =====")
21
+ # subprocess.run("ls -lh ~/.cache", shell=True)
22
+ # cache_path = os.path.expanduser("~/.cache")
23
+ # if os.path.exists(cache_path):
24
+ # shutil.rmtree(cache_path)
25
+ # print("✅ Deleted ~/.cache to free space.")
26
+
27
+ # # ===== 模型初始化 =====
28
+ # MODEL = None
29
+ # DEVICE = torch.device("cpu")
30
+ # CUDA_READY = False
31
+
32
+ # def load_model_cpu():
33
+ # global MODEL, DEVICE
34
+ # MODEL, DEVICE = load_model(use_box=False)
35
+ # load_model_cpu()
36
+
37
+ # def prepare_cuda():
38
+ # global MODEL, DEVICE, CUDA_READY
39
+ # if torch.cuda.is_available() and not CUDA_READY:
40
+ # MODEL.to("cuda")
41
+ # DEVICE = torch.device("cuda")
42
+ # CUDA_READY = True
43
+ # _ = torch.zeros(1, device=DEVICE)
44
+
45
+ # # ===== BBox 解析 =====
46
+ # def parse_first_bbox(bboxes):
47
+ # if not bboxes:
48
+ # return None
49
+ # b = bboxes[0]
50
+ # if isinstance(b, dict):
51
+ # x, y = float(b.get("x", 0)), float(b.get("y", 0))
52
+ # w, h = float(b.get("width", 0)), float(b.get("height", 0))
53
+ # return x, y, x + w, y + h
54
+ # if isinstance(b, (list, tuple)) and len(b) >= 4:
55
+ # return float(b[0]), float(b[1]), float(b[2]), float(b[3])
56
+ # return None
57
+
58
+ # # ===== 保存用户反馈 =====
59
+ # DATASET_DIR = Path("solver_cache")
60
+ # DATASET_DIR.mkdir(parents=True, exist_ok=True)
61
+
62
+ # def save_feedback(query_id, feedback_type, feedback_text=None, img_path=None, bboxes=None):
63
+ # feedback_data = {
64
+ # "query_id": query_id,
65
+ # "feedback_type": feedback_type,
66
+ # "feedback_text": feedback_text,
67
+ # "image": img_path,
68
+ # "bboxes": bboxes,
69
+ # "datetime": time.strftime("%Y%m%d_%H%M%S")
70
+ # }
71
+ # feedback_file = DATASET_DIR / query_id / "feedback.json"
72
+ # feedback_file.parent.mkdir(parents=True, exist_ok=True)
73
+ # if feedback_file.exists():
74
+ # with feedback_file.open("r") as f:
75
+ # existing = json.load(f)
76
+ # if not isinstance(existing, list):
77
+ # existing = [existing]
78
+ # existing.append(feedback_data)
79
+ # feedback_data = existing
80
+ # else:
81
+ # feedback_data = [feedback_data]
82
+ # with feedback_file.open("w") as f:
83
+ # json.dump(feedback_data, f, indent=4, ensure_ascii=False)
84
+
85
+ # # ===== 彩色 mask 可视化 =====
86
+ # def colorize_mask(mask: np.ndarray, num_colors: int = 512) -> np.ndarray:
87
+ # mask = mask.astype(np.int32)
88
+
89
+ # def hsv_to_rgb(hh, ss, vv):
90
+ # i = int(hh * 6.0)
91
+ # f = hh * 6.0 - i
92
+ # p = vv * (1.0 - ss)
93
+ # q = vv * (1.0 - f * ss)
94
+ # t = vv * (1.0 - (1.0 - f) * ss)
95
+ # i = i % 6
96
+ # if i == 0: r, g, b = vv, t, p
97
+ # elif i == 1: r, g, b = q, vv, p
98
+ # elif i == 2: r, g, b = p, vv, t
99
+ # elif i == 3: r, g, b = p, q, vv
100
+ # elif i == 4: r, g, b = t, p, vv
101
+ # else: r, g, b = vv, p, q
102
+ # return int(r*255), int(g*255), int(b*255)
103
+
104
+ # palette = [(0, 0, 0)]
105
+ # for k in range(1, num_colors):
106
+ # hue = (k % num_colors) / float(num_colors)
107
+ # palette.append(hsv_to_rgb(hue, 1.0, 0.95))
108
+
109
+ # color_idx = mask % num_colors
110
+ # palette_arr = np.array(palette, dtype=np.uint8)
111
+ # return palette_arr[color_idx]
112
+
113
+ # # ===== 推理 + 实例彩色可视化 =====
114
+ # def segment_with_choice(use_box_choice, annot_value, mode="Overlay"):
115
+ # prepare_cuda()
116
+ # if annot_value is None or len(annot_value) < 1:
117
+ # print("❌ No annotation input")
118
+ # return None
119
+
120
+ # img_path = annot_value[0]
121
+ # bboxes = annot_value[1] if len(annot_value) > 1 else []
122
+
123
+ # print(f"🖼️ Image path: {img_path}")
124
+ # box_array = None
125
+ # if use_box_choice == "Yes" and bboxes:
126
+ # box = parse_first_bbox(bboxes)
127
+ # if box:
128
+ # xmin, ymin, xmax, ymax = map(int, box)
129
+ # box_array = [[xmin, ymin, xmax, ymax]]
130
+ # print(f"📦 Using box: {box_array}")
131
+
132
+ # try:
133
+ # mask = run(MODEL, img_path, box=box_array, device=DEVICE)
134
+ # print("📏 Mask shape:", mask.shape, "dtype:", mask.dtype, "unique:", np.unique(mask))
135
+ # except Exception as e:
136
+ # print(f"❌ Error during inference: {e}")
137
+ # return None
138
+
139
+ # try:
140
+ # img = Image.open(img_path)
141
+ # print("📷 Image mode:", img.mode, "size:", img.size)
142
+ # except Exception as e:
143
+ # print(f"❌ Failed to open image: {e}")
144
+ # return None
145
+
146
+ # try:
147
+ # img_rgb = img.convert("RGB").resize(mask.shape[::-1], resample=Image.BILINEAR)
148
+ # img_np = np.array(img_rgb, dtype=np.float32)
149
+ # if img_np.max() > 1.5:
150
+ # img_np = img_np / 255.0
151
+ # except Exception as e:
152
+ # print(f"❌ Error in image conversion/resizing: {e}")
153
+ # return None
154
+
155
+ # mask_np = np.array(mask)
156
+ # inst_mask = mask_np.astype(np.int32)
157
+ # unique_ids = np.unique(inst_mask)
158
+ # num_instances = len(unique_ids[unique_ids != 0])
159
+ # print(f"✅ Instance IDs found: {unique_ids}, Total instances: {num_instances}")
160
+
161
+ # if num_instances == 0:
162
+ # print("⚠️ No instance found, returning dummy red image")
163
+ # return Image.new("RGB", mask.shape[::-1], (255, 0, 0))
164
+
165
+ # # ==== Color Overlay (每个实例一个颜色) ====
166
+ # overlay = img_np.copy()
167
+ # alpha = 0.5
168
+ # cmap = cm.get_cmap("nipy_spectral", num_instances + 1)
169
+
170
+ # for inst_id in np.unique(inst_mask):
171
+ # if inst_id == 0:
172
+ # continue
173
+ # binary_mask = (inst_mask == inst_id).astype(np.uint8)
174
+ # color = np.array(cmap(inst_id / (num_instances + 1))[:3]) # RGB only, ignore alpha
175
+ # overlay[binary_mask == 1] = (1 - alpha) * overlay[binary_mask == 1] + alpha * color
176
+
177
+ # # 可选:绘制轮廓
178
+ # contours = measure.find_contours(binary_mask, 0.5)
179
+ # for contour in contours:
180
+ # contour = contour.astype(np.int32)
181
+ # overlay[contour[:, 0], contour[:, 1]] = [1.0, 1.0, 0.0] # 黄色轮廓
182
+
183
+ # overlay = np.clip(overlay * 255.0, 0, 255).astype(np.uint8)
184
+
185
+ # if mode == "Instance Mask Only":
186
+ # return Image.fromarray(colorize_mask(inst_mask, num_colors=512))
187
+
188
+ # return Image.fromarray(overlay)
189
+
190
+ # # ===== 示例图像 =====
191
+ # example_data = [
192
+ # ("003_img.png", [(50, 60, 120, 150, "cell")]),
193
+ # ("1977_Well_F-5_Field_1.png", [(30, 40, 100, 130, "cell")]),
194
+ # ]
195
+ # gallery_images = [p for p, _ in example_data]
196
+
197
+ # # ===== Gradio UI =====
198
+ # with gr.Blocks(title="Microscopy Cell Segmentation") as demo:
199
+ # gr.Markdown("## 🧬 Microscopy Image Segmentation — One Cell, One Color")
200
+
201
+ # with gr.Row():
202
+ # with gr.Column(scale=1):
203
+ # annotator = BBoxAnnotator(label="🖼️ Upload & Annotate", categories=["cell"])
204
+
205
+ # example_gallery = gr.Gallery(
206
+ # value=gallery_images,
207
+ # label="📁 Example Inputs",
208
+ # columns=[3], object_fit="cover", height=128
209
+ # )
210
+
211
+ # image_uploader = gr.Image(label="➕ Upload Image", type="filepath")
212
+
213
+ # run_btn = gr.Button("▶️ Run Segmentation")
214
+ # use_box_radio = gr.Radio(choices=["Yes", "No"], label="🔲 Use Bounding Box?", visible=False)
215
+ # confirm_btn = gr.Button("✅ Confirm", visible=False)
216
+ # mode_radio = gr.Radio(choices=["Overlay", "Instance Mask Only"], value="Overlay",
217
+ # label="🎨 Display Mode")
218
+
219
+ # with gr.Column(scale=2):
220
+ # image_output = gr.Image(type="pil", label="📸 Segmentation Result", height=400)
221
+ # score = gr.Slider(1, 5, step=1, value=3, label="🌟 Satisfaction (1–5)")
222
+ # comment_box = gr.Textbox(placeholder="Type your feedback...", lines=2, label="💬 Feedback")
223
+ # submit_score = gr.Button("💾 Submit Rating")
224
+
225
+ # user_uploaded_images = gr.State([])
226
+
227
+ # def add_uploaded_image(img_path, current_gallery):
228
+ # if not img_path:
229
+ # return current_gallery
230
+ # try:
231
+ # img = Image.open(img_path)
232
+ # img.thumbnail((128, 128))
233
+ # temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
234
+ # img.save(temp_file.name, format="PNG")
235
+ # thumb_path = temp_file.name
236
+ # if thumb_path not in current_gallery:
237
+ # current_gallery.append(thumb_path)
238
+ # except Exception as e:
239
+ # print(f"❌ Failed image: {e}")
240
+ # return current_gallery
241
+
242
+ # image_uploader.upload(add_uploaded_image, [image_uploader, user_uploaded_images], [example_gallery, user_uploaded_images])
243
+
244
+ # def on_gallery_select(evt: gr.SelectData, gallery_images):
245
+ # index = evt.index
246
+ # if index < len(example_data):
247
+ # selected_path, selected_boxes = example_data[index]
248
+ # return selected_path, selected_boxes
249
+ # else:
250
+ # selected_path = gallery_images[index]
251
+ # return selected_path, []
252
+
253
+ # example_gallery.select(on_gallery_select, inputs=[user_uploaded_images], outputs=[annotator])
254
+
255
+ # def show_radio():
256
+ # return gr.update(visible=True), gr.update(visible=True)
257
+
258
+ # run_btn.click(fn=show_radio, outputs=[use_box_radio, confirm_btn])
259
+ # confirm_btn.click(fn=segment_with_choice,
260
+ # inputs=[use_box_radio, annotator, mode_radio],
261
+ # outputs=image_output)
262
+
263
+ # def handle_comment(comment, annot_value):
264
+ # save_feedback(time.strftime("%Y%m%d_%H%M%S") + "_" + str(uuid.uuid4())[:8], "comment", comment, annot_value[0], annot_value[1])
265
+ # return ""
266
+
267
+ # def handle_rating(score, annot_value):
268
+ # save_feedback(time.strftime("%Y%m%d_%H%M%S") + "_" + str(uuid.uuid4())[:8], "rating", f"Satisfaction Score: {score}", annot_value[0], annot_value[1])
269
+ # return 3
270
+
271
+ # comment_box.submit(fn=handle_comment, inputs=[comment_box, annotator], outputs=[comment_box])
272
+ # submit_score.click(fn=handle_rating, inputs=[score, annotator], outputs=[score])
273
+
274
+ # if __name__ == "__main__":
275
+ # demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=True, show_error=True)
276
+ import gradio as gr
277
+ from gradio_bbox_annotator import BBoxAnnotator
278
+ from PIL import Image
279
+ import numpy as np
280
+ import torch
281
+ import os
282
+ import shutil
283
+ import subprocess
284
+ import time, json, uuid
285
+ from pathlib import Path
286
+ import tempfile
287
+ from inference import load_model, run
288
+ from skimage import measure
289
+ # === 图像处理依赖 ===
290
+ from scipy.ndimage import label
291
+ from matplotlib import cm
292
+
293
# ===== Clean up cache directories =====
# Print disk usage of the working dir and ~/.cache, then delete ~/.cache to
# free space on the disk-limited Hugging Face Space container.
# NOTE(review): shell=True is required for the `*` glob; both command strings
# are fixed literals, so there is no injection risk here.
# NOTE(review): deleting ~/.cache also removes any Hugging Face model cache —
# presumably intentional since models are loaded from local files; confirm.
print("===== Space Usage =====")
subprocess.run("du -sh *", shell=True)
print("===== ~/.cache =====")
subprocess.run("ls -lh ~/.cache", shell=True)
cache_path = os.path.expanduser("~/.cache")
if os.path.exists(cache_path):
    shutil.rmtree(cache_path)
    print("✅ Deleted ~/.cache to free space.")
+
303
# ===== Model initialisation state =====
MODEL = None                  # segmentation model; set by load_model_cpu() below
DEVICE = torch.device("cpu")  # current inference device; switched to CUDA by prepare_cuda()
CUDA_READY = False            # True once MODEL has been moved to the GPU

# Models used by the Counting and Tracking tabs (placeholders for now).
COUNTING_MODEL = None
TRACKING_MODEL = None
311
+
312
def load_model_cpu():
    """Load the segmentation model on CPU and record its device.

    Delegates to the project-local ``inference.load_model``; ``use_box=False``
    presumably selects the prompt-free model variant — TODO confirm.
    """
    global MODEL, DEVICE
    MODEL, DEVICE = load_model(use_box=False)

# Load eagerly at import time so the Space is ready before the UI starts.
load_model_cpu()
317
+
318
def load_counting_model():
    """Placeholder loader for the cell-counting model.

    Only logs for now. TODO: replace with real loading code, e.g.
    ``COUNTING_MODEL = torch.load("counting_model.pth")``.
    """
    global COUNTING_MODEL
    print("✅ Counting model loaded (placeholder)")
328
+
329
def load_tracking_model():
    """Placeholder loader for the cell-tracking model.

    Only logs for now. TODO: replace with real loading code, e.g.
    ``TRACKING_MODEL = torch.load("tracking_model.pth")``.
    """
    global TRACKING_MODEL
    print("✅ Tracking model loaded (placeholder)")
339
+
340
def prepare_cuda():
    """Move the segmentation model to the GPU once, if one is available.

    Idempotent: the CUDA_READY flag prevents repeated transfers. The dummy
    tensor allocation warms up the CUDA context so the first real inference
    does not pay the initialisation cost.
    """
    global MODEL, DEVICE, CUDA_READY
    if torch.cuda.is_available() and not CUDA_READY:
        MODEL.to("cuda")
        DEVICE = torch.device("cuda")
        CUDA_READY = True
        _ = torch.zeros(1, device=DEVICE)
347
+
348
# ===== BBox parsing =====
def parse_first_bbox(bboxes):
    """Return the first bounding box as (xmin, ymin, xmax, ymax) floats.

    Accepts either a dict with ``x``/``y``/``width``/``height`` keys or a
    sequence of at least four coordinates. Returns None when no usable box
    is present.
    """
    if not bboxes:
        return None
    first = bboxes[0]
    if isinstance(first, dict):
        left = float(first.get("x", 0))
        top = float(first.get("y", 0))
        return (
            left,
            top,
            left + float(first.get("width", 0)),
            top + float(first.get("height", 0)),
        )
    if isinstance(first, (list, tuple)) and len(first) >= 4:
        return tuple(float(v) for v in first[:4])
    return None
360
+
361
# ===== Persist user feedback =====
DATASET_DIR = Path("solver_cache")
DATASET_DIR.mkdir(parents=True, exist_ok=True)

def save_feedback(query_id, feedback_type, feedback_text=None, img_path=None, bboxes=None):
    """Append one feedback record to ``solver_cache/<query_id>/feedback.json``.

    The file always holds a JSON list of records; a pre-existing single
    object is wrapped into a list before appending.
    """
    record = {
        "query_id": query_id,
        "feedback_type": feedback_type,
        "feedback_text": feedback_text,
        "image": img_path,
        "bboxes": bboxes,
        "datetime": time.strftime("%Y%m%d_%H%M%S"),
    }
    target = DATASET_DIR / query_id / "feedback.json"
    target.parent.mkdir(parents=True, exist_ok=True)
    records = []
    if target.exists():
        with target.open("r") as fh:
            loaded = json.load(fh)
        records = loaded if isinstance(loaded, list) else [loaded]
    records.append(record)
    with target.open("w") as fh:
        json.dump(records, fh, indent=4, ensure_ascii=False)
387
+
388
# ===== Colour mask visualisation =====
def colorize_mask(mask: np.ndarray, num_colors: int = 512) -> np.ndarray:
    """Map integer instance IDs to RGB colours (ID 0 maps to black).

    A fixed HSV-derived palette of ``num_colors`` entries is built once and
    indexed with ``mask % num_colors``, so IDs beyond the palette size wrap
    around and reuse colours.
    """
    mask = mask.astype(np.int32)

    def hsv_to_rgb(hh, ss, vv):
        # Standard HSV -> RGB sector conversion, returning 0-255 ints.
        sector = int(hh * 6.0)
        frac = hh * 6.0 - sector
        p = vv * (1.0 - ss)
        q = vv * (1.0 - frac * ss)
        t = vv * (1.0 - (1.0 - frac) * ss)
        rgb_by_sector = {
            0: (vv, t, p),
            1: (q, vv, p),
            2: (p, vv, t),
            3: (p, q, vv),
            4: (t, p, vv),
            5: (vv, p, q),
        }
        r, g, b = rgb_by_sector[sector % 6]
        return int(r * 255), int(g * 255), int(b * 255)

    palette = [(0, 0, 0)]
    palette.extend(
        hsv_to_rgb((k % num_colors) / float(num_colors), 1.0, 0.95)
        for k in range(1, num_colors)
    )
    return np.array(palette, dtype=np.uint8)[mask % num_colors]
415
+
416
# ===== Inference + per-instance colour visualisation (Segmentation) =====
def segment_with_choice(use_box_choice, annot_value, mode="Overlay"):
    """Run instance segmentation on the annotated image and visualise it.

    Parameters
    ----------
    use_box_choice : "Yes"/"No" — whether to pass the first drawn bounding
        box to the model as a prompt.
    annot_value : (image_path, bboxes) value produced by the BBoxAnnotator.
    mode : "Overlay" blends one colour per instance over the image;
        "Instance Mask Only" returns the colourised mask itself.

    Returns a PIL.Image on success, or None on any failure (errors are
    printed to the Space log rather than raised).
    """
    prepare_cuda()
    if annot_value is None or len(annot_value) < 1:
        print("❌ No annotation input")
        return None

    img_path = annot_value[0]
    bboxes = annot_value[1] if len(annot_value) > 1 else []

    print(f"🖼️ Image path: {img_path}")
    box_array = None
    if use_box_choice == "Yes" and bboxes:
        box = parse_first_bbox(bboxes)
        if box:
            xmin, ymin, xmax, ymax = map(int, box)
            box_array = [[xmin, ymin, xmax, ymax]]
            print(f"📦 Using box: {box_array}")

    try:
        # Project-local inference entry point; mask is presumably an (H, W)
        # integer array of instance IDs (0 = background) — TODO confirm.
        mask = run(MODEL, img_path, box=box_array, device=DEVICE)
        print("📏 Mask shape:", mask.shape, "dtype:", mask.dtype, "unique:", np.unique(mask))
    except Exception as e:
        print(f"❌ Error during inference: {e}")
        return None

    try:
        img = Image.open(img_path)
        print("📷 Image mode:", img.mode, "size:", img.size)
    except Exception as e:
        print(f"❌ Failed to open image: {e}")
        return None

    try:
        # mask is (H, W); PIL wants (W, H), hence the reversed shape.
        img_rgb = img.convert("RGB").resize(mask.shape[::-1], resample=Image.BILINEAR)
        img_np = np.array(img_rgb, dtype=np.float32)
        # Normalise to [0, 1] for blending below.
        if img_np.max() > 1.5:
            img_np = img_np / 255.0
    except Exception as e:
        print(f"❌ Error in image conversion/resizing: {e}")
        return None

    mask_np = np.array(mask)
    inst_mask = mask_np.astype(np.int32)
    unique_ids = np.unique(inst_mask)
    num_instances = len(unique_ids[unique_ids != 0])
    print(f"✅ Instance IDs found: {unique_ids}, Total instances: {num_instances}")

    if num_instances == 0:
        print("⚠️ No instance found, returning dummy red image")
        return Image.new("RGB", mask.shape[::-1], (255, 0, 0))

    # ==== Colour overlay (one colour per instance) ====
    overlay = img_np.copy()
    alpha = 0.5
    cmap = cm.get_cmap("nipy_spectral", num_instances + 1)

    for inst_id in np.unique(inst_mask):
        if inst_id == 0:
            continue
        binary_mask = (inst_mask == inst_id).astype(np.uint8)
        color = np.array(cmap(inst_id / (num_instances + 1))[:3])  # RGB only, ignore alpha
        overlay[binary_mask == 1] = (1 - alpha) * overlay[binary_mask == 1] + alpha * color

        # Optional: draw the instance outline
        contours = measure.find_contours(binary_mask, 0.5)
        for contour in contours:
            contour = contour.astype(np.int32)
            overlay[contour[:, 0], contour[:, 1]] = [1.0, 1.0, 0.0]  # yellow outline

    overlay = np.clip(overlay * 255.0, 0, 255).astype(np.uint8)

    if mode == "Instance Mask Only":
        return Image.fromarray(colorize_mask(inst_mask, num_colors=512))

    return Image.fromarray(overlay)
492
+
493
# ===== Counting =====
def count_cells(image_path):
    """Count cells in an image and return an annotated visualisation.

    Placeholder pipeline (Otsu threshold + connected-component labelling)
    standing in for the real counting model; see the TODO below.

    Parameters
    ----------
    image_path : str or None — filesystem path to the input image.

    Returns
    -------
    tuple: (path of a PNG with numbered regions, summary string), or
    (None, error message) when input is missing or processing fails.
    """
    if image_path is None:
        return None, "请先上传图像"

    try:
        img = Image.open(image_path)

        # TODO: replace with the real counting model, e.g.
        #   results = COUNTING_MODEL(np.array(img)); count = len(results)

        # Demo method: Otsu threshold + connected-component labelling.
        from skimage import filters, morphology
        gray = np.array(img.convert('L'))
        thresh = filters.threshold_otsu(gray)
        binary = gray > thresh
        labeled = morphology.label(binary)
        count = labeled.max()

        # Visualise: write each region's index at its centroid.
        import matplotlib.pyplot as plt

        fig, ax = plt.subplots(1, 1, figsize=(10, 10))
        ax.imshow(img)

        for region_id in range(1, count + 1):
            coords = np.argwhere(labeled == region_id)
            if len(coords) > 0:
                y, x = coords.mean(axis=0)
                ax.text(x, y, str(region_id), color='red',
                        fontsize=12, fontweight='bold',
                        bbox=dict(boxstyle='round', facecolor='yellow', alpha=0.7))

        ax.axis('off')

        # Save the annotated figure to a temp file that Gradio serves back.
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
        plt.savefig(temp_file.name, bbox_inches='tight', dpi=150)
        plt.close(fig)  # close this specific figure to avoid leaking it

        result_text = f"🔢 检测到 {count} 个细胞"
        print(f"✅ Counting result: {count} cells")

        return temp_file.name, result_text

    except Exception as e:
        print(f"❌ Counting error: {e}")
        return None, f"计数失败: {str(e)}"
551
+
552
# ===== Tracking =====
def track_video(video_path, progress=gr.Progress()):
    """Track cells across a video (placeholder implementation).

    Reads the input frame by frame, stamps a frame counter on each frame
    (stand-in for a real tracker), and writes an MP4 copy.

    NOTE(review): ``progress=gr.Progress()`` as a default argument is the
    standard Gradio idiom for progress reporting inside event handlers.

    Returns (output_video_path, status message) on success, or
    (None, error message) on missing input or failure.
    """
    if video_path is None:
        return None, "请先上传视频"

    try:
        import cv2

        # Read input video properties.
        cap = cv2.VideoCapture(video_path)
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

        # Create the output video writer (mp4v codec).
        output_path = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4').name
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

        print(f"📹 Processing video: {total_frames} frames, {fps} fps")

        # TODO: initialise the tracker
        # tracker = initialize_your_tracker()

        frame_count = 0
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            # TODO: replace with real tracking inference
            # tracked_frame, tracks = TRACKING_MODEL.update(frame)

            # Demo: stamp the frame index onto a copy of the frame.
            tracked_frame = frame.copy()
            cv2.putText(tracked_frame, f"Frame {frame_count}/{total_frames}",
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

            out.write(tracked_frame)
            frame_count += 1

            # Update the progress bar every 10 frames.
            if frame_count % 10 == 0:
                progress((frame_count / total_frames, f"处理中: {frame_count}/{total_frames}"))

        cap.release()
        out.release()

        result_text = f"✅ 跟踪完成! 处理了 {frame_count} 帧"
        print(result_text)

        return output_path, result_text

    except Exception as e:
        print(f"❌ Tracking error: {e}")
        return None, f"跟踪失败: {str(e)}"
613
+
614
# ===== Example images bundled with the Space =====
# Each entry: (image path, default bounding boxes for the annotator).
example_data = [
    ("003_img.png", [(50, 60, 120, 150, "cell")]),
    ("1977_Well_F-5_Field_1.png", [(30, 40, 100, 130, "cell")]),
]
gallery_images = [path for path, _boxes in example_data]
620
+
621
# ===== Gradio UI =====
# Three-tab layout (Segmentation / Counting / Tracking). Each tab also has
# rating/comment widgets whose handlers persist feedback via save_feedback().
with gr.Blocks(title="Microscopy Analysis Suite", theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        """
        # 🔬 显微图像分析工具套件
        支持三种分析模式: 分割 (Segmentation) | 计数 (Counting) | 跟踪 (Tracking)
        """
    )

    with gr.Tabs():
        # ===== Tab 1: Segmentation =====
        with gr.Tab("🎨 分割 (Segmentation)"):
            gr.Markdown("## 🧬 细胞分割 — 每个细胞一个颜色")

            with gr.Row():
                with gr.Column(scale=1):
                    annotator = BBoxAnnotator(label="🖼️ 上传 & 标注", categories=["cell"])

                    example_gallery = gr.Gallery(
                        value=gallery_images,
                        label="📁 示例图像",
                        columns=[3], object_fit="cover", height=128
                    )

                    image_uploader = gr.Image(label="➕ 上传图像", type="filepath")

                    run_btn = gr.Button("▶️ 运行分割", variant="primary")
                    # Hidden until "run" is clicked; lets the user decide
                    # whether the drawn box is passed to the model.
                    use_box_radio = gr.Radio(choices=["Yes", "No"], label="🔲 使用边界框?", visible=False)
                    confirm_btn = gr.Button("✅ 确认", visible=False)
                    mode_radio = gr.Radio(choices=["Overlay", "Instance Mask Only"], value="Overlay",
                                          label="🎨 显示模式")

                with gr.Column(scale=2):
                    image_output = gr.Image(type="pil", label="📸 分割结果", height=400)
                    score = gr.Slider(1, 5, step=1, value=3, label="🌟 满意度 (1–5)")
                    comment_box = gr.Textbox(placeholder="输入您的反馈...", lines=2, label="💬 反馈")
                    submit_score = gr.Button("💾 提交评分")

            # Per-session list of thumbnail paths the user has uploaded.
            user_uploaded_images = gr.State([])

            def add_uploaded_image(img_path, current_gallery):
                """Thumbnail a newly uploaded image and append it to the gallery."""
                if not img_path:
                    return current_gallery
                try:
                    img = Image.open(img_path)
                    img.thumbnail((128, 128))
                    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
                    img.save(temp_file.name, format="PNG")
                    thumb_path = temp_file.name
                    if thumb_path not in current_gallery:
                        current_gallery.append(thumb_path)
                except Exception as e:
                    print(f"❌ Failed image: {e}")
                return current_gallery

            image_uploader.upload(add_uploaded_image, [image_uploader, user_uploaded_images],
                                  [example_gallery, user_uploaded_images])

            def on_gallery_select(evt: gr.SelectData, gallery_images):
                """Load the clicked gallery item (examples come with preset boxes)."""
                index = evt.index
                if index < len(example_data):
                    selected_path, selected_boxes = example_data[index]
                    return selected_path, selected_boxes
                else:
                    selected_path = gallery_images[index]
                    return selected_path, []

            example_gallery.select(on_gallery_select, inputs=[user_uploaded_images], outputs=[annotator])

            def show_radio():
                """Reveal the box-choice radio and the confirm button."""
                return gr.update(visible=True), gr.update(visible=True)

            run_btn.click(fn=show_radio, outputs=[use_box_radio, confirm_btn])
            confirm_btn.click(fn=segment_with_choice,
                              inputs=[use_box_radio, annotator, mode_radio],
                              outputs=image_output)

            def handle_comment(comment, annot_value):
                """Persist a free-text comment, then clear the textbox."""
                save_feedback(time.strftime("%Y%m%d_%H%M%S") + "_" + str(uuid.uuid4())[:8],
                              "comment", comment, annot_value[0], annot_value[1])
                return ""

            def handle_rating(score, annot_value):
                """Persist a rating, then reset the slider to its default (3)."""
                save_feedback(time.strftime("%Y%m%d_%H%M%S") + "_" + str(uuid.uuid4())[:8],
                              "rating", f"Satisfaction Score: {score}", annot_value[0], annot_value[1])
                return 3

            comment_box.submit(fn=handle_comment, inputs=[comment_box, annotator], outputs=[comment_box])
            submit_score.click(fn=handle_rating, inputs=[score, annotator], outputs=[score])

        # ===== Tab 2: Counting =====
        with gr.Tab("🔢 计数 (Counting)"):
            gr.Markdown("## 细胞计数分析")

            with gr.Row():
                with gr.Column(scale=1):
                    count_input = gr.Image(
                        label="🖼️ 上传图像",
                        type="filepath"
                    )
                    count_btn = gr.Button("▶️ 运行计数", variant="primary")

                    gr.Markdown(
                        """
                        **说明:**
                        - 自动检测并计数图像中的细胞
                        - 结果会在图像上标注编号
                        """
                    )

                with gr.Column(scale=2):
                    count_output_img = gr.Image(
                        label="📸 计数结果",
                        type="filepath"
                    )
                    count_output_text = gr.Textbox(
                        label="🔢 统计信息",
                        lines=2
                    )

                    count_score = gr.Slider(1, 5, step=1, value=3, label="🌟 满意度 (1–5)")
                    count_comment = gr.Textbox(placeholder="输入反馈...", lines=2, label="💬 反馈")
                    count_submit = gr.Button("💾 提交评分")

            # Wire up the counting pipeline and its feedback handler.
            count_btn.click(
                fn=count_cells,
                inputs=count_input,
                outputs=[count_output_img, count_output_text]
            )

            def handle_count_feedback(score, comment, img_path):
                """Persist counting feedback (only if an image was given), then reset widgets."""
                if img_path:
                    save_feedback(
                        time.strftime("%Y%m%d_%H%M%S") + "_count_" + str(uuid.uuid4())[:8],
                        "counting",
                        f"Score: {score}, Comment: {comment}",
                        img_path,
                        None
                    )
                return 3, ""

            count_submit.click(
                fn=handle_count_feedback,
                inputs=[count_score, count_comment, count_input],
                outputs=[count_score, count_comment]
            )

        # ===== Tab 3: Tracking =====
        with gr.Tab("🎬 跟踪 (Tracking)"):
            gr.Markdown("## 视频细胞跟踪")

            with gr.Row():
                with gr.Column(scale=1):
                    track_input = gr.Video(
                        label="📹 上传视频"
                    )
                    track_btn = gr.Button("▶️ 运行跟踪", variant="primary")

                    gr.Markdown(
                        """
                        **说明:**
                        - 支持格式: MP4, AVI, MOV
                        - 自动跟踪视频中的细胞运动
                        - 处理时间取决于视频长度
                        """
                    )

                with gr.Column(scale=2):
                    track_output_video = gr.Video(
                        label="📸 跟踪结果"
                    )
                    track_output_text = gr.Textbox(
                        label="📊 处理状态",
                        lines=2
                    )

                    track_score = gr.Slider(1, 5, step=1, value=3, label="🌟 满意度 (1–5)")
                    track_comment = gr.Textbox(placeholder="输入反馈...", lines=2, label="💬 反馈")
                    track_submit = gr.Button("💾 提交评分")

            # Wire up the tracking pipeline and its feedback handler.
            track_btn.click(
                fn=track_video,
                inputs=track_input,
                outputs=[track_output_video, track_output_text]
            )

            def handle_track_feedback(score, comment, video_path):
                """Persist tracking feedback (only if a video was given), then reset widgets."""
                if video_path:
                    save_feedback(
                        time.strftime("%Y%m%d_%H%M%S") + "_track_" + str(uuid.uuid4())[:8],
                        "tracking",
                        f"Score: {score}, Comment: {comment}",
                        video_path,
                        None
                    )
                return 3, ""

            track_submit.click(
                fn=handle_track_feedback,
                inputs=[track_score, track_comment, track_input],
                outputs=[track_score, track_comment]
            )

    # ===== Footer =====
    gr.Markdown(
        """
        ---
        ### 💡 功能说明
        - **Segmentation**: 分割并可视化图像中的每个细胞
        - **Counting**: 自动计数图像中的细胞数量
        - **Tracking**: 跟踪视频中细胞的运动轨迹
        """
    )
836
+
837
if __name__ == "__main__":
    # queue() enables progress bars and request queuing; bind on all
    # interfaces / port 7860 as required by the Hugging Face Space container.
    demo.queue().launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=True,
        show_error=True
    )
844
+
requirements.txt ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # PyTorch 2.4.1 + torchvision
2
+ torch==2.4.1
3
+ torchvision==0.19.1
4
+ torchaudio==2.4.1
5
+ # Core dependencies
6
+ diffusers==0.29.0
7
+ transformers==4.37.2
8
+ huggingface_hub==0.24.1
9
+ accelerate==0.23.0
10
+ pyrallis
11
+ easydict
12
+ omegaconf==2.1.1
13
+ einops==0.3.0
14
+ torch-fidelity==0.3.0
15
+ torchmetrics>=0.11.0
16
+ pytorch-lightning==2.0.9
17
+ taming-transformers @ git+https://github.com/CompVis/taming-transformers.git@master
18
+ clip @ git+https://github.com/openai/CLIP.git@main
19
+
20
+ # Computer vision
21
+ opencv-python==4.5.5.64 # ✅ 避免 dnn.DictValue 错误
22
+ opencv-python-headless==4.5.5.64 # ✅ 防止系统默认装新版导致冲突(和 GUI 冲突不大)
23
+ kornia==0.6
24
+ albumentations==0.4.3
25
+ imageio>=2.27 # ✅ 保证兼容 scikit-image 0.21.0
26
+ imageio-ffmpeg==0.4.2
27
+ matplotlib
28
+ scikit-image==0.21.0
29
+ Pillow
30
+ segment-anything
31
+ numpy==1.24.4 # ✅ 避免 numpy 2.x 引发 ABI 问题
32
+
33
+ # Gradio
34
+ gradio
35
+ gradio-bbox-annotator
36
+
37
+ # Utilities
38
+ natsort
39
+ roifile
40
+ fill-voids
41
+ configargparse
42
+ ipywidgets
43
+ ftfy
44
+ sniffio
45
+ websocket-client
46
+ dask
47
+ tensorboard