# Celltool_public / app.py
# Author: Shengxiao0709
# Update app.py
# Commit: c5c9315 (verified)
import gradio as gr
from gradio_bbox_annotator import BBoxAnnotator
from PIL import Image
import numpy as np
import torch
import os
from inference import load_model, run # 注意是你新改过的 inference.py
import shutil
import os
import os
import subprocess
# --- Startup disk housekeeping (Hugging Face Space) ---
# Show the size of everything in the working directory.
print("===== Space 中的目录结构和占用 =====")
subprocess.run("du -sh *", shell=True)
# List the contents of the user cache directory.
print("===== .cache 目录 =====")
subprocess.run("ls -lh ~/.cache", shell=True)
# Delete the default Hugging Face cache directory (~/.cache) to free disk space.
cache_path = os.path.expanduser("~/.cache")
if os.path.exists(cache_path):
    # ignore_errors=True: best-effort cleanup — a partially locked or
    # concurrently-written cache must not crash app startup with OSError.
    shutil.rmtree(cache_path, ignore_errors=True)
    print("✅ Deleted ~/.cache to free space.")
# Global model state, populated at import time.
MODEL = None                  # segmentation model instance (set by load_model_cpu)
DEVICE = torch.device("cpu")  # current inference device
CUDA_READY = False            # True once the model has been moved to the GPU

# Load the model (defaults to CPU).
def load_model_cpu():
    """Load the model via inference.load_model() and store it in the globals."""
    global MODEL, DEVICE
    loaded_model, loaded_device = load_model()
    MODEL = loaded_model
    DEVICE = loaded_device

# Eagerly load the model on CPU when the module is imported.
load_model_cpu()
# Automatic GPU switch (supported in the Spaces environment).
def prepare_cuda():
    """Move the global model to the GPU exactly once, if CUDA is available.

    Safe to call repeatedly: on CPU-only hosts, or after the first successful
    switch, the call is a logged no-op.
    """
    global MODEL, DEVICE, CUDA_READY
    can_switch = torch.cuda.is_available() and not CUDA_READY
    if not can_switch:
        print("CUDA not available or already initialized.")
        return
    print("CUDA is available. Moving model to GPU...")
    MODEL.to("cuda")
    DEVICE = torch.device("cuda")
    CUDA_READY = True
    # Touch the device once so the CUDA context is initialized up front.
    _ = torch.zeros(1, device=DEVICE)
    print("Model moved to CUDA.")
# Parse box coordinates.
def parse_first_bbox(bboxes):
    """Return (xmin, ymin, xmax, ymax) floats for the first box, or None.

    Accepts either dict boxes ({"x", "y", "width", "height"}) or sequence
    boxes ((xmin, ymin, xmax, ymax, ...)); anything else yields None.
    """
    if not bboxes:
        return None
    first = bboxes[0]
    if isinstance(first, dict):
        left = float(first["x"])
        top = float(first["y"])
        # Convert width/height form to corner form.
        right = left + float(first["width"])
        bottom = top + float(first["height"])
        return left, top, right, bottom
    if isinstance(first, (list, tuple)) and len(first) >= 4:
        xmin, ymin, xmax, ymax = (float(v) for v in first[:4])
        return xmin, ymin, xmax, ymax
    return None
# Inference logic: take the image and bbox, return mask + coordinate text.
def segment(annot_value):
    """Segment the region inside the first bounding box drawn on the image.

    Parameters:
        annot_value: value produced by BBoxAnnotator — a sequence of
            (image_path, bboxes). TODO(confirm): exact shape depends on the
            gradio_bbox_annotator component version.

    Returns:
        (PIL.Image | None, str): the RGB mask (or None on failure) and a
        status / bounding-box coordinate string.
    """
    prepare_cuda()  # switch to GPU on first call if one is available
    # BUG FIX: the early exits previously did `return None,` — a 1-tuple —
    # while the gr.Interface declares TWO outputs (image, textbox). Every
    # path must return exactly two values.
    if annot_value is None or len(annot_value) < 1:
        return None, "请先上传图像。"
    img_path = annot_value[0]
    bboxes = annot_value[1] if len(annot_value) > 1 else []
    if not bboxes:
        return None, "请先绘制矩形框。"
    box = parse_first_bbox(bboxes)
    if box is None:
        return None, "解析矩形框失败,请重画。"
    xmin, ymin, xmax, ymax = (int(v) for v in box)
    box_array = [[xmin, ymin, xmax, ymax]]
    # Run inference on the selected region.
    mask = run(MODEL, img_path, box_array, DEVICE)
    # Expand the mask to a uint8 RGB image (0/255 per channel).
    mask_rgb = np.stack([mask * 255] * 3, axis=-1).astype(np.uint8)
    bbox_text = f"xmin={xmin}, ymin={ymin}, xmax={xmax}, ymax={ymax}"
    return Image.fromarray(mask_rgb), bbox_text
# Example shown in the UI (optional): an image path plus one pre-drawn box.
example = ("003_img.png", [(50, 60, 120, 150, "cell")])

# Build the Gradio interface from named components.
annotator = BBoxAnnotator(
    value=example,
    categories=["cell", "nucleus"],
    label="Upload Microscopy Image",
)
output_components = [
    gr.Image(type="pil", label="Segmentation Mask"),
    gr.Textbox(label="Bounding Box Coordinates"),
]
demo = gr.Interface(
    fn=segment,
    inputs=annotator,
    outputs=output_components,
    examples=[[example]],
    cache_examples=False,
    title="Microscopy Cell Segmentation with Stable Diffusion + LoCA",
    description="Upload a microscopy image and draw a bounding box to segment cell instances using an attention-guided diffusion model.",
)
# Launch: queue requests and serve on all interfaces at port 7860.
if __name__ == "__main__":
    app = demo.queue()
    app.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_error=True,
        ssr_mode=False,
    )