Update README.md
Browse files
README.md
CHANGED
|
@@ -59,14 +59,32 @@ Example usage:
|
|
| 59 |
import cv2
|
| 60 |
from transformers import AutoModel
|
| 61 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
model = AutoModel.from_pretrained("ianpan/mammoscreen", trust_remote_code=True)
|
| 63 |
-
model = model.eval().to(
|
| 64 |
|
| 65 |
cc_img = cv2.imread("mammo_cc.png", cv2.IMREAD_GRAYSCALE)
|
| 66 |
mlo_img = cv2.imread("mammo_mlo.png", cv2.IMREAD_GRAYSCALE)
|
| 67 |
|
|
|
|
|
|
|
|
|
|
| 68 |
with torch.inference_mode():
|
| 69 |
-
output = model({"cc": cc_img, "mlo": mlo_img}, device=
|
| 70 |
```
|
| 71 |
|
| 72 |
Note that the model preprocesses the data within the `forward` function into the necessary format.
|
|
@@ -75,7 +93,7 @@ If you want the predicted density class, take the argmax: `output['density'].arg
|
|
| 75 |
|
| 76 |
You can also access each neural net separately using `model.net{i}`. However, you must apply the preprocessing outside of the `forward` function.
|
| 77 |
```
|
| 78 |
-
input_dict = model.net0.preprocess({"cc": cc_img, "mlo": mlo_img}, device=
|
| 79 |
with torch.inference_mode():
|
| 80 |
out = model.net0(input_dict)
|
| 81 |
```
|
|
@@ -90,7 +108,10 @@ mlo_images = ["rt_pt1_mlo.png", "lt_pt1_mlo.png", "rt_pt2_mlo.png", "lt_pt2_mlo.p
|
|
| 90 |
cc_images = [cv2.imread(_, cv2.IMREAD_GRAYSCALE) for _ in cc_images]
|
| 91 |
mlo_images = [cv2.imread(_, cv2.IMREAD_GRAYSCALE) for _ in mlo_images]
|
| 92 |
|
|
|
|
|
|
|
|
|
|
| 93 |
input_dict = [{"cc": cc_img, "mlo": mlo_img} for cc_img, mlo_img in zip(cc_images, mlo_images)]
|
| 94 |
with torch.inference_mode():
|
| 95 |
-
output = model(input_dict, device=
|
| 96 |
```
|
|
|
|
| 59 |
import cv2
|
| 60 |
from transformers import AutoModel
|
| 61 |
|
| 62 |
+
def crop_mammo(img, crop_model, device):
|
| 63 |
+
img_shape = torch.tensor([img.shape[:2]])
|
| 64 |
+
x = crop_model.preprocess(img)
|
| 65 |
+
x = torch.from_numpy(x).expand(1, 1, -1, -1).float().to(device)
|
| 66 |
+
with torch.inference_mode():
|
| 67 |
+
coords = crop_model(x, img_shape)
|
| 68 |
+
coords = coords[0].cpu().numpy()
|
| 69 |
+
x, y, w, h = coords
|
| 70 |
+
return img[y: y + h, x: x + w]
|
| 71 |
+
|
| 72 |
+
device = "cuda:0"
|
| 73 |
+
|
| 74 |
+
crop_model = AutoModel.from_pretrained("ianpan/mammo-crop", trust_remote_code=True)
|
| 75 |
+
crop_model = crop_model.eval().to(device)
|
| 76 |
+
|
| 77 |
model = AutoModel.from_pretrained("ianpan/mammoscreen", trust_remote_code=True)
|
| 78 |
+
model = model.eval().to(device)
|
| 79 |
|
| 80 |
cc_img = cv2.imread("mammo_cc.png", cv2.IMREAD_GRAYSCALE)
|
| 81 |
mlo_img = cv2.imread("mammo_mlo.png", cv2.IMREAD_GRAYSCALE)
|
| 82 |
|
| 83 |
+
cc_img = crop_mammo(cc_img, crop_model, device)
|
| 84 |
+
mlo_img = crop_mammo(mlo_img, crop_model, device)
|
| 85 |
+
|
| 86 |
with torch.inference_mode():
|
| 87 |
+
output = model({"cc": cc_img, "mlo": mlo_img}, device=device)
|
| 88 |
```
|
| 89 |
|
| 90 |
Note that the model preprocesses the data within the `forward` function into the necessary format.
|
|
|
|
| 93 |
|
| 94 |
You can also access each neural net separately using `model.net{i}`. However, you must apply the preprocessing outside of the `forward` function.
|
| 95 |
```
|
| 96 |
+
input_dict = model.net0.preprocess({"cc": cc_img, "mlo": mlo_img}, device=device)
|
| 97 |
with torch.inference_mode():
|
| 98 |
out = model.net0(input_dict)
|
| 99 |
```
|
|
|
|
| 108 |
cc_images = [cv2.imread(_, cv2.IMREAD_GRAYSCALE) for _ in cc_images]
|
| 109 |
mlo_images = [cv2.imread(_, cv2.IMREAD_GRAYSCALE) for _ in mlo_images]
|
| 110 |
|
| 111 |
+
cc_images = [crop_mammo(_, crop_model, device) for _ in cc_images]
|
| 112 |
+
mlo_images = [crop_mammo(_, crop_model, device) for _ in mlo_images]
|
| 113 |
+
|
| 114 |
input_dict = [{"cc": cc_img, "mlo": mlo_img} for cc_img, mlo_img in zip(cc_images, mlo_images)]
|
| 115 |
with torch.inference_mode():
|
| 116 |
+
output = model(input_dict, device=device)
|
| 117 |
```
|