Datasets:

ArXiv:
File size: 13,890 Bytes
b4d7ac8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# =========================================================================
# Adapted from https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/core/boxes/sampler.py
# which has the following license...
# https://github.com/MIC-DKFZ/nnDetection/blob/main/LICENSE
#
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#    http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The functions in this script are adapted from nnDetection,
https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/core/boxes/sampler.py
"""

from __future__ import annotations

import logging

import torch
from torch import Tensor


class HardNegativeSamplerBase:
    """
    Base class for hard negative sampling.

    Hard negative sampling is used to suppress the false positive rate in
    classification tasks: during training, the negative samples with the
    highest prediction scores ("hard" negatives) are preferentially chosen
    for the loss computation.

    The typical training workflow:
    1) forward the network and obtain prediction scores (classification prob/logits) for all samples;
    2) use this sampler to pick hard negatives with high prediction scores plus some positives;
    3) compute the classification loss on the selected samples only;
    4) back-propagate.

    Args:
        pool_size: when ``num_neg`` hard negatives are requested, they are drawn
            at random from the ``num_neg * pool_size`` negatives with the highest
            prediction scores. A larger ``pool_size`` adds randomness but yields
            less 'hard' negatives, i.e., negatives with lower prediction scores.
    """

    def __init__(self, pool_size: float = 10) -> None:
        self.pool_size = pool_size

    def select_negatives(self, negative: Tensor, num_neg: int, fg_probs: Tensor) -> Tensor:
        """
        Select hard negative samples.

        Args:
            negative: indices of all the negative samples, sized (P,),
                where P is the number of negative samples.
            num_neg: number of negative samples to draw.
            fg_probs: maximum foreground prediction score (probability) across
                all classes for each sample, sized (A,), where A is the number
                of samples in one image.

        Returns:
            binary mask (uint8) sized (A,); 1 marks a selected negative sample.
        """
        if negative.numel() > fg_probs.numel():
            raise ValueError("The number of negative samples should not be larger than the number of all samples.")

        # candidate pool holds the ``num_neg * pool_size`` highest-scoring negatives,
        # capped by how many negatives actually exist
        pool_size = min(int(num_neg * self.pool_size), negative.numel())

        # rank the negatives by score and keep the top of the pool
        # (scores are cast to float32 before topk, as in the original — presumably
        # for dtype compatibility of topk; confirm if changing)
        neg_scores = fg_probs[negative].to(torch.float32)
        _, pool_idx = neg_scores.topk(pool_size, dim=0, sorted=True)
        candidates = negative[pool_idx]

        # draw ``num_neg`` candidates uniformly at random from the pool
        shuffle = torch.randperm(candidates.numel(), device=candidates.device)
        chosen = candidates[shuffle[:num_neg]]

        # binary mask over all samples marking the chosen negatives
        neg_mask = torch.zeros_like(fg_probs, dtype=torch.uint8)
        neg_mask[chosen] = 1
        return neg_mask


class HardNegativeSampler(HardNegativeSamplerBase):
    """
    HardNegativeSampler is used to suppress false positive rate in classification tasks.
    During training, it selects negative samples with high prediction scores.

    The training workflow is described as the follows:
    1) forward network and get prediction scores (classification prob/logits) for all the samples;
    2) use hard negative sampler to choose negative samples with high prediction scores and some positive samples;
    3) compute classification loss for the selected samples;
    4) do back propagation.

    Args:
        batch_size_per_image: number of training samples to be randomly selected per image
        positive_fraction: percentage of positive elements in the selected samples
        min_neg: minimum number of negative samples to select if possible.
        pool_size: when we need ``num_neg`` hard negative samples, they will be randomly selected from
            ``num_neg * pool_size`` negative samples with the highest prediction scores.
            Larger ``pool_size`` gives more randomness, yet selects negative samples that are less 'hard',
            i.e., negative samples with lower prediction scores.
    """

    def __init__(
        self, batch_size_per_image: int, positive_fraction: float, min_neg: int = 1, pool_size: float = 10
    ) -> None:
        super().__init__(pool_size=pool_size)
        self.min_neg = min_neg
        self.batch_size_per_image = batch_size_per_image
        self.positive_fraction = positive_fraction
        logging.info("Sampling hard negatives on a per batch basis")

    def __call__(self, target_labels: list[Tensor], concat_fg_probs: Tensor) -> tuple[list[Tensor], list[Tensor]]:
        """
        Select positives and hard negatives from list samples per image.
        Hard negative sampler will be applied to each image independently.

        Args:
            target_labels: list of labels per image.
                For image i in the batch, target_labels[i] is a Tensor sized (A_i,),
                where A_i is the number of samples in image i.
                Positive samples have positive labels, negative samples have label 0.
            concat_fg_probs: concatenated maximum foreground probability for all the images, sized (R,),
                where R is the sum of all samples inside one batch, i.e., R = A_0 + A_1 + ...

        Returns:
            - list of binary mask for positive samples
            - list of binary mask for negative samples

        Example:
            .. code-block:: python

                sampler = HardNegativeSampler(
                    batch_size_per_image=6, positive_fraction=0.5, min_neg=1, pool_size=2
                )
                # two images with different number of samples
                target_labels = [ torch.tensor([0,1]), torch.tensor([1,0,2,1])]
                concat_fg_probs = torch.rand(6)
                pos_idx_list, neg_idx_list = sampler(target_labels, concat_fg_probs)
        """
        # split the concatenated scores back into per-image tensors before sampling
        samples_per_image = [samples_in_image.shape[0] for samples_in_image in target_labels]
        fg_probs = concat_fg_probs.split(samples_per_image, 0)
        return self.select_samples_img_list(target_labels, fg_probs)

    def select_samples_img_list(
        self, target_labels: list[Tensor], fg_probs: list[Tensor]
    ) -> tuple[list[Tensor], list[Tensor]]:
        """
        Select positives and hard negatives from list samples per image.
        Hard negative sampler will be applied to each image independently.

        Args:
            target_labels: list of labels per image.
                For image i in the batch, target_labels[i] is a Tensor sized (A_i,),
                where A_i is the number of samples in image i.
                Positive samples have positive labels, negative samples have label 0.
            fg_probs: list of maximum foreground probability per image.
                For image i in the batch, fg_probs[i] is a Tensor sized (A_i,),
                where A_i is the number of samples in image i.

        Returns:
            - list of binary mask for positive samples
            - list of binary mask for negative samples

        Example:
            .. code-block:: python

                sampler = HardNegativeSampler(
                    batch_size_per_image=6, positive_fraction=0.5, min_neg=1, pool_size=2
                )
                # two images with different number of samples
                target_labels = [ torch.tensor([0,1]), torch.tensor([1,0,2,1])]
                fg_probs = [ torch.rand(2), torch.rand(4)]
                pos_idx_list, neg_idx_list = sampler.select_samples_img_list(target_labels, fg_probs)
        """
        pos_idx = []
        neg_idx = []

        if len(target_labels) != len(fg_probs):
            raise ValueError(
                "Require len(target_labels) == len(fg_probs). "
                f"Got len(target_labels)={len(target_labels)}, len(fg_probs)={len(fg_probs)}."
            )
        # sample each image independently
        for labels_per_img, fg_probs_per_img in zip(target_labels, fg_probs):
            pos_idx_per_image_mask, neg_idx_per_image_mask = self.select_samples_per_img(
                labels_per_img, fg_probs_per_img
            )
            pos_idx.append(pos_idx_per_image_mask)
            neg_idx.append(neg_idx_per_image_mask)

        return pos_idx, neg_idx

    def select_samples_per_img(self, labels_per_img: Tensor, fg_probs_per_img: Tensor) -> tuple[Tensor, Tensor]:
        """
        Select positives and hard negatives from samples.

        Args:
            labels_per_img: labels, sized (A,).
                Positive samples have positive labels, negative samples have label 0.
            fg_probs_per_img: maximum foreground probability, sized (A,)

        Returns:
            - binary mask for positive samples, sized (A,)
            - binary mask for negative samples, sized (A,)

        Example:
            .. code-block:: python

                sampler = HardNegativeSampler(
                    batch_size_per_image=6, positive_fraction=0.5, min_neg=1, pool_size=2
                )
                # one image with four samples
                target_labels = torch.tensor([1,0,2,1])
                fg_probs = torch.rand(4)
                pos_idx, neg_idx = sampler.select_samples_per_img(target_labels, fg_probs)
        """
        # for each image, find positive sample indices and negative sample indices
        if labels_per_img.numel() != fg_probs_per_img.numel():
            raise ValueError("labels_per_img and fg_probs_per_img should have same number of elements.")

        positive = torch.where(labels_per_img >= 1)[0]
        negative = torch.where(labels_per_img == 0)[0]

        num_pos = self.get_num_pos(positive)
        pos_idx_per_image_mask = self.select_positives(positive, num_pos, labels_per_img)

        num_neg = self.get_num_neg(negative, num_pos)
        neg_idx_per_image_mask = self.select_negatives(negative, num_neg, fg_probs_per_img)

        return pos_idx_per_image_mask, neg_idx_per_image_mask

    def get_num_pos(self, positive: torch.Tensor) -> int:
        """
        Number of positive samples to draw

        Args:
            positive: indices of positive samples

        Returns:
            number of positive samples
        """
        # positive sample sampling
        num_pos = int(self.batch_size_per_image * self.positive_fraction)
        # protect against not enough positive examples
        num_pos = min(positive.numel(), num_pos)
        return num_pos

    def get_num_neg(self, negative: torch.Tensor, num_pos: int) -> int:
        """
        Sample enough negatives to fill up ``self.batch_size_per_image``

        Args:
            negative: indices of negative samples
            num_pos: number of positive samples to draw

        Returns:
            number of negative samples
        """
        # always assume at least one pos sample was sampled
        num_neg = int(max(1, num_pos) * abs(1 - 1.0 / float(self.positive_fraction)))
        # protect against not enough negative examples and sample at least self.min_neg if possible
        num_neg = min(negative.numel(), max(num_neg, self.min_neg))
        return num_neg

    def select_positives(self, positive: Tensor, num_pos: int, labels: Tensor) -> Tensor:
        """
        Select positive samples

        Args:
            positive: indices of positive samples, sized (P,),
                where P is the number of positive samples
            num_pos: number of positive samples to sample
            labels: labels for all samples, sized (A,),
                where A is the number of samples.

        Returns:
            binary mask of positive samples to choose, sized (A,),
                where A is the number of samples in one image
        """
        if positive.numel() > labels.numel():
            raise ValueError("The number of positive samples should not be larger than the number of all samples.")

        # pick num_pos positives uniformly at random
        perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
        pos_idx_per_image = positive[perm1]

        # output a binary mask with same size of labels that indicates selected positive samples.
        pos_idx_per_image_mask = torch.zeros_like(labels, dtype=torch.uint8)
        pos_idx_per_image_mask[pos_idx_per_image] = 1
        return pos_idx_per_image_mask