import torch
from torch import nn

from .dice import SoftDiceLoss, MemoryEfficientSoftDiceLoss
from .robust_ce_loss import RobustCrossEntropyLoss, TopKLoss
from .helpers import softmax_helper_dim1


class DC_and_CE_loss(nn.Module):
    def __init__(self, soft_dice_kwargs, ce_kwargs, weight_ce=1, weight_dice=1, ignore_label=None,
                 dice_class=SoftDiceLoss):
        """
        Weights for CE and Dice do not need to sum to one. You can set whatever you want.
        :param soft_dice_kwargs:
        :param ce_kwargs:
        :param weight_ce:
        :param weight_dice:
        :param ignore_label:
        :param dice_class:
        """
        super(DC_and_CE_loss, self).__init__()
        if ignore_label is not None:
            ce_kwargs['ignore_index'] = ignore_label

        self.weight_dice = weight_dice
        self.weight_ce = weight_ce
        self.ignore_label = ignore_label

        self.ce = RobustCrossEntropyLoss(**ce_kwargs)
        self.dc = dice_class(apply_nonlin=softmax_helper_dim1, **soft_dice_kwargs)

    def forward(self, net_output: torch.Tensor, target: torch.Tensor):
        """
        target must be b, c, x, y(, z) with c=1
        :param net_output:
        :param target:
        :return:
        """
        if self.ignore_label is not None:
            assert target.shape[1] == 1, 'ignore label is not implemented for one hot encoded target variables ' \
                                         '(DC_and_CE_loss)'
            mask = (target != self.ignore_label).bool()
            # remove ignore label from target, replace with one of the known labels. It doesn't matter because we
            # ignore gradients in those areas anyway
            target_dice = torch.clone(target)
            target_dice[target == self.ignore_label] = 0
            num_fg = mask.sum()
        else:
            target_dice = target
            mask = None

        dc_loss = self.dc(net_output, target_dice, loss_mask=mask) \
            if self.weight_dice != 0 else 0
        ce_loss = self.ce(net_output, target[:, 0].long()) \
            if self.weight_ce != 0 and (self.ignore_label is None or num_fg > 0) else 0

        result = self.weight_ce * ce_loss + self.weight_dice * dc_loss
        return result


class DC_and_BCE_loss(nn.Module):
    def __init__(self, bce_kwargs, soft_dice_kwargs, weight_ce=1, weight_dice=1, use_ignore_label: bool = False,
                 dice_class=MemoryEfficientSoftDiceLoss):
        """
        DO NOT APPLY NONLINEARITY IN YOUR NETWORK!

        target must be one hot encoded.
        IMPORTANT: We assume the ignore label (if use_ignore_label is True) is located in target[:, -1]!

        :param bce_kwargs:
        :param soft_dice_kwargs:
        :param weight_ce:
        :param weight_dice:
        :param use_ignore_label:
        :param dice_class:
        """
        super(DC_and_BCE_loss, self).__init__()
        if use_ignore_label:
            bce_kwargs['reduction'] = 'none'

        self.weight_dice = weight_dice
        self.weight_ce = weight_ce
        self.use_ignore_label = use_ignore_label

        self.ce = nn.BCEWithLogitsLoss(**bce_kwargs)
        self.dc = dice_class(apply_nonlin=torch.sigmoid, **soft_dice_kwargs)

    def forward(self, net_output: torch.Tensor, target: torch.Tensor):
        if self.use_ignore_label:
            # target is one hot encoded here. invert the ignore channel so that the mask is True wherever we can
            # compute the loss
            mask = (1 - target[:, -1:]).bool()
            # remove the ignore channel now that we have the mask
            target_regions = torch.clone(target[:, :-1])
        else:
            target_regions = target
            mask = None

        dc_loss = self.dc(net_output, target_regions, loss_mask=mask)
        if mask is not None:
            ce_loss = (self.ce(net_output, target_regions) * mask).sum() / torch.clip(mask.sum(), min=1e-8)
        else:
            ce_loss = self.ce(net_output, target_regions)

        result = self.weight_ce * ce_loss + self.weight_dice * dc_loss
        return result


class DC_and_topk_loss(nn.Module):
    def __init__(self, soft_dice_kwargs, ce_kwargs, weight_ce=1, weight_dice=1, ignore_label=None):
        """
        Weights for CE and Dice do not need to sum to one. You can set whatever you want.
        :param soft_dice_kwargs:
        :param ce_kwargs:
        :param weight_ce:
        :param weight_dice:
        :param ignore_label:
        """
        super().__init__()
        if ignore_label is not None:
            ce_kwargs['ignore_index'] = ignore_label

        self.weight_dice = weight_dice
        self.weight_ce = weight_ce
        self.ignore_label = ignore_label

        self.ce = TopKLoss(**ce_kwargs)
        self.dc = SoftDiceLoss(apply_nonlin=softmax_helper_dim1, **soft_dice_kwargs)

    def forward(self, net_output: torch.Tensor, target: torch.Tensor):
        """
        target must be b, c, x, y(, z) with c=1
        :param net_output:
        :param target:
        :return:
        """
        if self.ignore_label is not None:
            assert target.shape[1] == 1, 'ignore label is not implemented for one hot encoded target variables ' \
                                         '(DC_and_topk_loss)'
            mask = (target != self.ignore_label).bool()
            # remove ignore label from target, replace with one of the known labels. It doesn't matter because we
            # ignore gradients in those areas anyway
            target_dice = torch.clone(target)
            target_dice[target == self.ignore_label] = 0
            num_fg = mask.sum()
        else:
            target_dice = target
            mask = None

        dc_loss = self.dc(net_output, target_dice, loss_mask=mask) \
            if self.weight_dice != 0 else 0
        ce_loss = self.ce(net_output, target) \
            if self.weight_ce != 0 and (self.ignore_label is None or num_fg > 0) else 0

        result = self.weight_ce * ce_loss + self.weight_dice * dc_loss
        return result
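

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the library API): how DC_and_CE_loss is
# typically instantiated and called. The kwarg values below are illustrative
# assumptions, not canonical defaults; check the SoftDiceLoss signature in
# .dice for the exact option names. Because of the relative imports, this only
# runs when the module is executed from within its package (e.g. via -m).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # assumed dice kwargs; 'ddp': False avoids requiring an initialized process group
    loss = DC_and_CE_loss(soft_dice_kwargs={'batch_dice': True, 'smooth': 1e-5, 'do_bg': False, 'ddp': False},
                          ce_kwargs={}, weight_ce=1, weight_dice=1, ignore_label=None)
    net_output = torch.randn(2, 3, 32, 32)            # b, c, x, y raw logits (no softmax applied)
    target = torch.randint(0, 3, (2, 1, 32, 32))      # b, 1, x, y integer label map
    print(loss(net_output, target))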