| import torch |
| import torch.nn.functional as F |
|
|
def mapls_torch(test_probs: torch.Tensor,
                pz: torch.Tensor,
                qy_mode: str = 'soft',
                max_iter: int = 100,
                init_mode: str = 'identical',
                lam: float = None,
                dvg_name='kl') -> torch.Tensor:
    """
    GPU-compatible MAP Label Shift (MAPLS) using PyTorch.

    Estimates the test-time label distribution q(z) from classifier
    probabilities via an EM procedure regularized toward a uniform prior.

    Args:
        test_probs: (..., cls_num) tensor of predicted class probabilities.
        pz: source-domain class prior (tensor or array-like of length cls_num).
        qy_mode: 'soft' (mean of posteriors) or 'hard' (argmax counts) E-step.
        max_iter: number of EM iterations.
        init_mode: 'identical' (start from pz) or 'uniform'.
        lam: mixing weight between the EM estimate and the uniform prior.
            None (the default) means 1.0, i.e. pure EM with no prior mixing.
        dvg_name: divergence identifier; must be 'kl' or 'js'.

    Returns:
        Estimated label distribution qz of shape (cls_num,), summing to 1.

    Raises:
        ValueError: on unsupported dvg_name, or when the last dimension of
            test_probs does not match the size of pz.
    """
    device = test_probs.device
    # Bug fix: pz must live on the same device as test_probs (was hard-coded
    # to 'cuda', which crashed on CPU-only hosts and broke multi-GPU).
    # as_tensor also avoids the copy warning when pz is already a tensor.
    pz = torch.as_tensor(pz, dtype=torch.float32, device=device)
    cls_num = pz.numel()
    # Validate with an exception rather than assert (asserts vanish under -O).
    if test_probs.shape[-1] != cls_num:
        raise ValueError('test_probs last dim must equal number of classes')

    # Validate the divergence name up front; the divergence itself is not
    # used by the EM update below.
    if dvg_name not in ('kl', 'js'):
        raise ValueError('Unsupported divergence type')

    # Bug fix: a None lam previously crashed inside the EM loop
    # (TypeError on lam * qz_new). Treat None as 1.0 (pure EM estimate).
    if lam is None:
        lam = 1.0

    # MAP regularization target: uniform prior over the classes.
    q_prior = torch.ones(cls_num, device=device) / cls_num

    qz = mapls_EM_torch(test_probs, pz, lam, q_prior, cls_num,
                        init_mode=init_mode, max_iter=max_iter, qy_mode=qy_mode)
    return qz
|
|
|
|
def mapls_EM_torch(probs, pz, lam, q_prior, cls_num, init_mode='identical', max_iter=100, qy_mode='soft'):
    """
    EM iterations for MAPLS label-shift estimation.

    Args:
        probs: (N, cls_num) tensor of source-classifier probabilities.
        pz: source class prior, length cls_num (normalized here defensively).
        lam: mixing weight between the EM estimate and q_prior; None means 1.0.
        q_prior: prior distribution over classes (e.g. uniform), length cls_num.
        cls_num: number of classes.
        init_mode: 'uniform' or 'identical' (start qz from pz).
        max_iter: number of EM iterations.
        qy_mode: 'soft' (average posterior) or 'hard' (argmax counts) E-step.

    Returns:
        qz: estimated target label distribution, shape (cls_num,), sums to 1.

    Raises:
        ValueError: on unknown init_mode or qy_mode.
    """
    pz = pz / pz.sum()
    if init_mode == 'uniform':
        qz = torch.ones(cls_num, device=probs.device) / cls_num
    elif init_mode == 'identical':
        qz = pz.clone()
    else:
        raise ValueError('init_mode must be "uniform" or "identical"')

    # Robustness: mirror the caller's convention that lam=None means pure EM.
    if lam is None:
        lam = 1.0

    # Importance weights reweight posteriors from source prior to current qz.
    w = qz / pz

    for _ in range(max_iter):
        # E-step: rescale posteriors by the prior ratio and renormalize.
        mapls_probs = normalize_torch(probs * w, dim=-1)

        if qy_mode == 'hard':
            # Flatten so bincount accepts batched predictions as well.
            pred = torch.argmax(mapls_probs, dim=-1).flatten()
            # Bug fix: normalize counts to a distribution. Raw bincounts sum
            # to N, so the convex mix with q_prior below was badly scaled
            # (counts swamped the prior whenever lam < 1).
            qz_new = torch.bincount(pred, minlength=cls_num).float() / pred.numel()
        elif qy_mode == 'soft':
            # M-step estimate: average posterior over the dataset.
            qz_new = mapls_probs.mean(dim=0)
        else:
            raise ValueError('qy_mode must be "soft" or "hard"')

        # MAP update: mix the EM estimate with the prior, then renormalize.
        qz = lam * qz_new + (1 - lam) * q_prior
        qz = qz / qz.sum()
        w = qz / pz

    return qz
|
|
|
|
def normalize_torch(x, dim=-1, eps=1e-8):
    """Normalize *x* along *dim* so each slice sums to one.

    A small *eps* is added to the denominator to guard against division
    by zero when a slice is entirely zero.
    """
    denom = x.sum(dim=dim, keepdim=True) + eps
    return x / denom
|
|
|
|
def kl_div_torch(p, q, eps=1e-8):
    """Kullback-Leibler divergence KL(p || q) = sum_i p_i * log(p_i / q_i).

    Entries where p is exactly zero contribute nothing (the 0*log(0)
    convention); *eps* is added to q to keep the logarithm finite.
    """
    p = p.to(torch.float32)
    q = (q + eps).to(torch.float32)
    support = p != 0
    contrib = torch.zeros_like(p)
    contrib[support] = p[support] * torch.log(p[support] / q[support])
    return contrib.sum()
|
|
|
|
def js_div_torch(p, q):
    """Jensen-Shannon divergence: symmetrized KL against the midpoint mixture.

    JS(p, q) = (KL(p || m) + KL(q || m)) / 2 with m = (p + q) / 2.
    """
    mixture = (p + q) / 2
    return (kl_div_torch(p, mixture) + kl_div_torch(q, mixture)) / 2
|
|