# temp_backup / MAPLS / common_cuda.py
# Source: benzlxs — "Upload folder using huggingface_hub" (commit 9e56db5, verified)
import torch
import torch.nn.functional as F
def lsc_torch(probs: torch.Tensor, w: torch.Tensor):
    """
    GPU-compatible LSC (label-shift correction) step.

    Scales each class probability by its per-class weight, then renormalizes
    every row to sum to 1 (L1 normalization).

    probs: Tensor [N, C] of per-sample class probabilities
    w:     Tensor [C] of per-class correction weights
    returns: Tensor [N, C] of reweighted, row-normalized probabilities
    """
    assert probs.shape[-1] == w.shape[0], "Shape mismatch"
    # Broadcast the [C] weight vector across all N rows, then L1-normalize.
    scaled = torch.mul(probs, w)
    return F.normalize(scaled, p=1, dim=-1)
def get_py_torch(probs: torch.Tensor, cls_num_list=None, mode='soft'):
    """
    GPU-compatible estimate of the source label prior P(Y_s=i).

    probs: Tensor [N, C] of per-sample class probabilities
    cls_num_list: per-class sample counts; required when mode == 'gt'
    mode: 'soft' (mean of probabilities), 'hard' (argmax frequencies),
          or 'gt' (normalized ground-truth class counts)
    returns: Tensor [C] summing to 1
    raises: ValueError on an unknown mode, or 'gt' without cls_num_list
    """
    n_classes = probs.shape[-1]
    if mode == 'soft':
        # Average predicted distribution over the batch.
        return probs.mean(dim=0)
    if mode == 'hard':
        # Empirical frequency of hard (argmax) predictions.
        hard_preds = probs.argmax(dim=-1)
        counts = torch.bincount(hard_preds, minlength=n_classes).float()
        return counts / counts.sum()
    if mode == 'gt' and cls_num_list is not None:
        # Normalize the supplied ground-truth class counts.
        counts = torch.tensor(cls_num_list, dtype=torch.float32, device=probs.device)
        return counts / counts.sum()
    raise ValueError("mode must be 'soft', 'hard', or 'gt'")
def get_marginal_torch(probs: torch.Tensor, cls_num: int, mode='soft'):
    """
    GPU-compatible estimate of the predicted class marginal q(z).

    probs: Tensor [N, C] of per-sample class probabilities
    cls_num: expected number of classes C (sanity-checked against probs)
    mode: 'soft' (mean of probabilities) or 'hard' (argmax frequencies)
    returns: Tensor [C] summing to 1
    raises: ValueError on an unknown mode
    """
    assert probs.shape[-1] == cls_num
    if mode == 'hard':
        # Empirical frequency of hard (argmax) predictions.
        pred = torch.argmax(probs, dim=-1)
        qz = torch.bincount(pred, minlength=cls_num).float()
        qz = qz / qz.sum()
    elif mode == 'soft':
        qz = torch.mean(probs, dim=0)
    else:
        # Bug fix: an unknown mode previously fell through both branches and
        # crashed with UnboundLocalError on `return qz`; fail loudly instead,
        # matching get_py_torch's error handling.
        raise ValueError("mode must be 'soft' or 'hard'")
    return qz
def get_confusion_matrix_torch(probs: torch.Tensor, labels: torch.Tensor, cls_num: int, mode='soft'):
    """
    Build a (possibly soft) confusion matrix on the probs device.

    probs: Tensor [N, C]
    labels: Tensor [N] (long) of true class indices
    cls_num: number of classes C
    mode: 'soft' accumulates full probability rows per true class;
          'hard' counts (true, argmax-pred) pairs
    returns: [cls_num, cls_num] confusion matrix (rows = true class)
    raises: ValueError on an unknown mode

    Perf fix: the original looped over samples in Python, which is very slow
    (one kernel launch per sample on GPU). Both modes are now single
    vectorized ops with identical results.
    """
    cm = torch.zeros((cls_num, cls_num), device=probs.device)
    if mode == 'soft':
        # Scatter-add each probability row into the row of its true label.
        cm.index_add_(0, labels, probs)
    elif mode == 'hard':
        pred = torch.argmax(probs, dim=-1)
        # Count (label, pred) pairs via a flattened 1-D histogram.
        flat = labels * cls_num + pred
        cm = torch.bincount(flat, minlength=cls_num * cls_num).float().reshape(cls_num, cls_num)
    else:
        # Previously an unknown mode silently returned an all-zero matrix.
        raise ValueError("mode must be 'soft' or 'hard'")
    return cm
def normalized_torch(a: torch.Tensor, axis=-1, order=2):
    """
    Normalize `a` along `axis` by its `order`-norm.

    Slices whose norm is exactly zero are divided by 1 instead, so they pass
    through unchanged rather than producing NaNs.
    """
    length = torch.norm(a, p=order, dim=axis, keepdim=True)
    # Substitute 1 for zero norms to avoid division by zero.
    safe_length = torch.where(length == 0, torch.ones_like(length), length)
    return a / safe_length
def topk_qy_torch(probs: torch.Tensor, cls_num: int, topk_ratio=0.8, head=0, normalize=True):
    """
    Accumulate each sample's top-k class probabilities into a per-class total.

    probs: Tensor [N, C]
    cls_num: number of classes C
    topk_ratio: fraction of classes kept per sample (k ≈ cls_num * topk_ratio)
    head: number of highest-ranked entries per sample to skip before accumulating
    normalize: if True, divide the accumulated vector by N
    returns: Tensor [C]

    Perf fix: the original accumulated row-by-row in a Python loop; a single
    index_add_ over the flattened top-k entries gives identical results.
    """
    N, _ = probs.shape
    # k keeps roughly topk_ratio of the classes, clamped to [head + 1, cls_num]
    # so at least one entry survives after skipping the first `head`.
    k = max(min(int(cls_num * topk_ratio) + head, cls_num), head + 1)
    topk_vals, topk_indices = torch.topk(probs, k=k, dim=1)
    # Drop the `head` largest entries of every row, then scatter-add the rest.
    vals = topk_vals[:, head:].reshape(-1)
    idx = topk_indices[:, head:].reshape(-1)
    qy = torch.zeros(cls_num, device=probs.device)
    qy.index_add_(0, idx, vals)
    if normalize:
        qy = qy / N
    return qy