| | import numpy as np |
| | from .common import normalized, Topk_qy |
| | import logging |
| |
|
| |
|
def mapls(test_probs,
          pz: np.ndarray,
          qy_mode: str = 'soft',
          max_iter: int = 100,
          init_mode: str = 'identical',
          lam: float = None,
          dvg_name='kl'):
    r"""
    Maximum A Posteriori Label Shift (MAPLS): estimate the unknown target
    label distribution from classifier outputs on the test set.

    Given source-domain posteriors P(Y_s=i|X_s=x) = f(x) and the source
    prior P(Y_s=i) = pz, estimate the target prior P(Y_t=i).

    Args:
        test_probs: (N, K) array — or torch tensor — of predicted class
            probabilities on the test set.
        pz: source-domain class prior, length K.
        qy_mode: 'soft', 'hard' or 'topk'; how the EM M-step aggregates
            per-sample posteriors into a class prior.
        max_iter: number of EM iterations (non-negative int).
        init_mode: 'identical' (start from pz) or 'uniform'.
        lam: MAP mixing weight between the EM estimate and the uniform
            prior; estimated automatically from divergences when None.
        dvg_name: divergence used for the lambda estimate, 'kl' or 'js'.

    Returns:
        Estimated target class prior, shape (K,).
    """
    # Accept a torch tensor transparently: move to CPU and convert to
    # numpy. Plain numpy input has no `is_cuda` attribute and is passed
    # through untouched (the original crashed on numpy input here).
    if hasattr(test_probs, 'is_cuda'):
        if test_probs.is_cuda:
            test_probs = test_probs.cpu()
        test_probs = test_probs.detach().numpy()

    cls_num = len(pz)
    assert test_probs.shape[-1] == cls_num
    if type(max_iter) != int or max_iter < 0:
        raise Exception('max_iter should be a positive integer, not ' + str(max_iter))

    if dvg_name == 'kl':
        dvg = kl_div
    elif dvg_name == 'js':
        dvg = js_div
    else:
        raise Exception('Unsupported distribution distance measure, expect kl or js.')

    # Uniform prior: used both as the MAP regularizer and inside the
    # automatic lambda estimate.
    q_prior = np.ones(cls_num) / cls_num

    # BUG FIX: the original had the branches inverted — `if lam is None:
    # lam = lam` was a no-op, so the automatic estimate was never computed
    # and the logging call below crashed formatting None with %.4f.
    if lam is None:
        lam = get_lamda(test_probs, pz, q_prior, dvg)
    logging.info("lam is %.4f" % lam)

    qz = mapls_EM(test_probs, pz, lam, q_prior, cls_num,
                  init_mode=init_mode, max_iter=max_iter, qy_mode=qy_mode)

    return qz
| |
|
| |
|
def mapls_EM(probs, pz, lam, q_prior, cls_num, init_mode='identical', max_iter=100, qy_mode='soft'):
    """
    EM loop for MAPLS with a MAP shrinkage step toward q_prior.

    Args:
        probs: (N, K) source-domain posteriors on the test set.
        pz: source class prior, length K (renormalized internally).
        lam: mixing weight; lam=1 ignores q_prior entirely.
        q_prior: prior distribution used as the MAP regularizer (may be
            the scalar 0 when called with lam=1, where its weight is 0).
        cls_num: number of classes K.
        init_mode: 'identical' (start from pz) or 'uniform'.
        max_iter: number of EM iterations.
        qy_mode: 'soft' (mean posterior), 'hard' (argmax counts) or
            'topk' (delegates to Topk_qy).

    Returns:
        Estimated target class prior qz, shape (K,), summing to 1.
    """
    pz = np.array(pz) / np.sum(pz)

    if init_mode == 'uniform':
        qz = np.ones(cls_num) / cls_num
    elif init_mode == 'identical':
        qz = pz.copy()
    else:
        raise ValueError('init_mode should be either "uniform" or "identical"')

    # Importance weights that tilt the source posteriors toward qz.
    w = (np.array(qz) / np.array(pz))

    for i in range(max_iter):
        # E-step: reweight per-sample posteriors and renormalize (L1).
        mapls_probs = normalized(probs * w, axis=-1, order=1)

        # M-step: aggregate per-sample posteriors into a class prior.
        if qy_mode == 'hard':
            pred = np.argmax(mapls_probs, axis=-1)
            qz_new = np.bincount(pred.reshape(-1), minlength=cls_num)
            # BUG FIX: bincount yields raw counts (sum = N); normalize so
            # the MAP mix with q_prior below operates on comparable
            # magnitudes, matching the 'soft' branch which already
            # produces a probability vector.
            qz_new = qz_new / max(qz_new.sum(), 1)
        elif qy_mode == 'soft':
            qz_new = np.mean(mapls_probs, axis=0)
        elif qy_mode == 'topk':
            qz_new = Topk_qy(mapls_probs, cls_num, topk_ratio=0.9, head=0)
        else:
            # BUG FIX (message only): 'topk' is also supported above.
            raise Exception('MAPLS mode should be "soft", "hard" or "topk". ')

        # MAP step: shrink the EM estimate toward the prior, renormalize,
        # and refresh the importance weights for the next iteration.
        qz = lam * qz_new + (1 - lam) * q_prior
        qz /= qz.sum()
        w = qz / pz

    return qz
| |
|
| |
|
def get_lamda(test_probs, pz, q_prior, dvg, max_iter=50):
    """
    Estimate the MAP mixing weight lambda from divergences between the
    unregularized EM estimate, the source prior and the uniform prior.

    Args:
        test_probs: (N, K) predicted probabilities on the test set.
        pz: source class prior, length K.
        q_prior: uniform prior, length K.
        dvg: divergence function, e.g. kl_div or js_div.
        max_iter: EM iterations for the unregularized estimate.

    Returns:
        Scalar lambda.
    """
    K = len(pz)

    # Unregularized EM estimate: with lam=1 the prior gets weight 0, so
    # passing 0 for q_prior is harmless here.
    qz_pred = mapls_EM(test_probs, pz, 1, 0, K, max_iter=max_iter)

    TU_div = dvg(qz_pred, q_prior)   # target estimate vs uniform
    TS_div = dvg(qz_pred, pz)        # target estimate vs source
    SU_div = dvg(pz, q_prior)        # source vs uniform
    print('weights are, TU_div %.4f, TS_div %.4f, SU_div %.4f' % (TU_div, TS_div, SU_div))

    # Map each divergence to a confidence in [0, 1); SU_conf calibrates
    # the sharpness used for the other two.
    SU_conf = 1 - lam_forward(SU_div, lam_inv(dpq=0.5, lam=0.2))
    TU_conf = lam_forward(TU_div, lam_inv(dpq=0.5, lam=SU_conf))
    TS_conf = lam_forward(TS_div, lam_inv(dpq=0.5, lam=SU_conf))
    print('weights are, unviform_weight %.4f, differ_weight %.4f, regularize weight %.4f'
          % (TU_conf, TS_conf, SU_conf))

    # Weighted combination of the two confidences.
    confs = np.array([TU_conf, 1 - TS_conf])
    w = np.array([0.9, 0.1])
    lam = np.sum(w * confs)

    # BUG FIX: the original used logging-style arguments with print(),
    # which printed the literal format string and the value as a tuple.
    print('Estimated lambda is: %.4f' % lam)

    return lam
| |
|
| |
|
def lam_inv(dpq, lam):
    """Invert lam_forward: recover gamma from a divergence dpq and weight lam."""
    # Clamp lam away from 1 to avoid a zero denominator below.
    if abs(lam - 1) < 1e-3:
        lam = 1e-3
    return (1 / (1 - lam) - 1) / dpq
| |
|
def lam_forward(dpq, gamma):
    """Map divergence dpq to a weight in [0, 1) via gamma*d / (1 + gamma*d)."""
    scaled = gamma * dpq
    return scaled / (1 + scaled)
| |
|
| |
|
def kl_div(p, q):
    """
    KL divergence D(p || q) = sum_i p_i * log(p_i / q_i).

    A small epsilon is added to q to avoid division by zero; terms with
    p_i == 0 contribute 0 (the x*log(x) -> 0 limit).
    """
    # BUG FIX: the original cast both arrays to float16, which (a) loses
    # precision and (b) flushes the 1e-8 epsilon to zero — float16's
    # smallest subnormal is ~6e-8 — so any zero entry in q produced inf.
    # Compute in float64 instead; adding the epsilon after the asarray
    # also lets q be a plain list.
    p = np.asarray(p, dtype=np.float64)
    q = np.asarray(q, dtype=np.float64) + 1e-8

    return np.sum(np.where(p != 0, p * np.log(p / q), 0))
| |
|
def js_div(p, q):
    """Jensen-Shannon divergence between p and q (each must sum to 1)."""
    assert (np.abs(np.sum(p) - 1) < 1e-6) and (np.abs(np.sum(q) - 1) < 1e-6)
    midpoint = (p + q) / 2
    return kl_div(p, midpoint) / 2 + kl_div(q, midpoint) / 2
| |
|