File size: 5,021 Bytes
9e56db5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
import numpy as np
from .common import normalized, Topk_qy
import logging


def mapls(test_probs,
          pz: np.ndarray,
          qy_mode: str = 'soft',
          max_iter: int = 100,
          init_mode: str = 'identical',
          lam: float = None,
          dvg_name='kl'):
    r"""
    Maximum A Posteriori Label Shift (MAPLS): estimate the unknown target
    label distribution on a test set.

    Given source-domain posteriors P(Y_s=i|X_s=x) = f(x) (``test_probs``,
    one row per test sample) and the source label distribution ``pz``,
    run a prior-regularized EM procedure to estimate P(Y_t=i).

    Args:
        test_probs: (N, K) class-probability matrix; a torch tensor
            (CPU or CUDA) or anything convertible to a numpy array.
        pz: source label distribution of length K.
        qy_mode: M-step aggregation — 'soft', 'hard', or 'topk'.
        max_iter: number of EM iterations (non-negative int).
        init_mode: 'identical' (start from pz) or 'uniform'.
        lam: mixing weight between the EM estimate and the prior;
            estimated from the data via ``get_lamda`` when None.
        dvg_name: divergence used by lambda estimation, 'kl' or 'js'.

    Returns:
        Length-K numpy array — the estimated target label distribution.
    """
    # Accept plain numpy input as well as torch tensors (the previous code
    # unconditionally touched `.is_cuda`, hard-requiring torch).
    if hasattr(test_probs, 'is_cuda'):
        if test_probs.is_cuda:
            test_probs = test_probs.cpu()
        test_probs = test_probs.detach().numpy()
    else:
        test_probs = np.asarray(test_probs)

    # Sanity Check
    cls_num = len(pz)
    assert test_probs.shape[-1] == cls_num
    if not isinstance(max_iter, int) or max_iter < 0:
        raise Exception('max_iter should be a positive integer, not ' + str(max_iter))

    # Setup d(p,q) measure
    if dvg_name == 'kl':
        dvg = kl_div
    elif dvg_name == 'js':
        dvg = js_div
    else:
        raise Exception('Unsupported distribution distance measure, expect kl or js.')

    # Set Prior of Target Label Distribution (uniform)
    q_prior = np.ones(cls_num) / cls_num

    # Lambda estimation-------------------------------------------------------#
    if lam is None:
        # BUG FIX: this branch previously did `lam = lam`, leaving None and
        # crashing the logging call below. Restore the intended data-driven
        # estimate.
        lam = get_lamda(test_probs, pz, q_prior, dvg=dvg, max_iter=max_iter)
    logging.info("lam is %.4f" % lam)

    # EM Algorithm Computation
    qz = mapls_EM(test_probs, pz, lam, q_prior, cls_num,
                  init_mode=init_mode, max_iter=max_iter, qy_mode=qy_mode)

    return qz


def mapls_EM(probs, pz, lam, q_prior, cls_num, init_mode='identical', max_iter=100, qy_mode='soft'):
    """Prior-regularized EM loop for target label-distribution estimation.

    Alternates between reweighting the source posteriors by qz/pz (E-step)
    and re-estimating qz from the reweighted predictions (M-step), mixing
    each new estimate with ``q_prior`` using weight ``lam``.
    """
    # Normalize the source label distribution once up front.
    src_dist = np.array(pz) / np.sum(pz)

    # Choose the starting point for the target distribution.
    if init_mode == 'uniform':
        tgt_dist = np.ones(cls_num) / cls_num
    elif init_mode == 'identical':
        tgt_dist = src_dist.copy()
    else:
        raise ValueError('init_mode should be either "uniform" or "identical"')

    # Importance weights: ratio of current target estimate to source prior.
    ratio = np.array(tgt_dist) / np.array(src_dist)

    for _ in range(max_iter):
        # E-Step: reweight and L1-renormalize each sample's posterior.
        reweighted = normalized(probs * ratio, axis=-1, order=1)

        # M-Step: aggregate the per-sample posteriors into class mass.
        if qy_mode == 'hard':
            hard_labels = np.argmax(reweighted, axis=-1)
            estimate = np.bincount(hard_labels.reshape(-1), minlength=cls_num)
        elif qy_mode == 'soft':
            estimate = np.mean(reweighted, axis=0)
        elif qy_mode == 'topk':
            estimate = Topk_qy(reweighted, cls_num, topk_ratio=0.9, head=0)
        else:
            raise Exception('MAPLS mode should be either "soft" or "hard". ')

        # MAP update: blend the EM estimate with the prior, renormalize,
        # and refresh the importance weights.
        tgt_dist = lam * estimate + (1 - lam) * q_prior
        tgt_dist /= tgt_dist.sum()
        ratio = tgt_dist / src_dist

    return tgt_dist


def get_lamda(test_probs, pz, q_prior, dvg, max_iter=50):
    """Estimate the EM regularization weight lambda from divergences.

    Runs an unregularized (lam=1) EM pass to get a raw target-distribution
    estimate, then combines confidence scores derived from the divergences
    between that estimate, the source distribution ``pz``, and the prior
    ``q_prior``.

    Args:
        test_probs: (N, K) class-probability matrix for the test set.
        pz: source label distribution of length K.
        q_prior: prior target label distribution (typically uniform).
        dvg: divergence function d(p, q), e.g. kl_div or js_div.
        max_iter: EM iterations for the raw estimate.

    Returns:
        Scalar lambda in [0, 1].
    """
    K = len(pz)

    # Unregularized EM estimate of the target label distribution.
    # With lam=1 the (1-lam)*q_prior term vanishes, so 0 is a safe placeholder.
    qz_pred = mapls_EM(test_probs, pz, 1, 0, K, max_iter=max_iter)

    TU_div = dvg(qz_pred, q_prior)  # target estimate vs. prior
    TS_div = dvg(qz_pred, pz)       # target estimate vs. source
    SU_div = dvg(pz, q_prior)       # source vs. prior
    print('weights are, TU_div %.4f, TS_div %.4f, SU_div %.4f' % (TU_div, TS_div, SU_div))

    # Map divergences to saturating confidence scores in (0, 1).
    SU_conf = 1 - lam_forward(SU_div, lam_inv(dpq=0.5, lam=0.2))
    TU_conf = lam_forward(TU_div, lam_inv(dpq=0.5, lam=SU_conf))
    TS_conf = lam_forward(TS_div, lam_inv(dpq=0.5, lam=SU_conf))
    print('weights are, unviform_weight %.4f, differ_weight %.4f, regularize weight %.4f'
                 % (TU_conf, TS_conf, SU_conf))

    # Weighted combination of the two confidence terms.
    confs = np.array([TU_conf, 1 - TS_conf])
    w = np.array([0.9, 0.1])
    lam = np.sum(w * confs)

    # BUG FIX: was print('Estimated lambda is: %.4f', lam) — the comma printed
    # the raw format string and the value as a tuple instead of formatting.
    print('Estimated lambda is: %.4f' % lam)

    return lam


def lam_inv(dpq, lam):
    """Invert lam_forward: return gamma such that lam_forward(dpq, gamma) == lam.

    Since gamma = (1/(1-lam) - 1) / dpq diverges as lam -> 1, lam is clipped
    just below 1 first.
    """
    # BUG FIX: the original replaced lam with 1e-3 when |lam - 1| < 1e-3,
    # which maps a near-one confidence to a near-zero gamma — the opposite of
    # the function's monotone intent. Clip to 1 - 1e-3 instead so the mapping
    # stays continuous (large lam -> large gamma).
    if abs(lam - 1) < 1e-3:
        lam = 1 - 1e-3
    return (1 / (1 - lam) - 1) / dpq

def lam_forward(dpq, gamma):
    """Map divergence ``dpq`` to a saturating weight in [0, 1): g*d / (1 + g*d)."""
    scaled = gamma * dpq
    return scaled / (1 + scaled)


def kl_div(p, q):
    """KL divergence D(p || q) = sum_i p_i * log(p_i / q_i).

    A small epsilon is added to q to avoid division by zero; entries with
    p_i == 0 contribute 0 (the 0 * log 0 convention).
    """
    # BUG FIX (author-flagged "fixme"): float16 casts truncated probability
    # values badly; use float64. Also, np.where evaluated log(p/q) on the
    # p == 0 entries anyway (0 * -inf plus runtime warnings) — compute only
    # over the support of p instead.
    p = np.asarray(p, dtype=np.float64)
    q = np.asarray(q, dtype=np.float64) + 1e-8
    support = p != 0
    return np.sum(p[support] * np.log(p[support] / q[support]))

def js_div(p, q):
    """Jensen-Shannon divergence: symmetrized KL against the midpoint (p+q)/2.

    Both inputs must be normalized distributions (sums checked to 1e-6).
    """
    assert (np.abs(np.sum(p) - 1) < 1e-6) and (np.abs(np.sum(q) - 1) < 1e-6)
    midpoint = (p + q) / 2
    return kl_div(p, midpoint) / 2 + kl_div(q, midpoint) / 2