code | docstring | func_name | language | repo | path | url | license
|---|---|---|---|---|---|---|---|
def get_standard_normalized_histogram_GN(batch_std_labels, gain_base=2.0):
"""
Convert standard labels to a normalized histogram based on gain values, i.e., each entry equals gain_value / sum_gain_value
:param batch_std_labels: [batch, ranking_size] standard relevance labels
:param gain_base: the base for converting a relevance label into a gain value
:return: [batch, ranking_size] normalized histograms
"""
batch_std_gains = torch.pow(gain_base, batch_std_labels) - 1.0
batch_histograms = batch_std_gains/torch.sum(batch_std_gains, dim=1).view(-1, 1)
return batch_histograms |
Convert standard labels to a normalized histogram based on gain values, i.e., each entry equals gain_value / sum_gain_value
:param batch_std_labels: [batch, ranking_size] standard relevance labels
:param gain_base: the base for converting a relevance label into a gain value
:return: [batch, ranking_size] normalized histograms
| get_standard_normalized_histogram_GN | python | wildltr/ptranking | ptranking/ltr_adhoc/listwise/wassrank/wasserstein_cost_mat.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/wassrank/wasserstein_cost_mat.py | MIT |
def get_normalized_histograms(batch_std_labels=None, batch_preds=None,
wass_dict_std_dists=None, qid=None, wass_para_dict=None, TL_AF=None):
""" Convert both standard labels and predictions w.r.t. a query to normalized histograms """
smooth_type, norm_type = wass_para_dict['smooth_type'], wass_para_dict['norm_type']
if 'ST' == smooth_type:
if wass_dict_std_dists is not None:
if qid in wass_dict_std_dists: # target distributions
batch_std_hists = wass_dict_std_dists[qid]
else:
batch_std_hists = get_standard_normalized_histogram_ST(batch_std_labels, adjust_softmax=False)
wass_dict_std_dists[qid] = batch_std_hists
else:
batch_std_hists = get_standard_normalized_histogram_ST(batch_std_labels, adjust_softmax=False)
if 'BothST' == norm_type:
if 'S' == TL_AF or 'ST' == TL_AF: # first rescale predictions to the range of the relevance levels
max_rele_level = torch.max(batch_std_labels)
batch_preds = batch_preds * max_rele_level
batch_pred_hists = F.softmax(batch_preds, dim=1)
else:
raise NotImplementedError
elif 'NG' == smooth_type: # normalization of gain, i.e., gain/sum_gain
if wass_dict_std_dists is not None:
if qid in wass_dict_std_dists: # target distributions
batch_std_hists = wass_dict_std_dists[qid]
else:
batch_std_hists = get_standard_normalized_histogram_GN(batch_std_labels)
wass_dict_std_dists[qid] = batch_std_hists
else:
batch_std_hists = get_standard_normalized_histogram_GN(batch_std_labels)
''' normalizing predictions, where negative values must be taken into account '''
mini = torch.min(batch_preds)
# (1) shift all predictions by the minimum so that entries are non-negative before normalizing
if mini > 0.0:
batch_pred_hists = batch_preds / torch.sum(batch_preds, dim=1).view(-1, 1)
else:
batch_preds = batch_preds - mini
batch_pred_hists = batch_preds / torch.sum(batch_preds, dim=1).view(-1, 1)
# (2) an alternative that clamps all negative values to zero, which appears to be a poor choice
'''
if mini > 0.0:
batch_pred_hists = batch_preds / torch.sum(batch_preds, dim=1).view(-1, 1)
else:
batch_deltas = -torch.clamp(batch_preds, min=-1e3, max=0)
batch_preds = batch_preds + batch_deltas
batch_pred_hists = batch_preds / torch.sum(batch_preds, dim=1).view(-1, 1)
'''
else:
raise NotImplementedError
return batch_std_hists, batch_pred_hists | Convert both standard labels and predictions w.r.t. a query to normalized histograms | get_normalized_histograms | python | wildltr/ptranking | ptranking/ltr_adhoc/listwise/wassrank/wasserstein_cost_mat.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/wassrank/wasserstein_cost_mat.py | MIT |
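A standalone sketch (assumption: plain torch code, not from the repo) of the min-shift normalization used above when predictions contain negative values; note that the entry at the minimum receives probability zero:
import torch
batch_preds = torch.tensor([[-1.0, 0.0, 3.0]])
mini = torch.min(batch_preds)
if mini <= 0.0:  # shift so the minimum becomes zero before normalizing
    batch_preds = batch_preds - mini
hists = batch_preds / torch.sum(batch_preds, dim=1).view(-1, 1)
print(hists)  # tensor([[0.0000, 0.2000, 0.8000]])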
def default_para_dict(self):
"""
Default parameter setting for WassRank. EntropicOT | SinkhornOT
:return:
"""
self.wass_para_dict = dict(model_id=self.model_id, mode='SinkhornOT', sh_itr=20, lam=0.1, smooth_type='ST',
norm_type='BothST', cost_type='eg', non_rele_gap=100, var_penalty=np.e, gain_base=4)
return self.wass_para_dict |
Default parameter setting for WassRank. EntropicOT | SinkhornOT
:return:
| default_para_dict | python | wildltr/ptranking | ptranking/ltr_adhoc/listwise/wassrank/wassRank.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/wassrank/wassRank.py | MIT |
def to_para_string(self, log=False, given_para_dict=None):
"""
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
:return:
"""
# using specified para-dict or inner para-dict
wass_para_dict = given_para_dict if given_para_dict is not None else self.wass_para_dict
s1, s2 = (':', '\n') if log else ('_', '_')
cost_type, smooth_type, norm_type = wass_para_dict['cost_type'], wass_para_dict['smooth_type'], wass_para_dict[
'norm_type']
mode_str = s1.join(['mode', wass_para_dict['mode']]) if log else wass_para_dict['mode']
if smooth_type in ['ST', 'NG']:
smooth_str = s1.join(['smooth_type', smooth_type]) if log else s1.join(['ST', smooth_type])
else:
raise NotImplementedError
if cost_type.startswith('Group'):
gain_base, non_rele_gap, var_penalty = wass_para_dict['gain_base'], wass_para_dict['non_rele_gap'], \
wass_para_dict['var_penalty']
cost_str = s2.join([s1.join(['cost_type', cost_type]),
s1.join(['gain_base', '{:,g}'.format(gain_base)]),
s1.join(['non_rele_gap', '{:,g}'.format(non_rele_gap)]),
s1.join(['var_penalty', '{:,g}'.format(var_penalty)])]) if log \
else s1.join(
[cost_type, '{:,g}'.format(non_rele_gap), '{:,g}'.format(gain_base), '{:,g}'.format(var_penalty)])
else:
cost_str = s1.join(['cost_type', cost_type]) if log else cost_type
sh_itr, lam = wass_para_dict['sh_itr'], wass_para_dict['lam']
horn_str = s2.join([s1.join(['Lambda', '{:,g}'.format(lam)]), s1.join(['ShIter', str(sh_itr)])]) if log \
else s1.join(['Lambda', '{:,g}'.format(lam), 'ShIter', str(sh_itr)])
wass_paras_str = s2.join([mode_str, smooth_str, cost_str, horn_str])
return wass_paras_str |
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
:return:
| to_para_string | python | wildltr/ptranking | ptranking/ltr_adhoc/listwise/wassrank/wassRank.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/wassrank/wassRank.py | MIT |
def grid_search(self):
"""
Iterator of parameter settings for WassRank
"""
if self.use_json:
wass_choice_mode = self.json_dict['mode']
wass_choice_itr = self.json_dict['itr']
wass_choice_lam = self.json_dict['lam']
wass_cost_type = self.json_dict['cost_type']
# member parameters of 'Group' include margin, div, group-base
wass_choice_non_rele_gap = self.json_dict['non_rele_gap']
wass_choice_var_penalty = self.json_dict['var_penalty']
wass_choice_group_base = self.json_dict['group_base']
wass_choice_smooth = self.json_dict['smooth']
wass_choice_norm = self.json_dict['norm']
else:
wass_choice_mode = ['WassLossSta'] # EOTLossSta | WassLossSta
wass_choice_itr = [10] # number of iterations of the Sinkhorn operation
wass_choice_lam = [0.1] # regularization parameter, e.g., 1e-3 | 0.01 | 1e-1 | 10
wass_cost_type = ['eg'] # p1 | p2 | eg | dg| ddg
# member parameters of 'Group' include margin, div, group-base
wass_choice_non_rele_gap = [10] # the gap between a relevant document and an irrelevant document
wass_choice_var_penalty = [np.e] # variance penalty
wass_choice_group_base = [4] # the base for computing gain value
wass_choice_smooth = ['ST'] # how the normalized distribution histograms are obtained: 'ST' (softmax) | gain-based normalization
wass_choice_norm = ['BothST'] # 'BothST': use ST for both prediction and standard labels
for mode, wass_lambda, sinkhorn_itr in product(wass_choice_mode, wass_choice_lam, wass_choice_itr):
for wass_smooth, norm in product(wass_choice_smooth, wass_choice_norm):
for cost_type in wass_cost_type:
for non_rele_gap, var_penalty, group_base in product(wass_choice_non_rele_gap,
wass_choice_var_penalty,
wass_choice_group_base):
self.wass_para_dict = dict(model_id='WassRank', mode=mode, sh_itr=sinkhorn_itr, lam=wass_lambda,
cost_type=cost_type, smooth_type=wass_smooth, norm_type=norm,
gain_base=group_base, non_rele_gap=non_rele_gap, var_penalty=var_penalty)
yield self.wass_para_dict |
Iterator of parameter settings for WassRank
| grid_search | python | wildltr/ptranking | ptranking/ltr_adhoc/listwise/wassrank/wassRank.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/listwise/wassrank/wassRank.py | MIT |
def custom_loss_function(self, batch_preds, batch_std_labels, **kwargs):
'''
@param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query
@param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query
@param kwargs:
@return:
'''
batch_p_ij, batch_std_p_ij = get_pairwise_comp_probs(batch_preds=batch_preds, batch_std_labels=batch_std_labels,
sigma=self.sigma)
_batch_loss = F.binary_cross_entropy(input=torch.triu(batch_p_ij, diagonal=1),
target=torch.triu(batch_std_p_ij, diagonal=1), reduction='none')
batch_loss = torch.sum(torch.sum(_batch_loss, dim=(2, 1)))
self.optimizer.zero_grad()
batch_loss.backward()
self.optimizer.step()
return batch_loss |
@param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query
@param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query
@param kwargs:
@return:
| custom_loss_function | python | wildltr/ptranking | ptranking/ltr_adhoc/pairwise/ranknet.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/pairwise/ranknet.py | MIT |
def to_para_string(self, log=False, given_para_dict=None):
"""
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
:return:
"""
# using specified para-dict or inner para-dict
ranknet_para_dict = given_para_dict if given_para_dict is not None else self.ranknet_para_dict
s1, s2 = (':', '\n') if log else ('_', '_')
ranknet_para_str = s1.join(['Sigma', '{:,g}'.format(ranknet_para_dict['sigma'])])
return ranknet_para_str |
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
:return:
| to_para_string | python | wildltr/ptranking | ptranking/ltr_adhoc/pairwise/ranknet.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/pairwise/ranknet.py | MIT |
def grid_search(self):
"""
Iterator of parameter settings for RankNet
"""
if self.use_json:
choice_sigma = self.json_dict['sigma']
else:
choice_sigma = [5.0, 1.0] if self.debug else [1.0] # 1.0, 10.0, 50.0, 100.0
for sigma in choice_sigma:
self.ranknet_para_dict = dict(model_id=self.model_id, sigma=sigma)
yield self.ranknet_para_dict |
Iterator of parameter settings for RankNet
| grid_search | python | wildltr/ptranking | ptranking/ltr_adhoc/pairwise/ranknet.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/pairwise/ranknet.py | MIT |
def rankMSE_loss_function(relevance_preds=None, std_labels=None):
'''
Ranking loss based on mean squared error. TODO: adjust the output scale w.r.t. the activation function of the output layer.
@param relevance_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query
@param std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query
@return:
'''
_batch_loss = F.mse_loss(relevance_preds, std_labels, reduction='none')
batch_loss = torch.mean(torch.sum(_batch_loss, dim=1))
return batch_loss |
Ranking loss based on mean squared error. TODO: adjust the output scale w.r.t. the activation function of the output layer.
@param relevance_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query
@param std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query
@return:
| rankMSE_loss_function | python | wildltr/ptranking | ptranking/ltr_adhoc/pointwise/rank_mse.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/pointwise/rank_mse.py | MIT |
def custom_loss_function(self, batch_preds, batch_std_labels, **kwargs):
'''
:param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query
:param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query
:return:
'''
batch_loss = rankMSE_loss_function(batch_preds, batch_std_labels)
self.optimizer.zero_grad()
batch_loss.backward()
self.optimizer.step()
return batch_loss |
:param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query
:param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query
:return:
| custom_loss_function | python | wildltr/ptranking | ptranking/ltr_adhoc/pointwise/rank_mse.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/pointwise/rank_mse.py | MIT |
def batch_count(batch_std_labels=None, max_rele_grade=None, descending=False, gpu=False):
"""
TODO: an equivalent API is now provided by PyTorch
:param batch_std_labels:
:param max_rele_grade:
:param descending:
:param gpu:
:return:
"""
rele_grades = torch.arange(max_rele_grade+1).type(torch.cuda.FloatTensor) if gpu else torch.arange(max_rele_grade+1).type(torch.FloatTensor)
if descending: rele_grades, _ = torch.sort(rele_grades, descending=True)
batch_cnts = torch.stack([(batch_std_labels == g).sum(dim=1) for g in rele_grades])
batch_cnts = torch.t(batch_cnts)
return batch_cnts |
TODO: an equivalent API is now provided by PyTorch
:param batch_std_labels:
:param max_rele_grade:
:param descending:
:param gpu:
:return:
| batch_count | python | wildltr/ptranking | ptranking/ltr_adhoc/util/bin_utils.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/util/bin_utils.py | MIT |
def torch_batch_triu(batch_mats=None, k=0, pair_type='All', batch_std_labels=None, gpu=False, device=None):
'''
Get unique document pairs being consistent with the specified pair_type. This function is used to avoid duplicate computation.
All: pairs including both pairs of documents across different relevance levels and
pairs of documents having the same relevance level.
NoTies: the pairs consisting of two documents of the same relevance level are removed
No00: the pairs consisting of two non-relevant documents are removed
:param batch_mats: [batch, m, m]
:param k: the offset w.r.t. the diagonal line: k=0 means including the diagonal line, k=1 means upper triangular part without the diagonal line
:return:
'''
assert batch_mats.size(1) == batch_mats.size(2)
assert pair_type in PAIR_TYPE
m = batch_mats.size(1) # the number of documents
if pair_type == 'All':
row_inds, col_inds = np.triu_indices(m, k=k)
elif pair_type == 'No00':
assert batch_std_labels.size(0) == 1
row_inds, col_inds = np.triu_indices(m, k=k)
std_labels = torch.squeeze(batch_std_labels, 0)
labels = std_labels.cpu().numpy() if gpu else std_labels.data.numpy()
pairs = [e for e in zip(row_inds, col_inds) if not (0 == labels[e[0]] and 0 == labels[e[1]])] # remove pairs of 00 comparisons
row_inds = [e[0] for e in pairs]
col_inds = [e[1] for e in pairs]
elif pair_type == 'NoTies':
assert batch_std_labels.size(0) == 1
std_labels = torch.squeeze(batch_std_labels, 0)
row_inds, col_inds = np.triu_indices(m, k=k)
labels = std_labels.cpu().numpy() if gpu else std_labels.data.numpy()
pairs = [e for e in zip(row_inds, col_inds) if labels[e[0]]!=labels[e[1]]] # remove pairs of documents of the same level
row_inds = [e[0] for e in pairs]
col_inds = [e[1] for e in pairs]
tor_row_inds = torch.LongTensor(row_inds).to(device) if gpu else torch.LongTensor(row_inds)
tor_col_inds = torch.LongTensor(col_inds).to(device) if gpu else torch.LongTensor(col_inds)
batch_triu = batch_mats[:, tor_row_inds, tor_col_inds]
return batch_triu # shape: [batch_size, number of pairs] |
Get unique document pairs being consistent with the specified pair_type. This function is used to avoid duplicate computation.
All: pairs including both pairs of documents across different relevance levels and
pairs of documents having the same relevance level.
NoTies: the pairs consisting of two documents of the same relevance level are removed
No00: the pairs consisting of two non-relevant documents are removed
:param batch_mats: [batch, m, m]
:param k: the offset w.r.t. the diagonal line: k=0 means including the diagonal line, k=1 means upper triangular part without the diagonal line
:return:
| torch_batch_triu | python | wildltr/ptranking | ptranking/ltr_adhoc/util/gather_utils.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/util/gather_utils.py | MIT |
def torch_triu_indice(k=0, pair_type='All', batch_label=None, gpu=False, device=None):
'''
Get unique document pairs being consistent with the specified pair_type. This function is used to avoid duplicate computation.
All: pairs including both pairs of documents across different relevance levels and
pairs of documents having the same relevance level.
NoTies: the pairs consisting of two documents of the same relevance level are removed
No00: the pairs consisting of two non-relevant documents are removed
Inversion: only the pairs in inverted order are kept, i.e., the 1st doc is less relevant than the 2nd doc
:param batch_mats: [batch, m, m]
:param k: the offset w.r.t. the diagonal line: k=0 means including the diagonal line, k=1 means upper triangular part without the diagonal line
:return:
'''
assert pair_type in PAIR_TYPE
m = batch_label.size(1) # the number of documents
if pair_type == 'All':
row_inds, col_inds = np.triu_indices(m, k=k)
elif pair_type == 'No00':
assert batch_label.size(0) == 1
row_inds, col_inds = np.triu_indices(m, k=k)
std_labels = torch.squeeze(batch_label, 0)
labels = std_labels.cpu().numpy() if gpu else std_labels.data.numpy()
pairs = [e for e in zip(row_inds, col_inds) if not (0==labels[e[0]] and 0==labels[e[1]])] # remove pairs of 00 comparisons
row_inds = [e[0] for e in pairs]
col_inds = [e[1] for e in pairs]
elif pair_type == '00': # the pairs consisting of two non-relevant documents
assert batch_label.size(0) == 1
row_inds, col_inds = np.triu_indices(m, k=k)
std_labels = torch.squeeze(batch_label, 0)
labels = std_labels.cpu().numpy() if gpu else std_labels.data.numpy()
pairs = [e for e in zip(row_inds, col_inds) if (0 == labels[e[0]] and 0 == labels[e[1]])] # remove pairs of 00 comparisons
row_inds = [e[0] for e in pairs]
col_inds = [e[1] for e in pairs]
elif pair_type == 'NoTies':
assert batch_label.size(0) == 1
std_labels = torch.squeeze(batch_label, 0)
row_inds, col_inds = np.triu_indices(m, k=k)
labels = std_labels.cpu().numpy() if gpu else std_labels.data.numpy()
pairs = [e for e in zip(row_inds, col_inds) if labels[e[0]]!=labels[e[1]]] # remove pairs of documents of the same level
row_inds = [e[0] for e in pairs]
col_inds = [e[1] for e in pairs]
elif pair_type == 'Inversion':
assert batch_label.size(0) == 1
std_labels = torch.squeeze(batch_label, 0)
row_inds, col_inds = np.triu_indices(m, k=k)
labels = std_labels.cpu().numpy() if gpu else std_labels.data.numpy()
pairs = [e for e in zip(row_inds, col_inds) if labels[e[0]] < labels[e[1]]] # keep only inverted pairs, i.e., the 1st doc is less relevant than the 2nd
row_inds = [e[0] for e in pairs]
col_inds = [e[1] for e in pairs]
else:
raise NotImplementedError
tor_row_inds = torch.LongTensor(row_inds).to(device) if gpu else torch.LongTensor(row_inds)
tor_col_inds = torch.LongTensor(col_inds).to(device) if gpu else torch.LongTensor(col_inds)
return tor_row_inds, tor_col_inds # shape: [number of pairs] |
Get unique document pairs being consistent with the specified pair_type. This function is used to avoid duplicate computation.
All: pairs including both pairs of documents across different relevance levels and
pairs of documents having the same relevance level.
NoTies: the pairs consisting of two documents of the same relevance level are removed
No00: the pairs consisting of two non-relevant documents are removed
Inversion: only the pairs in inverted order are kept, i.e., the 1st doc is less relevant than the 2nd doc
:param batch_mats: [batch, m, m]
:param k: the offset w.r.t. the diagonal line: k=0 means including the diagonal line, k=1 means upper triangular part without the diagonal line
:return:
| torch_triu_indice | python | wildltr/ptranking | ptranking/ltr_adhoc/util/gather_utils.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/util/gather_utils.py | MIT |
def get_pairwise_comp_probs(batch_preds, batch_std_labels, sigma=None):
'''
Get the predicted and standard probabilities p_ij, i.e., the probability that d_i beats d_j
@param batch_preds:
@param batch_std_labels:
@param sigma:
@return:
'''
# computing pairwise differences w.r.t. predictions, i.e., s_i - s_j
batch_s_ij = torch.unsqueeze(batch_preds, dim=2) - torch.unsqueeze(batch_preds, dim=1)
batch_p_ij = torch.sigmoid(sigma * batch_s_ij)
# computing pairwise differences w.r.t. standard labels, i.e., S_{ij}
batch_std_diffs = torch.unsqueeze(batch_std_labels, dim=2) - torch.unsqueeze(batch_std_labels, dim=1)
# ensuring S_{ij} \in {-1, 0, 1}
batch_Sij = torch.clamp(batch_std_diffs, min=-1.0, max=1.0)
batch_std_p_ij = 0.5 * (1.0 + batch_Sij)
return batch_p_ij, batch_std_p_ij |
Get the predicted and standard probabilities p_ij, i.e., the probability that d_i beats d_j
@param batch_preds:
@param batch_std_labels:
@param sigma:
@return:
| get_pairwise_comp_probs | python | wildltr/ptranking | ptranking/ltr_adhoc/util/lambda_utils.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/util/lambda_utils.py | MIT |
def get_one_hot_reprs(batch_stds, gpu=False):
""" Get one-hot representation of batch ground-truth labels """
batch_size = batch_stds.size(0)
hist_size = batch_stds.size(1)
int_batch_stds = batch_stds.type(torch.cuda.LongTensor) if gpu else batch_stds.type(torch.LongTensor)
hot_batch_stds = torch.cuda.FloatTensor(batch_size, hist_size, 3) if gpu else torch.FloatTensor(batch_size, hist_size, 3) # note: the number of relevance levels is hard-coded as 3
hot_batch_stds.zero_()
hot_batch_stds.scatter_(2, torch.unsqueeze(int_batch_stds, 2), 1)
return hot_batch_stds | Get one-hot representation of batch ground-truth labels | get_one_hot_reprs | python | wildltr/ptranking | ptranking/ltr_adhoc/util/one_hot_utils.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/util/one_hot_utils.py | MIT |
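A CPU sketch; note the hard-coded assumption of three relevance levels {0, 1, 2}:
import torch
batch_stds = torch.tensor([[0., 2., 1.]])
print(get_one_hot_reprs(batch_stds, gpu=False))
# tensor([[[1., 0., 0.],
#          [0., 0., 1.],
#          [0., 1., 0.]]])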
def arg_shuffle_ties(batch_rankings, descending=True, device=None):
'''Shuffle ties, and return the corresponding indices '''
batch_size, ranking_size = batch_rankings.size()
if batch_size > 1:
list_rperms = []
for _ in range(batch_size):
list_rperms.append(torch.randperm(ranking_size, device=device))
batch_rperms = torch.stack(list_rperms, dim=0)
else:
batch_rperms = torch.randperm(ranking_size, device=device).view(1, -1)
batch_shuffled_rankings = torch.gather(batch_rankings, dim=1, index=batch_rperms)
batch_desc_inds = torch.argsort(batch_shuffled_rankings, descending=descending)
batch_shuffle_ties_inds = torch.gather(batch_rperms, dim=1, index=batch_desc_inds)
return batch_shuffle_ties_inds | Shuffle ties, and return the corresponding indices | arg_shuffle_ties | python | wildltr/ptranking | ptranking/ltr_adhoc/util/sampling_utils.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/util/sampling_utils.py | MIT |
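A sketch of how tied documents receive a random order while the descending label order is preserved:
import torch
batch_rankings = torch.tensor([[2., 1., 1., 0.]])
inds = arg_shuffle_ties(batch_rankings, descending=True)
print(inds)  # e.g., tensor([[0, 2, 1, 3]]): the two tied 1-labels (indices 1 and 2) may swap
print(torch.gather(batch_rankings, dim=1, index=inds))  # always tensor([[2., 1., 1., 0.]])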
def sample_ranking_PL(batch_preds, only_indices=True, temperature=1.0):
'''
Sample one ranking per query based on Plackett-Luce model
@param batch_preds: [batch_size, ranking_size] each row denotes the relevance predictions for documents associated with the same query
@param only_indices: only return the indices or not
'''
if torch.isnan(batch_preds).any(): # checking is needed for the later PL model
print('batch_preds', batch_preds)
print('Error: batch_preds includes NaN values.')
if 1.0 != temperature:
target_batch_preds = torch.div(batch_preds, temperature)
else:
target_batch_preds = batch_preds
batch_m, _ = torch.max(target_batch_preds, dim=1, keepdim=True) # a transformation aiming for higher stability when computing softmax() with exp()
m_target_batch_preds = target_batch_preds - batch_m
batch_exps = torch.exp(m_target_batch_preds)
batch_sample_inds = torch.multinomial(batch_exps, replacement=False, num_samples=batch_preds.size(1))
if only_indices:
return batch_sample_inds
else:
# sort batch_preds according to the sample order
# w.r.t. top-k, we need the remaining part, but we don't consider the orders among the remaining parts
batch_preds_in_sample_order = torch.gather(batch_preds, dim=1, index=batch_sample_inds)
return batch_sample_inds, batch_preds_in_sample_order |
Sample one ranking per query based on Plackett-Luce model
@param batch_preds: [batch_size, ranking_size] each row denotes the relevance predictions for documents associated with the same query
@param only_indices: only return the indices or not
| sample_ranking_PL | python | wildltr/ptranking | ptranking/ltr_adhoc/util/sampling_utils.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/util/sampling_utils.py | MIT |
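A usage sketch; with only_indices=False the predictions are also returned in the sampled order:
import torch
torch.manual_seed(0)  # only for reproducibility of the sampled permutation
batch_preds = torch.tensor([[2.0, 0.0, 1.0]])
inds, preds_in_order = sample_ranking_PL(batch_preds, only_indices=False)
print(inds)            # a sampled permutation; higher-scored documents tend to be drawn earlier
print(preds_in_order)  # batch_preds gathered in the sampled order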
def sample_ranking_PL_gumbel_softmax(batch_preds, only_indices=True, temperature=1.0, device=None):
'''
Sample a ranking based on a stochastic Plackett-Luce model, where Gumbel noise is added
@param batch_preds: [batch_size, ranking_size] each row denotes the relevance predictions for documents associated with the same query
@param only_indices: only return the indices or not
'''
unif = torch.rand(batch_preds.size(), device=device) # [batch_size, ranking_size]
gumbel = -torch.log(-torch.log(unif + EPS) + EPS) # sample from the Gumbel distribution
if only_indices:
batch_logits = batch_preds + gumbel
_, batch_sample_inds = torch.sort(batch_logits, dim=1, descending=True)
return batch_sample_inds
else:
if 1.0 == temperature:
batch_logits = batch_preds + gumbel
else:
batch_logits = (batch_preds + gumbel) / temperature
batch_logits_in_sample_order, batch_sample_inds = torch.sort(batch_logits, dim=1, descending=True)
return batch_sample_inds, batch_logits_in_sample_order |
Sample a ranking based on a stochastic Plackett-Luce model, where Gumbel noise is added
@param batch_preds: [batch_size, ranking_size] each row denotes the relevance predictions for documents associated with the same query
@param only_indices: only return the indices or not
| sample_ranking_PL_gumbel_softmax | python | wildltr/ptranking | ptranking/ltr_adhoc/util/sampling_utils.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adhoc/util/sampling_utils.py | MIT |
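A sketch; by the Gumbel-max trick, sorting the Gumbel-perturbed scores draws a permutation from the same Plackett-Luce distribution as the sequential sampling above (assumes the module-level EPS constant is in scope):
import torch
torch.manual_seed(0)
batch_preds = torch.tensor([[2.0, 0.0, 1.0]])
inds = sample_ranking_PL_gumbel_softmax(batch_preds, only_indices=True)
print(inds)  # a sampled permutation of document indices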
def default_pointsf_para_dict(self):
"""
A default setting of the hyper-parameters of the stump neural scoring function for adversarial ltr.
"""
self.sf_para_dict = dict()
self.sf_para_dict['sf_id'] = self.sf_id
self.sf_para_dict['opt'] = 'Adam' # Adam | RMS | Adagrad
self.sf_para_dict['lr'] = 0.001 # learning rate
pointsf_para_dict = dict(num_layers=5, AF='R', TL_AF='R', apply_tl_af=True,
BN=False, bn_type='BN', bn_affine=True)
self.sf_para_dict[self.sf_id] = pointsf_para_dict
return self.sf_para_dict |
A default setting of the hyper-parameters of the stump neural scoring function for adversarial ltr.
| default_pointsf_para_dict | python | wildltr/ptranking | ptranking/ltr_adversarial/eval/ad_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/eval/ad_parameter.py | MIT |
def to_eval_setting_string(self, log=False):
"""
String identifier of eval-setting
:param log:
:return:
"""
eval_dict = self.eval_dict
s1, s2 = (':', '\n') if log else ('_', '_')
do_vali, epochs = eval_dict['do_validation'], eval_dict['epochs']
eval_string = s2.join([s1.join(['epochs', str(epochs)]), s1.join(['do_validation', str(do_vali)])]) if log \
else s1.join(['EP', str(epochs), 'V', str(do_vali)])
return eval_string |
String identifier of eval-setting
:param log:
:return:
| to_eval_setting_string | python | wildltr/ptranking | ptranking/ltr_adversarial/eval/ad_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/eval/ad_parameter.py | MIT |
def default_setting(self):
"""
A default setting for evaluation when performing adversarial ltr
:param debug:
:param data_id:
:param dir_output:
:return:
"""
do_log = False if self.debug else True
do_validation, do_summary = True, False
log_step = 1
epochs = 10 if self.debug else 50
vali_k = 5
'''on the usage of mask_label
(1) given a supervised dataset, True means masking supervised data to mimic unsupervised data
(2) given an unsupervised dataset, this setting is not supported, since the data is already unsupervised
'''
mask_label = False
if mask_label:
assert not self.data_id in MSLETOR_SEMI
mask_ratio = 0.1
mask_type = 'rand_mask_rele'
else:
mask_ratio = None
mask_type = None
# more evaluation settings that are rarely changed
self.eval_dict = dict(debug=self.debug, grid_search=False, dir_output=self.dir_output,
cutoffs=[1, 3, 5, 10, 20, 50], do_validation=do_validation, vali_k=vali_k,
do_summary=do_summary, do_log=do_log, log_step=log_step, loss_guided=False, epochs=epochs,
mask_label=mask_label, mask_ratio=mask_ratio, mask_type=mask_type)
return self.eval_dict |
A default setting for evaluation when performing adversarial ltr
:param debug:
:param data_id:
:param dir_output:
:return:
| default_setting | python | wildltr/ptranking | ptranking/ltr_adversarial/eval/ad_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/eval/ad_parameter.py | MIT |
def grid_search(self):
"""
Iterator of settings for evaluation when performing adversarial ltr
"""
if self.use_json:
dir_output = self.json_dict['dir_output']
epochs = 5 if self.debug else self.json_dict['epochs']
do_validation, vali_k = self.json_dict['do_validation'], self.json_dict['vali_k']
cutoffs = self.json_dict['cutoffs']
do_log, log_step = self.json_dict['do_log'], self.json_dict['log_step']
do_summary = self.json_dict['do_summary']
loss_guided = self.json_dict['loss_guided']
mask_label = self.json_dict['mask']['mask_label']
choice_mask_type = self.json_dict['mask']['mask_type']
choice_mask_ratio = self.json_dict['mask']['mask_ratio']
base_dict = dict(debug=False, grid_search=True, dir_output=dir_output)
else:
base_dict = dict(debug=self.debug, grid_search=True, dir_output=self.dir_output)
epochs = 20 if self.debug else 100
do_validation = False if self.debug else True # True, False
vali_k, cutoffs = 5, [1, 3, 5, 10, 20, 50]
do_log = False if self.debug else True
log_step = 1
do_summary, loss_guided = False, False
mask_label = False
choice_mask_type = ['rand_mask_rele']
choice_mask_ratio = [0.2]
self.eval_dict = dict(epochs=epochs, do_validation=do_validation, vali_k=vali_k, cutoffs=cutoffs,
do_log=do_log, log_step=log_step, do_summary=do_summary, loss_guided=loss_guided,
mask_label=mask_label)
self.eval_dict.update(base_dict)
if mask_label:
for mask_type, mask_ratio in product(choice_mask_type, choice_mask_ratio):
mask_dict = dict(mask_type=mask_type, mask_ratio=mask_ratio)
self.eval_dict.update(mask_dict)
yield self.eval_dict
else:
yield self.eval_dict |
Iterator of settings for evaluation when performing adversarial ltr
| grid_search | python | wildltr/ptranking | ptranking/ltr_adversarial/eval/ad_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/eval/ad_parameter.py | MIT |
def to_data_setting_string(self, log=False):
"""
String identifier of data-setting
:param log:
:return:
"""
data_dict = self.data_dict
s1, s2 = (':', '\n') if log else ('_', '_')
data_id, binary_rele = data_dict['data_id'], data_dict['binary_rele']
min_docs, min_rele, train_rough_batch_size, train_presort = data_dict['min_docs'], data_dict['min_rele'],\
data_dict['train_rough_batch_size'], data_dict['train_presort']
setting_string = s2.join([s1.join(['data_id', data_id]),
s1.join(['min_docs', str(min_docs)]),
s1.join(['min_rele', str(min_rele)]),
s1.join(['TrBat', str(train_rough_batch_size)])]) if log \
else s1.join([data_id, 'MiD', str(min_docs), 'MiR', str(min_rele), 'TrBat', str(train_rough_batch_size)])
if train_presort:
tr_presort_str = s1.join(['train_presort', str(train_presort)]) if log else 'TrPresort'
setting_string = s2.join([setting_string, tr_presort_str])
if binary_rele:
bi_str = s1.join(['binary_rele', str(binary_rele)]) if log else 'BiRele'
setting_string = s2.join([setting_string, bi_str])
return setting_string |
String identifier of data-setting
:param log:
:return:
| to_data_setting_string | python | wildltr/ptranking | ptranking/ltr_adversarial/eval/ad_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/eval/ad_parameter.py | MIT |
def default_setting(self):
"""
A default setting for data loading when performing adversarial ltr
"""
unknown_as_zero = False
binary_rele = False # using the original values
train_presort, validation_presort, test_presort = True, True, True
train_rough_batch_size, validation_rough_batch_size, test_rough_batch_size = 1, 100, 100
scale_data, scaler_id, scaler_level = get_scaler_setting(data_id=self.data_id)
# more data settings that are rarely changed
self.data_dict = dict(data_id=self.data_id, dir_data=self.dir_data, min_docs=10, min_rele=1,
unknown_as_zero=unknown_as_zero, binary_rele=binary_rele, train_presort=train_presort,
validation_presort=validation_presort, test_presort=test_presort,
train_rough_batch_size=train_rough_batch_size, validation_rough_batch_size=validation_rough_batch_size,
test_rough_batch_size=test_rough_batch_size,
scale_data=scale_data, scaler_id=scaler_id, scaler_level=scaler_level)
data_meta = get_data_meta(data_id=self.data_id) # add meta-information
if self.debug: data_meta['fold_num'] = 2
self.data_dict.update(data_meta)
return self.data_dict |
A default setting for data loading when performing adversarial ltr
| default_setting | python | wildltr/ptranking | ptranking/ltr_adversarial/eval/ad_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/eval/ad_parameter.py | MIT |
def grid_search(self):
"""
Iterator of settings for data loading when performing adversarial ltr
"""
if self.use_json:
scaler_id = self.json_dict['scaler_id']
choice_min_docs = self.json_dict['min_docs']
choice_min_rele = self.json_dict['min_rele']
choice_binary_rele = self.json_dict['binary_rele']
choice_unknown_as_zero = self.json_dict['unknown_as_zero']
base_data_dict = dict(data_id=self.data_id, dir_data=self.json_dict["dir_data"],
train_presort=True, test_presort=True, validation_presort=True,
train_rough_batch_size=1, validation_rough_batch_size=100, test_rough_batch_size=100)
else:
scaler_id = None
choice_min_docs = [10]
choice_min_rele = [1]
choice_binary_rele = [False]
choice_unknown_as_zero = [False]
base_data_dict = dict(data_id=self.data_id, dir_data=self.dir_data,
train_presort=True, test_presort=True, validation_presort=True,
train_rough_batch_size=1, validation_rough_batch_size=100, test_rough_batch_size=100)
data_meta = get_data_meta(data_id=self.data_id) # add meta-information
base_data_dict.update(data_meta)
scale_data, scaler_id, scaler_level = get_scaler_setting(data_id=self.data_id, scaler_id=scaler_id)
for min_docs, min_rele in product(choice_min_docs, choice_min_rele):
threshold_dict = dict(min_docs=min_docs, min_rele=min_rele)
for binary_rele, unknown_as_zero in product(choice_binary_rele, choice_unknown_as_zero):
custom_dict = dict(binary_rele=binary_rele, unknown_as_zero=unknown_as_zero)
scale_dict = dict(scale_data=scale_data, scaler_id=scaler_id, scaler_level=scaler_level)
self.data_dict = dict()
self.data_dict.update(base_data_dict)
self.data_dict.update(threshold_dict)
self.data_dict.update(custom_dict)
self.data_dict.update(scale_dict)
yield self.data_dict |
Iterator of settings for data loading when performing adversarial ltr
| grid_search | python | wildltr/ptranking | ptranking/ltr_adversarial/eval/ad_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/eval/ad_parameter.py | MIT |
def check_consistency(self, data_dict, eval_dict, sf_para_dict):
"""
Check whether the settings are reasonable in the context of adversarial learning-to-rank
"""
''' Part-1: data loading '''
assert 1 == data_dict['train_rough_batch_size'] # the required setting w.r.t. adversarial LTR
if data_dict['data_id'] == 'Istella':
assert eval_dict['do_validation'] is not True # since there is no validation data
if data_dict['data_id'] in MSLETOR_SEMI:
assert data_dict['unknown_as_zero'] is not True # use original data
if data_dict['scale_data']:
scaler_level = data_dict['scaler_level'] if 'scaler_level' in data_dict else None
assert not scaler_level == 'DATASET' # not supported setting
assert data_dict['validation_presort'] # Rule of thumb, as validation and test data are for metric-performance
assert data_dict['test_presort'] # Rule of thumb, as validation and test data are for metric-performance
''' Part-2: evaluation setting '''
if eval_dict['mask_label']: # True is aimed to use supervised data to mimic semi-supervised data by masking
assert not data_dict['data_id'] in MSLETOR_SEMI |
Check whether the settings are reasonable in the context of adversarial learning-to-rank
| check_consistency | python | wildltr/ptranking | ptranking/ltr_adversarial/eval/ltr_adversarial.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/eval/ltr_adversarial.py | MIT |
def get_ad_machine(self, eval_dict=None, data_dict=None, sf_para_dict=None, ad_para_dict=None):
"""
Initialize the adversarial model correspondingly.
:param eval_dict:
:param data_dict:
:param sf_para_dict:
:param ad_para_dict:
:return:
"""
model_id = ad_para_dict['model_id']
if model_id in ['IRGAN_Point', 'IRGAN_Pair', 'IRGAN_List', 'IRFGAN_Point', 'IRFGAN_Pair', 'IRFGAN_List']:
ad_machine = globals()[model_id](eval_dict=eval_dict, data_dict=data_dict, gpu=self.gpu, device=self.device,
sf_para_dict=sf_para_dict, ad_para_dict=ad_para_dict)
else:
raise NotImplementedError
return ad_machine |
Initialize the adversarial model correspondingly.
:param eval_dict:
:param data_dict:
:param sf_para_dict:
:param ad_para_dict:
:return:
| get_ad_machine | python | wildltr/ptranking | ptranking/ltr_adversarial/eval/ltr_adversarial.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/eval/ltr_adversarial.py | MIT |
def ad_cv_eval(self, data_dict=None, eval_dict=None, ad_para_dict=None, sf_para_dict=None):
"""
Adversarial training and evaluation
:param data_dict:
:param eval_dict:
:param ad_para_dict:
:param sf_para_dict:
:return:
"""
self.display_information(data_dict, model_para_dict=ad_para_dict)
self.check_consistency(data_dict, eval_dict, sf_para_dict=sf_para_dict)
self.setup_eval(data_dict, eval_dict, sf_para_dict, model_para_dict=ad_para_dict)
model_id = ad_para_dict['model_id']
fold_num = data_dict['fold_num']
# for quick access of common evaluation settings
epochs, loss_guided = eval_dict['epochs'], eval_dict['loss_guided']
vali_k, log_step, cutoffs = eval_dict['vali_k'], eval_dict['log_step'], eval_dict['cutoffs']
do_vali, do_summary = eval_dict['do_validation'], eval_dict['do_summary']
ad_machine = self.get_ad_machine(eval_dict=eval_dict, data_dict=data_dict, sf_para_dict=sf_para_dict, ad_para_dict=ad_para_dict)
time_begin = datetime.datetime.now() # timing
g_l2r_cv_avg_scores, d_l2r_cv_avg_scores = np.zeros(len(cutoffs)), np.zeros(len(cutoffs)) # fold average
'''
Dataset-level buffering of frequently used information
1> e.g., number of positive documents per-query
'''
global_buffer = dict() # refresh for each model instance
for fold_k in range(1, fold_num + 1):
ad_machine.reset_generator_discriminator()
fold_optimal_checkpoint = '-'.join(['Fold', str(fold_k)])
train_data, test_data, vali_data = self.load_data(eval_dict, data_dict, fold_k)
# update due to new train_data
ad_machine.fill_global_buffer(train_data, dict_buffer=global_buffer)
if do_vali: g_fold_optimal_ndcgk, d_fold_optimal_ndcgk = 0.0, 0.0
if do_summary:
list_epoch_loss = [] # not used yet
g_list_fold_k_train_eval_track, g_list_fold_k_test_eval_track, g_list_fold_k_vali_eval_track = [], [], []
d_list_fold_k_train_eval_track, d_list_fold_k_test_eval_track, d_list_fold_k_vali_eval_track = [], [], []
for _ in range(10):
ad_machine.burn_in(train_data=train_data)
for epoch_k in range(1, epochs + 1):
if model_id == 'IR_GMAN_List':
stop_training = ad_machine.mini_max_train(train_data=train_data, generator=ad_machine.generator,
pool_discriminator=ad_machine.pool_discriminator, global_buffer=global_buffer)
g_ranker = ad_machine.get_generator()
d_ranker = ad_machine.pool_discriminator[0]
else:
stop_training = ad_machine.mini_max_train(train_data=train_data, generator=ad_machine.generator,
discriminator=ad_machine.discriminator, global_buffer=global_buffer)
g_ranker = ad_machine.get_generator()
d_ranker = ad_machine.get_discriminator()
if stop_training:
print('Training failed!')
break
if (do_summary or do_vali) and (epoch_k % log_step == 0 or epoch_k == 1): # stepwise check
if do_vali:
g_vali_eval_tmp = g_ranker.ndcg_at_k(test_data=vali_data, k=vali_k, label_type=self.data_setting.data_dict['label_type'])
d_vali_eval_tmp = d_ranker.ndcg_at_k(test_data=vali_data, k=vali_k, label_type=self.data_setting.data_dict['label_type'])
g_vali_eval_v, d_vali_eval_v = g_vali_eval_tmp.data.numpy(), d_vali_eval_tmp.data.numpy()
if epoch_k > 1:
g_buffer, g_tmp_metric_val, g_tmp_epoch = \
self.per_epoch_validation(ranker=g_ranker, curr_metric_val=g_vali_eval_v,
fold_optimal_metric_val=g_fold_optimal_ndcgk, curr_epoch=epoch_k,
id_str='G', fold_optimal_checkpoint=fold_optimal_checkpoint, epochs=epochs)
# observe better performance
if g_buffer: g_fold_optimal_ndcgk, g_fold_optimal_epoch_val = g_tmp_metric_val, g_tmp_epoch
d_buffer, d_tmp_metric_val, d_tmp_epoch = \
self.per_epoch_validation(ranker=d_ranker, curr_metric_val=d_vali_eval_v,
fold_optimal_metric_val=d_fold_optimal_ndcgk, curr_epoch=epoch_k,
id_str='D', fold_optimal_checkpoint=fold_optimal_checkpoint, epochs=epochs)
if d_buffer: d_fold_optimal_ndcgk, d_fold_optimal_epoch_val = d_tmp_metric_val, d_tmp_epoch
if do_summary: # summarize per-step performance w.r.t. train, test
self.per_epoch_summary_step1(ranker=g_ranker, train_data=train_data, test_data=test_data,
list_fold_k_train_eval_track=g_list_fold_k_train_eval_track,
list_fold_k_test_eval_track=g_list_fold_k_test_eval_track,
vali_eval_v=g_vali_eval_v,
list_fold_k_vali_eval_track=g_list_fold_k_vali_eval_track,
cutoffs=cutoffs, do_vali=do_vali)
self.per_epoch_summary_step1(ranker=d_ranker, train_data=train_data, test_data=test_data,
list_fold_k_train_eval_track=d_list_fold_k_train_eval_track,
list_fold_k_test_eval_track=d_list_fold_k_test_eval_track,
vali_eval_v=d_vali_eval_v,
list_fold_k_vali_eval_track=d_list_fold_k_vali_eval_track,
cutoffs=cutoffs, do_vali=do_vali)
if do_summary:
self.per_epoch_summary_step2(id_str='G', fold_k=fold_k,
list_fold_k_train_eval_track=g_list_fold_k_train_eval_track,
list_fold_k_test_eval_track=g_list_fold_k_test_eval_track,
do_vali=do_vali,
list_fold_k_vali_eval_track=g_list_fold_k_vali_eval_track)
self.per_epoch_summary_step2(id_str='D', fold_k=fold_k,
list_fold_k_train_eval_track=d_list_fold_k_train_eval_track,
list_fold_k_test_eval_track=d_list_fold_k_test_eval_track,
do_vali=do_vali,
list_fold_k_vali_eval_track=d_list_fold_k_vali_eval_track)
if do_vali: # using the fold-wise optimal model for later testing based on validation data #
g_buffered_model = '_'.join(['net_params_epoch', str(g_fold_optimal_epoch_val), 'G']) + '.pkl'
g_ranker.load(self.dir_run + fold_optimal_checkpoint + '/' + g_buffered_model)
g_fold_optimal_ranker = g_ranker
d_buffered_model = '_'.join(['net_params_epoch', str(d_fold_optimal_epoch_val), 'D']) + '.pkl'
d_ranker.load(self.dir_run + fold_optimal_checkpoint + '/' + d_buffered_model)
d_fold_optimal_ranker = d_ranker
else: # using the default G; buffer the model after a fixed number of training epochs if no validation is deployed
g_ranker.save(dir=self.dir_run + fold_optimal_checkpoint + '/', name='_'.join(['net_params_epoch', str(epoch_k), 'G']) + '.pkl')
g_fold_optimal_ranker = g_ranker
d_ranker.save(dir=self.dir_run + fold_optimal_checkpoint + '/', name='_'.join(['net_params_epoch', str(epoch_k), 'D']) + '.pkl')
d_fold_optimal_ranker = d_ranker
g_torch_fold_ndcg_ks = g_fold_optimal_ranker.ndcg_at_ks(test_data=test_data, ks=cutoffs, label_type=self.data_setting.data_dict['label_type'])
g_fold_ndcg_ks = g_torch_fold_ndcg_ks.data.numpy()
d_torch_fold_ndcg_ks = d_fold_optimal_ranker.ndcg_at_ks(test_data=test_data, ks=cutoffs, label_type=self.data_setting.data_dict['label_type'])
d_fold_ndcg_ks = d_torch_fold_ndcg_ks.data.numpy()
performance_list = [' Fold-' + str(fold_k)] # fold-wise performance
performance_list.append('Generator')
for i, co in enumerate(cutoffs):
performance_list.append('nDCG@{}:{:.4f}'.format(co, g_fold_ndcg_ks[i]))
performance_list.append('\nDiscriminator')
for i, co in enumerate(cutoffs):
performance_list.append('nDCG@{}:{:.4f}'.format(co, d_fold_ndcg_ks[i]))
performance_str = '\t'.join(performance_list)
print('\t', performance_str)
g_l2r_cv_avg_scores = np.add(g_l2r_cv_avg_scores, g_fold_ndcg_ks) # sum for later cv-performance
d_l2r_cv_avg_scores = np.add(d_l2r_cv_avg_scores, d_fold_ndcg_ks)
time_end = datetime.datetime.now() # overall timing
elapsed_time_str = str(time_end - time_begin)
print('Elapsed time:\t', elapsed_time_str + "\n\n")
# begin to print either cv or average performance
g_l2r_cv_avg_scores = np.divide(g_l2r_cv_avg_scores, fold_num)
d_l2r_cv_avg_scores = np.divide(d_l2r_cv_avg_scores, fold_num)
if do_vali:
eval_prefix = str(fold_num) + '-fold cross validation scores:'
else:
eval_prefix = str(fold_num) + '-fold average scores:'
print('Generator', eval_prefix, metric_results_to_string(list_scores=g_l2r_cv_avg_scores, list_cutoffs=cutoffs))
print('Discriminator', eval_prefix, metric_results_to_string(list_scores=d_l2r_cv_avg_scores, list_cutoffs=cutoffs)) |
Adversarial training and evaluation
:param data_dict:
:param eval_dict:
:param ad_para_dict:
:param sf_para_dict:
:return:
| ad_cv_eval | python | wildltr/ptranking | ptranking/ltr_adversarial/eval/ltr_adversarial.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/eval/ltr_adversarial.py | MIT |
def grid_run(self, debug=True, model_id=None, data_id=None, dir_data=None, dir_output=None, dir_json=None):
"""
Perform adversarial learning-to-rank based on grid search of optimal parameter setting
"""
if dir_json is not None:
ad_data_eval_sf_json = dir_json + 'Ad_Data_Eval_ScoringFunction.json'
para_json = dir_json + model_id + "Parameter.json"
self.set_eval_setting(debug=debug, ad_eval_json=ad_data_eval_sf_json)
self.set_data_setting(ad_data_json=ad_data_eval_sf_json)
self.set_scoring_function_setting(sf_json=ad_data_eval_sf_json)
self.set_model_setting(model_id=model_id, para_json=para_json)
else:
self.set_eval_setting(debug=debug, dir_output=dir_output)
self.set_data_setting(debug=debug, data_id=data_id, dir_data=dir_data)
self.set_scoring_function_setting(debug=debug)
self.set_model_setting(debug=debug, model_id=model_id)
for data_dict in self.iterate_data_setting():
for eval_dict in self.iterate_eval_setting():
for sf_para_dict in self.iterate_scoring_function_setting():
for ad_para_dict in self.iterate_model_setting():
self.ad_cv_eval(data_dict=data_dict, eval_dict=eval_dict,
sf_para_dict=sf_para_dict, ad_para_dict=ad_para_dict) |
Perform adversarial learning-to-rank based on grid search of optimal parameter setting
| grid_run | python | wildltr/ptranking | ptranking/ltr_adversarial/eval/ltr_adversarial.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/eval/ltr_adversarial.py | MIT |
def point_run(self, debug=False, model_id=None, sf_id=None, data_id=None, dir_data=None, dir_output=None):
"""
:param debug:
:param model_id:
:param data_id:
:param dir_data:
:param dir_output:
:return:
"""
self.set_eval_setting(debug=debug, dir_output=dir_output)
self.set_data_setting(debug=debug, data_id=data_id, dir_data=dir_data)
data_dict = self.get_default_data_setting()
eval_dict = self.get_default_eval_setting()
self.set_scoring_function_setting(debug=debug, sf_id=sf_id)
sf_para_dict = self.get_default_scoring_function_setting()
self.set_model_setting(debug=debug, model_id=model_id)
ad_model_para_dict = self.get_default_model_setting()
self.ad_cv_eval(data_dict=data_dict, eval_dict=eval_dict, sf_para_dict=sf_para_dict,
ad_para_dict=ad_model_para_dict) |
:param debug:
:param model_id:
:param data_id:
:param dir_data:
:param dir_output:
:return:
| point_run | python | wildltr/ptranking | ptranking/ltr_adversarial/eval/ltr_adversarial.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/eval/ltr_adversarial.py | MIT |
def __init__(self, eval_dict, data_dict, sf_para_dict=None, ad_para_dict=None, optimal_train=False, gpu=False, device=None):
'''
:param optimal_train: training with supervised generator or discriminator
'''
super(IRFGAN_List, self).__init__(eval_dict=eval_dict, data_dict=data_dict, gpu=gpu, device=device)
#sf_para_dict['ffnns']['apply_tl_af'] = True # todo to be compared
g_sf_para_dict = sf_para_dict
d_sf_para_dict = copy.deepcopy(g_sf_para_dict)
d_sf_para_dict[sf_para_dict['sf_id']]['apply_tl_af'] = True
d_sf_para_dict[sf_para_dict['sf_id']]['TL_AF'] = 'S'
self.generator = List_Generator(sf_para_dict=g_sf_para_dict, gpu=gpu, device=device)
self.discriminator = List_Discriminator(sf_para_dict=d_sf_para_dict, gpu=gpu, device=device)
self.pre_check()
self.top_k = ad_para_dict['top_k']
self.f_div_id = ad_para_dict['f_div_id']
self.d_epoches = ad_para_dict['d_epoches']
self.g_epoches = ad_para_dict['g_epoches']
self.temperature = ad_para_dict['temperature']
self.ad_training_order = ad_para_dict['ad_training_order']
self.samples_per_query = ad_para_dict['samples_per_query']
self.PL_discriminator = ad_para_dict['PL_D']
self.replace_trick_4_generator = ad_para_dict['repTrick']
self.drop_discriminator_log_4_reward = ad_para_dict['dropLog']
self.optimal_train = optimal_train
if optimal_train:
self.super_generator = List_Generator(sf_para_dict=g_sf_para_dict)
self.super_discriminator = List_Discriminator(sf_para_dict=d_sf_para_dict)
self.activation_f, self.conjugate_f = get_f_divergence_functions(self.f_div_id) |
:param optimal_train: training with supervised generator or discriminator
| __init__ | python | wildltr/ptranking | ptranking/ltr_adversarial/listwise/irfgan_list.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/listwise/irfgan_list.py | MIT |
def per_query_generation(self, qid=None, batch_ranking=None, batch_label=None, pos_and_neg=None, generator=None,
samples_per_query=None, top_k=None, temperature=None):
'''
:param pos_and_neg: corresponding to discriminator optimization or generator optimization
'''
generator.eval_mode()
g_batch_pred = generator.predict(batch_ranking) # [batch, size_ranking]
batch_gen_stochastic_prob = gumbel_softmax(g_batch_pred, samples_per_query=samples_per_query, temperature=temperature, cuda=self.gpu, cuda_device=self.device)
sorted_batch_gen_stochastic_probs, batch_gen_sto_sorted_inds = torch.sort(batch_gen_stochastic_prob, dim=1, descending=True)
if pos_and_neg: # for training discriminator
used_batch_label = batch_label
# Generate truth rankings by shuffling ties:
'''
There is no need to first filter out documents labeled '-1': the sorting is descending and only the top ones are used.
The only required condition is that the number of non-minus-one documents is larger than top_k, which is guaranteed by the customized mask_data()
'''
per_query_label = torch.squeeze(used_batch_label)
list_std_sto_sorted_inds = []
for i in range(samples_per_query):
shuffle_ties_inds = arg_shuffle_ties(per_query_label, descending=True)
list_std_sto_sorted_inds.append(shuffle_ties_inds)
batch_std_sto_sorted_inds = torch.stack(list_std_sto_sorted_inds, dim=0)
list_pos_ranking, list_neg_ranking = [], []
if top_k is None: # using all documents
for i in range(samples_per_query):
pos_inds = batch_std_sto_sorted_inds[i, :]
pos_ranking = batch_ranking[0, pos_inds, :]
list_pos_ranking.append(pos_ranking)
neg_inds = batch_gen_sto_sorted_inds[i, :]
neg_ranking = batch_ranking[0, neg_inds, :]
list_neg_ranking.append(neg_ranking)
else:
for i in range(samples_per_query):
pos_inds = batch_std_sto_sorted_inds[i, 0:top_k]
pos_ranking = batch_ranking[0, pos_inds, :] # sampled sublist of documents
list_pos_ranking.append(pos_ranking)
neg_inds = batch_gen_sto_sorted_inds[i, 0:top_k]
neg_ranking = batch_ranking[0, neg_inds, :]
list_neg_ranking.append(neg_ranking)
batch_std_sample_ranking = torch.stack(list_pos_ranking, dim=0)
batch_gen_sample_ranking = torch.stack(list_neg_ranking, dim=0)
return batch_std_sample_ranking, batch_gen_sample_ranking
else: # for training generator
if top_k is None:
return sorted_batch_gen_stochastic_probs, batch_gen_sto_sorted_inds
else:
list_g_sort_top_preds, list_g_sort_top_inds = [], [] # required to cope with ranking_size mismatch
for i in range(samples_per_query):
neg_inds = batch_gen_sto_sorted_inds[i, 0:top_k]
list_g_sort_top_inds.append(neg_inds)
top_gen_stochastic_probs = sorted_batch_gen_stochastic_probs[i, 0:top_k]
list_g_sort_top_preds.append(top_gen_stochastic_probs)
top_sorted_batch_gen_stochastic_probs = torch.stack(list_g_sort_top_preds, dim=0)
return top_sorted_batch_gen_stochastic_probs, list_g_sort_top_inds |
:param pos_and_neg: corresponding to discriminator optimization or generator optimization
| per_query_generation | python | wildltr/ptranking | ptranking/ltr_adversarial/listwise/irfgan_list.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/listwise/irfgan_list.py | MIT |
def to_para_string(self, log=False, given_para_dict=None):
"""
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
:return:
"""
# using specified para-dict or inner para-dict
ad_para_dict = given_para_dict if given_para_dict is not None else self.ad_para_dict
s1 = ':' if log else '_'
f_div_id = ad_para_dict['f_div_id']
d_epoches, g_epoches, temperature, ad_training_order = ad_para_dict['d_epoches'], ad_para_dict['g_epoches'],\
ad_para_dict['temperature'], ad_para_dict['ad_training_order']
prefix = s1.join([str(d_epoches), str(g_epoches), '{:,g}'.format(temperature), ad_training_order, f_div_id])
top_k, PL_D, repTrick, dropLog = ad_para_dict['top_k'], ad_para_dict['PL_D'], ad_para_dict['repTrick'], \
ad_para_dict['dropLog']
top_k_str = 'topAll' if top_k is None else 'top' + str(top_k)
s_str = 'S' + str(ad_para_dict['samples_per_query'])
df_str = 'PLD' if PL_D else 'BTD'
prefix = s1.join([prefix, top_k_str, s_str, df_str])
if repTrick: prefix += '_Rep'
if dropLog: prefix += '_DropLog'
list_irfgan_paras_str = prefix
return list_irfgan_paras_str |
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
:return:
| to_para_string | python | wildltr/ptranking | ptranking/ltr_adversarial/listwise/irfgan_list.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/listwise/irfgan_list.py | MIT |
def __init__(self, eval_dict, data_dict, sf_para_dict=None, ad_para_dict=None, optimal_train=False, gpu=False, device=None):
'''
:param optimal_train: training with supervised generator or discriminator
'''
super(IRGAN_List, self).__init__(eval_dict=eval_dict, data_dict=data_dict, gpu=gpu, device=device)
g_sf_para_dict = sf_para_dict
# TODO: support all settings based on NaN checking
d_sf_para_dict = copy.deepcopy(g_sf_para_dict)
d_sf_para_dict[sf_para_dict['sf_id']]['apply_tl_af'] = True
d_sf_para_dict[sf_para_dict['sf_id']]['TL_AF'] = 'S'
self.generator = List_Generator(sf_para_dict=g_sf_para_dict, gpu=gpu, device=device)
self.discriminator = List_Discriminator(sf_para_dict=d_sf_para_dict, gpu=gpu, device=device)
self.super_generator = List_Generator(sf_para_dict=g_sf_para_dict, gpu=gpu, device=device)
self.super_discriminator = List_Discriminator(sf_para_dict=d_sf_para_dict, gpu=gpu, device=device)
self.top_k = ad_para_dict['top_k']
self.d_epoches = ad_para_dict['d_epoches']
self.g_epoches = ad_para_dict['g_epoches']
self.temperature = ad_para_dict['temperature']
self.ad_training_order = ad_para_dict['ad_training_order']
self.samples_per_query = ad_para_dict['samples_per_query']
self.PL_discriminator = ad_para_dict['PL_D']
self.replace_trick_4_generator = ad_para_dict['repTrick']
self.drop_discriminator_log_4_reward = ad_para_dict['dropLog']
self.optimal_train = optimal_train
self.pre_check() |
:param optimal_train: training with supervised generator or discriminator
| __init__ | python | wildltr/ptranking | ptranking/ltr_adversarial/listwise/irgan_list.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/listwise/irgan_list.py | MIT |
def per_query_generation(self, qid=None, batch_ranking=None, batch_label=None, pos_and_neg=None, generator=None,
samples_per_query=None, top_k=None, temperature=None):
'''
:param pos_and_neg: corresponding to discriminator optimization or generator optimization
'''
g_batch_pred = generator.predict(batch_ranking) # [batch, size_ranking]
batch_gen_stochastic_prob = gumbel_softmax(g_batch_pred, samples_per_query=samples_per_query, temperature=temperature, cuda=self.gpu, cuda_device=self.device)
sorted_batch_gen_stochastic_probs, batch_gen_sto_sorted_inds = torch.sort(batch_gen_stochastic_prob, dim=1, descending=True)
if pos_and_neg: # for training discriminator
used_batch_label = batch_label
            ''' Generate truth-ranking based on shuffling ties
            There is no need to first filter out documents labeled '-1', due to the descending sorting and the fact that we only use the top ones.
            BTW, the only required condition is that the number of non-minus-one documents is larger than top_k, which builds upon the customized mask_data()
'''
per_query_label = torch.squeeze(used_batch_label)
list_std_sto_sorted_inds = []
for i in range(samples_per_query):
shuffle_ties_inds = arg_shuffle_ties(per_query_label, descending=True)
list_std_sto_sorted_inds.append(shuffle_ties_inds)
batch_std_sto_sorted_inds = torch.stack(list_std_sto_sorted_inds, dim=0)
list_pos_ranking, list_neg_ranking = [], []
if top_k is None: # using all documents
for i in range(samples_per_query):
pos_inds = batch_std_sto_sorted_inds[i, :]
pos_ranking = batch_ranking[0, pos_inds, :]
list_pos_ranking.append(pos_ranking)
neg_inds = batch_gen_sto_sorted_inds[i, :]
neg_ranking = batch_ranking[0, neg_inds, :]
list_neg_ranking.append(neg_ranking)
else:
for i in range(samples_per_query):
pos_inds = batch_std_sto_sorted_inds[i, 0:top_k]
pos_ranking = batch_ranking[0, pos_inds, :] # sampled sublist of documents
list_pos_ranking.append(pos_ranking)
neg_inds = batch_gen_sto_sorted_inds[i, 0:top_k]
neg_ranking = batch_ranking[0, neg_inds, :]
list_neg_ranking.append(neg_ranking)
batch_std_sample_ranking = torch.stack(list_pos_ranking, dim=0)
batch_gen_sample_ranking = torch.stack(list_neg_ranking, dim=0)
return batch_std_sample_ranking, batch_gen_sample_ranking
else: # for training generator
if top_k is None:
return sorted_batch_gen_stochastic_probs, batch_gen_sto_sorted_inds
else:
list_g_sort_top_preds, list_g_sort_top_inds = [], [] # required to cope with ranking_size mismatch
for i in range(samples_per_query):
neg_inds = batch_gen_sto_sorted_inds[i, 0:top_k]
list_g_sort_top_inds.append(neg_inds)
top_gen_stochastic_probs = sorted_batch_gen_stochastic_probs[i, 0:top_k]
list_g_sort_top_preds.append(top_gen_stochastic_probs)
top_sorted_batch_gen_stochastic_probs = torch.stack(list_g_sort_top_preds, dim=0)
return top_sorted_batch_gen_stochastic_probs, list_g_sort_top_inds |
:param pos_and_neg: corresponding to discriminator optimization or generator optimization
| per_query_generation | python | wildltr/ptranking | ptranking/ltr_adversarial/listwise/irgan_list.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/listwise/irgan_list.py | MIT |
def to_para_string(self, log=False, given_para_dict=None):
"""
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
:return:
"""
# using specified para-dict or inner para-dict
ad_para_dict = given_para_dict if given_para_dict is not None else self.ad_para_dict
s1 = ':' if log else '_'
d_epoches, g_epoches, temperature, ad_training_order = ad_para_dict['d_epoches'], ad_para_dict['g_epoches'],\
ad_para_dict['temperature'], ad_para_dict['ad_training_order']
prefix = s1.join([str(d_epoches), str(g_epoches), '{:,g}'.format(temperature), ad_training_order])
top_k, PL_D, repTrick, dropLog = ad_para_dict['top_k'], ad_para_dict['PL_D'], ad_para_dict['repTrick'], \
ad_para_dict['dropLog']
top_k_str = 'topAll' if top_k is None else 'top' + str(top_k)
s_str = 'S' + str(ad_para_dict['samples_per_query'])
df_str = 'PLD' if PL_D else 'BTD'
prefix = s1.join([prefix, top_k_str, s_str, df_str])
if repTrick: prefix += '_Rep'
if dropLog: prefix += '_DropLog'
list_irgan_paras_str = prefix
return list_irgan_paras_str |
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
:return:
| to_para_string | python | wildltr/ptranking | ptranking/ltr_adversarial/listwise/irgan_list.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/listwise/irgan_list.py | MIT |
def fill_global_buffer(self, train_data, dict_buffer=None):
''' Buffer the number of positive documents, and the number of non-positive documents per query '''
        assert self.data_dict['train_presort'] is True # this is required for efficient truth sampling
if self.data_dict['data_id'] in MSLETOR_SEMI:
for entry in train_data:
qid, _, batch_label = entry[0][0], entry[1], entry[2]
if not qid in dict_buffer:
pos_boolean_mat = torch.gt(batch_label, 0)
num_pos = torch.sum(pos_boolean_mat)
explicit_boolean_mat = torch.ge(batch_label, 0)
num_explicit = torch.sum(explicit_boolean_mat)
ranking_size = batch_label.size(1)
num_neg_unk = ranking_size - num_pos
num_unk = ranking_size - num_explicit
num_unique_labels = torch.unique(batch_label).size(0)
dict_buffer[qid] = (num_pos, num_explicit, num_neg_unk, num_unk, num_unique_labels)
else:
for entry in train_data:
qid, _, batch_label = entry[0][0], entry[1], entry[2]
if not qid in dict_buffer:
pos_boolean_mat = torch.gt(batch_label, 0)
num_pos = torch.sum(pos_boolean_mat)
ranking_size = batch_label.size(1)
num_explicit = ranking_size
num_neg_unk = ranking_size - num_pos
num_unk = 0
num_unique_labels = torch.unique(batch_label).size(0)
dict_buffer[qid] = (num_pos, num_explicit, num_neg_unk, num_unk, num_unique_labels) | Buffer the number of positive documents, and the number of non-positive documents per query | fill_global_buffer | python | wildltr/ptranking | ptranking/ltr_adversarial/pairwise/irfgan_pair.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/pairwise/irfgan_pair.py | MIT |
def mini_max_train(self, train_data=None, generator=None, discriminator=None, global_buffer=None):
'''
        Here we cannot train in the same way as irgan-pair (which still relies on single documents rather than pairs),
        since ir-fgan requires sampling from two distributions.
'''
stop_training = self.train_discriminator_generator_single_step(train_data=train_data, generator=generator,
discriminator=discriminator, global_buffer=global_buffer)
return stop_training |
Here we cannot train in the same way as irgan-pair (which still relies on single documents rather than pairs),
since ir-fgan requires sampling from two distributions.
| mini_max_train | python | wildltr/ptranking | ptranking/ltr_adversarial/pairwise/irfgan_pair.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/pairwise/irfgan_pair.py | MIT |
def train_discriminator_generator_single_step(self, train_data=None, generator=None, discriminator=None,
global_buffer=None):
''' Train both discriminator and generator with a single step per query '''
stop_training = False
generator.train_mode()
for entry in train_data:
qid, batch_ranking, batch_label = entry[0][0], entry[1], entry[2]
if self.gpu: batch_ranking = batch_ranking.type(self.tensor)
sorted_std_labels = torch.squeeze(batch_label, dim=0)
num_pos, num_explicit, num_neg_unk, num_unk, num_unique_labels = global_buffer[qid]
            if num_unique_labels < 2: # check unique values; e.g., all-identical labels such as [1, 1, 1] generate no pairs
continue
true_head_inds, true_tail_inds = generate_true_pairs(qid=qid, sorted_std_labels=sorted_std_labels,
num_pairs=self.samples_per_query, dict_diff=self.dict_diff, global_buffer=global_buffer)
batch_preds = generator.predict(batch_ranking) # [batch, size_ranking]
            # TODO: determine how to apply the activation
point_preds = torch.squeeze(batch_preds)
if torch.isnan(point_preds).any():
print('Including NaN error.')
stop_training = True
return stop_training
#--generate samples
if 'BT' == self.g_key:
mat_diffs = torch.unsqueeze(point_preds, dim=1) - torch.unsqueeze(point_preds, dim=0)
mat_bt_probs = torch.sigmoid(mat_diffs) # default delta=1.0
fake_head_inds, fake_tail_inds = sample_points_Bernoulli(mat_bt_probs, num_pairs=self.samples_per_query)
else:
raise NotImplementedError
#--
# real data and generated data
true_head_docs = batch_ranking[:, true_head_inds, :]
true_tail_docs = batch_ranking[:, true_tail_inds, :]
fake_head_docs = batch_ranking[:, fake_head_inds, :]
fake_tail_docs = batch_ranking[:, fake_tail_inds, :]
''' optimize discriminator '''
discriminator.train_mode()
true_head_preds = discriminator.predict(true_head_docs)
true_tail_preds = discriminator.predict(true_tail_docs)
true_preds = true_head_preds - true_tail_preds
fake_head_preds = discriminator.predict(fake_head_docs)
fake_tail_preds = discriminator.predict(fake_tail_docs)
fake_preds = fake_head_preds - fake_tail_preds
dis_loss = torch.mean(self.conjugate_f(self.activation_f(fake_preds))) - torch.mean(self.activation_f(true_preds)) # objective to minimize w.r.t. discriminator
discriminator.optimizer.zero_grad()
dis_loss.backward()
discriminator.optimizer.step()
''' optimize generator ''' #
discriminator.eval_mode()
d_fake_head_preds = discriminator.predict(fake_head_docs)
d_fake_tail_preds = discriminator.predict(fake_tail_docs)
d_fake_preds = self.conjugate_f(self.activation_f(d_fake_head_preds - d_fake_tail_preds))
if 'BT' == self.g_key:
log_g_probs = torch.log(mat_bt_probs[fake_head_inds, fake_tail_inds].view(1, -1))
else:
raise NotImplementedError
g_batch_loss = -torch.mean(log_g_probs * d_fake_preds)
generator.optimizer.zero_grad()
g_batch_loss.backward()
generator.optimizer.step()
        # after iterating over train_data
return stop_training | Train both discriminator and generator with a single step per query | train_discriminator_generator_single_step | python | wildltr/ptranking | ptranking/ltr_adversarial/pairwise/irfgan_pair.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/pairwise/irfgan_pair.py | MIT |
def to_para_string(self, log=False, given_para_dict=None):
"""
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
:return:
"""
# using specified para-dict or inner para-dict
ad_para_dict = given_para_dict if given_para_dict is not None else self.ad_para_dict
s1 = ':' if log else '_'
f_div_id = ad_para_dict['f_div_id']
pair_irfgan_paras_str = f_div_id
return pair_irfgan_paras_str |
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
:return:
| to_para_string | python | wildltr/ptranking | ptranking/ltr_adversarial/pairwise/irfgan_pair.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/pairwise/irfgan_pair.py | MIT |
def __init__(self, eval_dict, data_dict, sf_para_dict=None, ad_para_dict=None, gpu=False, device=None):
'''
:param sf_para_dict:
        :param temperature: according to the description around Eq-10, temperature is deployed, though it is not used within the released code
'''
super(IRGAN_Pair, self).__init__(eval_dict=eval_dict, data_dict=data_dict, gpu=gpu, device=device)
self.torch_one = torch.tensor([1.0], device=self.device)
self.torch_zero = torch.tensor([0.0], device=self.device)
sf_para_dict[sf_para_dict['sf_id']]['apply_tl_af'] = True
g_sf_para_dict = sf_para_dict
d_sf_para_dict = copy.deepcopy(g_sf_para_dict)
d_sf_para_dict[sf_para_dict['sf_id']]['apply_tl_af'] = False
#d_sf_para_dict['ffnns']['TL_AF'] = 'S' # as required by the IRGAN model
self.generator = IRGAN_Pair_Generator(sf_para_dict=g_sf_para_dict, temperature=ad_para_dict['temperature'], gpu=gpu, device=device)
self.discriminator = IRGAN_Pair_Discriminator(sf_para_dict=d_sf_para_dict, gpu=gpu, device=device)
self.loss_type = ad_para_dict['loss_type']
self.d_epoches = ad_para_dict['d_epoches']
self.g_epoches = ad_para_dict['g_epoches']
self.temperature = ad_para_dict['temperature']
self.ad_training_order = ad_para_dict['ad_training_order']
self.samples_per_query = ad_para_dict['samples_per_query'] |
:param sf_para_dict:
:param temperature: according to the description around Eq-10, temperature is deployed, though it is not used within the released code
| __init__ | python | wildltr/ptranking | ptranking/ltr_adversarial/pairwise/irgan_pair.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/pairwise/irgan_pair.py | MIT |
def fill_global_buffer(self, train_data, dict_buffer=None):
''' Buffer the number of positive documents, and the number of non-positive documents per query '''
        assert self.data_dict['train_presort'] is True # this is required for efficient truth sampling
for entry in train_data:
qid, _, batch_label = entry[0], entry[1], entry[2]
if not qid in dict_buffer:
boolean_mat = torch.gt(batch_label, 0)
num_pos = torch.sum(boolean_mat)
ranking_size = batch_label.size(1)
num_neg_unk = ranking_size - num_pos
dict_buffer[qid] = (num_pos, num_neg_unk) | Buffer the number of positive documents, and the number of non-positive documents per query | fill_global_buffer | python | wildltr/ptranking | ptranking/ltr_adversarial/pairwise/irgan_pair.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/pairwise/irgan_pair.py | MIT |
def generate_data(self, train_data=None, generator=None, global_buffer=None):
'''
Sampling for training discriminator
        This is a re-implementation of the released irgan-tensorflow, but it seems that this part of irgan-tensorflow
        is not consistent with the description of the paper (i.e., the description below Eq. 7)
'''
generator.eval_mode()
generated_data = dict()
for entry in train_data:
qid, batch_ranking, batch_label = entry[0], entry[1], entry[2]
if self.gpu: batch_ranking = batch_ranking.to(self.device)
samples = self.per_query_generation(qid=qid, batch_ranking=batch_ranking, generator=generator,
global_buffer=global_buffer)
if samples is not None: generated_data[qid] = samples
return generated_data |
Sampling for training discriminator
This is a re-implementation of the released irgan-tensorflow, but it seems that this part of irgan-tensorflow
is not consistent with the description of the paper (i.e., the description below Eq. 7)
| generate_data | python | wildltr/ptranking | ptranking/ltr_adversarial/pairwise/irgan_pair.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/pairwise/irgan_pair.py | MIT |
def to_para_string(self, log=False, given_para_dict=None):
"""
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
:return:
"""
# using specified para-dict or inner para-dict
ad_para_dict = given_para_dict if given_para_dict is not None else self.ad_para_dict
s1 = ':' if log else '_'
d_epoches, g_epoches, temperature, ad_training_order, loss_type, samples_per_query = \
ad_para_dict['d_epoches'], ad_para_dict['g_epoches'], ad_para_dict['temperature'],\
ad_para_dict['ad_training_order'], ad_para_dict['loss_type'], ad_para_dict['samples_per_query']
pair_irgan_paras_str = s1.join([str(d_epoches), str(g_epoches), '{:,g}'.format(temperature),
ad_training_order, loss_type, str(samples_per_query)])
return pair_irgan_paras_str |
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
:return:
| to_para_string | python | wildltr/ptranking | ptranking/ltr_adversarial/pairwise/irgan_pair.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/pairwise/irgan_pair.py | MIT |
def fill_global_buffer(self, train_data, dict_buffer=None):
''' Buffer the number of positive documents per query '''
        assert self.data_dict['train_presort'] is True # this is required for efficient truth sampling
for entry in train_data:
qid, _, batch_label = entry[0], entry[1], entry[2]
if not qid in dict_buffer:
boolean_mat = torch.gt(batch_label, 0)
num_pos = torch.sum(boolean_mat) # number of positive documents
dict_buffer[qid] = num_pos | Buffer the number of positive documents per query | fill_global_buffer | python | wildltr/ptranking | ptranking/ltr_adversarial/pointwise/irfgan_point.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/pointwise/irfgan_point.py | MIT |
def train_discriminator_generator_single_step(self, train_data=None, generator=None, discriminator=None,
global_buffer=None):
''' Train both discriminator and generator with a single step per query '''
generator.train_mode()
for entry in train_data:
qid, batch_ranking, batch_label = entry[0], entry[1], entry[2]
if self.gpu: batch_ranking = batch_ranking.to(self.device)
num_pos = global_buffer[qid]
if num_pos < 1: continue
valid_num = min(num_pos, self.samples_per_query)
true_inds = torch.randperm(num_pos)[0:valid_num] # randomly select positive documents
batch_preds = generator.predict(batch_ranking) # [batch, size_ranking]
pred_probs = F.softmax(torch.squeeze(batch_preds), dim=0)
if torch.isnan(pred_probs).any():
stop_training = True
return stop_training
fake_inds = torch.multinomial(pred_probs, valid_num, replacement=False)
#real data and generated data
true_docs = batch_ranking[0, true_inds, :]
fake_docs = batch_ranking[0, fake_inds, :]
true_docs = torch.unsqueeze(true_docs, dim=0)
fake_docs = torch.unsqueeze(fake_docs, dim=0)
''' optimize discriminator '''
discriminator.train_mode()
true_preds = discriminator.predict(true_docs)
fake_preds = discriminator.predict(fake_docs)
dis_loss = torch.mean(self.conjugate_f(self.activation_f(fake_preds))) - torch.mean(self.activation_f(true_preds)) # objective to minimize w.r.t. discriminator
discriminator.optimizer.zero_grad()
dis_loss.backward()
discriminator.optimizer.step()
''' optimize generator ''' #
discriminator.eval_mode()
d_fake_preds = discriminator.predict(fake_docs)
d_fake_preds = self.conjugate_f(self.activation_f(d_fake_preds))
ger_loss = -torch.mean((torch.log(pred_probs[fake_inds]) * d_fake_preds))
generator.optimizer.zero_grad()
ger_loss.backward()
generator.optimizer.step()
stop_training = False
return stop_training | Train both discriminator and generator with a single step per query | train_discriminator_generator_single_step | python | wildltr/ptranking | ptranking/ltr_adversarial/pointwise/irfgan_point.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/pointwise/irfgan_point.py | MIT |
def to_para_string(self, log=False, given_para_dict=None):
"""
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
:return:
"""
# using specified para-dict or inner para-dict
ad_para_dict = given_para_dict if given_para_dict is not None else self.ad_para_dict
s1 = ':' if log else '_'
f_div_id = ad_para_dict['f_div_id']
point_irfgan_paras_str = f_div_id
return point_irfgan_paras_str |
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
:return:
| to_para_string | python | wildltr/ptranking | ptranking/ltr_adversarial/pointwise/irfgan_point.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/pointwise/irfgan_point.py | MIT |
def __init__(self, eval_dict, data_dict, sf_para_dict=None, ad_para_dict=None, gpu=False, device=None):
'''
        :param ad_training_order: really matters; DG is preferred over GD
'''
super(IRGAN_Point, self).__init__(eval_dict=eval_dict, data_dict=data_dict, gpu=gpu, device=device)
''' required final layer setting for Point_IR_GAN '''
# the setting of 'apply_tl_af=False' is due to the later application of softmax function w.r.t. all documents
        # TODO: experiments show it is quite important to be True; otherwise NaN issues arise.
assert sf_para_dict[sf_para_dict['sf_id']]['apply_tl_af'] == True # local assignment affects the grid-evaluation
g_sf_para_dict = sf_para_dict
d_sf_para_dict = copy.deepcopy(g_sf_para_dict)
#d_sf_para_dict['ffnns']['apply_tl_af'] = True
d_sf_para_dict[sf_para_dict['sf_id']]['TL_AF'] = 'S' # as required by the IRGAN model
self.generator = IRGAN_Point_Generator(sf_para_dict=g_sf_para_dict, temperature=ad_para_dict['temperature'], gpu=gpu, device=device)
self.discriminator = IRGAN_Point_Discriminator(sf_para_dict=d_sf_para_dict, gpu=gpu, device=device)
self.d_epoches = ad_para_dict['d_epoches']
self.g_epoches = ad_para_dict['g_epoches']
self.temperature = ad_para_dict['temperature']
self.ad_training_order = ad_para_dict['ad_training_order']
self.samples_per_query = ad_para_dict['samples_per_query'] |
:param ad_training_order: really matters; DG is preferred over GD
| __init__ | python | wildltr/ptranking | ptranking/ltr_adversarial/pointwise/irgan_point.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/pointwise/irgan_point.py | MIT |
def fill_global_buffer(self, train_data, dict_buffer=None):
''' Buffer the number of positive documents per query '''
        assert self.data_dict['train_presort'] is True # this is required for efficient truth sampling
for entry in train_data:
qid, _, batch_label = entry[0], entry[1], entry[2]
if not qid in dict_buffer:
boolean_mat = torch.gt(batch_label, 0)
num_pos = torch.sum(boolean_mat) # number of positive documents
dict_buffer[qid] = num_pos | Buffer the number of positive documents per query | fill_global_buffer | python | wildltr/ptranking | ptranking/ltr_adversarial/pointwise/irgan_point.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/pointwise/irgan_point.py | MIT |
def to_para_string(self, log=False, given_para_dict=None):
"""
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
"""
# using specified para-dict or inner para-dict
ad_para_dict = given_para_dict if given_para_dict is not None else self.ad_para_dict
s1 = ':' if log else '_'
d_epoches, g_epoches, temperature, ad_training_order, samples_per_query = ad_para_dict['d_epoches'],\
ad_para_dict['g_epoches'], ad_para_dict['temperature'],\
ad_para_dict['ad_training_order'], ad_para_dict['samples_per_query']
irgan_point_paras_str = s1.join([str(d_epoches), str(g_epoches), '{:,g}'.format(temperature),
ad_training_order, str(samples_per_query)])
return irgan_point_paras_str |
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
| to_para_string | python | wildltr/ptranking | ptranking/ltr_adversarial/pointwise/irgan_point.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/pointwise/irgan_point.py | MIT |
def get_f_divergence_functions(f_div_str=None):
'''
the activation function is chosen as a monotone increasing function
'''
if 'TVar' == f_div_str: # Total variation
def activation_f(v):
return 0.5 * torch.tanh(v)
def conjugate_f(t):
return t
elif 'KL' == f_div_str: # Kullback-Leibler
def activation_f(v):
return v
def conjugate_f(t):
return torch.exp(t-1)
elif 'RKL' == f_div_str: # Reverse KL
def activation_f(v):
return -torch.exp(-v)
def conjugate_f(t):
return -1.0 - torch.log(-t)
elif 'PC' == f_div_str: # Pearson chi-square
def activation_f(v):
return v
def conjugate_f(t):
return 0.25 * torch.pow(t, exponent=2.0) + t
elif 'NC' == f_div_str: # Neyman chi-square
def activation_f(v):
return 1.0 - torch.exp(-v)
def conjugate_f(t):
return 2.0 - 2.0 * torch.sqrt(1.0-t)
elif 'SH' == f_div_str: # Squared Hellinger
def activation_f(v):
return 1.0 - torch.exp(-v)
def conjugate_f(t):
return t/(1.0-t)
elif 'JS' == f_div_str: # Jensen-Shannon
def activation_f(v):
return torch.log(torch.tensor(2.0)) - torch.log(1.0 + torch.exp(-v))
def conjugate_f(t):
return -torch.log(2.0 - torch.exp(t))
elif 'JSW' == f_div_str: # Jensen-Shannon-weighted
def activation_f(v):
return -math.pi*torch.log(math.pi) - torch.log(1.0+torch.exp(-v))
def conjugate_f(t):
return (1.0-math.pi)*torch.log((1.0-math.pi)/(1.0-math.pi*torch.exp(t/math.pi)))
elif 'GAN' == f_div_str: # GAN
def activation_f(v):
return -torch.log(1.0 + torch.exp(-v))
def conjugate_f(t):
return -torch.log(1.0 - torch.exp(t))
return activation_f, conjugate_f |
the activation function is chosen as a monotone increasing function
| get_f_divergence_functions | python | wildltr/ptranking | ptranking/ltr_adversarial/util/f_divergence.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/util/f_divergence.py | MIT |
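A minimal usage sketch of these function pairs (toy discriminator outputs; nothing here beyond the function above is repository code): for a chosen divergence, the discriminator objective of the variational f-divergence bound is assembled exactly as in the training loops earlier in this section.

```python
import torch

# assumes get_f_divergence_functions (defined above) is in scope
activation_f, conjugate_f = get_f_divergence_functions('KL')

true_preds = torch.randn(16, 1)  # raw discriminator outputs on true samples
fake_preds = torch.randn(16, 1)  # raw discriminator outputs on generated samples

# minimized w.r.t. the discriminator; its negation is the variational lower bound
dis_loss = torch.mean(conjugate_f(activation_f(fake_preds))) \
           - torch.mean(activation_f(true_preds))
```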
def gumbel_softmax(logits, samples_per_query, temperature=1.0, cuda=False, cuda_device=None):
'''
:param logits: [1, ranking_size]
    :param samples_per_query: number of stochastic rankings to generate
:param temperature:
:return:
'''
assert 1 == logits.size(0) and 2 == len(logits.size())
unif = torch.rand(samples_per_query, logits.size(1)) # [num_samples_per_query, ranking_size]
if cuda: unif = unif.to(cuda_device)
    gumbel = -torch.log(-torch.log(unif + EPS) + EPS) # Sample from the Gumbel distribution
logit = (logits + gumbel) / temperature
y = F.softmax(logit, dim=1)
    # equivalently: return F.softmax(logit, dim=1)
return y |
:param logits: [1, ranking_size]
:param samples_per_query: number of stochastic rankings to generate
:param temperature:
:return:
| gumbel_softmax | python | wildltr/ptranking | ptranking/ltr_adversarial/util/list_sampling.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/util/list_sampling.py | MIT |
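A usage sketch (toy logits; illustrative only): several stochastic rankings are drawn for one query, and the sampled document orders are recovered by sorting the Gumbel-perturbed softmax outputs, as the listwise generators above do.

```python
import torch

# assumes gumbel_softmax (defined above) is in scope
logits = torch.randn(1, 10)  # [1, ranking_size] generator scores for one query
probs = gumbel_softmax(logits, samples_per_query=4, temperature=1.0)   # [4, 10]
sorted_probs, sorted_inds = torch.sort(probs, dim=1, descending=True)  # 4 sampled rankings
```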
def sample_ranking_PL_gumbel_softmax(batch_preds, num_sample_ranking=1, only_indices=True, temperature=1.0, gpu=False, device=None):
'''
    Sample a ranking based on the stochastic Plackett-Luce model, where Gumbel noise is added
@param batch_preds: [1, ranking_size] vector of relevance predictions for documents associated with the same query
@param num_sample_ranking: number of rankings to sample
@param only_indices: only return the indices or not
@return:
'''
if num_sample_ranking > 1:
target_batch_preds = batch_preds.expand(num_sample_ranking, -1)
else:
target_batch_preds = batch_preds
unif = torch.rand(target_batch_preds.size()) # [num_samples_per_query, ranking_size]
if gpu: unif = unif.to(device)
    gumbel = -torch.log(-torch.log(unif + EPS) + EPS) # Sample from the Gumbel distribution
if only_indices:
batch_logits = target_batch_preds + gumbel
_, batch_indices = torch.sort(batch_logits, dim=1, descending=True)
return batch_indices
else:
if 1.0 == temperature:
batch_logits = target_batch_preds + gumbel
else:
batch_logits = (target_batch_preds + gumbel) / temperature
batch_logits_sorted, batch_indices = torch.sort(batch_logits, dim=1, descending=True)
return batch_indices, batch_logits_sorted |
Sample a ranking based on the stochastic Plackett-Luce model, where Gumbel noise is added
@param batch_preds: [1, ranking_size] vector of relevance predictions for documents associated with the same query
@param num_sample_ranking: number of rankings to sample
@param only_indices: only return the indices or not
@return:
| sample_ranking_PL_gumbel_softmax | python | wildltr/ptranking | ptranking/ltr_adversarial/util/list_sampling.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/util/list_sampling.py | MIT |
def arg_shuffle_ties(target_batch_stds, descending=True, gpu=False, device=None):
    ''' Shuffle ties, and return the corresponding indices '''
batch_size, ranking_size = target_batch_stds.size()
if batch_size > 1:
list_rperms = []
for _ in range(batch_size):
list_rperms.append(torch.randperm(ranking_size))
batch_rperms = torch.stack(list_rperms, dim=0)
else:
batch_rperms = torch.randperm(ranking_size).view(1, -1)
if gpu: batch_rperms = batch_rperms.to(device)
shuffled_target_batch_stds = torch.gather(target_batch_stds, dim=1, index=batch_rperms)
batch_sorted_inds = torch.argsort(shuffled_target_batch_stds, descending=descending)
batch_shuffle_ties_inds = torch.gather(batch_rperms, dim=1, index=batch_sorted_inds)
    return batch_shuffle_ties_inds | Shuffle ties, and return the corresponding indices | arg_shuffle_ties | python | wildltr/ptranking | ptranking/ltr_adversarial/util/list_sampling.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/util/list_sampling.py | MIT |
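A small demonstration (toy labels; the sampled permutation is one possible draw): documents sharing a label are randomly permuted among themselves, while the descending label order is preserved.

```python
import torch

# assumes arg_shuffle_ties (defined above) is in scope
labels = torch.tensor([[2.0, 1.0, 1.0, 0.0, 2.0]])
inds = arg_shuffle_ties(labels, descending=True)  # e.g., tensor([[4, 0, 2, 1, 3]])
print(torch.gather(labels, 1, inds))              # always [[2., 2., 1., 1., 0.]]
```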
def get_weighted_clipped_pos_diffs(qid, sorted_std_labels, global_buffer=None):
'''
Get total true pairs based on explicit labels.
In particular, the difference values are discounted based on positions.
'''
num_pos, num_explicit, num_neg_unk, num_unk, num_unique_labels = global_buffer[qid]
mat_diffs = torch.unsqueeze(sorted_std_labels, dim=1) - torch.unsqueeze(sorted_std_labels, dim=0)
pos_diffs = torch.where(mat_diffs < 0, tor_zero, mat_diffs)
clipped_pos_diffs = pos_diffs[0:num_pos, 0:num_explicit]
total_true_pairs = torch.nonzero(clipped_pos_diffs, as_tuple=False).size(0)
r_discounts = torch.arange(num_explicit).type(tensor)
r_discounts = torch.log2(2.0 + r_discounts)
r_discounts = torch.unsqueeze(r_discounts, dim=0)
c_discounts = torch.arange(num_pos).type(tensor)
c_discounts = torch.log2(2.0 + c_discounts)
c_discounts = torch.unsqueeze(c_discounts, dim=1)
weighted_clipped_pos_diffs = clipped_pos_diffs / r_discounts
weighted_clipped_pos_diffs = weighted_clipped_pos_diffs / c_discounts
return weighted_clipped_pos_diffs, total_true_pairs, num_explicit |
Get total true pairs based on explicit labels.
In particular, the difference values are discounted based on positions.
| get_weighted_clipped_pos_diffs | python | wildltr/ptranking | ptranking/ltr_adversarial/util/pair_sampling.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/util/pair_sampling.py | MIT |
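A self-contained sketch of the discounting step (local stand-ins replace the module-level `tor_zero`/`tensor` globals and the buffered counts): each positive label difference is down-weighted by log-scale discounts on both the row (positive document) and column (candidate document) positions.

```python
import torch

sorted_std_labels = torch.tensor([2.0, 1.0, 1.0, 0.0])  # presorted, descending
num_pos, num_explicit = 3, 4                             # stand-ins for the buffered counts

mat_diffs = sorted_std_labels.unsqueeze(1) - sorted_std_labels.unsqueeze(0)
pos_diffs = torch.clamp(mat_diffs, min=0.0)              # keep only positive differences
clipped = pos_diffs[0:num_pos, 0:num_explicit]

r_discounts = torch.log2(2.0 + torch.arange(num_explicit, dtype=torch.float)).unsqueeze(0)
c_discounts = torch.log2(2.0 + torch.arange(num_pos, dtype=torch.float)).unsqueeze(1)
weighted = clipped / r_discounts / c_discounts           # position-discounted pair weights
```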
def sample_pairs_BT(point_vals=None, num_pairs=None):
    ''' The probability of observing a pair of ordered documents is formulated based on the Bradley-Terry model, i.e., p(d_i > d_j)=1/(1+exp(-delta(s_i - s_j))) '''
# the rank information is not taken into account, and all pairs are treated equally.
#total_items = point_vals.size(0)
mat_diffs = torch.unsqueeze(point_vals, dim=1) - torch.unsqueeze(point_vals, dim=0)
mat_bt_probs = torch.sigmoid(mat_diffs) # default delta=1.0
"""
B = tdist.Binomial(1, mat_bt_probs.view(1, -1))
b_res = B.sample()
num_unique_pairs = torch.nonzero(b_res).size(0)
if num_unique_pairs < num_pairs:
res = torch.multinomial(b_res, num_pairs, replacement=True)
else:
res = torch.multinomial(b_res, num_pairs, replacement=False)
res = torch.squeeze(res)
head_inds = res / total_items
tail_inds = res % total_items
"""
head_inds, tail_inds = sample_points_Bernoulli(mat_bt_probs, num_pairs)
    return head_inds, tail_inds | The probability of observing a pair of ordered documents is formulated based on the Bradley-Terry model, i.e., p(d_i > d_j)=1/(1+exp(-delta(s_i - s_j))) | sample_pairs_BT | python | wildltr/ptranking | ptranking/ltr_adversarial/util/pair_sampling.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_adversarial/util/pair_sampling.py | MIT |
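A self-contained sketch of the Bradley-Terry step (using `torch.bernoulli` in place of the helper `sample_points_Bernoulli`, which is defined elsewhere in this module): each ordered pair (i, j) is kept with probability sigmoid(s_i - s_j).

```python
import torch

point_vals = torch.tensor([1.5, 0.3, -0.2, 0.8])
mat_diffs = point_vals.unsqueeze(1) - point_vals.unsqueeze(0)
mat_bt_probs = torch.sigmoid(mat_diffs)            # P(d_i > d_j) under Bradley-Terry

draws = torch.bernoulli(mat_bt_probs)              # one Bernoulli draw per ordered pair
head_inds, tail_inds = torch.nonzero(draws, as_tuple=True)
```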
def ini_listsf(self, num_features=None, n_heads=2, encoder_layers=2, dropout=0.1, encoder_type=None,
ff_dims=[256, 128, 64], out_dim=1, AF='R', TL_AF='GE', apply_tl_af=False,
BN=True, bn_type=None, bn_affine=False):
'''
        Initialize the univariate scoring function for diversified ranking.
'''
# the input size according to the used dataset
encoder_num_features, fc_num_features = num_features * 3, num_features * 6
''' Component-1: stacked multi-head self-attention (MHSA) blocks '''
mhsa = MultiheadAttention(hid_dim=encoder_num_features, n_heads=n_heads, dropout=dropout, device=self.device)
if 'AllRank' == encoder_type:
fc = PositionwiseFeedForward(encoder_num_features, hid_dim=encoder_num_features, dropout=dropout)
encoder = Encoder(layer=EncoderLayer(hid_dim=encoder_num_features, mhsa=dc(mhsa), encoder_type=encoder_type,
fc=fc, dropout=dropout),
num_layers=encoder_layers, encoder_type=encoder_type)
        elif 'DASALC' == encoder_type: # we note that the feature normalization strategy is different from AllRank
encoder = Encoder(layer=EncoderLayer(hid_dim=encoder_num_features, mhsa=dc(mhsa), encoder_type=encoder_type),
num_layers=encoder_layers, encoder_type=encoder_type)
elif 'AttnDIN' == encoder_type:
encoder = Encoder(layer=EncoderLayer(hid_dim=encoder_num_features, mhsa=dc(mhsa), encoder_type=encoder_type),
num_layers=encoder_layers, encoder_type=encoder_type)
else:
raise NotImplementedError
''' Component-2: univariate scoring function '''
uni_ff_dims = [fc_num_features]
uni_ff_dims.extend(ff_dims)
uni_ff_dims.append(out_dim)
uni_sf = get_stacked_FFNet(ff_dims=uni_ff_dims, AF=AF, TL_AF=TL_AF, apply_tl_af=apply_tl_af,
BN=BN, bn_type=bn_type, bn_affine=bn_affine, device=self.device)
if self.gpu:
encoder = encoder.to(self.device)
uni_sf = uni_sf.to(self.device)
list_sf = {'encoder': encoder, 'uni_sf': uni_sf}
return list_sf |
Initialize the univariate scoring function for diversified ranking.
| ini_listsf | python | wildltr/ptranking | ptranking/ltr_diversification/base/div_list_ranker.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/base/div_list_ranker.py | MIT |
def get_diff_normal(self, batch_mus, batch_vars, batch_cocos=None):
'''
The difference of two normal random variables is another normal random variable. In particular, we consider two
cases: (1) correlated (2) independent.
@param batch_mus: the predicted mean
@param batch_vars: the predicted variance
@param batch_cocos: the predicted correlation coefficient in [-1, 1], which is formulated as the cosine-similarity of corresponding vectors.
        @return: the mean and variance of the resulting normal variable.
'''
# mu_i - mu_j
batch_pairsub_mus = torch.unsqueeze(batch_mus, dim=2) - torch.unsqueeze(batch_mus, dim=1)
        # variance w.r.t. S_i - S_j, which is equal to: (1) sigma^2_i + sigma^2_j - \rho_ij*sigma_i*sigma_j (2) sigma^2_i + sigma^2_j
if batch_cocos is not None:
batch_std_vars = torch.pow(batch_vars, .5)
batch_pairsub_vars = torch.unsqueeze(batch_vars, dim=2) + torch.unsqueeze(batch_vars, dim=1) - \
batch_cocos * torch.bmm(torch.unsqueeze(batch_std_vars, dim=2),
torch.unsqueeze(batch_std_vars, dim=1))
else:
batch_pairsub_vars = torch.unsqueeze(batch_vars, dim=2) + torch.unsqueeze(batch_vars, dim=1)
return batch_pairsub_mus, batch_pairsub_vars |
The difference of two normal random variables is another normal random variable. In particular, we consider two
cases: (1) correlated (2) independent.
@param batch_mus: the predicted mean
@param batch_vars: the predicted variance
@param batch_cocos: the predicted correlation coefficient in [-1, 1], which is formulated as the cosine-similarity of corresponding vectors.
@return: the mean and variance of the resulting normal variable.
| get_diff_normal | python | wildltr/ptranking | ptranking/ltr_diversification/base/div_mdn_ranker.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/base/div_mdn_ranker.py | MIT |
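For reference (our note, not from the repository): the textbook identity for jointly normal scores is

```latex
\mathbb{E}[S_i - S_j] = \mu_i - \mu_j, \qquad
\operatorname{Var}(S_i - S_j) = \sigma_i^2 + \sigma_j^2 - 2\,\rho_{ij}\,\sigma_i\,\sigma_j .
```

The implementation above subtracts `batch_cocos * sigma_i * sigma_j` without the factor of 2, which matches the identity only if `batch_cocos` is taken to absorb that factor; whether this is intentional is an assumption on our part.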
def ini_listsf(self, num_features=None, n_heads=2, encoder_layers=2, dropout=0.1, encoder_type=None,
ff_dims=[256, 128, 64], out_dim=1, AF='R', TL_AF='GE', apply_tl_af=False,
BN=True, bn_type=None, bn_affine=False):
'''
        Initialize the univariate scoring function for diversified ranking.
'''
# the input size according to the used dataset
encoder_num_features, fc_num_features = num_features * 3, num_features * 6
''' Component-1: stacked multi-head self-attention (MHSA) blocks '''
mhsa = MultiheadAttention(hid_dim=encoder_num_features, n_heads=n_heads, dropout=dropout, device=self.device)
if 'AllRank' == encoder_type:
fc = PositionwiseFeedForward(encoder_num_features, hid_dim=encoder_num_features, dropout=dropout)
encoder = Encoder(layer=EncoderLayer(hid_dim=encoder_num_features, mhsa=dc(mhsa), encoder_type=encoder_type,
fc=fc, dropout=dropout),
num_layers=encoder_layers, encoder_type=encoder_type)
        elif 'DASALC' == encoder_type: # we note that the feature normalization strategy is different from AllRank
encoder = Encoder(layer=EncoderLayer(hid_dim=encoder_num_features, mhsa=dc(mhsa), encoder_type=encoder_type),
num_layers=encoder_layers, encoder_type=encoder_type)
elif 'AttnDIN' == encoder_type:
encoder = Encoder(layer=EncoderLayer(hid_dim=encoder_num_features, mhsa=dc(mhsa), encoder_type=encoder_type),
num_layers=encoder_layers, encoder_type=encoder_type)
else:
raise NotImplementedError
''' Component-2: univariate scoring function '''
uni_ff_dims = [fc_num_features]
uni_ff_dims.extend(ff_dims)
uni_ff_dims.append(out_dim)
uni_sf = get_stacked_FFNet(ff_dims=uni_ff_dims, AF=AF, TL_AF=TL_AF, apply_tl_af=apply_tl_af,
BN=BN, bn_type=bn_type, bn_affine=bn_affine, device=self.device)
''' Component-3: stacked feed-forward layers for co-variance prediction '''
if self.sf_id.endswith("co"):
co_ff_dims = [fc_num_features]
co_ff_dims.extend(ff_dims)
co_ffnns = get_stacked_FFNet(ff_dims=co_ff_dims, AF=AF, apply_tl_af=False, dropout=dropout,
BN=BN, bn_type=bn_type, bn_affine=bn_affine, device=self.device)
if self.gpu:
encoder = encoder.to(self.device)
uni_sf = uni_sf.to(self.device)
if self.sf_id.endswith("co"): co_ffnns = co_ffnns.to(self.device)
if self.sf_id.endswith("co"):
list_sf = {'encoder': encoder, 'uni_sf': uni_sf, 'co_ffnns':co_ffnns}
else:
list_sf = {'encoder': encoder, 'uni_sf': uni_sf}
return list_sf |
Initialize the univariate scoring function for diversified ranking.
| ini_listsf | python | wildltr/ptranking | ptranking/ltr_diversification/base/div_mdn_ranker.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/base/div_mdn_ranker.py | MIT |
def div_predict(self, q_repr, doc_reprs):
'''
        The relevance prediction in the context of diversified ranking.
@param q_repr:
@param doc_reprs:
@return:
'''
if self.sf_id.endswith("co"):
batch_mus, batch_vars, batch_cocos = self.div_forward(q_repr, doc_reprs)
else:
batch_cocos = None
batch_mus, batch_vars = self.div_forward(q_repr, doc_reprs)
if 'RERAR' == self.sort_id: # reciprocal_expected_rank_as_relevance (RERAR)
''' Expected Ranks '''
batch_expt_ranks = \
get_expected_rank(batch_mus=batch_mus, batch_vars=batch_vars, batch_cocos=batch_cocos, return_cdf=False)
batch_RERAR = 1.0 / batch_expt_ranks
return batch_RERAR
elif 'ExpRele' == self.sort_id:
return batch_mus
        elif 'RiskAware' == self.sort_id: # TODO: integrate coco into the ranking
return batch_mus - self.b*batch_vars
else:
raise NotImplementedError |
The relevance prediction in the context of diversified ranking.
@param q_repr:
@param doc_reprs:
@return:
| div_predict | python | wildltr/ptranking | ptranking/ltr_diversification/base/div_mdn_ranker.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/base/div_mdn_ranker.py | MIT |
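A toy illustration of the 'RiskAware' branch (illustrative numbers; `b` is the risk-aversion weight): between documents of equal expected relevance, the one with higher predicted variance is demoted.

```python
import torch

batch_mus = torch.tensor([[0.9, 0.9, 0.5]])
batch_vars = torch.tensor([[0.4, 0.1, 0.05]])
b = 1.0
risk_aware_scores = batch_mus - b * batch_vars  # tensor([[0.5000, 0.8000, 0.4500]])
```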
def default_pointsf_para_dict(self):
"""
The default setting of the hyper-parameters of the stump neural scoring function.
"""
self.sf_para_dict = dict()
if self.use_json:
opt = self.json_dict['opt'][0]
lr = self.json_dict['lr'][0]
pointsf_json_dict = self.json_dict[self.sf_id]
num_layers = pointsf_json_dict['layers'][0]
af = pointsf_json_dict['AF'][0]
apply_tl_af = pointsf_json_dict['apply_tl_af'][0]
tl_af = pointsf_json_dict['TL_AF'][0] if apply_tl_af else None
BN = pointsf_json_dict['BN'][0]
bn_type = pointsf_json_dict['bn_type'][0] if BN else None
bn_affine = pointsf_json_dict['bn_affine'][0] if BN else None
self.sf_para_dict['opt'] = opt
self.sf_para_dict['lr'] = lr
pointsf_para_dict = dict(num_layers=num_layers, AF=af, TL_AF=tl_af, apply_tl_af=apply_tl_af,
BN=BN, bn_type=bn_type, bn_affine=bn_affine)
self.sf_para_dict['sf_id'] = self.sf_id
self.sf_para_dict[self.sf_id] = pointsf_para_dict
else:
# optimization-specific setting
self.sf_para_dict['opt'] = 'Adagrad' # Adam | RMS | Adagrad
self.sf_para_dict['lr'] = 0.001 # learning rate
# common settings for a scoring function based on feed-forward neural networks
pointsf_para_dict = dict(num_layers=5, AF='GE', TL_AF='GE', apply_tl_af=False,
BN=True, bn_type='BN', bn_affine=True)
self.sf_para_dict['sf_id'] = self.sf_id
self.sf_para_dict[self.sf_id] = pointsf_para_dict
return self.sf_para_dict |
The default setting of the hyper-parameters of the stump neural scoring function.
| default_pointsf_para_dict | python | wildltr/ptranking | ptranking/ltr_diversification/eval/div_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/div_parameter.py | MIT |
def default_listsf_para_dict(self):
"""
The default setting of the hyper-parameters of the permutation-equivariant neural scoring function.
"""
self.sf_para_dict = dict()
if self.use_json:
opt = self.json_dict['opt'][0]
lr = self.json_dict['lr'][0]
listsf_json_dict = self.json_dict[self.sf_id]
BN = listsf_json_dict['BN'][0]
bn_type = listsf_json_dict['bn_type'][0] if BN else None
bn_affine = listsf_json_dict['bn_affine'][0] if BN else None
ff_dims = listsf_json_dict['ff_dims']
af = listsf_json_dict['AF'][0]
apply_tl_af = listsf_json_dict['apply_tl_af'][0]
tl_af = listsf_json_dict['TL_AF'][0] if apply_tl_af else None
n_heads = listsf_json_dict['n_heads'][0]
encoder_type = listsf_json_dict['encoder_type'][0]
encoder_layers = listsf_json_dict['encoder_layers'][0]
self.sf_para_dict['opt'] = opt
self.sf_para_dict['lr'] = lr
listsf_para_dict = dict(BN=BN, AF=af, ff_dims=ff_dims, apply_tl_af=apply_tl_af,
n_heads=n_heads, encoder_type=encoder_type, encoder_layers=encoder_layers,
bn_type=bn_type, bn_affine=bn_affine, TL_AF=tl_af)
self.sf_para_dict['sf_id'] = self.sf_id
self.sf_para_dict[self.sf_id] = listsf_para_dict
else:
# optimization-specific setting
self.sf_para_dict['opt'] = 'Adagrad' # Adam | RMS | Adagrad
self.sf_para_dict['lr'] = 0.01 # learning rate
# DASALC, AllRank, AttnDIN
listsf_para_dict = dict(encoder_type='AttnDIN', n_heads=6, encoder_layers=6, ff_dims=[256, 128, 64],
AF='R', TL_AF='GE', apply_tl_af=False, BN=True, bn_type='BN', bn_affine=True)
self.sf_para_dict['sf_id'] = self.sf_id
self.sf_para_dict[self.sf_id] = listsf_para_dict
return self.sf_para_dict |
The default setting of the hyper-parameters of the permutation-equivariant neural scoring function.
| default_listsf_para_dict | python | wildltr/ptranking | ptranking/ltr_diversification/eval/div_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/div_parameter.py | MIT |
def pointsf_grid_search(self):
"""
Iterator of hyper-parameters of the stump neural scoring function.
"""
if self.use_json:
choice_opt = self.json_dict['opt']
choice_lr = self.json_dict['lr']
pointsf_json_dict = self.json_dict[self.sf_id]
choice_layers = pointsf_json_dict['layers']
choice_af = pointsf_json_dict['AF']
choice_apply_tl_af = pointsf_json_dict['apply_tl_af']
choice_tl_af = pointsf_json_dict['TL_AF'] if True in choice_apply_tl_af else None
choice_BN = pointsf_json_dict['BN']
choice_bn_type = pointsf_json_dict['bn_type'] if True in choice_BN else None
choice_bn_affine = pointsf_json_dict['bn_affine'] if True in choice_BN else None
else:
choice_BN = [True]
choice_bn_type = ['BN']
choice_bn_affine = [True]
choice_layers = [3] if self.debug else [5] # 1, 2, 3, 4
choice_af = ['R', 'CE'] if self.debug else ['R', 'CE', 'S'] # ['R', 'LR', 'RR', 'E', 'SE', 'CE', 'S']
choice_tl_af = ['R', 'CE'] if self.debug else ['R', 'CE', 'S'] # ['R', 'LR', 'RR', 'E', 'SE', 'CE', 'S']
choice_apply_tl_af = [True] # True, False
choice_opt = ['Adam']
choice_lr = [0.001]
for opt, lr in product(choice_opt, choice_lr):
sf_para_dict = dict()
sf_para_dict['sf_id'] = self.sf_id
base_dict = dict(opt=opt, lr=lr)
sf_para_dict.update(base_dict)
for num_layers, af, apply_tl_af, BN in product(choice_layers, choice_af, choice_apply_tl_af, choice_BN):
pointsf_para_dict = dict(num_layers=num_layers, AF=af, apply_tl_af=apply_tl_af, BN=BN)
if apply_tl_af:
for tl_af in choice_tl_af:
pointsf_para_dict.update(dict(TL_AF=tl_af))
if BN:
for bn_type, bn_affine in product(choice_bn_type, choice_bn_affine):
bn_dict = dict(bn_type=bn_type, bn_affine=bn_affine)
pointsf_para_dict.update(bn_dict)
sf_para_dict[self.sf_id] = pointsf_para_dict
self.sf_para_dict = sf_para_dict
yield sf_para_dict
else:
sf_para_dict[self.sf_id] = pointsf_para_dict
self.sf_para_dict = sf_para_dict
yield sf_para_dict
else:
if BN:
for bn_type, bn_affine in product(choice_bn_type, choice_bn_affine):
bn_dict = dict(bn_type=bn_type, bn_affine=bn_affine)
pointsf_para_dict.update(bn_dict)
sf_para_dict[self.sf_id] = pointsf_para_dict
self.sf_para_dict = sf_para_dict
yield sf_para_dict
else:
sf_para_dict[self.sf_id] = pointsf_para_dict
self.sf_para_dict = sf_para_dict
yield sf_para_dict |
Iterator of hyper-parameters of the stump neural scoring function.
| pointsf_grid_search | python | wildltr/ptranking | ptranking/ltr_diversification/eval/div_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/div_parameter.py | MIT |
def listsf_to_para_string(self, log=False):
''' Get the identifier of scoring function '''
sf_str = super().listsf_to_para_string(log=log)
s1, s2 = (':', '\n') if log else ('_', '_')
if self.sf_id.endswith("co"):
if log:
sf_str = s2.join([sf_str, s1.join(['CoVariance', 'True'])])
else:
sf_str = '_'.join([sf_str, 'CoCo'])
return sf_str | Get the identifier of scoring function | listsf_to_para_string | python | wildltr/ptranking | ptranking/ltr_diversification/eval/div_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/div_parameter.py | MIT |
def to_eval_setting_string(self, log=False):
"""
String identifier of eval-setting
:param log:
:return:
"""
eval_dict = self.eval_dict
s1, s2 = (':', '\n') if log else ('_', '_')
do_vali, epochs = eval_dict['do_validation'], eval_dict['epochs']
if do_vali:
vali_metric, vali_k = eval_dict['vali_metric'], eval_dict['vali_k']
vali_str = '@'.join([vali_metric, str(vali_k)])
eval_string = s2.join([s1.join(['epochs', str(epochs)]), s1.join(['validation', vali_str])]) if log \
else s1.join(['EP', str(epochs), 'V', vali_str])
else:
eval_string = s1.join(['epochs', str(epochs)])
rerank = eval_dict['rerank']
if rerank:
rerank_k, rerank_model_id = eval_dict['rerank_k'], eval_dict['rerank_model_id']
eval_string = s2.join([eval_string, s1.join(['rerank_k', str(rerank_k)]),
s1.join(['rerank_model_id', rerank_model_id])]) if log else \
s1.join([eval_string, 'RR', str(rerank_k), rerank_model_id])
return eval_string |
String identifier of eval-setting
:param log:
:return:
| to_eval_setting_string | python | wildltr/ptranking | ptranking/ltr_diversification/eval/div_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/div_parameter.py | MIT |
def default_setting(self):
"""
A default setting for evaluation when performing diversified ranking.
:param debug:
:param data_id:
:param dir_output:
:return:
"""
if self.use_json:
dir_output = self.json_dict['dir_output']
epochs = 5 if self.debug else self.json_dict['epochs']
do_validation = self.json_dict['do_validation']
vali_k = self.json_dict['vali_k'] if do_validation else None
vali_metric = self.json_dict['vali_metric'] if do_validation else None
cutoffs = self.json_dict['cutoffs']
do_log = self.json_dict['do_log']
log_step = self.json_dict['log_step'] if do_log else None
do_summary = self.json_dict['do_summary']
loss_guided = self.json_dict['loss_guided']
rerank = self.json_dict['rerank']
rerank_k = self.json_dict['rerank_k'] if rerank else None
rerank_dir = self.json_dict['rerank_dir'] if rerank else None
rerank_model_id = self.json_dict['rerank_model_id'] if rerank else None
rerank_model_dir = self.json_dict['rerank_model_dir'] if rerank else None
else:
do_log = False if self.debug else True
do_validation, do_summary = True, False
cutoffs = [1, 3, 5, 10, 20, 50]
log_step = 1
epochs = 5 if self.debug else 500
vali_k = 5
vali_metric = 'aNDCG' # nERR-IA, aNDCG
dir_output = self.dir_output
loss_guided = False
rerank = False
rerank_k = 50 if rerank else None
rerank_dir = '/Users/iimac/Workbench/CodeBench/Output/DivLTR/Rerank/R_reproduce/Opt_aNDCG/' if rerank\
else None
rerank_model_id = 'DivProbRanker' if rerank else None
rerank_model_dir = '/Users/iimac/Workbench/CodeBench/Output/DivLTR/Rerank/R/DivProbRanker_SF_R3R_BN_Affine_AttnDIN_2_heads_6_encoder_Adagrad_0.01_WT_Div_0912_Implicit_EP_300_V_aNDCG@5/1_SuperSoft_ExpRele_0.01_OptIdeal_10/' if rerank\
else None
# more evaluation settings that are rarely changed
self.eval_dict = dict(debug=self.debug, grid_search=False, dir_output=dir_output, epochs=epochs,
cutoffs=cutoffs, do_validation=do_validation, vali_metric=vali_metric, vali_k=vali_k,
do_summary=do_summary, do_log=do_log, log_step=log_step, loss_guided=loss_guided,
rerank=rerank, rerank_k=rerank_k, rerank_dir=rerank_dir, rerank_model_id=rerank_model_id, rerank_model_dir=rerank_model_dir)
return self.eval_dict |
A default setting for evaluation when performing diversified ranking.
:param debug:
:param data_id:
:param dir_output:
:return:
| default_setting | python | wildltr/ptranking | ptranking/ltr_diversification/eval/div_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/div_parameter.py | MIT |
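For reference, a hypothetical configuration fragment (key names taken from the reads above; all values are illustrative, not repository defaults) of the kind `default_setting` consumes when `use_json` is enabled, shown as the resulting `json_dict`:

```python
# hypothetical json_dict consumed by default_setting (values illustrative)
json_dict = {
    "dir_output": "/tmp/div_ltr_output/",
    "epochs": 300,
    "do_validation": True, "vali_k": 5, "vali_metric": "aNDCG",
    "cutoffs": [1, 3, 5, 10, 20, 50],
    "do_log": True, "log_step": 1,
    "do_summary": False, "loss_guided": False,
    "rerank": False,   # rerank_* keys are only read when this is True
}
```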
def grid_search(self):
"""
Iterator of settings for evaluation when performing diversified ranking.
"""
if self.use_json:
dir_output = self.json_dict['dir_output']
epochs = 5 if self.debug else self.json_dict['epochs']
do_validation = self.json_dict['do_validation']
vali_k = self.json_dict['vali_k'] if do_validation else None
vali_metric = self.json_dict['vali_metric'] if do_validation else None
cutoffs = self.json_dict['cutoffs']
do_log, log_step = self.json_dict['do_log'], self.json_dict['log_step']
do_summary = self.json_dict['do_summary']
loss_guided = self.json_dict['loss_guided']
rerank = self.json_dict['rerank']
rerank_k = self.json_dict['rerank_k'] if rerank else None
rerank_dir = self.json_dict['rerank_dir'] if rerank else None
rerank_model_id = self.json_dict['rerank_model_id'] if rerank else None
base_dict = dict(debug=False, grid_search=True, dir_output=dir_output)
else:
base_dict = dict(debug=self.debug, grid_search=True, dir_output=self.dir_output)
epochs = 2 if self.debug else 100
do_validation = False if self.debug else True # True, False
vali_k, cutoffs = 5, [1, 3, 5, 10, 20, 50]
vali_metric = 'aNDCG'
do_log = False if self.debug else True
log_step = 1
do_summary, loss_guided = False, False
rerank = False
rerank_k = 20 if rerank else None
rerank_dir = '' if rerank else None
rerank_model_id = '' if rerank else None
self.eval_dict = dict(epochs=epochs, do_validation=do_validation, vali_k=vali_k, cutoffs=cutoffs,
vali_metric=vali_metric, do_log=do_log, log_step=log_step,
do_summary=do_summary, loss_guided=loss_guided,
rerank=rerank, rerank_k=rerank_k, rerank_dir=rerank_dir, rerank_model_id=rerank_model_id)
self.eval_dict.update(base_dict)
yield self.eval_dict |
Iterator of settings for evaluation when performing diversified ranking.
| grid_search | python | wildltr/ptranking | ptranking/ltr_diversification/eval/div_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/div_parameter.py | MIT |
def to_data_setting_string(self, log=False):
"""
String identifier of data-setting
:param log:
:return:
"""
data_dict = self.data_dict
setting_string, add_noise = data_dict['data_id'], data_dict['add_noise']
if add_noise:
std_delta = data_dict['std_delta']
setting_string = '_'.join([setting_string, 'Gaussian', '{:,g}'.format(std_delta)])
return setting_string |
String identifier of data-setting
:param log:
:return:
| to_data_setting_string | python | wildltr/ptranking | ptranking/ltr_diversification/eval/div_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/div_parameter.py | MIT |
def default_setting(self):
"""
A default setting for data loading when performing diversified ranking
"""
if self.use_json:
add_noise = self.json_dict['add_noise'][0]
std_delta = self.json_dict['std_delta'][0] if add_noise else None
self.data_dict = dict(data_id=self.data_id, dir_data=self.json_dict["dir_data"],
add_noise=add_noise, std_delta=std_delta)
else:
add_noise = False
std_delta = 1.0 if add_noise else None
self.data_dict = dict(data_id=self.data_id, dir_data=self.dir_data, add_noise=add_noise,std_delta=std_delta)
div_data_meta = get_div_data_meta(data_id=self.data_id) # add meta-information
self.data_dict.update(div_data_meta)
return self.data_dict |
A default setting for data loading when performing diversified ranking
| default_setting | python | wildltr/ptranking | ptranking/ltr_diversification/eval/div_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/div_parameter.py | MIT |
def grid_search(self):
"""
Iterator of settings for data loading when performing adversarial ltr
"""
if self.use_json:
choice_add_noise = self.json_dict['add_noise']
choice_std_delta = self.json_dict['std_delta'] if True in choice_add_noise else None
self.data_dict = dict(data_id=self.data_id, dir_data=self.json_dict["dir_data"])
else:
choice_add_noise = [False]
choice_std_delta = [1.0] if True in choice_add_noise else None
self.data_dict = dict(data_id=self.data_id, dir_data=self.dir_data)
div_data_meta = get_div_data_meta(data_id=self.data_id) # add meta-information
self.data_dict.update(div_data_meta)
for add_noise in choice_add_noise:
if add_noise:
for std_delta in choice_std_delta:
noise_dict = dict(add_noise=add_noise, std_delta=std_delta)
self.data_dict.update(noise_dict)
yield self.data_dict
else:
noise_dict = dict(add_noise=add_noise, std_delta=None)
self.data_dict.update(noise_dict)
yield self.data_dict |
Iterator of settings for data loading when performing adversarial ltr
| grid_search | python | wildltr/ptranking | ptranking/ltr_diversification/eval/div_parameter.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/div_parameter.py | MIT |
def load_data(self, eval_dict=None, data_dict=None, fold_k=None, discriminator=None):
"""
        We note that it is impossible to process multiple queries in a single batch,
        since q_doc_rele_mat may differ from query to query.
@param eval_dict:
@param data_dict:
@param fold_k:
@return:
"""
file_train, file_vali, file_test = self.determine_files(data_splits=self.data_splits, fold_k=fold_k)
fold_dir = data_dict['dir_data'] + 'folder' + str(fold_k) + '/'
if discriminator is not None:
train_data = \
RerankDIVDataset(list_as_file=file_train, split_type=SPLIT_TYPE.Train, fold_dir=fold_dir,
data_dict=data_dict, dictQueryRepresentation=self.dictQueryRepresentation,
dictDocumentRepresentation=self.dictDocumentRepresentation,
dictQueryPermutaion=self.dictQueryPermutaion, presort=self.presort,
dictQueryDocumentSubtopics=self.dictQueryDocumentSubtopics, buffer=True,
discriminator=discriminator, eval_dict=eval_dict)
test_data = \
RerankDIVDataset(list_as_file=file_test, split_type=SPLIT_TYPE.Test, fold_dir=fold_dir,
data_dict=data_dict, dictQueryRepresentation=self.dictQueryRepresentation,
dictQueryPermutaion=self.dictQueryPermutaion,
dictDocumentRepresentation=self.dictDocumentRepresentation,
dictQueryDocumentSubtopics=self.dictQueryDocumentSubtopics,
presort=self.presort, discriminator=discriminator, buffer=True, eval_dict=eval_dict)
vali_data = \
RerankDIVDataset(list_as_file=file_vali, split_type=SPLIT_TYPE.Validation, fold_dir=fold_dir,
data_dict=data_dict, dictQueryRepresentation=self.dictQueryRepresentation,
dictDocumentRepresentation=self.dictDocumentRepresentation,
dictQueryPermutaion=self.dictQueryPermutaion,
dictQueryDocumentSubtopics=self.dictQueryDocumentSubtopics,
buffer=True, presort=self.presort, discriminator=discriminator, eval_dict=eval_dict)
else:
train_data = \
DIVDataset(list_as_file=file_train, split_type=SPLIT_TYPE.Train, fold_dir=fold_dir, data_dict=data_dict,
dictQueryRepresentation=self.dictQueryRepresentation,
dictDocumentRepresentation=self.dictDocumentRepresentation,
dictQueryPermutaion=self.dictQueryPermutaion, buffer=True, presort=self.presort,
dictQueryDocumentSubtopics=self.dictQueryDocumentSubtopics,
add_noise=data_dict['add_noise'], std_delta=data_dict['std_delta'])
test_data = \
DIVDataset(list_as_file=file_test, split_type=SPLIT_TYPE.Test, fold_dir=fold_dir,
data_dict=data_dict, dictQueryRepresentation=self.dictQueryRepresentation,
dictDocumentRepresentation=self.dictDocumentRepresentation,
dictQueryPermutaion=self.dictQueryPermutaion, presort=self.presort,
dictQueryDocumentSubtopics=self.dictQueryDocumentSubtopics, buffer=True,
add_noise=data_dict['add_noise'], std_delta=data_dict['std_delta'])
vali_data = \
DIVDataset(list_as_file=file_vali, split_type=SPLIT_TYPE.Validation, fold_dir=fold_dir,
data_dict=data_dict, dictQueryRepresentation=self.dictQueryRepresentation,
dictDocumentRepresentation=self.dictDocumentRepresentation,
dictQueryPermutaion=self.dictQueryPermutaion, presort=self.presort,
dictQueryDocumentSubtopics=self.dictQueryDocumentSubtopics, buffer=True,
add_noise=data_dict['add_noise'], std_delta=data_dict['std_delta'])
return train_data, test_data, vali_data |
Load the train/validation/test data of the given fold.
Note that batch processing over multiple queries is not supported,
since q_doc_rele_mat may differ in shape from query to query.
@param eval_dict:
@param data_dict:
@param fold_k:
@param discriminator:
@return:
| load_data | python | wildltr/ptranking | ptranking/ltr_diversification/eval/ltr_diversification.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/ltr_diversification.py | MIT |
def setup_output(self, data_dict=None, eval_dict=None, reproduce=False):
"""
        Update the output directory.
        :param data_dict:
        :param eval_dict:
        :param reproduce:
        :return:
"""
model_id = self.model_parameter.model_id
grid_search, do_vali, dir_output = eval_dict['grid_search'], eval_dict['do_validation'], eval_dict['dir_output']
if grid_search or reproduce:
dir_root = dir_output + '_'.join(['gpu', 'grid', model_id]) + '/' if self.gpu else dir_output + '_'.join(['grid', model_id]) + '/'
else:
dir_root = dir_output
eval_dict['dir_root'] = dir_root
if not os.path.exists(dir_root): os.makedirs(dir_root)
sf_str = self.sf_parameter.to_para_string()
data_eval_str = '_'.join([self.data_setting.to_data_setting_string(),
self.eval_setting.to_eval_setting_string()])
file_prefix = '_'.join([model_id, 'SF', sf_str, data_eval_str])
dir_run = dir_root + file_prefix + '/' # run-specific outputs
model_para_string = self.model_parameter.to_para_string()
if len(model_para_string) > 0:
dir_run = dir_run + model_para_string + '/'
eval_dict['dir_run'] = dir_run
if not os.path.exists(dir_run):
os.makedirs(dir_run)
return dir_run |
Update the output directory.
:param data_dict:
:param eval_dict:
:param reproduce:
:return:
| setup_output | python | wildltr/ptranking | ptranking/ltr_diversification/eval/ltr_diversification.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/ltr_diversification.py | MIT |
def setup_eval(self, data_dict, eval_dict, sf_para_dict, model_para_dict):
"""
Finalize the evaluation setting correspondingly
:param data_dict:
:param eval_dict:
:param sf_para_dict:
:param model_para_dict:
:return:
"""
sf_para_dict[sf_para_dict['sf_id']].update(dict(num_features=data_dict['num_features']))
self.dir_run = self.setup_output(data_dict, eval_dict)
if eval_dict['do_log'] and not self.eval_setting.debug:
time_str = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M")
sys.stdout = open(self.dir_run + '_'.join(['log', time_str]) + '.txt', "w")
#if self.do_summary: self.summary_writer = SummaryWriter(self.dir_run + 'summary') |
Finalize the evaluation setting correspondingly
:param data_dict:
:param eval_dict:
:param sf_para_dict:
:param model_para_dict:
:return:
| setup_eval | python | wildltr/ptranking | ptranking/ltr_diversification/eval/ltr_diversification.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/ltr_diversification.py | MIT |
def load_ranker(self, sf_para_dict, model_para_dict):
"""
        Load the ranker corresponding to the given model id.
        :param sf_para_dict:
        :param model_para_dict:
        :return:
"""
model_id = model_para_dict['model_id']
if model_id in ['DALETOR', 'DivLambdaRank', 'DivProbRanker', 'DivSoftRank', 'DivTwinRank']:
ranker = globals()[model_id](sf_para_dict=sf_para_dict, model_para_dict=model_para_dict,
gpu=self.gpu, device=self.device)
else:
raise NotImplementedError
return ranker |
Load the ranker corresponding to the given model id.
:param sf_para_dict:
:param model_para_dict:
:return:
| load_ranker | python | wildltr/ptranking | ptranking/ltr_diversification/eval/ltr_diversification.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/ltr_diversification.py | MIT |
def log_max(self, data_dict=None, max_cv_avg_scores=None, sf_para_dict=None, eval_dict=None, log_para_str=None):
''' Log the best performance across grid search and the corresponding setting '''
dir_root, cutoffs = eval_dict['dir_root'], eval_dict['cutoffs']
data_id = data_dict['data_id']
sf_str = self.sf_parameter.to_para_string(log=True)
data_eval_str = self.data_setting.to_data_setting_string(log=True) +'\n'+ self.eval_setting.to_eval_setting_string(log=True)
with open(file=dir_root + '/' + '_'.join([data_id, sf_para_dict['sf_id'], 'max.txt']), mode='w') as max_writer:
max_writer.write('\n\n'.join([data_eval_str, sf_str, log_para_str, metric_results_to_string(max_cv_avg_scores, cutoffs, metric='aNDCG')])) | Log the best performance across grid search and the corresponding setting | log_max | python | wildltr/ptranking | ptranking/ltr_diversification/eval/ltr_diversification.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/ltr_diversification.py | MIT |
def grid_run(self, debug=True, model_id=None, sf_id=None, data_id=None, dir_data=None, dir_output=None, dir_json=None):
"""
        Perform diversified ranking based on a grid search over parameter settings
"""
if dir_json is not None:
div_data_eval_sf_json = dir_json + 'Div_Data_Eval_ScoringFunction.json'
para_json = dir_json + model_id + "Parameter.json"
self.set_eval_setting(debug=debug, div_eval_json=div_data_eval_sf_json)
self.set_data_setting(div_data_json=div_data_eval_sf_json)
self.set_scoring_function_setting(sf_json=div_data_eval_sf_json)
self.set_model_setting(model_id=model_id, para_json=para_json)
else:
self.set_eval_setting(debug=debug, dir_output=dir_output)
self.set_data_setting(debug=debug, data_id=data_id, dir_data=dir_data)
self.set_scoring_function_setting(debug=debug, sf_id=sf_id)
self.set_model_setting(debug=debug, model_id=model_id)
''' select the best setting through grid search '''
vali_k, cutoffs = 5, [1, 3, 5, 10, 20, 50] # cutoffs should be consistent w.r.t. eval_dict
max_cv_avg_scores = np.zeros(len(cutoffs)) # fold average
k_index = cutoffs.index(vali_k)
        max_eval_dict, max_sf_para_dict, max_div_para_dict = None, None, None
for data_dict in self.iterate_data_setting():
for eval_dict in self.iterate_eval_setting():
if eval_dict['rerank']:
d_sf_para_dict, d_div_para_dict = self.get_rerank_para_dicts(eval_dict=eval_dict)
else:
d_sf_para_dict, d_div_para_dict = None, None
for sf_para_dict in self.iterate_scoring_function_setting():
for div_para_dict in self.iterate_model_setting():
curr_cv_avg_scores = \
self.div_cv_eval(data_dict=data_dict, eval_dict=eval_dict,
sf_para_dict=sf_para_dict, div_para_dict=div_para_dict,
d_sf_para_dict=d_sf_para_dict, d_div_para_dict=d_div_para_dict)
if curr_cv_avg_scores[k_index] > max_cv_avg_scores[k_index]:
max_cv_avg_scores, max_sf_para_dict, max_eval_dict, max_div_para_dict = \
curr_cv_avg_scores, sf_para_dict, eval_dict, div_para_dict
# log max setting
self.log_max(data_dict=data_dict, eval_dict=max_eval_dict,
max_cv_avg_scores=max_cv_avg_scores, sf_para_dict=max_sf_para_dict,
log_para_str=self.model_parameter.to_para_string(log=True, given_para_dict=max_div_para_dict)) |
Perform diversified ranking based on a grid search over parameter settings
| grid_run | python | wildltr/ptranking | ptranking/ltr_diversification/eval/ltr_diversification.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/ltr_diversification.py | MIT |
def point_run(self, debug=False, model_id=None, sf_id=None, data_id=None, dir_data=None, dir_output=None,
dir_json=None, reproduce=False):
"""
        :param debug:
        :param model_id:
        :param sf_id:
        :param data_id:
        :param dir_data:
        :param dir_output:
        :param dir_json:
        :param reproduce:
        :return:
"""
if dir_json is None:
self.set_eval_setting(debug=debug, dir_output=dir_output)
self.set_data_setting(debug=debug, data_id=data_id, dir_data=dir_data)
self.set_scoring_function_setting(debug=debug, sf_id=sf_id)
self.set_model_setting(debug=debug, model_id=model_id)
else:
div_data_eval_sf_json = dir_json + 'Div_Data_Eval_ScoringFunction.json'
para_json = dir_json + model_id + "Parameter.json"
self.set_eval_setting(debug=debug, div_eval_json=div_data_eval_sf_json)
self.set_data_setting(div_data_json=div_data_eval_sf_json)
self.set_scoring_function_setting(sf_json=div_data_eval_sf_json)
self.set_model_setting(model_id=model_id, para_json=para_json)
data_dict = self.get_default_data_setting()
eval_dict = self.get_default_eval_setting()
sf_para_dict = self.get_default_scoring_function_setting()
div_model_para_dict = self.get_default_model_setting()
if eval_dict['rerank']:
d_sf_para_dict, d_div_para_dict = self.get_rerank_para_dicts(eval_dict=eval_dict)
self.div_cv_eval(data_dict=data_dict, eval_dict=eval_dict, sf_para_dict=sf_para_dict,
div_para_dict=div_model_para_dict,
d_sf_para_dict=d_sf_para_dict, d_div_para_dict=d_div_para_dict)
else:
if reproduce:
self.div_cv_reproduce(data_dict=data_dict, eval_dict=eval_dict, sf_para_dict=sf_para_dict,
div_para_dict=div_model_para_dict)
else:
self.div_cv_eval(data_dict=data_dict, eval_dict=eval_dict, sf_para_dict=sf_para_dict,
div_para_dict=div_model_para_dict) |
:param debug:
:param model_id:
:param sf_id:
:param data_id:
:param dir_data:
:param dir_output:
:param dir_json:
:param reproduce:
:return:
| point_run | python | wildltr/ptranking | ptranking/ltr_diversification/eval/ltr_diversification.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/eval/ltr_diversification.py | MIT |
def get_approx_ranks(batch_preds, rt=None, device=None, q_doc_rele_mat=None):
''' get approximated rank positions: Equation-7 in the paper'''
batch_pred_diffs = torch.unsqueeze(batch_preds, dim=2) - torch.unsqueeze(batch_preds, dim=1) # computing pairwise differences, i.e., Sij or Sxy
    batch_indicators = robust_sigmoid(torch.transpose(batch_pred_diffs, dim0=1, dim1=2), rt, device) # smoothed indicators 1[s_j > s_i]; negating the differences (i.e., using {-1.0*}) instead of transposing tends to perform worse
batch_hat_pis = torch.sum(batch_indicators, dim=2) + 0.5 # get approximated rank positions, i.e., hat_pi(x)
_q_doc_rele_mat = torch.unsqueeze(q_doc_rele_mat, dim=1)
batch_q_doc_rele_mat = _q_doc_rele_mat.expand(-1, q_doc_rele_mat.size(1), -1) # duplicate w.r.t. each subtopic -> [num_subtopics, ranking_size, ranking_size]
prior_cover_cnts = torch.sum(batch_indicators * batch_q_doc_rele_mat, dim=2) - q_doc_rele_mat/2.0 # [num_subtopics, num_docs]
return batch_hat_pis, prior_cover_cnts | get approximated rank positions: Equation-7 in the paper | get_approx_ranks | python | wildltr/ptranking | ptranking/ltr_diversification/score_and_sort/daletor.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/score_and_sort/daletor.py | MIT |
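Since robust_sigmoid is imported from elsewhere in the repo, here is a minimal numeric sketch that assumes it behaves like a temperature-scaled sigmoid; it shows how summing smoothed pairwise indicators yields the approximated rank positions.

import torch

def robust_sigmoid_sketch(x, rt=10.0):
    # assumption: robust_sigmoid acts as a temperature-scaled sigmoid
    return torch.sigmoid(rt * x)

batch_preds = torch.tensor([[3.0, 1.0, 2.0]])               # one query, three documents
diffs = batch_preds.unsqueeze(2) - batch_preds.unsqueeze(1) # pairwise s_i - s_j
indicators = robust_sigmoid_sketch(diffs.transpose(1, 2))   # approx. 1[s_j > s_i]
hat_pis = indicators.sum(dim=2) + 0.5                       # approximated rank positions
print(hat_pis)  # approximately tensor([[1., 3., 2.]])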
def alphaDCG_as_a_loss(batch_preds=None, q_doc_rele_mat=None, rt=10, device=None, alpha=0.5, top_k=10):
"""
There are two ways to formulate the loss: (1) using the ideal order; (2) using the predicted order (TBA)
"""
batch_hat_pis, prior_cover_cnts = get_approx_ranks(batch_preds, rt=rt, device=device, q_doc_rele_mat=q_doc_rele_mat)
batch_per_subtopic_gains = q_doc_rele_mat * torch.pow((1.0-alpha), prior_cover_cnts) / torch.log2(1.0 + batch_hat_pis)
batch_global_gains = torch.sum(batch_per_subtopic_gains, dim=1)
if top_k is None:
alpha_DCG = torch.sum(batch_global_gains)
else:
alpha_DCG = torch.sum(batch_global_gains[0:top_k])
batch_loss = -alpha_DCG
return batch_loss |
There are two ways to formulate the loss: (1) using the ideal order; (2) using the predicted order (TBA)
| alphaDCG_as_a_loss | python | wildltr/ptranking | ptranking/ltr_diversification/score_and_sort/daletor.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/score_and_sort/daletor.py | MIT |
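A small worked analogue of the gain formula above, using exact ranks in place of the smoothed hat_pi (pure tensors, no model; the relevance matrix is hypothetical):

import torch

alpha = 0.5
# hypothetical 2-subtopic x 3-document relevance matrix, documents in ideal order
q_doc_rele_mat = torch.tensor([[1., 1., 0.],
                               [0., 1., 1.]])
ranks = torch.arange(1., 4.)                                 # exact ranks 1..3
# prior[t, i]: how often subtopic t is covered before position i
prior = torch.cumsum(q_doc_rele_mat, dim=1) - q_doc_rele_mat
gains = q_doc_rele_mat * (1.0 - alpha) ** prior / torch.log2(1.0 + ranks)
alpha_dcg = gains.sum()                                      # novelty-discounted gain
print(alpha_dcg)  # ~2.196: repeated subtopics are discounted by (1-alpha)^prior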
def div_custom_loss_function(self, batch_preds, q_doc_rele_mat, **kwargs):
'''
        :param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query
        :param q_doc_rele_mat: [num_subtopics, ranking_size] each row represents one subtopic's standard relevance grades for documents associated with the same query
        :return:
'''
assert 'presort' in kwargs and kwargs['presort'] is True # aiming for directly optimising alpha-nDCG over top-k documents
batch_loss = alphaDCG_as_a_loss(batch_preds=batch_preds, q_doc_rele_mat=q_doc_rele_mat,
rt=self.rt, top_k=self.top_k, device=self.device)
self.optimizer.zero_grad()
batch_loss.backward()
self.optimizer.step()
return batch_loss |
:param batch_preds: [batch, ranking_size] each row represents the relevance predictions for documents associated with the same query
:param q_doc_rele_mat: [num_subtopics, ranking_size] each row represents one subtopic's standard relevance grades for documents associated with the same query
:return:
| div_custom_loss_function | python | wildltr/ptranking | ptranking/ltr_diversification/score_and_sort/daletor.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/score_and_sort/daletor.py | MIT |
def default_para_dict(self):
"""
        Default parameter setting for DALETOR. Here rt (reversed T) corresponds to 1/T in the paper.
:return:
"""
if self.use_json:
top_k = self.json_dict['top_k'][0]
rt = self.json_dict['rt'][0] # corresponds to 1/T in paper
self.DALETOR_para_dict = dict(model_id=self.model_id, rt=rt, top_k=top_k)
else:
self.DALETOR_para_dict = dict(model_id=self.model_id, rt=10., top_k=10)
return self.DALETOR_para_dict |
Default parameter setting for DALETOR. Here rt (reversed T) corresponds to 1/T in the paper.
:return:
| default_para_dict | python | wildltr/ptranking | ptranking/ltr_diversification/score_and_sort/daletor.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/score_and_sort/daletor.py | MIT |
def to_para_string(self, log=False, given_para_dict=None):
"""
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
:return:
"""
# using specified para-dict or inner para-dict
DALETOR_para_dict = given_para_dict if given_para_dict is not None else self.DALETOR_para_dict
rt, top_k = DALETOR_para_dict['rt'], DALETOR_para_dict['top_k']
s1 = ':' if log else '_'
if top_k is None:
DALETOR_paras_str = s1.join(['rt', str(rt), 'topk', 'Full'])
else:
DALETOR_paras_str = s1.join(['rt', str(rt), 'topk', str(top_k)])
return DALETOR_paras_str |
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
:return:
| to_para_string | python | wildltr/ptranking | ptranking/ltr_diversification/score_and_sort/daletor.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/score_and_sort/daletor.py | MIT |
def grid_search(self):
"""
        Iterator of parameter settings for DALETOR
"""
if self.use_json:
choice_rt = self.json_dict['rt'] # corresponds to 1/T in paper
choice_topk = self.json_dict['top_k'] # the cutoff value of optimising objective alpha-nDCG@k
else:
choice_rt = [10.0] if self.debug else [10.0] # 1.0, 10.0, 50.0, 100.0
choice_topk = [10] if self.debug else [10]
for rt, top_k in product(choice_rt, choice_topk):
self.DALETOR_para_dict = dict(model_id=self.model_id, rt=rt, top_k=top_k)
yield self.DALETOR_para_dict |
Iterator of parameter settings for DALETOR
| grid_search | python | wildltr/ptranking | ptranking/ltr_diversification/score_and_sort/daletor.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/score_and_sort/daletor.py | MIT |
def alpha_dcg_as_a_loss(top_k=None, batch_mus=None, batch_vars=None, batch_cocos=None, q_doc_rele_mat=None,
opt_ideal=True, presort=False, beta=0.5, const=False, const_var=None):
'''
Alpha_nDCG as the optimization objective.
@param top_k:
@param batch_mus:
@param batch_vars:
@param batch_cocos:
@param q_doc_rele_mat:
@param opt_ideal:
@param presort:
@param beta:
@return:
'''
if const:
batch_expt_ranks, batch_Phi0_subdiag = \
get_expected_rank_const(batch_mus=batch_mus, const_var=const_var, return_cdf=True)
else:
batch_expt_ranks, batch_Phi0_subdiag = \
get_expected_rank(batch_mus=batch_mus, batch_vars=batch_vars, batch_cocos=batch_cocos, return_cdf=True)
if opt_ideal:
assert presort is True
used_batch_expt_ranks = batch_expt_ranks
used_q_doc_rele_mat = q_doc_rele_mat
used_batch_indicators = batch_Phi0_subdiag # the diagonal elements are zero
else:
batch_ascend_expt_ranks, batch_resort_inds = torch.sort(batch_expt_ranks, dim=1, descending=False)
used_batch_expt_ranks = batch_ascend_expt_ranks
used_batch_indicators = torch.gather(batch_Phi0_subdiag, dim=1,
index=torch.unsqueeze(batch_resort_inds.expand(batch_Phi0_subdiag.size(0), -1), dim=0))
used_q_doc_rele_mat = torch.gather(q_doc_rele_mat, dim=1,
index=batch_resort_inds.expand(q_doc_rele_mat.size(0), -1))
_used_q_doc_rele_mat = torch.unsqueeze(used_q_doc_rele_mat, dim=1)
# duplicate w.r.t. each subtopic -> [num_subtopics, ranking_size, ranking_size]
batch_q_doc_rele_mat = _used_q_doc_rele_mat.expand(-1, used_q_doc_rele_mat.size(1), -1)
prior_cover_cnts = torch.sum(used_batch_indicators * batch_q_doc_rele_mat, dim=2) # [num_subtopics,num_docs]
batch_per_subtopic_gains = used_q_doc_rele_mat * torch.pow((1.0 - beta), prior_cover_cnts) \
/ torch.log2(1.0 + used_batch_expt_ranks)
batch_global_gains = torch.sum(batch_per_subtopic_gains, dim=1)
if top_k is None:
alpha_DCG = torch.sum(batch_global_gains)
else:
alpha_DCG = torch.sum(batch_global_gains[0:top_k])
batch_loss = -alpha_DCG
return batch_loss |
Alpha_nDCG as the optimization objective.
@param top_k:
@param batch_mus:
@param batch_vars:
@param batch_cocos:
@param q_doc_rele_mat:
@param opt_ideal:
@param presort:
@param beta:
@return:
| alpha_dcg_as_a_loss | python | wildltr/ptranking | ptranking/ltr_diversification/score_and_sort/div_prob_ranker.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/score_and_sort/div_prob_ranker.py | MIT |
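get_expected_rank is imported from elsewhere in the repo; a common formulation (stated here as an assumption, not necessarily the repo's exact implementation) is E[rank_i] = 1 + sum over j != i of P(s_j > s_i), with independent Gaussian scores:

import torch

def expected_ranks_sketch(batch_mus, batch_vars):
    # s_i - s_j is N(mu_i - mu_j, var_i + var_j) for independent Gaussian scores
    pairsub_mus = batch_mus.unsqueeze(2) - batch_mus.unsqueeze(1)
    pairsub_vars = batch_vars.unsqueeze(2) + batch_vars.unsqueeze(1)
    # P(s_j > s_i) = P(s_i - s_j < 0) = 0.5 * erfc(mu_diff / sqrt(2 * var_diff))
    p_j_beats_i = 0.5 * torch.erfc(pairsub_mus / torch.sqrt(2.0 * pairsub_vars))
    # the diagonal term equals 0.5, hence adding 0.5 rather than 1
    return p_j_beats_i.sum(dim=2) + 0.5

mus = torch.tensor([[2.0, 0.0, 1.0]])
vars_ = torch.full_like(mus, 0.01)
print(expected_ranks_sketch(mus, vars_))  # roughly tensor([[1., 3., 2.]])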
def err_ia_as_a_loss(top_k=None, batch_mus=None, batch_vars=None, batch_cocos=None, q_doc_rele_mat=None,
opt_ideal=True, presort=False, max_label=1.0, device=None, const=False, const_var=None):
'''
ERR-IA as the optimization objective.
@param top_k:
@param batch_mus:
@param batch_vars:
@param batch_cocos:
@param q_doc_rele_mat:
@param opt_ideal:
@param presort:
@return:
'''
ranking_size = q_doc_rele_mat.size(1)
#max_label = torch.max(q_doc_rele_mat)
t2 = torch.tensor([2.0], dtype=torch.float, device=device)
#k = ranking_size if top_k is None else top_k
#if self.norm:
# batch_ideal_err = torch_rankwise_err(q_doc_rele_mat, max_label=max_label, k=k, point=True, device=self.device)
if const:
batch_expt_ranks = get_expected_rank_const(batch_mus=batch_mus, const_var=const_var, return_cdf=False)
else:
batch_expt_ranks = get_expected_rank(batch_mus=batch_mus, batch_vars=batch_vars, batch_cocos=batch_cocos)
if opt_ideal:
assert presort is True
used_batch_expt_ranks = batch_expt_ranks
used_batch_labels = q_doc_rele_mat
else:
'''
        Sort the predicted ranks in an ascending natural order (i.e., 1, 2, 3, ..., n);
        the returned indices can be used to sort other vectors following the predicted order
'''
batch_ascend_expt_ranks, sort_indices = torch.sort(batch_expt_ranks, dim=1, descending=False)
# sort labels according to the expected ranks
# batch_sys_std_labels = torch.gather(batch_std_labels, dim=1, index=sort_indices)
batch_sys_std_labels = torch.gather(q_doc_rele_mat, dim=1,index=sort_indices.expand(q_doc_rele_mat.size(0), -1))
used_batch_expt_ranks = batch_ascend_expt_ranks
used_batch_labels = batch_sys_std_labels
if top_k is None:
expt_ranks = 1.0 / used_batch_expt_ranks
satis_pros = (torch.pow(t2, used_batch_labels) - 1.0) / torch.pow(t2, max_label)
unsatis_pros = torch.ones_like(used_batch_labels) - satis_pros
cum_unsatis_pros = torch.cumprod(unsatis_pros, dim=1)
cascad_unsatis_pros = torch.ones_like(cum_unsatis_pros)
cascad_unsatis_pros[:, 1:ranking_size] = cum_unsatis_pros[:, 0:ranking_size - 1]
expt_satis_ranks = expt_ranks * satis_pros * cascad_unsatis_pros
batch_err = torch.sum(expt_satis_ranks, dim=1)
#if self.norm:
# batch_nerr = batch_err / batch_ideal_err
# nerr_loss = -torch.sum(batch_nerr)
#else:
nerr_loss = -torch.sum(batch_err)
return nerr_loss
else:
top_k_labels = used_batch_labels[:, 0:top_k]
if opt_ideal:
pos_rows = torch.arange(top_k_labels.size(0), dtype=torch.long) # all rows
else:
            non_zero_inds = torch.nonzero(torch.sum(top_k_labels, dim=1))
            zero_metric_value = (non_zero_inds.size(0) == 0)
            if zero_metric_value:
                return None, zero_metric_value # should not be optimized due to no useful training signal
            else:
                pos_rows = non_zero_inds[:, 0]
expt_ranks = 1.0 / used_batch_expt_ranks[:, 0:top_k]
satis_pros = (torch.pow(t2, top_k_labels) - 1.0) / torch.pow(t2, max_label)
unsatis_pros = torch.ones_like(top_k_labels) - satis_pros
cum_unsatis_pros = torch.cumprod(unsatis_pros, dim=1)
cascad_unsatis_pros = torch.ones_like(cum_unsatis_pros)
cascad_unsatis_pros[:, 1:top_k] = cum_unsatis_pros[:, 0:top_k - 1]
expt_satis_ranks = satis_pros[pos_rows, :] * cascad_unsatis_pros[pos_rows, :] * expt_ranks
batch_err = torch.sum(expt_satis_ranks, dim=1)
#if self.norm:
# batch_nerr = batch_err / batch_ideal_err[pos_rows, :]
# nerr_loss = -torch.sum(batch_nerr)
#else:
nerr_loss = -torch.sum(batch_err)
return nerr_loss |
ERR-IA as the optimization objective.
@param top_k:
@param batch_mus:
@param batch_vars:
@param batch_cocos:
@param q_doc_rele_mat:
@param opt_ideal:
@param presort:
@return:
| err_ia_as_a_loss | python | wildltr/ptranking | ptranking/ltr_diversification/score_and_sort/div_prob_ranker.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/score_and_sort/div_prob_ranker.py | MIT |
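The satis/cascade terms above follow the standard ERR recursion; a compact worked sketch with exact ranks (no Gaussian machinery, made-up labels) clarifies the cumprod shift:

import torch

labels = torch.tensor([[1., 0., 1.]])              # one subtopic row, binary grades
max_label = 1.0
ranks = torch.arange(1., 4.).unsqueeze(0)          # exact rank positions 1..3
satis = (2.0 ** labels - 1.0) / (2.0 ** max_label) # stop probability per position
unsatis = 1.0 - satis
cum_unsatis = torch.cumprod(unsatis, dim=1)
cascade = torch.ones_like(cum_unsatis)
cascade[:, 1:] = cum_unsatis[:, :-1]               # probability of reaching position r
err = torch.sum((1.0 / ranks) * satis * cascade, dim=1)
print(err)  # 1/1*0.5 + 0 + 1/3*0.5*0.5 -> tensor([0.5833])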
def div_custom_loss_function(self, batch_mus, batch_vars, q_doc_rele_mat, **kwargs):
'''
In the context of SRD, batch_size is commonly 1.
@param batch_mus: [batch_size, ranking_size] each row represents the mean predictions for documents associated with the same query
@param batch_vars: [batch_size, ranking_size] each row represents the variance predictions for documents associated with the same query
        @param q_doc_rele_mat: [num_subtopics, ranking_size] each row represents one subtopic's standard relevance grades for documents associated with the same query
@param kwargs:
@return:
'''
# aiming for directly optimising alpha-nDCG over top-k documents
assert 'presort' in kwargs and kwargs['presort'] is True
presort = kwargs['presort']
batch_cocos = kwargs['batch_cocos'] if 'batch_cocos' in kwargs else None
if 'SuperSoft' == self.opt_id:
if 'aNDCG' == self.metric:
batch_loss = alpha_dcg_as_a_loss(top_k=self.top_k, batch_mus=batch_mus, batch_vars=batch_vars,
batch_cocos=batch_cocos, q_doc_rele_mat=q_doc_rele_mat,
opt_ideal=self.opt_ideal, presort=presort, beta=self.beta)
elif 'nERR-IA' == self.metric:
batch_loss = err_ia_as_a_loss(top_k=self.top_k, batch_mus=batch_mus, batch_vars=batch_vars,
batch_cocos=batch_cocos, q_doc_rele_mat=q_doc_rele_mat,
opt_ideal=self.opt_ideal, presort=presort, max_label=1.0,
device=self.device)
elif self.opt_id == 'LambdaPairCLS':
batch_loss = prob_lambda_loss(opt_id=self.opt_id, batch_mus=batch_mus, batch_vars=batch_vars,
batch_cocos=batch_cocos, q_doc_rele_mat=q_doc_rele_mat,
opt_ideal=self.opt_ideal, presort=presort, beta=self.beta,
device=self.device, norm=self.norm)
elif self.opt_id == 'PairCLS':
batch_loss = prob_lambda_loss(opt_id=self.opt_id, batch_mus=batch_mus, batch_vars=batch_vars,
batch_cocos=batch_cocos, q_doc_rele_mat=q_doc_rele_mat)
elif self.opt_id == "Portfolio":
rets = batch_mus
n_samples, n_assets = rets.shape
covmat_sqrt = batch_cocos
alpha = torch.tensor([0.01], device=self.device)
gamma_sqrt = torch.tensor([0.1], device=self.device)
gamma_sqrt_ = gamma_sqrt.repeat((1, n_assets * n_assets)).view(n_samples, n_assets, n_assets)
alpha_abs = torch.abs(alpha) # it needs to be nonnegative
#print('rets', rets.size())
#print('gamma_sqrt_', gamma_sqrt_.size())
#print('covmat_sqrt', covmat_sqrt.size())
#print('alpha_abs', alpha_abs)
batch_preds = self.cvxpylayer(rets, gamma_sqrt_ * covmat_sqrt, alpha_abs)[0]
#print('batch_preds', batch_preds)
#print('q_doc_rele_mat', q_doc_rele_mat.size())
batch_loss = alphaDCG_as_a_loss(batch_preds=batch_preds, q_doc_rele_mat=q_doc_rele_mat,
rt=10, top_k=10, device=self.device)
else:
raise NotImplementedError
self.optimizer.zero_grad()
batch_loss.backward()
self.optimizer.step()
return batch_loss |
In the context of SRD, batch_size is commonly 1.
@param batch_mus: [batch_size, ranking_size] each row represents the mean predictions for documents associated with the same query
@param batch_vars: [batch_size, ranking_size] each row represents the variance predictions for documents associated with the same query
@param q_doc_rele_mat: [num_subtopics, ranking_size] each row represents one subtopic's standard relevance grades for documents associated with the same query
@param kwargs:
@return:
| div_custom_loss_function | python | wildltr/ptranking | ptranking/ltr_diversification/score_and_sort/div_prob_ranker.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/score_and_sort/div_prob_ranker.py | MIT |
def to_para_string(self, log=False, given_para_dict=None):
"""
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
"""
# using specified para-dict or inner para-dict
probrank_para_dict = given_para_dict if given_para_dict is not None else self.probrank_para_dict
s1 = ':' if log else '_'
K, cluster, opt_id = probrank_para_dict['K'], probrank_para_dict['cluster'], probrank_para_dict['opt_id']
sort_id, limit_delta = probrank_para_dict['sort_id'], probrank_para_dict['limit_delta']
if cluster:
probrank_paras_str = s1.join([str(K), 'CS', opt_id])
else:
probrank_paras_str = s1.join([str(K), opt_id])
probrank_paras_str = s1.join([probrank_paras_str, sort_id])
if limit_delta is not None:
probrank_paras_str = s1.join([probrank_paras_str, '{:,g}'.format(limit_delta)])
if 'LambdaPairCLS' == opt_id:
norm = probrank_para_dict['norm']
probrank_paras_str = s1.join([probrank_paras_str, 'Norm']) if norm else probrank_paras_str
opt_ideal = probrank_para_dict['opt_ideal']
probrank_paras_str = s1.join([probrank_paras_str, 'OptIdeal']) if opt_ideal else probrank_paras_str
elif 'SuperSoft' == opt_id:
opt_ideal = probrank_para_dict['opt_ideal']
probrank_paras_str = s1.join([probrank_paras_str, 'OptIdeal']) if opt_ideal else probrank_paras_str
top_k = probrank_para_dict['top_k']
if top_k is None:
probrank_paras_str = s1.join([probrank_paras_str, 'Full'])
else:
probrank_paras_str = s1.join([probrank_paras_str, str(top_k)])
metric = probrank_para_dict['metric']
            probrank_paras_str = s1.join([probrank_paras_str, metric])
return probrank_paras_str |
String identifier of parameters
:param log:
:param given_para_dict: a given dict, which is used for maximum setting w.r.t. grid-search
| to_para_string | python | wildltr/ptranking | ptranking/ltr_diversification/score_and_sort/div_prob_ranker.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/score_and_sort/div_prob_ranker.py | MIT |
def grid_search(self):
""" Iterator of parameter settings for MiDeExpectedUtility """
if self.use_json:
choice_topk = self.json_dict['top_k']
choice_opt_id = self.json_dict['opt_id']
choice_K = self.json_dict['K']
choice_cluster = self.json_dict['cluster']
choice_sort_id = self.json_dict['sort_id']
choice_limit_delta = self.json_dict['limit_delta']
choice_opt_ideal = self.json_dict['opt_ideal']
choice_metric = self.json_dict['metric']
choice_norm = self.json_dict['norm']
else:
choice_topk = [10] if self.debug else [10]
choice_opt_id = ['SuperSoft'] if self.debug else ['SuperSoft', 'PairCLS', 'LambdaPairCLS']
choice_K = [5]
choice_cluster = [False]
choice_sort_id = ['ExpRele']
choice_limit_delta = [None, 0.1]
choice_opt_ideal = [True] if self.debug else [True]
choice_metric = ['aNDCG'] # 'aNDCG', 'nERR-IA'
choice_norm = [True] if self.debug else [True]
for K, cluster, opt_id, sort_id, limit_delta in \
product(choice_K, choice_cluster, choice_opt_id, choice_sort_id, choice_limit_delta):
self.probrank_para_dict = dict(model_id=self.model_id, K=K, cluster=cluster, opt_id=opt_id, sort_id=sort_id,
limit_delta=limit_delta)
if opt_id == 'PairCLS':
yield self.probrank_para_dict
elif opt_id == 'LambdaPairCLS': # top-k is not needed, due to the requirement of pairwise swapping
for opt_ideal, norm in product(choice_opt_ideal, choice_norm):
inner_para_dict = dict()
inner_para_dict['opt_ideal'] = opt_ideal
inner_para_dict['norm'] = norm
self.probrank_para_dict.update(inner_para_dict)
yield self.probrank_para_dict
elif opt_id == 'SuperSoft':
for top_k, metric, opt_ideal in product(choice_topk, choice_metric, choice_opt_ideal):
inner_para_dict = dict()
inner_para_dict['top_k'] = top_k
inner_para_dict['metric'] = metric
inner_para_dict['opt_ideal'] = opt_ideal
self.probrank_para_dict.update(inner_para_dict)
yield self.probrank_para_dict
else:
                raise NotImplementedError | Iterator of parameter settings for DivProbRanker | grid_search | python | wildltr/ptranking | ptranking/ltr_diversification/score_and_sort/div_prob_ranker.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/score_and_sort/div_prob_ranker.py | MIT |
def get_div_data_meta(data_id=None):
""" Get the meta-information corresponding to the specified dataset """
if data_id in TREC_DIV:
fold_num = 5
max_label = 1
num_features = 100
else:
raise NotImplementedError
data_meta = dict(num_features=num_features, fold_num=fold_num, max_label=max_label)
return data_meta | Get the meta-information corresponding to the specified dataset | get_div_data_meta | python | wildltr/ptranking | ptranking/ltr_diversification/util/div_data.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/util/div_data.py | MIT |
def deploy_1st_stage_div_discriminating(discriminator, rerank_k, q_repr, doc_reprs, gpu, device):
''' Perform 1st-stage ranking as a discriminating process. '''
sys_rele_preds = discriminator.div_predict(q_repr, doc_reprs) # [1, ranking_size]
if gpu: sys_rele_preds = sys_rele_preds.cpu()
_, sys_sorted_inds = torch.sort(sys_rele_preds, dim=1, descending=True) # [1, ranking_size]
batch_top_k_sys_sorted_inds = sys_sorted_inds[:, 0:rerank_k]
return torch.squeeze(batch_top_k_sys_sorted_inds) | Perform 1st-stage ranking as a discriminating process. | deploy_1st_stage_div_discriminating | python | wildltr/ptranking | ptranking/ltr_diversification/util/div_data.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/util/div_data.py | MIT |
def get_pairwise_comp_probs(batch_preds, std_q_doc_rele_mat, sigma=None):
'''
    Get the predicted and standard probabilities p_ij, which denote the probability that d_i beats d_j; the per-subtopic labels are aggregated by averaging.
    @param batch_preds:
    @param std_q_doc_rele_mat:
    @param sigma:
    @return:
'''
# standard pairwise differences per-subtopic, i.e., S_{ij}
subtopic_std_diffs = torch.unsqueeze(std_q_doc_rele_mat, dim=2) - torch.unsqueeze(std_q_doc_rele_mat, dim=1)
# ensuring S_{ij} \in {-1, 0, 1}
subtopic_std_Sij = torch.clamp(subtopic_std_diffs, min=-1.0, max=1.0)
subtopic_std_p_ij = 0.5 * (1.0 + subtopic_std_Sij)
batch_std_p_ij = torch.mean(subtopic_std_p_ij, dim=0, keepdim=True)
# computing pairwise differences, i.e., s_i - s_j
batch_s_ij = torch.unsqueeze(batch_preds, dim=2) - torch.unsqueeze(batch_preds, dim=1)
batch_p_ij = torch.sigmoid(sigma * batch_s_ij)
return batch_p_ij, batch_std_p_ij |
Get the predicted and standard probabilities p_ij, which denote the probability that d_i beats d_j; the per-subtopic labels are aggregated by averaging.
@param batch_preds:
@param std_q_doc_rele_mat:
@param sigma:
@return:
| get_pairwise_comp_probs | python | wildltr/ptranking | ptranking/ltr_diversification/util/div_lambda_utils.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/util/div_lambda_utils.py | MIT |
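A toy invocation, assuming the function defined above is in scope; two subtopic rows are averaged into one target matrix (the relevance values are made up):

import torch

# two subtopics, three documents
std_q_doc_rele_mat = torch.tensor([[1., 0., 0.],
                                   [1., 1., 0.]])
batch_preds = torch.tensor([[2.0, 1.0, 0.5]])
batch_p_ij, batch_std_p_ij = get_pairwise_comp_probs(batch_preds, std_q_doc_rele_mat, sigma=1.0)
print(batch_std_p_ij[0, 0, 1])  # doc0 vs doc1: per-subtopic targets (1.0 + 0.5)/2 = 0.75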
def get_prob_pairwise_comp_probs(batch_pairsub_mus, batch_pairsub_vars, q_doc_rele_mat):
'''
The difference of two normal random variables is another normal random variable.
pairsub_mu & pairsub_var denote the corresponding mean & variance of the difference of two normal random variables
p_ij denotes the probability that d_i beats d_j
@param batch_pairsub_mus:
@param batch_pairsub_vars:
    @param q_doc_rele_mat:
@return:
'''
subtopic_std_diffs = torch.unsqueeze(q_doc_rele_mat, dim=2) - torch.unsqueeze(q_doc_rele_mat, dim=1)
subtopic_std_Sij = torch.clamp(subtopic_std_diffs, min=-1.0, max=1.0) # ensuring S_{ij} \in {-1, 0, 1}
subtopic_std_p_ij = 0.5 * (1.0 + subtopic_std_Sij)
batch_std_p_ij = torch.mean(subtopic_std_p_ij, dim=0, keepdim=True)
batch_p_ij = 1.0 - 0.5 * torch.erfc(batch_pairsub_mus / torch.sqrt(2 * batch_pairsub_vars))
return batch_p_ij, batch_std_p_ij |
The difference of two normal random variables is another normal random variable.
pairsub_mu & pairsub_var denote the corresponding mean & variance of the difference of two normal random variables
p_ij denotes the probability that d_i beats d_j
@param batch_pairsub_mus:
@param batch_pairsub_vars:
@param batch_std_labels:
@return:
| get_prob_pairwise_comp_probs | python | wildltr/ptranking | ptranking/ltr_diversification/util/div_lambda_utils.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/util/div_lambda_utils.py | MIT |
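The batch_p_ij line relies on the identity P(X > 0) = 1 - 0.5*erfc(mu / (sigma*sqrt(2))) for X ~ N(mu, sigma^2); a quick numeric cross-check against the normal CDF:

import torch

mu, var = torch.tensor(0.3), torch.tensor(2.0)
p_erfc = 1.0 - 0.5 * torch.erfc(mu / torch.sqrt(2.0 * var))
normal = torch.distributions.Normal(mu, torch.sqrt(var))   # P(X > 0) = 1 - CDF(0)
p_cdf = 1.0 - normal.cdf(torch.tensor(0.0))
print(p_erfc.item(), p_cdf.item())  # both ~0.5840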
def get_diff_normal(batch_mus, batch_vars, batch_cocos=None):
'''
The difference of two normal random variables is another normal random variable. In particular, we consider two
cases: (1) correlated (2) independent.
@param batch_mus: the predicted mean
@param batch_vars: the predicted variance
@param batch_cocos: the predicted correlation coefficient in [-1, 1], which is formulated as the cosine-similarity of corresponding vectors.
    @return: the mean and variance of the resulting normal variable.
'''
# mu_i - mu_j
batch_pairsub_mus = torch.unsqueeze(batch_mus, dim=2) - torch.unsqueeze(batch_mus, dim=1)
    # variance w.r.t. S_i - S_j, which is equal to: (1) correlated: sigma^2_i + sigma^2_j - \rho_ij*sigma_i*sigma_j, (2) independent: sigma^2_i + sigma^2_j
if batch_cocos is not None:
batch_std_vars = torch.pow(batch_vars, .5)
batch_pairsub_vars = torch.unsqueeze(batch_vars, dim=2) + torch.unsqueeze(batch_vars, dim=1) - \
batch_cocos * torch.bmm(torch.unsqueeze(batch_std_vars, dim=2),
torch.unsqueeze(batch_std_vars, dim=1))
else:
batch_pairsub_vars = torch.unsqueeze(batch_vars, dim=2) + torch.unsqueeze(batch_vars, dim=1)
return batch_pairsub_mus, batch_pairsub_vars |
The difference of two normal random variables is another normal random variable. In particular, we consider two
cases: (1) correlated (2) independent.
@param batch_mus: the predicted mean
@param batch_vars: the predicted variance
@param batch_cocos: the predicted correlation coefficient in [-1, 1], which is formulated as the cosine-similarity of corresponding vectors.
@return: the mean and variance of the resulting normal variable.
| get_diff_normal | python | wildltr/ptranking | ptranking/ltr_diversification/util/prob_utils.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/util/prob_utils.py | MIT |
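For the independent case, the textbook identity is Var(S_i - S_j) = sigma_i^2 + sigma_j^2 (the correlated case subtracts a rho*sigma_i*sigma_j-style correction, as in the code above); a quick Monte-Carlo sanity check of the independent case:

import torch

torch.manual_seed(0)
mu_i, mu_j, var_i, var_j = 1.0, 0.5, 0.8, 0.2
s_i = torch.randn(1_000_000) * var_i ** 0.5 + mu_i
s_j = torch.randn(1_000_000) * var_j ** 0.5 + mu_j
diff = s_i - s_j
print(diff.mean().item())  # ~0.5, i.e., mu_i - mu_j
print(diff.var().item())   # ~1.0, i.e., var_i + var_j for independent scores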
def get_diff_normal_resort(batch_mus, batch_vars, batch_cocos=None, batch_resort_inds=None):
'''
Compared with get_diff_normal(), resort is conducted first.
'''
batch_resorted_mus = torch.gather(batch_mus, dim=1, index=batch_resort_inds)
batch_resorted_vars = torch.gather(batch_vars, dim=1, index=batch_resort_inds)
if batch_cocos is not None:
num_docs = batch_cocos.size(1)
batch_cocos_1 = torch.gather(batch_cocos, dim=2,
index=torch.unsqueeze(batch_resort_inds, dim=1).expand(-1, num_docs, -1))
batch_resorted_cocos = torch.gather(batch_cocos_1, dim=1,
index=torch.unsqueeze(batch_resort_inds, dim=2).expand(-1, -1, num_docs))
else:
batch_resorted_cocos = None
return get_diff_normal(batch_mus=batch_resorted_mus, batch_vars=batch_resorted_vars,
batch_cocos=batch_resorted_cocos) |
Compared with get_diff_normal(), resort is conducted first.
| get_diff_normal_resort | python | wildltr/ptranking | ptranking/ltr_diversification/util/prob_utils.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/util/prob_utils.py | MIT |
def neg_log_likelihood(batch_pairsub_mus, batch_pairsub_vars, top_k=None, device=None):
'''
Compute the negative log-likelihood w.r.t. rankings, where the likelihood is formulated as the joint probability of
consistent pairwise comparisons.
@param batch_pairsub_mus: mean w.r.t. a pair comparison
@param batch_pairsub_vars: variance w.r.t. a pair comparison
@return:
'''
batch_full_erfc = torch.erfc(batch_pairsub_mus / torch.sqrt(2 * batch_pairsub_vars))
if top_k is None:
# use the triu-part of pairwise probabilities w.r.t. d_i > d_j, and using the trick: log(1.0) is zero
batch_p_ij_triu = 1.0 - 0.5 * torch.triu(batch_full_erfc, diagonal=1)
# batch_neg_log_probs = - torch.log(triu_probs) # facing the issue of nan due to overflow
batch_neg_log_probs = F.binary_cross_entropy(input=batch_p_ij_triu, reduction='none',
target=torch.ones_like(batch_p_ij_triu, device=device))
else: # the part to keep will be 1, otherwise 0
keep_mask = torch.triu(torch.ones_like(batch_pairsub_vars), diagonal=1)
keep_mask[:, top_k:, :] = 0.0 # without considering pairs beneath position-k
batch_p_ij_triu_top_k = 1 - batch_full_erfc * keep_mask * 0.5
# batch_neg_log_probs = - torch.log(1 - batch_full_erfc * keep_mask * 0.5) # using the trick: log(1.0) is zero
batch_neg_log_probs = F.binary_cross_entropy(input=batch_p_ij_triu_top_k, reduction='none',
target=torch.ones_like(batch_p_ij_triu_top_k, device=device))
return batch_neg_log_probs # with a shape of [batch_size, ranking_size, ranking_size] |
Compute the negative log-likelihood w.r.t. rankings, where the likelihood is formulated as the joint probability of
consistent pairwise comparisons.
@param batch_pairsub_mus: mean w.r.t. a pair comparison
@param batch_pairsub_vars: variance w.r.t. a pair comparison
@return:
| neg_log_likelihood | python | wildltr/ptranking | ptranking/ltr_diversification/util/prob_utils.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/util/prob_utils.py | MIT |
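A toy call, assuming both neg_log_likelihood above and get_diff_normal from the same module are in scope; only the strict upper triangle carries -log P(d_i beats d_j), while the remaining entries reduce to -log(1) = 0:

import torch

batch_mus = torch.tensor([[2.0, 1.0, 0.0]])   # scores already in the target order
batch_vars = torch.tensor([[0.5, 0.5, 0.5]])
pairsub_mus, pairsub_vars = get_diff_normal(batch_mus, batch_vars)
nll = neg_log_likelihood(pairsub_mus, pairsub_vars, top_k=None, device=None)
print(nll.squeeze(0))  # nonzero entries only above the diagonal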
def neg_log_likelihood_explicit(batch_pairsub_mus, std_var, top_k=None, device=None):
'''
Compute the negative log-likelihood w.r.t. rankings, where the likelihood is formulated as the joint probability of
    consistent pairwise comparisons, using a fixed standard deviation for all pair comparisons.
    @param batch_pairsub_mus: mean w.r.t. a pair comparison
    @param std_var: the fixed standard deviation shared by all pair comparisons
@return:
'''
batch_full_erfc = torch.erfc(batch_pairsub_mus / torch.sqrt(torch.tensor([2 * std_var ** 2], device=device)))
if top_k is None:
# use the triu-part of pairwise probabilities w.r.t. d_i > d_j, and using the trick: log(1.0) is zero
batch_p_ij_triu = 1.0 - 0.5 * torch.triu(batch_full_erfc, diagonal=1)
# batch_neg_log_probs = - torch.log(triu_probs) # facing the issue of nan due to overflow
batch_neg_log_probs = F.binary_cross_entropy(input=batch_p_ij_triu, reduction='none',
target=torch.ones_like(batch_p_ij_triu, device=device))
else: # the part to keep will be 1, otherwise 0
keep_mask = torch.triu(torch.ones_like(batch_pairsub_mus), diagonal=1)
keep_mask[:, top_k:, :] = 0.0 # without considering pairs beneath position-k
batch_p_ij_triu_top_k = 1 - batch_full_erfc * keep_mask * 0.5
# batch_neg_log_probs = - torch.log(1 - batch_full_erfc * keep_mask * 0.5) # using the trick: log(1.0) is zero
batch_neg_log_probs = F.binary_cross_entropy(input=batch_p_ij_triu_top_k, reduction='none',
target=torch.ones_like(batch_p_ij_triu_top_k, device=device))
return batch_neg_log_probs # with a shape of [batch_size, ranking_size, ranking_size] |
Compute the negative log-likelihood w.r.t. rankings, where the likelihood is formulated as the joint probability of
consistent pairwise comparisons, using a fixed standard deviation for all pair comparisons.
@param batch_pairsub_mus: mean w.r.t. a pair comparison
@param std_var: the fixed standard deviation shared by all pair comparisons
@return:
| neg_log_likelihood_explicit | python | wildltr/ptranking | ptranking/ltr_diversification/util/prob_utils.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/util/prob_utils.py | MIT |
def batch_cosine_similarity(x1, x2=None, eps=1e-8):
'''
:param x1: [batch_size, num_docs, num_features]
:param x2: the same shape or None
:param eps:
:return:
'''
x2 = x1 if x2 is None else x2
w1 = x1.norm(p=2, dim=2, keepdim=True)
#print('w1', w1.size(), '\n', w1)
w2 = w1 if x2 is x1 else x2.norm(p=2, dim=2, keepdim=True)
batch_numerator = torch.bmm(x1, x2.permute(0, 2, 1))
batch_denominator = torch.bmm(w1, w2.permute(0, 2, 1)).clamp(min=eps)
return batch_numerator/batch_denominator |
:param x1: [batch_size, num_docs, num_features]
:param x2: the same shape or None
:param eps:
:return:
| batch_cosine_similarity | python | wildltr/ptranking | ptranking/ltr_diversification/util/sim_utils.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_diversification/util/sim_utils.py | MIT |
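A quick cross-check of one entry against torch's built-in cosine similarity, assuming the function above is in scope:

import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 8)                      # [batch_size, num_docs, num_features]
sims = batch_cosine_similarity(x)             # [batch_size, num_docs, num_docs]
ref = F.cosine_similarity(x[0, 1], x[0, 3], dim=0)
print(torch.allclose(sims[0, 1, 3], ref, atol=1e-6))  # True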
def check_consistency(self, data_dict, eval_dict):
"""
Check whether the settings are reasonable in the context of gbdt learning-to-rank
"""
''' Part-1: data loading '''
if data_dict['data_id'] == 'Istella':
assert eval_dict['do_validation'] is not True # since there is no validation data
if data_dict['data_id'] in MSLETOR_SEMI:
assert data_dict['train_presort'] is not True # due to the non-labeled documents
            if data_dict['binary_rele']: # for semi-supervised datasets, binarization is required due to '-1' labels
assert data_dict['unknown_as_zero']
else:
            assert data_dict['unknown_as_zero'] is not True # since there are no non-labeled documents
if data_dict['data_id'] in MSLETOR_LIST: # for which the standard ltr_adhoc of each query is unique
assert 1 == data_dict['train_rough_batch_size']
if data_dict['scale_data']:
scaler_level = data_dict['scaler_level'] if 'scaler_level' in data_dict else None
assert not scaler_level == 'DATASET' # not supported setting
assert data_dict['validation_presort'] # Rule of thumb setting for adhoc learning-to-rank
assert data_dict['test_presort'] # Rule of thumb setting for adhoc learning-to-rank
assert 1 == data_dict['validation_rough_batch_size'] # Rule of thumb setting for adhoc learning-to-rank
assert 1 == data_dict['test_rough_batch_size'] # Rule of thumb setting for adhoc learning-to-rank
''' Part-2: evaluation setting '''
if eval_dict['mask_label']: # True is aimed to use supervised data to mimic semi-supervised data by masking
assert not data_dict['data_id'] in MSLETOR_SEMI |
Check whether the settings are reasonable in the context of gbdt learning-to-rank
| check_consistency | python | wildltr/ptranking | ptranking/ltr_tree/eval/ltr_tree.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/eval/ltr_tree.py | MIT |
def setup_output(self, data_dict=None, eval_dict=None):
"""
        Determine the output directory.
:param data_dict:
:param eval_dict:
:return:
"""
dir_output, grid_search, mask_label = eval_dict['dir_output'], eval_dict['grid_search'],\
eval_dict['mask_label']
#print(' '.join(['Start {} on {} >>>'.format(self.model_parameter.model_id, data_id)]))
if grid_search:
output_root = dir_output + '_'.join(['grid', self.model_parameter.get_identifier()]) + '/'
else:
output_root = dir_output + self.model_parameter.get_identifier() + '/'
data_eval_str = '_'.join([self.data_setting.to_data_setting_string(), self.eval_setting.to_eval_setting_string()])
if mask_label:
data_eval_str = '_'.join([data_eval_str, 'MaskLabel', 'Ratio', '{:,g}'.format(eval_dict['mask_ratio'])])
if data_dict['scale_data']:
if data_dict['scaler_level'] == 'QUERY':
data_eval_str = '_'.join([data_eval_str, 'QS', data_dict['scaler_id']])
else:
data_eval_str = '_'.join([data_eval_str, 'DS', data_dict['scaler_id']])
output_root = output_root + data_eval_str + '/' + self.model_parameter.to_para_string() + '/' # run-specific outputs
return output_root |
Determine the output directory.
:param data_dict:
:param eval_dict:
:return:
| setup_output | python | wildltr/ptranking | ptranking/ltr_tree/eval/ltr_tree.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/eval/ltr_tree.py | MIT |
def result_to_str(self, list_scores=None, list_cutoffs=None, split_str=', ', metric_str=None):
"""
Convert metric results to a string
:param list_scores:
:param list_cutoffs:
:param split_str:
:param metric_str:
:return:
"""
list_str = []
for i in range(len(list_scores)):
list_str.append('{}@{}:{:.4f}'.format(metric_str, list_cutoffs[i], list_scores[i]))
return split_str.join(list_str) |
Convert metric results to a string
:param list_scores:
:param list_cutoffs:
:param split_str:
:param metric_str:
:return:
| result_to_str | python | wildltr/ptranking | ptranking/ltr_tree/eval/ltr_tree.py | https://github.com/wildltr/ptranking/blob/master/ptranking/ltr_tree/eval/ltr_tree.py | MIT |
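A standalone copy of the formatting logic above, for illustration (the original is a method; the score values below are made up):

def result_to_str(list_scores, list_cutoffs, split_str=', ', metric_str='nDCG'):
    # standalone version of the method above
    return split_str.join('{}@{}:{:.4f}'.format(metric_str, c, s)
                          for s, c in zip(list_scores, list_cutoffs))

print(result_to_str([0.4213, 0.4571, 0.4688], [1, 3, 5]))
# nDCG@1:0.4213, nDCG@3:0.4571, nDCG@5:0.4688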