# FRGCF/util/loss_torch.py
import torch
import torch.nn.functional as F

def bpr_loss(user_emb, pos_item_emb, neg_item_emb):
    """Bayesian Personalized Ranking loss over (user, positive, negative) triples."""
    pos_score = torch.mul(user_emb, pos_item_emb).sum(dim=1)
    neg_score = torch.mul(user_emb, neg_item_emb).sum(dim=1)
    # epsilon keeps the log finite when sigmoid(pos - neg) underflows to 0
    loss = -torch.log(1e-8 + torch.sigmoid(pos_score - neg_score))
    return torch.mean(loss)
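
# Usage sketch (illustrative, not from the original file): all three arguments
# are [B, d] tensors, one row per sampled (user, pos_item, neg_item) triple:
#   loss = bpr_loss(user_emb[users], item_emb[pos_items], item_emb[neg_items])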

def l2_reg_loss(reg, *args):
    """Sum of the L2 norms of the given embedding tensors, scaled by `reg`."""
    emb_loss = 0
    for emb in args:
        emb_loss += torch.norm(emb, p=2)
    return emb_loss * reg
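
# Note: this sums *unsquared* L2 norms over whole tensors. A typical call
# (the weight 1e-4 is an illustrative assumption) regularizes the embeddings
# touched by the current batch:
#   reg_term = l2_reg_loss(1e-4, user_emb[users], item_emb[pos_items], item_emb[neg_items])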

def batch_softmax_loss(user_emb, item_emb, temperature):
    """In-batch softmax loss on L2-normalized user/item embeddings."""
    user_emb, item_emb = F.normalize(user_emb, dim=1), F.normalize(item_emb, dim=1)
    pos_score = (user_emb * item_emb).sum(dim=-1)                  # [B] matched pairs
    pos_score = torch.exp(pos_score / temperature)
    ttl_score = torch.matmul(user_emb, item_emb.transpose(0, 1))   # [B, B] all pairs
    ttl_score = torch.exp(ttl_score / temperature).sum(dim=1)      # [B] denominators
    loss = -torch.log(pos_score / ttl_score)
    return torch.mean(loss)
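
# For row i this computes -log( exp(s_ii / t) / sum_j exp(s_ij / t) ) with
# s_ij = <u_i, v_j> on unit-normalized embeddings, i.e. every other item in
# the batch serves as a negative for user i.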

def InfoNCE(view1, view2, temperature):
    """InfoNCE contrastive loss between two views; other batch rows act as negatives."""
    view1, view2 = F.normalize(view1, dim=1), F.normalize(view2, dim=1)
    pos_score = (view1 * view2).sum(dim=-1)
    pos_score = torch.exp(pos_score / temperature)
    ttl_score = torch.matmul(view1, view2.transpose(0, 1))
    ttl_score = torch.exp(ttl_score / temperature).sum(dim=1)
    cl_loss = -torch.log(pos_score / ttl_score)
    return torch.mean(cl_loss)
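
# Same in-batch softmax as batch_softmax_loss, but applied to two augmented
# views of the same nodes: view1[i] and view2[i] form the positive pair.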

def InfoNCE_FRGCF(view1, view2, temperature):
    """
    FRGCF practical version:
    - sim(·) = dot product (paper-aligned)
    - batch-wise negatives (memory-friendly)
    - mean reduction (stable training)
    """
    # positive scores: matched pairs
    pos_score = torch.sum(view1 * view2, dim=-1)                   # [B]
    pos_score = torch.exp(pos_score / temperature)                 # [B]
    # all pair scores within current batch
    all_score = torch.matmul(view1, view2.transpose(0, 1))         # [B, B]
    all_score = torch.exp(all_score / temperature).sum(dim=1)      # [B]
    cl_loss = -torch.log(pos_score / (all_score + 1e-12))          # [B]
    return torch.mean(cl_loss)
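
# Compared with InfoNCE above, this variant skips L2 normalization (raw
# dot-product similarity, per the docstring) and guards the denominator with
# a small epsilon. Usage sketch (temperature 0.2 is an illustrative value):
#   cl = InfoNCE_FRGCF(user_view1, user_view2, temperature=0.2)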

def kl_divergence(p_logit, q_logit):
    """KL(p || q) computed from logits, averaged over the batch."""
    p = F.softmax(p_logit, dim=-1)
    kl = torch.sum(p * (F.log_softmax(p_logit, dim=-1) - F.log_softmax(q_logit, dim=-1)), dim=1)
    return torch.mean(kl)

def js_divergence(p_logit, q_logit):
    """Symmetrized KL: KL(p || q) + KL(q || p), averaged over the batch.

    Note: despite the name, this is the Jeffreys (symmetric KL) divergence,
    not the Jensen-Shannon divergence, which would use the mixture (p + q) / 2.
    """
    p = F.softmax(p_logit, dim=-1)
    q = F.softmax(q_logit, dim=-1)
    kl_p = torch.sum(p * (F.log_softmax(p_logit, dim=-1) - F.log_softmax(q_logit, dim=-1)), dim=1)
    kl_q = torch.sum(q * (F.log_softmax(q_logit, dim=-1) - F.log_softmax(p_logit, dim=-1)), dim=1)
    return torch.mean(kl_p + kl_q)
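
if __name__ == "__main__":
    # Minimal smoke test (not part of the original file): exercises each loss
    # with random embeddings. Batch size, embedding dim, temperature, and the
    # regularization weight are illustrative assumptions, not FRGCF settings.
    torch.manual_seed(0)
    B, d = 256, 64
    u, i_pos, i_neg = torch.randn(B, d), torch.randn(B, d), torch.randn(B, d)
    print("bpr_loss          :", bpr_loss(u, i_pos, i_neg).item())
    print("l2_reg_loss       :", l2_reg_loss(1e-4, u, i_pos, i_neg).item())
    print("batch_softmax_loss:", batch_softmax_loss(u, i_pos, 0.2).item())
    # pre-normalize the views so the unnormalized dot products in
    # InfoNCE_FRGCF keep exp(score / temperature) in float range
    v1 = F.normalize(u, dim=1)
    v2 = F.normalize(u + 0.1 * torch.randn_like(u), dim=1)
    print("InfoNCE           :", InfoNCE(v1, v2, 0.2).item())
    print("InfoNCE_FRGCF     :", InfoNCE_FRGCF(v1, v2, 0.2).item())
    p_logit, q_logit = torch.randn(B, 10), torch.randn(B, 10)
    print("kl_divergence     :", kl_divergence(p_logit, q_logit).item())
    print("js_divergence     :", js_divergence(p_logit, q_logit).item())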