# temp_backup/tpt/tpt_utils.py
# Origin: uploaded via huggingface_hub by benzlxs (revision 9e56db5).
import torch
import numpy as np
def select_confident_samples(logits, top):
    """Keep the most confident fraction of predictions.

    Confidence is measured as (low) Shannon entropy of each row's softmax
    distribution. The ``top`` fraction of rows with the smallest entropy
    is retained.

    Args:
        logits: 2-D tensor of raw scores, one row per sample.
        top: fraction in (0, 1] of rows to keep.

    Returns:
        Tuple of (filtered logits, indices of the kept rows).
    """
    probs = logits.softmax(1)
    log_probs = logits.log_softmax(1)
    # Per-row entropy: -sum(p * log p).
    entropy = -(probs * log_probs).sum(1)
    n_keep = int(entropy.size()[0] * top)
    # Ascending sort puts the lowest-entropy (most confident) rows first.
    keep_idx = torch.argsort(entropy, descending=False)[:n_keep]
    return logits[keep_idx], keep_idx
def avg_entropy(outputs):
    """Entropy of the marginal (averaged) predictive distribution.

    The per-sample log-probabilities are averaged in log space
    (logsumexp over samples minus log N), which is numerically stabler
    than averaging probabilities directly, then the Shannon entropy of
    that mean distribution is returned.

    Args:
        outputs: [N, C] tensor of raw logits for N augmented views.

    Returns:
        Scalar tensor: entropy of the averaged distribution.
    """
    # Normalize each row to log-probabilities.
    log_probs = outputs - outputs.logsumexp(dim=-1, keepdim=True)
    n_samples = log_probs.shape[0]
    # log( mean_i p_i ) computed entirely in log space.
    mean_log = torch.logsumexp(log_probs, dim=0) - np.log(n_samples)
    # Guard against -inf before multiplying by exp(...) below.
    floor = torch.finfo(mean_log.dtype).min
    mean_log = torch.clamp(mean_log, min=floor)
    return -(torch.exp(mean_log) * mean_log).sum(dim=-1)
def test_time_tuning(model, inputs, optimizer, scaler, args):
    """Run test-time adaptation: minimize the entropy of averaged predictions
    over ``args.tta_steps`` AMP optimization steps.

    Args:
        model: callable; receives ``inputs`` (or ``(image_feature, pgen_ctx)``
            in the CoCoOp path) and returns per-view logits.
        inputs: augmented batch, or a ``(image_feature, pgen_ctx)`` pair when
            ``args.cocoop`` is set.
        optimizer: optimizer for the tunable prompt parameters; replaced with a
            fresh AdamW over ``pgen_ctx`` in the CoCoOp path.
        scaler: ``torch.cuda.amp.GradScaler`` used for mixed-precision steps.
        args: namespace with ``cocoop``, ``lr``, ``tta_steps``, ``selection_p``.

    Returns:
        The tuned ``pgen_ctx`` tensor when ``args.cocoop``, else ``None``.
    """
    if args.cocoop:
        image_feature, pgen_ctx = inputs  # FIXME image_feature.shape is torch.Size([1, 1024])
        # pgen_ctx.requires_grad = True
        # Detach from the generator graph so only pgen_ctx itself is tuned;
        # cast to fp32 so the new optimizer state is full precision.
        pgen_ctx = pgen_ctx.detach().to(torch.float32).requires_grad_(True)
        # NOTE: the passed-in optimizer is discarded here by design.
        optimizer = torch.optim.AdamW([pgen_ctx], args.lr)
    selected_idx = None
    for j in range(args.tta_steps):
        with torch.cuda.amp.autocast():
            if args.cocoop:
                output = model((image_feature, pgen_ctx))
            else:
                output = model(inputs)
            # Confident-view selection is done once, on the first step, and the
            # same indices are reused for all remaining steps.
            if selected_idx is not None:
                output = output[selected_idx]
            else:
                output, selected_idx = select_confident_samples(output, args.selection_p)
            # Marginal-entropy loss over the selected views.
            loss = avg_entropy(output)
        optimizer.zero_grad()
        # compute gradient and do SGD step
        scaler.scale(loss).backward()
        # Unscales the gradients of optimizer's assigned params in-place
        scaler.step(optimizer)
        scaler.update()
    if args.cocoop:
        return pgen_ctx
    return