| | import torch |
| | import numpy as np |
| |
|
def select_confident_samples(logits, top):
    """Filter a batch of logits down to its most confident rows.

    Confidence is measured as the Shannon entropy of each row's softmax
    distribution; the fraction ``top`` of rows with the *lowest* entropy
    is kept.

    Args:
        logits: 2-D tensor of shape (batch, num_classes).
        top: fraction in (0, 1] of the batch to retain.

    Returns:
        Tuple of (selected logits, index tensor of the selected rows).
    """
    probs = logits.softmax(dim=1)
    # Per-row entropy: H = -sum(p * log p), computed via log_softmax for stability.
    entropy_per_row = -(probs * logits.log_softmax(dim=1)).sum(dim=1)
    n_keep = int(entropy_per_row.size(0) * top)
    # Ascending sort puts the lowest-entropy (most confident) rows first.
    idx = entropy_per_row.argsort(descending=False)[:n_keep]
    return logits[idx], idx
| |
|
def avg_entropy(outputs):
    """Entropy of the averaged predictive distribution over a batch.

    The rows of ``outputs`` are treated as logits of per-view predictions;
    their probabilities are averaged (in log space) and the Shannon entropy
    of that marginal distribution is returned.

    Args:
        outputs: 2-D tensor of shape (batch, num_classes) of raw logits.

    Returns:
        Scalar tensor: -sum(p_avg * log p_avg) over the class dimension.
    """
    # Normalize each row into log-probabilities: logits - logsumexp.
    log_probs = outputs - outputs.logsumexp(dim=-1, keepdim=True)
    # Log of the mean probability across the batch:
    # logsumexp over rows minus log(batch size).
    marginal = log_probs.logsumexp(dim=0) - np.log(log_probs.shape[0])
    # Guard against -inf so that p * log p evaluates to a finite value.
    marginal = marginal.clamp(min=torch.finfo(marginal.dtype).min)
    return -(marginal * marginal.exp()).sum(dim=-1)
| |
|
def test_time_tuning(model, inputs, optimizer, scaler, args):
    """Adapt the model at test time by minimizing marginal entropy.

    Runs ``args.tta_steps`` optimization steps under CUDA AMP autocast,
    using the entropy of the averaged prediction over the confident
    subset of augmented views as the loss.

    Args:
        model: callable producing logits; under CoCoOp it is called with a
            ``(image_feature, pgen_ctx)`` tuple instead of raw inputs.
        inputs: either the model's input batch, or (for CoCoOp) a
            ``(image_feature, pgen_ctx)`` tuple.
        optimizer: optimizer over the tunable prompt parameters.
            NOTE(review): under ``args.cocoop`` this argument is discarded
            and replaced by a fresh AdamW over ``pgen_ctx`` — confirm the
            caller does not rely on the passed-in optimizer's state.
        scaler: a ``torch.cuda.amp.GradScaler`` used for mixed-precision
            gradient scaling.
        args: config namespace; reads ``cocoop``, ``lr``, ``tta_steps``,
            ``selection_p``.

    Returns:
        The tuned ``pgen_ctx`` tensor when ``args.cocoop``; otherwise
        ``None`` (tuning happens via the optimizer's in-place updates).
    """
    if args.cocoop:
        image_feature, pgen_ctx = inputs

        # Make the prompt-generator context a fresh fp32 leaf tensor so it
        # can be optimized independently of the frozen model.
        pgen_ctx = pgen_ctx.detach().to(torch.float32).requires_grad_(True)
        optimizer = torch.optim.AdamW([pgen_ctx], args.lr)

    selected_idx = None
    for j in range(args.tta_steps):
        with torch.cuda.amp.autocast():
            if args.cocoop:
                output = model((image_feature, pgen_ctx))
            else:
                output = model(inputs)

            # Confident views are selected once on the first step and the
            # same indices are reused on subsequent steps.
            if selected_idx is not None:
                output = output[selected_idx]
            else:
                output, selected_idx = select_confident_samples(output, args.selection_p)

            # Marginal-entropy objective over the retained views.
            loss = avg_entropy(output)

        optimizer.zero_grad()
        # Scaled backward + step/update is the standard GradScaler sequence
        # for AMP training.
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()
    if args.cocoop:
        return pgen_ctx

    return
| |
|