| """DINOv3 ViT-H/16+ Tagger β Fully Standalone Inference Script |
| |
| Zero dependency on transformers, trainer code, or any internal module. |
| Only requires: torch, torchvision, safetensors, Pillow, requests. |
| |
| pip install torch torchvision safetensors Pillow requests |
| |
| The DINOv3 ViT-H/16+ architecture is implemented directly here, with weights |
| loaded from a .safetensors checkpoint. The state-dict key names match the |
| HuggingFace transformers layout exactly so checkpoints are interchangeable. |
| |
| Usage |
| ----- |
| # Single image, top-30 tags: |
| python inference_tagger_standalone.py \ |
| --checkpoint tagger_checkpoints/2026-03-28_22-57-47.safetensors \ |
| --vocab tagger_vocab.json \ |
| --images photo.jpg \ |
| --topk 30 |
| |
| # URL input: |
| python inference_tagger_standalone.py \ |
| --checkpoint tagger_checkpoints/2026-03-28_22-57-47.safetensors \ |
| --vocab tagger_vocab.json \ |
| --images https://example.com/photo.jpg |
| |
| # Threshold instead of top-k: |
| python inference_tagger_standalone.py ... --threshold 0.4 |
| |
| # Pipe-friendly comma-separated tags (one line per image): |
| python inference_tagger_standalone.py ... --format tags |
| |
| # JSON output: |
| python inference_tagger_standalone.py ... --format json |
| |
| Output formats (--format) |
| ------------------------- |
pretty (default) — human-readable table with scores
tags — comma-separated tag string, one line per image
json — JSON array of {file, tags: [{tag, score}]} objects
| """ |
|
|
| from __future__ import annotations |
|
|
| import argparse |
| import json |
| import math |
| import sys |
| from functools import lru_cache |
| from io import BytesIO |
| from pathlib import Path |
|
|
| import requests |
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| import torchvision.transforms.v2 as v2 |
| from PIL import Image |
| from safetensors.torch import load_file |
|
|
|
|
| |
| |
| |
| |
|
|
# --- DINOv3 ViT-H/16+ architecture constants (fixed; must match the checkpoint) ---
D_MODEL = 1280                 # transformer hidden size
N_HEADS = 20                   # attention heads per block
HEAD_DIM = D_MODEL // N_HEADS  # 64 channels per head
N_LAYERS = 32                  # number of transformer blocks
D_FFN = 5120                   # gated-MLP inner width
N_REGISTERS = 4                # learned register tokens that follow CLS
PATCH_SIZE = 16                # square patch edge in pixels
ROPE_THETA = 100.0             # RoPE frequency base (see _build_rope)
ROPE_RESCALE = 2.0             # coordinate rescale applied in _patch_coords_cached
LN_EPS = 1e-5                  # LayerNorm epsilon
LAYERSCALE = 1.0               # LayerScale init value (overwritten by checkpoint weights)
|
|
|
|
| |
| |
| |
|
|
@lru_cache(maxsize=32)
def _patch_coords_cached(h: int, w: int, device_str: str) -> torch.Tensor:
    """Normalised [-1,+1] patch-centre coordinates (float32, cached)."""
    dev = torch.device(device_str)
    # Patch centres at half-integer grid positions, normalised to [0, 1].
    rows = (torch.arange(h, dtype=torch.float32, device=dev) + 0.5) / h
    cols = (torch.arange(w, dtype=torch.float32, device=dev) + 0.5) / w
    grid_y, grid_x = torch.meshgrid(rows, cols, indexing="ij")
    flat = torch.stack((grid_y, grid_x), dim=-1).reshape(h * w, 2)
    # Map [0, 1] to [-1, +1], then apply the fixed RoPE rescale factor.
    return (flat * 2.0 - 1.0) * ROPE_RESCALE
|
|
|
|
def _build_rope(h_patches: int, w_patches: int,
                dtype: torch.dtype, device: torch.device):
    """Return (cos, sin) of shape [1, 1, h*w, HEAD_DIM] for broadcasting."""
    # Cached patch-centre coordinates, shape [h*w, 2] = (y, x) per patch.
    coords = _patch_coords_cached(h_patches, w_patches, str(device))
    # HEAD_DIM // 4 inverse frequencies: ROPE_THETA**-t for t in [0, 1) step 4/HEAD_DIM.
    inv_freq = 1.0 / (ROPE_THETA ** torch.arange(
        0, 1, 4 / HEAD_DIM, dtype=torch.float32, device=device))
    # Outer product over (patch, axis, frequency) -> [h*w, 2, HEAD_DIM//4].
    angles = 2 * math.pi * coords[:, :, None] * inv_freq[None, None, :]
    # Flatten (axis, freq) into HEAD_DIM//2, then duplicate along the last dim so
    # both halves carry matching angles for the rotate-half trick in _apply_rope.
    angles = angles.flatten(1, 2).tile(2)
    # Leading [1, 1, ...] dims broadcast over (batch, heads).
    cos = torch.cos(angles).to(dtype).unsqueeze(0).unsqueeze(0)
    sin = torch.sin(angles).to(dtype).unsqueeze(0).unsqueeze(0)
    return cos, sin
|
|
|
|
| def _rotate_half(x: torch.Tensor) -> torch.Tensor: |
| h = x.shape[-1] // 2 |
| return torch.cat((-x[..., h:], x[..., :h]), dim=-1) |
|
|
|
|
def _apply_rope(q: torch.Tensor, k: torch.Tensor,
                cos: torch.Tensor, sin: torch.Tensor):
    """Rotary-embed the patch tokens of q and k; CLS + register tokens pass through."""
    prefix = 1 + N_REGISTERS

    def _rot(t: torch.Tensor) -> torch.Tensor:
        head, patches = t[..., :prefix, :], t[..., prefix:, :]
        rotated = patches * cos + _rotate_half(patches) * sin
        return torch.cat([head, rotated], dim=-2)

    return _rot(q), _rot(k)
|
|
|
|
| |
| |
| |
|
|
class _Attention(nn.Module):
    """Multi-head self-attention with RoPE applied to patch tokens.

    Attribute names (q_proj/k_proj/v_proj/o_proj) follow the HF state-dict layout.
    """

    def __init__(self):
        super().__init__()
        self.q_proj = nn.Linear(D_MODEL, D_MODEL, bias=True)
        self.k_proj = nn.Linear(D_MODEL, D_MODEL, bias=False)
        self.v_proj = nn.Linear(D_MODEL, D_MODEL, bias=True)
        self.o_proj = nn.Linear(D_MODEL, D_MODEL, bias=True)

    def forward(self, x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
        batch, seq, _ = x.shape

        def _heads(proj: nn.Linear) -> torch.Tensor:
            # [B, S, D] -> [B, H, S, HEAD_DIM]
            return proj(x).view(batch, seq, N_HEADS, HEAD_DIM).transpose(1, 2)

        q, k = _apply_rope(_heads(self.q_proj), _heads(self.k_proj), cos, sin)
        ctx = F.scaled_dot_product_attention(q, k, _heads(self.v_proj),
                                             scale=HEAD_DIM ** -0.5)
        merged = ctx.transpose(1, 2).reshape(batch, seq, D_MODEL)
        return self.o_proj(merged)
|
|
|
|
class _GatedMLP(nn.Module):
    """SwiGLU-style feed-forward: down(silu(gate(x)) * up(x)); HF key layout."""

    def __init__(self):
        super().__init__()
        self.gate_proj = nn.Linear(D_MODEL, D_FFN, bias=True)
        self.up_proj = nn.Linear(D_MODEL, D_FFN, bias=True)
        self.down_proj = nn.Linear(D_FFN, D_MODEL, bias=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gated = F.silu(self.gate_proj(x)) * self.up_proj(x)
        return self.down_proj(gated)
|
|
|
|
class _Block(nn.Module):
    """Pre-norm transformer block with LayerScale-weighted residual branches."""

    def __init__(self):
        super().__init__()
        self.norm1 = nn.LayerNorm(D_MODEL, eps=LN_EPS)
        self.attention = _Attention()
        self.layer_scale1 = nn.Parameter(torch.full((D_MODEL,), LAYERSCALE))
        self.norm2 = nn.LayerNorm(D_MODEL, eps=LN_EPS)
        self.mlp = _GatedMLP()
        self.layer_scale2 = nn.Parameter(torch.full((D_MODEL,), LAYERSCALE))

    def forward(self, x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
        attn_out = self.attention(self.norm1(x), cos, sin)
        x = x + attn_out * self.layer_scale1
        ffn_out = self.mlp(self.norm2(x))
        return x + ffn_out * self.layer_scale2
|
|
|
|
| |
| |
| |
|
|
class DINOv3ViTH(nn.Module):
    """DINOv3 ViT-H/16+ backbone.

    Accepts any H, W that are multiples of 16.
    Returns last_hidden_state [B, 1+R+P, D_MODEL].
    Token layout: [CLS, reg_0..reg_3, patch_0..patch_N].

    State-dict keys are intentionally identical to the HuggingFace
    transformers layout so .safetensors checkpoints load without remapping.
    """

    def __init__(self):
        super().__init__()
        self.embeddings = _Embeddings()
        self.layer = nn.ModuleList([_Block() for _ in range(N_LAYERS)])
        self.norm = nn.LayerNorm(D_MODEL, eps=LN_EPS)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata,
                              strict, missing_keys, unexpected_keys, error_msgs):
        # Hook that adapts checkpoint keys in place before the normal load.
        # 1) Checkpoints store each LayerScale as a submodule parameter
        #    "...layer_scaleN.lambda1"; here layer_scale{1,2} are plain
        #    nn.Parameters, so the ".lambda1" suffix is stripped.
        for k in list(state_dict.keys()):
            if k.startswith(prefix) and ".layer_scale" in k and k.endswith(".lambda1"):
                new_k = k[:-len(".lambda1")]
                state_dict[new_k] = state_dict.pop(k)
        # 2) RoPE tables are recomputed per input resolution (see _build_rope),
        #    so any persisted "rope_embeddings" entries are dropped rather than
        #    surfacing as unexpected keys.
        for k in list(state_dict.keys()):
            if k.startswith(prefix) and "rope_embeddings" in k:
                state_dict.pop(k)
        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        # pixel_values: [B, 3, H, W] with H and W multiples of PATCH_SIZE.
        B, _, H, W = pixel_values.shape
        x = self.embeddings(pixel_values)

        # RoPE tables depend only on the patch-grid size, dtype and device;
        # _patch_coords_cached memoises the per-resolution coordinate grid.
        h_p, w_p = H // PATCH_SIZE, W // PATCH_SIZE
        cos, sin = _build_rope(h_p, w_p, x.dtype, pixel_values.device)

        for block in self.layer:
            x = block(x, cos, sin)

        # Final LayerNorm over the full token sequence.
        return self.norm(x)
|
|
|
|
class _Embeddings(nn.Module):
    """Patch + CLS + register token embeddings.

    Key names match HF: embeddings.cls_token, embeddings.register_tokens,
    embeddings.patch_embeddings.{weight,bias}.
    """

    def __init__(self):
        super().__init__()
        self.cls_token = nn.Parameter(torch.empty(1, 1, D_MODEL))
        self.mask_token = nn.Parameter(torch.zeros(1, 1, D_MODEL))
        self.register_tokens = nn.Parameter(torch.empty(1, N_REGISTERS, D_MODEL))
        self.patch_embeddings = nn.Conv2d(3, D_MODEL, kernel_size=PATCH_SIZE, stride=PATCH_SIZE)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        batch = pixel_values.shape[0]
        # Cast input to the conv stem's dtype, tokenise, and move channels last:
        # [B, D, h, w] -> [B, h*w, D].
        weight_dtype = self.patch_embeddings.weight.dtype
        patch_tokens = self.patch_embeddings(pixel_values.to(weight_dtype))
        patch_tokens = patch_tokens.flatten(2).transpose(1, 2)
        prefix = [self.cls_token.expand(batch, -1, -1),
                  self.register_tokens.expand(batch, -1, -1)]
        return torch.cat(prefix + [patch_tokens], dim=1)
|
|
|
|
| |
| |
| |
|
|
class DINOv3Tagger(nn.Module):
    """DINOv3 ViT-H/16+ backbone + linear projection head.

    features = concat(CLS, reg_0..reg_3) -> [B, (1+R)*D]
    projection: Linear -> [B, num_tags]
    """

    def __init__(self, num_tags: int, projection_bias: bool = False):
        super().__init__()
        self.backbone = DINOv3ViTH()
        self.projection = nn.Linear((1 + N_REGISTERS) * D_MODEL, num_tags, bias=projection_bias)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        """Return raw multi-label logits [B, num_tags]."""
        hidden = self.backbone(pixel_values)
        # CLS followed by the N_REGISTERS register tokens, flattened into one
        # feature vector (identical to concat(CLS, regs) along the last dim).
        summary = hidden[:, :1 + N_REGISTERS, :].flatten(1)
        # Upcast so the head always projects in float32 regardless of backbone dtype.
        return self.projection(summary.float())
|
|
|
|
| |
| |
| |
|
|
# Standard ImageNet channel statistics, consumed by v2.Normalize in preprocess_image.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]
|
|
|
|
| def _snap(x: int, m: int) -> int: |
| return max(m, (x // m) * m) |
|
|
|
|
def _open_image(source) -> Image.Image:
    """Open a local path or http(s) URL and return an RGB PIL image."""
    location = str(source)
    if location.startswith(("http://", "https://")):
        resp = requests.get(location, timeout=30)
        resp.raise_for_status()
        return Image.open(BytesIO(resp.content)).convert("RGB")
    return Image.open(source).convert("RGB")
|
|
|
|
def preprocess_image(source, max_size: int = 1024) -> torch.Tensor:
    """Load and preprocess an image -> [1, 3, H, W] float32, ImageNet-normalised."""
    img = _open_image(source)
    width, height = img.size
    # Downscale (never upscale) so the long edge fits max_size, then snap both
    # edges onto the PATCH_SIZE grid the backbone requires.
    ratio = min(1.0, max_size / max(width, height))
    target_h = _snap(round(height * ratio), PATCH_SIZE)
    target_w = _snap(round(width * ratio), PATCH_SIZE)
    pipeline = v2.Compose([
        v2.Resize((target_h, target_w), interpolation=v2.InterpolationMode.LANCZOS),
        v2.ToImage(),
        v2.ToDtype(torch.float32, scale=True),
        v2.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
    ])
    return pipeline(img).unsqueeze(0)
|
|
|
|
| |
| |
| |
|
|
class Tagger:
    """Inference wrapper for DINOv3Tagger (ViT-H/16+).

    Parameters
    ----------
    checkpoint_path : str
        Path to a .safetensors or .pth checkpoint saved by TaggerTrainer.
    vocab_path : str
        Path to tagger_vocab.json ({"idx2tag": [...]}).
    device : str
        "cuda", "cuda:0", "cpu", etc.
    dtype : torch.dtype
        bfloat16 recommended on Ampere+; float16 for older GPUs; float32 for CPU.
    max_size : int
        Long-edge cap in pixels before feeding to the model.
    """

    def __init__(
        self,
        checkpoint_path: str,
        vocab_path: str,
        device: str = "cuda",
        dtype: torch.dtype = torch.bfloat16,
        max_size: int = 1024,
    ):
        # Fall back to CPU when a non-CPU device is requested but CUDA is absent.
        self.device = torch.device(device if torch.cuda.is_available() or device == "cpu" else "cpu")
        self.dtype = dtype
        self.max_size = max_size

        # Vocabulary: index -> tag name, defining the projection head's output order.
        with open(vocab_path) as f:
            data = json.load(f)
        self.idx2tag: list[str] = data["idx2tag"]
        self.num_tags = len(self.idx2tag)
        print(f"[Tagger] Vocabulary: {self.num_tags:,} tags")

        self.model = DINOv3Tagger(num_tags=self.num_tags)

        print(f"[Tagger] Loading checkpoint: {checkpoint_path}")
        if checkpoint_path.endswith((".safetensors", ".sft")):
            sd = load_file(checkpoint_path, device=str(self.device))
        else:
            # NOTE(review): torch.load is pickle-based and can execute arbitrary
            # code -- only load trusted checkpoints (consider weights_only=True
            # for plain state dicts; confirm TaggerTrainer output is compatible).
            sd = torch.load(checkpoint_path, map_location=str(self.device))

        # assign=True adopts the checkpoint tensors directly instead of copying
        # into freshly-initialised parameters; strict=False lets the mismatches
        # surface as the missing/unexpected lists reported below.
        missing, unexpected = self.model.load_state_dict(sd, strict=False, assign=True)
        if missing:
            print(f"[Tagger] Missing keys ({len(missing)}): {missing[:5]}{'...' if len(missing) > 5 else ''}")
        if unexpected:
            print(f"[Tagger] Unexpected keys ({len(unexpected)}): {unexpected[:5]}{'...' if len(unexpected) > 5 else ''}")

        # Only the backbone is cast to the reduced-precision dtype; the
        # projection head keeps its loaded dtype, and DINOv3Tagger.forward
        # upcasts features to float32 before projecting.
        self.model.backbone = self.model.backbone.to(dtype=dtype)
        self.model = self.model.to(self.device)
        self.model.eval()
        print(f"[Tagger] Ready on {self.device} ({dtype})")

    @torch.no_grad()
    def predict(self, image, topk: int | None = 30,
                threshold: float | None = None) -> list[tuple[str, float]]:
        """Tag a single image (local path or URL).
        Specify either topk OR threshold. Returns [(tag, score), ...] desc."""
        if topk is None and threshold is None:
            topk = 30

        pv = preprocess_image(image, max_size=self.max_size).to(self.device)
        # Autocast handles the float32-input / reduced-precision-backbone mix.
        with torch.autocast(device_type=self.device.type, dtype=self.dtype):
            logits = self.model(pv)[0]
        # Independent per-tag probabilities (multi-label sigmoid head).
        scores = torch.sigmoid(logits.float())

        if topk is not None:
            # topk returns values already sorted in descending order.
            values, indices = scores.topk(min(topk, self.num_tags))
        else:
            assert threshold is not None
            indices = (scores >= threshold).nonzero(as_tuple=True)[0]
            values = scores[indices]
            # Thresholded hits come back in index order; sort by score desc.
            order = values.argsort(descending=True)
            indices, values = indices[order], values[order]

        return [(self.idx2tag[i], float(v)) for i, v in zip(indices.tolist(), values.tolist())]

    @torch.no_grad()
    def predict_batch(self, images, topk: int | None = 30,
                      threshold: float | None = None) -> list[list[tuple[str, float]]]:
        """Tag multiple images (processed individually for mixed resolutions)."""
        return [self.predict(img, topk=topk, threshold=threshold) for img in images]
|
|
|
|
| |
| |
| |
|
|
| def _fmt_pretty(path: str, results) -> str: |
| lines = [f"\n{'β' * 60}", f" {path}", f"{'β' * 60}"] |
| for rank, (tag, score) in enumerate(results, 1): |
| bar = "β" * int(score * 20) |
| lines.append(f" {rank:>3}. {score:.3f} {bar:<20} {tag}") |
| return "\n".join(lines) |
|
|
| def _fmt_tags(results) -> str: |
| return ", ".join(tag for tag, _ in results) |
|
|
| def _fmt_json(path: str, results) -> dict: |
| return {"file": path, "tags": [{"tag": t, "score": round(s, 4)} for t, s in results]} |
|
|
|
|
| |
| |
| |
|
|
def main():
    """CLI entry point: parse arguments, tag each image, print in the chosen format."""
    parser = argparse.ArgumentParser(
        description="DINOv3 ViT-H/16+ tagger inference (standalone, no transformers dep)",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("--checkpoint", required=True, help="Path to .safetensors or .pth checkpoint")
    parser.add_argument("--vocab", required=True, help="Path to tagger_vocab.json")
    parser.add_argument("--images", nargs="+", required=True, help="Image paths and/or http(s) URLs")
    parser.add_argument("--device", default="cuda", help="Device: cuda, cuda:0, cpu, β¦ (default: cuda)")
    parser.add_argument("--max-size", type=int, default=1024,
                        help="Long-edge cap in pixels, multiple of 16 (default: 1024)")

    # topk and threshold are mutually exclusive selection modes.
    mode = parser.add_mutually_exclusive_group()
    mode.add_argument("--topk", type=int, default=30, help="Return top-k tags (default: 30)")
    mode.add_argument("--threshold", type=float, help="Return all tags with score >= threshold")

    parser.add_argument("--format", choices=["pretty", "tags", "json"],
                        default="pretty", help="Output format (default: pretty)")
    args = parser.parse_args()

    tagger = Tagger(checkpoint_path=args.checkpoint, vocab_path=args.vocab,
                    device=args.device, max_size=args.max_size)

    # Compare against None, not truthiness: "--threshold 0.0" is a legitimate
    # request (return every tag) and must not silently fall back to top-k.
    if args.threshold is not None:
        topk, threshold = None, args.threshold
    else:
        topk, threshold = args.topk, None
    json_out = []

    for src in args.images:
        is_url = str(src).startswith("http://") or str(src).startswith("https://")
        if not is_url and not Path(src).exists():
            # Skip missing local files with a warning instead of aborting the run.
            print(f"[warning] File not found: {src}", file=sys.stderr)
            continue
        results = tagger.predict(src, topk=topk, threshold=threshold)
        if args.format == "pretty":
            print(_fmt_pretty(src, results))
        elif args.format == "tags":
            print(_fmt_tags(results))
        elif args.format == "json":
            json_out.append(_fmt_json(src, results))

    # JSON mode accumulates all images and emits a single array at the end.
    if args.format == "json":
        print(json.dumps(json_out, indent=2, ensure_ascii=False))


if __name__ == "__main__":
    main()
|
|