#!/usr/bin/env python3
"""Vision Transformer (ViT) image classification on AWS Neuron.

Compiles the full model graph with constant input shapes: one eager
pre-run locks the shapes, then ``torch.compile`` (Neuron backend) builds
the graph, a warmup call triggers compilation, and a final timed call
measures steady-state latency before reporting the top-1 ImageNet label.
"""
import argparse
import logging
import time

import torch
from transformers import AutoImageProcessor, ViTForImageClassification
from datasets import load_dataset
import torch_neuronx  # guarantees Neuron backend  (import side effect registers it)

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def main() -> None:
    """Run single-image ViT classification on Neuron and log timings + label."""
    parser = argparse.ArgumentParser(description="ViT on Neuron (full graph)")
    parser.add_argument("--model", default="google/vit-base-patch16-224")
    args = parser.parse_args()

    # Determinism + explicit fp32 so compiled and eager runs agree.
    torch.manual_seed(42)
    torch.set_default_dtype(torch.float32)

    # Load one sample image from the demo dataset.
    dataset = load_dataset("huggingface/cats-image")
    image = dataset["test"]["image"][0]

    # Load processor & model in eval mode (no dropout, no grad tracking needed).
    processor = AutoImageProcessor.from_pretrained(args.model)
    model = ViTForImageClassification.from_pretrained(
        args.model, torch_dtype=torch.float32, attn_implementation="eager"
    ).eval()

    # Preprocess to the model's expected tensor format.
    inputs = processor(images=image, return_tensors="pt")

    # Eager pre-run to lock input shapes before compiling the full graph.
    with torch.no_grad():
        _ = model(**inputs).logits

    # Compile the full graph; fullgraph=True fails fast on graph breaks.
    model.forward = torch.compile(model.forward, backend="neuron", fullgraph=True)

    # Warmup call: first invocation triggers the actual Neuron compilation.
    # perf_counter is monotonic and high-resolution, unlike time.time().
    warmup_start = time.perf_counter()
    with torch.no_grad():
        _ = model(**inputs)
    logger.info("Warmup: %.3f s", time.perf_counter() - warmup_start)

    # Steady-state timed run.
    run_start = time.perf_counter()
    with torch.no_grad():
        logits = model(**inputs).logits
    run_time = time.perf_counter() - run_start

    # Top-1 ImageNet class.
    predicted_class_idx = logits.argmax(-1).item()
    predicted_label = model.config.id2label[predicted_class_idx]
    logger.info("Run: %.3f s", run_time)
    logger.info("Predicted label: %s", predicted_label)


if __name__ == "__main__":
    main()