#!/usr/bin/env python3
# RoBERTa text classification on AWS Neuron: full-graph, ahead-of-time compile.

import argparse
import logging
import time

import torch
import torch_neuronx  # AWS Neuron SDK compilation API for PyTorch
from transformers import AutoTokenizer, RobertaForSequenceClassification

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(description="RoBERTa on Neuron (full graph)")
    parser.add_argument(
        "--model",
        type=str,
        default="roberta-base",
        help="RoBERTa model name on the Hugging Face Hub",
    )
    parser.add_argument("--batch-size", type=int, default=1, help="Batch size")
    args = parser.parse_args()

    torch.set_default_dtype(torch.float32)
    torch.manual_seed(42)

    # Load tokenizer and model. torchscript=True makes the model return plain
    # tuples instead of ModelOutput objects, which tracing requires; the eager
    # attention implementation keeps the graph trace-friendly.
    tokenizer = AutoTokenizer.from_pretrained(args.model)
    model = RobertaForSequenceClassification.from_pretrained(
        args.model,
        torch_dtype=torch.float32,
        attn_implementation="eager",
        torchscript=True,
    ).eval()

    # Tokenize a sample batch. Neuron graphs are compiled for fixed input
    # shapes, so pad every sequence to the same length.
    text = "RoBERTa is a robustly optimized BERT pretraining approach."
    inputs = tokenizer(
        [text] * args.batch_size,
        return_tensors="pt",
        padding="max_length",
        max_length=128,
        truncation=True,
    )
    example = (inputs["input_ids"], inputs["attention_mask"])

    # Pre-run once on CPU to lock shapes and surface errors before compiling.
    with torch.no_grad():
        _ = model(*example)[0]

    # Compile the full graph ahead of time. torch_neuronx.trace records the
    # model against the example inputs and returns a module that runs on a
    # NeuronCore; compilation happens here, not on the first inference call.
    compile_start = time.time()
    model_neuron = torch_neuronx.trace(model, example)
    compile_time = time.time() - compile_start

    # Warmup: the first call pays the one-time device-load overhead.
    warmup_start = time.time()
    with torch.no_grad():
        _ = model_neuron(*example)
    warmup_time = time.time() - warmup_start

    # Benchmark run.
    run_start = time.time()
    with torch.no_grad():
        logits = model_neuron(*example)[0]
    run_time = time.time() - run_start

    # Top-1 label for the first item in the batch.
    predicted_class_id = logits[0].argmax().item()
    predicted_label = model.config.id2label[predicted_class_id]

    logger.info(
        "Compile: %.2f s, Warmup: %.2f s, Run: %.4f s",
        compile_time, warmup_time, run_time,
    )
    logger.info("Predicted label: %s", predicted_label)


if __name__ == "__main__":
    main()
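
# A possible invocation on an Inf2/Trn1 instance with the Neuron SDK installed
# (the file name below is a placeholder for wherever this script is saved):
#
#   python roberta_neuron.py --model roberta-base --batch-size 4
#
# The traced module returned by torch_neuronx.trace is a TorchScript artifact,
# so a sketch like the following (inside main(), after tracing) would let it be
# saved once and reloaded later without recompiling; "roberta_neuron.pt" is an
# assumed output path:
#
#   torch.jit.save(model_neuron, "roberta_neuron.pt")
#   model_neuron = torch.jit.load("roberta_neuron.pt")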