#!/usr/bin/env python3
"""Run an MPNet sentence encoder on AWS Neuron via torch.compile.

Loads a Hugging Face MPNet model, compiles its forward pass with the
Neuron backend, and reports warmup vs. steady-state latency for a single
sentence embedding.
"""

import argparse
import logging
import time

import torch
from transformers import AutoTokenizer, MPNetModel

import torch_neuronx  # noqa: F401  # ensures Neuron backend is registered

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def main():
    """Parse CLI args, compile MPNet for Neuron, and benchmark one encode.

    Command-line arguments:
        --model: MPNet checkpoint name on the Hugging Face Hub.
        --text:  sentence to embed (defaults to the original sample).
    """
    parser = argparse.ArgumentParser(description="Run MPNet encoder on Neuron")
    parser.add_argument(
        "--model",
        type=str,
        default="microsoft/mpnet-base",
        help="MPNet model name on Hugging Face Hub",
    )
    # Generalized: the sentence was previously hard-coded; the default
    # preserves the original behavior when --text is not given.
    parser.add_argument(
        "--text",
        type=str,
        default="MPNet is a variant of BERT with permutation language modeling.",
        help="Sentence to embed",
    )
    args = parser.parse_args()

    torch.set_default_dtype(torch.float32)
    torch.manual_seed(42)

    # Load tokenizer & model. Eager attention is used because fused/SDPA
    # attention kernels may not lower cleanly through the Neuron backend.
    tokenizer = AutoTokenizer.from_pretrained(args.model)
    model = MPNetModel.from_pretrained(
        args.model, torch_dtype=torch.float32, attn_implementation="eager"
    ).eval()

    # Tokenize the input sentence.
    inputs = tokenizer(args.text, return_tensors="pt", padding=True, truncation=True)

    # Pre-run once in eager mode to lock input shapes before compilation.
    with torch.no_grad():
        _ = model(**inputs).pooler_output

    # Compile the forward pass for Neuron. fullgraph=True forces a single
    # graph (no Python fallbacks), which Neuron compilation requires.
    model.forward = torch.compile(model.forward, backend="neuron", fullgraph=True)

    # Warmup: the first compiled call triggers Neuron graph compilation,
    # so it is timed separately from the steady-state run.
    # perf_counter is monotonic and the correct clock for interval timing
    # (time.time can jump with system clock adjustments).
    warmup_start = time.perf_counter()
    with torch.no_grad():
        _ = model(**inputs)
    warmup_time = time.perf_counter() - warmup_start

    # Benchmark a single steady-state inference.
    run_start = time.perf_counter()
    with torch.no_grad():
        embeddings = model(**inputs).pooler_output
    run_time = time.perf_counter() - run_start

    logger.info("Warmup: %.2f s, Run: %.4f s", warmup_time, run_time)
    logger.info("Output embedding shape: %s", embeddings.shape)  # [1, hidden]


if __name__ == "__main__":
    main()