| | import argparse |
| | import logging |
| | import time |
| |
|
| | import torch |
| | from transformers import AutoTokenizer, AlbertForSequenceClassification |
| |
|
| | import torch_neuronx |
| |
|
# Configure root logging at INFO so the timing/label messages emitted by
# main() are visible when the script runs; module-level logger per convention.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
| |
|
| |
|
def main():
    """Compile an ALBERT classifier with the Neuron backend and time inference.

    Loads the model and tokenizer named by ``--model``, tokenizes a sample
    sentence replicated ``--batch-size`` times, runs one eager reference pass,
    compiles ``model.forward`` with ``torch.compile(backend="neuron")``, then
    logs the warmup (compilation) time, the steady-state run time, and the
    predicted label for the first example in the batch.
    """
    parser = argparse.ArgumentParser(description="Run ALBERT on Neuron")
    parser.add_argument(
        "--model", type=str, default="albert-base-v2", help="ALBERT model name"
    )
    parser.add_argument("--batch-size", type=int, default=1, help="Batch size")
    args = parser.parse_args()

    # Fixed dtype and seed so eager and compiled runs are reproducible.
    torch.set_default_dtype(torch.float32)
    torch.manual_seed(42)

    model = AlbertForSequenceClassification.from_pretrained(
        args.model, torch_dtype=torch.float32, attn_implementation="eager"
    )
    model.eval()

    tokenizer = AutoTokenizer.from_pretrained(args.model)
    # Replicate the sample sentence so --batch-size actually shapes the input.
    # (Previously the flag was parsed but never used; with the default of 1
    # this tokenizes to the same (1, seq_len) tensors as before.)
    sentences = [
        "Hamilton is considered to be the best musical of human history."
    ] * args.batch_size
    inputs = tokenizer(sentences, return_tensors="pt")

    # Eager reference pass before compilation.
    with torch.no_grad():
        _ = model(**inputs).logits

    # Compile only the forward; fullgraph=True fails fast on graph breaks
    # instead of silently falling back to eager execution.
    model.forward = torch.compile(model.forward, backend="neuron", fullgraph=True)

    # The first compiled call triggers tracing/compilation — time it separately.
    warmup_start = time.time()
    with torch.no_grad():
        _ = model(**inputs)
    warmup_time = time.time() - warmup_start

    # Steady-state timed run on the already-compiled graph.
    run_start = time.time()
    with torch.no_grad():
        logits = model(**inputs).logits
    run_time = time.time() - run_start
    # Report the prediction for the first example: argmax over its class
    # logits (indexing [0] keeps this correct when --batch-size > 1).
    predicted_class_id = logits[0].argmax().item()
    predicted_class_label = model.config.id2label[predicted_class_id]

    # Lazy %-style args avoid f-string formatting when the level is disabled.
    logger.info("Warmup: %.2fs, Run: %.4fs", warmup_time, run_time)
    logger.info("Output label: %s", predicted_class_label)
| |
|
| |
|
# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()