|
|
import argparse |
|
|
import logging |
|
|
import time |
|
|
|
|
|
import torch |
|
|
from transformers import AutoTokenizer, DebertaForSequenceClassification |
|
|
import torch_neuronx |
|
|
|
|
|
# Configure root logging once at import time so timing/prediction output is visible.
logging.basicConfig(level=logging.INFO)


# Module-level logger named after this module (standard PEP 282 convention).
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
def main():
    """Run DeBERTa sequence classification through torch.compile's Neuron backend.

    Loads the requested checkpoint, runs an eager CPU reference forward pass,
    compiles ``model.forward`` with the ``neuron`` backend, and logs warmup
    (compile) time, steady-state run time, the max logit drift vs. the eager
    baseline, and the predicted label.

    CLI flags:
        --model       Hugging Face Hub model name (default microsoft/deberta-base).
        --batch-size  Number of copies of the sample text per batch (default 1).
    """
    parser = argparse.ArgumentParser(
        description="DeBERTa sequence-classification with torch.compile on Neuron"
    )
    parser.add_argument(
        "--model",
        type=str,
        default="microsoft/deberta-base",
        help="DeBERTa model name on Hugging Face Hub",
    )
    parser.add_argument("--batch-size", type=int, default=1, help="Batch size")
    args = parser.parse_args()

    torch.set_default_dtype(torch.float32)
    torch.manual_seed(42)

    tokenizer = AutoTokenizer.from_pretrained(args.model)
    # Eager attention keeps the graph simple for the Neuron compiler.
    model = DebertaForSequenceClassification.from_pretrained(
        args.model, torch_dtype=torch.float32, attn_implementation="eager"
    )
    model.eval()

    text = "DeBERTa improves BERT and RoBERTa using disentangled attention."
    # BUG FIX: --batch-size was parsed but never used. Replicate the sample so
    # the requested batch size actually reaches the model; the default of 1
    # reproduces the original single-sample behavior exactly.
    texts = [text] * args.batch_size
    inputs = tokenizer(texts, return_tensors="pt", padding=True, truncation=True)

    # Eager CPU reference pass. The original computed this and threw the result
    # away; keep it as a numeric baseline to validate the compiled output.
    with torch.no_grad():
        ref_logits = model(**inputs).logits

    # NOTE(review): the TorchRuntimeError recorded at the bottom of this file
    # reports parameters on cpu while activations are on neuron:0 during fake
    # tensor propagation. That suggests the model (or part of it) must be
    # placed consistently before compiling — confirm device placement against
    # the torch-neuronx torch.compile documentation.
    model.forward = torch.compile(model.forward, backend="neuron", fullgraph=False)

    # First call triggers tracing + Neuron compilation; time it separately.
    warmup_start = time.time()
    with torch.no_grad():
        _ = model(**inputs)
    warmup_time = time.time() - warmup_start

    # Steady-state timing: the compiled artifact is reused here.
    run_start = time.time()
    with torch.no_grad():
        logits = model(**inputs).logits
    run_time = time.time() - run_start

    # BUG FIX: logits.argmax() with no dim flattens over (batch, num_labels)
    # and is only accidentally correct at batch size 1. Take the argmax over
    # the label dimension of the first sample instead.
    predicted_class_id = logits[0].argmax(dim=-1).item()
    predicted_label = model.config.id2label[predicted_class_id]

    max_diff = (ref_logits - logits).abs().max().item()
    logger.info("Max |eager - compiled| logit diff: %.3e", max_diff)
    logger.info("Warmup: %.2f s, Run: %.4f s", warmup_time, run_time)
    logger.info("Predicted label: %s", predicted_label)
|
|
|
|
|
|
|
|
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":


    main()
|
|
|
|
|
""" |
|
|
torch._dynamo.exc.TorchRuntimeError: Dynamo failed to run FX node with fake tensors: call_function <built-in function linear>(*(FakeTensor(..., device='neuron:0', size=(1, 18, 768)), Parameter(FakeTensor(..., size=(2304, 768), requires_grad=True)), None), **{}): got RuntimeError('Unhandled FakeTensor Device Propagation for aten.mm.default, found two different devices neuron:0, cpu') |
|
|
""" |