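"""Reproduction script: DeBERTa sequence classification compiled with
torch.compile(backend="neuron").

Compilation currently fails during Dynamo fake-tensor propagation with a
device-mismatch error; the full traceback is quoted at the bottom of this
file.

Usage (script name is illustrative):

    python deberta_neuron_compile.py --model microsoft/deberta-base --batch-size 1
"""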
import argparse
import logging
import time

import torch
from transformers import AutoTokenizer, DebertaForSequenceClassification
import torch_neuronx  # noqa: F401  # registers the Neuron backend for torch.compile

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="DeBERTa sequence-classification with torch.compile on Neuron"
    )
    parser.add_argument(
        "--model",
        type=str,
        default="microsoft/deberta-base",
        help="DeBERTa model name on Hugging Face Hub",
    )
    parser.add_argument("--batch-size", type=int, default=1, help="Batch size")
    args = parser.parse_args()

    torch.set_default_dtype(torch.float32)
    torch.manual_seed(42)

    # Load tokenizer and model
    tokenizer = AutoTokenizer.from_pretrained(args.model)
    model = DebertaForSequenceClassification.from_pretrained(
        args.model, torch_dtype=torch.float32, attn_implementation="eager"
    )
    model.eval()

    # Tokenize sample text, replicated to the requested batch size
    text = "DeBERTa improves BERT and RoBERTa using disentangled attention."
    inputs = tokenizer(
        [text] * args.batch_size, return_tensors="pt", padding=True, truncation=True
    )

    # Run eagerly once so input shapes are concrete before compilation
    with torch.no_grad():
        logits = model(**inputs).logits

    # Compile the forward pass (fullgraph=False allows graph breaks, avoiding
    # the compiler's instruction limit)
    model.forward = torch.compile(model.forward, backend="neuron", fullgraph=False)

    # Warmup
    warmup_start = time.time()
    with torch.no_grad():
        _ = model(**inputs)
    warmup_time = time.time() - warmup_start

    # Actual run
    run_start = time.time()
    with torch.no_grad():
        logits = model(**inputs).logits
    run_time = time.time() - run_start

    # Decode the prediction for the first example in the batch
    predicted_class_id = logits[0].argmax(dim=-1).item()
    predicted_label = model.config.id2label[predicted_class_id]

    logger.info("Warmup: %.2f s, Run: %.4f s", warmup_time, run_time)
    logger.info("Predicted label: %s", predicted_label)


if __name__ == "__main__":
    main()

"""
torch._dynamo.exc.TorchRuntimeError: Dynamo failed to run FX node with fake tensors: call_function <built-in function linear>(*(FakeTensor(..., device='neuron:0', size=(1, 18, 768)), Parameter(FakeTensor(..., size=(2304, 768), requires_grad=True)), None), **{}): got RuntimeError('Unhandled FakeTensor Device Propagation for aten.mm.default, found two different devices neuron:0, cpu')
"""