#!/usr/bin/env python3
# FlauBERT text-classification on Neuron
import argparse
import logging
import time
import torch
from transformers import FlaubertTokenizer, FlaubertForSequenceClassification
import torch_neuronx # ensures Neuron backend
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(description="Run FlauBERT on Neuron")
    parser.add_argument(
        "--model",
        type=str,
        default="flaubert/flaubert_base_cased",
        help="FlauBERT model name on the Hugging Face Hub",
    )
    parser.add_argument("--batch-size", type=int, default=1, help="Batch size")
    args = parser.parse_args()
    torch.set_default_dtype(torch.float32)
    torch.manual_seed(42)

    # load tokenizer & model
    tokenizer = FlaubertTokenizer.from_pretrained(args.model)
    model = FlaubertForSequenceClassification.from_pretrained(
        args.model, torch_dtype=torch.float32, attn_implementation="eager"
    ).eval()

    # tokenize a French sample, replicated to the requested batch size
    # ("FlauBERT is a high-performing French language model.")
    text = "FlauBERT est un modèle de langue français performant."
    inputs = tokenizer(
        [text] * args.batch_size, return_tensors="pt", padding=True, truncation=True
    )
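    # NOTE (assumption, not in the original script): torch.compile guards on
    # input shapes, so a new sequence length can trigger a fresh, slow Neuron
    # compilation. Padding to a fixed length keeps the graph static, e.g.:
    #   inputs = tokenizer(text, return_tensors="pt", padding="max_length",
    #                      truncation=True, max_length=128)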
    # pre-run in eager mode to lock shapes
    with torch.no_grad():
        _ = model(**inputs).logits

    # compile
    model.forward = torch.compile(model.forward, backend="neuron", fullgraph=True)
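    # Caveat: FlauBERT's forward calls `lengths.max().item()` internally
    # (see the traceback below). With fullgraph=True, Dynamo raises on any
    # operation it cannot trace instead of falling back to eager execution.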
    # warmup (the first call triggers compilation)
    warmup_start = time.time()
    with torch.no_grad():
        _ = model(**inputs)
    warmup_time = time.time() - warmup_start

    # benchmark run
    run_start = time.time()
    with torch.no_grad():
        logits = model(**inputs).logits
    run_time = time.time() - run_start
    # top-1 label
    predicted_class_id = logits.argmax().item()
    predicted_label = model.config.id2label[predicted_class_id]
    logger.info("Warmup: %.2f s, Run: %.4f s", warmup_time, run_time)
    logger.info("Predicted label: %s", predicted_label)


if __name__ == "__main__":
    main()
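
# Running this script fails during the compiled warmup call with the
# following traceback: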
"""
Traceback (most recent call last):
File "/workspace/torch_neuron_sample/torch-neuron-samples/scripts/torch_compile/run_flaubert.py", line 67, in <module>
main()
File "/workspace/torch_neuron_sample/torch-neuron-samples/scripts/torch_compile/run_flaubert.py", line 49, in main
_ = model(**inputs)
File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1775, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1786, in _call_impl
return forward_call(*args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py", line 841, in compile_wrapper
raise e.with_traceback(None) from e.__cause__ # User compiler error
torch._dynamo.exc.Unsupported: Unsupported Tensor.item() call with capture_scalar_outputs=False
Explanation: Dynamo does not support tracing `Tensor.item()` with config.capture_scalar_outputs=False.
Hint: Set `torch._dynamo.config.capture_scalar_outputs = True` or `export TORCHDYNAMO_CAPTURE_SCALAR_OUTPUTS=1` to include these operations in the captured graph.
Developer debug context: call_method TensorVariable() item () {}
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0124.html
from user code:
File "/usr/local/lib/python3.10/site-packages/transformers/models/flaubert/modeling_flaubert.py", line 1156, in forward
transformer_outputs = self.transformer(
File "/usr/local/lib/python3.10/site-packages/transformers/models/flaubert/modeling_flaubert.py", line 873, in forward
assert lengths.max().item() <= slen
Set TORCHDYNAMO_VERBOSE=1 for the internal stack trace (please do this especially if you're reporting a bug to PyTorch). For even more developer context, set TORCH_LOGS="+dynamo"
"""