| operator name (stringclasses, 180 values) | used in model (stringclasses, 155 values) | args (stringlengths 19–5.24k) |
|---|---|---|
aten.native_layer_norm_backward.default | TIMM/jx_nest_base | ((T([64, 16, 196, 128], f16), T([64, 16, 196, 128], f16), [128], T([64, 16, 196, 1], f32), T([64, 16, 196, 1], f32), T([128], f16), T([128], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/swin_base_patch4_window7_224 | ((T([64, 196, 1024], f16), T([64, 196, 1024], f16), [1024], T([64, 196, 1], f32), T([64, 196, 1], f32), T([1024], f16), T([1024], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/gmlp_s16_224 | ((T([64, 196, 256], f16), T([64, 196, 256], f16, stride=(50176, 1, 196)), [256], T([64, 196, 1], f32), T([64, 196, 1], f32), T([256], f16), T([256], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/tnt_s_patch16_224 | ((T([64, 196, 384], f16), T([64, 196, 384], f16), [384], T([64, 196, 1], f32), T([64, 196, 1], f32), T([384], f16), T([384], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/gmixer_24_224 | ((T([64, 196, 384], f16), T([64, 196, 384], f16, stride=(75264, 1, 196)), [384], T([64, 196, 1], f32), T([64, 196, 1], f32), T([384], f16), T([384], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/gmixer_24_224 | ((T([64, 196, 384], f16, stride=(75264, 1, 196)), T([64, 196, 384], f16, stride=(75264, 1, 196)), [384], T([64, 196, 1], f32), T([64, 196, 1], f32), T([384], f16), T([384], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/tnt_s_patch16_224 | ((T([64, 196, 384], f16, stride=(75648, 384, 1)), T([64, 196, 384], f16), [384], T([64, 196, 1], f32), T([64, 196, 1], f32), T([384], f16), T([384], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/swin_base_patch4_window7_224 | ((T([64, 196, 512], f16), T([64, 196, 512], f16), [512], T([64, 196, 1], f32), T([64, 196, 1], f32), T([512], f16), T([512], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/mixer_b16_224 | ((T([64, 196, 768], f16), T([64, 196, 768], f16, stride=(150528, 1, 196)), [768], T([64, 196, 1], f32), T([64, 196, 1], f32), T([768], f16), T([768], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/mixer_b16_224 | ((T([64, 196, 768], f16, stride=(150528, 1, 196)), T([64, 196, 768], f16, stride=(150528, 1, 196)), [768], T([64, 196, 1], f32), T([64, 196, 1], f32), T([768], f16), T([768], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/gmlp_s16_224 | ((T([64, 196, 768], f16, stride=(150528, 1, 196)), T([64, 196, 768], f16, stride=(301056, 1536, 1)), [768], T([64, 196, 1], f32), T([64, 196, 1], f32), T([768], f16), T([768], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/crossvit_9_240 | ((T([64, 197, 256], f16), T([64, 197, 256], f16), [256], T([64, 197, 1], f32), T([64, 197, 1], f32), T([256], f16), T([256], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/tnt_s_patch16_224 | ((T([64, 197, 384], f16), T([64, 197, 384], f16), [384], T([64, 197, 1], f32), T([64, 197, 1], f32), T([384], f16), T([384], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/volo_d1_224 | ((T([64, 197, 384], f16), T([64, 197, 384], f16), [384], T([64, 197, 1], f32), T([64, 197, 1], f32), T([384], f16), T([384], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/beit_base_patch16_224 | ((T([64, 197, 768], f16), T([64, 197, 768], f16), [768], T([64, 197, 1], f32), T([64, 197, 1], f32), T([768], f16), T([768], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/vit_base_patch16_224 | ((T([64, 197, 768], f16), T([64, 197, 768], f16), [768], T([64, 197, 1], f32), T([64, 197, 1], f32), T([768], f16), T([768], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/deit_base_distilled_patch16_224 | ((T([64, 198, 768], f16), T([64, 198, 768], f16), [768], T([64, 198, 1], f32), T([64, 198, 1], f32), T([768], f16), T([768], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/pit_b_224 | ((T([64, 257, 512], f16), T([64, 257, 512], f16), [512], T([64, 257, 1], f32), T([64, 257, 1], f32), T([512], f16), T([512], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/volo_d1_224 | ((T([64, 28, 28, 192], f16), T([64, 28, 28, 192], f16, stride=(150528, 28, 1, 784)), [192], T([64, 28, 28, 1], f32), T([64, 28, 28, 1], f32), T([192], f16), T([192], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/jx_nest_base | ((T([64, 28, 28, 512], f16), T([64, 28, 28, 512], f16), [512], T([64, 28, 28, 1], f32), T([64, 28, 28, 1], f32), T([512], f16), T([512], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/swin_base_patch4_window7_224 | ((T([64, 3136, 128], f16), T([64, 3136, 128], f16), [128], T([64, 3136, 1], f32), T([64, 3136, 1], f32), T([128], f16), T([128], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/swin_base_patch4_window7_224 | ((T([64, 3136, 128], f16), T([64, 3136, 128], f16, stride=(401408, 1, 3136)), [128], T([64, 3136, 1], f32), T([64, 3136, 1], f32), T([128], f16), T([128], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/jx_nest_base | ((T([64, 4, 196, 256], f16), T([64, 4, 196, 256], f16), [256], T([64, 4, 196, 1], f32), T([64, 4, 196, 1], f32), T([256], f16), T([256], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/crossvit_9_240 | ((T([64, 401, 128], f16), T([64, 401, 128], f16), [128], T([64, 401, 1], f32), T([64, 401, 1], f32), T([128], f16), T([128], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/swin_base_patch4_window7_224 | ((T([64, 49, 1024], f16), T([64, 49, 1024], f16), [1024], T([64, 49, 1], f32), T([64, 49, 1], f32), T([1024], f16), T([1024], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/swin_base_patch4_window7_224 | ((T([64, 49, 2048], f16), T([64, 49, 2048], f16), [2048], T([64, 49, 1], f32), T([64, 49, 1], f32), T([2048], f16), T([2048], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | HuggingFace/ElectraForQuestionAnswering | ((T([64, 512, 128], f16), T([64, 512, 128], f16), [128], T([64, 512, 1], f32), T([64, 512, 1], f32), T([128], f16), T([128], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | HuggingFace/ElectraForQuestionAnswering | ((T([64, 512, 256], f16), T([64, 512, 256], f16), [256], T([64, 512, 1], f32), T([64, 512, 1], f32), T([256], f16), T([256], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/jx_nest_base | ((T([64, 56, 56, 256], f16), T([64, 56, 56, 256], f16), [256], T([64, 56, 56, 1], f32), T([64, 56, 56, 1], f32), T([256], f16), T([256], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/pit_b_224 | ((T([64, 65, 1024], f16), T([64, 65, 1024], f16), [1024], T([64, 65, 1], f32), T([64, 65, 1], f32), T([1024], f16), T([1024], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/beit_base_patch16_224 | ((T([64, 768], f16), T([64, 768], f16), [768], T([64, 1], f32), T([64, 1], f32), T([768], f16), T([768], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/swin_base_patch4_window7_224 | ((T([64, 784, 256], f16), T([64, 784, 256], f16), [256], T([64, 784, 1], f32), T([64, 784, 1], f32), T([256], f16), T([256], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/swin_base_patch4_window7_224 | ((T([64, 784, 512], f16), T([64, 784, 512], f16), [512], T([64, 784, 1], f32), T([64, 784, 1], f32), T([512], f16), T([512], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TIMM/pit_b_224 | ((T([64, 962, 256], f16), T([64, 962, 256], f16), [256], T([64, 962, 1], f32), T([64, 962, 1], f32), T([256], f16), T([256], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | HuggingFace/MBartForConditionalGeneration | ((T([8, 128, 1024], f16), T([8, 128, 1024], f16), [1024], T([8, 128, 1], f32), T([8, 128, 1], f32), T([1024], f16), T([1024], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | HuggingFace/MegatronBertForQuestionAnswering | ((T([8, 128, 1024], f16), T([8, 128, 1024], f16), [1024], T([8, 128, 1], f32), T([8, 128, 1], f32), T([1024], f16), T([1024], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | HuggingFace/PegasusForCausalLM | ((T([8, 128, 1024], f16), T([8, 128, 1024], f16), [1024], T([8, 128, 1], f32), T([8, 128, 1], f32), T([1024], f16), T([1024], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | HuggingFace/TrOCRForCausalLM | ((T([8, 128, 1024], f16), T([8, 128, 1024], f16), [1024], T([8, 128, 1], f32), T([8, 128, 1], f32), T([1024], f16), T([1024], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | HuggingFace/PLBartForConditionalGeneration | ((T([8, 128, 768], f16), T([8, 128, 768], f16), [768], T([8, 128, 1], f32), T([8, 128, 1], f32), T([768], f16), T([768], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TorchBench/timm_vision_transformer | ((T([8, 197, 384], f16), T([8, 197, 384], f16), [384], T([8, 197, 1], f32), T([8, 197, 1], f32), T([384], f16), T([384], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TorchBench/hf_Albert | ((T([8, 512, 128], f16), T([8, 512, 128], f16), [128], T([8, 512, 1], f32), T([8, 512, 1], f32), T([128], f16), T([128], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TorchBench/hf_Albert | ((T([8, 512, 768], f16), T([8, 512, 768], f16), [768], T([8, 512, 1], f32), T([8, 512, 1], f32), T([768], f16), T([768], f16), [True, True, True]), {}) |
aten.native_layer_norm_backward.default | TorchBench/hf_DistilBert | ((T([8, 512, 768], f16), T([8, 512, 768], f16), [768], T([8, 512, 1], f32), T([8, 512, 1], f32), T([768], f16), T([768], f16), [True, True, True]), {}) |
aten.ne.Scalar | HuggingFace/AllenaiLongformerBase | ((T([1, 1024], f16), 0), {}) |
aten.ne.Scalar | HuggingFace/AllenaiLongformerBase | ((T([1, 1024], i64), 1), {}) |
aten.ne.Scalar | HuggingFace/GPTNeoForSequenceClassification | ((T([1, 128], i64), 0), {}) |
aten.ne.Scalar | HuggingFace/CamemBert | ((T([1, 512], i64), 1), {}) |
aten.ne.Scalar | TorchBench/speech_transformer | ((T([10, 22], i64), 2), {}) |
aten.ne.Scalar | TorchBench/hf_Longformer | ((T([2, 1024], f16), 0), {}) |
aten.ne.Scalar | TorchBench/hf_Longformer | ((T([2, 1024], i64), 1), {}) |
aten.ne.Scalar | HuggingFace/M2M100ForConditionalGeneration | ((T([2, 128], i64), 1), {}) |
aten.ne.Scalar | HuggingFace/XGLMForCausalLM | ((T([2, 128], i64), 1), {}) |
aten.ne.Scalar | TorchBench/speech_transformer | ((T([21], i64), -1), {}) |
aten.ne.Scalar | TorchBench/attention_is_all_you_need_pytorch | ((T([256, 31], i64, stride=(1, 256)), 1), {}) |
aten.ne.Scalar | TorchBench/attention_is_all_you_need_pytorch | ((T([256, 33], i64, stride=(1, 256)), 1), {}) |
aten.ne.Scalar | HuggingFace/GPT2ForSequenceClassification | ((T([4, 1024], i64), 0), {}) |
aten.ne.Scalar | HuggingFace/RobertaForCausalLM | ((T([4, 128], i64), 0), {}) |
aten.ne.Scalar | TorchBench/fastNLP_Bert | ((T([6, 474], i64), 0), {}) |
aten.ne.Scalar | HuggingFace/RobertaForQuestionAnswering | ((T([64, 128], i64), 0), {}) |
aten.ne.Scalar | HuggingFace/Speech2Text2ForCausalLM | ((T([64, 128], i64), 1), {}) |
aten.ne.Scalar | HuggingFace/MBartForConditionalGeneration | ((T([8, 128], i64), 1), {}) |
aten.ne.Scalar | HuggingFace/PLBartForConditionalGeneration | ((T([8, 128], i64), 1), {}) |
aten.neg.default | TorchBench/vision_maskrcnn | ((T([0, 91], f16),), {}) |
aten.neg.default | TorchBench/timm_efficientdet | ((T([1, 88, 10, 10], f16),), {}) |
aten.neg.default | TorchBench/timm_efficientdet | ((T([1, 88, 20, 20], f16),), {}) |
aten.neg.default | TorchBench/timm_efficientdet | ((T([1, 88, 40, 40], f16),), {}) |
aten.neg.default | TorchBench/timm_efficientdet | ((T([1, 88, 5, 5], f16),), {}) |
aten.neg.default | TorchBench/timm_efficientdet | ((T([1, 88, 80, 80], f16),), {}) |
aten.neg.default | TorchBench/BERT_pytorch | ((T([16, 128, 768], f16),), {}) |
aten.neg.default | TIMM/convnext_base | ((T([32, 128, 56, 56], f16, stride=(401408, 1, 7168, 128)),), {}) |
aten.neg.default | TIMM/convnext_base | ((T([32, 256, 28, 28], f16, stride=(200704, 1, 7168, 256)),), {}) |
aten.neg.default | TIMM/convnext_base | ((T([32, 512, 14, 14], f16, stride=(100352, 1, 7168, 512)),), {}) |
aten.neg.default | HuggingFace/DebertaForMaskedLM | ((T([4, 512, 768], f32),), {}) |
aten.neg.default | HuggingFace/DebertaForQuestionAnswering | ((T([4, 512, 768], f32),), {}) |
aten.neg.default | TorchBench/timm_efficientdet | ((T([5000], f32, stride=(4,)),), {}) |
aten.neg.default | TorchBench/Super_SloMo | ((T([6, 1, 352, 352], f16),), {}) |
aten.neg.default | TorchBench/Super_SloMo | ((T([6, 2, 351, 352], f16),), {}) |
aten.neg.default | TorchBench/Super_SloMo | ((T([6, 2, 352, 351], f16),), {}) |
aten.neg.default | TorchBench/Super_SloMo | ((T([6, 3, 352, 352], f16),), {}) |
aten.neg.default | TorchBench/fastNLP_Bert | ((T([6, 476, 3072], f16),), {}) |
aten.neg.default | TIMM/poolformer_m36 | ((T([64, 192, 28, 28], f16),), {}) |
aten.neg.default | TorchBench/tts_angular | ((T([64, 256], f16, stride=(0, 0)),), {}) |
aten.neg.default | TIMM/poolformer_m36 | ((T([64, 384, 14, 14], f16),), {}) |
aten.neg.default | TIMM/poolformer_m36 | ((T([64, 768, 7, 7], f16),), {}) |
aten.neg.default | TIMM/poolformer_m36 | ((T([64, 96, 56, 56], f16),), {}) |
aten.neg.default | TorchBench/Super_SloMo | ((T([6], f16),), {}) |
aten.new_empty.default | TorchBench/vision_maskrcnn | ((T([0, 1, 30, 30], f16), [0, 1, 427, 640]), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda', 'pin_memory': False}) |
aten.new_empty.default | TorchBench/vision_maskrcnn | ((T([0, 1, 30, 30], f16), [0, 1, 459, 640]), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda', 'pin_memory': False}) |
aten.new_empty.default | TorchBench/vision_maskrcnn | ((T([0, 1, 30, 30], f16), [0, 1, 612, 612]), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda', 'pin_memory': False}) |
aten.new_empty.default | TorchBench/vision_maskrcnn | ((T([0, 1, 30, 30], f16), [0, 1, 640, 443]), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda', 'pin_memory': False}) |
aten.new_empty.default | HuggingFace/AllenaiLongformerBase | ((T([1, 3, 512, 513], f16), [1, 4, 256, 513]), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda', 'pin_memory': False}) |
aten.new_empty.default | HuggingFace/AllenaiLongformerBase | ((T([12, 3, 512, 513], f16), [12, 4, 256, 513]), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda', 'pin_memory': False}) |
aten.new_empty.default | TorchBench/hf_Longformer | ((T([2, 3, 512, 513], f16), [2, 4, 256, 513]), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda', 'pin_memory': False}) |
aten.new_empty.default | TorchBench/hf_Longformer | ((T([24, 3, 512, 513], f16), [24, 4, 256, 513]), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda', 'pin_memory': False}) |
aten.new_empty.default | TIMM/jx_nest_base | ((T([64, 1, 196, 512], f16), [64, 1, 1, 1]), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda', 'pin_memory': False}) |
aten.new_empty.default | TIMM/jx_nest_base | ((T([64, 16, 196, 128], f16), [64, 1, 1, 1]), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda', 'pin_memory': False}) |
aten.new_empty.default | TIMM/swin_base_patch4_window7_224 | ((T([64, 196, 512], f16), [64, 1, 1]), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda', 'pin_memory': False}) |
aten.new_empty.default | TIMM/swin_base_patch4_window7_224 | ((T([64, 3136, 128], f16), [64, 1, 1]), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda', 'pin_memory': False}) |
aten.new_empty.default | TIMM/jx_nest_base | ((T([64, 4, 196, 256], f16), [64, 1, 1, 1]), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda', 'pin_memory': False}) |
aten.new_empty.default | TIMM/swin_base_patch4_window7_224 | ((T([64, 49, 1024], f16), [64, 1, 1]), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda', 'pin_memory': False}) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.