| operator name | used in model | args |
|---|---|---|
aten.embedding_dense_backward.default | HuggingFace/MegatronBertForCausalLM | ((T([2, 128, 1024], f16), T([2, 128], i64), 2, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/XGLMForCausalLM | ((T([2, 128, 1024], f16), T([2, 128], i64), 256008, 1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/MegatronBertForCausalLM | ((T([2, 128, 1024], f16), T([2, 128], i64), 29056, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/AlbertForMaskedLM | ((T([2, 512, 128], f16), T([2, 512], i64), 30000, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/AlbertForQuestionAnswering | ((T([2, 512, 128], f16), T([2, 512], i64), 30000, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/AlbertForMaskedLM | ((T([2, 512, 128], f16), T([2, 512], i64, stride=(0, 1)), 2, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/AlbertForQuestionAnswering | ((T([2, 512, 128], f16), T([2, 512], i64, stride=(0, 1)), 2, -1, False), {}) |
aten.embedding_dense_backward.default | TorchBench/attention_is_all_you_need_pytorch | ((T([256, 31, 512], f16), T([256, 31], i64, stride=(1, 256)), 9521, 1, False), {}) |
aten.embedding_dense_backward.default | TorchBench/attention_is_all_you_need_pytorch | ((T([256, 33, 512], f16), T([256, 33], i64, stride=(1, 256)), 9521, 1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/MobileBertForQuestionAnswering | ((T([32, 128, 128], f16), T([32, 128], i64), 30522, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/MobileBertForQuestionAnswering | ((T([32, 128, 512], f16), T([32, 128], i64), 2, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/DistilBertForQuestionAnswering | ((T([32, 128, 768], f16), T([32, 128], i64), 30522, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/BartForCausalLM | ((T([4, 1024, 1024], f16), T([4, 1024], i64), 1026, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/BartForCausalLM | ((T([4, 1024, 1024], f16), T([4, 1024], i64), 50265, 1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/GPT2ForSequenceClassification | ((T([4, 1024, 768], f16), T([4, 1024], i64), 50257, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/PegasusForConditionalGeneration | ((T([4, 128, 1024], f16), T([4, 128], i64), 50265, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/OPTForCausalLM | ((T([4, 128, 768], f16), T([4, 128], i64), 2050, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/RobertaForCausalLM | ((T([4, 128, 768], f16), T([4, 128], i64), 30522, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/OPTForCausalLM | ((T([4, 128, 768], f16), T([4, 128], i64), 50272, 1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/RobertaForCausalLM | ((T([4, 128, 768], f16), T([4, 128], i64), 512, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/RobertaForCausalLM | ((T([4, 128, 768], f16), T([4, 128], i64, stride=(0, 1)), 2, -1, False), {}) |
aten.embedding_dense_backward.default | TorchBench/hf_Bart | ((T([4, 512, 768], f16), T([4, 512], i64), 1026, -1, False), {}) |
aten.embedding_dense_backward.default | TorchBench/hf_Bert | ((T([4, 512, 768], f16), T([4, 512], i64), 30522, 0, False), {}) |
aten.embedding_dense_backward.default | TorchBench/hf_GPT2 | ((T([4, 512, 768], f16), T([4, 512], i64), 50257, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/DebertaForMaskedLM | ((T([4, 512, 768], f16), T([4, 512], i64), 50265, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/DebertaForQuestionAnswering | ((T([4, 512, 768], f16), T([4, 512], i64), 50265, 0, False), {}) |
aten.embedding_dense_backward.default | TorchBench/hf_Bart | ((T([4, 512, 768], f16), T([4, 512], i64), 50265, 1, False), {}) |
aten.embedding_dense_backward.default | TorchBench/hf_Bert | ((T([4, 512, 768], f16), T([4, 512], i64, stride=(0, 1)), 2, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/XLNetLMHeadModel | ((T([512, 4, 1024], f16), T([512, 4], i64), 32000, -1, False), {}) |
aten.embedding_dense_backward.default | TorchBench/fastNLP_Bert | ((T([6, 476, 768], f16), T([6, 476], i64), 2, -1, False), {}) |
aten.embedding_dense_backward.default | TorchBench/fastNLP_Bert | ((T([6, 476, 768], f16), T([6, 476], i64), 21128, 0, False), {}) |
aten.embedding_dense_backward.default | TorchBench/fastNLP_Bert | ((T([6, 476, 768], f16), T([6, 476], i64, stride=(0, 1)), 512, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/Speech2Text2ForCausalLM | ((T([64, 128, 256], f16), T([64, 128], i64), 10000, 1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/BlenderbotSmallForCausalLM | ((T([64, 128, 512], f16), T([64, 128], i64), 50265, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/BlenderbotSmallForConditionalGeneration | ((T([64, 128, 512], f16), T([64, 128], i64), 50265, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/BertForMaskedLM | ((T([64, 128, 768], f16), T([64, 128], i64), 30522, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/BertForQuestionAnswering | ((T([64, 128, 768], f16), T([64, 128], i64), 30522, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/RobertaForQuestionAnswering | ((T([64, 128, 768], f16), T([64, 128], i64), 30522, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/RobertaForQuestionAnswering | ((T([64, 128, 768], f16), T([64, 128], i64), 512, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/BertForMaskedLM | ((T([64, 128, 768], f16), T([64, 128], i64, stride=(0, 1)), 2, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/BertForQuestionAnswering | ((T([64, 128, 768], f16), T([64, 128], i64, stride=(0, 1)), 2, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/RobertaForQuestionAnswering | ((T([64, 128, 768], f16), T([64, 128], i64, stride=(0, 1)), 2, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/ElectraForQuestionAnswering | ((T([64, 512, 128], f16), T([64, 512], i64), 30522, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/ElectraForQuestionAnswering | ((T([64, 512, 128], f16), T([64, 512], i64, stride=(0, 1)), 2, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/MBartForConditionalGeneration | ((T([8, 128, 1024], f16), T([8, 128], i64), 1026, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/MegatronBertForQuestionAnswering | ((T([8, 128, 1024], f16), T([8, 128], i64), 2, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/MegatronBertForQuestionAnswering | ((T([8, 128, 1024], f16), T([8, 128], i64), 29056, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/PegasusForCausalLM | ((T([8, 128, 1024], f16), T([8, 128], i64), 50265, 0, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/MBartForConditionalGeneration | ((T([8, 128, 1024], f16), T([8, 128], i64), 50265, 1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/TrOCRForCausalLM | ((T([8, 128, 1024], f16), T([8, 128], i64), 50265, 1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/TrOCRForCausalLM | ((T([8, 128, 1024], f16), T([8, 128], i64), 514, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/PLBartForConditionalGeneration | ((T([8, 128, 768], f16), T([8, 128], i64), 1026, -1, False), {}) |
aten.embedding_dense_backward.default | HuggingFace/PLBartForConditionalGeneration | ((T([8, 128, 768], f16), T([8, 128], i64), 50005, 1, False), {}) |
aten.embedding_dense_backward.default | TorchBench/hf_Albert | ((T([8, 512, 128], f16), T([8, 512], i64), 30000, 0, False), {}) |
aten.embedding_dense_backward.default | TorchBench/hf_Albert | ((T([8, 512, 128], f16), T([8, 512], i64, stride=(0, 1)), 2, -1, False), {}) |
aten.embedding_dense_backward.default | TorchBench/hf_DistilBert | ((T([8, 512, 768], f16), T([8, 512], i64), 30522, 0, False), {}) |
aten.eq.Scalar | TorchBench/vision_maskrcnn | ((T([0], i64), 0), {}) |
aten.eq.Scalar | TorchBench/vision_maskrcnn | ((T([0], i64), 1), {}) |
aten.eq.Scalar | TorchBench/vision_maskrcnn | ((T([0], i64), 2), {}) |
aten.eq.Scalar | TorchBench/vision_maskrcnn | ((T([0], i64), 3), {}) |
aten.eq.Scalar | HuggingFace/AllenaiLongformerBase | ((T([1, 256, 1, 257], f16), 1), {}) |
aten.eq.Scalar | HuggingFace/AllenaiLongformerBase | ((T([1, 256, 12, 257], f16, stride=(65792, 257, 0, 1)), 1), {}) |
aten.eq.Scalar | TorchBench/speech_transformer | ((T([10, 22], i64), 2), {}) |
aten.eq.Scalar | TorchBench/BERT_pytorch | ((T([16, 1, 128, 128], b8), 0), {}) |
aten.eq.Scalar | TorchBench/BERT_pytorch | ((T([16, 128, 1], f16), 0), {}) |
aten.eq.Scalar | HuggingFace/DistilBertForMaskedLM | ((T([16, 128], f32), 0), {}) |
aten.eq.Scalar | HuggingFace/BartForConditionalGeneration | ((T([2, 1024], i64), -100), {}) |
aten.eq.Scalar | TorchBench/hf_Longformer | ((T([2, 256, 1, 257], f16, stride=(0, 257, 257, 1)), 1), {}) |
aten.eq.Scalar | TorchBench/hf_Longformer | ((T([2, 256, 12, 257], f16, stride=(0, 257, 0, 1)), 1), {}) |
aten.eq.Scalar | TorchBench/attention_is_all_you_need_pytorch | ((T([256, 1, 1, 33], b8, stride=(1, 8448, 8448, 256)), 0), {}) |
aten.eq.Scalar | TorchBench/attention_is_all_you_need_pytorch | ((T([256, 1, 31, 31], b8, stride=(1, 7936, 256, 7936)), 0), {}) |
aten.eq.Scalar | HuggingFace/DistilBertForQuestionAnswering | ((T([32, 128], f32), 0), {}) |
aten.eq.Scalar | TorchBench/fastNLP_Bert | ((T([6, 474, 1], b8), False), {}) |
aten.eq.Scalar | TorchBench/fastNLP_Bert | ((T([6, 474], b8), False), {}) |
aten.eq.Scalar | TorchBench/fastNLP_Bert | ((T([6, 476], i64), 511), {}) |
aten.eq.Scalar | TorchBench/tts_angular | ((T([64, 1], f16), 0), {}) |
aten.eq.Scalar | HuggingFace/MBartForConditionalGeneration | ((T([8, 128], i64), -100), {}) |
aten.eq.Scalar | HuggingFace/PLBartForConditionalGeneration | ((T([8, 128], i64), -100), {}) |
aten.eq.Scalar | TorchBench/hf_DistilBert | ((T([8, 512], f32), 0), {}) |
aten.eq.Tensor | HuggingFace/OPTForCausalLM | ((T([4, 12, 128, 128], f16), T([], f32)), {}) |
aten.eq.Tensor | TorchBench/timm_efficientdet | ((T([5000, 4], f32), T([4], f16)), {}) |
aten.erf.default | TorchBench/fastNLP_Bert | ((T([6, 476, 3072], f16),), {}) |
aten.exp.default | TorchBench/vision_maskrcnn | ((T([0, 91], f16),), {}) |
aten.exp.default | TorchBench/vision_maskrcnn | ((T([1438452, 1], f16),), {}) |
aten.exp.default | TorchBench/timm_efficientdet | ((T([5000], f32, stride=(4,)),), {}) |
aten.exp.default | TorchBench/fastNLP_Bert | ((T([6, 476, 3072], f16),), {}) |
aten.exp.default | TorchBench/yolov3 | ((T([8, 3, 12, 16, 2], f16, stride=(48960, 16320, 1360, 85, 1)),), {}) |
aten.exp.default | TorchBench/yolov3 | ((T([8, 3, 24, 32, 2], f16, stride=(195840, 65280, 2720, 85, 1)),), {}) |
aten.exp.default | TorchBench/yolov3 | ((T([8, 3, 48, 64, 2], f16, stride=(783360, 261120, 5440, 85, 1)),), {}) |
aten.fill_.Scalar | TorchBench/speech_transformer | ((T([10, 22], i64), -1), {}) |
aten.fill_.Scalar | TorchBench/speech_transformer | ((T([10, 22], i64), 2), {}) |
aten.fill_.Scalar | TorchBench/fastNLP_Bert | ((T([476], i64), 1), {}) |
aten.fill_.Scalar | TorchBench/fastNLP_Bert | ((T([6], i64, stride=(476,)), 2057), {}) |
aten.fill_.Scalar | TorchBench/vision_maskrcnn | ((T([], i64), 16), {}) |
aten.fill_.Scalar | TorchBench/vision_maskrcnn | ((T([], i64), 32), {}) |
aten.fill_.Scalar | TorchBench/vision_maskrcnn | ((T([], i64), 4), {}) |
aten.fill_.Scalar | TorchBench/vision_maskrcnn | ((T([], i64), 62), {}) |
aten.fill_.Scalar | TorchBench/vision_maskrcnn | ((T([], i64), 64), {}) |
aten.fill_.Scalar | TorchBench/vision_maskrcnn | ((T([], i64), 8), {}) |
aten.fill_.Tensor | TorchBench/speech_transformer | ((T([0], f16), T([], f16)), {}) |