| operator name | used in model | args |
|---|---|---|
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 144, 56, 56], f16), T([96, 144, 56, 56], f16), T([144, 1, 3, 3], f16), [0], [1, 1], [1, 1], [1, 1], False, [0, 0], 144, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 144, 56, 56], f16), T([96, 24, 56, 56], f16), T([144, 24, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 16, 112, 112], f16), T([96, 32, 112, 112], f16), T([16, 32, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 160, 7, 7], f16), T([96, 576, 7, 7], f16), T([160, 576, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 160, 7, 7], f16), T([96, 960, 7, 7], f16), T([160, 960, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 192, 14, 14], f16), T([96, 192, 28, 28], f16), T([192, 1, 3, 3], f16), [0], [2, 2], [1, 1], [1, 1], False, [0, 0], 192, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 192, 28, 28], f16), T([96, 192, 28, 28], f16), T([192, 1, 3, 3], f16), [0], [1, 1], [1, 1], [1, 1], False, [0, 0], 192, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 192, 28, 28], f16), T([96, 32, 28, 28], f16), T([192, 32, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 24, 56, 56], f16), T([96, 144, 56, 56], f16), T([24, 144, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 24, 56, 56], f16), T([96, 96, 56, 56], f16), T([24, 96, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/LearningToPaint | ((T([96, 256, 8, 8], f16), T([96, 128, 16, 16], f16), T([256, 128, 1, 1], f16), [0], [2, 2], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/LearningToPaint | ((T([96, 256, 8, 8], f16), T([96, 128, 16, 16], f16), T([256, 128, 3, 3], f16), [0], [2, 2], [1, 1], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/LearningToPaint | ((T([96, 256, 8, 8], f16), T([96, 256, 8, 8], f16), T([256, 256, 3, 3], f16), [0], [1, 1], [1, 1], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 32, 112, 112], f16), T([96, 3, 224, 224], f16), T([32, 3, 3, 3], f16), [0], [2, 2], [1, 1], [1, 1], False, [0, 0], 1, [False, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 32, 112, 112], f16), T([96, 32, 112, 112], f16), T([32, 1, 3, 3], f16), [0], [1, 1], [1, 1], [1, 1], False, [0, 0], 32, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 32, 28, 28], f16), T([96, 144, 28, 28], f16), T([32, 144, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 32, 28, 28], f16), T([96, 192, 28, 28], f16), T([32, 192, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 320, 7, 7], f16), T([96, 960, 7, 7], f16), T([320, 960, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 384, 14, 14], f16), T([96, 384, 14, 14], f16), T([384, 1, 3, 3], f16), [0], [1, 1], [1, 1], [1, 1], False, [0, 0], 384, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 384, 14, 14], f16), T([96, 64, 14, 14], f16), T([384, 64, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/LearningToPaint | ((T([96, 512, 4, 4], f16), T([96, 256, 8, 8], f16), T([512, 256, 1, 1], f16), [0], [2, 2], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/LearningToPaint | ((T([96, 512, 4, 4], f16), T([96, 256, 8, 8], f16), T([512, 256, 3, 3], f16), [0], [2, 2], [1, 1], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/LearningToPaint | ((T([96, 512, 4, 4], f16), T([96, 512, 4, 4], f16), T([512, 512, 3, 3], f16), [0], [1, 1], [1, 1], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 576, 14, 14], f16), T([96, 576, 14, 14], f16), T([576, 1, 3, 3], f16), [0], [1, 1], [1, 1], [1, 1], False, [0, 0], 576, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 576, 14, 14], f16), T([96, 96, 14, 14], f16), T([576, 96, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 576, 7, 7], f16), T([96, 576, 14, 14], f16), T([576, 1, 3, 3], f16), [0], [2, 2], [1, 1], [1, 1], False, [0, 0], 576, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 64, 14, 14], f16), T([96, 192, 14, 14], f16), T([64, 192, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 64, 14, 14], f16), T([96, 384, 14, 14], f16), T([64, 384, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/LearningToPaint | ((T([96, 64, 32, 32], f16), T([96, 64, 32, 32], f16), T([64, 64, 3, 3], f16), [0], [1, 1], [1, 1], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/LearningToPaint | ((T([96, 64, 32, 32], f16), T([96, 64, 64, 64], f16), T([64, 64, 1, 1], f16), [0], [2, 2], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/LearningToPaint | ((T([96, 64, 32, 32], f16), T([96, 64, 64, 64], f16), T([64, 64, 3, 3], f16), [0], [2, 2], [1, 1], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/LearningToPaint | ((T([96, 64, 64, 64], f16), T([96, 9, 128, 128], f16), T([64, 9, 3, 3], f16), [0], [2, 2], [1, 1], [1, 1], False, [0, 0], 1, [False, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 96, 112, 112], f16), T([96, 16, 112, 112], f16), T([96, 16, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 96, 14, 14], f16), T([96, 384, 14, 14], f16), T([96, 384, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 96, 14, 14], f16), T([96, 576, 14, 14], f16), T([96, 576, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 96, 56, 56], f16), T([96, 96, 112, 112], f16), T([96, 1, 3, 3], f16), [0], [2, 2], [1, 1], [1, 1], False, [0, 0], 96, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 960, 7, 7], f16), T([96, 160, 7, 7], f16), T([960, 160, 1, 1], f16), [0], [1, 1], [0, 0], [1, 1], False, [0, 0], 1, [True, True, False]), {}) |
aten.convolution_backward.default | TorchBench/mobilenet_v2 | ((T([96, 960, 7, 7], f16), T([96, 960, 7, 7], f16), T([960, 1, 3, 3], f16), [0], [1, 1], [1, 1], [1, 1], False, [0, 0], 960, [True, True, False]), {}) |
aten.copy_.default | TorchBench/vision_maskrcnn | ((T([0], f16), T([0], f16)), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([1, 1024, 12, 513], f16, stride=(6303744, 513, 525312, 1)), T([1, 1024, 12, 513], f16)), {}) |
aten.copy_.default | TorchBench/pytorch_stargan | ((T([1, 1024, 128, 128], f16), T([1, 1024, 128, 128], f16)), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([1, 1024], i64), T([1, 1024], i64)), {}) |
aten.copy_.default | HuggingFace/BigBird | ((T([1, 1024], i64), T([1, 1024], i64)), {}) |
aten.copy_.default | HuggingFace/BigBird | ((T([1, 12, 12, 64, 64], f16), T([1, 12, 12, 64, 64], f16)), {}) |
aten.copy_.default | TorchBench/pytorch_CycleGAN_and_pix2pix | ((T([1, 128, 128, 128], f16), T([1, 128, 128, 128], f16)), {}) |
aten.copy_.default | HuggingFace/GPTNeoForCausalLM | ((T([1, 128], i64), T([1, 128], i64)), {}) |
aten.copy_.default | HuggingFace/GPTNeoForSequenceClassification | ((T([1, 128], i64), T([1, 128], i64)), {}) |
aten.copy_.default | TorchBench/pytorch_stargan | ((T([1, 2048, 64, 64], f16), T([1, 2048, 64, 64], f16)), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([1, 255, 255], f16, stride=(525312, 513, 1)), T([1, 255, 255], f16, stride=(787968, 513, 1))), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([1, 256, 12, 257], f16, stride=(6303744, 513, 525312, 1)), T([1, 256, 12, 257], f16)), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([1, 256, 257], f16, stride=(525312, 513, 1)), T([1, 256, 257], f16, stride=(787968, 513, 1))), {}) |
aten.copy_.default | TorchBench/pytorch_CycleGAN_and_pix2pix | ((T([1, 256, 64, 64], f16), T([1, 256, 64, 64], f16)), {}) |
aten.copy_.default | TorchBench/pytorch_CycleGAN_and_pix2pix | ((T([1, 3, 256, 256], f16), T([1, 3, 256, 256], f16)), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([1, 3, 256, 256], f16, stride=(525312, 131328, 513, 1)), T([1, 3, 256, 256], f16, stride=(787968, 262656, 513, 1))), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([1, 3, 256, 257], f16, stride=(525312, 131328, 513, 1)), T([1, 3, 256, 257], f16, stride=(787968, 262656, 513, 1))), {}) |
aten.copy_.default | TorchBench/timm_efficientdet | ((T([1, 3, 640, 640], f16), T([1, 3, 640, 640], f16)), {}) |
aten.copy_.default | TorchBench/pytorch_unet | ((T([1, 3, 640, 959], f16), T([1, 3, 640, 959], f16)), {}) |
aten.copy_.default | TorchBench/pytorch_stargan | ((T([1, 4096, 32, 32], f16), T([1, 4096, 32, 32], f16)), {}) |
aten.copy_.default | HuggingFace/CamemBert | ((T([1, 512], i64), T([1, 512], i64)), {}) |
aten.copy_.default | HuggingFace/DebertaV2ForMaskedLM | ((T([1, 512], i64), T([1, 512], i64)), {}) |
aten.copy_.default | HuggingFace/DebertaV2ForQuestionAnswering | ((T([1, 512], i64), T([1, 512], i64)), {}) |
aten.copy_.default | HuggingFace/DistillGPT2 | ((T([1, 512], i64), T([1, 512], i64)), {}) |
aten.copy_.default | HuggingFace/ElectraForCausalLM | ((T([1, 512], i64), T([1, 512], i64)), {}) |
aten.copy_.default | HuggingFace/GoogleFnet | ((T([1, 512], i64), T([1, 512], i64)), {}) |
aten.copy_.default | HuggingFace/YituTechConvBert | ((T([1, 512], i64), T([1, 512], i64)), {}) |
aten.copy_.default | TorchBench/fastNLP_Bert | ((T([1, 6, 474, 768], f16), T([1, 6, 474, 768], f16)), {}) |
aten.copy_.default | TorchBench/pytorch_CycleGAN_and_pix2pix | ((T([1, 64, 256, 256], f16), T([1, 64, 256, 256], f16)), {}) |
aten.copy_.default | TorchBench/speech_transformer | ((T([10, 204, 320], f16), T([10, 204, 320], f16)), {}) |
aten.copy_.default | TorchBench/speech_transformer | ((T([10, 21], i64), T([10, 21], i64)), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([1024, 1, 768], f16), T([1024, 1, 768], f16)), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([1024, 12, 513], f16, stride=(513, 525312, 1)), T([1024, 12, 513], f16)), {}) |
aten.copy_.default | HuggingFace/XLNetLMHeadModel | ((T([1024, 16, 64], f16), T([1024, 16, 64], f16, stride=(1, 1024, 16384))), {}) |
aten.copy_.default | TorchBench/fambench_dlrm | ((T([1024, 2000], f16), T([1024, 2000], f16)), {}) |
aten.copy_.default | TIMM/convnext_base | ((T([1024, 512, 2, 2], f16), T([1024, 512, 2, 2], f16, stride=(2048, 1, 1024, 512))), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([1024, 768], f16), T([1024, 768], f16)), {}) |
aten.copy_.default | TorchBench/speech_transformer | ((T([10], i64), T([10], i64)), {}) |
aten.copy_.default | TorchBench/speech_transformer | ((T([11], i64), T([11], i64)), {}) |
aten.copy_.default | HuggingFace/BigBird | ((T([12, 12, 64, 64], f16), T([12, 12, 64, 64], f16, stride=(64, 49152, 768, 1))), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([12, 255, 255], f16, stride=(525312, 513, 1)), T([12, 255, 255], f16)), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([12, 255, 255], f16, stride=(525312, 513, 1)), T([12, 255, 255], f16, stride=(787968, 513, 1))), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([12, 256, 257], f16, stride=(525312, 513, 1)), T([12, 256, 257], f16)), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([12, 256, 257], f16, stride=(525312, 513, 1)), T([12, 256, 257], f16, stride=(787968, 513, 1))), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([12, 3, 256, 256], f16, stride=(525312, 131328, 513, 1)), T([12, 3, 256, 256], f16)), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([12, 3, 256, 256], f16, stride=(525312, 131328, 513, 1)), T([12, 3, 256, 256], f16, stride=(787968, 262656, 513, 1))), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([12, 3, 256, 257], f16, stride=(525312, 131328, 513, 1)), T([12, 3, 256, 257], f16, stride=(787968, 262656, 513, 1))), {}) |
aten.copy_.default | HuggingFace/AllenaiLongformerBase | ((T([12, 4, 256, 513], f16), T([12, 4, 256, 513], f16)), {}) |
aten.copy_.default | TIMM/ghostnet_100 | ((T([128, 112, 14, 14], f16), T([128, 112, 14, 14], f16)), {}) |
aten.copy_.default | TorchBench/pytorch_CycleGAN_and_pix2pix | ((T([128, 128, 128], f16), T([128, 128, 128], f16)), {}) |
aten.copy_.default | TIMM/twins_pcpvt_base | ((T([128, 128, 4, 4], f16), T([128, 128, 4, 4], f16, stride=(2048, 1, 512, 128))), {}) |
aten.copy_.default | TIMM/ghostnet_100 | ((T([128, 16, 112, 112], f16), T([128, 16, 112, 112], f16)), {}) |
aten.copy_.default | TIMM/ghostnet_100 | ((T([128, 160, 7, 7], f16), T([128, 160, 7, 7], f16)), {}) |
aten.copy_.default | TIMM/ghostnet_100 | ((T([128, 24, 56, 56], f16), T([128, 24, 56, 56], f16)), {}) |
aten.copy_.default | TIMM/dm_nfnet_f0 | ((T([128, 3, 192, 192], f16), T([128, 3, 192, 192], f16)), {}) |
aten.copy_.default | TIMM/tinynet_a | ((T([128, 3, 192, 192], f16), T([128, 3, 192, 192], f16)), {}) |
aten.copy_.default | TorchBench/timm_nfnet | ((T([128, 3, 192, 192], f16), T([128, 3, 192, 192], f16)), {}) |
aten.copy_.default | TIMM/coat_lite_mini | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/ese_vovnet19b_dw | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/fbnetc_100 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/fbnetv3_b | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/ghostnet_100 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.