| operator name | used in model | args |
|---|---|---|
aten.mm.default | HuggingFace/GPTNeoForSequenceClassification | ((T([128, 2], f16), T([2, 2048], f16)), {}) |
aten.mm.default | TorchBench/alexnet | ((T([128, 4096], f16), T([4096, 4096], f16)), {}) |
aten.mm.default | TorchBench/alexnet | ((T([128, 4096], f16), T([4096, 9216], f16)), {}) |
aten.mm.default | HuggingFace/MobileBertForQuestionAnswering | ((T([128, 4096], f16, stride=(1, 128)), T([4096, 128], f16)), {}) |
aten.mm.default | HuggingFace/MobileBertForQuestionAnswering | ((T([128, 4096], f16, stride=(1, 128)), T([4096, 512], f16)), {}) |
aten.mm.default | TorchBench/hf_Albert | ((T([128, 4096], f16, stride=(1, 128)), T([4096, 768], f16)), {}) |
aten.mm.default | HuggingFace/GPTNeoForCausalLM | ((T([128, 50257], f16), T([50257, 2048], f16)), {}) |
aten.mm.default | HuggingFace/ElectraForCausalLM | ((T([128, 512], f16, stride=(1, 128)), T([512, 256], f16)), {}) |
aten.mm.default | TIMM/levit_128 | ((T([128, 6272], f16, stride=(1, 128)), T([6272, 128], f16)), {}) |
aten.mm.default | TIMM/crossvit_9_240 | ((T([128, 64], f16, stride=(1, 128)), T([64, 128], f16)), {}) |
aten.mm.default | TIMM/crossvit_9_240 | ((T([128, 64], f16, stride=(1, 128)), T([64, 128], f16, stride=(51328, 1))), {}) |
aten.mm.default | TIMM/crossvit_9_240 | ((T([128, 64], f16, stride=(1, 51328)), T([64, 256], f16)), {}) |
aten.mm.default | HuggingFace/GPTNeoForCausalLM | ((T([128, 8192], f16), T([8192, 2048], f16)), {}) |
aten.mm.default | HuggingFace/GPTNeoForSequenceClassification | ((T([128, 8192], f16), T([8192, 2048], f16)), {}) |
aten.mm.default | TIMM/coat_lite_mini | ((T([1280, 25216], f16, stride=(1, 1280)), T([25216, 320], f16)), {}) |
aten.mm.default | TorchBench/mobilenet_v3_large | ((T([1280, 32], f16, stride=(1, 1280)), T([32, 960], f16)), {}) |
aten.mm.default | TIMM/levit_128 | ((T([1280, 6272], f16, stride=(1, 1280)), T([6272, 256], f16)), {}) |
aten.mm.default | TIMM/twins_pcpvt_base | ((T([1280, 6272], f16, stride=(1, 1280)), T([6272, 320], f16)), {}) |
aten.mm.default | HuggingFace/DebertaV2ForMaskedLM | ((T([128100, 512], f16, stride=(1, 128100)), T([512, 1536], f16)), {}) |
aten.mm.default | HuggingFace/M2M100ForConditionalGeneration | ((T([128112, 256], f16, stride=(1, 128112)), T([256, 1024], f16)), {}) |
aten.mm.default | TIMM/botnet26t_256 | ((T([131072, 128], f16), T([128, 31], f16, stride=(1, 128))), {}) |
aten.mm.default | TIMM/eca_botnext26ts_256 | ((T([131072, 16], f16), T([16, 31], f16, stride=(1, 16))), {}) |
aten.mm.default | TIMM/botnet26t_256 | ((T([131072, 31], f16), T([31, 128], f16)), {}) |
aten.mm.default | TIMM/eca_botnext26ts_256 | ((T([131072, 31], f16), T([31, 16], f16)), {}) |
aten.mm.default | TIMM/botnet26t_256 | ((T([131072, 31], f16), T([31, 64], f16)), {}) |
aten.mm.default | TIMM/botnet26t_256 | ((T([131072, 64], f16), T([64, 31], f16, stride=(1, 64))), {}) |
aten.mm.default | TIMM/mobilevit_s | ((T([144, 65536], f16, stride=(1, 144)), T([65536, 144], f16)), {}) |
aten.mm.default | TIMM/mobilevit_s | ((T([144, 65536], f16, stride=(1, 144)), T([65536, 288], f16)), {}) |
aten.mm.default | TIMM/sebotnet33ts_256 | ((T([15, 16384], f16, stride=(1, 15)), T([16384, 128], f16)), {}) |
aten.mm.default | TIMM/botnet26t_256 | ((T([15, 32768], f16, stride=(1, 15)), T([32768, 128], f16)), {}) |
aten.mm.default | TIMM/eca_botnext26ts_256 | ((T([15, 32768], f16, stride=(1, 15)), T([32768, 16], f16)), {}) |
aten.mm.default | TorchBench/fambench_dlrm | ((T([1500, 1024], f16, stride=(1, 1500)), T([1024, 1500], f16)), {}) |
aten.mm.default | TorchBench/fambench_dlrm | ((T([1500, 1024], f16, stride=(1, 1500)), T([1024, 2000], f16)), {}) |
aten.mm.default | TIMM/gmlp_s16_224 | ((T([1536, 12544], f16, stride=(1, 1536)), T([12544, 256], f16)), {}) |
aten.mm.default | TIMM/gmixer_24_224 | ((T([1536, 12544], f16, stride=(1, 1536)), T([12544, 384], f16)), {}) |
aten.mm.default | TIMM/jx_nest_base | ((T([1536, 12544], f16, stride=(1, 1536)), T([12544, 512], f16)), {}) |
aten.mm.default | TIMM/swin_base_patch4_window7_224 | ((T([1536, 12544], f16, stride=(1, 1536)), T([12544, 512], f16)), {}) |
aten.mm.default | TIMM/tnt_s_patch16_224 | ((T([1536, 12608], f16, stride=(1, 1536)), T([12608, 384], f16)), {}) |
aten.mm.default | TorchBench/timm_vision_transformer | ((T([1536, 1576], f16, stride=(1, 1536)), T([1576, 384], f16)), {}) |
aten.mm.default | TIMM/pit_b_224 | ((T([1536, 16448], f16, stride=(1, 1536)), T([16448, 512], f16)), {}) |
aten.mm.default | HuggingFace/DebertaV2ForMaskedLM | ((T([1536, 512], f16), T([512, 1536], f16)), {}) |
aten.mm.default | HuggingFace/DebertaV2ForQuestionAnswering | ((T([1536, 512], f16), T([512, 1536], f16)), {}) |
aten.mm.default | HuggingFace/DebertaV2ForMaskedLM | ((T([1536, 512], f16, stride=(1, 1536)), T([512, 1536], f16)), {}) |
aten.mm.default | HuggingFace/DebertaV2ForQuestionAnswering | ((T([1536, 512], f16, stride=(1, 1536)), T([512, 1536], f16)), {}) |
aten.mm.default | HuggingFace/DebertaV2ForMaskedLM | ((T([1536, 512], f16, stride=(1, 1536)), T([512, 6144], f16)), {}) |
aten.mm.default | HuggingFace/DebertaV2ForQuestionAnswering | ((T([1536, 512], f16, stride=(1, 1536)), T([512, 6144], f16)), {}) |
aten.mm.default | TIMM/coat_lite_mini | ((T([1536, 6400], f16, stride=(1, 1536)), T([6400, 512], f16)), {}) |
aten.mm.default | TIMM/convnext_base | ((T([1568, 1024], f16), T([1024, 4096], f16)), {}) |
aten.mm.default | TIMM/convnext_base | ((T([1568, 1024], f16), T([1024, 4096], f16, stride=(1, 1024))), {}) |
aten.mm.default | TIMM/twins_pcpvt_base | ((T([1568, 1024], f16), T([1024, 512], f16)), {}) |
aten.mm.default | TIMM/twins_pcpvt_base | ((T([1568, 128], f16), T([128, 64], f16)), {}) |
aten.mm.default | TIMM/twins_pcpvt_base | ((T([1568, 2048], f16), T([2048, 512], f16)), {}) |
aten.mm.default | TIMM/twins_pcpvt_base | ((T([1568, 256], f16), T([256, 128], f16)), {}) |
aten.mm.default | TIMM/convnext_base | ((T([1568, 4096], f16), T([4096, 1024], f16)), {}) |
aten.mm.default | TIMM/convnext_base | ((T([1568, 4096], f16), T([4096, 1024], f16, stride=(1, 4096))), {}) |
aten.mm.default | TIMM/twins_pcpvt_base | ((T([1568, 512], f16), T([512, 2048], f16)), {}) |
aten.mm.default | TIMM/twins_pcpvt_base | ((T([1568, 512], f16), T([512, 512], f16)), {}) |
aten.mm.default | TIMM/twins_pcpvt_base | ((T([1568, 640], f16), T([640, 320], f16)), {}) |
aten.mm.default | TorchBench/timm_vision_transformer | ((T([1576, 1152], f16), T([1152, 384], f16)), {}) |
aten.mm.default | TorchBench/timm_vision_transformer | ((T([1576, 1536], f16), T([1536, 384], f16)), {}) |
aten.mm.default | TorchBench/timm_vision_transformer | ((T([1576, 384], f16), T([384, 1536], f16)), {}) |
aten.mm.default | TorchBench/timm_vision_transformer | ((T([1576, 384], f16), T([384, 384], f16)), {}) |
aten.mm.default | TIMM/nasnetalarge | ((T([16, 1000], f16), T([1000, 4032], f16)), {}) |
aten.mm.default | TIMM/pnasnet5large | ((T([16, 1000], f16), T([1000, 4320], f16)), {}) |
aten.mm.default | TorchBench/resnet18 | ((T([16, 1000], f16, stride=(0, 0)), T([1000, 512], f16)), {}) |
aten.mm.default | HuggingFace/LayoutLMForSequenceClassification | ((T([16, 2], f16), T([2, 768], f16)), {}) |
aten.mm.default | TIMM/cait_m36_384 | ((T([16, 663552], f16, stride=(1, 16)), T([663552, 16], f16)), {}) |
aten.mm.default | HuggingFace/LayoutLMForSequenceClassification | ((T([16, 768], f16), T([768, 768], f16)), {}) |
aten.mm.default | HuggingFace/AlbertForMaskedLM | ((T([16384, 1024], f16, stride=(1, 16384)), T([1024, 4096], f16)), {}) |
aten.mm.default | HuggingFace/AlbertForQuestionAnswering | ((T([16384, 1024], f16, stride=(1, 16384)), T([1024, 4096], f16)), {}) |
aten.mm.default | TIMM/sebotnet33ts_256 | ((T([16384, 128], f16), T([128, 15], f16, stride=(1, 128))), {}) |
aten.mm.default | TIMM/sebotnet33ts_256 | ((T([16384, 15], f16), T([15, 128], f16)), {}) |
aten.mm.default | TIMM/mobilevit_s | ((T([16384, 192], f16), T([192, 192], f16)), {}) |
aten.mm.default | TIMM/mobilevit_s | ((T([16384, 192], f16), T([192, 384], f16)), {}) |
aten.mm.default | TIMM/mobilevit_s | ((T([16384, 384], f16), T([384, 192], f16)), {}) |
aten.mm.default | TIMM/mobilevit_s | ((T([16384, 576], f16), T([576, 192], f16)), {}) |
aten.mm.default | TIMM/pit_b_224 | ((T([16448, 1536], f16), T([1536, 512], f16)), {}) |
aten.mm.default | TIMM/pit_b_224 | ((T([16448, 2048], f16), T([2048, 512], f16)), {}) |
aten.mm.default | TIMM/pit_b_224 | ((T([16448, 512], f16), T([512, 2048], f16)), {}) |
aten.mm.default | TIMM/pit_b_224 | ((T([16448, 512], f16), T([512, 512], f16)), {}) |
aten.mm.default | TorchBench/fambench_dlrm | ((T([192, 1024], f16, stride=(1, 192)), T([1024, 1500], f16)), {}) |
aten.mm.default | TIMM/mobilevit_s | ((T([192, 16384], f16, stride=(1, 192)), T([16384, 192], f16)), {}) |
aten.mm.default | TIMM/mobilevit_s | ((T([192, 16384], f16, stride=(1, 192)), T([16384, 384], f16)), {}) |
aten.mm.default | TIMM/coat_lite_mini | ((T([192, 401536], f16, stride=(1, 192)), T([401536, 64], f16)), {}) |
aten.mm.default | TIMM/volo_d1_224 | ((T([192, 50176], f16, stride=(1, 192)), T([50176, 192], f16)), {}) |
aten.mm.default | TIMM/volo_d1_224 | ((T([192, 50176], f16, stride=(1, 192)), T([50176, 576], f16)), {}) |
aten.mm.default | TIMM/gmixer_24_224 | ((T([196, 24576], f16, stride=(1, 196)), T([24576, 192], f16)), {}) |
aten.mm.default | TIMM/resmlp_12_224 | ((T([196, 49152], f16, stride=(1, 196)), T([49152, 196], f16)), {}) |
aten.mm.default | TIMM/mixer_b16_224 | ((T([196, 49152], f16, stride=(1, 196)), T([49152, 384], f16)), {}) |
aten.mm.default | TorchBench/nvidia_deeprecommender | ((T([197951, 256], f16, stride=(1, 197951)), T([256, 512], f16)), {}) |
aten.mm.default | TIMM/cait_m36_384 | ((T([2, 1000], f16), T([1000, 768], f16)), {}) |
aten.mm.default | HuggingFace/MegatronBertForQuestionAnswering | ((T([2, 1024], f16, stride=(1, 2)), T([1024, 1024], f16)), {}) |
aten.mm.default | HuggingFace/AlbertForQuestionAnswering | ((T([2, 1024], f16, stride=(1, 2)), T([1024, 4096], f16)), {}) |
aten.mm.default | HuggingFace/GPTNeoForSequenceClassification | ((T([2, 128], f16, stride=(1, 2)), T([128, 2048], f16)), {}) |
aten.mm.default | HuggingFace/LayoutLMForSequenceClassification | ((T([2, 16], f16, stride=(1, 2)), T([16, 768], f16)), {}) |
aten.mm.default | HuggingFace/DebertaForQuestionAnswering | ((T([2, 2048], f16, stride=(1, 2)), T([2048, 768], f16)), {}) |
aten.mm.default | TorchBench/fastNLP_Bert | ((T([2, 2844], f16, stride=(1, 2)), T([2844, 768], f16)), {}) |
aten.mm.default | TIMM/cait_m36_384 | ((T([2, 3072], f16), T([3072, 768], f16)), {}) |
aten.mm.default | HuggingFace/ElectraForQuestionAnswering | ((T([2, 32768], f16, stride=(1, 2)), T([32768, 256], f16)), {}) |
aten.mm.default | HuggingFace/MobileBertForQuestionAnswering | ((T([2, 4096], f16, stride=(1, 2)), T([4096, 512], f16)), {}) |