| operator name | used in model | args |
|---|---|---|
aten.mm.default | TorchBench/hf_Bert | ((T([2048, 768], f16), T([768, 3072], f16)), {}) |
aten.mm.default | TorchBench/hf_BigBird | ((T([2048, 768], f16), T([768, 3072], f16)), {}) |
aten.mm.default | TorchBench/hf_Longformer | ((T([2048, 768], f16), T([768, 3072], f16)), {}) |
aten.mm.default | TorchBench/hf_GPT2 | ((T([2048, 768], f16), T([768, 3072], f16, stride=(1, 768))), {}) |
aten.mm.default | TIMM/levit_128 | ((T([2048, 768], f16), T([768, 384], f16)), {}) |
aten.mm.default | TIMM/levit_128 | ((T([2048, 768], f16), T([768, 384], f16, stride=(1, 768))), {}) |
aten.mm.default | HuggingFace/PLBartForCausalLM | ((T([2048, 768], f16), T([768, 50005], f16, stride=(1, 768))), {}) |
aten.mm.default | TorchBench/hf_GPT2 | ((T([2048, 768], f16), T([768, 50257], f16, stride=(1, 768))), {}) |
aten.mm.default | TorchBench/hf_Bart | ((T([2048, 768], f16), T([768, 50265], f16, stride=(1, 768))), {}) |
aten.mm.default | HuggingFace/DebertaForMaskedLM | ((T([2048, 768], f16), T([768, 768], f16)), {}) |
aten.mm.default | HuggingFace/DebertaForQuestionAnswering | ((T([2048, 768], f16), T([768, 768], f16)), {}) |
aten.mm.default | HuggingFace/DistilBertForMaskedLM | ((T([2048, 768], f16), T([768, 768], f16)), {}) |
aten.mm.default | HuggingFace/PLBartForCausalLM | ((T([2048, 768], f16), T([768, 768], f16)), {}) |
aten.mm.default | TorchBench/BERT_pytorch | ((T([2048, 768], f16), T([768, 768], f16)), {}) |
aten.mm.default | TorchBench/hf_Bart | ((T([2048, 768], f16), T([768, 768], f16)), {}) |
aten.mm.default | TorchBench/hf_Bert | ((T([2048, 768], f16), T([768, 768], f16)), {}) |
aten.mm.default | TorchBench/hf_BigBird | ((T([2048, 768], f16), T([768, 768], f16)), {}) |
aten.mm.default | TorchBench/hf_Longformer | ((T([2048, 768], f16), T([768, 768], f16)), {}) |
aten.mm.default | TorchBench/hf_GPT2 | ((T([2048, 768], f16), T([768, 768], f16, stride=(1, 768))), {}) |
aten.mm.default | TorchBench/hf_Longformer | ((T([2048, 768], f16), T([768, 768], f16, stride=(1, 768))), {}) |
aten.mm.default | TorchBench/BERT_pytorch | ((T([2048, 768], f16, stride=(0, 0)), T([768, 3072], f16)), {}) |
aten.mm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 7936], f16, stride=(1, 2048)), T([7936, 512], f16)), {}) |
aten.mm.default | HuggingFace/Speech2Text2ForCausalLM | ((T([2048, 8192], f16, stride=(1, 2048)), T([8192, 256], f16)), {}) |
aten.mm.default | HuggingFace/BlenderbotSmallForCausalLM | ((T([2048, 8192], f16, stride=(1, 2048)), T([8192, 512], f16)), {}) |
aten.mm.default | HuggingFace/BlenderbotSmallForConditionalGeneration | ((T([2048, 8192], f16, stride=(1, 2048)), T([8192, 512], f16)), {}) |
aten.mm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 8448], f16, stride=(1, 2048)), T([8448, 512], f16)), {}) |
aten.mm.default | TorchBench/speech_transformer | ((T([220, 1014], f16, stride=(0, 0)), T([1014, 512], f16)), {}) |
aten.mm.default | TorchBench/speech_transformer | ((T([220, 2048], f16), T([2048, 512], f16)), {}) |
aten.mm.default | TorchBench/speech_transformer | ((T([220, 512], f16), T([512, 1014], f16, stride=(1, 512))), {}) |
aten.mm.default | TorchBench/speech_transformer | ((T([220, 512], f16), T([512, 2048], f16)), {}) |
aten.mm.default | TorchBench/speech_transformer | ((T([220, 512], f16), T([512, 512], f16)), {}) |
aten.mm.default | TIMM/eca_halonext26ts | ((T([23, 262144], f16, stride=(1, 23)), T([262144, 16], f16)), {}) |
aten.mm.default | TIMM/eca_halonext26ts | ((T([23, 65536], f16, stride=(1, 23)), T([65536, 16], f16)), {}) |
aten.mm.default | TIMM/cait_m36_384 | ((T([2304, 1152], f16, stride=(1, 2304)), T([1152, 768], f16)), {}) |
aten.mm.default | TIMM/beit_base_patch16_224 | ((T([2304, 12608], f16, stride=(1, 2304)), T([12608, 768], f16)), {}) |
aten.mm.default | TIMM/vit_base_patch16_224 | ((T([2304, 12608], f16, stride=(1, 2304)), T([12608, 768], f16)), {}) |
aten.mm.default | TIMM/deit_base_distilled_patch16_224 | ((T([2304, 12672], f16, stride=(1, 2304)), T([12672, 768], f16)), {}) |
aten.mm.default | HuggingFace/DebertaForMaskedLM | ((T([2304, 2048], f16, stride=(1, 2304)), T([2048, 768], f16)), {}) |
aten.mm.default | HuggingFace/DebertaForQuestionAnswering | ((T([2304, 2048], f16, stride=(1, 2304)), T([2048, 768], f16)), {}) |
aten.mm.default | TIMM/tnt_s_patch16_224 | ((T([24, 200704], f16, stride=(1, 24)), T([200704, 24], f16)), {}) |
aten.mm.default | TIMM/tnt_s_patch16_224 | ((T([24, 200704], f16, stride=(1, 24)), T([200704, 96], f16)), {}) |
aten.mm.default | TIMM/mobilevit_s | ((T([240, 4096], f16, stride=(1, 240)), T([4096, 240], f16)), {}) |
aten.mm.default | TIMM/mobilevit_s | ((T([240, 4096], f16, stride=(1, 240)), T([4096, 480], f16)), {}) |
aten.mm.default | TIMM/gmixer_24_224 | ((T([24576, 196], f16), T([196, 192], f16)), {}) |
aten.mm.default | TIMM/twins_pcpvt_base | ((T([25088, 1024], f16), T([1024, 128], f16)), {}) |
aten.mm.default | TIMM/convnext_base | ((T([25088, 1024], f16), T([1024, 256], f16)), {}) |
aten.mm.default | TIMM/convnext_base | ((T([25088, 1024], f16), T([1024, 256], f16, stride=(1, 1024))), {}) |
aten.mm.default | TIMM/twins_pcpvt_base | ((T([25088, 128], f16), T([128, 1024], f16)), {}) |
aten.mm.default | TIMM/levit_128 | ((T([25088, 128], f16), T([128, 128], f16)), {}) |
aten.mm.default | TIMM/twins_pcpvt_base | ((T([25088, 128], f16), T([128, 128], f16)), {}) |
aten.mm.default | TIMM/levit_128 | ((T([25088, 128], f16), T([128, 128], f16, stride=(1, 128))), {}) |
aten.mm.default | TIMM/levit_128 | ((T([25088, 128], f16), T([128, 256], f16)), {}) |
aten.mm.default | TIMM/convnext_base | ((T([25088, 256], f16), T([256, 1024], f16)), {}) |
aten.mm.default | TIMM/convnext_base | ((T([25088, 256], f16), T([256, 1024], f16, stride=(1, 256))), {}) |
aten.mm.default | TIMM/levit_128 | ((T([25088, 256], f16), T([256, 128], f16, stride=(1, 256))), {}) |
aten.mm.default | TIMM/resmlp_12_224 | ((T([25088, 384], f16), T([384, 1536], f16)), {}) |
aten.mm.default | TIMM/coat_lite_mini | ((T([25216, 1280], f16), T([1280, 320], f16)), {}) |
aten.mm.default | TIMM/coat_lite_mini | ((T([25216, 320], f16), T([320, 1280], f16)), {}) |
aten.mm.default | TIMM/coat_lite_mini | ((T([25216, 320], f16), T([320, 320], f16)), {}) |
aten.mm.default | TIMM/coat_lite_mini | ((T([25216, 960], f16), T([960, 320], f16)), {}) |
aten.mm.default | HuggingFace/M2M100ForConditionalGeneration | ((T([256, 1024], f16), T([1024, 1024], f16)), {}) |
aten.mm.default | HuggingFace/MegatronBertForCausalLM | ((T([256, 1024], f16), T([1024, 1024], f16)), {}) |
aten.mm.default | HuggingFace/XGLMForCausalLM | ((T([256, 1024], f16), T([1024, 1024], f16)), {}) |
aten.mm.default | HuggingFace/M2M100ForConditionalGeneration | ((T([256, 1024], f16), T([1024, 128112], f16, stride=(1, 1024))), {}) |
aten.mm.default | HuggingFace/XGLMForCausalLM | ((T([256, 1024], f16), T([1024, 256008], f16, stride=(1, 1024))), {}) |
aten.mm.default | HuggingFace/M2M100ForConditionalGeneration | ((T([256, 1024], f16), T([1024, 4096], f16)), {}) |
aten.mm.default | HuggingFace/MegatronBertForCausalLM | ((T([256, 1024], f16), T([1024, 4096], f16)), {}) |
aten.mm.default | HuggingFace/XGLMForCausalLM | ((T([256, 1024], f16), T([1024, 4096], f16)), {}) |
aten.mm.default | TorchBench/nvidia_deeprecommender | ((T([256, 1024], f16), T([1024, 512], f16)), {}) |
aten.mm.default | TIMM/gmlp_s16_224 | ((T([256, 12544], f16, stride=(1, 256)), T([12544, 768], f16)), {}) |
aten.mm.default | TIMM/crossvit_9_240 | ((T([256, 12608], f16, stride=(1, 256)), T([12608, 256], f16)), {}) |
aten.mm.default | TIMM/crossvit_9_240 | ((T([256, 12608], f16, stride=(1, 256)), T([12608, 768], f16)), {}) |
aten.mm.default | HuggingFace/M2M100ForConditionalGeneration | ((T([256, 128112], f16), T([128112, 1024], f16)), {}) |
aten.mm.default | TIMM/twins_pcpvt_base | ((T([256, 1568], f16, stride=(1, 256)), T([1568, 128], f16)), {}) |
aten.mm.default | TorchBench/nvidia_deeprecommender | ((T([256, 197951], f16), T([197951, 512], f16)), {}) |
aten.mm.default | TIMM/levit_128 | ((T([256, 2048], f16, stride=(1, 256)), T([2048, 256], f16)), {}) |
aten.mm.default | TIMM/convnext_base | ((T([256, 25088], f16, stride=(1, 256)), T([25088, 1024], f16)), {}) |
aten.mm.default | HuggingFace/XGLMForCausalLM | ((T([256, 256008], f16), T([256008, 1024], f16)), {}) |
aten.mm.default | HuggingFace/MegatronBertForCausalLM | ((T([256, 29056], f16), T([29056, 1024], f16)), {}) |
aten.mm.default | TorchBench/pytorch_struct | ((T([256, 30], f16), T([30, 256], f16)), {}) |
aten.mm.default | TorchBench/pytorch_struct | ((T([256, 30], f16, stride=(1, 256)), T([30, 256], f16)), {}) |
aten.mm.default | TorchBench/tts_angular | ((T([256, 3200], f16, stride=(1, 256)), T([3200, 768], f16)), {}) |
aten.mm.default | HuggingFace/ElectraForQuestionAnswering | ((T([256, 32768], f16, stride=(1, 256)), T([32768, 1024], f16)), {}) |
aten.mm.default | HuggingFace/ElectraForQuestionAnswering | ((T([256, 32768], f16, stride=(1, 256)), T([32768, 128], f16)), {}) |
aten.mm.default | HuggingFace/ElectraForQuestionAnswering | ((T([256, 32768], f16, stride=(1, 256)), T([32768, 256], f16)), {}) |
aten.mm.default | HuggingFace/M2M100ForConditionalGeneration | ((T([256, 4096], f16), T([4096, 1024], f16)), {}) |
aten.mm.default | HuggingFace/MegatronBertForCausalLM | ((T([256, 4096], f16), T([4096, 1024], f16)), {}) |
aten.mm.default | HuggingFace/XGLMForCausalLM | ((T([256, 4096], f16), T([4096, 1024], f16)), {}) |
aten.mm.default | TIMM/jx_nest_base | ((T([256, 50176], f16, stride=(1, 256)), T([50176, 1024], f16)), {}) |
aten.mm.default | TIMM/swin_base_patch4_window7_224 | ((T([256, 50176], f16, stride=(1, 256)), T([50176, 1024], f16)), {}) |
aten.mm.default | TIMM/jx_nest_base | ((T([256, 50176], f16, stride=(1, 256)), T([50176, 256], f16)), {}) |
aten.mm.default | TIMM/swin_base_patch4_window7_224 | ((T([256, 50176], f16, stride=(1, 256)), T([50176, 256], f16)), {}) |
aten.mm.default | TIMM/swin_base_patch4_window7_224 | ((T([256, 50176], f16, stride=(1, 256)), T([50176, 512], f16)), {}) |
aten.mm.default | TorchBench/nvidia_deeprecommender | ((T([256, 512], f16), T([512, 1024], f16)), {}) |
aten.mm.default | HuggingFace/ElectraForCausalLM | ((T([256, 512], f16), T([512, 256], f16)), {}) |
aten.mm.default | TorchBench/nvidia_deeprecommender | ((T([256, 512], f16), T([512, 512], f16)), {}) |
aten.mm.default | HuggingFace/ElectraForCausalLM | ((T([256, 512], f16, stride=(1, 256)), T([512, 1024], f16)), {}) |
aten.mm.default | HuggingFace/ElectraForCausalLM | ((T([256, 512], f16, stride=(1, 256)), T([512, 128], f16)), {}) |
aten.mm.default | HuggingFace/ElectraForCausalLM | ((T([256, 512], f16, stride=(1, 256)), T([512, 256], f16)), {}) |
aten.mm.default | TIMM/pit_b_224 | ((T([256, 61568], f16, stride=(1, 256)), T([61568, 1024], f16)), {}) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.