| operator name | used in model | args |
|---|---|---|
aten.copy_.default | TIMM/hardcorenas_a | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/hrnet_w18 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/lcnet_050 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/levit_128 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/mnasnet_100 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/mobilenetv2_100 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/mobilenetv3_large_100 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/nfnet_l0 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/regnety_002 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/repvgg_a2 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/res2net50_14w_8s | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/res2next50 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/resmlp_12_224 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/resnet18 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/rexnet_100 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/selecsls42b | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/spnasnet_100 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/tf_efficientnet_b0 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/visformer_small | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TorchBench/alexnet | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TorchBench/shufflenet_v2_x1_0 | ((T([128, 3, 224, 224], f16), T([128, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/botnet26t_256 | ((T([128, 3, 256, 256], f16), T([128, 3, 256, 256], f16)), {}) |
aten.copy_.default | TIMM/eca_botnext26ts_256 | ((T([128, 3, 256, 256], f16), T([128, 3, 256, 256], f16)), {}) |
aten.copy_.default | TIMM/eca_halonext26ts | ((T([128, 3, 256, 256], f16), T([128, 3, 256, 256], f16)), {}) |
aten.copy_.default | TIMM/gernet_l | ((T([128, 3, 256, 256], f16), T([128, 3, 256, 256], f16)), {}) |
aten.copy_.default | TIMM/adv_inception_v3 | ((T([128, 3, 299, 299], f16), T([128, 3, 299, 299], f16)), {}) |
aten.copy_.default | TIMM/gluon_inception_v3 | ((T([128, 3, 299, 299], f16), T([128, 3, 299, 299], f16)), {}) |
aten.copy_.default | TIMM/inception_v3 | ((T([128, 3, 299, 299], f16), T([128, 3, 299, 299], f16)), {}) |
aten.copy_.default | TIMM/ghostnet_100 | ((T([128, 40, 28, 28], f16), T([128, 40, 28, 28], f16)), {}) |
aten.copy_.default | TorchBench/Background_Matting | ((T([128, 64, 3, 3], f16), T([128, 64, 3, 3], f16, stride=(576, 1, 192, 64))), {}) |
aten.copy_.default | TIMM/ghostnet_100 | ((T([128, 80, 14, 14], f16), T([128, 80, 14, 14], f16)), {}) |
aten.copy_.default | TorchBench/pytorch_stargan | ((T([128], f16), T([128], f16)), {}) |
aten.copy_.default | TorchBench/speech_transformer | ((T([12], i64), T([12], i64)), {}) |
aten.copy_.default | TorchBench/speech_transformer | ((T([13], i64), T([13], i64)), {}) |
aten.copy_.default | HuggingFace/BigBird | ((T([144, 64, 64], f16), T([144, 64, 64], f16)), {}) |
aten.copy_.default | TIMM/resmlp_12_224 | ((T([1536, 384], f16), T([1536, 384], f16, stride=(1, 1536))), {}) |
aten.copy_.default | TorchBench/pytorch_stargan | ((T([16, 128, 64, 64], f16), T([16, 128, 64, 64], f16)), {}) |
aten.copy_.default | HuggingFace/DistilBertForMaskedLM | ((T([16, 128], i64), T([16, 128], i64)), {}) |
aten.copy_.default | HuggingFace/MBartForCausalLM | ((T([16, 128], i64), T([16, 128], i64)), {}) |
aten.copy_.default | HuggingFace/MobileBertForMaskedLM | ((T([16, 128], i64), T([16, 128], i64)), {}) |
aten.copy_.default | HuggingFace/PLBartForCausalLM | ((T([16, 128], i64), T([16, 128], i64)), {}) |
aten.copy_.default | TorchBench/BERT_pytorch | ((T([16, 128], i64), T([16, 128], i64)), {}) |
aten.copy_.default | TorchBench/pytorch_stargan | ((T([16, 256, 32, 32], f16), T([16, 256, 32, 32], f16)), {}) |
aten.copy_.default | TorchBench/pytorch_stargan | ((T([16, 3, 128, 128], f16), T([16, 3, 128, 128], f16)), {}) |
aten.copy_.default | TorchBench/resnet18 | ((T([16, 3, 224, 224], f16), T([16, 3, 224, 224], f16)), {}) |
aten.copy_.default | TIMM/nasnetalarge | ((T([16, 3, 331, 331], f16), T([16, 3, 331, 331], f16)), {}) |
aten.copy_.default | TIMM/pnasnet5large | ((T([16, 3, 331, 331], f16), T([16, 3, 331, 331], f16)), {}) |
aten.copy_.default | HuggingFace/LayoutLMForMaskedLM | ((T([16, 512], i64), T([16, 512], i64)), {}) |
aten.copy_.default | HuggingFace/LayoutLMForSequenceClassification | ((T([16, 512], i64), T([16, 512], i64)), {}) |
aten.copy_.default | TorchBench/pytorch_stargan | ((T([16, 5], f16), T([16, 5], f16)), {}) |
aten.copy_.default | TorchBench/pytorch_stargan | ((T([16, 64, 128, 128], f16), T([16, 64, 128, 128], f16)), {}) |
aten.copy_.default | HuggingFace/LayoutLMForSequenceClassification | ((T([16], i64), T([16], i64)), {}) |
aten.copy_.default | TorchBench/speech_transformer | ((T([16], i64), T([16], i64)), {}) |
aten.copy_.default | TorchBench/speech_transformer | ((T([18], i64), T([18], i64)), {}) |
aten.copy_.default | TIMM/gmlp_s16_224 | ((T([196, 196], f16), T([196, 196], f16, stride=(1, 196))), {}) |
aten.copy_.default | TorchBench/speech_transformer | ((T([19], i64), T([19], i64)), {}) |
aten.copy_.default | HuggingFace/DebertaV2ForQuestionAnswering | ((T([1], i64), T([1], i64)), {}) |
aten.copy_.default | HuggingFace/GPTNeoForSequenceClassification | ((T([1], i64), T([1], i64)), {}) |
aten.copy_.default | HuggingFace/BartForConditionalGeneration | ((T([2, 1023], i64, stride=(1024, 1)), T([2, 1023], i64)), {}) |
aten.copy_.default | TorchBench/hf_Longformer | ((T([2, 1024, 12, 513], f16, stride=(6303744, 513, 525312, 1)), T([2, 1024, 12, 513], f16)), {}) |
aten.copy_.default | HuggingFace/BartForConditionalGeneration | ((T([2, 1024], i64), T([2, 1024], i64)), {}) |
aten.copy_.default | TorchBench/hf_BigBird | ((T([2, 1024], i64), T([2, 1024], i64)), {}) |
aten.copy_.default | TorchBench/hf_Longformer | ((T([2, 1024], i64), T([2, 1024], i64)), {}) |
aten.copy_.default | TorchBench/hf_BigBird | ((T([2, 12, 12, 64, 64], f16), T([2, 12, 12, 64, 64], f16)), {}) |
aten.copy_.default | TorchBench/hf_BigBird | ((T([2, 12, 12, 64, 64], f16), T([2, 12, 12, 64, 64], f16, stride=(786432, 64, 49152, 768, 1))), {}) |
aten.copy_.default | HuggingFace/XGLMForCausalLM | ((T([2, 127], i64, stride=(128, 1)), T([2, 127], i64)), {}) |
aten.copy_.default | HuggingFace/M2M100ForConditionalGeneration | ((T([2, 128], i64), T([2, 128], i64)), {}) |
aten.copy_.default | HuggingFace/MegatronBertForCausalLM | ((T([2, 128], i64), T([2, 128], i64)), {}) |
aten.copy_.default | HuggingFace/XGLMForCausalLM | ((T([2, 128], i64), T([2, 128], i64)), {}) |
aten.copy_.default | TorchBench/hf_Longformer | ((T([2, 255, 255], f16, stride=(525312, 513, 1)), T([2, 255, 255], f16, stride=(787968, 513, 1))), {}) |
aten.copy_.default | TorchBench/hf_Longformer | ((T([2, 256, 12, 257], f16, stride=(6303744, 513, 525312, 1)), T([2, 256, 12, 257], f16)), {}) |
aten.copy_.default | TorchBench/hf_Longformer | ((T([2, 256, 257], f16, stride=(525312, 513, 1)), T([2, 256, 257], f16, stride=(787968, 513, 1))), {}) |
aten.copy_.default | TorchBench/hf_Longformer | ((T([2, 3, 256, 256], f16, stride=(525312, 131328, 513, 1)), T([2, 3, 256, 256], f16, stride=(787968, 262656, 513, 1))), {}) |
aten.copy_.default | TorchBench/hf_Longformer | ((T([2, 3, 256, 257], f16, stride=(525312, 131328, 513, 1)), T([2, 3, 256, 257], f16, stride=(787968, 262656, 513, 1))), {}) |
aten.copy_.default | TIMM/cait_m36_384 | ((T([2, 3, 384, 384], f16), T([2, 3, 384, 384], f16)), {}) |
aten.copy_.default | HuggingFace/AlbertForMaskedLM | ((T([2, 512], i64), T([2, 512], i64)), {}) |
aten.copy_.default | HuggingFace/AlbertForQuestionAnswering | ((T([2, 512], i64), T([2, 512], i64)), {}) |
aten.copy_.default | TorchBench/speech_transformer | ((T([22], i64), T([22], i64)), {}) |
aten.copy_.default | TorchBench/hf_Longformer | ((T([24, 255, 255], f16, stride=(525312, 513, 1)), T([24, 255, 255], f16)), {}) |
aten.copy_.default | TorchBench/hf_Longformer | ((T([24, 255, 255], f16, stride=(525312, 513, 1)), T([24, 255, 255], f16, stride=(787968, 513, 1))), {}) |
aten.copy_.default | TorchBench/hf_Longformer | ((T([24, 256, 257], f16, stride=(525312, 513, 1)), T([24, 256, 257], f16)), {}) |
aten.copy_.default | TorchBench/hf_Longformer | ((T([24, 256, 257], f16, stride=(525312, 513, 1)), T([24, 256, 257], f16, stride=(787968, 513, 1))), {}) |
aten.copy_.default | TorchBench/hf_Longformer | ((T([24, 3, 256, 256], f16, stride=(525312, 131328, 513, 1)), T([24, 3, 256, 256], f16)), {}) |
aten.copy_.default | TorchBench/hf_Longformer | ((T([24, 3, 256, 256], f16, stride=(525312, 131328, 513, 1)), T([24, 3, 256, 256], f16, stride=(787968, 262656, 513, 1))), {}) |
aten.copy_.default | TorchBench/hf_Longformer | ((T([24, 3, 256, 257], f16, stride=(525312, 131328, 513, 1)), T([24, 3, 256, 257], f16, stride=(787968, 262656, 513, 1))), {}) |
aten.copy_.default | TorchBench/hf_Longformer | ((T([24, 4, 256, 513], f16), T([24, 4, 256, 513], f16)), {}) |
aten.copy_.default | TorchBench/fambench_dlrm | ((T([248, 1024], i64), T([248, 1024], i64)), {}) |
aten.copy_.default | TIMM/convnext_base | ((T([256, 128, 2, 2], f16), T([256, 128, 2, 2], f16, stride=(512, 1, 256, 128))), {}) |
aten.copy_.default | TIMM/jx_nest_base | ((T([256, 128, 3, 3], f16), T([256, 128, 3, 3], f16, stride=(1152, 1, 384, 128))), {}) |
aten.copy_.default | TorchBench/Background_Matting | ((T([256, 128, 3, 3], f16), T([256, 128, 3, 3], f16, stride=(1152, 1, 384, 128))), {}) |
aten.copy_.default | TIMM/levit_128 | ((T([256, 128], f16), T([256, 128], f16, stride=(1, 256))), {}) |
aten.copy_.default | TorchBench/nvidia_deeprecommender | ((T([256, 197951], f16), T([256, 197951], f16)), {}) |
aten.copy_.default | TorchBench/attention_is_all_you_need_pytorch | ((T([256, 31, 512], f16), T([256, 31, 512], f16)), {}) |
aten.copy_.default | TorchBench/attention_is_all_you_need_pytorch | ((T([256, 31], i64, stride=(1, 256)), T([256, 31], i64, stride=(1, 256))), {}) |
aten.copy_.default | TorchBench/attention_is_all_you_need_pytorch | ((T([256, 33, 512], f16), T([256, 33, 512], f16)), {}) |
aten.copy_.default | TorchBench/attention_is_all_you_need_pytorch | ((T([256, 33], i64, stride=(1, 256)), T([256, 33], i64, stride=(1, 256))), {}) |
aten.copy_.default | TorchBench/pytorch_CycleGAN_and_pix2pix | ((T([256, 64, 64], f16), T([256, 64, 64], f16)), {}) |
aten.copy_.default | TorchBench/pytorch_stargan | ((T([256], f16), T([256], f16)), {}) |
aten.copy_.default | TorchBench/hf_BigBird | ((T([288, 64, 64], f16), T([288, 64, 64], f16)), {}) |
aten.copy_.default | HuggingFace/AlbertForQuestionAnswering | ((T([2], i64), T([2], i64)), {}) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.