| operator name | used in model | args |
|---|---|---|
aten.im2col.default | HuggingFace/YituTechConvBert | ((T([1, 384, 512, 1], f16), [9, 1], [1, 1], [4, 0], [1, 1]), {}) |
aten.im2col.default | TIMM/volo_d1_224 | ((T([64, 192, 28, 28], f16, stride=(150528, 1, 5376, 192)), [3, 3], [1, 1], [1, 1], [2, 2]), {}) |
aten.im2col.default | TIMM/tnt_s_patch16_224 | ((T([64, 24, 56, 56], f16), [4, 4], [1, 1], [0, 0], [4, 4]), {}) |
aten.im2col_backward.default | HuggingFace/YituTechConvBert | ((T([1, 3456, 512], f16, stride=(1769472, 1, 3456)), [512, 1], [9, 1], [1, 1], [4, 0], [1, 1]), {}) |
aten.im2col_backward.default | TIMM/volo_d1_224 | ((T([64, 1728, 196], f16), [28, 28], [3, 3], [1, 1], [1, 1], [2, 2]), {}) |
aten.im2col_backward.default | TIMM/tnt_s_patch16_224 | ((T([64, 384, 196], f16, stride=(75264, 1, 384)), [56, 56], [4, 4], [1, 1], [0, 0], [4, 4]), {}) |
aten.index.Tensor | TorchBench/vision_maskrcnn | ((T([0, 256, 7, 7], f16), [T([0], i64)]), {}) |
aten.index.Tensor | TorchBench/vision_maskrcnn | ((T([0, 4], f16), [T([0], i64)]), {}) |
aten.index.Tensor | TorchBench/vision_maskrcnn | ((T([0, 5], f16), [T([0], i64)]), {}) |
aten.index.Tensor | TorchBench/vision_maskrcnn | ((T([0, 91, 28, 28], f16), [T([0], i64), T([0], i64)]), {}) |
aten.index.Tensor | TorchBench/vision_maskrcnn | ((T([0], f16), [T([0], i64)]), {}) |
aten.index.Tensor | TorchBench/vision_maskrcnn | ((T([0], i64), [T([0], i64)]), {}) |
aten.index.Tensor | HuggingFace/GPTNeoForSequenceClassification | ((T([1, 128, 2], f16), [T([1], i64), T([1], i64)]), {}) |
aten.index.Tensor | TorchBench/fambench_dlrm | ((T([1024, 249, 249], f16), [None, T([30876], i64), T([30876], i64)]), {}) |
aten.index.Tensor | TIMM/levit_128 | ((T([12, 16], f16), [None, T([16, 16], i64)]), {}) |
aten.index.Tensor | TIMM/levit_128 | ((T([16, 49], f16), [None, T([16, 49], i64)]), {}) |
aten.index.Tensor | HuggingFace/BigBird | ((T([16, 64], f32), [T([504], i64)]), {}) |
aten.index.Tensor | TorchBench/hf_BigBird | ((T([16, 64], f32), [T([504], i64)]), {}) |
aten.index.Tensor | TIMM/swin_base_patch4_window7_224 | ((T([169, 16], f16), [T([2401], i64)]), {}) |
aten.index.Tensor | TIMM/swin_base_patch4_window7_224 | ((T([169, 32], f16), [T([2401], i64)]), {}) |
aten.index.Tensor | TIMM/swin_base_patch4_window7_224 | ((T([169, 4], f16), [T([2401], i64)]), {}) |
aten.index.Tensor | TIMM/swin_base_patch4_window7_224 | ((T([169, 8], f16), [T([2401], i64)]), {}) |
aten.index.Tensor | TorchBench/speech_transformer | ((T([21], i64), [T([21], b8)]), {}) |
aten.index.Tensor | TorchBench/fastNLP_Bert | ((T([2869], i64), [T([6, 474], i64)]), {}) |
aten.index.Tensor | HuggingFace/GPT2ForSequenceClassification | ((T([4, 1024, 2], f16), [T([4], i64), T([4], i64)]), {}) |
aten.index.Tensor | TIMM/levit_128 | ((T([4, 196], f16), [None, T([196, 196], i64)]), {}) |
aten.index.Tensor | TorchBench/vision_maskrcnn | ((T([4, 359613, 4], f16), [T([4, 1], i64), T([4, 5000], i64)]), {}) |
aten.index.Tensor | TorchBench/vision_maskrcnn | ((T([4, 359613], f16), [T([4, 1], i64), T([4, 5000], i64)]), {}) |
aten.index.Tensor | TorchBench/vision_maskrcnn | ((T([4, 359613], i64, stride=(0, 1)), [T([4, 1], i64), T([4, 5000], i64)]), {}) |
aten.index.Tensor | TorchBench/timm_efficientdet | ((T([5000, 1], f32), [T([100], i64)]), {}) |
aten.index.Tensor | TorchBench/timm_efficientdet | ((T([5000, 1], i64), [T([100], i64)]), {}) |
aten.index.Tensor | TorchBench/vision_maskrcnn | ((T([5000, 4], f16), [T([0], i64)]), {}) |
aten.index.Tensor | TorchBench/timm_efficientdet | ((T([5000, 4], f32), [T([100], i64)]), {}) |
aten.index.Tensor | TorchBench/vision_maskrcnn | ((T([5000], f16), [T([0], i64)]), {}) |
aten.index.Tensor | TorchBench/vision_maskrcnn | ((T([5000], i64), [T([0], i64)]), {}) |
aten.index.Tensor | TorchBench/fastNLP_Bert | ((T([6, 474, 768], f16, stride=(365568, 768, 1)), [T([6, 474], i64, stride=(1, 0)), T([6, 474], i64, stride=(475, 1))]), {}) |
aten.index.Tensor | TIMM/beit_base_patch16_224 | ((T([732, 12], f16), [T([38809], i64)]), {}) |
aten.index.Tensor | TorchBench/timm_efficientdet | ((T([76725, 4], f16, stride=(1, 76725)), [T([5000], i64)]), {}) |
aten.index.Tensor | TorchBench/Super_SloMo | ((T([7], f16), [T([6], i64)]), {}) |
aten.index.Tensor | TIMM/levit_128 | ((T([8, 196], f16), [None, T([49, 196], i64)]), {}) |
aten.index.Tensor | TIMM/levit_128 | ((T([8, 49], f16), [None, T([49, 49], i64)]), {}) |
aten.index_add.default | HuggingFace/BigBird | ((T([192, 64, 64], f16), 0, T([504], i64), T([504, 64, 64], f16)), {}) |
aten.index_add.default | TorchBench/hf_BigBird | ((T([384, 64, 64], f16), 0, T([1008], i64), T([1008, 64, 64], f16)), {}) |
aten.index_add.default | HuggingFace/XLNetLMHeadModel | ((T([4, 16, 512, 1023], f16), 3, T([512], i64), T([4, 16, 512, 512], f16)), {}) |
aten.index_add_.default | HuggingFace/AllenaiLongformerBase | ((T([1179648], f16), 0, T([2359296], i64), T([2359296], f16)), {}) |
aten.index_add_.default | TorchBench/hf_Longformer | ((T([1572864], f16), 0, T([2359296], i64), T([2359296], f16)), {}) |
aten.index_add_.default | TorchBench/hf_Longformer | ((T([2359296], f16), 0, T([4718592], i64), T([4718592], f16)), {}) |
aten.index_add_.default | HuggingFace/AllenaiLongformerBase | ((T([786432], f16), 0, T([1179648], i64), T([1179648], f16)), {}) |
aten.index_put.default | TorchBench/vision_maskrcnn | ((T([0, 256, 7, 7], f16), [T([0], i64)], T([0, 256, 7, 7], f16)), {}) |
aten.index_put.default | HuggingFace/GPTNeoForSequenceClassification | ((T([1, 128, 2], f16), [T([1], i64), T([1], i64)], T([1, 2], f16), True), {}) |
aten.index_put.default | TorchBench/fambench_dlrm | ((T([1024, 249, 249], f16), [None, T([30876], i64), T([30876], i64)], T([1024, 30876], f16, stride=(31068, 1)), True), {}) |
aten.index_put.default | TIMM/levit_128 | ((T([12, 16], f16), [None, T([16, 16], i64)], T([12, 16, 16], f16), True), {}) |
aten.index_put.default | TIMM/levit_128 | ((T([16, 49], f16), [None, T([16, 49], i64)], T([16, 16, 49], f16), True), {}) |
aten.index_put.default | TIMM/swin_base_patch4_window7_224 | ((T([169, 16], f16), [T([2401], i64)], T([2401, 16], f16, stride=(1, 2401)), True), {}) |
aten.index_put.default | TIMM/swin_base_patch4_window7_224 | ((T([169, 32], f16), [T([2401], i64)], T([2401, 32], f16, stride=(1, 2401)), True), {}) |
aten.index_put.default | TIMM/swin_base_patch4_window7_224 | ((T([169, 4], f16), [T([2401], i64)], T([2401, 4], f16, stride=(1, 2401)), True), {}) |
aten.index_put.default | TIMM/swin_base_patch4_window7_224 | ((T([169, 8], f16), [T([2401], i64)], T([2401, 8], f16, stride=(1, 2401)), True), {}) |
aten.index_put.default | HuggingFace/GPT2ForSequenceClassification | ((T([4, 1024, 2], f16), [T([4], i64), T([4], i64)], T([4, 2], f16), True), {}) |
aten.index_put.default | TIMM/levit_128 | ((T([4, 196], f16), [None, T([196, 196], i64)], T([4, 196, 196], f16), True), {}) |
aten.index_put.default | TIMM/beit_base_patch16_224 | ((T([732, 12], f16), [T([38809], i64)], T([38809, 12], f16, stride=(1, 38809)), True), {}) |
aten.index_put.default | TIMM/levit_128 | ((T([8, 196], f16), [None, T([49, 196], i64)], T([8, 49, 196], f16), True), {}) |
aten.index_put.default | TIMM/levit_128 | ((T([8, 49], f16), [None, T([49, 49], i64)], T([8, 49, 49], f16), True), {}) |
aten.index_put_.default | TorchBench/vision_maskrcnn | ((T([0, 256, 14, 14], f16), [T([0], i64)], T([0, 256, 14, 14], f16)), {}) |
aten.index_put_.default | TorchBench/vision_maskrcnn | ((T([0, 256, 7, 7], f16), [T([0], i64)], T([0, 256, 7, 7], f16)), {}) |
aten.index_put_.default | TorchBench/fastNLP_Bert | ((T([6, 476], i64), [T([6], i64), T([6], i64)], T([], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54687], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54692], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54697], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54701], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54704], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54705], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54707], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54710], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54711], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54712], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54713], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54714], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54715], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54716], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54717], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54718], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54719], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54722], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54723], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54725], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54727], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54729], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54730], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54731], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54732], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54733], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54734], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54735], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54736], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54737], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54738], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54739], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54740], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54741], i64)), {}) |
aten.index_select.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16, stride=(47808, 1)), 0, T([54742], i64)), {}) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.