kernels-bot committed on
Commit
cec577f
·
verified ·
1 Parent(s): f81bfcb

Uploaded using `kernel-builder`.

Browse files
build/torch210-cxx11-cu126-aarch64-linux/{_flash_attn2_cuda_958fa11.abi3.so → _flash_attn2_cuda_f1a742f.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:6fc8bd519ff37c3e79c24bd6bd252352b01f070fa8f7c1a8432c45f2bc22b52f
3
  size 448533504
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b0f47a63daab4ebdc998cefbbc4d4d102023b8f70c2ee86200264fa0919f44b4
3
  size 448533504
build/torch210-cxx11-cu126-aarch64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn2_cuda_958fa11
3
- ops = torch.ops._flash_attn2_cuda_958fa11
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn2_cuda_958fa11::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn2_cuda_f1a742f
3
+ ops = torch.ops._flash_attn2_cuda_f1a742f
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn2_cuda_f1a742f::{op_name}"
build/torch210-cxx11-cu128-aarch64-linux/{_flash_attn2_cuda_958fa11.abi3.so → _flash_attn2_cuda_f1a742f.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:97e690df2995d1d87a4cc3f8eb17f0d3ef80fc36796ef6d593f13c7e6aedf3fd
3
  size 1037990952
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25b6609d449c0c2b797a02c1e07f6855619cdc6bf5dc923418906abb9d961c5a
3
  size 1037990952
build/torch210-cxx11-cu128-aarch64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn2_cuda_958fa11
3
- ops = torch.ops._flash_attn2_cuda_958fa11
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn2_cuda_958fa11::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn2_cuda_f1a742f
3
+ ops = torch.ops._flash_attn2_cuda_f1a742f
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn2_cuda_f1a742f::{op_name}"
build/torch210-cxx11-cu130-aarch64-linux/{_flash_attn2_cuda_958fa11.abi3.so → _flash_attn2_cuda_f1a742f.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:26dbdabe7ffa5834612cf37913f637c91aa991d757f4e42b4876c1921ecbeeb7
3
  size 1008644888
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6cf249bdaefd8ba60c325972bad9ac0721f089da07672aa18bf0402839a923c
3
  size 1008644888
build/torch210-cxx11-cu130-aarch64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn2_cuda_958fa11
3
- ops = torch.ops._flash_attn2_cuda_958fa11
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn2_cuda_958fa11::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn2_cuda_f1a742f
3
+ ops = torch.ops._flash_attn2_cuda_f1a742f
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn2_cuda_f1a742f::{op_name}"
build/torch211-cxx11-cu126-aarch64-linux/{_flash_attn2_cuda_958fa11.abi3.so → _flash_attn2_cuda_f1a742f.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4c35ba9816d590f803a80a45fcb5be581dfceb15af4f86daf3758ba46f56ef3b
3
  size 448529832
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:941532b23944f12e7f6f31a47e2c5b3e4729bb1fac383c681ac87644daece1fd
3
  size 448529832
build/torch211-cxx11-cu126-aarch64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn2_cuda_958fa11
3
- ops = torch.ops._flash_attn2_cuda_958fa11
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn2_cuda_958fa11::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn2_cuda_f1a742f
3
+ ops = torch.ops._flash_attn2_cuda_f1a742f
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn2_cuda_f1a742f::{op_name}"
build/torch211-cxx11-cu128-aarch64-linux/{_flash_attn2_cuda_958fa11.abi3.so → _flash_attn2_cuda_f1a742f.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:cef2b4d449b3efb635e6226e2d0257b080677daa631e732c73905f94ea9bdb7c
3
  size 1037987184
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e3ea5a90c44ff702cc27cbf3a42afdde5d8c1795d637885bdd8e818ab3d84124
3
  size 1037987184
build/torch211-cxx11-cu128-aarch64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn2_cuda_958fa11
3
- ops = torch.ops._flash_attn2_cuda_958fa11
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn2_cuda_958fa11::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn2_cuda_f1a742f
3
+ ops = torch.ops._flash_attn2_cuda_f1a742f
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn2_cuda_f1a742f::{op_name}"
build/torch211-cxx11-cu130-aarch64-linux/{_flash_attn2_cuda_958fa11.abi3.so → _flash_attn2_cuda_f1a742f.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:27fa941b83482a166cbd246e95694161e89557eef2a5b284016d3e4765dce49a
3
  size 1008641088
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bbcd4f2e08ce4884e4736b086bda13504c6488e6682e5b2db49f048117e14797
3
  size 1008641088
build/torch211-cxx11-cu130-aarch64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn2_cuda_958fa11
3
- ops = torch.ops._flash_attn2_cuda_958fa11
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn2_cuda_958fa11::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn2_cuda_f1a742f
3
+ ops = torch.ops._flash_attn2_cuda_f1a742f
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn2_cuda_f1a742f::{op_name}"
build/torch29-cxx11-cu129-aarch64-linux/{_flash_attn2_cuda_958fa11.abi3.so → _flash_attn2_cuda_f1a742f.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b3363c49d10c61078a9a05359fef626f4ed3b0ee0f68eb5a88828823bfcfe651
3
  size 1043164184
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:65a859e7037f7b126de8444430c737d80d191d920847871e4667516feda15c84
3
  size 1043164184
build/torch29-cxx11-cu129-aarch64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn2_cuda_958fa11
3
- ops = torch.ops._flash_attn2_cuda_958fa11
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn2_cuda_958fa11::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn2_cuda_f1a742f
3
+ ops = torch.ops._flash_attn2_cuda_f1a742f
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn2_cuda_f1a742f::{op_name}"