id int64 2.74B 3.05B | title stringlengths 1 255 | user stringlengths 2 26 | state stringclasses 2
values | labels listlengths 0 24 | comments int64 0 206 | author_association stringclasses 4
values | body stringlengths 7 62.5k ⌀ | is_title bool 1
class |
|---|---|---|---|---|---|---|---|---|
2,758,536,451 | triton.codegen_upcast_to_fp32 breaks bitcast/bitwise ops | Xynonners | open | [
"triaged",
"module: type promotion",
"oncall: pt2",
"module: inductor"
] | 1 | NONE | ### 🐛 Describe the bug
It seems that after using a .view(int_dtype) on a float tensor,
triton.codegen_upcast_to_fp32 (enabled by default) attempts to recast that bitcast int back to a fp32 float.
ablation: inductor
short reproducer:
```python
import torch
@torch.compile(options={"triton.codegen_upcast_to_fp32": False})
def round_down_to_pow2(tensor: torch.Tensor) -> torch.Tensor:
dtype = tensor.dtype
tensor_int = tensor.view(torch.int16)
y = (tensor_int & 0)
return y.view(dtype)
@torch.compile(options={"triton.codegen_upcast_to_fp32": True})
def round_down_to_pow2_fp32(tensor: torch.Tensor) -> torch.Tensor:
dtype = tensor.dtype
tensor_int = tensor.view(torch.int16)
y = (tensor_int & 0)
return y.view(dtype)
tensor = torch.tensor(0.5, dtype=torch.bfloat16, device="cuda")
round_down_to_pow2(tensor)
print("non-upcast: success")
#the following fails
round_down_to_pow2_fp32(tensor)
print("upcast: success")
```
original code:
```python
import torch
import triton.language as tl
def compute_exp_only_mask(total_bits, mantissa_bits):
"""Compute a single exponent only mask to clear both the sign and mantissa bits."""
mantissa_mask = ~(-1 + (1 << mantissa_bits))
sign_mask = -1 + (1 << (-1 + total_bits))
return mantissa_mask & sign_mask
def compute_exponent_shift(total_bits, mantissa_bits):
"""Compute the bit position of the exponent for floating-point formats."""
return mantissa_bits
#use signed int for https://github.com/pytorch/pytorch/issues/58734
DTYPE_MAPPING = {
torch.float64: {
'float_dtype': tl.float64,
'int_dtype': tl.uint64,
'int_dtype_native': torch.int64,
'bit_mask': compute_exp_only_mask(64, 52),
'exponent_shift': compute_exponent_shift(64, 52),
},
torch.float32: {
'float_dtype': tl.float32,
'int_dtype': tl.uint32,
'int_dtype_native': torch.int32,
'bit_mask': compute_exp_only_mask(32, 23),
'exponent_shift': compute_exponent_shift(32, 23),
},
torch.float16: {
'float_dtype': tl.float16,
'int_dtype': tl.uint16,
'int_dtype_native': torch.int16,
'bit_mask': compute_exp_only_mask(16, 10),
'exponent_shift': compute_exponent_shift(16, 10),
},
torch.bfloat16: {
'float_dtype': tl.bfloat16,
'int_dtype': tl.uint16,
'int_dtype_native': torch.int16,
'bit_mask': compute_exp_only_mask(16, 7),
'exponent_shift': compute_exponent_shift(16, 7),
},
torch.float8_e4m3fn: {
'float_dtype': tl.float8e4nv,
'int_dtype': tl.uint8,
'int_dtype_native': torch.uint8,
'bit_mask': compute_exp_only_mask(8, 3),
'exponent_shift': compute_exponent_shift(8, 3),
},
torch.float8_e4m3fnuz: {
'float_dtype': tl.float8e4b8,
'int_dtype': tl.uint8,
'int_dtype_native': torch.uint8,
'bit_mask': compute_exp_only_mask(8, 3),
'exponent_shift': compute_exponent_shift(8, 3),
},
torch.float8_e5m2: {
'float_dtype': tl.float8e5,
'int_dtype': tl.uint8,
'int_dtype_native': torch.uint8,
'bit_mask': compute_exp_only_mask(8, 2),
'exponent_shift': compute_exponent_shift(8, 2),
},
torch.float8_e5m2fnuz: {
'float_dtype': tl.float8e5b16,
'int_dtype': tl.uint8,
'int_dtype_native': torch.uint8,
'bit_mask': compute_exp_only_mask(8, 2),
'exponent_shift': compute_exponent_shift(8, 2),
},
}
@torch.compile(options={"triton.codegen_upcast_to_fp32": False})
def round_down_to_pow2(tensor: torch.Tensor) -> torch.Tensor:
dtype = tensor.dtype
mapping = DTYPE_MAPPING[dtype]
int_dtype = mapping['int_dtype_native']
bit_mask = mapping['bit_mask']
tensor_int = tensor.view(int_dtype)
y = (tensor_int & bit_mask)
return y.view(dtype)
@torch.compile(options={"triton.codegen_upcast_to_fp32": False})
def round_down_down_to_pow2(tensor: torch.Tensor) -> torch.Tensor:
dtype = tensor.dtype
mapping = DTYPE_MAPPING[dtype]
int_dtype = mapping['int_dtype_native']
bit_mask = mapping['bit_mask']
exponent_shift = mapping['exponent_shift']
tensor_int = tensor.view(int_dtype)
rounded_bits = (tensor_int & bit_mask)
exponent = rounded_bits >> exponent_shift
# exponent = torch.where(exponent > 0, -1 + exponent, exponent) << exponent_shift
exponent = exponent - (exponent > 0).to(dtype=exponent.dtype)
y = exponent << exponent_shift
return y.view(dtype)
@torch.compile(options={"triton.codegen_upcast_to_fp32": True})
def round_down_to_pow2_fp32(tensor: torch.Tensor) -> torch.Tensor:
dtype = tensor.dtype
mapping = DTYPE_MAPPING[dtype]
int_dtype = mapping['int_dtype_native']
bit_mask = mapping['bit_mask']
tensor_int = tensor.view(int_dtype)
y = (tensor_int & bit_mask)
return y.view(dtype)
@torch.compile(options={"triton.codegen_upcast_to_fp32": True})
def round_down_down_to_pow2_fp32(tensor: torch.Tensor) -> torch.Tensor:
dtype = tensor.dtype
mapping = DTYPE_MAPPING[dtype]
int_dtype = mapping['int_dtype_native']
bit_mask = mapping['bit_mask']
exponent_shift = mapping['exponent_shift']
tensor_int = tensor.view(int_dtype)
rounded_bits = (tensor_int & bit_mask)
exponent = rounded_bits >> exponent_shift
# exponent = torch.where(exponent > 0, -1 + exponent, exponent) << exponent_shift
exponent = exponent - (exponent > 0).to(dtype=exponent.dtype)
y = exponent << exponent_shift
return y.view(dtype)
tensor = torch.tensor(0.5, dtype=torch.bfloat16, device="cuda")
round_down_to_pow2(tensor)
round_down_down_to_pow2(tensor)
print("non-upcast: success")
#the following fails
round_down_to_pow2_fp32(tensor)
round_down_down_to_pow2_fp32(tensor)
print("upcast: success")
```
tlparse:
[dedicated_log_torch_trace_ooo_n_47.log](https://github.com/user-attachments/files/18243852/dedicated_log_torch_trace_ooo_n_47.log)
### Error logs
```python
torch._dynamo.exc.BackendCompilerFailed: backend='inductor' raised:
CompilationError: at 10:11:
def triton_poi_fused_bitwise_and_0(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + (0)).to(tl.float32)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tmp1.to(tl.bfloat16).to(tl.int16, bitcast=True).to(tl.float32)
tmp3 = tl.full([1], 32640, tl.int16)
tmp4 = tmp2 & tmp3
^
IncompatibleTypeErrorImpl('invalid operands of type triton.language.float32 and triton.language.float32')
```
### Versions
Collecting environment information...
PyTorch version: 2.6.0.dev20241127+cu124
Is debug build: False
CUDA used to build PyTorch: 12.4
ROCM used to build PyTorch: N/A
OS: Arch Linux (x86_64)
GCC version: (GCC) 14.2.1 20240910
Clang version: 18.1.8
CMake version: version 3.30.3
Libc version: glibc-2.40
Python version: 3.11.10 (main, Sep 9 2024, 22:11:19) [Clang 18.1.8 ] (64-bit runtime)
Python platform: Linux-6.6.43-273-tkg-bore-x86_64-with-glibc2.40
Is CUDA available: True
CUDA runtime version: 12.6.68
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration:
GPU 0: NVIDIA RTX 6000 Ada Generation
GPU 1: NVIDIA RTX 6000 Ada Generation
Nvidia driver version: 560.35.03
cuDNN version: Probably one of the following:
/usr/lib/libcudnn.so.9.2.1
/usr/lib/libcudnn_adv.so.9.2.1
/usr/lib/libcudnn_cnn.so.9.2.1
/usr/lib/libcudnn_engines_precompiled.so.9.2.1
/usr/lib/libcudnn_engines_runtime_compiled.so.9.2.1
/usr/lib/libcudnn_graph.so.9.2.1
/usr/lib/libcudnn_heuristic.so.9.2.1
/usr/lib/libcudnn_ops.so.9.2.1
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 46 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 24
On-line CPU(s) list: 0-23
Vendor ID: GenuineIntel
Model name: 12th Gen Intel(R) Core(TM) i9-12900K
CPU family: 6
Model: 151
Thread(s) per core: 2
Core(s) per socket: 16
Socket(s): 1
Stepping: 2
CPU(s) scaling MHz: 24%
CPU max MHz: 5300.0000
CPU min MHz: 800.0000
BogoMIPS: 6374.40
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb ssbd ibrs ibpb stibp ibrs_enhanced tpr_shadow flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb intel_pt sha_ni xsaveopt xsavec xgetbv1 xsaves split_lock_detect user_shstk avx_vnni dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp hwp_pkg_req hfi vnmi umip pku ospke waitpkg gfni vaes vpclmulqdq tme rdpid movdiri movdir64b fsrm md_clear serialize pconfig arch_lbr ibt flush_l1d arch_capabilities
Virtualization: VT-x
L1d cache: 640 KiB (16 instances)
L1i cache: 768 KiB (16 instances)
L2 cache: 14 MiB (10 instances)
L3 cache: 30 MiB (1 instance)
NUMA node(s): 1
NUMA node0 CPU(s): 0-23
Vulnerability Gather data sampling: Not affected
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Not affected
Vulnerability Reg file data sampling: Mitigation; Clear Register File
Vulnerability Retbleed: Not affected
Vulnerability Spec rstack overflow: Not affected
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI BHI_DIS_S
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Versions of relevant libraries:
[pip3] numpy==2.1.3
[pip3] nvidia-cublas-cu12==12.4.5.8
[pip3] nvidia-cuda-cupti-cu12==12.4.127
[pip3] nvidia-cuda-nvrtc-cu12==12.4.127
[pip3] nvidia-cuda-runtime-cu12==12.4.127
[pip3] nvidia-cudnn-cu12==9.1.0.70
[pip3] nvidia-cufft-cu12==11.2.1.3
[pip3] nvidia-curand-cu12==10.3.5.147
[pip3] nvidia-cusolver-cu12==11.6.1.9
[pip3] nvidia-cusparse-cu12==12.3.1.170
[pip3] nvidia-cusparselt-cu12==0.6.2
[pip3] nvidia-nccl-cu12==2.21.5
[pip3] nvidia-nvjitlink-cu12==12.4.127
[pip3] nvidia-nvtx-cu12==12.4.127
[pip3] pytorch-triton==3.2.0+git35c6c7c6
[pip3] torch==2.6.0.dev20241127+cu124
[pip3] torch-optimi==0.2.1
[pip3] torch-xla==2.5.0
[pip3] torchaudio==2.5.1
[pip3] torchvision==0.20.1
[pip3] triton==3.1.0
[pip3] triton-nightly==3.0.0.post20240716052845
[conda] No relevant packages
cc @nairbv @mruberry @chauhang @penguinwu @voznesenskym @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @aakhundov | true |
2,758,532,367 | [Inductor][CPP] Enable Bias add for Group GEMM Template | leslie-fang-intel | closed | [
"open source",
"ciflow/trunk",
"topic: not user facing",
"module: inductor",
"ciflow/inductor"
] | 1 | COLLABORATOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* #143850
* __->__ #143820
* #143796
**Summary**
In this PR, we move the `store_output` and `store_pointwise_nodes` to standalone functions for Group GEMM epilogue fusion to prepare for the following Epilogue fusion PR. And we support Bias add as the epilogue fusion for Group GEMM.
**Test Plan**
```
python -u -m pytest -s -v test/inductor/test_cpu_select_algorithm.py -k test_group_linear_epilogue
```
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,758,506,184 | Update torch-xpu-ops commit pin | xytintel | closed | [
"open source",
"topic: not user facing",
"module: inductor",
"ciflow/xpu"
] | 4 | CONTRIBUTOR | Update the torch-xpu-ops commit to [0f48ac](https://github.com/intel/torch-xpu-ops/commit/0f48ac07e42ce30d2d07447f4b49bb4ab23f8e64), includes:
- Fix building issue for transformer related operators
- Improve XPU operator coverage
- Performance optimization for several SYCL kernels
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,758,467,374 | [inductor] Move GPUTarget backwards compat to triton_compat.py | jansel | closed | [
"Merged",
"topic: not user facing",
"module: inductor",
"ciflow/inductor",
"ciflow/inductor-rocm"
] | 4 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* #143835
* __->__ #143818
* #143817
* #143815
* #143814
* #143813
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,758,467,327 | [inductor] Drop support for pre-ASTSource Triton | jansel | closed | [
"Merged",
"module: inductor",
"ciflow/inductor",
"release notes: inductor"
] | 1 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* #143835
* #143818
* __->__ #143817
* #143815
* #143814
* #143813
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,758,419,961 | pytorch v2.4.1 build for nvidia jetson orin nano 8GB | lida2003 | closed | [
"module: build",
"triaged",
"module: jetson"
] | 2 | NONE | ### 🐛 Describe the bug
pytorch v2.4.1 build for nvidia jetson orin 8GB
Previous discussion here FYI: https://forums.developer.nvidia.com/t/request-build-script-for-pytorch-or-up-to-date-pytorh-binary-release-supporting-jetson-boards-running-l4t35-6-ubuntu20-04/316972/12
```
Software part of jetson-stats 4.2.12 - (c) 2024, Raffaello Bonghi
Model: NVIDIA Orin Nano Developer Kit - Jetpack 5.1.4 [L4T 35.6.0]
NV Power Mode[0]: 15W
Serial Number: [XXX Show with: jetson_release -s XXX]
Hardware:
- P-Number: p3767-0005
- Module: NVIDIA Jetson Orin Nano (Developer kit)
Platform:
- Distribution: Ubuntu 20.04 focal
- Release: 5.10.216-tegra
jtop:
- Version: 4.2.12
- Service: Active
Libraries:
- CUDA: 11.4.315
- cuDNN: 8.6.0.166
- TensorRT: 8.5.2.2
- VPI: 2.4.8
- OpenCV: 4.9.0 - with CUDA: YES
DeepStream C/C++ SDK version: 6.3
Python Environment:
Python 3.8.10
GStreamer: YES (1.16.3)
NVIDIA CUDA: YES (ver 11.4, CUFFT CUBLAS FAST_MATH)
OpenCV version: 4.9.0 CUDA True
YOLO version: 8.3.33
Torch version: 2.1.0a0+41361538.nv23.06
Torchvision version: 0.16.1+fdea156
DeepStream SDK version: 1.1.8
```
### Error logs
```
Building wheel torch-2.4.1
-- Building version 2.4.1
cmake --build . --target install --config Release
[1/2048] Linking CXX shared library lib/libc10.so
FAILED: lib/libc10.so
: && /usr/bin/c++ -fPIC -ffunction-sections -fdata-sections -D_GLIBCXX_USE_CXX11_ABI=1 -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOROCTRACER -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -O2 -fPIC -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Werror=bool-operation -Wnarrowing -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-stringop-overflow -Wsuggest-override -Wno-psabi -Wno-error=pedantic -Wno-error=old-style-cast -Wno-missing-braces -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow -O3 -DNDEBUG -DNDEBUG -T/home/daniel/Work/pytorch_v2.4.1/cmake/linker_script.ld -Wl,--no-as-needed -T/home/daniel/Work/pytorch_v2.4.1/cmake/linker_script.ld -rdynamic -Wl,--no-as-needed -shared -Wl,-soname,libc10.so -o lib/libc10.so c10/CMakeFiles/c10.dir/core/Allocator.cpp.o c10/CMakeFiles/c10.dir/core/AutogradState.cpp.o c10/CMakeFiles/c10.dir/core/CPUAllocator.cpp.o c10/CMakeFiles/c10.dir/core/ConstantSymNodeImpl.cpp.o c10/CMakeFiles/c10.dir/core/CopyBytes.cpp.o c10/CMakeFiles/c10.dir/core/DefaultDtype.cpp.o c10/CMakeFiles/c10.dir/core/Device.cpp.o c10/CMakeFiles/c10.dir/core/DeviceType.cpp.o c10/CMakeFiles/c10.dir/core/DispatchKey.cpp.o c10/CMakeFiles/c10.dir/core/DispatchKeySet.cpp.o c10/CMakeFiles/c10.dir/core/GeneratorImpl.cpp.o c10/CMakeFiles/c10.dir/core/GradMode.cpp.o c10/CMakeFiles/c10.dir/core/InferenceMode.cpp.o c10/CMakeFiles/c10.dir/core/RefcountedDeleter.cpp.o c10/CMakeFiles/c10.dir/core/SafePyObject.cpp.o c10/CMakeFiles/c10.dir/core/Scalar.cpp.o c10/CMakeFiles/c10.dir/core/ScalarType.cpp.o c10/CMakeFiles/c10.dir/core/Storage.cpp.o c10/CMakeFiles/c10.dir/core/StorageImpl.cpp.o c10/CMakeFiles/c10.dir/core/Stream.cpp.o 
c10/CMakeFiles/c10.dir/core/SymBool.cpp.o c10/CMakeFiles/c10.dir/core/SymFloat.cpp.o c10/CMakeFiles/c10.dir/core/SymInt.cpp.o c10/CMakeFiles/c10.dir/core/SymIntArrayRef.cpp.o c10/CMakeFiles/c10.dir/core/SymNodeImpl.cpp.o c10/CMakeFiles/c10.dir/core/SymbolicShapeMeta.cpp.o c10/CMakeFiles/c10.dir/core/TensorImpl.cpp.o c10/CMakeFiles/c10.dir/core/TensorOptions.cpp.o c10/CMakeFiles/c10.dir/core/UndefinedTensorImpl.cpp.o c10/CMakeFiles/c10.dir/core/WrapDimMinimal.cpp.o c10/CMakeFiles/c10.dir/core/impl/COW.cpp.o c10/CMakeFiles/c10.dir/core/impl/COWDeleter.cpp.o c10/CMakeFiles/c10.dir/core/impl/DeviceGuardImplInterface.cpp.o c10/CMakeFiles/c10.dir/core/impl/GPUTrace.cpp.o c10/CMakeFiles/c10.dir/core/impl/HermeticPyObjectTLS.cpp.o c10/CMakeFiles/c10.dir/core/impl/LocalDispatchKeySet.cpp.o c10/CMakeFiles/c10.dir/core/impl/PyInterpreter.cpp.o c10/CMakeFiles/c10.dir/core/impl/PyObjectSlot.cpp.o c10/CMakeFiles/c10.dir/core/impl/PythonDispatcherTLS.cpp.o c10/CMakeFiles/c10.dir/core/impl/SizesAndStrides.cpp.o c10/CMakeFiles/c10.dir/core/impl/TorchDispatchModeTLS.cpp.o c10/CMakeFiles/c10.dir/core/impl/alloc_cpu.cpp.o c10/CMakeFiles/c10.dir/core/thread_pool.cpp.o c10/CMakeFiles/c10.dir/mobile/CPUCachingAllocator.cpp.o c10/CMakeFiles/c10.dir/mobile/CPUProfilingAllocator.cpp.o c10/CMakeFiles/c10.dir/util/ApproximateClock.cpp.o c10/CMakeFiles/c10.dir/util/Backtrace.cpp.o c10/CMakeFiles/c10.dir/util/Bfloat16.cpp.o c10/CMakeFiles/c10.dir/util/C++17.cpp.o c10/CMakeFiles/c10.dir/util/DeadlockDetection.cpp.o c10/CMakeFiles/c10.dir/util/Exception.cpp.o c10/CMakeFiles/c10.dir/util/Float8_e4m3fn.cpp.o c10/CMakeFiles/c10.dir/util/Float8_e4m3fnuz.cpp.o c10/CMakeFiles/c10.dir/util/Float8_e5m2.cpp.o c10/CMakeFiles/c10.dir/util/Float8_e5m2fnuz.cpp.o c10/CMakeFiles/c10.dir/util/Half.cpp.o c10/CMakeFiles/c10.dir/util/LeftRight.cpp.o c10/CMakeFiles/c10.dir/util/Logging.cpp.o c10/CMakeFiles/c10.dir/util/MathConstants.cpp.o c10/CMakeFiles/c10.dir/util/Metaprogramming.cpp.o 
c10/CMakeFiles/c10.dir/util/Optional.cpp.o c10/CMakeFiles/c10.dir/util/ParallelGuard.cpp.o c10/CMakeFiles/c10.dir/util/SmallVector.cpp.o c10/CMakeFiles/c10.dir/util/StringUtil.cpp.o c10/CMakeFiles/c10.dir/util/ThreadLocalDebugInfo.cpp.o c10/CMakeFiles/c10.dir/util/TypeCast.cpp.o c10/CMakeFiles/c10.dir/util/TypeList.cpp.o c10/CMakeFiles/c10.dir/util/TypeTraits.cpp.o c10/CMakeFiles/c10.dir/util/Type_demangle.cpp.o c10/CMakeFiles/c10.dir/util/Type_no_demangle.cpp.o c10/CMakeFiles/c10.dir/util/Unicode.cpp.o c10/CMakeFiles/c10.dir/util/UniqueVoidPtr.cpp.o c10/CMakeFiles/c10.dir/util/complex_math.cpp.o c10/CMakeFiles/c10.dir/util/flags_use_gflags.cpp.o c10/CMakeFiles/c10.dir/util/flags_use_no_gflags.cpp.o c10/CMakeFiles/c10.dir/util/int128.cpp.o c10/CMakeFiles/c10.dir/util/intrusive_ptr.cpp.o c10/CMakeFiles/c10.dir/util/numa.cpp.o c10/CMakeFiles/c10.dir/util/signal_handler.cpp.o c10/CMakeFiles/c10.dir/util/tempfile.cpp.o c10/CMakeFiles/c10.dir/util/thread_name.cpp.o c10/CMakeFiles/c10.dir/util/typeid.cpp.o -Wl,-rpath,::::::: /usr/lib/aarch64-linux-gnu/libnuma.so lib/libcpuinfo.a -pthread && /usr/local/bin/cmake -E __run_co_compile --lwyu="ldd;-u;-r" --source=lib/libc10.so && :
/usr/bin/ld: error: linker script file '/home/daniel/Work/pytorch_v2.4.1/cmake/linker_script.ld' appears multiple times
collect2: error: ld returned 1 exit status
[8/2048] Building CXX object c10/test/CMakeFiles/c10_complex_math_test.dir/util/complex_math_test.cpp.o
ninja: build stopped: subcommand failed.
```
### Versions
```
daniel@daniel-nvidia:~/Work/pytorch$ python3 collect_env.py
Collecting environment information...
PyTorch version: N/A
Is debug build: N/A
CUDA used to build PyTorch: N/A
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.6 LTS (aarch64)
GCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.2) 9.4.0
Clang version: Could not collect
CMake version: version 3.31.0
Libc version: glibc-2.31
Python version: 3.8.10 (default, Nov 7 2024, 13:10:47) [GCC 9.4.0] (64-bit runtime)
Python platform: Linux-5.10.216-tegra-aarch64-with-glibc2.29
Is CUDA available: N/A
CUDA runtime version: 11.4.315
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: Could not collect
Nvidia driver version: Could not collect
cuDNN version: Probably one of the following:
/usr/lib/aarch64-linux-gnu/libcudnn.so.8.6.0
/usr/lib/aarch64-linux-gnu/libcudnn_adv_infer.so.8.6.0
/usr/lib/aarch64-linux-gnu/libcudnn_adv_train.so.8.6.0
/usr/lib/aarch64-linux-gnu/libcudnn_cnn_infer.so.8.6.0
/usr/lib/aarch64-linux-gnu/libcudnn_cnn_train.so.8.6.0
/usr/lib/aarch64-linux-gnu/libcudnn_ops_infer.so.8.6.0
/usr/lib/aarch64-linux-gnu/libcudnn_ops_train.so.8.6.0
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: N/A
CPU:
Architecture: aarch64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
CPU(s): 6
On-line CPU(s) list: 0-5
Thread(s) per core: 1
Core(s) per socket: 3
Socket(s): 2
Vendor ID: ARM
Model: 1
Model name: ARMv8 Processor rev 1 (v8l)
Stepping: r0p1
CPU max MHz: 1510.4000
CPU min MHz: 115.2000
BogoMIPS: 62.50
L1d cache: 384 KiB
L1i cache: 384 KiB
L2 cache: 1.5 MiB
L3 cache: 2 MiB
Vulnerability Gather data sampling: Not affected
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Not affected
Vulnerability Reg file data sampling: Not affected
Vulnerability Retbleed: Not affected
Vulnerability Spec rstack overflow: Not affected
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl
Vulnerability Spectre v1: Mitigation; __user pointer sanitization
Vulnerability Spectre v2: Mitigation; CSV2, but not BHB
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Flags: fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp asimdhp cpuid asimdrdm lrcpc dcpop asimddp uscat ilrcpc flagm
Versions of relevant libraries:
[pip3] numpy==1.23.5
[pip3] onnx==1.17.0
[pip3] onnx-graphsurgeon==0.3.12
[pip3] onnxruntime==1.16.3
[pip3] onnxruntime-gpu==1.17.0
[pip3] onnxslim==0.1.36
[pip3] optree==0.13.1
[pip3] torch==2.1.0a0+41361538.nv23.6
[pip3] torch2trt==0.5.0
[pip3] torchvision==0.16.1
[conda] Could not collect
```
cc @malfet @seemethere @ptrblck @puririshi98 @chauhang @penguinwu | true |
2,758,414,759 | [inductor] Minor refactor of hip compile_meta | jansel | closed | [
"Merged",
"topic: not user facing",
"module: inductor",
"ciflow/inductor"
] | 1 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* #143835
* #143818
* #143817
* __->__ #143815
* #143814
* #143813
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,758,402,028 | [inductor] Refactor conditional triton imports into triton_compat.py | jansel | closed | [
"Merged",
"ciflow/trunk",
"topic: not user facing",
"module: inductor",
"ciflow/inductor"
] | 3 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* #143835
* #143818
* #143817
* #143815
* __->__ #143814
* #143813
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,758,381,276 | [inductor] Reorder imports in codecache.py | jansel | closed | [
"Merged",
"topic: not user facing",
"module: inductor",
"ciflow/inductor"
] | 1 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* #143835
* #143818
* #143817
* #143815
* #143814
* __->__ #143813
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,758,375,582 | [inductor] Used fixed configs for contiguous reductions | jansel | open | [
"Stale",
"module: inductor",
"module: dynamo",
"ciflow/inductor",
"no-stale",
"release notes: inductor"
] | 2 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* __->__ #143812
* #142295
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,758,367,285 | [Functorch] Refactor vmapify autograd function: remove cell mutation | yanboliang | closed | [
"Merged",
"ciflow/trunk",
"topic: not user facing"
] | 3 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* __->__ #143811
| true |
2,758,320,265 | "Access denied" error at PyTorch ROCm 6.2+ wheel repo | runtimeHorror | closed | [
"needs reproduction",
"module: binaries",
"module: rocm",
"triaged"
] | 3 | NONE | ### 🐛 Describe the bug
Cannot access the directory or download anything from the repo.
Running
```
pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.2.4
Looking in indexes: https://download.pytorch.org/whl/rocm6.2.4
```
gives
```
ERROR: Could not find a version that satisfies the requirement torch (from versions: none)
ERROR: No matching distribution found for torch
```
Opening the URL, `https://download.pytorch.org/whl/rocm6.2.4` , in a browser leads to an error page saying "AccessDenied".

But this is not limited to the repo for ROCm 6.2.4. `https://download.pytorch.org/whl/rocm6.2` has the exact same issue.
### Versions
PyTorch version: N/A
Is debug build: N/A
CUDA used to build PyTorch: N/A
ROCM used to build PyTorch: N/A
OS: Arch Linux (x86_64)
GCC version: (GCC) 14.2.1 20240910
Clang version: 18.1.8
CMake version: version 3.31.3
Libc version: glibc-2.40
Python version: 3.13.1 (main, Dec 4 2024, 18:05:56) [GCC 14.2.1 20240910] (64-bit runtime)
Python platform: Linux-6.12.6-arch1-1-x86_64-with-glibc2.40
Is CUDA available: N/A
CUDA runtime version: Could not collect
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: Could not collect
Nvidia driver version: Could not collect
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: N/A
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 48 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 12
On-line CPU(s) list: 0-11
Vendor ID: AuthenticAMD
Model name: AMD Ryzen 5 5600X 6-Core Processor
CPU family: 25
Model: 33
Thread(s) per core: 2
Core(s) per socket: 6
Socket(s): 1
Stepping: 2
Frequency boost: enabled
CPU(s) scaling MHz: 92%
CPU max MHz: 4651.0000
CPU min MHz: 550.0000
BogoMIPS: 7402.64
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid extd_apicid aperfmperf rapl pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb cat_l3 cdp_l3 hw_pstate ssbd mba ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 erms invpcid cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local user_shstk clzero irperf xsaveerptr rdpru wbnoinvd arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif v_spec_ctrl umip pku ospke vaes vpclmulqdq rdpid overflow_recov succor smca fsrm debug_swap
Virtualization: AMD-V
L1d cache: 192 KiB (6 instances)
L1i cache: 192 KiB (6 instances)
L2 cache: 3 MiB (6 instances)
L3 cache: 32 MiB (1 instance)
NUMA node(s): 1
NUMA node0 CPU(s): 0-11
Vulnerability Gather data sampling: Not affected
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Not affected
Vulnerability Reg file data sampling: Not affected
Vulnerability Retbleed: Not affected
Vulnerability Spec rstack overflow: Mitigation; Safe RET
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Retpolines; IBPB conditional; IBRS_FW; STIBP always-on; RSB filling; PBRSB-eIBRS Not affected; BHI Not affected
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Versions of relevant libraries:
[pip3] numpy==2.2.1
[conda] Could not collect
cc @seemethere @malfet @osalpekar @atalman @jeffdaily @sunway513 @jithunnair-amd @pruthvistony @ROCmSupport @dllehr-amd @jataylo @hongxiayang @naromero77amd | true |
2,758,230,733 | Inductor with dynamic shapes fails for randint with >INT_MAX maximum value | ngimel | open | [
"triaged",
"oncall: pt2",
"module: inductor"
] | 0 | COLLABORATOR | The generated annotation for max value (`ks1`) is `i32`
```
@triton_heuristics.pointwise(
size_hints={'x': 1048576},
filename=__file__,
triton_meta={'signature': {'in_ptr0': '*i64', 'out_ptr0': '*i64', 'load_seed_offset': 'i32', 'ks1': 'i32', 'xnumel': 'i32'}, 'device': DeviceProperties(type='cuda', index=0, multi_processor_count=132, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, warp_size=32), 'constants': {}, 'configs': [AttrsDescriptor.from_dict({'arg_properties': {'tt.divisibility': (0, 1), 'tt.equal_to': ()}, 'cls': 'AttrsDescriptor'})]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_randint_0', 'mutated_arg_names': [], 'optimize_mem': True, 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': '0DEDF01B8E4DD92A8B59F7523F798A141186FCC78AC75613AB9342C0CD404D81', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False, 'compile_id': '0/0', 'is_forward': True},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_randint_0(in_ptr0, out_ptr0, load_seed_offset, ks1, xnumel, XBLOCK : tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
```
and at runtime, if max value is > INT_MAX, there's a failure.
To repro:
with #143787
```
python test/inductor/test_torchinductor_codegen_dynamic_shapes.py -v -k test_randint_distribution
```
#143787 doesn't make any inductor changes, it just adds a test to make sure inductor produces correct distribution.
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,758,222,876 | Inductor cache: Revamp how we handle frozen params | masnesral | closed | [
"Merged",
"ciflow/trunk",
"topic: not user facing",
"module: inductor",
"ciflow/inductor"
] | 13 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* __->__ #143808
Summary: In https://github.com/pytorch/pytorch/pull/143563 we have a report of a problem with the treatment of frozen params in the inductor cache implementation. There seems to be a path where new constants are added in the `GraphLowering`. On a cache hit when we try to find those constant names in the `torch.fx.GraphModule`, they do not exist. The current approach treats all constants differently if the GM has any frozen params. This PR changes the approach to only treat the _frozen_ params specially, but store all other constants in the cache entry (as we do without freezing):
1) When creating a cache entry, store the names of any frozen params, but the values of any other constants.
2) On a cache hit, restore the values of the frozen params by looking up in the current GM.
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,758,125,478 | XPU ConvTranspose2d Causes DataLoader Memory Leak | ekaakurniawan | closed | [
"triaged",
"module: xpu"
] | 4 | NONE | ### 🐛 Describe the bug
I run the following notebook on XPU (device_type = "xpu") failed with "Too many open files" error. It seems the DataLoader does not close the files. The memory increases slowly from 2 GiB to 8 GiB within 3 epochs. Running on CPU (device_type = "cpu") is fine.
[Convolutional Autoencoder Notebook](https://github.com/ekaakurniawan/DLND/blob/development/assignments/P3-CNN/L5-autoencoder/Convolutional_Autoencoder_Exercise.ipynb)
I suspect the issue is caused by ConvTranspose2d layer because the following notebook without the layer is working fine on XPU.
[Simple Autoencoder Notebook](https://github.com/ekaakurniawan/DLND/blob/development/assignments/P3-CNN/L5-autoencoder/Simple_Autoencoder_Exercise.ipynb)
Please find the [steps to setup](https://github.com/ekaakurniawan/DLND?tab=readme-ov-file#intel-gpu) as well as the following entire error message.
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Cell In[8], line 11
6 train_loss = 0.0
8 ###################
9 # train the model #
10 ###################
---> 11 for data in train_loader:
12 # _ stands in for labels, here
13 # no need to flatten images
14 images, _ = data
15 images = images.to(device)
File [~/Workspace/pytorch_arc/pytorch_arc_env/lib/python3.12/site-packages/torch/utils/data/dataloader.py:708](http://localhost:8888/home/eka/Workspace/pytorch_arc/pytorch_arc_env/lib/python3.12/site-packages/torch/utils/data/dataloader.py#line=707), in _BaseDataLoaderIter.__next__(self)
705 if self._sampler_iter is None:
706 # TODO(https://github.com/pytorch/pytorch/issues/76750)
707 self._reset() # type: ignore[call-arg]
--> 708 data = self._next_data()
709 self._num_yielded += 1
710 if (
711 self._dataset_kind == _DatasetKind.Iterable
712 and self._IterableDataset_len_called is not None
713 and self._num_yielded > self._IterableDataset_len_called
714 ):
File ~/Workspace/pytorch_arc/pytorch_arc_env/lib/python3.12/site-packages/torch/utils/data/dataloader.py:1458, in _MultiProcessingDataLoaderIter._next_data(self)
1455 return self._process_data(data)
1457 assert not self._shutdown and self._tasks_outstanding > 0
-> 1458 idx, data = self._get_data()
1459 self._tasks_outstanding -= 1
1460 if self._dataset_kind == _DatasetKind.Iterable:
1461 # Check for _IterableDatasetStopIteration
File [~/Workspace/pytorch_arc/pytorch_arc_env/lib/python3.12/site-packages/torch/utils/data/dataloader.py:1420](http://localhost:8888/home/eka/Workspace/pytorch_arc/pytorch_arc_env/lib/python3.12/site-packages/torch/utils/data/dataloader.py#line=1419), in _MultiProcessingDataLoaderIter._get_data(self)
1416 # In this case, `self._data_queue` is a `queue.Queue`,. But we don't
1417 # need to call `.task_done()` because we don't use `.join()`.
1418 else:
1419 while True:
-> 1420 success, data = self._try_get_data()
1421 if success:
1422 return data
File [~/Workspace/pytorch_arc/pytorch_arc_env/lib/python3.12/site-packages/torch/utils/data/dataloader.py:1282](http://localhost:8888/home/eka/Workspace/pytorch_arc/pytorch_arc_env/lib/python3.12/site-packages/torch/utils/data/dataloader.py#line=1281), in _MultiProcessingDataLoaderIter._try_get_data(self, timeout)
1280 except OSError as e:
1281 if e.errno == errno.EMFILE:
-> 1282 raise RuntimeError(
1283 "Too many open files. Communication with the"
1284 " workers is no longer possible. Please increase the"
1285 " limit using `ulimit -n` in the shell or change the"
1286 " sharing strategy by calling"
1287 " `torch.multiprocessing.set_sharing_strategy('file_system')`"
1288 " at the beginning of your code"
1289 ) from None
1290 raise
```RuntimeError: Too many open files. Communication with the workers is no longer possible. Please increase the limit using `ulimit -n` in the shell or change the sharing strategy by calling `torch.multiprocessing.set_sharing_strategy('file_system')` at the beginning of your code```
### Versions
```
$ python collect_env.py
Collecting environment information...
PyTorch version: 2.6.0+xpu
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: Ubuntu 24.04.1 LTS (x86_64)
GCC version: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0
Clang version: Could not collect
CMake version: version 3.28.3
Libc version: glibc-2.39
Python version: 3.12.3 (main, Nov 6 2024, 18:32:19) [GCC 13.2.0] (64-bit runtime)
Python platform: Linux-6.8.0-51-generic-x86_64-with-glibc2.39
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 46 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 24
On-line CPU(s) list: 0-23
Vendor ID: GenuineIntel
Model name: Intel(R) Core(TM) Ultra 9 285K
CPU family: 6
Model: 198
Thread(s) per core: 1
Core(s) per socket: 1
Socket(s): 24
Stepping: 2
CPU(s) scaling MHz: 30%
CPU max MHz: 5100.0000
CPU min MHz: 800.0000
BogoMIPS: 7372.80
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault intel_ppin ssbd ibrs ibpb stibp ibrs_enhanced tpr_shadow flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdt_a rdseed adx smap clflushopt clwb intel_pt sha_ni xsaveopt xsavec xgetbv1 xsaves split_lock_detect user_shstk avx_vnni lam wbnoinvd dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp hwp_pkg_req hfi vnmi umip pku ospke waitpkg gfni vaes vpclmulqdq tme rdpid bus_lock_detect movdiri movdir64b fsrm md_clear serialize arch_lbr ibt flush_l1d arch_capabilities
Virtualization: VT-x
L1d cache: 768 KiB (20 instances)
L1i cache: 1.3 MiB (20 instances)
L2 cache: 40 MiB (12 instances)
L3 cache: 36 MiB (1 instance)
NUMA node(s): 1
NUMA node0 CPU(s): 0-23
Vulnerability Gather data sampling: Not affected
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Not affected
Vulnerability Reg file data sampling: Not affected
Vulnerability Retbleed: Not affected
Vulnerability Spec rstack overflow: Not affected
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS Not affected; BHI Not affected
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Versions of relevant libraries:
[pip3] numpy==2.1.2
[pip3] pytorch-triton-xpu==3.2.0
[pip3] torch==2.6.0+xpu
[pip3] torchaudio==2.6.0+xpu
[pip3] torchvision==0.21.0+xpu
[pip3] triton==3.2.0
[conda] Could not collect
```
cc @gujinghui @EikanWang @fengyuan14 @guangyey | true |
2,758,125,260 | Enable clang-tidy on torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp | cyyever | closed | [
"oncall: distributed",
"oncall: jit",
"triaged",
"open source",
"Merged",
"Reverted",
"ciflow/trunk",
"release notes: distributed (c10d)",
"ciflow/periodic",
"ci-no-td",
"ciflow/inductor-cu126"
] | 14 | COLLABORATOR | Fixes #ISSUE_NUMBER
cc @H-Huang @awgu @kwen2501 @wanchaol @fegin @fduwjj @wz337 @wconstab @d4l3k @c-p-i-o @EikanWang @jgong5 @wenzhe-nrv @sanchitintel @voznesenskym @penguinwu @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,758,123,547 | Remove remove_non_owning_ref_types | cyyever | closed | [
"open source",
"Stale",
"topic: not user facing"
] | 9 | COLLABORATOR | Fixes #ISSUE_NUMBER
| true |
2,758,108,965 | [17/N] Fix extra warnings brought by clang-tidy-17 | cyyever | closed | [
"module: cpu",
"open source",
"Merged",
"ciflow/trunk",
"topic: not user facing"
] | 10 | COLLABORATOR | Fixes #ISSUE_NUMBER
cc @jgong5 @mingfeima @XiaobingSuper @sanchitintel @ashokei @jingxu10 | true |
2,757,878,225 | XPU Manywheel builds linux and windows are failing since Dec 23, 2024 | atalman | closed | [
"module: binaries",
"triaged",
"module: xpu"
] | 4 | CONTRIBUTOR | ### 🐛 Describe the bug
I see following Failures on XPU builds since Dec 23, 2024:
Linux XPU:
https://github.com/pytorch/pytorch/actions/runs/12474101389/job/34819441679
Windows XPU:
https://github.com/pytorch/pytorch/actions/runs/12478637812/job/34826509154
```
[linux-binary-manywheel / manywheel-py3_9-xpu-build / build](https://hud.pytorch.org/pr/pytorch/pytorch/143776#34819441679) ([gh](https://github.com/pytorch/pytorch/actions/runs/12474101389/job/34819441679))
/pytorch/third_party/torch-xpu-ops/src/ATen/native/transformers/SDPUtils.cpp:63:18: error: expected primary-expression before ‘bool
```
cc @seemethere @malfet @osalpekar @gujinghui @EikanWang @fengyuan14 @guangyey @chuanqi129
### Versions
2.7.0 nightly | true |
2,757,877,917 | [inductor] fix the `adaptive_avg_pool` on processing int64 | shaoyuyoung | closed | [
"open source",
"Merged",
"ciflow/trunk",
"topic: not user facing",
"module: inductor"
] | 7 | CONTRIBUTOR | Fixes #143801
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,757,876,873 | [inductor] `AdaptiveAvgPool` behaves differently on eager and inductor when meeting internal int64 dtypes | shaoyuyoung | closed | [
"oncall: pt2"
] | 1 | CONTRIBUTOR | ### 🐛 Describe the bug
related to #143752.
#143762 fixes #143752.
However, I found that after #143762 landed, `AdaptiveAvgPool` still has the same issue.
```
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.manual_seed(0)
from torch._inductor import config
config.fallback_random = True
class Model(torch.nn.Module):
def __init__(self, pool_operator):
super(Model, self).__init__()
self.pool = pool_operator
def forward(self, x):
x = torch.argmax(x, dim=1)
# when touching here, x.dtype=torch.int64
x = self.pool(x)
return x
def run_test(dim, device, backend):
op_inst = eval(f"nn.AdaptiveAvgPool{dim}d(5)")
model = Model(op_inst).to(device)
x = torch.randn([1] * (dim + 2)).to(device)
if backend == "inductor":
model = torch.compile(model)
try:
y = model(x)
print(f"succeed on {device} with {backend}: {y.dtype}")
except Exception as e:
print(f"fail on {device} with {backend}: {e}")
run_test(1, "cpu", "eager") # fail on cpu with eager: "adaptive_max_pool2d" not implemented for 'Long'
run_test(1, "cpu", "inductor") # succeed on cpu with inductor: torch.int64
run_test(1, "cuda", "eager") # fail on cuda with eager: "adaptive_max_pool2d_cuda" not implemented for 'Long'
run_test(1, "cuda", "inductor") # fail on cuda with inductor: backend='inductor' raised: SubprocException: An exception occurred in a subprocess:
run_test(2, "cpu", "eager") # fail on cpu with eager: "adaptive_max_pool2d" not implemented for 'Long'
run_test(2, "cpu", "inductor") # succeed on cpu with inductor: torch.int64
run_test(2, "cuda", "eager") # fail on cuda with eager: "adaptive_max_pool2d_cuda" not implemented for 'Long'
run_test(2, "cuda", "inductor") # # fail on cuda with inductor: backend='inductor' raised: SubprocException: An exception occurred in a subprocess:
run_test(3, "cpu", "eager") # fail on cpu with eager: "adaptive_max_pool3d_cpu" not implemented for 'Long'
run_test(3, "cpu", "inductor") # fail on cpu with inductor: "adaptive_max_pool3d_cpu" not implemented for 'Long'
run_test(3, "cuda", "eager") # fail on cuda with eager: "adaptive_max_pool3d_cuda" not implemented for 'Long'
run_test(3, "cuda", "inductor") # fail on cuda with inductor: "adaptive_max_pool3d_cuda" not implemented for 'Long'
```
### Error logs
```
fail on cpu with eager: "adaptive_avg_pool2d" not implemented for 'Long'
succeed on cpu with inductor: torch.int64
fail on cuda with eager: "adaptive_avg_pool2d_cuda" not implemented for 'Long'
succeed on cuda with inductor: torch.int64
fail on cpu with eager: "adaptive_avg_pool2d" not implemented for 'Long'
succeed on cpu with inductor: torch.int64
fail on cuda with eager: "adaptive_avg_pool2d_cuda" not implemented for 'Long'
succeed on cuda with inductor: torch.int64
fail on cpu with eager: "adaptive_avg_pool3d_cpu" not implemented for 'Long'
fail on cpu with inductor: "adaptive_avg_pool3d_cpu" not implemented for 'Long'
fail on cuda with eager: "adaptive_avg_pool3d_cuda" not implemented for 'Long'
fail on cuda with inductor: "adaptive_avg_pool3d_cuda" not implemented for 'Long'
```
### Versions
main
cc @chauhang @penguinwu | true |
2,757,694,262 | The tensor-based computation of exponentiation and logarithmic operations is much slower than using NumPy | yxma2015 | open | [
"needs reproduction",
"module: performance",
"module: cpu",
"triaged"
] | 1 | NONE | ### 🐛 Describe the bug
Hi there, hope this message finds you well.
I have encountered a significant performance issue when using PyTorch tensors for exponentiation (torch.exp()) and logarithmic operations (torch.log()) compared to NumPy. Specifically, these tensor operations are much slower than their NumPy counterparts. This issue is likely real. When I tested the following code, I didn't use a GPU.
The issue lies in the` loss_5()` function. On my machine, when implementing` loss_5` with **NumPy** in the example below, it took **23** seconds, but when using PyTorch, it took **781** seconds.
```Python
# -*-coding:utf-8 -*-
import numpy as np
import tqdm
from sklearn.decomposition import NMF
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.pipeline import Pipeline
import torch
import torch.nn as nn
from sklearn.cluster import SpectralClustering
from sklearn.metrics.pairwise import cosine_similarity
def init_graph(low_dim_x):
n_spot = low_dim_x.shape[0]
n_neighbor = 15
init_W = cosine_similarity(low_dim_x)
"""cos_init = np.zeros((n_spot, n_spot))
for i in range(n_spot):
vec = init_W[i, :]
distance = vec.argsort()[:: -1]
for t in range(n_neighbor + 1):
y = distance[t]
cos_init[i, y] = init_W[i, y]"""
return init_W
def spectral_clustering(x: np.array, n_cluster: int) -> np.array:
"""
Args:
x (np.array): feature matrix $x /in R^{N times D}$
n_cluster (int): cluster number
Returns:
np.array: clustering labels
"""
model = SpectralClustering(n_clusters=n_cluster,
assign_labels='discretize',
random_state=0).fit(x)
labels = model.labels_
partition = [[] for i in range(n_cluster)]
for i in range(x.shape[0]):
partition[labels[i]].append(i + 1)
"""grids = np.zeros((x.shape[0],x.shape[0]))
for i in range(x.shape[0]):
for j in range(x.shape[0]):
if model.labels_[i] == model.labels_[j]:
grids[i,j] = 1"""
return partition
def get_laplace_matrix(x):
#x = x + np.eye(x.shape[0])
degree_matrix = np.zeros((x.shape[0], x.shape[0]))
for i in range(x.shape[0]):
degree_matrix[i, i] = sum(x[i, :])
lap = degree_matrix - x
#lap = lap + 0.01*np.eye(lap.shape[0])
return lap
def nmf_ini(x: np.array, rank: np.array) -> np.array:
"""do NMF(non-negative matrix factorization) with a given matrix x and expected dimension.
Args:
x (np.array): non-negative matrix X to be factorized
dimension (np.array): dimension
Returns:
np.array: (W, H) whose product approximates the non-negative matrix X
"""
"""model = NMF(n_components=dimension, init='random', random_state=0, max_iter=500)
w = model.fit_transform(x)
h = model.components_"""
u, s, v = np.linalg.svd(x, full_matrices=False)
w_ini = u[:,:rank]
h_ini = np.diag(s[:rank])@v[:rank,:]
return w_ini, h_ini
class MVFC(nn.Module):
def __init__(self, parameters):
super(MVFC, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.gene_number = nn.Parameter(
torch.tensor(parameters['gene_number']), requires_grad=False)
self.spot_number = nn.Parameter(
torch.tensor(parameters['spot_number']), requires_grad=False)
self.feature_dimension = nn.Parameter(
torch.tensor(parameters['feature_dimension']), requires_grad=False)
self.alpha = nn.Parameter(
torch.tensor(parameters['alpha']), requires_grad=False)
self.beta = nn.Parameter(
torch.tensor(parameters['beta']), requires_grad=False)
self.gamma = nn.Parameter(
torch.tensor(parameters['gamma']), requires_grad=False)
self.eta = nn.Parameter(
torch.tensor(parameters['eta']), requires_grad=False)
self.epochs = nn.Parameter(
torch.tensor(parameters['epochs']), requires_grad=False)
self.base_spot = nn.Parameter(torch.rand((self.spot_number, self.feature_dimension),
dtype=torch.float32)
)
self.base_spot_g = nn.Parameter(torch.rand((self.gene_number, self.feature_dimension),
dtype=torch.float32))
self.feature_fusion = nn.Parameter(torch.rand((self.feature_dimension,
self.spot_number),
dtype = torch.float32 ) )
self.affinity_graph = nn.Parameter(torch.rand((self.spot_number,
self.spot_number),
dtype=torch.float32))
def objective_function(self,
w1,
w2,
lap_w2,
lap_w1):
"""
Args:
input:
Returns:
"""
loss_component = self.compute_loss(w1 = w1,
w2 = w2,
lap_w2 = lap_w2,lap_w1=lap_w1)
return loss_component
def initialize(self, w1,w2):
print("model initializing...")
with torch.no_grad():
n_components = int(self.feature_dimension.detach())
w, h = nmf_ini(w1.to("cpu").detach().numpy(),n_components)
w = torch.from_numpy(w).float().to(self.device)
h = torch.from_numpy(h).float().to(self.device)
self.base_spot_g.data, self.feature_fusion.data = w, h
w, h = nmf_ini(w2.to("cpu").detach().numpy(), n_components)
w = torch.from_numpy(w).float().to(self.device)
h = torch.from_numpy(h).float().to(self.device)
self.base_spot.data, self.feature_fusion.data = w, h
w1.to(self.device)
w2.to(self.device)
print("model initialized...")
def compute_loss(self,w1,w2,lap_w2,lap_w1):
# TODO
loss = torch.zeros(6,dtype=torch.float32)
# ST NMF
loss[0] = self.loss_0(w1=w1)
# spatial NMF
loss[1] = self.loss_1(w2=w2)
# penalty
#loss[2] = self.loss_2()
# lpp
loss[3] = self.loss_3(lap_w2=lap_w2, lap_w1=lap_w1)
# affinity graph
loss[4] = self.loss_4()
# contrastive loss
loss[5] = self.loss_5(w2)
return loss
def loss_0(self,w1):
return torch.norm(w1 - self.base_spot_g @ self.feature_fusion )
# self representation
"""def loss_0(self, w1):
return torch.norm(w1 - w1 @ (self.feature_fusion + self.sr_gene))"""
def loss_1(self,w2):
return self.alpha * torch.norm(w2 - self.base_spot @ self.feature_fusion)
def loss_2(self):
return self.beta*torch.norm(self.affinity_graph,p=1)
def loss_3(self, lap_w2, lap_w1):
return self.gamma * torch.trace(self.feature_fusion @ lap_w2 @ self.feature_fusion.T)
def loss_4(self):
return self.eta * torch.norm(self.feature_fusion - self.feature_fusion @ self.affinity_graph)
def loss_5(self,w2):
contrastive_loss = 0
for i in range(self.affinity_graph.shape[0]):
denominator = torch.sum(
torch.exp(self.affinity_graph[i,:])) - torch.exp(self.affinity_graph[i,i])
for j in torch.where(w2 != 0)[0]:
numerator = torch.exp(self.affinity_graph[i,j])
contrastive_loss += -torch.log(numerator / denominator)
return contrastive_loss
def loss_5_numpy(self, w2):
contrastive_loss = 0
for i in range(self.affinity_graph.shape[0]):
affinity = self.affinity_graph.to("cpu").detach().numpy()
denominator = (np.sum(
np.exp(affinity[i, :])) - np.exp(affinity[i, i]))
for j in torch.where(w2 != 0)[0]:
numerator = np.exp(affinity[i,j])
contrastive_loss += -np.log(numerator / denominator)
self.affinity_graph.to(self.device)
return torch.tensor(contrastive_loss.astype(np.float32))
def forward(self,w1,w2, lap_w2,lap_w1):
self.feature_fusion.data = torch.nn.functional.relu(self.feature_fusion.data)
self.base_spot_g.data = torch.nn.functional.relu(self.base_spot_g.data)
self.base_spot.data = torch.nn.functional.relu(self.base_spot.data)
self.affinity_graph.data = torch.nn.functional.relu(self.affinity_graph.data)
self.affinity_graph.data =(self.affinity_graph.data + self.affinity_graph.data.T)/2
return self.objective_function(w1,w2,lap_w2,lap_w1)
# test
def test(w1, w2, parameters):
w1_cos = init_graph(w1.T)
lap_w2 = get_laplace_matrix(w2).astype(np.float32)
lap_w1 = get_laplace_matrix(w1_cos).astype(np.float32)
model = MVFC(parameters=parameters)
model.affinity_graph.data = torch.from_numpy(w1_cos.astype(np.float32))
model = model.to(model.device)
w1 = torch.from_numpy(w1)
w2 = torch.from_numpy(w2)
lap_w2 = torch.from_numpy(lap_w2)
lap_w1 = torch.from_numpy(lap_w1)
w1 = w1.to(model.device)
w2 = w2.to(model.device)
lap_w2 = lap_w2.to(model.device)
lap_w1 = lap_w1.to(model.device)
model.initialize(w1, w2)
print("the model is built!")
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
loss_history = np.zeros((model.epochs, 6))
for k in range(model.epochs):
optimizer.zero_grad()
loss = model.forward(w1,w2,lap_w2,lap_w1)
loss_history[k,:] = loss.detach().numpy()[:]
loss = torch.sum(loss)
print(f"\rEpoch {k + 1}'s loss is:{loss}",end=" ")
#model.affinity_graph = nn.Parameter(torch.clamp(model.affinity_graph,min=0))
"""model.feature_fusion = nn.Parameter(torch.clamp(model.feature_fusion, min=0))
model.sr_gene = nn.Parameter(torch.clamp(model.sr_gene, min=0))
model.sr_spatial = nn.Parameter(torch.clamp(model.sr_spatial, min=0))"""
loss.backward()
optimizer.step()
print("optimized end!")
# clustering
#partition = spectral_clustering(model.feature_fusion.detach().numpy(), 11)
return (model.affinity_graph.to("cpu").detach().numpy(),
model.feature_fusion.to("cpu").detach().numpy(),
loss_history,
model.base_spot_g.to("cpu").detach().numpy(),
model.base_spot.to("cpu").detach().numpy())
w1 = np.random.normal(loc=1,scale=0.1,size=(20,100))
w2 = np.random.normal(loc=1,scale=0.1,size=(100,100))
parameters = {
"device": "cpu" if torch.cuda.is_available() else "cuda:0",
"gene_number": w1.shape[0],
"feature_dimension": 10,
"alpha": 0.8,
"beta": 0.8,
"gamma": 0.8,
"eta": 0.8,
"spot_number": w1.shape[1],
"epochs": 10,
"n_cluster":10
}
import time
start = time.time()
test(w1, w2, parameters)
end = time.time()
print(end - start)
```
### Versions
Collecting environment information...
PyTorch version: 2.5.1+cu124
Is debug build: False
CUDA used to build PyTorch: 12.4
ROCM used to build PyTorch: N/A
OS: Ubuntu 22.04.5 LTS (x86_64)
GCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0
Clang version: Could not collect
CMake version: Could not collect
Libc version: glibc-2.35
Python version: 3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0] (64-bit runtime)
Python platform: Linux-5.15.167.4-microsoft-standard-WSL2-x86_64-with-glibc2.35
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 46 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 24
On-line CPU(s) list: 0-23
Vendor ID: GenuineIntel
Model name: 13th Gen Intel(R) Core(TM) i7-13700
CPU family: 6
Model: 183
Thread(s) per core: 2
Core(s) per socket: 12
Socket(s): 1
Stepping: 1
BogoMIPS: 4223.99
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht sy
scall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4
_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibr
s_enhanced tpr_shadow vnmi ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves avx_vnni umip waitpkg gfni vaes vpclmulqdq rdpid movdiri movdir64b fsrm md_clear serialize flush_l1d arch_capabilities
Virtualization: VT-x
Hypervisor vendor: Microsoft
Virtualization type: full
L1d cache: 576 KiB (12 instances)
L1i cache: 384 KiB (12 instances)
L2 cache: 24 MiB (12 instances)
L3 cache: 30 MiB (1 instance)
Vulnerability Gather data sampling: Not affected
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Not affected
Vulnerability Reg file data sampling: Vulnerable: No microcode
Vulnerability Retbleed: Mitigation; Enhanced IBRS
Vulnerability Spec rstack overflow: Not affected
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI BHI_DIS_S
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Versions of relevant libraries:
[pip3] numpy==1.26.4
[pip3] numpy-groupies==0.11.2
[pip3] nvidia-cublas-cu12==12.4.5.8
[pip3] nvidia-cuda-cupti-cu12==12.4.127
[pip3] nvidia-cuda-nvrtc-cu12==12.4.127
[pip3] nvidia-cuda-runtime-cu12==12.4.127
[pip3] nvidia-cudnn-cu12==9.1.0.70
[pip3] nvidia-cufft-cu12==11.2.1.3
[pip3] nvidia-curand-cu12==10.3.5.147
[pip3] nvidia-cusolver-cu12==11.6.1.9
[pip3] nvidia-cusparse-cu12==12.3.1.170
[pip3] nvidia-nccl-cu12==2.21.5
[pip3] nvidia-nvjitlink-cu12==12.4.127
[pip3] nvidia-nvtx-cu12==12.4.127
[pip3] torch==2.5.1
[pip3] torchaudio==2.5.1
[pip3] torchvision==0.20.1
[pip3] triton==3.1.0
[conda] Could not collect
cc @msaroufim @jgong5 @mingfeima @XiaobingSuper @sanchitintel @ashokei @jingxu10 | true |
2,757,545,033 | Add get_stream_from_external API for CUDA backend | guangyey | closed | [
"open source",
"Merged",
"ciflow/trunk",
"release notes: python_frontend"
] | 2 | COLLABORATOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* #143849
* __->__ #143799
* #141123
* #141119
* #142347
| true |
2,757,456,027 | FlightRecorderEventTest::test_all_events is flaky | lw | closed | [
"oncall: distributed",
"module: flaky-tests"
] | 2 | CONTRIBUTOR | ### 🐛 Describe the bug
The test test/distributed/flight_recorder/test_fr_analysis.py::FlightRecorderEventTest::test_all_events is flaky.
You can see here a sample failure: https://github.com/pytorch/pytorch/actions/runs/12470584195/job/34807434998?pr=143747.
This flakiness was introduced in https://github.com/pytorch/pytorch/pull/143354. I left a comment on that PR explaining where it comes from.
I'm applying a band-aid in https://github.com/pytorch/pytorch/pull/143747, but this test needs to be rewritten in order to make sense.
### Versions
main branch
cc @H-Huang @awgu @kwen2501 @wanchaol @fegin @fduwjj @wz337 @wconstab @d4l3k @c-p-i-o @clee2000 @wdvr | true |
2,757,330,788 | Propagate callable parameter types using ParamSpec (#142306) | kaspell | closed | [
"oncall: distributed",
"module: cpu",
"module: typing",
"open source",
"better-engineering",
"Merged",
"ciflow/trunk",
"release notes: distributed (fsdp)",
"module: dynamo",
"ciflow/inductor"
] | 10 | CONTRIBUTOR | The codebase has a few locations where callable parameter type information is lost when the unpackings *args and **kwargs are typed as Any. Refactor these instances to retain type information using typing_extensions.ParamSpec.
Also, in these functions, enforce return type with TypeVar.
Addresses #142306
cc @H-Huang @awgu @kwen2501 @wanchaol @fegin @fduwjj @wz337 @wconstab @d4l3k @c-p-i-o @jgong5 @mingfeima @XiaobingSuper @sanchitintel @ashokei @jingxu10 @ezyang @malfet @xuzhao9 @gramster @voznesenskym @penguinwu @EikanWang @Guobing-Chen @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @chenyang78 @kadeng @chauhang @amjames | true |
2,757,327,237 | [Inductor][CPP] Enable Grouped GEMM Template | leslie-fang-intel | closed | [
"open source",
"Merged",
"ciflow/trunk",
"topic: not user facing",
"module: inductor",
"ciflow/inductor"
] | 5 | COLLABORATOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* #143897
* __->__ #143796
**Summary**
Enable the CPP Grouped GEMM Fusion, lowering and Grouped GEMM Template following the RFC: https://github.com/pytorch/pytorch/issues/144012
- Support flexible number of GEMMs
- Share activation across GEMMs
- The Grouped GEMM Template supports independent activations
- However, the pattern matcher requires an anchor node, which is as the shared activation across GEMMs
- Each GEMM can have a unique weight but same sizes
- Each GEMM can have a unique bias or None
- Current PR does not yet support biases; this will be addressed in a follow-up epilogue fusion PR
- Each GEMM have its own epilogues
- Epilogue fusion is not yet supported in this PR and will be enabled in an upcoming follow-up epilogue fusion PR
**Test Plan**
```
python -u -m pytest -s -v test/inductor/test_cpu_select_algorithm.py -k test_grouped_linear
python -u -m pytest -s -v test/inductor/test_cpu_select_algorithm.py -k test_grouped_linear_invalid
python -u -m pytest -s -v test/inductor/test_cpu_cpp_wrapper.py -k test_grouped_linear
```
**Example**
Here is the example and generated code
```
batch_size = 4
in_features = 512
out_features = 1024
dtype = torch.bfloat16
class M(torch.nn.Module):
def __init__(self, bias):
super().__init__()
self.linear0 = torch.nn.Linear(in_features, out_features, bias=False)
self.linear1 = torch.nn.Linear(in_features, out_features, bias=False)
def forward(self, x):
return self.linear0(x), self.linear1(x)
if __name__ == "__main__":
with torch.no_grad():
input = torch.randn(batch_size, in_features, dtype=dtype)
m = M(bias=bias).to(dtype=dtype).eval()
cm = torch.compile(m)
act_res = cm(input)
```
Generated Code: https://gist.github.com/leslie-fang-intel/ed2e8d23aeb3586eb504feeace692e16#file-grouped-gemm-generated-code-py
**Next Step**
- Support Epilogue fusion
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov @BoyuanFeng | true |
2,757,293,256 | PyTorch source code build failed on some Windows 11 environment caused by C++ protocol buffer compiler | chuanqi129 | open | [
"module: build",
"module: windows",
"triaged"
] | 2 | COLLABORATOR | ### 🐛 Describe the bug
The pytorch source code build crashed on Windows 11 caused by **C++ protocol buffer compiler**
```
>python setup.py bdist_wheel
Building wheel torch-2.6.0a0+git0189052
-- Building version 2.6.0a0+git0189052
cmake --build . --target install --config Release
[1/2444] Running C++ protocol buffer compiler on C:/User...rch/build/third_party/onnx/onnx/onnx_onnx_torch-ml.proto
FAILED: third_party/onnx/onnx/onnx_onnx_torch-ml.pb.cc third_party/onnx/onnx/onnx_onnx_torch-ml.pb.h C:/Users/arc/chuanqiw/pytorch/build/third_party/onnx/onnx/onnx_onnx_torch-ml.pb.cc C:/Users/arc/chuanqiw/pytorch/build/third_party/onnx/onnx/onnx_onnx_torch-ml.pb.h
C:\WINDOWS\system32\cmd.exe /C "cd /D C:\Users\arc\chuanqiw\pytorch\build\third_party\onnx && C:\Users\arc\chuanqiw\pytorch\build\bin\protoc.exe C:/Users/arc/chuanqiw/pytorch/build/third_party/onnx/onnx/onnx_onnx_torch-ml.proto -I C:/Users/arc/chuanqiw/pytorch/build/third_party/onnx --cpp_out dllexport_decl=ONNX_API:C:/Users/arc/chuanqiw/pytorch/build/third_party/onnx && C:\Users\arc\miniforge3\envs\chuanqiw_build\Lib\site-packages\cmake\data\bin\cmake.exe -DFILENAME=C:/Users/arc/chuanqiw/pytorch/build/third_party/onnx/onnx/onnx_onnx_torch-ml.pb.h -DNAMESPACES=onnx_torch -P C:/Users/arc/chuanqiw/pytorch/cmake/ProtoBufPatch.cmake && C:\Users\arc\miniforge3\envs\chuanqiw_build\Lib\site-packages\cmake\data\bin\cmake.exe -DFILENAME=C:/Users/arc/chuanqiw/pytorch/build/third_party/onnx/onnx/onnx_onnx_torch-ml.pb.cc -DNAMESPACES=onnx_torch -P C:/Users/arc/chuanqiw/pytorch/cmake/ProtoBufPatch.cmake"
[26/2444] Building CXX object third_party\ideep\mkl-dnn\...ommon\CMakeFiles\dnnl_common.dir\memory_zero_pad.cpp.obj
ninja: build stopped: subcommand failed.
```
If I download pre-built [protobuf 3.13](https://github.com/protocolbuffers/protobuf/releases/tag/v3.13.0) `protoc.exe` binary to `C:\Users\arc\chuanqiw\pytorch\build\bin\protoc.exe`, the build can be worked around.
Full configuration.
```
>python setup.py bdist_wheel
Building wheel torch-2.6.0a0+git0189052
-- Building version 2.6.0a0+git0189052
cmake -GNinja -DBUILD_PYTHON=True -DBUILD_TEST=True -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=C:\Users\arc\chuanqiw\pytorch\torch -DCMAKE_PREFIX_PATH=C:\Users\arc\miniforge3\envs\chuanqiw_build\Lib\site-packages -DPython_EXECUTABLE=C:\Users\arc\miniforge3\envs\chuanqiw_build\python.exe -DTORCH_BUILD_VERSION=2.6.0a0+git0189052 -DUSE_NUMPY=True C:\Users\arc\chuanqiw\pytorch
-- The CXX compiler identification is MSVC 19.41.34123.0
-- The C compiler identification is MSVC 19.41.34123.0
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Check for working CXX compiler: C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.41.34120/bin/Hostx64/x64/cl.exe - skipped
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Detecting C compiler ABI info
-- Detecting C compiler ABI info - done
-- Check for working C compiler: C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.41.34120/bin/Hostx64/x64/cl.exe - skipped
-- Detecting C compile features
-- Detecting C compile features - done
-- Not forcing any particular BLAS to be found
CMake Warning at CMakeLists.txt:422 (message):
TensorPipe cannot be used on Windows. Set it to OFF
CMake Warning at CMakeLists.txt:424 (message):
KleidiAI cannot be used on Windows. Set it to OFF
-- Performing Test C_HAS_AVX_1
-- Performing Test C_HAS_AVX_1 - Success
-- Performing Test C_HAS_AVX2_1
-- Performing Test C_HAS_AVX2_1 - Success
-- Performing Test C_HAS_AVX512_1
-- Performing Test C_HAS_AVX512_1 - Success
-- Performing Test CXX_HAS_AVX_1
-- Performing Test CXX_HAS_AVX_1 - Success
-- Performing Test CXX_HAS_AVX2_1
-- Performing Test CXX_HAS_AVX2_1 - Success
-- Performing Test CXX_HAS_AVX512_1
-- Performing Test CXX_HAS_AVX512_1 - Success
-- Current compiler supports avx2 extension. Will build perfkernels.
-- Performing Test CAFFE2_COMPILER_SUPPORTS_AVX512_EXTENSIONS
-- Performing Test CAFFE2_COMPILER_SUPPORTS_AVX512_EXTENSIONS - Success
-- Current compiler supports avx512f extension. Will build fbgemm.
-- Performing Test COMPILER_SUPPORTS_HIDDEN_VISIBILITY
-- Performing Test COMPILER_SUPPORTS_HIDDEN_VISIBILITY - Failed
-- Performing Test COMPILER_SUPPORTS_HIDDEN_INLINE_VISIBILITY
-- Performing Test COMPILER_SUPPORTS_HIDDEN_INLINE_VISIBILITY - Failed
-- Could not find hardware support for NEON on this machine.
-- No OMAP3 processor on this machine.
-- No OMAP4 processor on this machine.
-- Compiler does not support SVE extension. Will not build perfkernels.
-- Performing Test HAS/UTF_8
-- Performing Test HAS/UTF_8 - Success
CUDA_TOOLKIT_ROOT_DIR not found or specified
-- Could NOT find CUDA (missing: CUDA_TOOLKIT_ROOT_DIR CUDA_NVCC_EXECUTABLE CUDA_INCLUDE_DIRS CUDA_CUDART_LIBRARY)
CMake Warning at cmake/public/cuda.cmake:31 (message):
PyTorch: CUDA cannot be found. Depending on whether you are building
PyTorch or a PyTorch dependent library, the next warning / error will give
you more info.
Call Stack (most recent call first):
cmake/Dependencies.cmake:44 (include)
CMakeLists.txt:865 (include)
CMake Warning at cmake/Dependencies.cmake:76 (message):
Not compiling with CUDA. Suppress this warning with -DUSE_CUDA=OFF.
Call Stack (most recent call first):
CMakeLists.txt:865 (include)
CMake Warning at cmake/Dependencies.cmake:95 (message):
Not compiling with XPU. Could NOT find SYCL.Suppress this warning with
-DUSE_XPU=OFF.
Call Stack (most recent call first):
CMakeLists.txt:865 (include)
-- Building using own protobuf under third_party per request.
-- Use custom protobuf build.
CMake Deprecation Warning at third_party/protobuf/cmake/CMakeLists.txt:2 (cmake_minimum_required):
Compatibility with CMake < 3.5 will be removed from a future version of
CMake.
Update the VERSION argument <min> value or use a ...<max> suffix to tell
CMake that the project does not need compatibility with older versions.
--
-- 3.13.0.0
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Failed
-- Looking for pthread_create in pthreads
-- Looking for pthread_create in pthreads - not found
-- Looking for pthread_create in pthread
-- Looking for pthread_create in pthread - not found
-- Found Threads: TRUE
-- Caffe2 protobuf include directory: $<BUILD_INTERFACE:C:/Users/arc/chuanqiw/pytorch/third_party/protobuf/src>$<INSTALL_INTERFACE:include>
-- Trying to find preferred BLAS backend of choice: MKL
-- MKL_THREADING = OMP
-- Looking for sys/types.h
-- Looking for sys/types.h - found
-- Looking for stdint.h
-- Looking for stdint.h - found
-- Looking for stddef.h
-- Looking for stddef.h - found
-- Check size of void*
-- Check size of void* - done
-- MKL_THREADING = OMP
CMake Warning at cmake/Dependencies.cmake:208 (message):
MKL could not be found. Defaulting to Eigen
Call Stack (most recent call first):
CMakeLists.txt:865 (include)
CMake Warning at cmake/Dependencies.cmake:256 (message):
Preferred BLAS (MKL) cannot be found, now searching for a general BLAS
library
Call Stack (most recent call first):
CMakeLists.txt:865 (include)
-- MKL_THREADING = OMP
-- Checking for [mkl_intel_lp64 - mkl_intel_thread - mkl_core - libiomp5md]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_intel_thread - mkl_core - libiomp5md]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_intel_thread - mkl_core]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_intel_thread - mkl_core]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_sequential - mkl_core]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_sequential - mkl_core]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_core - libiomp5md - pthread]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_core - libiomp5md - pthread]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_core - pthread]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_core - pthread]
-- Library mkl_intel: not found
-- Checking for [mkl - guide - pthread - m]
-- Library mkl: not found
-- MKL library not found
-- Checking for [blis]
-- Library blis: BLAS_blis_LIBRARY-NOTFOUND
-- Checking for [Accelerate]
-- Library Accelerate: BLAS_Accelerate_LIBRARY-NOTFOUND
-- Checking for [vecLib]
-- Library vecLib: BLAS_vecLib_LIBRARY-NOTFOUND
-- Checking for [flexiblas]
-- Library flexiblas: BLAS_flexiblas_LIBRARY-NOTFOUND
-- Checking for [openblas]
-- Library openblas: BLAS_openblas_LIBRARY-NOTFOUND
-- Checking for [openblas - pthread - m]
-- Library openblas: BLAS_openblas_LIBRARY-NOTFOUND
-- Checking for [openblas - pthread - m - gomp]
-- Library openblas: BLAS_openblas_LIBRARY-NOTFOUND
-- Checking for [libopenblas]
-- Library libopenblas: BLAS_libopenblas_LIBRARY-NOTFOUND
-- Checking for [goto2 - gfortran]
-- Library goto2: BLAS_goto2_LIBRARY-NOTFOUND
-- Checking for [goto2 - gfortran - pthread]
-- Library goto2: BLAS_goto2_LIBRARY-NOTFOUND
-- Checking for [acml - gfortran]
-- Library acml: BLAS_acml_LIBRARY-NOTFOUND
-- Checking for [blis]
-- Library blis: BLAS_blis_LIBRARY-NOTFOUND
-- Could NOT find Atlas (missing: Atlas_CBLAS_INCLUDE_DIR Atlas_CLAPACK_INCLUDE_DIR Atlas_CBLAS_LIBRARY Atlas_BLAS_LIBRARY Atlas_LAPACK_LIBRARY)
-- Checking for [ptf77blas - atlas - gfortran]
-- Library ptf77blas: BLAS_ptf77blas_LIBRARY-NOTFOUND
-- Checking for []
-- Looking for sgemm_
-- Looking for sgemm_ - not found
-- Cannot find a library with BLAS API. Not using BLAS.
-- Using pocketfft in directory: C:/Users/arc/chuanqiw/pytorch/third_party/pocketfft/
-- The ASM compiler identification is MSVC
-- Found assembler: C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.41.34120/bin/Hostx64/x64/cl.exe
-- Building for XNNPACK_TARGET_PROCESSOR: x86_64
-- Generating microkernels.cmake
No microkernel found in src\reference\binary-elementwise.cc
No microkernel found in src\reference\packing.cc
No microkernel found in src\reference\unary-elementwise.cc
-- Found Git: C:/Program Files/Git/cmd/git.exe (found version "2.41.0.windows.2")
-- git version: v1.6.1 normalized to 1.6.1
-- Version: 1.6.1
-- Looking for shm_open in rt
-- Looking for shm_open in rt - not found
-- Performing Test HAVE_STD_REGEX
-- Performing Test HAVE_STD_REGEX
-- Performing Test HAVE_STD_REGEX -- success
-- Performing Test HAVE_GNU_POSIX_REGEX
-- Performing Test HAVE_GNU_POSIX_REGEX
-- Performing Test HAVE_GNU_POSIX_REGEX -- failed to compile
-- Performing Test HAVE_POSIX_REGEX
-- Performing Test HAVE_POSIX_REGEX
-- Performing Test HAVE_POSIX_REGEX -- failed to compile
-- Performing Test HAVE_STEADY_CLOCK
-- Performing Test HAVE_STEADY_CLOCK
-- Performing Test HAVE_STEADY_CLOCK -- success
CMake Warning (dev) at third_party/fbgemm/CMakeLists.txt:93 (find_package):
Policy CMP0148 is not set: The FindPythonInterp and FindPythonLibs modules
are removed. Run "cmake --help-policy CMP0148" for policy details. Use
the cmake_policy command to set the policy and suppress this warning.
This warning is for project developers. Use -Wno-dev to suppress it.
-- Found PythonInterp: C:/Users/arc/miniforge3/envs/chuanqiw_build/python.exe (found version "3.10.15")
-- Performing Test COMPILER_SUPPORTS_AVX512
-- Performing Test COMPILER_SUPPORTS_AVX512 - Success
-- MKL_THREADING = OMP
-- Check OMP with lib C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.41.34120/lib/x64/libomp.lib and flags -openmp:experimental
-- MKL_THREADING = OMP
-- Check OMP with lib C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.41.34120/lib/x64/libomp.lib and flags -openmp:experimental
CMake Warning (dev) at C:/Users/arc/miniforge3/envs/chuanqiw_build/Lib/site-packages/cmake/data/share/cmake-3.30/Modules/FindPackageHandleStandardArgs.cmake:441 (message):
The package name passed to `find_package_handle_standard_args` (OpenMP_C)
does not match the name of the calling package (OpenMP). This can lead to
problems in calling code that expects `find_package` result variables
(e.g., `_FOUND`) to follow a certain pattern.
Call Stack (most recent call first):
cmake/Modules/FindOpenMP.cmake:590 (find_package_handle_standard_args)
third_party/fbgemm/CMakeLists.txt:136 (find_package)
This warning is for project developers. Use -Wno-dev to suppress it.
-- Found OpenMP_C: -openmp:experimental
CMake Warning (dev) at C:/Users/arc/miniforge3/envs/chuanqiw_build/Lib/site-packages/cmake/data/share/cmake-3.30/Modules/FindPackageHandleStandardArgs.cmake:441 (message):
The package name passed to `find_package_handle_standard_args` (OpenMP_CXX)
does not match the name of the calling package (OpenMP). This can lead to
problems in calling code that expects `find_package` result variables
(e.g., `_FOUND`) to follow a certain pattern.
Call Stack (most recent call first):
cmake/Modules/FindOpenMP.cmake:590 (find_package_handle_standard_args)
third_party/fbgemm/CMakeLists.txt:136 (find_package)
This warning is for project developers. Use -Wno-dev to suppress it.
-- Found OpenMP_CXX: -openmp:experimental
-- Found OpenMP: TRUE
CMake Warning at third_party/fbgemm/CMakeLists.txt:138 (message):
OpenMP found! OpenMP_C_INCLUDE_DIRS =
CMake Warning at third_party/fbgemm/CMakeLists.txt:232 (message):
==========
CMake Warning at third_party/fbgemm/CMakeLists.txt:233 (message):
CMAKE_BUILD_TYPE = Release
CMake Warning at third_party/fbgemm/CMakeLists.txt:234 (message):
CMAKE_CXX_FLAGS_DEBUG is /Z7 /Ob0 /Od /RTC1 /bigobj
CMake Warning at third_party/fbgemm/CMakeLists.txt:235 (message):
CMAKE_CXX_FLAGS_RELEASE is /O2 /Ob2 /DNDEBUG /bigobj
CMake Warning at third_party/fbgemm/CMakeLists.txt:236 (message):
==========
** AsmJit Summary **
ASMJIT_DIR=C:/Users/arc/chuanqiw/pytorch/third_party/fbgemm/third_party/asmjit
ASMJIT_TEST=FALSE
ASMJIT_TARGET_TYPE=SHARED
ASMJIT_DEPS=
ASMJIT_LIBS=asmjit
ASMJIT_CFLAGS=
ASMJIT_PRIVATE_CFLAGS=-MP;-GF;-Zc:__cplusplus;-Zc:inline;-Zc:strictStrings;-Zc:threadSafeInit-;-W4
ASMJIT_PRIVATE_CFLAGS_DBG=-GS
ASMJIT_PRIVATE_CFLAGS_REL=-GS-;-O2;-Oi
CMake Deprecation Warning at third_party/ittapi/CMakeLists.txt:7 (cmake_minimum_required):
Compatibility with CMake < 3.5 will be removed from a future version of
CMake.
Update the VERSION argument <min> value or use a ...<max> suffix to tell
CMake that the project does not need compatibility with older versions.
CMake Deprecation Warning at third_party/FP16/CMakeLists.txt:1 (CMAKE_MINIMUM_REQUIRED):
Compatibility with CMake < 3.5 will be removed from a future version of
CMake.
Update the VERSION argument <min> value or use a ...<max> suffix to tell
CMake that the project does not need compatibility with older versions.
CMake Deprecation Warning at third_party/psimd/CMakeLists.txt:1 (CMAKE_MINIMUM_REQUIRED):
Compatibility with CMake < 3.5 will be removed from a future version of
CMake.
Update the VERSION argument <min> value or use a ...<max> suffix to tell
CMake that the project does not need compatibility with older versions.
-- Using third party subdirectory Eigen.
-- Found Python: C:\Users\arc\miniforge3\envs\chuanqiw_build\python.exe (found version "3.10.15") found components: Interpreter Development.Module NumPy
-- Using third_party/pybind11.
-- pybind11 include dirs: C:/Users/arc/chuanqiw/pytorch/cmake/../third_party/pybind11/include
-- Could NOT find OpenTelemetryApi (missing: OpenTelemetryApi_INCLUDE_DIRS)
-- Using third_party/opentelemetry-cpp.
-- opentelemetry api include dirs: C:/Users/arc/chuanqiw/pytorch/cmake/../third_party/opentelemetry-cpp/api/include
-- Could NOT find MPI_C (missing: MPI_C_LIB_NAMES MPI_C_HEADER_DIR MPI_C_WORKS)
-- Could NOT find MPI_CXX (missing: MPI_CXX_LIB_NAMES MPI_CXX_HEADER_DIR MPI_CXX_WORKS)
-- Could NOT find MPI (missing: MPI_C_FOUND MPI_CXX_FOUND)
CMake Warning at cmake/Dependencies.cmake:939 (message):
Not compiling with MPI. Suppress this warning with -DUSE_MPI=OFF
Call Stack (most recent call first):
CMakeLists.txt:865 (include)
-- Adding OpenMP CXX_FLAGS: -openmp:experimental
-- Will link against OpenMP libraries: C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.41.34120/lib/x64/libomp.lib
CMake Deprecation Warning at third_party/gloo/CMakeLists.txt:1 (cmake_minimum_required):
Compatibility with CMake < 3.5 will be removed from a future version of
CMake.
Update the VERSION argument <min> value or use a ...<max> suffix to tell
CMake that the project does not need compatibility with older versions.
CMake Warning (dev) at third_party/gloo/CMakeLists.txt:21 (option):
Policy CMP0077 is not set: option() honors normal variables. Run "cmake
--help-policy CMP0077" for policy details. Use the cmake_policy command to
set the policy and suppress this warning.
For compatibility with older versions of CMake, option is clearing the
normal variable 'BUILD_BENCHMARK'.
This warning is for project developers. Use -Wno-dev to suppress it.
CMake Warning (dev) at third_party/gloo/CMakeLists.txt:35 (option):
Policy CMP0077 is not set: option() honors normal variables. Run "cmake
--help-policy CMP0077" for policy details. Use the cmake_policy command to
set the policy and suppress this warning.
For compatibility with older versions of CMake, option is clearing the
normal variable 'USE_NCCL'.
This warning is for project developers. Use -Wno-dev to suppress it.
CMake Warning (dev) at third_party/gloo/CMakeLists.txt:36 (option):
Policy CMP0077 is not set: option() honors normal variables. Run "cmake
--help-policy CMP0077" for policy details. Use the cmake_policy command to
set the policy and suppress this warning.
For compatibility with older versions of CMake, option is clearing the
normal variable 'USE_RCCL'.
This warning is for project developers. Use -Wno-dev to suppress it.
-- MSVC detected
-- Set USE_REDIS OFF
-- Set USE_IBVERBS OFF
-- Set USE_NCCL OFF
-- Set USE_RCCL OFF
-- Set USE_LIBUV ON
-- Only USE_LIBUV is supported on Windows
-- Gloo build as SHARED library
CMake Warning (dev) at third_party/onnx/CMakeLists.txt:106 (find_package):
Policy CMP0148 is not set: The FindPythonInterp and FindPythonLibs modules
are removed. Run "cmake --help-policy CMP0148" for policy details. Use
the cmake_policy command to set the policy and suppress this warning.
This warning is for project developers. Use -Wno-dev to suppress it.
Generated: C:/Users/arc/chuanqiw/pytorch/build/third_party/onnx/onnx/onnx_onnx_torch-ml.proto
Generated: C:/Users/arc/chuanqiw/pytorch/build/third_party/onnx/onnx/onnx-operators_onnx_torch-ml.proto
Generated: C:/Users/arc/chuanqiw/pytorch/build/third_party/onnx/onnx/onnx-data_onnx_torch.proto
--
-- ******** Summary ********
-- CMake version : 3.30.5
-- CMake command : C:/Users/arc/miniforge3/envs/chuanqiw_build/Lib/site-packages/cmake/data/bin/cmake.exe
-- System : Windows
-- C++ compiler : C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.41.34120/bin/Hostx64/x64/cl.exe
-- C++ compiler version : 19.41.34123.0
-- CXX flags : /DWIN32 /D_WINDOWS /GR /EHsc /Zc:__cplusplus /bigobj /FS /utf-8 -DUSE_PTHREADPOOL /EHsc /wd26812
-- Build type : Release
-- Compile definitions : ONNX_ML=1;ONNXIFI_ENABLE_EXT=1;__STDC_FORMAT_MACROS
-- CMAKE_PREFIX_PATH : C:\Users\arc\miniforge3\envs\chuanqiw_build\Lib\site-packages
-- CMAKE_INSTALL_PREFIX : C:/Users/arc/chuanqiw/pytorch/torch
-- CMAKE_MODULE_PATH : C:/Users/arc/chuanqiw/pytorch/cmake/Modules;C:/Users/arc/chuanqiw/pytorch/cmake/public/../Modules_CUDA_fix
--
-- ONNX version : 1.17.0
-- ONNX NAMESPACE : onnx_torch
-- ONNX_USE_LITE_PROTO : OFF
-- USE_PROTOBUF_SHARED_LIBS : OFF
-- Protobuf_USE_STATIC_LIBS : ON
-- ONNX_DISABLE_EXCEPTIONS : OFF
-- ONNX_DISABLE_STATIC_REGISTRATION : OFF
-- ONNX_WERROR : OFF
-- ONNX_BUILD_TESTS : OFF
-- ONNX_BUILD_SHARED_LIBS :
-- BUILD_SHARED_LIBS : OFF
--
-- Protobuf compiler :
-- Protobuf includes :
-- Protobuf libraries :
-- BUILD_ONNX_PYTHON : OFF
-- Found CUDA with FP16 support, compiling with torch.cuda.HalfTensor
-- Adding -DNDEBUG to compile flags
CMake Warning at cmake/Dependencies.cmake:1408 (message):
Not compiling with MAGMA. Suppress this warning with -DUSE_MAGMA=OFF.
Call Stack (most recent call first):
CMakeLists.txt:865 (include)
-- Could not find hardware support for NEON on this machine.
-- No OMAP3 processor on this machine.
-- No OMAP4 processor on this machine.
-- MKL_THREADING = OMP
-- Checking for [mkl_intel_lp64 - mkl_intel_thread - mkl_core - libiomp5md]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_intel_thread - mkl_core - libiomp5md]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_intel_thread - mkl_core]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_intel_thread - mkl_core]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_sequential - mkl_core]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_sequential - mkl_core]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_core - libiomp5md - pthread]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_core - libiomp5md - pthread]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_core - pthread]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_core - pthread]
-- Library mkl_intel: not found
-- Checking for [mkl - guide - pthread - m]
-- Library mkl: not found
-- MKL library not found
-- Checking for [blis]
-- Library blis: BLAS_blis_LIBRARY-NOTFOUND
-- Checking for [Accelerate]
-- Library Accelerate: BLAS_Accelerate_LIBRARY-NOTFOUND
-- Checking for [vecLib]
-- Library vecLib: BLAS_vecLib_LIBRARY-NOTFOUND
-- Checking for [flexiblas]
-- Library flexiblas: BLAS_flexiblas_LIBRARY-NOTFOUND
-- Checking for [openblas]
-- Library openblas: BLAS_openblas_LIBRARY-NOTFOUND
-- Checking for [openblas - pthread - m]
-- Library openblas: BLAS_openblas_LIBRARY-NOTFOUND
-- Checking for [openblas - pthread - m - gomp]
-- Library openblas: BLAS_openblas_LIBRARY-NOTFOUND
-- Checking for [libopenblas]
-- Library libopenblas: BLAS_libopenblas_LIBRARY-NOTFOUND
-- Checking for [goto2 - gfortran]
-- Library goto2: BLAS_goto2_LIBRARY-NOTFOUND
-- Checking for [goto2 - gfortran - pthread]
-- Library goto2: BLAS_goto2_LIBRARY-NOTFOUND
-- Checking for [acml - gfortran]
-- Library acml: BLAS_acml_LIBRARY-NOTFOUND
-- Checking for [blis]
-- Library blis: BLAS_blis_LIBRARY-NOTFOUND
-- Could NOT find Atlas (missing: Atlas_CBLAS_INCLUDE_DIR Atlas_CLAPACK_INCLUDE_DIR Atlas_CBLAS_LIBRARY Atlas_BLAS_LIBRARY Atlas_LAPACK_LIBRARY)
-- Checking for [ptf77blas - atlas - gfortran]
-- Library ptf77blas: BLAS_ptf77blas_LIBRARY-NOTFOUND
-- Checking for []
-- Cannot find a library with BLAS API. Not using BLAS.
-- LAPACK requires BLAS
-- Cannot find a library with LAPACK API. Not using LAPACK.
disabling CUDA because NOT USE_CUDA is set
disabling ROCM because NOT USE_ROCM is set
-- MIOpen not found. Compiling without MIOpen support
-- Will build oneDNN UKERNEL
-- MKL_THREADING = OMP
-- Checking for [mkl_intel_lp64 - mkl_intel_thread - mkl_core - libiomp5md]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_intel_thread - mkl_core - libiomp5md]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_intel_thread - mkl_core]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_intel_thread - mkl_core]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_sequential - mkl_core]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_sequential - mkl_core]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_core - libiomp5md - pthread]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_core - libiomp5md - pthread]
-- Library mkl_intel: not found
-- Checking for [mkl_intel_lp64 - mkl_core - pthread]
-- Library mkl_intel_lp64: not found
-- Checking for [mkl_intel - mkl_core - pthread]
-- Library mkl_intel: not found
-- Checking for [mkl - guide - pthread - m]
-- Library mkl: not found
-- MKL library not found
-- Checking for [blis]
-- Library blis: BLAS_blis_LIBRARY-NOTFOUND
-- Checking for [Accelerate]
-- Library Accelerate: BLAS_Accelerate_LIBRARY-NOTFOUND
-- Checking for [vecLib]
-- Library vecLib: BLAS_vecLib_LIBRARY-NOTFOUND
-- Checking for [flexiblas]
-- Library flexiblas: BLAS_flexiblas_LIBRARY-NOTFOUND
-- Checking for [openblas]
-- Library openblas: BLAS_openblas_LIBRARY-NOTFOUND
-- Checking for [openblas - pthread - m]
-- Library openblas: BLAS_openblas_LIBRARY-NOTFOUND
-- Checking for [openblas - pthread - m - gomp]
-- Library openblas: BLAS_openblas_LIBRARY-NOTFOUND
-- Checking for [libopenblas]
-- Library libopenblas: BLAS_libopenblas_LIBRARY-NOTFOUND
-- Checking for [goto2 - gfortran]
-- Library goto2: BLAS_goto2_LIBRARY-NOTFOUND
-- Checking for [goto2 - gfortran - pthread]
-- Library goto2: BLAS_goto2_LIBRARY-NOTFOUND
-- Checking for [acml - gfortran]
-- Library acml: BLAS_acml_LIBRARY-NOTFOUND
-- Checking for [blis]
-- Library blis: BLAS_blis_LIBRARY-NOTFOUND
-- Could NOT find Atlas (missing: Atlas_CBLAS_INCLUDE_DIR Atlas_CLAPACK_INCLUDE_DIR Atlas_CBLAS_LIBRARY Atlas_BLAS_LIBRARY Atlas_LAPACK_LIBRARY)
-- Checking for [ptf77blas - atlas - gfortran]
-- Library ptf77blas: BLAS_ptf77blas_LIBRARY-NOTFOUND
-- Checking for []
-- Cannot find a library with BLAS API. Not using BLAS.
-- MKLDNN_CPU_RUNTIME = OMP
CMake Deprecation Warning at third_party/ideep/mkl-dnn/CMakeLists.txt:17 (cmake_minimum_required):
Compatibility with CMake < 3.5 will be removed from a future version of
CMake.
Update the VERSION argument <min> value or use a ...<max> suffix to tell
CMake that the project does not need compatibility with older versions.
-- DNNL_TARGET_ARCH: X64
-- DNNL_LIBRARY_NAME: dnnl
CMake Warning (dev) at C:/Users/arc/miniforge3/envs/chuanqiw_build/Lib/site-packages/cmake/data/share/cmake-3.30/Modules/FindPackageHandleStandardArgs.cmake:441 (message):
The package name passed to `find_package_handle_standard_args` (OpenMP_C)
does not match the name of the calling package (OpenMP). This can lead to
problems in calling code that expects `find_package` result variables
(e.g., `_FOUND`) to follow a certain pattern.
Call Stack (most recent call first):
cmake/Modules/FindOpenMP.cmake:590 (find_package_handle_standard_args)
third_party/ideep/mkl-dnn/cmake/OpenMP.cmake:55 (find_package)
third_party/ideep/mkl-dnn/CMakeLists.txt:119 (include)
This warning is for project developers. Use -Wno-dev to suppress it.
-- Found OpenMP_C: -openmp:experimental
CMake Warning (dev) at C:/Users/arc/miniforge3/envs/chuanqiw_build/Lib/site-packages/cmake/data/share/cmake-3.30/Modules/FindPackageHandleStandardArgs.cmake:441 (message):
The package name passed to `find_package_handle_standard_args` (OpenMP_CXX)
does not match the name of the calling package (OpenMP). This can lead to
problems in calling code that expects `find_package` result variables
(e.g., `_FOUND`) to follow a certain pattern.
Call Stack (most recent call first):
cmake/Modules/FindOpenMP.cmake:590 (find_package_handle_standard_args)
third_party/ideep/mkl-dnn/cmake/OpenMP.cmake:55 (find_package)
third_party/ideep/mkl-dnn/CMakeLists.txt:119 (include)
This warning is for project developers. Use -Wno-dev to suppress it.
-- Found OpenMP_CXX: -openmp:experimental
-- Enabled testing coverage: CI
-- Enabled workload: TRAINING
-- Enabled primitives: ALL
-- Enabled primitive CPU ISA: ALL
-- Enabled primitive GPU ISA: ALL
-- Enabled GeMM kernels ISA: ALL
-- Primitive cache is enabled
-- Experimental functionality for ukernels is enabled
-- The ASM_MASM compiler identification is MSVC
-- Found assembler: C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.41.34120/bin/Hostx64/x64/ml64.exe
-- Graph component is enabled
-- Graph compiler backend is disabled.
-- Found MKL-DNN: TRUE
-- {fmt} version: 11.0.2
-- Build type: Release
-- Using CPU-only version of Kineto
-- Configuring Kineto dependency:
-- KINETO_SOURCE_DIR = C:/Users/arc/chuanqiw/pytorch/third_party/kineto/libkineto
-- KINETO_BUILD_TESTS = OFF
-- KINETO_LIBRARY_TYPE = static
CMake Warning (dev) at third_party/kineto/libkineto/CMakeLists.txt:15 (find_package):
Policy CMP0148 is not set: The FindPythonInterp and FindPythonLibs modules
are removed. Run "cmake --help-policy CMP0148" for policy details. Use
the cmake_policy command to set the policy and suppress this warning.
This warning is for project developers. Use -Wno-dev to suppress it.
INFO CUDA_SOURCE_DIR =
INFO ROCM_SOURCE_DIR =
INFO CUPTI unavailable or disabled - not building GPU profilers
-- Kineto: FMT_SOURCE_DIR = C:/Users/arc/chuanqiw/pytorch/third_party/fmt
-- Kineto: FMT_INCLUDE_DIR = C:/Users/arc/chuanqiw/pytorch/third_party/fmt/include
INFO CUPTI_INCLUDE_DIR = /extras/CUPTI/include
INFO ROCTRACER_INCLUDE_DIR = /include/roctracer
INFO DYNOLOG_INCLUDE_DIR = C:/Users/arc/chuanqiw/pytorch/third_party/kineto/libkineto/third_party/dynolog/
INFO IPCFABRIC_INCLUDE_DIR = C:/Users/arc/chuanqiw/pytorch/third_party/kineto/libkineto/third_party/dynolog//dynolog/src/ipcfabric/
-- Configured Kineto (CPU)
-- Performing Test HAS/WD4624
-- Performing Test HAS/WD4624 - Success
-- Performing Test HAS/WD4068
-- Performing Test HAS/WD4068 - Success
-- Performing Test HAS/WD4067
-- Performing Test HAS/WD4067 - Success
-- Performing Test HAS/WD4267
-- Performing Test HAS/WD4267 - Success
-- Performing Test HAS/WD4661
-- Performing Test HAS/WD4661 - Success
-- Performing Test HAS/WD4717
-- Performing Test HAS/WD4717 - Success
-- Performing Test HAS/WD4244
-- Performing Test HAS/WD4244 - Success
-- Performing Test HAS/WD4804
-- Performing Test HAS/WD4804 - Success
-- Performing Test HAS/WD4273
-- Performing Test HAS/WD4273 - Success
-- Performing Test HAS_WNO_STRINGOP_OVERFLOW
-- Performing Test HAS_WNO_STRINGOP_OVERFLOW - Failed
--
-- Use the C++ compiler to compile (MI_USE_CXX=ON)
--
-- Library base name: mimalloc
-- Version : 1.8
-- Build type : release
-- C++ Compiler : C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.41.34120/bin/Hostx64/x64/cl.exe
-- Compiler flags : /Zc:__cplusplus
-- Compiler defines :
-- Link libraries : psapi;shell32;user32;advapi32;bcrypt
-- Build targets : static
--
-- Performing Test HAS_WDEPRECATED
-- Performing Test HAS_WDEPRECATED - Failed
-- don't use NUMA
-- Looking for backtrace
-- Looking for backtrace - not found
-- Could NOT find Backtrace (missing: Backtrace_LIBRARY Backtrace_INCLUDE_DIR)
-- headers outputs:
-- sources outputs:
-- declarations_yaml outputs:
-- Performing Test COMPILER_SUPPORTS_NO_AVX256_SPLIT
-- Performing Test COMPILER_SUPPORTS_NO_AVX256_SPLIT - Failed
-- Using ATen parallel backend: OMP
disabling CUDA because USE_CUDA is set false
-- Could NOT find OpenSSL, try to set the path to OpenSSL root folder in the system variable OPENSSL_ROOT_DIR (missing: OPENSSL_CRYPTO_LIBRARY OPENSSL_INCLUDE_DIR)
-- Check size of long double
-- Check size of long double - done
-- Performing Test COMPILER_SUPPORTS_FLOAT128
-- Performing Test COMPILER_SUPPORTS_FLOAT128 - Failed
-- Performing Test COMPILER_SUPPORTS_SSE2
-- Performing Test COMPILER_SUPPORTS_SSE2 - Success
-- Performing Test COMPILER_SUPPORTS_SSE4
-- Performing Test COMPILER_SUPPORTS_SSE4 - Success
-- Performing Test COMPILER_SUPPORTS_AVX
-- Performing Test COMPILER_SUPPORTS_AVX - Success
-- Performing Test COMPILER_SUPPORTS_FMA4
-- Performing Test COMPILER_SUPPORTS_FMA4 - Success
-- Performing Test COMPILER_SUPPORTS_AVX2
-- Performing Test COMPILER_SUPPORTS_AVX2 - Success
-- Performing Test COMPILER_SUPPORTS_AVX512F
-- Performing Test COMPILER_SUPPORTS_AVX512F - Success
-- Found OpenMP_C: -openmp:experimental (found version "2.0")
-- Found OpenMP_CXX: -openmp:experimental (found version "2.0")
-- Found OpenMP: TRUE (found version "2.0")
-- Performing Test COMPILER_SUPPORTS_OPENMP
-- Performing Test COMPILER_SUPPORTS_OPENMP - Success
-- Performing Test COMPILER_SUPPORTS_OMP_SIMD
-- Performing Test COMPILER_SUPPORTS_OMP_SIMD - Failed
-- Performing Test COMPILER_SUPPORTS_WEAK_ALIASES
-- Performing Test COMPILER_SUPPORTS_WEAK_ALIASES - Failed
-- Performing Test COMPILER_SUPPORTS_BUILTIN_MATH
-- Performing Test COMPILER_SUPPORTS_BUILTIN_MATH - Failed
-- Performing Test COMPILER_SUPPORTS_SYS_GETRANDOM
-- Performing Test COMPILER_SUPPORTS_SYS_GETRANDOM - Failed
-- Configuring build for SLEEF-v3.6.0
Target system: Windows-10.0.22631
Target processor: AMD64
Host system: Windows-10.0.22631
Host processor: AMD64
Detected C compiler: MSVC @ C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.41.34120/bin/Hostx64/x64/cl.exe
CMake: 3.30.5
Make program: C:/Users/arc/miniforge3/envs/chuanqiw_build/Scripts/ninja.exe
-- Using option `/D_CRT_SECURE_NO_WARNINGS /D_CRT_NONSTDC_NO_DEPRECATE ` to compile libsleef
-- Building shared libs : OFF
-- Building static test bins: OFF
-- MPFR : LIB_MPFR-NOTFOUND
-- GMP : LIBGMP-NOTFOUND
-- RT :
-- FFTW3 : LIBFFTW3-NOTFOUND
-- OPENSSL :
-- SDE : SDE_COMMAND-NOTFOUND
-- COMPILER_SUPPORTS_OPENMP : FALSE
AT_INSTALL_INCLUDE_DIR include/ATen/core
core header install: C:/Users/arc/chuanqiw/pytorch/build/aten/src/ATen/core/TensorBody.h
core header install: C:/Users/arc/chuanqiw/pytorch/build/aten/src/ATen/core/aten_interned_strings.h
core header install: C:/Users/arc/chuanqiw/pytorch/build/aten/src/ATen/core/enum_tag.h
CMake Deprecation Warning at test/edge/CMakeLists.txt:1 (cmake_minimum_required):
Compatibility with CMake < 3.5 will be removed from a future version of
CMake.
Update the VERSION argument <min> value or use a ...<max> suffix to tell
CMake that the project does not need compatibility with older versions.
-- Performing Test HAS_WNO_UNUSED_PRIVATE_FIELD
-- Performing Test HAS_WNO_UNUSED_PRIVATE_FIELD - Failed
-- Generating sources for unboxing kernels C:\Users\arc\miniforge3\envs\chuanqiw_build\python.exe;-m;torchgen.gen_executorch;--source-path=C:/Users/arc/chuanqiw/pytorch/test/edge/../../test/edge;--install-dir=C:/Users/arc/chuanqiw/pytorch/build/out;--tags-path=C:/Users/arc/chuanqiw/pytorch/test/edge/../../aten/src/ATen/native/tags.yaml;--aten-yaml-path=C:/Users/arc/chuanqiw/pytorch/test/edge/../../aten/src/ATen/native/native_functions.yaml;--use-aten-lib;--op-selection-yaml-path=C:/Users/arc/chuanqiw/pytorch/test/edge/../../test/edge/selected_operators.yaml;--custom-ops-yaml-path=C:/Users/arc/chuanqiw/pytorch/test/edge/../../test/edge/custom_ops.yaml
CMake Warning at CMakeLists.txt:1275 (message):
Generated cmake files are only fully tested if one builds with system glog,
gflags, and protobuf. Other settings may generate files that are not well
tested.
--
-- ******** Summary ********
-- General:
-- CMake version : 3.30.5
-- CMake command : C:/Users/arc/miniforge3/envs/chuanqiw_build/Lib/site-packages/cmake/data/bin/cmake.exe
-- System : Windows
-- C++ compiler : C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.41.34120/bin/Hostx64/x64/cl.exe
-- C++ compiler id : MSVC
-- C++ compiler version : 19.41.34123.0
-- Using ccache if found : OFF
-- CXX flags : /DWIN32 /D_WINDOWS /GR /EHsc /Zc:__cplusplus /bigobj /FS /utf-8 -DUSE_PTHREADPOOL -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOCUPTI -DLIBKINETO_NOROCTRACER -DLIBKINETO_NOXPUPTI=ON -DUSE_FBGEMM -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE /wd4624 /wd4068 /wd4067 /wd4267 /wd4661 /wd4717 /wd4244 /wd4804 /wd4273
-- Shared LD flags : /machine:x64 /ignore:4049 /ignore:4217 /ignore:4099
-- Static LD flags : /machine:x64 /ignore:4049 /ignore:4217 /ignore:4099
-- Module LD flags : /machine:x64 /ignore:4049 /ignore:4217 /ignore:4099
-- Build type : Release
-- Compile definitions : ONNX_ML=1;ONNXIFI_ENABLE_EXT=1;ONNX_NAMESPACE=onnx_torch;_CRT_SECURE_NO_DEPRECATE=1;USE_EXTERNAL_MZCRC;MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS;FLASHATTENTION_DISABLE_ALIBI;WIN32_LEAN_AND_MEAN;_UCRT_LEGACY_INFINITY;NOMINMAX;USE_MIMALLOC
-- CMAKE_PREFIX_PATH : C:\Users\arc\miniforge3\envs\chuanqiw_build\Lib\site-packages
-- CMAKE_INSTALL_PREFIX : C:/Users/arc/chuanqiw/pytorch/torch
-- USE_GOLD_LINKER : OFF
--
-- TORCH_VERSION : 2.6.0
-- BUILD_STATIC_RUNTIME_BENCHMARK: OFF
-- BUILD_BINARY : OFF
-- BUILD_CUSTOM_PROTOBUF : ON
-- Link local protobuf : ON
-- BUILD_PYTHON : True
-- Python version : 3.10.15
-- Python executable : C:\Users\arc\miniforge3\envs\chuanqiw_build\python.exe
-- Python library : C:/Users/arc/miniforge3/envs/chuanqiw_build/libs/python310.lib
-- Python includes : C:/Users/arc/miniforge3/envs/chuanqiw_build/include
-- Python site-package : C:\Users\arc\miniforge3\envs\chuanqiw_build\Lib\site-packages
-- BUILD_SHARED_LIBS : ON
-- CAFFE2_USE_MSVC_STATIC_RUNTIME : OFF
-- BUILD_TEST : True
-- BUILD_JNI : OFF
-- BUILD_MOBILE_AUTOGRAD : OFF
-- BUILD_LITE_INTERPRETER: OFF
-- INTERN_BUILD_MOBILE :
-- TRACING_BASED : OFF
-- USE_BLAS : 0
-- USE_LAPACK : 0
-- USE_ASAN : OFF
-- USE_TSAN : OFF
-- USE_CPP_CODE_COVERAGE : OFF
-- USE_CUDA : OFF
-- USE_XPU : OFF
-- USE_ROCM : OFF
-- BUILD_NVFUSER :
-- USE_EIGEN_FOR_BLAS : ON
-- USE_FBGEMM : ON
-- USE_FAKELOWP : OFF
-- USE_KINETO : ON
-- USE_GFLAGS : OFF
-- USE_GLOG : OFF
-- USE_LITE_PROTO : OFF
-- USE_PYTORCH_METAL : OFF
-- USE_PYTORCH_METAL_EXPORT : OFF
-- USE_MPS : OFF
-- CAN_COMPILE_METAL :
-- USE_MKL : OFF
-- USE_MKLDNN : ON
-- USE_MKLDNN_ACL : OFF
-- USE_MKLDNN_CBLAS : OFF
-- USE_UCC : OFF
-- USE_ITT : ON
-- USE_NCCL : OFF
-- USE_NNPACK : OFF
-- USE_NUMPY : ON
-- USE_OBSERVERS : ON
-- USE_OPENCL : OFF
-- USE_OPENMP : ON
-- USE_MIMALLOC : ON
-- USE_MIMALLOC_ON_MKL : OFF
-- USE_VULKAN : OFF
-- USE_PROF : OFF
-- USE_PYTORCH_QNNPACK : OFF
-- USE_XNNPACK : ON
-- USE_DISTRIBUTED : ON
-- USE_MPI : OFF
-- USE_GLOO : ON
-- USE_GLOO_WITH_OPENSSL : OFF
-- USE_TENSORPIPE : OFF
-- Public Dependencies :
-- Private Dependencies : Threads::Threads;pthreadpool;cpuinfo;XNNPACK;microkernels-prod;fbgemm;ittnotify;fp16;caffe2::openmp;gloo;fmt::fmt-header-only;kineto
-- Public CUDA Deps. :
-- Private CUDA Deps. :
-- USE_COREML_DELEGATE : OFF
-- BUILD_LAZY_TS_BACKEND : ON
-- USE_ROCM_KERNEL_ASSERT : OFF
-- Performing Test HAS_WMISSING_PROTOTYPES
-- Performing Test HAS_WMISSING_PROTOTYPES - Failed
-- Performing Test HAS_WERROR_MISSING_PROTOTYPES
-- Performing Test HAS_WERROR_MISSING_PROTOTYPES - Failed
-- Configuring done (76.9s)
-- Generating done (2.8s)
-- Build files have been written to: C:/Users/arc/chuanqiw/pytorch/build
cmake --build . --target install --config Release
```
### Versions
```
Collecting environment information...
PyTorch version: N/A
Is debug build: N/A
CUDA used to build PyTorch: N/A
ROCM used to build PyTorch: N/A
OS: Microsoft Windows 11 Enterprise (10.0.22631 64-bit)
GCC version: Could not collect
Clang version: Could not collect
CMake version: version 3.30.5
Libc version: N/A
Python version: 3.10.15 | packaged by conda-forge | (main, Oct 16 2024, 01:15:49) [MSC v.1941 64 bit (AMD64)] (64-bit runtime)
Python platform: Windows-10-10.0.22631-SP0
Is CUDA available: N/A
CUDA runtime version: Could not collect
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: Could not collect
Nvidia driver version: Could not collect
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: N/A
CPU:
Name: 12th Gen Intel(R) Core(TM) i9-12900
Manufacturer: GenuineIntel
Family: 207
Architecture: 9
ProcessorType: 3
DeviceID: CPU0
CurrentClockSpeed: 2400
MaxClockSpeed: 2400
L2CacheSize: 14336
L2CacheSpeed: None
Revision: None
Versions of relevant libraries:
[pip3] numpy==2.1.2
[pip3] optree==0.13.0
[conda] numpy 2.1.2 pypi_0 pypi
[conda] optree 0.13.0 pypi_0 pypi
```
cc @malfet @seemethere @peterjc123 @mszhanyi @skyline75489 @nbcsm @iremyux @Blackhex | true |
2,757,268,473 | [DONT MERGE]xpu env build cpu whl | chuanqi129 | closed | [
"open source",
"Stale",
"ciflow/binaries",
"topic: not user facing"
] | 2 | COLLABORATOR | Fixes #ISSUE_NUMBER
| true |
2,757,254,912 | Fix empty matrix handling of addmv in inductor | maybeLee | closed | [
"triaged",
"open source",
"Merged",
"Stale",
"ciflow/trunk",
"topic: not user facing",
"module: inductor"
] | 16 | CONTRIBUTOR | This is a resubmission of my previous PR that I accidentally deleted, apology in advance if any inconvenience caused. Below are details of this PR.
Fix an issue when torch.addmv behaves inconsistent between torch.compile mode and eager mode. Here is the code to reproduce:
```
import torch
import numpy as np
@torch.compile
def test_optimized(input, mat, vec):
return torch.addmv(input, mat, vec)
def test(input, mat, vec):
return torch.addmv(input, mat, vec)
input = torch.tensor([2], dtype=torch.int32)
mat = torch.tensor(np.random.randn(0, 0), dtype=torch.int32)
vec = torch.tensor([])
origin_out = test(input, mat, vec)
optimized_out = test_optimized(input, mat, vec)
print(origin_out) # tensor([2.])
print(optimized_out) # tensor([])
```
According to the equation (https://pytorch.org/docs/stable/generated/torch.addmv.html), when matrix and vector is empty, returning `[2.]` seems more reasonable to me.
Following the cpu implementation of this API:https://github.com/pytorch/pytorch/blob/e97b97af56204230f1030bd297dda9bc6b053a4c/aten/src/ATen/native/Blas.cpp#L62
I add an additional branch to handle empty matrix
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,757,253,087 | [don't merge] use vs2022 build windows cpu wheel. | xuhancn | closed | [
"open source",
"ciflow/binaries",
"topic: not user facing"
] | 10 | COLLABORATOR | Fixes #ISSUE_NUMBER
| true |
2,757,249,041 | [inducotr] [cuda] `frexp` output different result when meeting `inf` | shaoyuyoung | open | [
"triaged",
"oncall: pt2",
"module: inductor",
"upstream triton"
] | 8 | CONTRIBUTOR | ### 🐛 Describe the bug
**symptom**: When input tensor is `inf`, the second tensor returned by `frexp` is `-2147483648`. Eager output is zero (CPU inductor is also zero)
**device**: only cuda
**exposed area**: only input tensor is `inf` (`nan` wouldn't trigger inconsistency)
**code**
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.manual_seed(0)
torch.set_grad_enabled(False)
from torch._inductor import config
config.fallback_random = True
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
def forward(self, x):
a, b = torch.frexp(x)
return b
model = Model().cuda()
x = torch.Tensor([float("inf")]).cuda()
inputs = [x]
output = model(*inputs)
c_model = torch.compile(model)
c_output = c_model(*inputs)
print(output)
print(c_output)
```
### Error logs
```
tensor([0], device='cuda:0', dtype=torch.int32)
tensor([-2147483648], device='cuda:0', dtype=torch.int32)
```
### Versions
PyTorch version: 2.6.0.dev20241218+cu126
OS: Ubuntu 20.04.6 LTS (x86_64)
CPU: Intel(R) Xeon(R) Gold 6248 CPU @ 2.50GHz
GPU: V100
<details>
<summary>click for detailed env</summary>
```
PyTorch version: 2.6.0.dev20241218+cu126
Is debug build: False
CUDA used to build PyTorch: 12.6
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.6 LTS (x86_64)
GCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.2) 9.4.0
Clang version: 16.0.1
CMake version: version 3.26.0
Libc version: glibc-2.31
Python version: 3.12.7 | packaged by Anaconda, Inc. | (main, Oct 4 2024, 13:27:36) [GCC 11.2.0] (64-bit runtime)
Python platform: Linux-5.4.0-202-generic-x86_64-with-glibc2.31
Is CUDA available: True
CUDA runtime version: 12.6.68
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration:
GPU 0: Tesla V100-SXM2-32GB
GPU 1: Tesla V100-SXM2-32GB
GPU 2: Tesla V100-SXM2-32GB
GPU 3: Tesla V100-SXM2-32GB
Nvidia driver version: 560.35.03
cuDNN version: Probably one of the following:
/usr/lib/x86_64-linux-gnu/libcudnn.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_adv.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_cnn.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_engines_precompiled.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_engines_runtime_compiled.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_graph.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_heuristic.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_ops.so.9.6.0
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
Address sizes: 40 bits physical, 48 bits virtual
CPU(s): 20
On-line CPU(s) list: 0-19
Thread(s) per core: 1
Core(s) per socket: 20
Socket(s): 1
NUMA node(s): 1
Vendor ID: GenuineIntel
CPU family: 6
Model: 85
Model name: Intel(R) Xeon(R) Gold 6248 CPU @ 2.50GHz
Stepping: 7
CPU MHz: 2499.996
BogoMIPS: 4999.99
Hypervisor vendor: KVM
Virtualization type: full
L1d cache: 640 KiB
L1i cache: 640 KiB
L2 cache: 80 MiB
L3 cache: 16 MiB
NUMA node0 CPU(s): 0-19
Vulnerability Gather data sampling: Unknown: Dependent on hypervisor status
Vulnerability Itlb multihit: KVM: Vulnerable
Vulnerability L1tf: Mitigation; PTE Inversion
Vulnerability Mds: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
Vulnerability Meltdown: Mitigation; PTI
Vulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
Vulnerability Retbleed: Mitigation; IBRS
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; IBRS; IBPB conditional; STIBP disabled; RSB filling; PBRSB-eIBRS Not affected; BHI SW loop, KVM SW loop
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch topoext cpuid_fault invpcid_single pti ssbd ibrs ibpb fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat umip pku ospke avx512_vnni
Versions of relevant libraries:
[pip3] numpy==1.26.4
[pip3] nvidia-cublas-cu12==12.6.4.1
[pip3] nvidia-cuda-cupti-cu12==12.6.80
[pip3] nvidia-cuda-nvrtc-cu12==12.6.77
[pip3] nvidia-cuda-runtime-cu12==12.6.77
[pip3] nvidia-cudnn-cu12==9.5.1.17
[pip3] nvidia-cufft-cu12==11.3.0.4
[pip3] nvidia-curand-cu12==10.3.7.77
[pip3] nvidia-cusolver-cu12==11.7.1.2
[pip3] nvidia-cusparse-cu12==12.5.4.2
[pip3] nvidia-cusparselt-cu12==0.6.3
[pip3] nvidia-nccl-cu12==2.21.5
[pip3] nvidia-nvjitlink-cu12==12.6.85
[pip3] nvidia-nvtx-cu12==12.6.77
[pip3] onnx==1.17.0
[pip3] onnxruntime==1.20.1
[pip3] onnxscript==0.1.0.dev20241205
[pip3] optree==0.13.1
[pip3] pytorch-triton==3.2.0+gitf9cdf582
[pip3] torch==2.6.0.dev20241218+cu126
[pip3] torchaudio==2.6.0.dev20241218+cu126
[pip3] torchvision==0.22.0.dev20241218+cu126
[pip3] triton==3.0.0
[conda] numpy 1.26.4 pypi_0 pypi
[conda] nvidia-cublas-cu12 12.6.4.1 pypi_0 pypi
[conda] nvidia-cuda-cupti-cu12 12.6.80 pypi_0 pypi
[conda] nvidia-cuda-nvrtc-cu12 12.6.77 pypi_0 pypi
[conda] nvidia-cuda-runtime-cu12 12.6.77 pypi_0 pypi
[conda] nvidia-cudnn-cu12 9.5.1.17 pypi_0 pypi
[conda] nvidia-cufft-cu12 11.3.0.4 pypi_0 pypi
[conda] nvidia-curand-cu12 10.3.7.77 pypi_0 pypi
[conda] nvidia-cusolver-cu12 11.7.1.2 pypi_0 pypi
[conda] nvidia-cusparse-cu12 12.5.4.2 pypi_0 pypi
[conda] nvidia-cusparselt-cu12 0.6.3 pypi_0 pypi
[conda] nvidia-nccl-cu12 2.21.5 pypi_0 pypi
[conda] nvidia-nvjitlink-cu12 12.6.85 pypi_0 pypi
[conda] nvidia-nvtx-cu12 12.6.77 pypi_0 pypi
[conda] optree 0.13.1 pypi_0 pypi
[conda] pytorch-triton 3.2.0+gitf9cdf582 pypi_0 pypi
[conda] torch 2.6.0.dev20241218+cu126 pypi_0 pypi
[conda] torchaudio 2.6.0.dev20241218+cu126 pypi_0 pypi
[conda] torchvision 0.22.0.dev20241218+cu126 pypi_0 pypi
[conda] triton 3.0.0 pypi_0 pypi
```
</details>
cc @chauhang @penguinwu @voznesenskym @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @aakhundov @bertmaher @int3 @davidberard98 @nmacchioni @embg @peterbell10 | true |
2,757,200,944 | Flex attention with nested tensors, bug in `create_nested_block_mask` | VivekPanyam | closed | [
"triaged",
"module: nestedtensor",
"oncall: pt2",
"module: higher order operators",
"module: pt2-dispatcher",
"module: flex attention"
] | 2 | CONTRIBUTOR | The following code is from `_nested_mod_func_adapter` which is a helper function used by `create_nested_block_mask`. Conceptually, it wraps a mod function that operates on individual batch items of a nested tensor and transforms the inputs so it works on a single packed item. However, the below code doesn't appear to update the batch argument (`b`) before calling the original mod function.
https://github.com/pytorch/pytorch/blob/6ccb8ed1868984d9d2ea4e48a085508d1027cd9b/torch/nn/attention/flex_attention.py#L985-L990
Since `create_nested_block_mask` effectively packs all the batch items from the nested tensor into a single item, it appears like the helper should do something like `b_nested = q_seq_idx[q_idx]` and pass that in place of `b` to `orig_mod_func`.
Otherwise, it appears that the wrapped mod func has no way of knowing which batch item it's operating on.
cc @cpuhrsch @jbschlosser @bhosmer @drisspg @soulitzer @davidberard98 @YuqingJ @chauhang @penguinwu @zou3519 @ydwu4 @bdhirsh @yf225 @Chillee @yanboliang @BoyuanFeng | true |
2,757,170,812 | fix randint distribution for large max | ngimel | closed | [
"Merged",
"Reverted",
"ciflow/trunk",
"release notes: cpp",
"module: inductor",
"ciflow/inductor",
"ci-no-td"
] | 15 | COLLABORATOR | Fixes #ISSUE_NUMBER
Similar to #143682, for large maximum values we were sampling integers via % and it doesn't provide uniform distribution. Here we limit the max skew to approx 1% (random32 is used for max values `<= 2**32 / 128`)
This comes with significant perf penalty, especially for cuda, but it's a pretty bad bug, so we'll have to figure out what can be done to improve it.
`torch.compile` has always been producing correct results for this, and it's performance is also significantly better than current eager (eager is ~660 GB/s on H100, torch.compile 1200 GB/s), so we have to figure out why torch.compile is better.
`__launch_bounds__` slightly regress perf, so perhaps we can figure out how to specify them better, but it's only 20-30 GB/s, so the big difference is still unexplained.
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,757,161,174 | @custom_op extensions could not be export.export()ed via AOT and run from C++ | borisfom | closed | [
"module: docs",
"module: error checking",
"triaged",
"module: custom-operators",
"oncall: pt2",
"oncall: export",
"module: pt2-dispatcher"
] | 18 | CONTRIBUTOR | ### 🐛 Describe the bug
Here is the repro. I am adding a @custom_op to a working example that saves ExportedProgram via AOT and runs it from C++. When I add custom operation, it stops working :
Error: Could not find schema for mylib::custom_add.
```
import torch
def custom_add_direct(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return a + b
@torch.library.custom_op("mylib::custom_add", mutates_args=(),
device_types="cuda",
)
def _(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return custom_add_direct(a,b)
@torch.library.register_fake("mylib::custom_add")
def _(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return torch.empty_like(a)
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(10, 16)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(16, 1)
self.sigmoid = torch.nn.Sigmoid()
def forward(self, x):
x = self.fc1(x)
y = self.relu(x)
x = self.fc2(torch.ops.mylib.custom_add(x, y))
x = self.sigmoid(x)
return x
with torch.no_grad():
device = "cuda" if torch.cuda.is_available() else "cpu"
model = Model().to(device=device)
example_inputs = (torch.randn(8, 10, device=device),)
# Export the model
exported = torch.export.export(model, example_inputs)
# Compile the model
output_path = torch._inductor.aoti_compile_and_package(
exported,
example_inputs,
package_path="model.pt2",
)
```
Here's the C++ code, it runs model.pt2 perfectly if I replace "torch.ops.mylib.custom_add(x, y)" above with "x+y" :
```
#include <iostream>
#include <vector>
#include <torch/torch.h>
#include <torch/csrc/inductor/aoti_package/model_package_loader.h>
int main() {
c10::InferenceMode mode;
// Load the compiled model
torch::inductor::AOTIModelPackageLoader loader("model.pt2");
// Prepare input tensor
std::vector<torch::Tensor> inputs = {torch::randn({8, 10}, at::kCUDA)};
// Run inference
std::vector<torch::Tensor> outputs = loader.run(inputs);
// Print the result
std::cout << "Inference result:" << std::endl;
std::cout << outputs[0] << std::endl;
return 0;
}
```
### Versions
Pytorch nightly
cc @svekars @brycebortree @sekyondaMeta @AlannaBurke @malfet @chauhang @penguinwu @avikchaudhuri @gmagogsfm @zhxchen17 @tugsbayasgalan @angelayi @suo @ydwu4 @zou3519 @bdhirsh @yf225 | true |
2,757,106,794 | UNSTABLE periodic / linux-focal-rocm6.2-py3.10 / test (distributed) | jithunnair-amd | closed | [
"module: rocm",
"module: ci",
"unstable"
] | 2 | COLLABORATOR | We are working on updating labels and `.env` files on the ROCm runners
cc @jeffdaily @sunway513 @pruthvistony @ROCmSupport @dllehr-amd @jataylo @hongxiayang @naromero77amd @seemethere @malfet @pytorch/pytorch-dev-infra | true |
2,757,101,184 | [Intel GPU] Avoid copy when the input of Matmul is broadcasted | jianyizh | closed | [
"module: cpu",
"triaged",
"open source",
"Merged",
"ciflow/trunk",
"topic: not user facing",
"ciflow/xpu"
] | 20 | CONTRIBUTOR | Avoid copy when the input of Matmul is 3D and broadcasted on batch dim. oneDNN support implicit broadcast semantics i.e., src can be broadcasted into weight if the corresponding dimension in src is 1 (and vice versa). On Max 1100, timm resmlp_12_224 amp_fp16 inference with bs=128 can improve from 42ms to 13.7 ms on torch.compile and 57.5ms to 32ms on eager mode.
cc @jgong5 @mingfeima @XiaobingSuper @sanchitintel @ashokei @jingxu10 | true |
2,757,100,952 | Generalize pin memory logic for accelerator when non blocking copy happened | guangyey | closed | [
"open source",
"Merged",
"ciflow/trunk",
"topic: not user facing",
"ciflow/mps",
"ciflow/xpu",
"module: accelerator"
] | 6 | COLLABORATOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* __->__ #143783
* #144959
# Motivation
fix https://github.com/pytorch/pytorch/issues/143641
Generalize pin memory logic for accelerator when non-blocking copy happened. Each accelerator has its implementation on `empty_strided`. The accelerator which doesn't have pin memory mechanism could ignore or mimic when pin_out is True.
cc @albanD @EikanWang | true |
2,757,064,887 | [micro_pipeline_tp] don't pass return_A to fused_all_gather_scaled_matmul | yifuwang | closed | [
"oncall: distributed",
"Merged",
"ciflow/trunk",
"topic: not user facing",
"module: inductor",
"ciflow/inductor"
] | 3 | COLLABORATOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* __->__ #143782
cc @H-Huang @awgu @kwen2501 @wanchaol @fegin @fduwjj @wz337 @wconstab @d4l3k @c-p-i-o @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,757,026,080 | torch/accelerator: fix device type comparison (#143541) | guangyey | closed | [
"open source"
] | 2 | COLLABORATOR | This was failing without the fix:
```
python -c 'import torch; d=torch.device("xpu:0"); torch.accelerator.current_stream(d)'
```
with:
```
ValueError: xpu doesn't match the current accelerator xpu.
```
CC: @guangyey, @EikanWang
Pull Request resolved: https://github.com/pytorch/pytorch/pull/143541
Approved by: https://github.com/guangyey, https://github.com/albanD
(cherry picked from commit 7314cf44ae719dfbc9159496030ce84d152686e4)
Fixes #ISSUE_NUMBER
| true |
2,757,024,078 | Looking for valid compiling option for extension based on torch-2.1.0+cpu.cxx11.abi | dilililiwhy | open | [
"high priority",
"needs reproduction",
"module: crash",
"module: cpp-extensions",
"triaged",
"has workaround"
] | 9 | CONTRIBUTOR | ### 🐛 Describe the bug
Try to compile extension based on [torch-2.1.0+cpu.cxx11.abi](https://download.pytorch.org/whl/cpu-cxx11-abi/torch-2.1.0%2Bcpu.cxx11.abi-cp39-cp39-linux_x86_64.whl#sha256=f100b87d0e307dcac6321dd8f4895f14f6fa6974a921e9e7369bd9c7be4f0d5d) and set D_GLIBCXX_USE_CXX11_ABI=1.
env info:
```
Arch: x86_64
GCC version: (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9)
CMake version: version 3.18.4
Libc version: glibc-2.28
```
An segmentation fault occurs during pybind11 initialization when import the extension which inherits the torch._C._distributed_c10d.Backend. Tried the following options but none of them solved the problem:
1. set(CXX_STANDARD_REQUIRED ON)
2. string(APPEND CMAKE_CXX_FLAGS " -fabi-version=11")
Only using self-compiled torch package in same environment can fix the problem, and it seems that some _**static_strings**_ are missing in [torch-2.1.0+cpu.cxx11.abi](https://download.pytorch.org/whl/cpu-cxx11-abi/torch-2.1.0%2Bcpu.cxx11.abi-cp39-cp39-linux_x86_64.whl#sha256=f100b87d0e307dcac6321dd8f4895f14f6fa6974a921e9e7369bd9c7be4f0d5d) by tracing _**internals_pp**_ in torch/inculde/pybind11/detail/internals.h.
```
inline internals **&get_internals_pp() {
static internals **internals_pp = nullptr;
return internals_pp;
}
```
missing static_strings
```
...
[38] = "torch._C._distributed_c10d._ProcessGroupWrapper",
[39] = "torch._C._distributed_c10d._Options",
[40] = "torch._C._distributed_c10d.Device",
[41] = "torch._C._distributed_c10d.ProcessGroupGloo",
[42] = "torch._C._distributed_c10d.Backend",
[43] = "torch._C._distributed_c10d.Options",
[44] = "torch._C._distributed_c10d.BackendType",
[45] = "torch._C._distributed_c10d.ProcessGroup",
...
```
**Is there any pybind11 requirements are missing?**
### Versions
PyTorch version: 2.1.0+cpu-cxx11-abi
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: AlmaLinux 8.10 (Cerulean Leopard) (x86_64)
GCC version: (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9)
Clang version: Could not collect
CMake version: version 3.18.4
Libc version: glibc-2.28
Python version: 3.9.21 (main, Dec 17 2024, 07:34:47) [GCC 14.2.1 20240801 (Red Hat 14.2.1-1)] (64-bit runtime)
Python platform: Linux-3.10.0-1160.119.1.el7.x86_64-x86_64-with-glibc2.28
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
CPU(s): 32
On-line CPU(s) list: 0-31
Thread(s) per core: 2
Core(s) per socket: 16
Socket(s): 1
NUMA node(s): 1
Vendor ID: GenuineIntel
CPU family: 6
Model: 85
Model name: Intel(R) Xeon(R) Gold 6266C CPU @ 3.00GHz
Stepping: 7
CPU MHz: 3000.000
BogoMIPS: 6000.00
Hypervisor vendor: KVM
Virtualization type: full
L1d cache: 32K
L1i cache: 32K
L2 cache: 1024K
L3 cache: 30976K
NUMA node0 CPU(s): 0-31
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear spec_ctrl intel_stibp flush_l1d arch_capabilities
Versions of relevant libraries:
[pip3] numpy==1.21.3
[pip3] torch==2.1.0+cpu.cxx11.abi
[conda] numpy 1.24.4 pypi_0 pypi
cc @ezyang @gchanan @zou3519 @kadeng @msaroufim @malfet @seemethere @xmfan | true |
2,757,014,985 | [inductor] [dtype] `ReplicationPad` raise dtype error on eager but pass the check on indcutor | shaoyuyoung | closed | [
"triaged",
"oncall: pt2",
"module: inductor"
] | 0 | CONTRIBUTOR | ### 🐛 Describe the bug
**symptom**: when using a normal input to this model, `signbit` output a `bool` value. `replication_pad` rejects bool on eager but pass the check on inductor. I'm not sure which one should be taken.
**device**: both on cpu and cuda
**exposed area**: ReplicationPad1d, ReplicationPad2d, ReplicationPad3d
**relation**: similarly logic to #143752
**code**
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.manual_seed(0)
torch.set_grad_enabled(False)
from torch._inductor import config
config.fallback_random = True
class Model(nn.Module):
def __init__(self, pad_operator):
super(Model, self).__init__()
self.pad = pad_operator
self.signbit = torch.signbit
def forward(self, x):
x = self.signbit(x)
x = self.pad(x)
return x
def run_test(dim, device, backend):
r_pad = eval(f"nn.ReplicationPad{dim}d(padding=1)")
model = Model(r_pad).to(device)
x = torch.randn([1] * (dim + 2)).to(device)
if backend == "inductor":
model = torch.compile(model)
try:
y = model(x)
print(f"succeed on {device} with {backend}: {y.dtype}")
except Exception as e:
print(f"fail on {device} with {backend}: {e}")
run_test(1, "cpu", "eager") # fail on cpu with eager: "replication_pad1d" not implemented for 'Bool'
run_test(1, "cpu", "inductor") # succeed on cpu with inductor: torch.bool
run_test(1, "cuda", "eager") # fail on cuda with eager: "replication_pad1d_cuda" not implemented for 'Bool'
run_test(1, "cuda", "inductor") # succeed on cuda with inductor: torch.bool
run_test(2, "cpu", "eager") # fail on cpu with eager: "replication_pad2d" not implemented for 'Bool'
run_test(2, "cpu", "inductor") # succeed on cpu with inductor: torch.bool
run_test(2, "cuda", "eager") # fail on cuda with eager: "replication_pad2d_cuda" not implemented for 'Bool'
run_test(2, "cuda", "inductor") # succeed on cuda with inductor: torch.bool
run_test(3, "cpu", "eager") # fail on cpu with eager: "replication_pad3d" not implemented for 'Bool'
run_test(3, "cpu", "inductor") # succeed on cpu with inductor: torch.bool
run_test(3, "cuda", "eager") # fail on cuda with eager: "replication_pad3d_cuda" not implemented for 'Bool'
run_test(3, "cuda", "inductor") # succeed on cuda with inductor: torch.bool
```
### Error logs
```
fail on cpu with eager: "replication_pad1d" not implemented for 'Bool'
succeed on cpu with inductor: torch.bool
fail on cuda with eager: "replication_pad1d_cuda" not implemented for 'Bool'
succeed on cuda with inductor: torch.bool
fail on cpu with eager: "replication_pad2d" not implemented for 'Bool'
succeed on cpu with inductor: torch.bool
fail on cuda with eager: "replication_pad2d_cuda" not implemented for 'Bool'
succeed on cuda with inductor: torch.bool
fail on cpu with eager: "replication_pad3d" not implemented for 'Bool'
succeed on cpu with inductor: torch.bool
fail on cuda with eager: "replication_pad3d_cuda" not implemented for 'Bool'
succeed on cuda with inductor: torch.bool
```
### Versions
the same as #143752
cc @chauhang @penguinwu @voznesenskym @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @aakhundov | true |
2,756,932,200 | Sort requirements.txt | Raymo111 | closed | [
"better-engineering",
"Merged",
"ciflow/trunk",
"topic: not user facing"
] | 4 | MEMBER | null | true |
2,756,924,519 | [CUDA][CUDA graphs][RNG] Skip replay prologue if `wholegraph_increment` is 0 | eqy | closed | [
"module: cuda",
"module: random",
"open source",
"Merged",
"module: cuda graphs",
"ciflow/trunk",
"topic: not user facing"
] | 3 | COLLABORATOR | #143572
cc @ptrblck @msaroufim @pbelevich @mcarilli @ezyang @eellison @penguinwu | true |
2,756,813,851 | Remove builder repo from workflows and scripts | atalman | closed | [
"Merged",
"ciflow/binaries",
"ciflow/trunk",
"release notes: releng"
] | 6 | CONTRIBUTOR | Part of https://github.com/pytorch/builder/issues/2054
Builder is repo is no longer used. Hence remove any references to builder repo
| true |
2,756,793,993 | [pytorch/et] Allow ET to save additional resources for completing a trace like generated kernels and index tensor data | sanrise | closed | [
"fb-exported",
"Merged",
"ciflow/trunk",
"topic: not user facing"
] | 48 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* __->__ #143775
The resources directory lets ET observer dump any additional data like Triton kernels while capturing the ET.
This allows us to use the ET trace to replay PT2 workloads and get visibility into data like generated kernels and their usage in a model, index tensor data etc.
We also added a few ways to enable ET and ET Resources through the OS environment variables.
Setting `ENABLE_PYTORCH_EXECUTION_TRACE` will enable default Execution Tracing in Pytorch.
Additionally setting `ENABLE_PYTORCH_EXECUTION_TRACE_EXTRAS` will enable ET to collect extra resources from the ET run like Triton Kernels.
Differential Revision: [D67610542](https://our.internmc.facebook.com/intern/diff/D67610542/)
**NOTE FOR REVIEWERS**: This PR has internal Meta-specific changes or comments, please review them on [Phabricator](https://our.internmc.facebook.com/intern/diff/D67610542/)! | true |
2,756,762,298 | CUDA error when compiling loss function | tianyu-l | open | [
"module: activation checkpointing",
"triaged",
"oncall: pt2"
] | 1 | CONTRIBUTOR | ### 🐛 Describe the bug
In torchtitan, we recently turned on torch.compile on the loss function. It runs well until a recent pytorch nightly. As it broke CI, we have to turn it off in https://github.com/pytorch/torchtitan/pull/755. Please help resolve so that we can re-enable it.
### Error logs
There are various errors when running in different environments, CI vs. local, H100 vs. A100.
Here's the CI failure:
https://github.com/pytorch/torchtitan/actions/runs/12403557255/job/34627247992
### Versions
CI failure starts from Dec 12th or 13th pytorch nightly.
cc @soulitzer @chauhang @penguinwu | true |
2,756,744,467 | "Unknown builtin op" error during jit.load() of TorchScript module with @custom_op | borisfom | closed | [
"oncall: jit",
"triaged",
"module: custom-operators",
"oncall: pt2",
"module: pt2-dispatcher"
] | 23 | CONTRIBUTOR | ### 🐛 Describe the bug
Here is a simple repro:
1. Run the file below to produce "custom_module.pt"
2. Run: python -c 'import torch; torch.jit.load("custom_module.pt")'
```
import torch
@torch.library.custom_op("mylib::custom_add", mutates_args=())
def custom_add(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return a + b
def custom_add_direct(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return a + b
foo_lib = torch.library.Library("foo", "FRAGMENT")
def direct_register_custom_op(
op_name,
op_func,
mutates_args
):
schema_str = torch.library.infer_schema(op_func, mutates_args=mutates_args)
foo_lib.define(op_name + schema_str)
foo_lib.impl(op_name, op_func, "CUDA")
direct_register_custom_op("foo::custom_add", custom_add_direct, mutates_args=())
# Create a module that uses the custom operator
class CustomModule(torch.nn.Module):
def forward(self, x, y):
# Same result with decorator and direct registration, when jit.loaded standalone:
# Unknown builtin op: foo::custom_add.
return torch.ops.mylib.custom_add(x, y)
# return torch.ops.foo.custom_add(x, y)
# Create an instance and save it
module = CustomModule()
example_input1 = torch.randn(3, 4).cuda()
example_input2 = torch.randn(3, 4).cuda()
traced_module = torch.jit.trace(module, (example_input1, example_input2))
traced_module.save("custom_module.pt")
# This works here, but fails standalone, in both Python and C++:
traced_module = torch.jit.load("custom_module.pt")
out = traced_module(example_input1, example_input2)
print(out)
### Versions
Pytorch nightly
cc @EikanWang @jgong5 @wenzhe-nrv @sanchitintel @chauhang @penguinwu @zou3519 @bdhirsh @yf225 | true |
2,756,743,693 | cpp_wrapper: minimize pybind11 dependency | benjaminglass1 | closed | [
"open source",
"Merged",
"ciflow/trunk",
"topic: not user facing",
"module: inductor",
"ciflow/inductor"
] | 4 | COLLABORATOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* #143909
* #143421
* #143223
* #141371
* __->__ #143772
Only include the parts of `pybind11` that handle GIL management within `cpp_wrapper`. This dramatically improves compilation times by reducing the number of headers we compile. Improvements on my local system are on the order of 2x.
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,756,715,187 | [TGIF][Easy] Slightly improve the logging for tgif split pass | faran928 | closed | [
"fb-exported",
"Merged",
"ciflow/trunk",
"release notes: fx",
"fx"
] | 17 | CONTRIBUTOR | Summary:
1. Added more details for some of the assert statements.
2. Moved assert statements to use tgif_assert
Test Plan: all unit tests should pass
Reviewed By: jingsh
Differential Revision: D67608251
cc @ezyang @SherlockNoMad @EikanWang @jgong5 @wenzhe-nrv | true |
2,756,710,810 | [inductor][cpu] Accuracy failure on bmm max_autotune for offset input weights | frost-intel | open | [
"oncall: pt2",
"oncall: cpu inductor"
] | 2 | COLLABORATOR | ### 🐛 Describe the bug
Accuracy error is occurring for BMM max_autotune code when input weights have an offset. Issue is not reproducible on main due to #143102 but after #143141 lands, this issue shows up. Found testing torchbench `sam` model with `--amp`.
Here is a sample test to reproduce (could be added to `test/inductor/test_cpu_select_algorithm.py`):
```python
@patches
@torch.no_grad
@unittest.skipIf(not TEST_MKL, "Test requires MKL")
@dtypes(torch.bfloat16)
def test_bmm_5d(self, dtype):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, w):
return x @ w[2]
counters.clear()
x = torch.randn(400, 196, 196).to(dtype=dtype)
w = torch.randn(3, 400, 196, 80).to(dtype=dtype)
mod = M().to(dtype=dtype).eval()
with verify(dtype) as (atol, rtol):
self.common(mod, (x, w), atol=atol, rtol=rtol)
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
```
The error seems to be related to taking the `as_strided` tensor in `normalize_shapes` in `cpp_gemm_template.py`, but more investigation is needed.
### Versions
Seen in main after cherry-picking from #143141
cc @chauhang @penguinwu | true |
2,756,694,457 | [ROCm] Use `linux.rocm.gpu.2` for 2-GPU and `linux.rocm.gpu.4` for 4-GPU runners | jithunnair-amd | closed | [
"module: rocm",
"open source",
"Merged",
"ciflow/trunk",
"topic: not user facing",
"ciflow/periodic",
"ciflow/rocm"
] | 5 | COLLABORATOR | * Will enable us to target `periodic`/distributed CI jobs to 4-GPU runners using a different label `linux.rocm.gpu.4`
* Use 2-GPU runners for `trunk`, `pull` and `slow` (in addition to `inductor-rocm`) as well (although this currently will not change anything, since all our MI2xx runners have both `linux.rocm.gpu` and `linux.rocm.gpu.2` labels... but this will change in the future: see next point)
* Continue to use `linux.rocm.gpu` label for any job that doesn't need more than 1-GPU eg. binary test jobs in `workflows/generated-linux-binary-manywheel-nightly.yml`
cc @jeffdaily @sunway513 @pruthvistony @ROCmSupport @dllehr-amd @jataylo @hongxiayang @naromero77amd | true |
2,756,679,872 | Update tag_regex in filter_test_configs.py for workflows such as `inductor-rocm` | jithunnair-amd | closed | [
"module: rocm",
"open source",
"Merged",
"topic: not user facing",
"test-config/default",
"ciflow/rocm"
] | 3 | COLLABORATOR | This helps to make `continue-through-error`/`keep-going` work as expected on `inductor-rocm` workflow jobs.
Without this, the code here doesn't enter the `if` condition: https://github.com/pytorch/pytorch/blob/6ccb8ed1868984d9d2ea4e48a085508d1027cd9b/.github/scripts/filter_test_configs.py#L577
Tested via [this PR](https://github.com/pytorch/pytorch/pull/140989):
Without this change: https://hud.pytorch.org/pytorch/pytorch/pull/140989?sha=8232e18957f987d99c946efc0cf6da9be9b52067: https://github.com/pytorch/pytorch/actions/runs/12164558045/job/34192442187#step:13:144
With this change: https://hud.pytorch.org/pytorch/pytorch/pull/140989?sha=763179c5e421791ee05c8e2a600379b29a1c8c33: https://github.com/pytorch/pytorch/actions/runs/12261943684/job/34213300153#step:13:145
cc @jeffdaily @sunway513 @pruthvistony @ROCmSupport @dllehr-amd @jataylo @hongxiayang @naromero77amd | true |
2,756,625,241 | Revert "Exclude py 31.3t triton package from PyTorch 3.13t wheel" | atalman | closed | [
"topic: not user facing"
] | 1 | CONTRIBUTOR | Reverts pytorch/pytorch#143244 | true |
2,756,601,513 | [inductor] Fix for extract_target with dots | jansel | closed | [
"Merged",
"ciflow/trunk",
"topic: not user facing",
"module: inductor",
"ciflow/inductor"
] | 3 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* __->__ #143766
Fixes #143650
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,756,584,605 | [inductor] Improve error message for assert_size_stride | jansel | closed | [
"Merged",
"ciflow/trunk",
"module: dynamo",
"ciflow/inductor",
"release notes: inductor"
] | 6 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* __->__ #143765
```
>>> torch._C._dynamo.guards.assert_size_stride(torch.randn(10), (10,), (2,))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AssertionError: expected size 10==10, stride 1==2 at dim=0
This error most often comes from an incorrect meta function for a custom op.
See https://pytorch.org/docs/stable/library.html#torch.library.opcheck
>>>
```
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @chenyang78 @kadeng @chauhang @amjames | true |
2,756,562,153 | [dynamo] Add test for #143697 | jansel | closed | [
"Merged",
"ciflow/trunk",
"topic: not user facing",
"module: dynamo",
"ciflow/inductor"
] | 3 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* __->__ #143764
The issue from #143697 seems to already be fixed.
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @chenyang78 @kadeng @chauhang @amjames | true |
2,756,551,140 | [BE] Only print MKL version on x86 platforms | malfet | closed | [
"Merged",
"ciflow/trunk",
"release notes: python_frontend",
"topic: docs"
] | 4 | CONTRIBUTOR | As it will obviously be missing on ARM/S390, etc
Test plan: run `python3 -c "import torch;print(torch.__config__.parallel_info())"` on both x86 and non-x86 system | true |
2,756,546,032 | [inductor] Make adaptive_max_pool2d error on int64 | jansel | closed | [
"Merged",
"ciflow/trunk",
"module: inductor",
"ciflow/inductor",
"release notes: inductor"
] | 6 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* __->__ #143762
Fixes #143752
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,756,442,904 | [BE]: Properly forward raise pickle exception with from | Skylion007 | closed | [
"open source",
"better-engineering",
"Merged",
"ciflow/trunk",
"release notes: package/deploy",
"topic: not user facing"
] | 3 | COLLABORATOR | Properly raises the pickle exception with from. Provides a more informative stack trace and forwards information about the exception that led to the current exception. | true |
2,756,380,932 | [DTensor] Add strategy for _scaled_mm | lw | closed | [
"oncall: distributed",
"Merged",
"ciflow/trunk",
"release notes: distributed (dtensor)"
] | 13 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* __->__ #143760
This is done by copying the one for a regular mm, and enforcing that the scales have the same sharding scheme as their respective operands. This works because scales are 2-d tensors that must "broadcast" to the operands. This broadcasting is trivial when scales have dimensions of 1 or N, which is the only options we currently support.
Note, however, that after this PR scales will be allowed to have the mesh's world size as a dimension (in certain cases). This works because, when mapped to the local shard, it becomes a dimension of 1, which can be handled by the operator. Note that when using row-wise _scaled_mm for tensor (sequence) parallelism, this situation arises naturally!
Because of these specificities, the test is rather complex, as it specifically tests all these behaviors.
cc @H-Huang @awgu @kwen2501 @wanchaol @fegin @fduwjj @wz337 @wconstab @d4l3k @c-p-i-o | true |
2,756,340,120 | compiled autograd tests should use expecttest | zou3519 | open | [
"module: tests",
"triaged",
"enhancement",
"oncall: pt2",
"module: compiled autograd"
] | 0 | CONTRIBUTOR | The current expected_logs mechanism make it difficult to see what is going on. If there's an error then it looks like the following:

The nice thing about expecttest is that it tells me what the expected output lines look like and what the new lines are and what the diff is.
cc @mruberry @ZainRizvi @chauhang @penguinwu @xmfan @yf225 @ezyang @gqchen @pearu @nikitaved @soulitzer @Varal7 | true |
2,756,272,487 | Inconsistent results between F.linear and manual computation | eliahuhorwitz | closed | [
"module: numerical-stability",
"module: nn"
] | 1 | NONE | ### 🐛 Describe the bug
I am observing an inconsistency between the results of F.linear and the manual computation of xW^T+b.
Below is a snipped that reproduces this (I ran it on a CPU, and on float16, float32, and float64):
```python
import torch
from torch import nn
from torch.nn import functional as F
lin_layer = nn.Linear(200, 300)
input = torch.randn(1, 400, 200)
out_lin_layer = lin_layer(input)
out_lin = F.linear(input, lin_layer.weight.data, lin_layer.bias.data)
out_manual = input @ lin_layer.weight.data.t() + lin_layer.bias.data
print(f"torch.allclose(out_lin_layer, out_lin): {torch.allclose(out_lin_layer, out_lin)}") # prints True
print(f"torch.allclose(out_lin_layer, out_manual): {torch.allclose(out_lin_layer, out_manual)}") # prints False
```
When looking at the out_manual tensor, some values have an error of 1e-7 to 1e-9. It's small, but in some cases may be enough to change the results.
This seems to be somehow related to the bias, when running the snipped below the manual calculation works as expected:
```python
import torch
from torch import nn
from torch.nn import functional as F
lin_layer = nn.Linear(200, 300, bias=False)
input = torch.randn(1, 400, 200)
out_lin_layer = lin_layer(input)
out_lin = F.linear(input, lin_layer.weight.data)
out_manual = input @ lin_layer.weight.data.t()
print(f"torch.allclose(out_lin_layer, out_lin): {torch.allclose(out_lin_layer, out_lin)}") # prints True
print(f"torch.allclose(out_lin_layer, out_manual): {torch.allclose(out_lin_layer, out_manual)}") # prints True
```
### Versions
Collecting environment information...
PyTorch version: 2.5.1+cu124
Is debug build: False
CUDA used to build PyTorch: 12.4
ROCM used to build PyTorch: N/A
OS: CS HUJI Debian GNU/Linux 12 (bookworm) 5785 (x86_64)
GCC version: (Debian 12.2.0-14) 12.2.0
Clang version: 14.0.6
CMake version: version 3.25.1
Libc version: glibc-2.36
Python version: 3.10.15 (main, Oct 3 2024, 07:27:34) [GCC 11.2.0] (64-bit runtime)
Python platform: Linux-6.6.20-aufs-1-x86_64-with-glibc2.36
Is CUDA available: True
CUDA runtime version: Could not collect
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration: GPU 0: NVIDIA RTX A5000
Nvidia driver version: 550.90.07
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 48 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 48
On-line CPU(s) list: 0-47
Vendor ID: AuthenticAMD
Model name: AMD EPYC 7443 24-Core Processor
CPU family: 25
Model: 1
Thread(s) per core: 1
Core(s) per socket: 24
Socket(s): 2
Stepping: 1
Frequency boost: enabled
CPU(s) scaling MHz: 65%
CPU max MHz: 4035.6440
CPU min MHz: 1500.0000
BogoMIPS: 5699.59
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid aperfmperf rapl pni pclmulqdq monitor ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb cat_l3 cdp_l3 hw_pstate ssbd mba ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 erms invpcid cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr rdpru wbnoinvd amd_ppin brs arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold v_vmsave_vmload vgif v_spec_ctrl umip pku ospke vaes vpclmulqdq rdpid overflow_recov succor smca fsrm debug_swap
Virtualization: AMD-V
L1d cache: 1.5 MiB (48 instances)
L1i cache: 1.5 MiB (48 instances)
L2 cache: 24 MiB (48 instances)
L3 cache: 256 MiB (8 instances)
NUMA node(s): 8
NUMA node0 CPU(s): 0-5
NUMA node1 CPU(s): 6-11
NUMA node2 CPU(s): 12-17
NUMA node3 CPU(s): 18-23
NUMA node4 CPU(s): 24-29
NUMA node5 CPU(s): 30-35
NUMA node6 CPU(s): 36-41
NUMA node7 CPU(s): 42-47
Vulnerability Gather data sampling: Not affected
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Not affected
Vulnerability Retbleed: Not affected
Vulnerability Spec rstack overflow: Vulnerable: Safe RET, no microcode
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Retpolines, IBPB conditional, IBRS_FW, STIBP disabled, RSB filling, PBRSB-eIBRS Not affected
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Versions of relevant libraries:
[pip3] numpy==2.2.0
[pip3] nvidia-cublas-cu12==12.4.5.8
[pip3] nvidia-cuda-cupti-cu12==12.4.127
[pip3] nvidia-cuda-nvrtc-cu12==12.4.127
[pip3] nvidia-cuda-runtime-cu12==12.4.127
[pip3] nvidia-cudnn-cu12==9.1.0.70
[pip3] nvidia-cufft-cu12==11.2.1.3
[pip3] nvidia-curand-cu12==10.3.5.147
[pip3] nvidia-cusolver-cu12==11.6.1.9
[pip3] nvidia-cusparse-cu12==12.3.1.170
[pip3] nvidia-nccl-cu12==2.21.5
[pip3] nvidia-nvjitlink-cu12==12.4.127
[pip3] nvidia-nvtx-cu12==12.4.127
[pip3] torch==2.5.1
[pip3] torchaudio==2.5.1
[pip3] torchvision==0.20.1
[pip3] triton==3.1.0
[conda] numpy 2.2.0 pypi_0 pypi
[conda] nvidia-cublas-cu12 12.4.5.8 pypi_0 pypi
[conda] nvidia-cuda-cupti-cu12 12.4.127 pypi_0 pypi
[conda] nvidia-cuda-nvrtc-cu12 12.4.127 pypi_0 pypi
[conda] nvidia-cuda-runtime-cu12 12.4.127 pypi_0 pypi
[conda] nvidia-cudnn-cu12 9.1.0.70 pypi_0 pypi
[conda] nvidia-cufft-cu12 11.2.1.3 pypi_0 pypi
[conda] nvidia-curand-cu12 10.3.5.147 pypi_0 pypi
[conda] nvidia-cusolver-cu12 11.6.1.9 pypi_0 pypi
[conda] nvidia-cusparse-cu12 12.3.1.170 pypi_0 pypi
[conda] nvidia-nccl-cu12 2.21.5 pypi_0 pypi
[conda] nvidia-nvjitlink-cu12 12.4.127 pypi_0 pypi
[conda] nvidia-nvtx-cu12 12.4.127 pypi_0 pypi
[conda] torch 2.5.1 pypi_0 pypi
[conda] torchaudio 2.5.1 pypi_0 pypi
[conda] torchvision 0.20.1 pypi_0 pypi
[conda] triton 3.1.0 pypi_0 pypi
cc @albanD @mruberry @jbschlosser @walterddr @mikaylagawarecki | true |
2,756,145,240 | [don't merge] disable xpu env installation. | xuhancn | closed | [
"open source",
"ciflow/binaries",
"topic: not user facing"
] | 1 | COLLABORATOR | Fixes #ISSUE_NUMBER
| true |
2,756,139,055 | `self.__dict__[...] = ...` produces a graph break | akihironitta | closed | [
"triaged",
"oncall: pt2",
"module: dynamo",
"dynamo-triage-jan2025"
] | 0 | CONTRIBUTOR | ### 🐛 Describe the bug
In https://github.com/pyg-team/pytorch_geometric/issues/9879, the issue author tries to create a `torch_geometric.data.Data` ([docs](https://pytorch-geometric.readthedocs.io/en/stable/generated/torch_geometric.data.Data.html)) in a region, however, it leads to a graph break on nightly.
Here's a minimal repro:
```python
import torch
class Something:
def __init__(self) -> None:
self.__dict__["something"] = 'whatever'
class MyModule(torch.nn.Module):
def forward(self, x) -> torch.Tensor:
Something()
return x
mod = torch.compile(MyModule())
mod(torch.randn(1))
```
### Error logs
```console
$ TORCH_LOGS=graph_breaks python test_export2.py
V1223 13:57:44.041000 1500643 site-packages/torch/_dynamo/symbolic_convert.py:450] [0/0] [__graph_breaks] Graph break in user code at /home/aki/work/github.com/pyg-team/pytorch_geometric/test_export2.py:6
V1223 13:57:44.041000 1500643 site-packages/torch/_dynamo/symbolic_convert.py:450] [0/0] [__graph_breaks] Reason: Unsupported: call_method GetAttrVariable(UserDefinedObjectVariable(Something), __dict__) __setitem__ [ConstantVariable(str: 'something'), ConstantVariable(str: 'whatever')] {}
V1223 13:57:44.041000 1500643 site-packages/torch/_dynamo/symbolic_convert.py:450] [0/0] [__graph_breaks] User code traceback:
V1223 13:57:44.041000 1500643 site-packages/torch/_dynamo/symbolic_convert.py:450] [0/0] [__graph_breaks] File "/home/aki/work/github.com/pyg-team/pytorch_geometric/test_export2.py", line 11, in forward
V1223 13:57:44.041000 1500643 site-packages/torch/_dynamo/symbolic_convert.py:450] [0/0] [__graph_breaks] Something()
V1223 13:57:44.041000 1500643 site-packages/torch/_dynamo/symbolic_convert.py:450] [0/0] [__graph_breaks] File "/home/aki/work/github.com/pyg-team/pytorch_geometric/test_export2.py", line 6, in __init__
V1223 13:57:44.041000 1500643 site-packages/torch/_dynamo/symbolic_convert.py:450] [0/0] [__graph_breaks] self.__dict__["something"] = 'whatever'
V1223 13:57:44.041000 1500643 site-packages/torch/_dynamo/symbolic_convert.py:450] [0/0] [__graph_breaks]
```
### Versions
```console
$ curl -OL https://raw.githubusercontent.com/pytorch/pytorch/main/torch/utils/collect_env.py
$ # For security purposes, please check the contents of collect_env.py before running it.
$ console python3 collect_env.py
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 24353 100 24353 0 0 139k 0 --:--:-- --:--:-- --:--:-- 139k
Collecting environment information...
PyTorch version: 2.6.0.dev20241221+cpu
Is debug build: False
CUDA used to build PyTorch: Could not collect
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.6 LTS (x86_64)
GCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.2) 9.4.0
Clang version: Could not collect
CMake version: version 3.28.3
Libc version: glibc-2.31
Python version: 3.10.16 (main, Dec 11 2024, 16:24:50) [GCC 11.2.0] (64-bit runtime)
Python platform: Linux-5.15.0-1055-aws-x86_64-with-glibc2.31
Is CUDA available: False
CUDA runtime version: Could not collect
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: GPU 0: Tesla T4
Nvidia driver version: 545.23.08
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
Address sizes: 46 bits physical, 48 bits virtual
CPU(s): 16
On-line CPU(s) list: 0-15
Thread(s) per core: 2
Core(s) per socket: 8
Socket(s): 1
NUMA node(s): 1
Vendor ID: GenuineIntel
CPU family: 6
Model: 85
Model name: Intel(R) Xeon(R) Platinum 8259CL CPU @ 2.50GHz
Stepping: 7
CPU MHz: 2499.998
BogoMIPS: 4999.99
Hypervisor vendor: KVM
Virtualization type: full
L1d cache: 256 KiB
L1i cache: 256 KiB
L2 cache: 8 MiB
L3 cache: 35.8 MiB
NUMA node0 CPU(s): 0-15
Vulnerability Gather data sampling: Unknown: Dependent on hypervisor status
Vulnerability Itlb multihit: KVM: Mitigation: VMX unsupported
Vulnerability L1tf: Mitigation; PTE Inversion
Vulnerability Mds: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
Vulnerability Meltdown: Mitigation; PTI
Vulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
Vulnerability Retbleed: Vulnerable
Vulnerability Spec rstack overflow: Not affected
Vulnerability Spec store bypass: Vulnerable
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Retpolines, STIBP disabled, RSB filling, PBRSB-eIBRS Not affected
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single pti fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves ida arat pku ospke avx512_vnni
Versions of relevant libraries:
[pip3] executorch==0.5.0.dev20241206+cpu
[pip3] numpy==1.21.3
[pip3] torch==2.6.0.dev20241221+cpu
[pip3] torch-geometric==2.7.0
[conda] executorch 0.5.0.dev20241206+cpu pypi_0 pypi
[conda] numpy 1.21.3 pypi_0 pypi
[conda] torch 2.6.0.dev20241221+cpu pypi_0 pypi
[conda] torch-geometric 2.7.0 pypi_0 pypi
```
cc @chauhang @penguinwu @voznesenskym @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @chenyang78 @kadeng @amjames | true |
2,756,112,478 | [CompiledAutograd] No implicit dtype cast as expected | mieshkiwrk | closed | [
"triaged",
"oncall: pt2",
"module: compiled autograd"
] | 2 | NONE | ### 🐛 Describe the bug
Unfortunately I don't have simple reproducer for now, trying to get one but without success so far (also pytorch minifier ends up with error).
Problem observed is that given model when run with eager performs implicit dtype cast from fp32 to bf16
```
THPEngine_run_backward
-> PythonEngine::execute
-> Engine::execute
-> execute_with_graph_task
-> thread_main
-> evaluate_function
-> call_function
-> [validate_outputs -> validate_outputs_impl:940](https://github.com/pytorch/pytorch/blob/main/torch/csrc/autograd/engine.cpp#L940)
if (c10::typeMetaToScalarType(metadata.options().dtype()) !=
grad.scalar_type()) {
grad = grad.to(c10::typeMetaToScalarType(metadata.options().dtype()));
}
```
When enabled Compiled Autograd it goes different path
```
THPEngine_run_backward
-> PythonEngine::execute
-> Engine::execute
-> return (*compiled_autograd)(graph_root, *graph_task, accumulate_grad, outputs);
-> compiled_autograd
-> _compiled_autograd_impl
-> [validate_outputs -> validate_outputs_impl:729](https://github.com/pytorch/pytorch/blob/main/torch/csrc/dynamo/python_compiled_autograd.cpp#L729)
```
Leaving it without cast leading to dtype mismatch fp32 vs bf16 when both bf16 are expected looking at compiled autograd graph.
I'll be trying to get simple reproducer and update when one is available.
### Versions
PT 2.5.1
cc @chauhang @penguinwu @xmfan @yf225 | true |
2,756,071,215 | nn.LayerNorm is slower than naive implementation when dimension is low | qwertyforce | open | [
"module: performance",
"module: nn",
"triaged",
"module: norms and normalization"
] | 0 | NONE | ### 🐛 Describe the bug
```python
import torch
import torch.nn as nn
import time
import matplotlib.pyplot as plt
from tqdm import tqdm
class ElementwiseLayerNorm(nn.Module):
def __init__(self, dim, eps=1e-5, elementwise_affine=True):
super(ElementwiseLayerNorm, self).__init__()
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = nn.Parameter(torch.ones(dim))
self.bias = nn.Parameter(torch.zeros(dim))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
mean = x.mean(dim=-1, keepdim=True)
var = x.var(dim=-1, keepdim=True, unbiased=False)
x_normalized = (x - mean) / torch.sqrt(var + self.eps)
if self.elementwise_affine:
return x_normalized * self.weight + self.bias
else:
return x_normalized
embedding_sizes = [8, 16, 32, 64, 128,256]
seq_lens = [8, 16, 32, 64, 128,256]
results = {}
for el in embedding_sizes:
results[el] = {}
for el2 in seq_lens:
results[el][el2] = {"builtin": [], "custom": [], "builtin_compiled": [], "custom_compiled": []}
# Benchmark parameters
n_trials = 100
n_runs = 100
warmup_trials = 20
batch_size = 1024
with torch.no_grad():
for i in tqdm(range(n_runs)):
for seq_len in seq_lens:
for embedding_size in embedding_sizes:
# Input tensor
x = torch.randn(batch_size, seq_len, embedding_size).cuda()
# Built-in LayerNorm
layernorm_builtin = nn.LayerNorm(embedding_size).cuda()
layernorm_builtin_compiled = torch.compile(layernorm_builtin)
layer_norm_custom = ElementwiseLayerNorm(embedding_size).cuda()
layernorm_custom_compiled = torch.compile(layer_norm_custom)
# Benchmark built-in LayerNorm
torch.cuda.synchronize()
start_time = time.time()
for _ in range(n_trials):
_ = layernorm_builtin(x)
torch.cuda.synchronize()
if i>warmup_trials:
results[embedding_size][seq_len]["builtin"].append((time.time() - start_time) / n_trials)
torch.cuda.synchronize()
start_time = time.time()
for _ in range(n_trials):
_ = layer_norm_custom(x)
torch.cuda.synchronize()
if i>warmup_trials:
results[embedding_size][seq_len]["custom"].append((time.time() - start_time) / n_trials)
torch.cuda.synchronize()
start_time = time.time()
for _ in range(n_trials):
_ = layernorm_builtin_compiled(x)
torch.cuda.synchronize()
if i>warmup_trials:
results[embedding_size][seq_len]["builtin_compiled"].append((time.time() - start_time) / n_trials)
torch.cuda.synchronize()
start_time = time.time()
for _ in range(n_trials):
_ = layernorm_custom_compiled(x)
torch.cuda.synchronize()
if i>warmup_trials:
results[embedding_size][seq_len]["custom_compiled"].append((time.time() - start_time) / n_trials)
```
```
from statistics import mean
for dim in results:
for seq in results[dim]:
for impl in results[dim][seq]:
print(f"dim={dim}",f"seq_len={seq}",impl, mean(results[dim][seq][impl]))
print(f"dim={dim}",f"seq_len={seq}","builtin/custom",mean(results[dim][seq]["builtin"])/mean(results[dim][seq]["custom"]))
print(f"dim={dim}",f"seq_len={seq}","builtin_compiled/custom_compiled",mean(results[dim][seq]["builtin_compiled"])/mean(results[dim][seq]["custom_compiled"]))
print()
```
output
```
dim=8 seq_len=8 builtin 9.678224974040744e-05
dim=8 seq_len=8 custom 0.000110510874398147
dim=8 seq_len=8 builtin_compiled 8.088670199430441e-05
dim=8 seq_len=8 custom_compiled 0.00012810453583922567
dim=8 seq_len=8 builtin/custom 0.8757712783243551
dim=8 seq_len=8 builtin_compiled/custom_compiled 0.631411694085674
dim=8 seq_len=16 builtin 0.00016347541084772423
dim=8 seq_len=16 custom 0.00010370945628685287
dim=8 seq_len=16 builtin_compiled 0.00014441260808630834
dim=8 seq_len=16 custom_compiled 0.00012443533426598658
dim=8 seq_len=16 builtin/custom 1.5762825946706638
dim=8 seq_len=16 builtin_compiled/custom_compiled 1.1605434174960254
dim=8 seq_len=32 builtin 0.00032524501221089424
dim=8 seq_len=32 custom 0.00010442410843281806
dim=8 seq_len=32 builtin_compiled 0.0002761233003833626
dim=8 seq_len=32 custom_compiled 0.00012560237812090524
dim=8 seq_len=32 builtin/custom 3.1146544326987744
dim=8 seq_len=32 builtin_compiled/custom_compiled 2.1983922957061006
dim=8 seq_len=64 builtin 0.0006188744834706753
dim=8 seq_len=64 custom 0.00012607903420170651
dim=8 seq_len=64 builtin_compiled 0.0005796107762976538
dim=8 seq_len=64 custom_compiled 0.0001332149928129172
dim=8 seq_len=64 builtin/custom 4.90862328847296
dim=8 seq_len=64 builtin_compiled/custom_compiled 4.350942518246728
dim=8 seq_len=128 builtin 0.0012050605423842805
dim=8 seq_len=128 custom 0.0002844446218466457
dim=8 seq_len=128 builtin_compiled 0.0011724761467945726
dim=8 seq_len=128 custom_compiled 0.0002772958369194707
dim=8 seq_len=128 builtin/custom 4.236538327077149
dim=8 seq_len=128 builtin_compiled/custom_compiled 4.228250087775644
dim=8 seq_len=256 builtin 0.002282433721083629
dim=8 seq_len=256 custom 0.0005463570280920101
dim=8 seq_len=256 builtin_compiled 0.002378028012529204
dim=8 seq_len=256 custom_compiled 0.0005396363101428069
dim=8 seq_len=256 builtin/custom 4.1775498506065745
dim=8 seq_len=256 builtin_compiled/custom_compiled 4.406723505873602
dim=16 seq_len=8 builtin 6.951238535627534e-05
dim=16 seq_len=8 custom 0.00010947852195063724
dim=16 seq_len=8 builtin_compiled 6.628769862500927e-05
dim=16 seq_len=8 custom_compiled 0.00012823382510414606
dim=16 seq_len=8 builtin/custom 0.6349408460923301
dim=16 seq_len=8 builtin_compiled/custom_compiled 0.5169283421996749
dim=16 seq_len=16 builtin 0.00013065977941585492
dim=16 seq_len=16 custom 0.00010479531710660911
dim=16 seq_len=16 builtin_compiled 0.00012786113763157327
dim=16 seq_len=16 custom_compiled 0.00012438583977614776
dim=16 seq_len=16 builtin/custom 1.2468093329297691
dim=16 seq_len=16 builtin_compiled/custom_compiled 1.0279396582575626
dim=16 seq_len=32 builtin 0.0002697024767911887
dim=16 seq_len=32 custom 0.00011774214008186437
dim=16 seq_len=32 builtin_compiled 0.00028936654706544513
dim=16 seq_len=32 custom_compiled 0.00012548968761782103
dim=16 seq_len=32 builtin/custom 2.2906197951189653
dim=16 seq_len=32 builtin_compiled/custom_compiled 2.305899014958992
dim=16 seq_len=64 builtin 0.0005626950384695319
dim=16 seq_len=64 custom 0.0002793135522287103
dim=16 seq_len=64 builtin_compiled 0.0006131733520121514
dim=16 seq_len=64 custom_compiled 0.00028033552290518074
dim=16 seq_len=64 builtin/custom 2.0145640409484336
dim=16 seq_len=64 builtin_compiled/custom_compiled 2.1872838149717757
dim=16 seq_len=128 builtin 0.0011427697652502905
dim=16 seq_len=128 custom 0.0005438440057295787
dim=16 seq_len=128 builtin_compiled 0.0012262067915518073
dim=16 seq_len=128 custom_compiled 0.0005439215973962712
dim=16 seq_len=128 builtin/custom 2.101282266993528
dim=16 seq_len=128 builtin_compiled/custom_compiled 2.254381509066022
dim=16 seq_len=256 builtin 0.0022997066642664655
dim=16 seq_len=256 custom 0.0011024737961684602
dim=16 seq_len=256 builtin_compiled 0.0023997911018661306
dim=16 seq_len=256 custom_compiled 0.0011086066765121267
dim=16 seq_len=256 builtin/custom 2.0859513144519815
dim=16 seq_len=256 builtin_compiled/custom_compiled 2.164691186432594
dim=32 seq_len=8 builtin 6.437950496432147e-05
dim=32 seq_len=8 custom 0.00010460446152505995
dim=32 seq_len=8 builtin_compiled 6.390468983710567e-05
dim=32 seq_len=8 custom_compiled 0.00012611235244364678
dim=32 seq_len=8 builtin/custom 0.6154565878521172
dim=32 seq_len=8 builtin_compiled/custom_compiled 0.5067282355680538
dim=32 seq_len=16 builtin 0.00013311337821091278
dim=32 seq_len=16 custom 0.00011503971075709862
dim=32 seq_len=16 builtin_compiled 0.00014025141921224473
dim=32 seq_len=16 custom_compiled 0.00012984505182580102
dim=32 seq_len=16 builtin/custom 1.1571080745498041
dim=32 seq_len=16 builtin_compiled/custom_compiled 1.0801445048549467
dim=32 seq_len=32 builtin 0.00029121899906593034
dim=32 seq_len=32 custom 0.0002813394763801671
dim=32 seq_len=32 builtin_compiled 0.0003161558320250692
dim=32 seq_len=32 custom_compiled 0.0002892188180851031
dim=32 seq_len=32 builtin/custom 1.0351160200227758
dim=32 seq_len=32 builtin_compiled/custom_compiled 1.0931371413461757
dim=32 seq_len=64 builtin 0.0005759487574613547
dim=32 seq_len=64 custom 0.000560284656814382
dim=32 seq_len=64 builtin_compiled 0.000689772955978973
dim=32 seq_len=64 custom_compiled 0.0005597002898590475
dim=32 seq_len=64 builtin/custom 1.0279573971131641
dim=32 seq_len=64 builtin_compiled/custom_compiled 1.2323969961721521
dim=32 seq_len=128 builtin 0.0011702884601641306
dim=32 seq_len=128 custom 0.0011110351659074614
dim=32 seq_len=128 builtin_compiled 0.0012642960005168674
dim=32 seq_len=128 custom_compiled 0.0011257228066649618
dim=32 seq_len=128 builtin/custom 1.0533316100829921
dim=32 seq_len=128 builtin_compiled/custom_compiled 1.1230970830753968
dim=32 seq_len=256 builtin 0.0023688038693198674
dim=32 seq_len=256 custom 0.0022182996665375144
dim=32 seq_len=256 builtin_compiled 0.0024688906307461897
dim=32 seq_len=256 custom_compiled 0.002304515567006944
dim=32 seq_len=256 builtin/custom 1.0678466507716116
dim=32 seq_len=256 builtin_compiled/custom_compiled 1.0713273826797067
dim=64 seq_len=8 builtin 6.682106211215635e-05
dim=64 seq_len=8 custom 0.00010525736627699453
dim=64 seq_len=8 builtin_compiled 6.782447235493721e-05
dim=64 seq_len=8 custom_compiled 0.00013141333302365074
dim=64 seq_len=8 builtin/custom 0.6348350189221961
dim=64 seq_len=8 builtin_compiled/custom_compiled 0.5161156086249687
dim=64 seq_len=16 builtin 0.00015420886534678785
dim=64 seq_len=16 custom 0.00023138170000873036
dim=64 seq_len=16 builtin_compiled 0.0001589152480982527
dim=64 seq_len=16 custom_compiled 0.00023725310458412653
dim=64 seq_len=16 builtin/custom 0.6664695839859822
dim=64 seq_len=16 builtin_compiled/custom_compiled 0.6698131448134692
dim=64 seq_len=32 builtin 0.00029486447949952715
dim=64 seq_len=32 custom 0.0004519757138022894
dim=64 seq_len=32 builtin_compiled 0.0003595852851867676
dim=64 seq_len=32 custom_compiled 0.00045755332029318507
dim=64 seq_len=32 builtin/custom 0.652390096403524
dim=64 seq_len=32 builtin_compiled/custom_compiled 0.7858871725733674
dim=64 seq_len=64 builtin 0.0005870274048817308
dim=64 seq_len=64 custom 0.0008933664273612107
dim=64 seq_len=64 builtin_compiled 0.0006842062443117552
dim=64 seq_len=64 custom_compiled 0.0008906397034850301
dim=64 seq_len=64 builtin/custom 0.6570958868643278
dim=64 seq_len=64 builtin_compiled/custom_compiled 0.7682188898995735
dim=64 seq_len=128 builtin 0.0012294705306427388
dim=64 seq_len=128 custom 0.0017652433733396891
dim=64 seq_len=128 builtin_compiled 0.0013268066056167022
dim=64 seq_len=128 custom_compiled 0.0017648941655702228
dim=64 seq_len=128 builtin/custom 0.6964878323359379
dim=64 seq_len=128 builtin_compiled/custom_compiled 0.7517768665680993
dim=64 seq_len=256 builtin 0.00250308896921858
dim=64 seq_len=256 custom 0.0035207106795492053
dim=64 seq_len=256 builtin_compiled 0.0026342577270314664
dim=64 seq_len=256 custom_compiled 0.0036150625687611255
dim=64 seq_len=256 builtin/custom 0.7109612794252885
dim=64 seq_len=256 builtin_compiled/custom_compiled 0.7286893869541575
dim=128 seq_len=8 builtin 7.666261890266515e-05
dim=128 seq_len=8 custom 0.00021290081965772412
dim=128 seq_len=8 builtin_compiled 7.994509950468812e-05
dim=128 seq_len=8 custom_compiled 0.00021764854841594455
dim=128 seq_len=8 builtin/custom 0.3600860674276122
dim=128 seq_len=8 builtin_compiled/custom_compiled 0.3673128081327993
dim=128 seq_len=16 builtin 0.0001518993438044681
dim=128 seq_len=16 custom 0.00040543532069725326
dim=128 seq_len=16 builtin_compiled 0.00017205464689037467
dim=128 seq_len=16 custom_compiled 0.00041323366044442863
dim=128 seq_len=16 builtin/custom 0.3746574016867524
dim=128 seq_len=16 builtin_compiled/custom_compiled 0.41636164562521755
dim=128 seq_len=32 builtin 0.0002870179429838929
dim=128 seq_len=32 custom 0.0007859975778603856
dim=128 seq_len=32 builtin_compiled 0.0003618722324129901
dim=128 seq_len=32 custom_compiled 0.0007874307753164557
dim=128 seq_len=32 builtin/custom 0.36516390262321524
dim=128 seq_len=32 builtin_compiled/custom_compiled 0.45956069251618914
dim=128 seq_len=64 builtin 0.0006045847603037387
dim=128 seq_len=64 custom 0.0015403484996361068
dim=128 seq_len=64 builtin_compiled 0.0007178449328941635
dim=128 seq_len=64 custom_compiled 0.0015334002881110468
dim=128 seq_len=64 builtin/custom 0.3924986848408437
dim=128 seq_len=64 builtin_compiled/custom_compiled 0.4681392969988657
dim=128 seq_len=128 builtin 0.0013390024100677876
dim=128 seq_len=128 custom 0.003025425234927407
dim=128 seq_len=128 builtin_compiled 0.001430505529234681
dim=128 seq_len=128 custom_compiled 0.003132360253152968
dim=128 seq_len=128 builtin/custom 0.4425832093318664
dim=128 seq_len=128 builtin_compiled/custom_compiled 0.45668614515037476
dim=128 seq_len=256 builtin 0.0027464558504804782
dim=128 seq_len=256 custom 0.006132948941822294
dim=128 seq_len=256 builtin_compiled 0.003066002900087381
dim=128 seq_len=256 custom_compiled 0.00601917876472956
dim=128 seq_len=256 builtin/custom 0.44781978075043605
dim=128 seq_len=256 builtin_compiled/custom_compiled 0.5093722947810033
dim=256 seq_len=8 builtin 9.337358836886249e-05
dim=256 seq_len=8 custom 0.00039830766146696067
dim=256 seq_len=8 builtin_compiled 9.896996655041659e-05
dim=256 seq_len=8 custom_compiled 0.0004038087023964411
dim=256 seq_len=8 builtin/custom 0.2344257904178119
dim=256 seq_len=8 builtin_compiled/custom_compiled 0.24509121760643077
dim=256 seq_len=16 builtin 0.00017236368565619746
dim=256 seq_len=16 custom 0.0007606318630749666
dim=256 seq_len=16 builtin_compiled 0.00021217062503476686
dim=256 seq_len=16 custom_compiled 0.0007664575154268289
dim=256 seq_len=16 builtin/custom 0.2266059233429846
dim=256 seq_len=16 builtin_compiled/custom_compiled 0.2768198116194505
dim=256 seq_len=32 builtin 0.0003308805634703817
dim=256 seq_len=32 custom 0.00148523535909532
dim=256 seq_len=32 builtin_compiled 0.0004414746127551115
dim=256 seq_len=32 custom_compiled 0.001480541108529779
dim=256 seq_len=32 builtin/custom 0.2227798856552447
dim=256 seq_len=32 builtin_compiled/custom_compiled 0.29818463682748314
dim=256 seq_len=64 builtin 0.0008268203916428964
dim=256 seq_len=64 custom 0.0029062589512595647
dim=256 seq_len=64 builtin_compiled 0.0009188679803775836
dim=256 seq_len=64 custom_compiled 0.002903341281263134
dim=256 seq_len=64 builtin/custom 0.2844964628098796
dim=256 seq_len=64 builtin_compiled/custom_compiled 0.31648638288152564
dim=256 seq_len=128 builtin 0.0018138287037233764
dim=256 seq_len=128 custom 0.00574048962774156
dim=256 seq_len=128 builtin_compiled 0.002191464750072624
dim=256 seq_len=128 custom_compiled 0.005744126416459868
dim=256 seq_len=128 builtin/custom 0.31597107935843066
dim=256 seq_len=128 builtin_compiled/custom_compiled 0.38151401817915315
dim=256 seq_len=256 builtin 0.0036927097658567792
dim=256 seq_len=256 custom 0.01159543124935295
dim=256 seq_len=256 builtin_compiled 0.003931530155713045
dim=256 seq_len=256 custom_compiled 0.011449624496170237
dim=256 seq_len=256 builtin/custom 0.31846247771619884
dim=256 seq_len=256 builtin_compiled/custom_compiled 0.34337634016103274
```
if builtin/custom is > 1, it means that nn.LayerNorm is slower than custom implementation
### Versions
Collecting environment information...
PyTorch version: 2.5.1+cu121
Is debug build: False
CUDA used to build PyTorch: 12.1
ROCM used to build PyTorch: N/A
OS: Ubuntu 22.04.3 LTS (x86_64)
GCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0
Clang version: 14.0.0-1ubuntu1.1
CMake version: version 3.31.2
Libc version: glibc-2.35
Python version: 3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0] (64-bit runtime)
Python platform: Linux-6.1.85+-x86_64-with-glibc2.35
Is CUDA available: True
CUDA runtime version: 12.2.140
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration: GPU 0: Tesla T4
Nvidia driver version: 535.104.05
cuDNN version: Probably one of the following:
/usr/lib/x86_64-linux-gnu/libcudnn.so.8.9.6
/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.9.6
/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.9.6
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.9.6
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.9.6
/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.9.6
/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.9.6
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 46 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 2
On-line CPU(s) list: 0,1
Vendor ID: GenuineIntel
Model name: Intel(R) Xeon(R) CPU @ 2.00GHz
CPU family: 6
Model: 85
Thread(s) per core: 2
Core(s) per socket: 1
Socket(s): 1
Stepping: 3
BogoMIPS: 4000.35
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat md_clear arch_capabilities
Hypervisor vendor: KVM
Virtualization type: full
L1d cache: 32 KiB (1 instance)
L1i cache: 32 KiB (1 instance)
L2 cache: 1 MiB (1 instance)
L3 cache: 38.5 MiB (1 instance)
NUMA node(s): 1
NUMA node0 CPU(s): 0,1
Vulnerability Gather data sampling: Not affected
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Mitigation; PTE Inversion
Vulnerability Mds: Vulnerable; SMT Host state unknown
Vulnerability Meltdown: Vulnerable
Vulnerability Mmio stale data: Vulnerable
Vulnerability Reg file data sampling: Not affected
Vulnerability Retbleed: Vulnerable
Vulnerability Spec rstack overflow: Not affected
Vulnerability Spec store bypass: Vulnerable
Vulnerability Spectre v1: Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers
Vulnerability Spectre v2: Vulnerable; IBPB: disabled; STIBP: disabled; PBRSB-eIBRS: Not affected; BHI: Vulnerable (Syscall hardening enabled)
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Vulnerable
Versions of relevant libraries:
[pip3] numpy==1.26.4
[pip3] nvidia-cublas-cu12==12.6.4.1
[pip3] nvidia-cuda-cupti-cu12==12.6.80
[pip3] nvidia-cuda-runtime-cu12==12.6.77
[pip3] nvidia-cudnn-cu12==9.6.0.74
[pip3] nvidia-cufft-cu12==11.3.0.4
[pip3] nvidia-curand-cu12==10.3.7.77
[pip3] nvidia-cusolver-cu12==11.7.1.2
[pip3] nvidia-cusparse-cu12==12.5.4.2
[pip3] nvidia-nccl-cu12==2.23.4
[pip3] nvidia-nvjitlink-cu12==12.6.85
[pip3] nvtx==0.2.10
[pip3] optree==0.13.1
[pip3] pynvjitlink-cu12==0.4.0
[pip3] torch==2.5.1+cu121
[pip3] torchaudio==2.5.1+cu121
[pip3] torchsummary==1.5.1
[pip3] torchvision==0.20.1+cu121
[pip3] triton==3.1.0
[conda] Could not collect
cc @msaroufim @albanD @mruberry @jbschlosser @walterddr @mikaylagawarecki | true |
2,756,025,579 | [BE][CI] bump `ruff` to 0.8.4 | XuehaiPan | closed | [
"oncall: distributed",
"module: cpu",
"module: lint",
"module: mkldnn",
"open source",
"Merged",
"ciflow/trunk",
"release notes: distributed (ddp)",
"topic: not user facing",
"fx",
"module: inductor",
"module: dynamo",
"ciflow/inductor",
"ciflow/linux-aarch64"
] | 9 | COLLABORATOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* __->__ #143753
Changes:
1. Bump `ruff` from 0.7.4 to 0.8.4
2. Change `%`-formatted strings to f-string
3. Change arguments with the `__`-prefix to positional-only arguments with the `/` separator in function signature.
cc @H-Huang @awgu @kwen2501 @wanchaol @fegin @fduwjj @wz337 @wconstab @d4l3k @c-p-i-o @jgong5 @mingfeima @XiaobingSuper @sanchitintel @ashokei @jingxu10 @gujinghui @PenghuiCheng @jianyuh @min-jean-cho @yanbing-j @Guobing-Chen @Xia-Weiwen @snadampal @ezyang @SherlockNoMad @EikanWang @wenzhe-nrv @voznesenskym @penguinwu @zhuhaozhe @blzheng @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov @LucasLLC @MeetVadakkanchery @mhorowitz @pradeepfn | true |
2,756,012,663 | [eager] [inductor] `AdaptiveMaxPool1d` (`AdaptiveMaxPool2d`) behave differently on eager and inductor when meeting internal int64 dtypes | shaoyuyoung | closed | [
"triaged",
"oncall: pt2",
"module: inductor"
] | 2 | CONTRIBUTOR | ### 🐛 Describe the bug
I think it's a problem with eager's internal processing mechanism. inductor works well for **implicit type conversion**, but unfortunately, eager will raise an error (although the external input looks fine because I used fp32 as the external input).
However, to be honest, I am not sure what happened after using `AdaptiveMaxPool3d`. Both inductor and eager fail!
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.manual_seed(0)
from torch._inductor import config
config.fallback_random = True
class Model(torch.nn.Module):
def __init__(self, pool_operator):
super(Model, self).__init__()
self.pool = pool_operator
def forward(self, x):
x = torch.argmax(x, dim=1)
# when touching here, x.dtype=torch.int64
x = self.pool(x)
return x
def run_test(dim, device, backend):
op_inst = eval(f"nn.AdaptiveMaxPool{dim}d(5)")
model = Model(op_inst).to(device)
x = torch.randn([1] * (dim + 2)).to(device)
if backend == "inductor":
model = torch.compile(model)
try:
y = model(x)
print(f"succeed on {device} with {backend}: {y.dtype}")
except Exception as e:
print(f"fail on {device} with {backend}: {e}")
run_test(1, "cpu", "eager") # fail on cpu with eager: "adaptive_max_pool2d" not implemented for 'Long'
run_test(1, "cpu", "inductor") # succeed on cpu with inductor: torch.int64
run_test(1, "cuda", "eager") # fail on cuda with eager: "adaptive_max_pool2d_cuda" not implemented for 'Long'
run_test(1, "cuda", "inductor") # fail on cuda with inductor: backend='inductor' raised: SubprocException: An exception occurred in a subprocess:
run_test(2, "cpu", "eager") # fail on cpu with eager: "adaptive_max_pool2d" not implemented for 'Long'
run_test(2, "cpu", "inductor") # succeed on cpu with inductor: torch.int64
run_test(2, "cuda", "eager") # fail on cuda with eager: "adaptive_max_pool2d_cuda" not implemented for 'Long'
run_test(2, "cuda", "inductor") # # fail on cuda with inductor: backend='inductor' raised: SubprocException: An exception occurred in a subprocess:
run_test(3, "cpu", "eager") # fail on cpu with eager: "adaptive_max_pool3d_cpu" not implemented for 'Long'
run_test(3, "cpu", "inductor") # fail on cpu with inductor: "adaptive_max_pool3d_cpu" not implemented for 'Long'
run_test(3, "cuda", "eager") # fail on cuda with eager: "adaptive_max_pool3d_cuda" not implemented for 'Long'
run_test(3, "cuda", "inductor") # fail on cuda with inductor: "adaptive_max_pool3d_cuda" not implemented for 'Long'
```
### Error logs
```
fail on cpu with eager: "adaptive_max_pool2d" not implemented for 'Long'
succeed on cpu with inductor: torch.int64
fail on cuda with eager: "adaptive_max_pool2d_cuda" not implemented for 'Long'
fail on cuda with inductor: backend='inductor' raised: SubprocException: An exception occurred in a subprocess:
fail on cpu with eager: "adaptive_max_pool2d" not implemented for 'Long'
succeed on cpu with inductor: torch.int64
fail on cuda with eager: "adaptive_max_pool2d_cuda" not implemented for 'Long'
fail on cuda with inductor: backend='inductor' raised: SubprocException: An exception occurred in a subprocess:
fail on cpu with eager: "adaptive_max_pool3d_cpu" not implemented for 'Long'
fail on cpu with inductor: "adaptive_max_pool3d_cpu" not implemented for 'Long'
fail on cuda with eager: "adaptive_max_pool3d_cuda" not implemented for 'Long'
fail on cuda with inductor: "adaptive_max_pool3d_cuda" not implemented for 'Long'
```
### Versions
PyTorch version: 2.6.0.dev20241218+cu126
OS: Ubuntu 20.04.6 LTS (x86_64)
CPU: Intel(R) Xeon(R) Gold 6248 CPU @ 2.50GHz
GPU: V100
<details>
<summary>click for detailed env</summary>
```
PyTorch version: 2.6.0.dev20241218+cu126
Is debug build: False
CUDA used to build PyTorch: 12.6
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.6 LTS (x86_64)
GCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.2) 9.4.0
Clang version: 16.0.1
CMake version: version 3.26.0
Libc version: glibc-2.31
Python version: 3.12.7 | packaged by Anaconda, Inc. | (main, Oct 4 2024, 13:27:36) [GCC 11.2.0] (64-bit runtime)
Python platform: Linux-5.4.0-202-generic-x86_64-with-glibc2.31
Is CUDA available: True
CUDA runtime version: 12.6.68
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration:
GPU 0: Tesla V100-SXM2-32GB
GPU 1: Tesla V100-SXM2-32GB
GPU 2: Tesla V100-SXM2-32GB
GPU 3: Tesla V100-SXM2-32GB
Nvidia driver version: 560.35.03
cuDNN version: Probably one of the following:
/usr/lib/x86_64-linux-gnu/libcudnn.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_adv.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_cnn.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_engines_precompiled.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_engines_runtime_compiled.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_graph.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_heuristic.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_ops.so.9.6.0
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
Address sizes: 40 bits physical, 48 bits virtual
CPU(s): 20
On-line CPU(s) list: 0-19
Thread(s) per core: 1
Core(s) per socket: 20
Socket(s): 1
NUMA node(s): 1
Vendor ID: GenuineIntel
CPU family: 6
Model: 85
Model name: Intel(R) Xeon(R) Gold 6248 CPU @ 2.50GHz
Stepping: 7
CPU MHz: 2499.996
BogoMIPS: 4999.99
Hypervisor vendor: KVM
Virtualization type: full
L1d cache: 640 KiB
L1i cache: 640 KiB
L2 cache: 80 MiB
L3 cache: 16 MiB
NUMA node0 CPU(s): 0-19
Vulnerability Gather data sampling: Unknown: Dependent on hypervisor status
Vulnerability Itlb multihit: KVM: Vulnerable
Vulnerability L1tf: Mitigation; PTE Inversion
Vulnerability Mds: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
Vulnerability Meltdown: Mitigation; PTI
Vulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
Vulnerability Retbleed: Mitigation; IBRS
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; IBRS; IBPB conditional; STIBP disabled; RSB filling; PBRSB-eIBRS Not affected; BHI SW loop, KVM SW loop
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch topoext cpuid_fault invpcid_single pti ssbd ibrs ibpb fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat umip pku ospke avx512_vnni
Versions of relevant libraries:
[pip3] numpy==1.26.4
[pip3] nvidia-cublas-cu12==12.6.4.1
[pip3] nvidia-cuda-cupti-cu12==12.6.80
[pip3] nvidia-cuda-nvrtc-cu12==12.6.77
[pip3] nvidia-cuda-runtime-cu12==12.6.77
[pip3] nvidia-cudnn-cu12==9.5.1.17
[pip3] nvidia-cufft-cu12==11.3.0.4
[pip3] nvidia-curand-cu12==10.3.7.77
[pip3] nvidia-cusolver-cu12==11.7.1.2
[pip3] nvidia-cusparse-cu12==12.5.4.2
[pip3] nvidia-cusparselt-cu12==0.6.3
[pip3] nvidia-nccl-cu12==2.21.5
[pip3] nvidia-nvjitlink-cu12==12.6.85
[pip3] nvidia-nvtx-cu12==12.6.77
[pip3] onnx==1.17.0
[pip3] onnxruntime==1.20.1
[pip3] onnxscript==0.1.0.dev20241205
[pip3] optree==0.13.1
[pip3] pytorch-triton==3.2.0+gitf9cdf582
[pip3] torch==2.6.0.dev20241218+cu126
[pip3] torchaudio==2.6.0.dev20241218+cu126
[pip3] torchvision==0.22.0.dev20241218+cu126
[pip3] triton==3.0.0
[conda] numpy 1.26.4 pypi_0 pypi
[conda] nvidia-cublas-cu12 12.6.4.1 pypi_0 pypi
[conda] nvidia-cuda-cupti-cu12 12.6.80 pypi_0 pypi
[conda] nvidia-cuda-nvrtc-cu12 12.6.77 pypi_0 pypi
[conda] nvidia-cuda-runtime-cu12 12.6.77 pypi_0 pypi
[conda] nvidia-cudnn-cu12 9.5.1.17 pypi_0 pypi
[conda] nvidia-cufft-cu12 11.3.0.4 pypi_0 pypi
[conda] nvidia-curand-cu12 10.3.7.77 pypi_0 pypi
[conda] nvidia-cusolver-cu12 11.7.1.2 pypi_0 pypi
[conda] nvidia-cusparse-cu12 12.5.4.2 pypi_0 pypi
[conda] nvidia-cusparselt-cu12 0.6.3 pypi_0 pypi
[conda] nvidia-nccl-cu12 2.21.5 pypi_0 pypi
[conda] nvidia-nvjitlink-cu12 12.6.85 pypi_0 pypi
[conda] nvidia-nvtx-cu12 12.6.77 pypi_0 pypi
[conda] optree 0.13.1 pypi_0 pypi
[conda] pytorch-triton 3.2.0+gitf9cdf582 pypi_0 pypi
[conda] torch 2.6.0.dev20241218+cu126 pypi_0 pypi
[conda] torchaudio 2.6.0.dev20241218+cu126 pypi_0 pypi
[conda] torchvision 0.22.0.dev20241218+cu126 pypi_0 pypi
[conda] triton 3.0.0 pypi_0 pypi
```
</details>
cc @chauhang @penguinwu @voznesenskym @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @aakhundov | true |
2,755,869,396 | [ONNX] exported model for Phi-2 is wrong before optimization and correct after | xadupre | closed | [
"module: onnx",
"triaged"
] | 2 | COLLABORATOR | ### 🐛 Describe the bug
If ``ep.optimize()`` is not run, the exporter model for Phi 2 is wrong.
```
onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Load model from dump_bash_bench/Phi2LM_2Layer-onnx_dynamo-cpu-float16-d1rt1/model_Phi2LM_2Layer-onnx_dynamo-d1rt1.onnx failed:Node (node_Concat_810) Op (Concat) [ShapeInferenceError] axis must be in [-rank, rank-1].
```
### Versions
```
--2024-12-23 12:37:09-- https://raw.githubusercontent.com/pytorch/pytorch/main/torch/utils/collect_env.py
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.111.133, 185.199.110.133, 185.199.109.133, ...
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.111.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 24353 (24K) [text/plain]
Saving to: ‘collect_env.py’
collect_env.py 100%[=======================================================================================================================>] 23.78K --.-KB/s in 0.003s
2024-12-23 12:37:09 (7.59 MB/s) - ‘collect_env.py’ saved [24353/24353]
xadupre@xavier2024:~/github/experimental-experiment$ python collect_env.py
Collecting environment information...
PyTorch version: 2.6.0.dev20241218+cu126
Is debug build: False
CUDA used to build PyTorch: 12.6
ROCM used to build PyTorch: N/A
OS: Ubuntu 22.04.4 LTS (x86_64)
GCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0
Clang version: Could not collect
CMake version: version 3.31.3
Libc version: glibc-2.35
Python version: 3.12.8 (main, Dec 4 2024, 08:54:12) [GCC 11.4.0] (64-bit runtime)
Python platform: Linux-5.15.167.4-microsoft-standard-WSL2-x86_64-with-glibc2.35
Is CUDA available: True
CUDA runtime version: 12.6.68
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration: GPU 0: NVIDIA GeForce RTX 4060 Laptop GPU
Nvidia driver version: 538.92
cuDNN version: Probably one of the following:
/usr/lib/x86_64-linux-gnu/libcudnn.so.9.3.0
/usr/lib/x86_64-linux-gnu/libcudnn_adv.so.9.3.0
/usr/lib/x86_64-linux-gnu/libcudnn_cnn.so.9.3.0
/usr/lib/x86_64-linux-gnu/libcudnn_engines_precompiled.so.9.3.0
/usr/lib/x86_64-linux-gnu/libcudnn_engines_runtime_compiled.so.9.3.0
/usr/lib/x86_64-linux-gnu/libcudnn_graph.so.9.3.0
/usr/lib/x86_64-linux-gnu/libcudnn_heuristic.so.9.3.0
/usr/lib/x86_64-linux-gnu/libcudnn_ops.so.9.3.0
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 46 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 20
On-line CPU(s) list: 0-19
Vendor ID: GenuineIntel
Model name: 13th Gen Intel(R) Core(TM) i7-13800H
CPU family: 6
Model: 186
Thread(s) per core: 2
Core(s) per socket: 10
Socket(s): 1
Stepping: 2
BogoMIPS: 5836.79
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced tpr_shadow vnmi ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves avx_vnni umip waitpkg gfni vaes vpclmulqdq rdpid movdiri movdir64b fsrm md_clear serialize flush_l1d arch_capabilities
Virtualization: VT-x
Hypervisor vendor: Microsoft
Virtualization type: full
L1d cache: 480 KiB (10 instances)
L1i cache: 320 KiB (10 instances)
L2 cache: 12.5 MiB (10 instances)
L3 cache: 24 MiB (1 instance)
Vulnerability Gather data sampling: Not affected
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Not affected
Vulnerability Reg file data sampling: Mitigation; Clear Register File
Vulnerability Retbleed: Mitigation; Enhanced IBRS
Vulnerability Spec rstack overflow: Not affected
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI BHI_DIS_S
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Versions of relevant libraries:
[pip3] mypy-extensions==1.0.0
[pip3] numpy==2.2.0
[pip3] nvidia-cublas-cu12==12.6.4.1
[pip3] nvidia-cuda-cupti-cu12==12.6.80
[pip3] nvidia-cuda-nvrtc-cu12==12.6.77
[pip3] nvidia-cuda-runtime-cu12==12.6.77
[pip3] nvidia-cudnn-cu12==9.5.1.17
[pip3] nvidia-cufft-cu12==11.3.0.4
[pip3] nvidia-curand-cu12==10.3.7.77
[pip3] nvidia-cusolver-cu12==11.7.1.2
[pip3] nvidia-cusparse-cu12==12.5.4.2
[pip3] nvidia-cusparselt-cu12==0.6.3
[pip3] nvidia-nccl-cu12==2.21.5
[pip3] nvidia-nvjitlink-cu12==12.6.85
[pip3] nvidia-nvtx-cu12==12.6.77
[pip3] onnx==1.18.0
[pip3] onnx-extended==0.3.0
[pip3] onnxruntime_extensions==0.13.0
[pip3] onnxruntime-training==1.21.0+cu126
[pip3] pytorch-triton==3.2.0+git0d4682f0
[pip3] torch==2.6.0.dev20241218+cu126
[pip3] torchaudio==2.6.0.dev20241218+cu126
[pip3] torchvision==0.22.0.dev20241218+cu126
[conda] Could not collect
``` | true |
2,755,814,475 | Update TorchDynamo-based ONNX Exporter example code. | fatcat-z | closed | [
"oncall: distributed",
"module: onnx",
"module: cpu",
"triaged",
"module: mkldnn",
"open source",
"ciflow/trunk",
"release notes: onnx",
"release notes: quantization",
"topic: docs",
"ciflow/mps",
"module: inductor",
"module: dynamo",
"ciflow/inductor",
"ciflow/linux-aarch64"
] | 10 | COLLABORATOR | Address comments earlier.
cc @H-Huang @awgu @kwen2501 @wanchaol @fegin @fduwjj @wz337 @wconstab @d4l3k @c-p-i-o @jgong5 @mingfeima @XiaobingSuper @sanchitintel @ashokei @jingxu10 @gujinghui @PenghuiCheng @jianyuh @min-jean-cho @yanbing-j @Guobing-Chen @Xia-Weiwen @snadampal @voznesenskym @penguinwu @EikanWang @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov @LucasLLC @MeetVadakkanchery @mhorowitz @pradeepfn | true |
2,755,789,505 | Update module.py as per #142306 | grussdorian | closed | [
"triaged",
"open source",
"Stale",
"release notes: fx"
] | 6 | NONE | release notes: fx Issue #142306
Minor work in Improve typing of args and kwargs with ParamSpec
**register_forward_hook**
Key changes:
1. Replace Any with Input/Output type vars for inputs/outputs
2. Ensure the output type of the hook matches its input type
3. Keep the Dict[str, Any] for kwargs as those are arbitrary
**register_forward_pre_hook**
Key changes:
1. Replace Any with Input type var for inputs
2. Ensure the return type matches the input type structure
3. Keep Dict[str, Any] for kwargs | true |
2,755,740,754 | [DTensor]`Linear` fails on 3D DTensor with `batch size > 1` and `Replicate` input redistributed from `Shard` | FindDefinition | open | [
"oncall: distributed",
"module: dtensor"
] | 1 | NONE | ### 🐛 Describe the bug
`Linear` fails on 3D DTensor with `batch size > 1` and Replicate input from shard (not divisible by TP size).
* Error Message
```
[rank3]: Traceback (most recent call last):
[rank3]: File "/path/to/pytorch_bug/linear_bug.py", line 34, in <module>
[rank3]: mod(x_dt)
[rank3]: File "/opt/miniconda/envs/torchtitan/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl
[rank3]: return self._call_impl(*args, **kwargs)
[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank3]: File "/opt/miniconda/envs/torchtitan/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1750, in _call_impl
[rank3]: return forward_call(*args, **kwargs)
[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank3]: File "/path/to/pytorch_bug/linear_bug.py", line 20, in forward
[rank3]: return self.layer(x)
[rank3]: ^^^^^^^^^^^^^
[rank3]: File "/opt/miniconda/envs/torchtitan/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1739, in _wrapped_call_impl
[rank3]: return self._call_impl(*args, **kwargs)
[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank3]: File "/opt/miniconda/envs/torchtitan/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1845, in _call_impl
[rank3]: return inner()
[rank3]: ^^^^^^^
[rank3]: File "/opt/miniconda/envs/torchtitan/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1793, in inner
[rank3]: result = forward_call(*args, **kwargs)
[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank3]: File "/opt/miniconda/envs/torchtitan/lib/python3.11/site-packages/torch/nn/modules/linear.py", line 125, in forward
[rank3]: return F.linear(input, self.weight, self.bias)
[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank3]: File "/opt/miniconda/envs/torchtitan/lib/python3.11/site-packages/torch/_compile.py", line 32, in inner
[rank3]: return disable_fn(*args, **kwargs)
[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank3]: File "/opt/miniconda/envs/torchtitan/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py", line 751, in _fn
[rank3]: return fn(*args, **kwargs)
[rank3]: ^^^^^^^^^^^^^^^^^^^
[rank3]: File "/opt/miniconda/envs/torchtitan/lib/python3.11/site-packages/torch/distributed/tensor/_api.py", line 343, in __torch_dispatch__
[rank3]: return DTensor._op_dispatcher.dispatch(
[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank3]: File "/opt/miniconda/envs/torchtitan/lib/python3.11/site-packages/torch/distributed/tensor/_dispatch.py", line 216, in dispatch
[rank3]: local_results = op_call(*local_tensor_args, **op_info.local_kwargs)
[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank3]: File "/opt/miniconda/envs/torchtitan/lib/python3.11/site-packages/torch/_ops.py", line 722, in __call__
[rank3]: return self._op(*args, **kwargs)
[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^
[rank3]: RuntimeError: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead.
```
* Code
`torchrun --nnodes=1 --nproc-per-node=4 --standalone /path/to/pytorch_bug/linear_bug.py`
```Python
from torch.distributed.tensor import Shard, DTensor, Replicate
import torch.distributed as dist
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
parallelize_module,
ColwiseParallel,
)
_world_size = int(os.environ["WORLD_SIZE"])
device_mesh = init_device_mesh(device_type="cuda", mesh_shape=(_world_size,))
class Mod(nn.Module):
def __init__(self):
super(Mod, self).__init__()
self.layer = nn.Linear(64, 64)
def forward(self, x):
return self.layer(x)
mod_ref = Mod().cuda()
mod = Mod().cuda()
parallelize_module(mod, device_mesh, {
"layer": ColwiseParallel()
})
x = torch.randn(4, 111, 64).cuda()
x_dt = DTensor.from_local(x, device_mesh, [Replicate()])
x_shard = x_dt.redistribute(device_mesh, [Shard(1)])
x_dt = x_shard.redistribute(device_mesh, [Replicate()])
# works
mod_ref(x)
mod_ref(x_dt._local_tensor)
x_dt_2 = DTensor.from_local(x_dt._local_tensor, device_mesh, [Replicate()])
mod(x_dt_2)
# error
mod(x_dt)
dist.barrier()
dist.destroy_process_group()
```
### Versions
```
PyTorch version: 2.6.0.dev20241222+cu124
Is debug build: False
CUDA used to build PyTorch: 12.4
ROCM used to build PyTorch: N/A
OS: Ubuntu 22.04.3 LTS (x86_64)
GCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0
Clang version: Could not collect
CMake version: Could not collect
Libc version: glibc-2.35
Python version: 3.11.10 (main, Oct 3 2024, 07:29:13) [GCC 11.2.0] (64-bit runtime)
Python platform: Linux-5.4.210-4-velinux1-amd64-x86_64-with-glibc2.35
Is CUDA available: True
CUDA runtime version: Could not collect
Nvidia driver version: 535.86.10
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] numpy==2.1.3
[pip3] nvidia-cublas-cu12==12.4.5.8
[pip3] nvidia-cuda-cupti-cu12==12.4.127
[pip3] nvidia-cuda-nvrtc-cu12==12.4.127
[pip3] nvidia-cuda-runtime-cu12==12.4.127
[pip3] nvidia-cudnn-cu12==9.1.0.70
[pip3] nvidia-cufft-cu12==11.2.1.3
[pip3] nvidia-curand-cu12==10.3.5.147
[pip3] nvidia-cusolver-cu12==11.6.1.9
[pip3] nvidia-cusparse-cu12==12.3.1.170
[pip3] nvidia-cusparselt-cu12==0.6.2
[pip3] nvidia-nccl-cu12==2.21.5
[pip3] nvidia-nvjitlink-cu12==12.4.127
[pip3] nvidia-nvtx-cu12==12.4.127
[pip3] pytorch-triton==3.2.0+git0d4682f0
[pip3] torch==2.6.0.dev20241222+cu124
[pip3] torchaudio==2.6.0.dev20241222+cu124
[pip3] torchdata==0.9.0
[pip3] torchpippy==0.2.0+1bcb2bf
[pip3] torchtitan==0.0.2
[pip3] torchvision==0.22.0.dev20241222+cu124
[pip3] triton==3.1.0
[conda] blas 1.0 mkl
[conda] cuda-cudart 12.1.105 0 nvidia
[conda] cuda-cupti 12.1.105 0 nvidia
[conda] cuda-libraries 12.1.0 0 nvidia
[conda] cuda-nvrtc 12.1.105 0 nvidia
[conda] cuda-nvtx 12.1.105 0 nvidia
[conda] cuda-opencl 12.4.127 0 nvidia
[conda] cuda-runtime 12.1.0 0 nvidia
[conda] libcublas 12.1.0.26 0 nvidia
[conda] libcufft 11.0.2.4 0 nvidia
[conda] libcurand 10.3.5.147 0 nvidia
[conda] libcusolver 11.4.4.55 0 nvidia
[conda] libcusparse 12.0.2.55 0 nvidia
[conda] libnvjitlink 12.1.105 0 nvidia
[conda] mkl 2023.1.0 h213fc3f_46344
[conda] mkl-service 2.4.0 py311h5eee18b_1
[conda] mkl_fft 1.3.11 py311h5eee18b_0
[conda] mkl_random 1.2.8 py311ha02d727_0
[conda] numpy 2.1.3 py311h08b1b3b_0
[conda] numpy-base 2.1.3 py311hf175353_0
[conda] nvidia-cublas-cu12 12.4.5.8 pypi_0 pypi
[conda] nvidia-cuda-cupti-cu12 12.4.127 pypi_0 pypi
[conda] nvidia-cuda-nvrtc-cu12 12.4.127 pypi_0 pypi
[conda] nvidia-cuda-runtime-cu12 12.4.127 pypi_0 pypi
[conda] nvidia-cudnn-cu12 9.1.0.70 pypi_0 pypi
[conda] nvidia-cufft-cu12 11.2.1.3 pypi_0 pypi
[conda] nvidia-curand-cu12 10.3.5.147 pypi_0 pypi
[conda] nvidia-cusolver-cu12 11.6.1.9 pypi_0 pypi
[conda] nvidia-cusparse-cu12 12.3.1.170 pypi_0 pypi
[conda] nvidia-cusparselt-cu12 0.6.2 pypi_0 pypi
[conda] nvidia-nccl-cu12 2.21.5 pypi_0 pypi
[conda] nvidia-nvjitlink-cu12 12.4.127 pypi_0 pypi
[conda] nvidia-nvtx-cu12 12.4.127 pypi_0 pypi
[conda] pytorch-cuda 12.1 ha16c6d3_6 pytorch
[conda] pytorch-triton 3.2.0+git0d4682f0 pypi_0 pypi
[conda] torch 2.6.0.dev20241222+cu124 pypi_0 pypi
[conda] torchaudio 2.6.0.dev20241222+cu124 pypi_0 pypi
[conda] torchdata 0.9.0 pypi_0 pypi
[conda] torchpippy 0.2.0+1bcb2bf pypi_0 pypi
[conda] torchtitan 0.0.2 pypi_0 pypi
[conda] torchvision 0.22.0.dev20241222+cu124 pypi_0 pypi
[conda] triton 3.1.0 pypi_0 pypi
```
cc @H-Huang @awgu @kwen2501 @wanchaol @fegin @fduwjj @wz337 @wconstab @d4l3k @c-p-i-o @tianyu-l @XilunWu | true |
2,755,560,485 | [DTensor] Add aten.amin/amax to linear_reduction_strategy | lw | closed | [
"oncall: distributed",
"Merged",
"ciflow/trunk",
"release notes: distributed (dtensor)"
] | 9 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* __->__ #143747
In the same vein as https://github.com/pytorch/pytorch/pull/134206, these two ops still seemed missing.
cc @H-Huang @awgu @kwen2501 @wanchaol @fegin @fduwjj @wz337 @wconstab @d4l3k @c-p-i-o | true |
2,755,552,013 | [Inductor][CPP] Fix Data Type issue of frexp | leslie-fang-intel | closed | [
"open source",
"Merged",
"ciflow/trunk",
"topic: not user facing",
"module: inductor",
"ciflow/inductor"
] | 3 | COLLABORATOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* __->__ #143746
**Summary**
Fix issue: https://github.com/pytorch/pytorch/issues/143729. `frexp` has 1 input but 2 output tensor with different data type, current `deduce_dtype_for_cpp_cse_variable` can't deduce the data type for each output correctly due to missing of output index. In this PR, we set the data type of cse var in the codegen of `frexp` and avoid it being overridden in the following flow.
**Test Plan**
```
python -u -m pytest -s -v test/inductor/test_cpu_repro.py -k test_frexp
```
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,755,426,592 | Update slow tests | pytorchupdatebot | closed | [
"open source",
"Merged",
"ciflow/trunk",
"topic: not user facing",
"ciflow/slow",
"ci-no-td"
] | 6 | COLLABORATOR | This PR is auto-generated weekly by [this action](https://github.com/pytorch/pytorch/blob/main/.github/workflows/weekly.yml).
Update the list of slow tests. | true |
2,755,409,160 | [don't merge] use vs2019 build xpu | xuhancn | closed | [
"open source",
"ciflow/binaries",
"topic: not user facing"
] | 1 | COLLABORATOR | Fixes #ISSUE_NUMBER
| true |
2,755,382,921 | Enable onednn in pytorch for ppc64le architecture | Tiwari-Avanish | closed | [
"module: cpu",
"triaged",
"open source",
"Merged",
"Reverted",
"release notes: quantization",
"release notes: build",
"topic: improvements",
"ci-no-td"
] | 39 | CONTRIBUTOR | This PR will enable onednn for powerpc Architecture which will help to do quantization of the model via onednn for powerpc.
cc @jgong5 @mingfeima @XiaobingSuper @sanchitintel @ashokei @jingxu10 | true |
2,755,322,641 | [Export] fake mode mismatch error inside `export_for_training` with multiple kwargs | Xia-Weiwen | closed | [
"oncall: pt2",
"oncall: export"
] | 3 | COLLABORATOR | ### 🐛 Describe the bug
Repro:
```python
import torch
from torch.export import export_for_training
from transformers import AlbertTokenizer, AlbertModel
print("[info] load model")
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v1')
model = AlbertModel.from_pretrained("albert-base-v1")
model = model.eval()
text = "Hello, how are you?"
example_input = tokenizer(text, return_tensors='pt') # it is a dict of length 3
with torch.no_grad():
print("[info] export model")
exported_model = export_for_training(
model,
args=(),
kwargs=example_input
).module() # error here
```
Error message:
```
AssertionError: fake mode (<torch._subclasses.fake_tensor.FakeTensorMode object at 0x7ffad765f190>) from active fake mode 0 doesn't match mode (<torch._subclasses.fake_tensor.FakeTensorMode object at 0x7ffad78716f0>) from fake tensor input 26
```
If we create `example_input` as
```
example_input = tokenizer(text, return_tensors='pt')
example_input = dict((list(example_input.items())[0],)) # get the first arg, which is input_ids
```
then it works fine.
If we use the old `capture_pre_autograd_graph` from PyTorch 2.5.1, it works fine with multiple kwargs.
### Versions
PyTorch version: 2.6.0.dev20241222+cpu
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: CentOS Stream 8 (x86_64)
GCC version: (conda-forge gcc 12.3.0-13) 12.3.0
Clang version: 12.0.1 (Red Hat 12.0.1-4.module_el8.5.0+1025+93159d6c)
CMake version: version 3.28.4
Libc version: glibc-2.28
Python version: 3.10.14 | packaged by conda-forge | (main, Mar 20 2024, 12:45:18) [GCC 12.3.0] (64-bit runtime)
Python platform: Linux-5.16.0-x86_64-with-glibc2.28
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
CPU(s): 240
On-line CPU(s) list: 0-239
Thread(s) per core: 2
Core(s) per socket: 60
Socket(s): 2
NUMA node(s): 2
Vendor ID: GenuineIntel
CPU family: 6
Model: 143
Model name: Intel(R) Xeon(R) Platinum 8490H
Stepping: 8
CPU MHz: 1900.000
CPU max MHz: 3500.0000
CPU min MHz: 800.0000
BogoMIPS: 3800.00
Virtualization: VT-x
L1d cache: 48K
L1i cache: 32K
L2 cache: 2048K
L3 cache: 115200K
NUMA node0 CPU(s): 0-59,120-179
NUMA node1 CPU(s): 60-119,180-239
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb cat_l3 cat_l2 cdp_l3 invpcid_single intel_ppin cdp_l2 ssbd mba ibrs ibpb stibp ibrs_enhanced tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid cqm rdt_a avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb intel_pt avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local split_lock_detect avx_vnni avx512_bf16 wbnoinvd dtherm ida arat pln pts hwp hwp_act_window hwp_epp hwp_pkg_req avx512vbmi umip pku ospke waitpkg avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg tme avx512_vpopcntdq la57 rdpid bus_lock_detect cldemote movdiri movdir64b enqcmd fsrm md_clear serialize tsxldtrk pconfig arch_lbr avx512_fp16 amx_tile flush_l1d arch_capabilities
Versions of relevant libraries:
[pip3] flake8==3.8.2
[pip3] flake8-bugbear==20.1.4
[pip3] flake8-comprehensions==3.3.0
[pip3] flake8-executable==2.0.4
[pip3] flake8-logging-format==0.9.0
[pip3] flake8-pyi==20.5.0
[pip3] flake8-simplify==0.19.3
[pip3] mypy==1.13.0
[pip3] mypy-extensions==1.0.0
[pip3] numpy==2.2.0
[pip3] onnx==1.17.0
[pip3] optree==0.13.0
[pip3] torch==2.6.0.dev20241222+cpu
[pip3] torchao==0.7.0+git581d8e0
[pip3] torchvision==0.20.0a0+518ee93
[conda] mkl-include 2024.2.1 pypi_0 pypi
[conda] mkl-static 2024.2.1 pypi_0 pypi
[conda] numpy 2.2.0 pypi_0 pypi
[conda] optree 0.13.0 pypi_0 pypi
[conda] torch 2.6.0.dev20241222+cpu pypi_0 pypi
[conda] torchao 0.7.0+git581d8e0 dev_0 <develop>
[conda] torchfix 0.4.0 pypi_0 pypi
[conda] torchvision 0.20.0a0+518ee93 dev_0 <develop>
cc @chauhang @penguinwu @avikchaudhuri @gmagogsfm @zhxchen17 @tugsbayasgalan @angelayi @suo @ydwu4 | true |
2,755,308,374 | Enable SVE ACLE implementation for tanH Aten op for FP32 dType. | maajidkhann | closed | [
"module: cpu",
"triaged",
"open source",
"module: arm",
"Merged",
"ciflow/trunk",
"topic: not user facing",
"ciflow/linux-aarch64"
] | 40 | CONTRIBUTOR | In deep learning models, the tanh (hyperbolic tangent) function is a widely used activation function, primarily in feedforward networks, recurrent neural networks (RNNs), and various other architectures.
Also, the tanh (hyperbolic tangent) function is commonly used in **Physics-Informed Neural Networks (PINNs).** PINNs are a class of machine learning models designed to solve partial differential equations (PDEs) by incorporating the governing physics directly into the loss function, along with data-driven terms.
In PINNs, activation functions like tanh are used in the neural network architecture to enable the model to learn complex mappings between inputs (such as spatial and temporal coordinates) and outputs (such as field variables).
**Operator: tanh()**
**Current Implementation in OSS in ATen Backend:**
**SVE Flow:** Uses SVE sleef when available else std implementation.
**With this PR :**
**SVE Flow:** Uses SVE ACLE implementation. (Faster Implementation)
**Here are the performance improvements.**
**Single core perf numbers:**

**Metric:** CPU time avg time per iteration (In ms)
As you can see with both gcc and clang compilers, we see a significant performance gain with SVE ACLE implementation over current OSS Implementation (Sleef) and also Neon.
**Hardware:** m7g.8xlarge (Graviton 3 Instance)
**Script used in benchmarking:**
```python
import os
#os.environ["ATEN_CPU_CAPABILITY"] = "default"
os.environ["ATEN_CPU_CAPABILITY"] = "sve256"
import torch
import torch.nn as nn
#Set the random seed for reproducibility
torch.manual_seed(1)
#Create a tensor of shape (8521, 50)
x = torch.randn(8521, 50)
for i in range(10):
output = x.tanh()
#Perform the tanh operation 1000 times and profile the performance
print("### CPU tanh")
with torch.autograd.profiler.profile(record_shapes=True) as prof:
for i in range(1000):
output = x.tanh()
#Print the profiling results sorted by self CPU time
print(prof.key_averages().table(sort_by="self_cpu_time_total"))
#Optionally print the final output (if needed, uncomment the following line)
print(output)
```
cc @jgong5 @mingfeima @XiaobingSuper @sanchitintel @ashokei @jingxu10 @malfet @snadampal @milpuz01 @aditew01 @nikhil-arm @fadara01 | true |
2,755,290,274 | Enable fx_quantization for arm | choudhary-devang | closed | [
"module: cpu",
"triaged",
"open source",
"module: arm",
"release notes: quantization",
"topic: not user facing"
] | 9 | NONE | FX Graph Mode Quantization (https://pytorch.org/docs/stable/quantization.html) is an automated quantization workflow in PyTorch and It improves upon Eager Mode Quantization by adding support for functionals and automating the quantization process.
Currently, this flow is enabled for CPU's only on x86 platforms.
**Goal of this PR:**
Enables FX Graph Mode Quantization for ARM CPU's
*This flow on ARM leverages ONEDNN kernels for computation and also picks the best pre-defined config for your choice/method of quantization.
*This PR also Introduces optimizations for few utility functions.
- Optimized utility functions (hsum, hsum_sq) using SIMD vectorization.
**Performance gain w.r.t Utility functions optimized:-**
**At function level** -> We observe 2x performance boost w.r.t utility functions introduced in comparison to scalar implementation (current OSS Implementation)
**At Model level :-**
**Model :-** vit_b_16
**with scalar implementation** -> 26772 microsec.
**with vectorized implementation** -> 26002 microsec.
cc @jgong5 @mingfeima @XiaobingSuper @sanchitintel @ashokei @jingxu10 @malfet @snadampal @milpuz01 | true |
2,755,271,047 | Modify the tolerance level in TIMM benchmark for XPU PreCI | xytintel | open | [
"triaged",
"open source",
"Stale",
"module: dynamo"
] | 5 | CONTRIBUTOR | cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @chenyang78 @kadeng @chauhang @amjames | true |
2,755,193,049 | [inductor] [cpu] [silent] `avg_pool2d` incorrectly process int64 | shaoyuyoung | closed | [
"triaged",
"oncall: pt2",
"oncall: cpu inductor"
] | 1 | CONTRIBUTOR | ### 🐛 Describe the bug
I think this is related to #143729 but the symptom is different.
in #143729, CPU inductor raises `compileError` but this time, avg_pool2d outputs a silent incorrectness.
Should this be a **hig-pri**?
BTW, cuda would reject the Long dtype.
exposed area: `avg_pool1d`, `avg_pool2d` and `avg_pool3d`
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.manual_seed(0)
from torch._inductor import config
config.fallback_random = True
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
def forward(self, x):
torch.manual_seed(0)
x = torch.argsort(x, dim=3)
# x.dtype: torch.int64
x = F.avg_pool2d(x, kernel_size=2, stride=2)
return x
model = Model()
x = torch.randn(1, 1, 2, 4)
inputs = [x]
output = model(*inputs)
c_model = torch.compile(model)
c_output = c_model(*inputs)
print(output)
print(c_output)
```
### Error logs
tensor([[[[1, 2]]]])
tensor([[[[0, 0]]]])
### Versions
Exactly the same as #143729
cc @chauhang @penguinwu | true |
2,755,149,145 | Enable FSDP2 on XPU device | zhangxiaoli73 | closed | [
"oncall: distributed",
"triaged",
"open source",
"Merged",
"ciflow/trunk",
"release notes: distributed (fsdp)"
] | 7 | CONTRIBUTOR | **Motivation:** Enabling FSDP2 on XPU device.
cc @H-Huang @awgu @kwen2501 @wanchaol @fegin @fduwjj @wz337 @wconstab @d4l3k @c-p-i-o @gujinghui @jgong5 @guangyey | true |
2,755,143,999 | Add torch.topk indices vary description | zeshengzong | closed | [
"triaged",
"open source",
"Merged",
"ciflow/trunk",
"release notes: python_frontend"
] | 3 | CONTRIBUTOR | Fixes #133542
**Test Result**
**Before**

**After**

| true |
2,755,133,648 | Enable coalescing path on XPU and dispatch to XPU tensor barrier if XCCL backend is specified. | zhangxiaoli73 | closed | [
"oncall: distributed",
"triaged",
"open source",
"Merged",
"ciflow/trunk",
"release notes: distributed (c10d)"
] | 10 | CONTRIBUTOR | **Motivation:**
- Enable coalescing path on XPU for `batch_isend_irecv`.
- If XCCL backend is specified, then construct a XPU tensor to ensure `barrier` dispatch to XCCL backend.
cc @H-Huang @awgu @kwen2501 @wanchaol @fegin @fduwjj @wz337 @wconstab @d4l3k @c-p-i-o @gujinghui @jgong5 @guangyey | true |
2,755,119,487 | [ROCm] [APU] Incorrect call of HIP mem outage | KISSEsWHISPERsFEEtBACKHUGs | open | [
"module: rocm",
"triaged"
] | 3 | NONE | ```
HSA_OVERRIDE_GFX_VERSION=9.0.0 \
CL_DEVICE_GLOBAL_FREE_MEMORY_AMD=24396768 \
CL_DEVICE_GLOBAL_MEM_SIZE=25189056512 \
CL_DEVICE_MAX_MEM_ALLOC_SIZE=21410698032 \
PYTORCH_HIP_MEM_ALLOC=strict PYTORCH_NO_HIP_MEMORY_CACHING=1 AMD_SERIALIZE_KERNEL=3 TORCH_USE_HIP_DSA=1 \
HSA_ENABLE_SDMA=0 HSA_ENABLE_INTERRUPT=0 AMD_LOG_LEVEL=1 AMD_LOG_MASK=0x1 HIP_LAUNCH_BLOCKING=0 GPU_DUMP_CODE_OBJECT=0 AMD_SERIALIZE_COPY=3 AMD_KERNEL_DISPATCH=1 GPU_MAX_HW_QUEUES=0 \
HIP_HIDDEN_FREE_MEM=16384 GPU_STREAMOPS_CP_WAIT=1 PAL_ALWAYS_RESIDENT=1 REMOTE_ALLOC=1 HIP_HOST_COHERENT=1 HIP_MEM_POOL_SUPPORT=1 GPU_MAX_REMOTE_MEM_SIZE=8192 HIP_VMEM_MANAGE_SUPPORT=0 \
PYTORCH_HIP_ALLOC_CONF=garbage_collection_threshold:0.25,max_split_size_mb:2560,expandable_segments:True \
python3.11 main.py --highvram --disable-smart-memory --disable-cuda-malloc --listen 127.0.0.1 --auto-launch \
--verbose ERROR --port 4096 --preview-method latent2rgb \
--output-directory /opt/local/.8A/terminal/ComfyAI/Downloads/04112025
```
```
## System Information
- **ComfyUI Version:** unknown
- **Arguments:** main.py --highvram --disable-smart-memory --disable-cuda-malloc --listen 127.0.0.1 --auto-launch --verbose ERROR --port 4096 --preview-method latent2rgb --output-directory /opt/local/.8A/terminal/ComfyAI/Downloads/04112025
- **OS:** posix
- **Python Version:** 3.11.9 (main, Jul 9 2024, 00:31:01) [GCC 14.1.1 20240522]
- **Embedded Python:** false
- **PyTorch Version:** 2.5.1+rocm6.3.0
## Devices
- **Name:** cuda:0 AMD Radeon Graphics : native
- **Type:** cuda
- **VRAM Total:** 25189052416
- **VRAM Free:** 4532178944
- **Torch VRAM Total:** 0
- **Torch VRAM Free:** 0
```
### ROCmInfo
```
ROCk module is loaded
=====================
HSA System Attributes
=====================
Runtime Version: 1.14
Runtime Ext Version: 1.6
System Timestamp Freq.: 1000.000000MHz
Sig. Max Wait Duration: 18446744073709551615 (0xFFFFFFFFFFFFFFFF) (timestamp count)
Machine Model: LARGE
System Endianness: LITTLE
Mwaitx: DISABLED
DMAbuf Support: YES
==========
HSA Agents
==========
*******
Agent 1
*******
Name: AMD Ryzen 7 PRO 4750G with Radeon Graphics
Uuid: CPU-XX
Marketing Name: AMD Ryzen 7 PRO 4750G with Radeon Graphics
Vendor Name: CPU
Feature: None specified
Profile: FULL_PROFILE
Float Round Mode: NEAR
Max Queue Number: 0(0x0)
Queue Min Size: 0(0x0)
Queue Max Size: 0(0x0)
Queue Type: MULTI
Node: 0
Device Type: CPU
Cache Info:
L1: 32768(0x8000) KB
Chip ID: 0(0x0)
ASIC Revision: 0(0x0)
Cacheline Size: 64(0x40)
Max Clock Freq. (MHz): 3600
BDFID: 0
Internal Node ID: 0
Compute Unit: 16
SIMDs per CU: 0
Shader Engines: 0
Shader Arrs. per Eng.: 0
WatchPts on Addr. Ranges:1
Memory Properties:
Features: None
Pool Info:
Pool 1
Segment: GLOBAL; FLAGS: FINE GRAINED
Size: 49197372(0x2eeb13c) KB
Allocatable: TRUE
Alloc Granule: 4KB
Alloc Recommended Granule:4KB
Alloc Alignment: 4KB
Accessible by all: TRUE
Pool 2
Segment: GLOBAL; FLAGS: EXTENDED FINE GRAINED
Size: 49197372(0x2eeb13c) KB
Allocatable: TRUE
Alloc Granule: 4KB
Alloc Recommended Granule:4KB
Alloc Alignment: 4KB
Accessible by all: TRUE
Pool 3
Segment: GLOBAL; FLAGS: KERNARG, FINE GRAINED
Size: 49197372(0x2eeb13c) KB
Allocatable: TRUE
Alloc Granule: 4KB
Alloc Recommended Granule:4KB
Alloc Alignment: 4KB
Accessible by all: TRUE
Pool 4
Segment: GLOBAL; FLAGS: COARSE GRAINED
Size: 49197372(0x2eeb13c) KB
Allocatable: TRUE
Alloc Granule: 4KB
Alloc Recommended Granule:4KB
Alloc Alignment: 4KB
Accessible by all: TRUE
ISA Info:
*******
Agent 2
*******
Name: gfx90c
Uuid: GPU-XX
Marketing Name: AMD Radeon Graphics
Vendor Name: AMD
Feature: KERNEL_DISPATCH
Profile: BASE_PROFILE
Float Round Mode: NEAR
Max Queue Number: 128(0x80)
Queue Min Size: 64(0x40)
Queue Max Size: 131072(0x20000)
Queue Type: MULTI
Node: 1
Device Type: GPU
Cache Info:
L1: 16(0x10) KB
L2: 1024(0x400) KB
Chip ID: 5686(0x1636)
ASIC Revision: 0(0x0)
Cacheline Size: 64(0x40)
Max Clock Freq. (MHz): 2100
BDFID: 1024
Internal Node ID: 1
Compute Unit: 8
SIMDs per CU: 4
Shader Engines: 1
Shader Arrs. per Eng.: 1
WatchPts on Addr. Ranges:4
Coherent Host Access: FALSE
Memory Properties: APU
Features: KERNEL_DISPATCH
Fast F16 Operation: TRUE
Wavefront Size: 64(0x40)
Workgroup Max Size: 1024(0x400)
Workgroup Max Size per Dimension:
x 1024(0x400)
y 1024(0x400)
z 1024(0x400)
Max Waves Per CU: 40(0x28)
Max Work-item Per CU: 2560(0xa00)
Grid Max Size: 4294967295(0xffffffff)
Grid Max Size per Dimension:
x 4294967295(0xffffffff)
y 4294967295(0xffffffff)
z 4294967295(0xffffffff)
Max fbarriers/Workgrp: 32
Packet Processor uCode:: 472
SDMA engine uCode:: 40
IOMMU Support:: None
Pool Info:
Pool 1
Segment: GLOBAL; FLAGS: COARSE GRAINED
Size: 24598684(0x177589c) KB
Allocatable: TRUE
Alloc Granule: 4KB
Alloc Recommended Granule:2048KB
Alloc Alignment: 4KB
Accessible by all: FALSE
Pool 2
Segment: GLOBAL; FLAGS: EXTENDED FINE GRAINED
Size: 24598684(0x177589c) KB
Allocatable: TRUE
Alloc Granule: 4KB
Alloc Recommended Granule:2048KB
Alloc Alignment: 4KB
Accessible by all: FALSE
Pool 3
Segment: GROUP
Size: 64(0x40) KB
Allocatable: FALSE
Alloc Granule: 0KB
Alloc Recommended Granule:0KB
Alloc Alignment: 0KB
Accessible by all: FALSE
ISA Info:
ISA 1
Name: amdgcn-amd-amdhsa--gfx90c:xnack-
Machine Models: HSA_MACHINE_MODEL_LARGE
Profiles: HSA_PROFILE_BASE
Default Rounding Mode: NEAR
Default Rounding Mode: NEAR
Fast f16: TRUE
Workgroup Max Size: 1024(0x400)
Workgroup Max Size per Dimension:
x 1024(0x400)
y 1024(0x400)
z 1024(0x400)
Grid Max Size: 4294967295(0xffffffff)
Grid Max Size per Dimension:
x 4294967295(0xffffffff)
y 4294967295(0xffffffff)
z 4294967295(0xffffffff)
FBarrier Max Size: 32
*** Done ***
```
### ImageOnlyCheckpointSave
```
!!! Exception during processing !!! HIP error: out of memory
Compile with `TORCH_USE_HIP_DSA` to enable device-side assertions.
Traceback (most recent call last):
File "/opt/local/.8A/terminal/ComfyAI/ComfyUI/execution.py", line 328, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/local/.8A/terminal/ComfyAI/ComfyUI/execution.py", line 203, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/local/.8A/terminal/ComfyAI/ComfyUI/execution.py", line 174, in _map_node_over_list
process_inputs(input_dict, i)
File "/opt/local/.8A/terminal/ComfyAI/ComfyUI/execution.py", line 163, in process_inputs
results.append(getattr(obj, func)(**inputs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/local/.8A/terminal/ComfyAI/ComfyUI/comfy_extras/nodes_video_model.py", line 121, in save
comfy_extras.nodes_model_merging.save_checkpoint(model, clip_vision=clip_vision, vae=vae, filename_prefix=filename_prefix, output_dir=self.output_dir, prompt=prompt, extra_pnginfo=extra_pnginfo)
File "/opt/local/.8A/terminal/ComfyAI/ComfyUI/comfy_extras/nodes_model_merging.py", line 222, in save_checkpoint
comfy.sd.save_checkpoint(output_checkpoint, model, clip, vae, clip_vision, metadata=metadata, extra_keys=extra_keys)
File "/opt/local/.8A/terminal/ComfyAI/ComfyUI/comfy/sd.py", line 960, in save_checkpoint
model_management.load_models_gpu(load_models, force_patch_weights=True)
File "/opt/local/.8A/terminal/ComfyAI/ComfyUI/comfy/model_management.py", line 526, in load_models_gpu
loaded_model.model_load(lowvram_model_memory, force_patch_weights=force_patch_weights)
File "/opt/local/.8A/terminal/ComfyAI/ComfyUI/comfy/model_management.py", line 342, in model_load
self.model_use_more_vram(use_more_vram, force_patch_weights=force_patch_weights)
File "/opt/local/.8A/terminal/ComfyAI/ComfyUI/comfy/model_management.py", line 371, in model_use_more_vram
return self.model.partially_load(self.device, extra_memory, force_patch_weights=force_patch_weights)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/local/.8A/terminal/ComfyAI/ComfyUI/comfy/model_patcher.py", line 759, in partially_load
raise e
File "/opt/local/.8A/terminal/ComfyAI/ComfyUI/comfy/model_patcher.py", line 756, in partially_load
self.load(device_to, lowvram_model_memory=current_used + extra_memory, force_patch_weights=force_patch_weights, full_load=full_load)
File "/opt/local/.8A/terminal/ComfyAI/ComfyUI/comfy/model_patcher.py", line 601, in load
self.patch_weight_to_device("{}.{}".format(n, param), device_to=device_to)
File "/opt/local/.8A/terminal/ComfyAI/ComfyUI/comfy/model_patcher.py", line 513, in patch_weight_to_device
out_weight = comfy.lora.calculate_weight(self.patches[key], temp_weight, key)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/local/.8A/terminal/ComfyAI/ComfyUI/comfy/lora.py", line 497, in calculate_weight
weight += function(strength * comfy.model_management.cast_to_device(diff, weight.device, weight.dtype))
~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
RuntimeError: HIP error: out of memory
Compile with `TORCH_USE_HIP_DSA` to enable device-side assertions.
fatal: No names found, cannot describe anything.
```
### Reproductions,
(1) Download & run this workflow for Comfy on your AMD APU HW system. https://pastebin.com/qfKUrJc9, with downloading Shutt1eMix and Shutt1e3D Official SFT ckpt model images (CivitAI) onto `$COMFY_ROOT/models/unet`, and Comfy’s ClipVisionG (HF) onto `$COMFY_ROOT/models/clip_vision`,
(2) Look at checkpoints saving phase and also with Plasma system monitre,
(3) Workflow previews and demonstraitions of errors in pink and the not so outaged HW memories but still HIP mem outage somewhy,


### HW
AMD Zen2 R4750G APU (HSA 9.0.0) × 14.9GiB UniVRAM’s with OpenCL-ROCm 6.3.0
cc @ROCm @ComfyAnonymous @jeffdaily @sunway513 @jithunnair-amd @pruthvistony @ROCmSupport @dllehr-amd @jataylo @hongxiayang @naromero77amd | true |
2,755,094,414 | [CI] enable operator benchmark on CPU | LifengWang | closed | [
"triaged",
"open source",
"Merged",
"ciflow/trunk",
"release notes: releng",
"skip-pr-sanity-checks",
"ciflow/op-benchmark"
] | 25 | CONTRIBUTOR | This is to enable operator benchmark for CPU to track op level performance. This PR is motivated by PR: https://github.com/pytorch/pytorch/issues/120982 and investigate feasibility in https://github.com/pytorch/pytorch/pull/127216
cc @albanD | true |
2,755,068,410 | [export]`torch.export(strict=False)` produce wrong program when provide kwargs with arbitrary order | FindDefinition | closed | [
"oncall: pt2",
"oncall: export"
] | 2 | NONE | ### 🐛 Describe the bug
torch.export produce wrong program when we use kwargs that have different order with `forward` signature and `strict=False`.
* Reproduce Code
```Python
import torch
class TestKwMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer1 = torch.nn.Linear(3, 16)
self.layer2 = torch.nn.Linear(3, 32)
def forward(self, x1, x2, flag=True):
x1o = self.layer1(x1)
x2o = self.layer2(x2)
return torch.cat([x1o, x2o], dim=1)
def main():
mod = TestKwMod()
gm = torch.export.export(mod, (torch.rand(1, 3), ), {
"flag": False,
"x2": torch.rand(1, 3),
}, strict=False)
print(gm)
if __name__ == "__main__":
main()
```
* Wrong program (`strict=False`) and graph

```
class GraphModule(torch.nn.Module):
def forward(self, p_layer1_weight: "f32[16, 3]", p_layer1_bias: "f32[16]", p_layer2_weight: "f32[32, 3]", p_layer2_bias: "f32[32]", x1: "f32[1, 3]", x2, flag: "f32[1, 3]"):
# File: /path/to/torch/nn/modules/linear.py:125 in forward, code: return F.linear(input, self.weight, self.bias)
linear: "f32[1, 16]" = torch.ops.aten.linear.default(x1, p_layer1_weight, p_layer1_bias); x1 = p_layer1_weight = p_layer1_bias = None
# File: /path/to/torch/nn/modules/linear.py:125 in forward, code: return F.linear(input, self.weight, self.bias)
linear_1: "f32[1, 32]" = torch.ops.aten.linear.default(flag, p_layer2_weight, p_layer2_bias); flag = p_layer2_weight = p_layer2_bias = None
# File: /path/to/export_bug2.py:15 in forward, code: return torch.cat([x1o, x2o], dim=1)
cat: "f32[1, 48]" = torch.ops.aten.cat.default([linear, linear_1], 1); linear = linear_1 = None
return (cat,)
```
* Correct Program (`strict=True`) and graph

```
class GraphModule(torch.nn.Module):
def forward(self, p_layer1_weight: "f32[16, 3]", p_layer1_bias: "f32[16]", p_layer2_weight: "f32[32, 3]", p_layer2_bias: "f32[32]", x1: "f32[1, 3]", flag, x2: "f32[1, 3]"):
# File: /path/to/export_bug2.py:12 in forward, code: x1o = self.layer1(x1)
linear: "f32[1, 16]" = torch.ops.aten.linear.default(x1, p_layer1_weight, p_layer1_bias); x1 = p_layer1_weight = p_layer1_bias = None
# File: /path/to/export_bug2.py:13 in forward, code: x2o = self.layer2(x2)
linear_1: "f32[1, 32]" = torch.ops.aten.linear.default(x2, p_layer2_weight, p_layer2_bias); x2 = p_layer2_weight = p_layer2_bias = None
# File: /path/to/export_bug2.py:15 in forward, code: return torch.cat([x1o, x2o], dim=1)
cat: "f32[1, 48]" = torch.ops.aten.cat.default([linear, linear_1], 1); linear = linear_1 = None
return (cat,)
```
### Versions
`2.6.0.dev20241222+cu124`
cc @chauhang @penguinwu @avikchaudhuri @gmagogsfm @zhxchen17 @tugsbayasgalan @angelayi @suo @ydwu4 | true |
2,755,054,593 | [Easy] Add torch.range, torch.arange params optional description | zeshengzong | closed | [
"triaged",
"open source",
"Merged",
"ciflow/trunk",
"release notes: python_frontend"
] | 12 | CONTRIBUTOR | Fixes #129333
**Test Result**
**Before**


**After**


| true |
2,755,046,035 | Apply clang-format for ATen/core/op_registration headers | zeshengzong | closed | [
"triaged",
"open source",
"Stale",
"topic: not user facing"
] | 4 | CONTRIBUTOR | Code change via add path config in `.lintrunner.toml` file and running
```bash
$ lintrunner -a --take CLANGFORMAT --all-files
```
cc @malfet | true |
2,755,023,386 | [inductor] [cpu] [CppCompileError] inductor can't pass the check for multiplication of different dtypes of tensor | shaoyuyoung | closed | [
"triaged",
"oncall: pt2",
"oncall: cpu inductor"
] | 2 | CONTRIBUTOR | ### 🐛 Describe the bug
In this situation, the first return value for `torch.frexp` is **int32** and the second is **float32**.
When these two elements are multiplied, the inductor raises the **CppCompileError** while eager passes the check and outputs the correct result.
Interestingly, The CPU backend will reject the int32 externally as follows:
```
RuntimeError: "normal_kernel_cpu" not implemented for 'Int'
```
But this time, int32 is internal. The behavior of eager and inductor is not aligned on CPU
```python
import torch
import torch.nn as nn
torch.manual_seed(0)
from torch._inductor import config
config.fallback_random = True
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
def forward(self, x):
x_frac, x_exp = torch.frexp(x) # x_frac: int32, x_exp: float32
x = x_frac * x_exp
return x
x = torch.randn(4, 1) # the first element I set 4 can trigger the error
inputs = [x]
def run_test(inputs, mode, device):
model = Model()
if device == "cuda":
model = model.cuda()
inputs = [x.cuda() for x in inputs]
if mode == "inductor":
model = torch.compile(model)
try:
output = model(*inputs)
print(f"{mode} with {device} succeeds: {output}")
except Exception as e:
print(f"{mode} with {device} fails: {e}")
run_test(inputs, "eager", "cpu")
run_test(inputs, "inductor", "cpu") # fail
run_test(inputs, "eager", "cuda")
run_test(inputs, "inductor", "cuda")
```
### Error logs
```
eager with cpu succeeds: tensor([[ 0.7705],
[ 0.5869],
[-1.0894],
[ 0.0000]])
inductor with cpu fails: backend='inductor' raised:
CppCompileError: C++ compile error
eager with cuda succeeds: tensor([[ 0.7705],
[ 0.5869],
[-1.0894],
[ 0.0000]], device='cuda:0')
inductor with cuda succeeds: tensor([[ 0.7705],
[ 0.5869],
[-1.0894],
[ 0.0000]], device='cuda:0')
```
### Versions
PyTorch version: 2.6.0.dev20241218+cu126
OS: Ubuntu 20.04.6 LTS (x86_64)
CPU: Intel(R) Xeon(R) Gold 6248 CPU @ 2.50GHz
GPU: V100
<details>
<summary>click for detailed env</summary>
```
PyTorch version: 2.6.0.dev20241218+cu126
Is debug build: False
CUDA used to build PyTorch: 12.6
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.6 LTS (x86_64)
GCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.2) 9.4.0
Clang version: 16.0.1
CMake version: version 3.26.0
Libc version: glibc-2.31
Python version: 3.12.7 | packaged by Anaconda, Inc. | (main, Oct 4 2024, 13:27:36) [GCC 11.2.0] (64-bit runtime)
Python platform: Linux-5.4.0-202-generic-x86_64-with-glibc2.31
Is CUDA available: True
CUDA runtime version: 12.6.68
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration:
GPU 0: Tesla V100-SXM2-32GB
GPU 1: Tesla V100-SXM2-32GB
GPU 2: Tesla V100-SXM2-32GB
GPU 3: Tesla V100-SXM2-32GB
Nvidia driver version: 560.35.03
cuDNN version: Probably one of the following:
/usr/lib/x86_64-linux-gnu/libcudnn.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_adv.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_cnn.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_engines_precompiled.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_engines_runtime_compiled.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_graph.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_heuristic.so.9.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_ops.so.9.6.0
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
Address sizes: 40 bits physical, 48 bits virtual
CPU(s): 20
On-line CPU(s) list: 0-19
Thread(s) per core: 1
Core(s) per socket: 20
Socket(s): 1
NUMA node(s): 1
Vendor ID: GenuineIntel
CPU family: 6
Model: 85
Model name: Intel(R) Xeon(R) Gold 6248 CPU @ 2.50GHz
Stepping: 7
CPU MHz: 2499.996
BogoMIPS: 4999.99
Hypervisor vendor: KVM
Virtualization type: full
L1d cache: 640 KiB
L1i cache: 640 KiB
L2 cache: 80 MiB
L3 cache: 16 MiB
NUMA node0 CPU(s): 0-19
Vulnerability Gather data sampling: Unknown: Dependent on hypervisor status
Vulnerability Itlb multihit: KVM: Vulnerable
Vulnerability L1tf: Mitigation; PTE Inversion
Vulnerability Mds: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
Vulnerability Meltdown: Mitigation; PTI
Vulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
Vulnerability Retbleed: Mitigation; IBRS
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; IBRS; IBPB conditional; STIBP disabled; RSB filling; PBRSB-eIBRS Not affected; BHI SW loop, KVM SW loop
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch topoext cpuid_fault invpcid_single pti ssbd ibrs ibpb fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat umip pku ospke avx512_vnni
Versions of relevant libraries:
[pip3] numpy==1.26.4
[pip3] nvidia-cublas-cu12==12.6.4.1
[pip3] nvidia-cuda-cupti-cu12==12.6.80
[pip3] nvidia-cuda-nvrtc-cu12==12.6.77
[pip3] nvidia-cuda-runtime-cu12==12.6.77
[pip3] nvidia-cudnn-cu12==9.5.1.17
[pip3] nvidia-cufft-cu12==11.3.0.4
[pip3] nvidia-curand-cu12==10.3.7.77
[pip3] nvidia-cusolver-cu12==11.7.1.2
[pip3] nvidia-cusparse-cu12==12.5.4.2
[pip3] nvidia-cusparselt-cu12==0.6.3
[pip3] nvidia-nccl-cu12==2.21.5
[pip3] nvidia-nvjitlink-cu12==12.6.85
[pip3] nvidia-nvtx-cu12==12.6.77
[pip3] onnx==1.17.0
[pip3] onnxruntime==1.20.1
[pip3] onnxscript==0.1.0.dev20241205
[pip3] optree==0.13.1
[pip3] pytorch-triton==3.2.0+gitf9cdf582
[pip3] torch==2.6.0.dev20241218+cu126
[pip3] torchaudio==2.6.0.dev20241218+cu126
[pip3] torchvision==0.22.0.dev20241218+cu126
[pip3] triton==3.0.0
[conda] numpy 1.26.4 pypi_0 pypi
[conda] nvidia-cublas-cu12 12.6.4.1 pypi_0 pypi
[conda] nvidia-cuda-cupti-cu12 12.6.80 pypi_0 pypi
[conda] nvidia-cuda-nvrtc-cu12 12.6.77 pypi_0 pypi
[conda] nvidia-cuda-runtime-cu12 12.6.77 pypi_0 pypi
[conda] nvidia-cudnn-cu12 9.5.1.17 pypi_0 pypi
[conda] nvidia-cufft-cu12 11.3.0.4 pypi_0 pypi
[conda] nvidia-curand-cu12 10.3.7.77 pypi_0 pypi
[conda] nvidia-cusolver-cu12 11.7.1.2 pypi_0 pypi
[conda] nvidia-cusparse-cu12 12.5.4.2 pypi_0 pypi
[conda] nvidia-cusparselt-cu12 0.6.3 pypi_0 pypi
[conda] nvidia-nccl-cu12 2.21.5 pypi_0 pypi
[conda] nvidia-nvjitlink-cu12 12.6.85 pypi_0 pypi
[conda] nvidia-nvtx-cu12 12.6.77 pypi_0 pypi
[conda] optree 0.13.1 pypi_0 pypi
[conda] pytorch-triton 3.2.0+gitf9cdf582 pypi_0 pypi
[conda] torch 2.6.0.dev20241218+cu126 pypi_0 pypi
[conda] torchaudio 2.6.0.dev20241218+cu126 pypi_0 pypi
[conda] torchvision 0.22.0.dev20241218+cu126 pypi_0 pypi
[conda] triton 3.0.0 pypi_0 pypi
```
</details>
cc @chauhang @penguinwu | true |
2,754,863,156 | [ROCm] PyTorch multiprocess fails to create memory with IPC | GZGavinZhao | closed | [
"needs reproduction",
"module: multiprocessing",
"module: rocm",
"triaged"
] | 12 | NONE | ### 🐛 Describe the bug
Run the `mnist_hogwild` example from pytorch/examples@1bef748fab064e2fc3beddcbda60fd51cb9612d2 (current HEAD) using the command `python3 main.py --cuda`, I get the following error:
```
Traceback (most recent call last):
File "/home/gavinzhao/CS/ML/examples/mnist_hogwild/main.py", line 96, in <module>
p.start()
File "/usr/lib/python3.11/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/multiprocessing/context.py", line 288, in _Popen
return Popen(process_obj)
^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/multiprocessing/popen_spawn_posix.py", line 32, in __init__
super().__init__(process_obj)
File "/usr/lib/python3.11/multiprocessing/popen_fork.py", line 19, in __init__
self._launch(process_obj)
File "/usr/lib/python3.11/multiprocessing/popen_spawn_posix.py", line 47, in _launch
reduction.dump(process_obj, fp)
File "/usr/lib/python3.11/multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
File "/usr/lib/python3.11/site-packages/torch/multiprocessing/reductions.py", line 354, in reduce_tensor
) = storage._share_cuda_()
^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/torch/storage.py", line 1422, in _share_cuda_
return self._untyped_storage._share_cuda_(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
RuntimeError: HIP error: invalid argument
HIP kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
For debugging consider passing AMD_SERIALIZE_KERNEL=3
Compile with `TORCH_USE_HIP_DSA` to enable device-side assertions.
```
I expect no errors to occur.
More info:
- The wheels are the official wheels from https://download.pytorch.org/whl/rocm6.2
- The error occurs on `gfx1032`, `gfx90c`, and `gfx900`. I have not tested on other architectures. Logs ran with the environment variable `AMD_LOG_LEVEL=7` are attached for `gfx1032` (masked as `gfx1030` using `HSA_OVERRIDE_GFX_VERSION=10.3.0`) and `gfx90c` (masked as `gfx900` using `HSA_OVERRIDE_GFX_VERSION=9.0.0`).
[log-gfx90c.txt](https://github.com/user-attachments/files/18223341/log-gfx90c.txt)
[log-gfx1032.txt](https://github.com/user-attachments/files/18223342/log-gfx1032.txt)
### Versions
Collecting environment information...
PyTorch version: 2.5.1+rocm6.2
Is debug build: False
CUDA used to build PyTorch: N/A
ROCM used to build PyTorch: 6.2.41133-dd7f95766
OS: Solus 4.6 Convergence (x86_64)
GCC version: (Solus) 14.2.0
Clang version: 19.1.5 (Solus 19.1.5-128)
CMake version: version 3.30.3
Libc version: glibc-2.40
Python version: 3.11.11 (main, Dec 4 2024, 21:40:29) [GCC 14.2.0] (64-bit runtime)
Python platform: Linux-6.12.5-311.current-x86_64-with-glibc2.40
Is CUDA available: True
CUDA runtime version: Could not collect
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration: AMD Radeon Graphics (gfx90c:xnack-)
Nvidia driver version: Could not collect
cuDNN version: Could not collect
HIP runtime version: 6.2.41133
MIOpen runtime version: 3.2.0
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 48 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 16
On-line CPU(s) list: 0-15
Vendor ID: AuthenticAMD
Model name: AMD Ryzen 7 5800H with Radeon Graphics
CPU family: 25
Model: 80
Thread(s) per core: 2
Core(s) per socket: 8
Socket(s): 1
Stepping: 0
Frequency boost: disabled
CPU(s) scaling MHz: 63%
CPU max MHz: 3201.0000
CPU min MHz: 400.0000
BogoMIPS: 6388.53
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid extd_apicid aperfmperf rapl pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb cat_l3 cdp_l3 hw_pstate ssbd mba ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 erms invpcid cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local user_shstk clzero irperf xsaveerptr rdpru wbnoinvd cppc arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif v_spec_ctrl umip pku ospke vaes vpclmulqdq rdpid overflow_recov succor smca fsrm debug_swap
Virtualization: AMD-V
L1d cache: 256 KiB (8 instances)
L1i cache: 256 KiB (8 instances)
L2 cache: 4 MiB (8 instances)
L3 cache: 16 MiB (1 instance)
NUMA node(s): 1
NUMA node0 CPU(s): 0-15
Vulnerability Gather data sampling: Not affected
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Not affected
Vulnerability Reg file data sampling: Not affected
Vulnerability Retbleed: Not affected
Vulnerability Spec rstack overflow: Mitigation; Safe RET
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Retpolines; IBPB conditional; IBRS_FW; STIBP always-on; RSB filling; PBRSB-eIBRS Not affected; BHI Not affected
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Versions of relevant libraries:
[pip3] numpy==1.26.3
[pip3] pytorch-triton-rocm==3.1.0
[pip3] torch==2.5.1+rocm6.2
[pip3] torchvision==0.20.1+rocm6.2
[conda] Could not collect
cc @VitalyFedyunin @albanD @jeffdaily @sunway513 @jithunnair-amd @pruthvistony @ROCmSupport @dllehr-amd @jataylo @hongxiayang @naromero77amd | true |
2,754,831,511 | [Codemod][AddExplicitStrictExportArg] Update export test harness | gmagogsfm | closed | [
"fb-exported",
"Stale",
"topic: not user facing"
] | 3 | CONTRIBUTOR | Differential Revision: D67580336
| true |
2,754,810,418 | Adding support for differentiable lr, weight_decay, and betas in Adam/AdamW | EmmettBicker | closed | [
"oncall: distributed",
"open source",
"Merged",
"ciflow/trunk",
"release notes: optim"
] | 19 | CONTRIBUTOR | Third PR in a series of PRs to broaden differentiable optimizer support w/ @janeyx99 (sorry for pinging over the holidays! I just wanted to put this one out but I am definitely not asking for review or anything like that rn)
This is also going to probably be my last PR before the holidays!
Note: This is a branch of #143710 -- I've never worked on a branch of a branch before so I wasn't sure about the protocol so I thought I'd just made the PR and wait until that one gets merged.
This is adding support for differentiable lr, weight_decay, and betas to Adam and AdamW (but after refactoring AdamW into an Adam subclass, it's really just changing code in torch/optim/adam.py)
I had one main thing I was wondering about, which is that adam already has a differentiable flag built in, so I have code like this
```py
if differentiable and isinstance(beta2, Tensor):
if beta2.requires_grad:
exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj().mul(1 - beta2))
else:
exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
else:
exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
```
That I could definitely simplify to just
```py
if differentiable and isinstance(beta2, Tensor):
exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj().mul(1 - beta2))
else:
exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
```
It would definitely be a little slower in the case that it's differentiable but doesn't need a grad for beta2, but the code would also be a lot more clear and I'm debating speed vs future code usability.
Also the line in the above example:
```py
exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj().mul(1 - beta2))
```
was concerning to me because it is considerably more expensive than `value=1 - beta2`, but I couldn't think of a better way to do it.
Further work on #141832
cc @H-Huang @awgu @kwen2501 @wanchaol @fegin @fduwjj @wz337 @wconstab @d4l3k @c-p-i-o | true |
2,754,807,064 | Better fix for f-strings in set_linter for py3.12 | jansel | closed | [
"Merged",
"ciflow/trunk",
"topic: not user facing"
] | 5 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* __->__ #143725
#143628 didn't handle a few cases right for example:
```py
$ python3 tools/linter/adapters/set_linter.py torch/_inductor/scheduler.py
torch/_inductor/scheduler.py:261:24: Builtin `set` is deprecated
259 | multiline=False,
260 | )
261 | return f"{self}{data_str}"
^
262 |
263 | def log_details(self) -> None:
torch/_inductor/scheduler.py:261:33: Builtin `set` is deprecated
259 | multiline=False,
260 | )
261 | return f"{self}{data_str}"
^
262 |
263 | def log_details(self) -> None:
```
also multi-line fstrings | true |
2,754,782,528 | nn.MultiheadAttention string representation | jake-yukich | closed | [
"triaged",
"open source",
"Stale",
"topic: not user facing"
] | 6 | NONE | Fixes #143669
| true |
2,754,759,191 | Inductor Cutlass backend: Eliminate unused code. | kadeng | closed | [
"fb-exported",
"Merged",
"ciflow/trunk",
"topic: not user facing",
"module: inductor",
"ciflow/inductor"
] | 5 | CONTRIBUTOR | Summary: Eliminates an unused file and some smaller unused code fragments from the inductor cutlass codebase.
Test Plan: CI
Differential Revision: D67579837
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @muchulee8 @ColinPeppler @amjames @desertfire @chauhang @aakhundov | true |
2,754,725,140 | [dynamo] Remove DICT_SUBCLASS_GUARD_MANAGER and use dict.keys | anijain2305 | closed | [
"Merged",
"Reverted",
"ciflow/trunk",
"topic: not user facing",
"module: dynamo",
"ciflow/inductor",
"ci-no-td"
] | 4 | CONTRIBUTOR | Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* #143698
* #143699
* __->__ #143722
In hinsight, we never needed a DICT_SUBCLASS_GUARD_MANAGER, because Dynamo would inline through the overridden keys method. In this PR, we ensure that while creating guards and constructing variable trackers, we get the `d.keys()` value by using `dict.keys(d)`. This ensures that we do not call overridden keys method. Therefore, the C++ guard can use `PyDict_Next` directly to check the guards.
cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @chenyang78 @kadeng @chauhang @amjames | true |
2,754,690,800 | add "enabled=True" to DistributedDataParallel.no_sync() | avihu111 | open | [
"oncall: distributed",
"module: ddp"
] | 4 | NONE | ### 🚀 The feature, motivation and pitch
Training a model with DDP and gradient accumulation is quite common.
To avoid unnecessary sync, the no_sync() operation is used.
Providing an `enabled=True` argument is already done in pytorch, and is very useful in pytorch in `torch.amp.autocast` and `torch.amp.GradScaler`.
```
if (step % grad_accum_steps + 1) == 0:
# forward+ backward code
loss = ddp_model(inputs)
(loss / grad_accum_steps).backward()
else:
with ddp_model.no_sync():
# forward + backward code
loss = ddp_model(inputs)
(loss / grad_accum_steps).backward()
```
using the `enabled` argument this can be simplified, preventing bug-prone code duplications:
```
with ddp_model.no_sync(enabled=(step % grad_accum_steps + 1) != 0):
loss = ddp_model(inputs)
(loss / grad_accum_steps).backward()
```
The implementation doesn't seem hard, and it will be back-compatible.
### Alternatives
_No response_
### Additional context
DDP with grad accum:
https://discuss.pytorch.org/t/gradient-accumulation-with-ddp-no-sync-interface/169593/3
Current no_sync implementation:
https://github.com/pytorch/pytorch/blob/main/torch/nn/parallel/distributed.py#L1420
torch.amp.autocast enabled=True API:
https://github.com/pytorch/pytorch/blob/09c950cc872dfcee453307db47fa10553c3f5616/torch/amp/autocast_mode.py#L222
cc @H-Huang @awgu @kwen2501 @wanchaol @fegin @fduwjj @wz337 @wconstab @d4l3k @c-p-i-o | true |
2,754,621,034 | [inductor][gpu] torch.nn.functional.avg_pool1d outputs incorrect result when input.numel() is 1 | maybeLee | closed | [
"module: nn",
"triaged",
"oncall: pt2",
"module: inductor"
] | 3 | CONTRIBUTOR | ### 🐛 Describe the bug
This issue is similar to my previous one (https://github.com/pytorch/pytorch/issues/143719).
When the `input` argument contains only one element, torch.nn.functional.avg_pool1d will output incorrect result.
Here is the code to reproduce:
```
import torch
@torch.compile
def avg_pool1d(input, kernel_size, stride=None, padding=0):
return torch.nn.functional.avg_pool1d(input, kernel_size, stride, padding)
input = torch.tensor([[1.7641]])
kernel_size = 4
stride = 3
padding = 2
input = input.cuda()
print(f"[CUDA] AvgPool1d in compiled mode: {avg_pool1d(input, kernel_size, stride, padding)}")
print(f"[CUDA] AvgPool1d in eager mode: {torch.nn.functional.avg_pool1d(input, kernel_size, stride, padding)}")
input = input.cpu()
print(f"[CPU] AvgPool1d in compiled mode: {avg_pool1d(input, kernel_size, stride, padding)}")
print(f"[CPU] AvgPool1d in eager mode: {torch.nn.functional.avg_pool1d(input, kernel_size, stride, padding)}")
```
The output is:
```
[CUDA] AvgPool1d in compiled mode: tensor([[1.7641]], device='cuda:0')
[CUDA] AvgPool1d in eager mode: tensor([[0.4410]], device='cuda:0')
[CPU] AvgPool1d in compiled mode: tensor([[0.4410]])
[CPU] AvgPool1d in eager mode: tensor([[0.4410]])
```
### Versions
Collecting environment information...
PyTorch version: 2.6.0a0+gite15442a
Is debug build: False
CUDA used to build PyTorch: 12.4
ROCM used to build PyTorch: N/A
OS: Ubuntu 22.04.4 LTS (x86_64)
GCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0
Clang version: Could not collect
CMake version: version 3.31.2
Libc version: glibc-2.35
Python version: 3.11.10 | packaged by conda-forge | (main, Oct 16 2024, 01:27:36) [GCC 13.3.0] (64-bit runtime)
Python platform: Linux-5.14.0-427.37.1.el9_4.x86_64-x86_64-with-glibc2.35
Is CUDA available: True
CUDA runtime version: 12.4.131
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration:
GPU 0: NVIDIA GeForce RTX 3090
GPU 1: NVIDIA GeForce RTX 3090
GPU 2: NVIDIA GeForce RTX 3090
GPU 3: NVIDIA GeForce RTX 3090
Nvidia driver version: 560.35.03
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 43 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 64
On-line CPU(s) list: 0-63
Vendor ID: AuthenticAMD
Model name: AMD Ryzen Threadripper PRO 3975WX 32-Cores
CPU family: 23
Model: 49
Thread(s) per core: 2
Core(s) per socket: 32
Socket(s): 1
Stepping: 0
Frequency boost: enabled
CPU max MHz: 4368.1641
CPU min MHz: 2200.0000
BogoMIPS: 7000.73
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid aperfmperf rapl pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb cat_l3 cdp_l3 hw_pstate ssbd mba ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr rdpru wbnoinvd amd_ppin arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif v_spec_ctrl umip rdpid overflow_recov succor smca sev sev_es
Virtualization: AMD-V
L1d cache: 1 MiB (32 instances)
L1i cache: 1 MiB (32 instances)
L2 cache: 16 MiB (32 instances)
L3 cache: 128 MiB (8 instances)
NUMA node(s): 1
NUMA node0 CPU(s): 0-63
Vulnerability Gather data sampling: Not affected
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Not affected
Vulnerability Retbleed: Mitigation; untrained return thunk; SMT enabled with STIBP protection
Vulnerability Spec rstack overflow: Mitigation; Safe RET
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Retpolines, IBPB conditional, STIBP always-on, RSB filling, PBRSB-eIBRS Not affected
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Versions of relevant libraries:
[pip3] mypy==1.13.0
[pip3] mypy-extensions==1.0.0
[pip3] numpy==1.26.2
[pip3] nvidia-cublas-cu12==12.4.5.8
[pip3] nvidia-cuda-cupti-cu12==12.4.127
[pip3] nvidia-cuda-nvrtc-cu12==12.4.127
[pip3] nvidia-cuda-runtime-cu12==12.4.127
[pip3] nvidia-cudnn-cu12==9.1.0.70
[pip3] nvidia-cufft-cu12==11.2.1.3
[pip3] nvidia-curand-cu12==10.3.5.147
[pip3] nvidia-cusolver-cu12==11.6.1.9
[pip3] nvidia-cusparse-cu12==12.3.1.170
[pip3] nvidia-nccl-cu12==2.21.5
[pip3] nvidia-nvjitlink-cu12==12.4.127
[pip3] nvidia-nvtx-cu12==12.4.127
[pip3] onnx==1.17.0
[pip3] onnxscript==0.1.0.dev20240817
[pip3] optree==0.13.0
[pip3] torch==2.6.0a0+gite15442a
[pip3] triton==3.1.0
[conda] numpy 1.26.2 pypi_0 pypi
[conda] nvidia-cublas-cu12 12.4.5.8 pypi_0 pypi
[conda] nvidia-cuda-cupti-cu12 12.4.127 pypi_0 pypi
[conda] nvidia-cuda-nvrtc-cu12 12.4.127 pypi_0 pypi
[conda] nvidia-cuda-runtime-cu12 12.4.127 pypi_0 pypi
[conda] nvidia-cudnn-cu12 9.1.0.70 pypi_0 pypi
[conda] nvidia-cufft-cu12 11.2.1.3 pypi_0 pypi
[conda] nvidia-curand-cu12 10.3.5.147 pypi_0 pypi
[conda] nvidia-cusolver-cu12 11.6.1.9 pypi_0 pypi
[conda] nvidia-cusparse-cu12 12.3.1.170 pypi_0 pypi
[conda] nvidia-nccl-cu12 2.21.5 pypi_0 pypi
[conda] nvidia-nvjitlink-cu12 12.4.127 pypi_0 pypi
[conda] nvidia-nvtx-cu12 12.4.127 pypi_0 pypi
[conda] optree 0.13.0 pypi_0 pypi
[conda] torch 2.6.0a0+gite15442a pypi_0 pypi
[conda] triton 3.1.0 pypi_0 pypi
cc @albanD @mruberry @jbschlosser @walterddr @mikaylagawarecki @chauhang @penguinwu @voznesenskym @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @ipiszy @yf225 @chenyang78 @kadeng @muchulee8 @ColinPeppler @amjames @desertfire @aakhundov | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.