Morxos committed on
Commit
c2e4cd9
·
verified ·
1 Parent(s): 6cd5c5a

Upload 6 files

Browse files
dataset_analysis/process.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ import os
3
+ import json
4
+ import multiprocessing as mp
5
+ from functools import partial
6
+ from pathlib import Path
7
+
8
+ from core.analysis import (
9
+ get_module_statistics,
10
+ get_function_statistics,
11
+ add_dict_to_corpus,
12
+ )
13
+
14
+ # --------------------------------------------------------------------------- #
15
+ # Worker runs in a separate process
16
+ # --------------------------------------------------------------------------- #
17
def analyse_file(filename: str, wasm_dir: str, max_bytes: int):
    """
    Analyse a single WebAssembly text (.wat) file.

    Parameters
    ----------
    filename : str
        Name of the file inside ``wasm_dir``.
    wasm_dir : str
        Directory that contains the .wat samples.
    max_bytes : int
        Files larger than this (in bytes) are skipped.

    Returns
    -------
    tuple | None
        ``(module_stats, [function_stats, ...])`` on success, or ``None``
        if the file should be skipped (wrong extension, too large, or any
        error raised during analysis).
    """
    # Only WebAssembly *text* files are analysed; everything else is skipped.
    if not filename.endswith(".wat"):
        return None

    path = Path(wasm_dir) / filename
    try:
        if path.stat().st_size > max_bytes:
            # Name the offending file so the skip is traceable in the logs
            # (the original printed a useless "(unknown)" placeholder).
            print(f"[worker] Skipping {filename}: larger than {max_bytes} bytes")
            return None

        # .wat is a text format; read with an explicit encoding so behaviour
        # does not depend on the platform default.
        with path.open("r", encoding="utf-8") as fh:
            wasm_code = fh.read()

        module_stats = get_module_statistics(wasm_code)
        function_stats = get_function_statistics(wasm_code)
        return module_stats, function_stats

    except Exception as exc:
        # Keep worker silent except for diagnostics; never crash the pool.
        print(f"[worker] Error in {filename}: {exc!r}")
        return None
42
+
43
+
44
+ # --------------------------------------------------------------------------- #
45
+ # Main —runs in the parent process
46
+ # --------------------------------------------------------------------------- #
47
def _split_stats(stats: dict):
    """Partition *stats* into (relative_opcodes, structural_counts).

    Keys wrapped in square brackets ("[...]") are structural counters;
    every other key is an opcode entry.
    """
    structural = {
        k: v for k, v in stats.items()
        if k.startswith("[") and k.endswith("]")
    }
    opcodes = {k: v for k, v in stats.items() if k not in structural}
    return opcodes, structural


def main():
    """Analyse every .wat sample in parallel and persist the aggregated
    per-module and per-function statistics corpora as JSON files."""
    wasm_directory = "1_stack_effect_samples"
    max_file_size_bytes = 100 * 1024  # 100 KB

    file_list = os.listdir(wasm_directory)
    total_file_count = len(file_list)

    module_corpus = []
    module_corpus_relative_opcodes = []
    module_corpus_structural_counts = []
    function_corpus = []
    function_corpus_relative_opcodes = []
    function_corpus_structural_counts = []

    # Create a process pool; default size = #CPU cores
    with mp.Pool() as pool:
        worker_fn = partial(
            analyse_file,
            wasm_dir=wasm_directory,
            max_bytes=max_file_size_bytes,
        )

        # imap_unordered streams results back as soon as each worker finishes
        for idx, result in enumerate(
            pool.imap_unordered(worker_fn, file_list), start=1
        ):
            print(f"[main] Processed {idx}/{total_file_count}")

            if result is None:
                continue

            module_stats, function_stats = result

            # Module-level corpora: full stats plus the two partitions.
            add_dict_to_corpus(module_corpus, module_stats)
            opcodes, structural = _split_stats(module_stats)
            add_dict_to_corpus(module_corpus_relative_opcodes, opcodes)
            add_dict_to_corpus(module_corpus_structural_counts, structural)

            # Function-level corpora: same partitioning per function.
            for fn_stats in function_stats:
                add_dict_to_corpus(function_corpus, fn_stats)
                opcodes, structural = _split_stats(fn_stats)
                add_dict_to_corpus(function_corpus_relative_opcodes, opcodes)
                add_dict_to_corpus(function_corpus_structural_counts, structural)

    # ------------------------------------------------------------------- #
    # Persist the corpora as JSON for later analysis
    # ------------------------------------------------------------------- #
    out_dir = Path("1_stack_effect_samples_meta")
    out_dir.mkdir(exist_ok=True)

    (out_dir / "module_corpus.json").write_text(json.dumps(module_corpus))
    (out_dir / "function_corpus.json").write_text(json.dumps(function_corpus))
    (out_dir / "module_corpus_opcodes.json").write_text(json.dumps(module_corpus_relative_opcodes))
    (out_dir / "function_corpus_opcodes.json").write_text(json.dumps(function_corpus_relative_opcodes))
    (out_dir / "module_corpus_structural_counts.json").write_text(json.dumps(module_corpus_structural_counts))
    (out_dir / "function_corpus_structural_counts.json").write_text(json.dumps(function_corpus_structural_counts))

    print("[main] Saved corpora:",
          f"modules={len(module_corpus)}, functions={len(function_corpus)}")
129
+
130
+
131
+ if __name__ == "__main__":
132
+ # On Windows the "spawn" start‑method is mandatory for pools.
133
+ mp.freeze_support()
134
+ main()
dataset_analysis/visualization.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+
4
+ import matplotlib.pyplot as plt
5
+ import numpy as np
6
+
7
# Directory holding the corpora produced by dataset_analysis/process.py.
meta_dir = "1_stack_effect_samples_meta"
# Ensure the plot output directory exists. exist_ok avoids the
# check-then-create race of `os.path.exists(...)` followed by makedirs.
os.makedirs(f"{meta_dir}/visualization", exist_ok=True)
11
def plot_module_total_byte_distribution():
    """Histogram the total binary size ("[binary-bytes]") of every module
    in the corpus and save the figure as a PNG under the visualization dir."""
    # Context manager closes the JSON file (the original leaked the handle).
    with open(f"{meta_dir}/module_corpus.json") as fh:
        module_corpus = json.load(fh)
    print(module_corpus[0].keys())

    plt.figure(figsize=(5, 4))
    plt.hist([module["[binary-bytes]"] for module in module_corpus], bins=100, color='blue', alpha=0.7)
    plt.title("Distribution of Total Bytes in Modules")
    plt.xlabel("Total Bytes")
    plt.ylabel("Frequency")
    plt.grid(axis='y', alpha=0.75)
    plt.savefig(f"{meta_dir}/visualization/module_total_byte_distribution.png")
    plt.show()
23
+
24
def plot_functions_total_byte_distribution():
    """Histogram the total binary size ("[binary-bytes]") of functions whose
    "[total]" count is below 500 and save the figure as a PNG."""
    # Context manager closes the JSON file (the original leaked the handle).
    with open(f"{meta_dir}/function_corpus.json") as fh:
        function_corpus = json.load(fh)
    print(function_corpus[0].keys())

    plt.figure(figsize=(5, 4))
    # NOTE(review): the original comment said "Only plot 99th percentile",
    # but the cut-off is a fixed "[total]" < 500 — presumably an empirical
    # stand-in for that percentile; confirm against the corpus.
    plt.hist([function["[binary-bytes]"] for function in function_corpus if function["[total]"] < 500], bins=100, color='blue', alpha=0.7)
    plt.title("Distribution of Total Bytes in Functions")
    plt.xlabel("Total Bytes")
    plt.ylabel("Frequency")
    plt.grid(axis='y', alpha=0.75)
    plt.savefig(f"{meta_dir}/visualization/functions_total_byte_distribution.png")
    plt.show()
37
+
38
def plot_module_byte_code_distribution():
    """Box-plot the per-module share (percent) of each byte-code class
    across the module corpus and save the figure as a PNG.

    Keys wrapped in brackets ("[...]") are structural counters, not
    byte-code classes, and are excluded from the plot.
    """
    byte_code_classes = {}
    total_byte_counts = []
    # Context manager closes the JSON file (the original leaked the handle).
    with open(f"{meta_dir}/module_corpus.json") as fh:
        module_corpus = json.load(fh)

    for module in module_corpus:
        module_index = len(total_byte_counts)
        total_byte_count = 0
        for key, value in module.items():
            if key.startswith("[") and key.endswith("]"):
                continue  # structural counter, not an opcode class
            # Backfill zeros for modules seen before this key first appeared,
            # so every series stays index-aligned with total_byte_counts
            # (the original appended blindly, misaligning series whenever a
            # key was missing from some module).
            series = byte_code_classes.setdefault(key, [0] * module_index)
            series.append(value)
            total_byte_count += value
        # Pad classes absent from this module with an explicit zero.
        for series in byte_code_classes.values():
            if len(series) == module_index:
                series.append(0)
        total_byte_counts.append(total_byte_count)
    print("Byte code classes:", byte_code_classes)

    # Convert absolute counts to per-module percentages; a module with zero
    # opcode bytes contributes 0% (the original divided by zero).
    for key, series in byte_code_classes.items():
        byte_code_classes[key] = [
            (count / total) * 100 if total else 0.0
            for count, total in zip(series, total_byte_counts)
        ]

    # Sort classes by their median share, largest first.
    byte_code_classes = dict(sorted(byte_code_classes.items(), key=lambda item: np.median(item[1]), reverse=True))

    plt.figure(figsize=(5, 4))
    # Box plots summarise the spread per class; outliers are hidden.
    plt.boxplot(list(byte_code_classes.values()), labels=list(byte_code_classes.keys()), showfliers=False)
    plt.title("Distribution of Byte Code Classes")
    plt.xlabel("Byte Code Classes")
    plt.ylabel("Percentage")
    plt.xticks(rotation=90)
    plt.grid(axis='y', alpha=0.75)
    plt.tight_layout()
    plt.savefig(f"{meta_dir}/visualization/module_byte_code_distribution.png")
    plt.show()
74
+
75
def plot_functions_byte_code_distribution():
    """Box-plot the per-function share (percent) of each byte-code class
    across the function corpus and save the figure as a PNG.

    Keys wrapped in brackets ("[...]") are structural counters, not
    byte-code classes, and are excluded from the plot.
    """
    byte_code_classes = {}
    total_byte_counts = []
    # Context manager closes the JSON file (the original leaked the handle).
    with open(f"{meta_dir}/function_corpus.json") as fh:
        function_corpus = json.load(fh)

    for function in function_corpus:
        function_index = len(total_byte_counts)
        total_byte_count = 0
        for key, value in function.items():
            if key.startswith("[") and key.endswith("]"):
                continue  # structural counter, not an opcode class
            # Backfill zeros for functions seen before this key first
            # appeared, keeping every series index-aligned with
            # total_byte_counts (the original appended blindly, misaligning
            # series whenever a key was missing from some function).
            series = byte_code_classes.setdefault(key, [0] * function_index)
            series.append(value)
            total_byte_count += value
        # Pad classes absent from this function with an explicit zero.
        for series in byte_code_classes.values():
            if len(series) == function_index:
                series.append(0)
        total_byte_counts.append(total_byte_count)
    print("Byte code classes:", byte_code_classes)

    # Convert absolute counts to per-function percentages; a function with
    # zero opcode bytes contributes 0% (the original divided by zero).
    for key, series in byte_code_classes.items():
        byte_code_classes[key] = [
            (count / total) * 100 if total else 0.0
            for count, total in zip(series, total_byte_counts)
        ]

    # Sort classes by their median share, largest first.
    byte_code_classes = dict(sorted(byte_code_classes.items(), key=lambda item: np.median(item[1]), reverse=True))

    plt.figure(figsize=(5, 4))
    # Box plots summarise the spread per class; outliers are hidden.
    plt.boxplot(list(byte_code_classes.values()), labels=list(byte_code_classes.keys()), showfliers=False)
    plt.title("Distribution of Byte Code Classes")
    plt.xlabel("Byte Code Classes")
    plt.ylabel("Percentage")
    plt.xticks(rotation=90)
    plt.grid(axis='y', alpha=0.75)
    plt.tight_layout()
    plt.savefig(f"{meta_dir}/visualization/functions_byte_code_distribution.png")
    plt.show()
111
+
112
+
113
def main():
    """Generate the byte-code distribution box plots for both corpora.

    The total-byte histograms are currently disabled; re-enable the two
    commented calls below to regenerate them.
    """
    # plot_module_total_byte_distribution()
    # plot_functions_total_byte_distribution()
    for plot in (
        plot_module_byte_code_distribution,
        plot_functions_byte_code_distribution,
    ):
        plot()
118
+
119
+
120
+
121
if __name__ == "__main__":
    # Generate the plots only when executed as a script, not on import.
    main()
dataset_flags/croissant_metadata.jsonld ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "@context": {
3
+ "@version": 1.1,
4
+ "schema": "https://schema.org/",
5
+ "mlc": "https://mlcommons.org/croissant/schema/v0.9/",
6
+ "@vocab": "https://mlcommons.org/croissant/schema/v0.9/",
7
+ "name": "schema:name",
8
+ "description": "schema:description",
9
+ "license": "schema:license",
10
+ "url": "schema:url",
11
+ "authors": { "@id": "schema:author", "@container": "@list" },
12
+ "data": { "@id": "mlc:data", "@container": "@list" },
13
+ "features": { "@id": "mlc:features", "@container": "@list" },
14
+ "encodingFormat": "schema:encodingFormat",
15
+ "dataType": "mlc:dataType"
16
+ },
17
+ "@type": [ "schema:Dataset" ],
18
+ "name": "wasm-weaver-reachability-reasoning",
19
+ "description": "This dataset targets control-flow reasoning. Programs contain flags labelled ';;FLAG_1' … ';;FLAG_N'. The model must infer which flags are reachable given the program’s logic (branches, loops, dead code). Each record supplies the program, flag placements, and ground-truth reachability for every flag.",
20
+ "license": "https://creativecommons.org/licenses/by/4.0/",
21
+ "url": "https://huggingface.co/datasets/Morxos/WasmWeaver",
22
+ "authors": [
23
+ { "name": "Anonymous" }
24
+ ],
25
+ "data": [
26
+ {
27
+ "@type": "FileObject",
28
+ "name": "data.jsonl",
29
+ "encodingFormat": "application/jsonlines",
30
+ "features": [
31
+ { "name": "code", "dataType": "string" },
32
+ { "name": "flag_states", "dataType": "object" },
33
+ { "name": "return_values", "dataType": "array" },
34
+ { "name": "stack_values", "dataType": "array" }
35
+ ]
36
+ }
37
+ ]
38
+ }
dataset_flags/data.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
dataset_results/croissant_metadata.jsonld ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "@context": {
3
+ "@version": 1.1,
4
+ "schema": "https://schema.org/",
5
+ "mlc": "https://mlcommons.org/croissant/schema/v0.9/",
6
+ "@vocab": "https://mlcommons.org/croissant/schema/v0.9/",
7
+ "name": "schema:name",
8
+ "description": "schema:description",
9
+ "license": "schema:license",
10
+ "url": "schema:url",
11
+ "authors": { "@id": "schema:author", "@container": "@list" },
12
+ "data": { "@id": "mlc:data", "@container": "@list" },
13
+ "features": { "@id": "mlc:features", "@container": "@list" },
14
+ "encodingFormat": "schema:encodingFormat",
15
+ "dataType": "mlc:dataType"
16
+ },
17
+ "@type": [ "schema:Dataset" ],
18
+ "name": "wasm-weaver-result-reasoning",
19
+ "description": "This dataset evaluates a model’s ability to predict the final outputs of WebAssembly programs. Samples range from straight-line code to code with nested branches and loops. Each record contains the program text, any diagnostic flags, and the exact values the program returns when executed.",
20
+ "license": "https://creativecommons.org/licenses/by/4.0/",
21
+ "url": "https://huggingface.co/datasets/Morxos/WasmWeaver",
22
+ "authors": [
23
+ { "name": "Anonymous" }
24
+ ],
25
+ "data": [
26
+ {
27
+ "@type": "FileObject",
28
+ "name": "data.jsonl",
29
+ "encodingFormat": "application/jsonlines",
30
+ "features": [
31
+ { "name": "code", "dataType": "string" },
32
+ { "name": "flag_states", "dataType": "object" },
33
+ { "name": "return_values", "dataType": "array" },
34
+ { "name": "stack_values", "dataType": "array" }
35
+ ]
36
+ }
37
+ ]
38
+ }
dataset_results/data.jsonl ADDED
The diff for this file is too large to render. See raw diff