Commit ·
934a92d
1
Parent(s): 0750a1b
Add reformat_arrow script to the repo
Browse files- reformat_arrow.py +79 -0
reformat_arrow.py
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Re-format the idioms dataset to use its non-compact form in a json format friendly for uptake by pyarrow.
|
| 2 |
+
|
| 3 |
+
Also handy:
|
| 4 |
+
|
| 5 |
+
for BIN in "--" "--binary"; for OPT in O0 O{1,2,3}_noinline; echo $OPT $BIN; python reformat_arrow.py $BIN ~/Projects/idioms-data/published/idioms_dataset_{$OPT}_opt_parity; end; end
|
| 6 |
+
|
| 7 |
+
for opt in O0 O{1,2,3}_noinline; for t in binary function; for split in train test valid; echo $opt $t $split; set dir ~/Projects/idioms-realtype/by-{$t}-hex-rays-parity-{$opt}; mkdir -p "$dir"; cat arrow-idioms_dataset_{$opt}_opt_parity-{$t}.json | pv | jq -c .$split"[]" | zstd > "$dir/$split.jsonl.zst"; end; end; end
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
import argparse
|
| 12 |
+
import json
|
| 13 |
+
from typing import Optional, Any
|
| 14 |
+
|
| 15 |
+
from idioms.data.types import TypeInfo
|
| 16 |
+
from idioms.dataiter import MatchedFunctionDataset, MatchedBinaryDataset
|
| 17 |
+
from idioms.data.dataset import MatchedBinary, MatchedFunction
|
| 18 |
+
|
| 19 |
+
def to_json(fn, type2id: Optional[dict[TypeInfo, int]] = None) -> dict[str, Any]:
    """Convert a MatchedFunction or MatchedBinary into a JSON-serializable dict.

    Nested dicts are flattened into lists of (key, value) pairs because
    datasets/arrow really struggles with nested dicts. Each type is rendered
    as its declaration string unless a ``type2id`` mapping is supplied, in
    which case the type's integer id is emitted instead.
    """
    if isinstance(fn, MatchedBinary):
        # Binary-level record: serialize every contained function recursively.
        return {
            "binary_hash": fn.binary_hash,
            "repo": fn.repo,
            # datasets/arrow really struggles with nested dicts, so the call
            # graph becomes a list of ([caller], callees) pairs.
            "call_graph": [([caller], callees) for caller, callees in fn.call_graph.items()],
            "unmatched": [(key, value) for key, value in fn.unmatched.items()],
            "matched_functions": [to_json(matched, type2id) for matched in fn.functions],
        }

    def encode_type(typ):
        # Declaration string by default; integer id when a mapping was given.
        return typ.declaration("") if type2id is None else type2id[typ]

    variable_types = {name: encode_type(typ) for name, typ in fn.variable_types.items()}
    user_defined_types = [encode_type(typ) for typ in fn.user_defined_types]

    # The return type may not be a TypeInfo; fall back to its str() form.
    if hasattr(fn.return_type, "declaration"):
        return_type = fn.return_type.declaration("")
    else:
        return_type = str(fn.return_type)

    return {
        "name": fn.name,
        "canonical_name": fn.canonical_name,
        "repo": fn.repo,
        "decompiled_code": fn.decompiled_code,
        "canonical_decompiled_code": fn.canonical_decompiled_code,
        "original_code": fn.original_code,
        "canonical_original_code": fn.canonical_original_code,
        # Ignore code tokens for now; we'll use just unigram tokenization.
        # datasets/arrow really struggles with nested dicts, hence pair lists.
        "variable_types": list(variable_types.items()),
        "return_type": return_type,
        "user_defined_types": user_defined_types,
        "function_decls": fn.function_decls,
        "global_decls": fn.global_decls,
        "binary_hash": fn.binary_hash,
    }
|
| 56 |
+
|
| 57 |
+
def main():
    """Re-serialize an idioms tar dataset into one arrow-friendly JSON file.

    Reads train/valid/test tar shards from the dataset directory given on the
    command line, converts every entry with ``to_json``, and writes a single
    ``arrow-<dataset>-{binary,function}.json`` file into the working directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("dataset")
    parser.add_argument("--binary", action="store_true")
    args = parser.parse_args()
    ds_path = Path(args.dataset)

    # Binary-level vs function-level iteration over the same tar shards.
    ds_class = MatchedBinaryDataset if args.binary else MatchedFunctionDataset

    partitions = {
        "train": ds_class(ds_path.glob("train*.tar"), shuffle=False),
        "valid": ds_class(ds_path.glob("valid*.tar"), shuffle=False),
        "test": ds_class(ds_path.glob("test*.tar"), shuffle=False),
    }

    arrow = {split: [to_json(fn) for fn in dataset] for split, dataset in partitions.items()}

    suffix = "-binary" if args.binary else "-function"
    filename = f"arrow-{ds_path.name}" + suffix + ".json"
    with open(filename, "w") as fp:
        json.dump(arrow, fp, indent=2)
|
| 77 |
+
|
| 78 |
+
# Run only when executed as a script, so the module can be imported
# (e.g. for reusing to_json) without side effects.
if __name__ == "__main__":
    main()
|