"""
Turns PE binaries into 6-channel 3D tensors for a CNN.
Each channel encodes a different semantic signal so the model isn't just
memorizing raw byte patterns:
0 - raw byte values (normalized)
1 - local entropy (high = encrypted/compressed/packed)
2 - executable section mask (where the actual code lives)
3 - import density (proximity to import tables, behavioral signal)
4 - string density (ASCII-heavy regions = function names, strings, etc.)
5 - data presence mask (1 where we have real bytes, 0 where it's padding)
Bytes get folded into 3D via a Morton/Z-order curve so bytes that are close
in the file stay close in the volume. This preserves locality better than a
naive reshape.
Usage:
    python Generator.py -i ./samples -o ./tensors_6ch
"""
import argparse
import json
import struct
from pathlib import Path

import numpy as np
from tqdm import tqdm
class PEParserExtended:
"""
Rips apart PE headers and sections to get the bytes we actually care about,
plus metadata about what lives where (code vs imports vs data).
We skip resource/reloc/debug sections since they're mostly noise for
malware classification. .text, .rdata, .idata, .data are what matter.
"""
RELEVANT_SECTIONS = {
b'.text', b'.code', b'CODE', b'.TEXT',
b'.rdata', b'.rodata', b'.idata',
b'.data', b'.DATA',
b'.edata',
}
SKIP_SECTIONS = {
b'.rsrc', b'.reloc', b'.pdata', b'.tls',
b'.debug', b'.didat', b'.sxdata',
}
CODE_SECTIONS = {b'.text', b'.code', b'CODE', b'.TEXT'}
IMAGE_DIRECTORY_ENTRY_EXPORT = 0
IMAGE_DIRECTORY_ENTRY_IMPORT = 1
IMAGE_DIRECTORY_ENTRY_IAT = 12
def __init__(self, filepath: str):
self.filepath = filepath
self.valid = False
self.headers = b''
self.sections = {}
self.section_info = []
self.data_directories = []
self.import_ranges = []
self._parse()
def _parse(self):
try:
with open(self.filepath, 'rb') as f:
if not self._validate_pe(f):
return
self._parse_data_directories(f)
self._find_import_ranges(f)
f.seek(0)
self.headers = f.read(self.pe_header_end)
self._parse_sections(f)
self.valid = True
except (IOError, OSError, struct.error):
self.valid = False
def _validate_pe(self, f) -> bool:
f.seek(0, 2)
self.file_size = f.tell()
if self.file_size < 64:
return False
f.seek(0)
if f.read(2) != b'MZ':
return False
f.seek(0x3C)
pe_offset = struct.unpack('<I', f.read(4))[0]
if pe_offset + 4 > self.file_size:
return False
f.seek(pe_offset)
if f.read(4) != b'PE\x00\x00':
return False
self.pe_offset = pe_offset
self.machine = struct.unpack('<H', f.read(2))[0]
self.num_sections = struct.unpack('<H', f.read(2))[0]
f.read(12) # skip timestamp, symbol table ptr, symbol count
self.optional_header_size = struct.unpack('<H', f.read(2))[0]
self.characteristics = struct.unpack('<H', f.read(2))[0]
self.optional_header_offset = pe_offset + 24
self.section_table_offset = pe_offset + 24 + self.optional_header_size
self.pe_header_end = self.section_table_offset + (self.num_sections * 40)
return True
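    # Layout behind the offsets above: pe_offset points at the 4-byte "PE\0\0"
    # signature, followed by the 20-byte COFF header (Machine, NumberOfSections,
    # TimeDateStamp, PointerToSymbolTable, NumberOfSymbols, SizeOfOptionalHeader,
    # Characteristics). Hence the optional header starts at pe_offset + 24, the
    # section table sits right after it, and each section header is 40 bytes.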
def _parse_data_directories(self, f):
"""Grab the data directory entries so we can find import/export tables."""
f.seek(self.optional_header_offset)
magic = struct.unpack('<H', f.read(2))[0]
# PE32 vs PE32+ have the data dirs at different offsets
if magic == 0x10b:
f.seek(self.optional_header_offset + 92)
elif magic == 0x20b:
f.seek(self.optional_header_offset + 108)
else:
return
num_data_dirs = struct.unpack('<I', f.read(4))[0]
num_data_dirs = min(num_data_dirs, 16)
for i in range(num_data_dirs):
rva = struct.unpack('<I', f.read(4))[0]
size = struct.unpack('<I', f.read(4))[0]
self.data_directories.append((rva, size))
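    # Each data directory entry is 8 bytes: a 4-byte RVA followed by a 4-byte size.
    # Index 1 is the import directory and index 12 the IAT (see the
    # IMAGE_DIRECTORY_ENTRY_* constants above); those are the two entries
    # _find_import_ranges cares about.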
    def _rva_to_file_offset(self, rva, f):
        """
        Walk the section table to figure out where an RVA lands on disk.
        Saves and restores the caller's file position, since callers (e.g. the
        import descriptor walk) read from f sequentially.
        """
        saved_pos = f.tell()
        try:
            f.seek(self.section_table_offset)
            for i in range(self.num_sections):
                section_header = f.read(40)
                if len(section_header) < 40:
                    break
                virtual_size = struct.unpack('<I', section_header[8:12])[0]
                virtual_addr = struct.unpack('<I', section_header[12:16])[0]
                raw_size = struct.unpack('<I', section_header[16:20])[0]
                raw_offset = struct.unpack('<I', section_header[20:24])[0]
                if virtual_addr <= rva < virtual_addr + max(virtual_size, raw_size):
                    return raw_offset + (rva - virtual_addr)
            return None
        finally:
            f.seek(saved_pos)
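    # Worked example: a section with virtual_addr 0x1000, raw_offset 0x400 and
    # raw_size 0x2000 maps RVA 0x1234 to file offset 0x400 + (0x1234 - 0x1000) = 0x634.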
def _find_import_ranges(self, f):
"""Use the actual data directory entries to locate import data on disk."""
self.import_ranges = []
if len(self.data_directories) > self.IMAGE_DIRECTORY_ENTRY_IMPORT:
rva, size = self.data_directories[self.IMAGE_DIRECTORY_ENTRY_IMPORT]
if rva > 0 and size > 0:
offset = self._rva_to_file_offset(rva, f)
if offset is not None:
self.import_ranges.append((offset, size))
self._parse_import_descriptors(f, offset, rva)
# IAT is separate from the import directory
if len(self.data_directories) > self.IMAGE_DIRECTORY_ENTRY_IAT:
rva, size = self.data_directories[self.IMAGE_DIRECTORY_ENTRY_IAT]
if rva > 0 and size > 0:
offset = self._rva_to_file_offset(rva, f)
if offset is not None:
self.import_ranges.append((offset, size))
def _parse_import_descriptors(self, f, import_dir_offset, import_dir_rva):
"""
Chase the import descriptors to find DLL name strings and thunk arrays.
These are the regions that tell us what APIs the binary calls.
"""
try:
f.seek(import_dir_offset)
max_descriptors = 1000 # way beyond any legit PE, just a safety net
for _ in range(max_descriptors):
desc = f.read(20)
if len(desc) < 20:
break
original_first_thunk = struct.unpack('<I', desc[0:4])[0]
name_rva = struct.unpack('<I', desc[12:16])[0]
first_thunk = struct.unpack('<I', desc[16:20])[0]
# null terminator = end of import descriptors
if name_rva == 0 and first_thunk == 0 and original_first_thunk == 0:
break
if name_rva > self.file_size or first_thunk > self.file_size:
break
# grab the DLL name string region
if name_rva > 0 and len(self.import_ranges) < 500:
name_offset = self._rva_to_file_offset(name_rva, f)
if name_offset is not None and name_offset < self.file_size:
self.import_ranges.append((name_offset, min(256, self.file_size - name_offset)))
# grab the thunk array (function name hints / ordinals)
thunk_rva = original_first_thunk if original_first_thunk else first_thunk
if thunk_rva > 0 and len(self.import_ranges) < 500:
thunk_offset = self._rva_to_file_offset(thunk_rva, f)
if thunk_offset is not None and thunk_offset < self.file_size:
self.import_ranges.append((thunk_offset, min(512, self.file_size - thunk_offset)))
        except Exception:
            pass  # malware loves corrupt import tables, just bail
def _parse_sections(self, f):
f.seek(self.section_table_offset)
current_offset = len(self.headers)
for i in range(self.num_sections):
section_header = f.read(40)
if len(section_header) < 40:
break
name = section_header[0:8].rstrip(b'\x00')
virtual_size = struct.unpack('<I', section_header[8:12])[0]
virtual_addr = struct.unpack('<I', section_header[12:16])[0]
raw_size = struct.unpack('<I', section_header[16:20])[0]
raw_offset = struct.unpack('<I', section_header[20:24])[0]
characteristics = struct.unpack('<I', section_header[36:40])[0]
is_code = (characteristics & 0x20000000) != 0 # IMAGE_SCN_MEM_EXECUTE
info = {
'name': name,
'name_str': name.decode('utf-8', errors='replace'),
'virtual_size': virtual_size,
'virtual_addr': virtual_addr,
'raw_size': raw_size,
'raw_offset': raw_offset,
'characteristics': characteristics,
'is_code': is_code or name in self.CODE_SECTIONS,
'extracted': False,
'output_start': None,
'output_end': None,
}
is_relevant = name in self.RELEVANT_SECTIONS
is_skip = name in self.SKIP_SECTIONS
if (is_relevant or is_code) and not is_skip:
if raw_size > 0 and raw_offset + raw_size <= self.file_size:
current_pos = f.tell()
f.seek(raw_offset)
section_data = f.read(raw_size)
f.seek(current_pos)
self.sections[name] = section_data
info['extracted'] = True
info['output_start'] = current_offset
info['output_end'] = current_offset + len(section_data)
current_offset += len(section_data)
self.section_info.append(info)
def get_relevant_bytes(self) -> bytes:
if not self.valid:
return b''
result = bytearray(self.headers)
# deterministic ordering: code first, then read-only data, then writable
section_order = [
b'.text', b'.code', b'CODE', b'.TEXT',
b'.rdata', b'.rodata',
b'.idata',
b'.data', b'.DATA',
b'.edata',
]
for name in section_order:
if name in self.sections:
result.extend(self.sections[name])
# anything we didn't explicitly order goes at the end
for name, data in self.sections.items():
if name not in section_order:
result.extend(data)
return bytes(result)
def get_section_masks(self, total_length: int) -> dict:
"""
Build per-byte masks that say "this byte is code" or "this byte is
import-related". We need these as channels for the CNN.
Uses range-based mapping instead of a byte-by-byte dict because
that was absurdly slow on large binaries.
"""
code_mask = np.zeros(total_length, dtype=np.float32)
import_mask = np.zeros(total_length, dtype=np.float32)
header_len = len(self.headers)
section_order = [
b'.text', b'.code', b'CODE', b'.TEXT',
b'.rdata', b'.rodata',
b'.idata',
b'.data', b'.DATA',
b'.edata',
]
# we need to track the mapping from original file offsets to our
# rearranged output offsets so we can place import ranges correctly
offset_mappings = []
offset_mappings.append((0, 0, min(header_len, total_length)))
output_offset = header_len
for name in section_order:
if name in self.sections and output_offset < total_length:
section_len = len(self.sections[name])
for info in self.section_info:
if info['name'] == name and info['extracted']:
file_offset = info['raw_offset']
usable_len = min(section_len, total_length - output_offset)
offset_mappings.append((file_offset, output_offset, usable_len))
if info['is_code']:
end = min(output_offset + section_len, total_length)
code_mask[output_offset:end] = 1.0
break
output_offset += section_len
for name, data in self.sections.items():
if name not in section_order and output_offset < total_length:
section_len = len(data)
for info in self.section_info:
if info['name'] == name and info['extracted']:
file_offset = info['raw_offset']
usable_len = min(section_len, total_length - output_offset)
offset_mappings.append((file_offset, output_offset, usable_len))
if info['is_code']:
end = min(output_offset + section_len, total_length)
code_mask[output_offset:end] = 1.0
break
output_offset += section_len
# now project the import ranges (which are in original file coords)
# into our rearranged output coords
for import_file_offset, import_size in self.import_ranges:
for file_start, out_start, length in offset_mappings:
file_end = file_start + length
if import_file_offset < file_end and import_file_offset + import_size > file_start:
overlap_start = max(import_file_offset, file_start)
overlap_end = min(import_file_offset + import_size, file_end)
out_overlap_start = out_start + (overlap_start - file_start)
out_overlap_end = out_start + (overlap_end - file_start)
out_overlap_start = max(0, min(out_overlap_start, total_length))
out_overlap_end = max(0, min(out_overlap_end, total_length))
if out_overlap_end > out_overlap_start:
import_mask[out_overlap_start:out_overlap_end] = 1.0
return {
'code': code_mask,
'import': import_mask
}
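    # Worked example of the remapping: if .text sits at raw_offset 0x400 in the file
    # and lands at output offset len(headers) in the rearranged byte stream, the
    # mapping tuple is (0x400, len(headers), len(.text)). An import range recorded at
    # file offset 0x500 then projects to output offset len(headers) + 0x100.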
def get_stats(self) -> dict:
extracted_size = len(self.headers) + sum(len(d) for d in self.sections.values())
return {
'file_size': self.file_size,
'extracted_size': extracted_size,
'compression_ratio': extracted_size / self.file_size if self.file_size > 0 else 0,
'num_sections': self.num_sections,
'extracted_sections': [s['name_str'] for s in self.section_info if s['extracted']],
'skipped_sections': [s['name_str'] for s in self.section_info if not s['extracted']],
'import_ranges_found': len(self.import_ranges),
}
# --- Feature extraction ---
# Each of these produces a 1D float32 array the same length as the input,
# which later gets folded into the 3D volume as a separate channel.
def compute_block_entropy(data: np.ndarray, block_size: int = 256) -> np.ndarray:
"""
Shannon entropy per fixed-size block, upsampled back to full resolution.
Using blocks instead of a sliding window keeps this O(n) and avoids
the weird edge artifacts you get with windowed approaches.
"""
n = len(data)
if n == 0:
return np.zeros(0, dtype=np.float32)
n_blocks = max(1, (n + block_size - 1) // block_size)
block_entropies = np.zeros(n_blocks, dtype=np.float32)
for i in range(n_blocks):
start = i * block_size
end = min(start + block_size, n)
block = data[start:end]
if len(block) == 0:
continue
counts = np.bincount(block, minlength=256)
probs = counts[counts > 0] / len(block)
block_entropies[i] = -np.sum(probs * np.log2(probs)) / 8.0 # normalize to [0,1]
entropy = np.repeat(block_entropies, block_size)[:n]
return entropy.astype(np.float32)
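# Worked example: a 256-byte block of one repeated value has entropy 0.0, while a
# block containing every byte value exactly once has -sum(1/256 * log2(1/256)) = 8
# bits, which normalizes to 1.0. Packed or encrypted regions typically sit near 1.0.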
def compute_string_density(data: np.ndarray, window_size: int = 64) -> np.ndarray:
"""
Sliding window ratio of printable ASCII bytes. Regions with high density
    are likely string tables, function names, or debug info: stuff that's
    semantically meaningful even if it's not code.
"""
n = len(data)
if n == 0:
return np.zeros(n, dtype=np.float32)
is_printable = ((data >= 32) & (data <= 126)).astype(np.float32)
kernel = np.ones(window_size) / window_size
density = np.convolve(is_printable, kernel, mode='same').astype(np.float32)
return density
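# Worked example: a 64-byte window where 48 bytes fall in the printable ASCII
# range (32-126) gets a density of 48/64 = 0.75 at its center byte.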
def compute_import_density(data: np.ndarray, import_mask: np.ndarray,
window_size: int = 128) -> np.ndarray:
"""
Spread the binary import mask with a gaussian kernel so nearby bytes
also get some import signal. The idea is that the bytes surrounding
import tables are contextually related even if they're not literally
inside the directory entry.
"""
n = len(data)
if n == 0:
return np.zeros(n, dtype=np.float32)
kernel = np.exp(-np.linspace(-2, 2, window_size)**2)
kernel = kernel / kernel.sum()
density = np.convolve(import_mask, kernel, mode='same').astype(np.float32)
if density.max() > 0:
density = density / density.max()
return density
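# Note: a single marked import byte spreads into a bump roughly window_size bytes
# wide, peaking over the byte itself; the final max-normalization keeps the channel
# in [0, 1] regardless of how many import ranges were found.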
# --- Space-filling curve ---
class SpaceFillingCurve:
"""
Morton / Z-order curve: maps a linear byte index to (x, y, z) coords
by interleaving bits. This keeps bytes that are close in the file close
in 3D space, which matters for conv filters.
"""
def __init__(self, order: int):
self.order = order
self.size = 2 ** order
self.total_points = self.size ** 3
self._build_lookup_table()
def _build_lookup_table(self):
print(f"Building space-filling curve lookup table ({self.size}³ = {self.total_points:,} points)...")
self.lookup = np.zeros((self.total_points, 3), dtype=np.int32)
for d in tqdm(range(self.total_points), desc="Building lookup", leave=False):
x = y = z = 0
for i in range(self.order):
x |= ((d >> (3 * i)) & 1) << i
y |= ((d >> (3 * i + 1)) & 1) << i
z |= ((d >> (3 * i + 2)) & 1) << i
self.lookup[d] = (x, y, z)
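    # Worked example of the bit interleaving: for d = 53 = 0b110101, bits at
    # positions 0 and 3 go to x, positions 1 and 4 to y, positions 2 and 5 to z,
    # giving x = 0b01 = 1, y = 0b10 = 2, z = 0b11 = 3, so byte 53 lands at voxel (1, 2, 3).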
def get_all_coords(self) -> np.ndarray:
return self.lookup
# --- Core conversion: PE file -> 6-channel 3D tensor ---
def pe_to_multichannel_3d(
filepath: str,
order: int = 6,
curve: SpaceFillingCurve = None,
entropy_block_size: int = 256,
string_window: int = 64,
import_window: int = 128,
) -> tuple:
"""
The main pipeline. Parses the PE, extracts relevant sections, computes
all the per-byte features, then folds everything into a 3D volume via
the Morton curve.
Returns (tensor [6, D, H, W], stats_dict).
"""
if curve is None:
curve = SpaceFillingCurve(order)
pe = PEParserExtended(filepath)
if not pe.valid:
raise ValueError(f"Invalid PE file: {filepath}")
relevant_bytes = pe.get_relevant_bytes()
stats = pe.get_stats()
# truncate to what fits in the volume (or pad with zeros implicitly)
max_bytes = curve.total_points
bytes_array = np.frombuffer(relevant_bytes[:max_bytes], dtype=np.uint8)
num_bytes = len(bytes_array)
masks = pe.get_section_masks(num_bytes)
# compute all 1D feature channels
raw_normalized = bytes_array.astype(np.float32) / 255.0
entropy = compute_block_entropy(bytes_array, block_size=entropy_block_size)
code_mask = masks['code']
import_density = compute_import_density(bytes_array, masks['import'], window_size=import_window)
string_density = compute_string_density(bytes_array, window_size=string_window)
# scatter 1D features into the 3D volume along the curve
tensor = np.zeros((6, curve.size, curve.size, curve.size), dtype=np.float32)
coords = curve.get_all_coords()[:num_bytes]
tensor[0, coords[:, 0], coords[:, 1], coords[:, 2]] = raw_normalized
tensor[1, coords[:, 0], coords[:, 1], coords[:, 2]] = entropy
tensor[2, coords[:, 0], coords[:, 1], coords[:, 2]] = code_mask
tensor[3, coords[:, 0], coords[:, 1], coords[:, 2]] = import_density
tensor[4, coords[:, 0], coords[:, 1], coords[:, 2]] = string_density
tensor[5, coords[:, 0], coords[:, 1], coords[:, 2]] = 1.0 # data presence mask
fill_ratio = num_bytes / curve.total_points
stats['bytes_mapped'] = num_bytes
stats['fill_ratio'] = fill_ratio
stats['channels'] = ['raw_bytes', 'entropy', 'code_mask', 'import_density', 'string_density', 'data_mask']
stats['channel_stats'] = {
'raw_bytes_mean': float(np.mean(raw_normalized)),
'entropy_mean': float(np.mean(entropy)),
'code_fraction': float(np.mean(code_mask)),
'import_fraction': float(np.mean(masks['import'])),
'string_density_mean': float(np.mean(string_density)),
}
return tensor, stats
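# Single-file usage sketch (the path is illustrative):
#   curve = SpaceFillingCurve(order=6)
#   tensor, stats = pe_to_multichannel_3d("sample.exe", order=6, curve=curve)
#   # tensor.shape == (6, 64, 64, 64); stats['fill_ratio'] says how much of the
#   # volume is backed by real bytes (channel 5 marks exactly those voxels).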
# --- File discovery and batch processing ---
def is_valid_pe(filepath: str) -> bool:
"""Quick sniff test: MZ magic + valid PE offset + PE signature."""
try:
with open(filepath, 'rb') as f:
f.seek(0, 2)
if f.tell() < 64:
return False
f.seek(0)
if f.read(2) != b'MZ':
return False
f.seek(0x3C)
pe_offset = struct.unpack('<I', f.read(4))[0]
f.seek(0, 2)
if pe_offset + 4 > f.tell():
return False
f.seek(pe_offset)
if f.read(4) != b'PE\x00\x00':
return False
return True
    except (IOError, OSError, struct.error):
return False
def find_pe_files(input_dir: str, min_size: int = 10*1024, max_size: int = 50*1024*1024) -> list:
input_path = Path(input_dir)
if not input_path.exists():
raise ValueError(f"Input directory does not exist: {input_dir}")
pe_files = []
all_files = list(input_path.rglob('*'))
skipped_small = 0
skipped_large = 0
skipped_invalid = 0
print(f"Scanning {len(all_files)} items for valid PE files...")
print(f"Size filter: {min_size/1024:.1f}KB - {max_size/1024/1024:.1f}MB")
for filepath in tqdm(all_files, desc="Validating PE files"):
if not filepath.is_file():
continue
try:
file_size = filepath.stat().st_size
except OSError:
continue
if file_size < min_size:
skipped_small += 1
continue
if file_size > max_size:
skipped_large += 1
continue
if is_valid_pe(str(filepath)):
pe_files.append(filepath)
else:
skipped_invalid += 1
print(f"\nFiltering results:")
print(f" Valid PE files: {len(pe_files)}")
print(f" Too small (<{min_size/1024:.0f}KB): {skipped_small}")
print(f" Too large (>{max_size/1024/1024:.0f}MB): {skipped_large}")
print(f" Invalid PE: {skipped_invalid}")
return pe_files
def process_pe_files(pe_files: list, output_dir: str, order: int = 6) -> dict:
output_path = Path(output_dir)
output_path.mkdir(parents=True, exist_ok=True)
curve = SpaceFillingCurve(order)
metadata = {
'order': order,
'grid_size': curve.size,
'max_bytes': curve.total_points,
'channels': 6,
'channel_names': ['raw_bytes', 'entropy', 'code_mask', 'import_density', 'string_density', 'data_mask'],
'extraction_mode': 'multichannel_semantic',
'files': {}
}
print(f"\nProcessing {len(pe_files)} PE files...")
print(f"Grid size: {curve.size}³ = {curve.total_points:,} voxels")
print(f"Output channels: 6 (raw, entropy, code, import, strings, data_mask)\n")
for filepath in tqdm(pe_files, desc="Converting to 6ch 3D tensors"):
try:
tensor, stats = pe_to_multichannel_3d(str(filepath), order, curve)
safe_name = "".join(c if c.isalnum() or c in '._-' else '_' for c in filepath.name)
output_name = f"{safe_name}.npy"
output_file = output_path / output_name
# handle filename collisions
counter = 1
base_safe_name = safe_name
while output_file.exists():
safe_name = f"{base_safe_name}_{counter}"
output_name = f"{safe_name}.npy"
output_file = output_path / output_name
counter += 1
np.save(output_file, tensor)
metadata['files'][str(filepath)] = {
'output': str(output_file.name),
'original_size': stats['file_size'],
'extracted_size': stats['extracted_size'],
'compression_ratio': stats['compression_ratio'],
'bytes_mapped': stats['bytes_mapped'],
'fill_ratio': stats['fill_ratio'],
'extracted_sections': stats['extracted_sections'],
'skipped_sections': stats['skipped_sections'],
'import_ranges_found': stats.get('import_ranges_found', 0),
}
except Exception as e:
tqdm.write(f"Error processing {filepath.name}: {e}")
metadata['files'][str(filepath)] = {'error': str(e)}
metadata_file = output_path / 'metadata.json'
with open(metadata_file, 'w') as f:
json.dump(metadata, f, indent=2)
return metadata
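# Loading a saved tensor for training, as a sketch (assumes PyTorch, which this
# script does not itself require; the filename is illustrative):
#   vol = np.load("tensors_6ch/some_sample.exe.npy")   # float32, (6, 64, 64, 64)
#   x = torch.from_numpy(vol).unsqueeze(0)             # (1, 6, 64, 64, 64) for a Conv3d model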
def print_dataset_stats(metadata: dict):
"""Dump fill ratio distribution. important to check before training
since fill ratio can be a spurious feature if it correlates with labels."""
files_data = [v for v in metadata['files'].values() if 'error' not in v]
if not files_data:
print("No successfully processed files!")
return
fill_ratios = [f['fill_ratio'] for f in files_data]
print("\n" + "=" * 60)
print("DATASET STATISTICS")
print("=" * 60)
print(f"Files processed: {len(files_data)}")
print(f"Grid size: {metadata['grid_size']}³")
print(f"Channels: {metadata['channels']} ({', '.join(metadata['channel_names'])})")
print(f"\nFILL RATIO DISTRIBUTION:")
print(f" Min: {min(fill_ratios):.4f} ({min(fill_ratios)*100:.1f}%)")
print(f" Max: {max(fill_ratios):.4f} ({max(fill_ratios)*100:.1f}%)")
print(f" Mean: {np.mean(fill_ratios):.4f} ({np.mean(fill_ratios)*100:.1f}%)")
print(f" Std: {np.std(fill_ratios):.4f}")
print(f" Median: {np.median(fill_ratios):.4f}")
buckets = [0, 0.1, 0.25, 0.5, 0.75, 1.0]
print(f"\n Distribution:")
for i in range(len(buckets)-1):
count = sum(1 for r in fill_ratios if buckets[i] <= r < buckets[i+1])
pct = count / len(fill_ratios) * 100
bar = "█" * int(pct / 5)
print(f" {buckets[i]:.2f}-{buckets[i+1]:.2f}: {count:4d} ({pct:5.1f}%) {bar}")
full_count = sum(1 for r in fill_ratios if r >= 0.99)
print(f"\n Tensors at 100% fill: {full_count} ({full_count/len(fill_ratios)*100:.1f}%)")
if full_count < len(fill_ratios) * 0.5:
print(" WARNING: Fill ratio varies significantly!")
print(" Check correlation with labels before training, this can be a confound.")
def save_fill_ratio_report(metadata: dict, output_path: Path):
    files_data = [(k, v) for k, v in metadata['files'].items() if 'error' not in v]
    if not files_data:
        print("No successfully processed files; skipping fill ratio report.")
        return
    report = {
'total_files': len(files_data),
'fill_ratios': {k: v['fill_ratio'] for k, v in files_data},
'statistics': {
'min': min(v['fill_ratio'] for _, v in files_data),
'max': max(v['fill_ratio'] for _, v in files_data),
'mean': float(np.mean([v['fill_ratio'] for _, v in files_data])),
'std': float(np.std([v['fill_ratio'] for _, v in files_data])),
'median': float(np.median([v['fill_ratio'] for _, v in files_data])),
}
}
with open(output_path / 'fill_ratio_report.json', 'w') as f:
json.dump(report, f, indent=2)
print(f"Fill ratio report saved to {output_path / 'fill_ratio_report.json'}")
def main():
parser = argparse.ArgumentParser(
description='Convert PE files to 6-channel 3D tensors with semantic features'
)
parser.add_argument('--input_dir', '-i', type=str, required=True,
help='Input directory containing PE files')
parser.add_argument('--output_dir', '-o', type=str, required=True,
help='Output directory for tensor files')
parser.add_argument('--order', type=int, default=6, choices=[4, 5, 6, 7],
help='Curve order. Grid = 2^order. 6=64³. Default: 6')
parser.add_argument('--min_size', type=int, default=10,
help='Minimum file size in KB (default: 10)')
parser.add_argument('--max_size', type=int, default=50,
help='Maximum file size in MB (default: 50)')
args = parser.parse_args()
print("=" * 60)
print("PE -> 6-CHANNEL 3D TENSOR CONVERTER")
print("=" * 60)
print("Channels: raw bytes | entropy | code mask | import density | string density | data mask")
print("=" * 60)
min_bytes = args.min_size * 1024
max_bytes = args.max_size * 1024 * 1024
pe_files = find_pe_files(args.input_dir, min_size=min_bytes, max_size=max_bytes)
if not pe_files:
print("\nNo valid PE files found.")
return
print(f"\nFound {len(pe_files)} valid PE files")
metadata = process_pe_files(pe_files, args.output_dir, order=args.order)
successful = sum(1 for v in metadata['files'].values() if 'error' not in v)
failed = len(metadata['files']) - successful
print(f"\nDone. {successful}/{len(pe_files)} succeeded.")
if failed > 0:
print(f" Failed: {failed}")
print(f" Output: {args.output_dir}")
print_dataset_stats(metadata)
save_fill_ratio_report(metadata, Path(args.output_dir))
if __name__ == '__main__':
main()